code | apis | extract_api
---|---|---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jared
"""
import pandas as pd
import pymongo
import json
from os import listdir
from os.path import isfile, join
import multiprocessing as mp
import numpy as np
import dbConfig
from builder.dummyCrystalBuilder import processDummyCrystals
from ml.feature import getCompFeature
def import_content(db, filename, collection):
    data = pd.read_csv(filename)
    data = data.dropna()
    data_json = json.loads(data.to_json(orient='records'))
    db[collection].insert_many(data_json)

def update_database(db, folder, collection):
    filepaths = [f for f in listdir(folder) if
                 (isfile(join(folder, f)) and f.endswith('.csv'))]
    db[collection].delete_many({})
    for filename in filepaths:
        import_content(db, folder + filename, collection)
    print('Loading ' + str(db[collection].count()) +
          ' items from ' + collection + '...')
    db[collection].aggregate([
        {
            "$lookup": {
                "from": collection,
                "localField": "crystal_id",
                "foreignField": "crystal_id",
                "as": "fromItems"
            }
        },
        {
            "$replaceRoot": {"newRoot": {"$mergeObjects":
                [{"$arrayElemAt": ["$fromItems", 0]},
                 "$$ROOT"]}}
        },
        {"$project": {"fromItems": 0}},
        {"$out": collection + "_aggregated"}
    ])
    print('Done.')

def parallelize(df, numProcesses, func):
    df_split = np.array_split(df, numProcesses)
    pool = mp.Pool(processes=numProcesses)
    results = pool.map(func, df_split)
    pool.close()
    pool.join()
    results_df = pd.concat(results)
    return results_df
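# Usage note (illustrative): `func` must accept a DataFrame chunk and return a
# DataFrame so the per-process results can be re-assembled with pd.concat, e.g.
#   features = parallelize(df, 4, getCompFeature)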
def process_features(db, **kwargs):
    df = pd.DataFrame(list(db['qw_outputs_aggregated'].find()))
    if dbConfig.dummy == True:
        df = processDummyCrystals(df)
    print('Processing Features... ')
    df = df.drop(df[df['nIterations'] >= 201].index).copy()
    if kwargs['numProcesses'] > 1:
        feature = parallelize(df, kwargs['numProcesses'], getCompFeature)
    else:
        feature = getCompFeature(df)
    print('Len features', len(feature.columns))
    if dbConfig.saveFeatures == True:
        feature.to_csv(dbConfig.saveFeaturesPath +
                       dbConfig.saveFeaturesFile, index=False)
    print('Done.')

def getDB():
    client = pymongo.MongoClient(dbConfig.host, dbConfig.port)
    return(client['perovskites'])

def main():
    db = getDB()
    update_database(db, dbConfig.crystalDBFolder, 'qw_outputs')
    process_features(db, numProcesses=4)
    update_database(db, dbConfig.featureDBFolder, 'features')

if __name__ == "__main__":
    main()
|
[
"pymongo.MongoClient",
"os.listdir",
"builder.dummyCrystalBuilder.processDummyCrystals",
"pandas.read_csv",
"ml.feature.getCompFeature",
"multiprocessing.Pool",
"numpy.array_split",
"os.path.join",
"pandas.concat"
] |
[((397, 418), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (408, 418), True, 'import pandas as pd\n'), ((1600, 1632), 'numpy.array_split', 'np.array_split', (['df', 'numProcesses'], {}), '(df, numProcesses)\n', (1614, 1632), True, 'import numpy as np\n'), ((1645, 1676), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'numProcesses'}), '(processes=numProcesses)\n', (1652, 1676), True, 'import multiprocessing as mp\n'), ((1777, 1795), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (1786, 1795), True, 'import pandas as pd\n'), ((2528, 2577), 'pymongo.MongoClient', 'pymongo.MongoClient', (['dbConfig.host', 'dbConfig.port'], {}), '(dbConfig.host, dbConfig.port)\n', (2547, 2577), False, 'import pymongo\n'), ((1971, 1995), 'builder.dummyCrystalBuilder.processDummyCrystals', 'processDummyCrystals', (['df'], {}), '(df)\n', (1991, 1995), False, 'from builder.dummyCrystalBuilder import processDummyCrystals\n'), ((2237, 2255), 'ml.feature.getCompFeature', 'getCompFeature', (['df'], {}), '(df)\n', (2251, 2255), False, 'from ml.feature import getCompFeature\n'), ((631, 646), 'os.listdir', 'listdir', (['folder'], {}), '(folder)\n', (638, 646), False, 'from os import listdir\n'), ((676, 691), 'os.path.join', 'join', (['folder', 'f'], {}), '(folder, f)\n', (680, 691), False, 'from os.path import isfile, join\n')]
|
def polyFit(xData, yData, degree):
    fitValues = np.polyfit(xData, yData, degree)
    yFit = np.zeros(len(xData))
    for i in range(degree+1):
        yFit = yFit + xData**(degree-i)*fitValues[i]

    def function(x):
        func = 0
        for i in fitValues:
            func = func*x + i
        return func
    return (fitValues, function)
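# Illustrative use: `coeffs, poly = polyFit(x, y, 3)` returns the polyfit
# coefficients together with a callable that evaluates the fitted polynomial via
# Horner's scheme, e.g. `poly(2.0)` gives the fitted value at x = 2.0.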
if __name__ == "__main__":
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import pltStyle  # used for formatting the plots
    # read some data
    data = pd.read_csv("polyFit.csv", header=None, names=["x", "y"])
    # create a new figure object
    fig = plt.figure()
    # create axis and a new subplot within the figure
    ax = fig.add_subplot(1, 1, 1)
    # plot the measurement data
    ax.plot(data.x, data.y, marker="+", label="Measurement data")
    # add polynomial fits with different degrees
    for i in range(1, 7, 1):
        ax.plot(data.x, polyFit(data.x, data.y, i)[1](data.x),
                label="Polynomial fit degree = " + str(i))
    # create the legend and set its position
    ax.legend(loc="lower left")
    # manually set the axes limits and label them
    ax.set_xlim([0, 12])
    ax.set_ylim([-2, 1.1])
    ax.set_xlabel(r'x axis label using \TeX\ and SI-units such as an upright $\si{\micro}$')
    ax.set_ylabel(r'unusual symbols {\"a} \c{s} \AE\ \~{n}')
    ax.grid(True)
    # plt.tight_layout()
    plt.savefig("polyFit.png")
|
[
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.polyfit"
] |
[((60, 92), 'numpy.polyfit', 'np.polyfit', (['xData', 'yData', 'degree'], {}), '(xData, yData, degree)\n', (70, 92), True, 'import numpy as np\n'), ((554, 611), 'pandas.read_csv', 'pd.read_csv', (['"""polyFit.csv"""'], {'header': 'None', 'names': "['x', 'y']"}), "('polyFit.csv', header=None, names=['x', 'y'])\n", (565, 611), True, 'import pandas as pd\n'), ((655, 667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (665, 667), True, 'import matplotlib.pyplot as plt\n'), ((1410, 1436), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""polyFit.png"""'], {}), "('polyFit.png')\n", (1421, 1436), True, 'import matplotlib.pyplot as plt\n')]
|
"""Minimal implementation of Wasserstein GAN for MNIST."""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.examples.tutorials.mnist import input_data
import threading
from rendering import draw_figure, export_video
def leaky_relu(x):
    return tf.maximum(x, 0.2 * x)

def generator(z):
    with tf.variable_scope('generator'):
        z = layers.fully_connected(z, num_outputs=4096)
        z = tf.reshape(z, [-1, 4, 4, 256])
        z = layers.conv2d_transpose(z, num_outputs=128, kernel_size=5, stride=2)
        z = layers.conv2d_transpose(z, num_outputs=64, kernel_size=5, stride=2)
        z = layers.conv2d_transpose(z, num_outputs=1, kernel_size=5, stride=2,
                                    activation_fn=tf.nn.sigmoid)
    return z[:, 2:-2, 2:-2, :]

def discriminator(x, reuse):
    with tf.variable_scope('discriminator', reuse=reuse):
        x = layers.conv2d(x, num_outputs=64, kernel_size=5, stride=2,
                          activation_fn=leaky_relu)
        x = layers.conv2d(x, num_outputs=128, kernel_size=5, stride=2,
                          activation_fn=leaky_relu)
        x = layers.conv2d(x, num_outputs=256, kernel_size=5, stride=2,
                          activation_fn=leaky_relu)
        x = layers.flatten(x)
        return layers.fully_connected(x, num_outputs=1, activation_fn=None)
############# Create Tensorflow Graph ###############
with tf.name_scope('placeholders'):
    x_true = tf.placeholder(tf.float32, [None, 28, 28, 1])
    z = tf.placeholder(tf.float32, [None, 128])

x_generated = generator(z)
d_true = discriminator(x_true, reuse=False)
d_generated = discriminator(x_generated, reuse=True)

with tf.name_scope('regularizer'):
    epsilon = tf.random_uniform([50, 1, 1, 1], 0.0, 1.0)
    x_hat = epsilon * x_true + (1 - epsilon) * x_generated
    d_hat = discriminator(x_hat, reuse=True)
    gradients = tf.gradients(d_hat, x_hat)[0]
    ddx = tf.sqrt(tf.reduce_sum(gradients ** 2, axis=[1, 2]))
    d_regularizer = tf.reduce_mean((ddx - 1.0) ** 2)

with tf.name_scope('loss'):
    g_loss = tf.reduce_mean(d_generated)
    d_loss = (tf.reduce_mean(d_true) - tf.reduce_mean(d_generated) +
              10 * d_regularizer)

with tf.name_scope('optimizer'):
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0, beta2=0.9)
    g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator')
    g_train = optimizer.minimize(g_loss, var_list=g_vars)
    d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')
    d_train = optimizer.minimize(d_loss, var_list=d_vars)
#####################################################
############# Initialize Variables ###############
session = tf.InteractiveSession()
# session = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
tf.global_variables_initializer().run()
mnist = input_data.read_data_sets('MNIST_data')
generated_images = []
export_video_nth_frame = 30
height, width, channels = (28, 28, 1)
#####################################################
############# Start Rendering Thread ###############
drawing_thread = threading.Thread(target=draw_figure, args=(generated_images,))
drawing_thread.setDaemon(True)
drawing_thread.start()
#####################################################
############# Train ###############
for i in range(20000):
    batch = mnist.train.next_batch(50)
    images = batch[0].reshape([-1, height, width, channels])
    z_train = np.random.randn(50, 128)
    session.run(g_train, feed_dict={z: z_train})
    for j in range(5):
        session.run(d_train, feed_dict={x_true: images, z: z_train})
    print('iter={}/20000'.format(i))
    z_validate = np.random.randn(1, 128)
    generated = x_generated.eval(feed_dict={z: z_validate}).squeeze()
    generated = np.uint8(generated*255)  # hand over to thread
    generated_images.append(generated)
    if i % export_video_nth_frame == 0:
        export_video(generated_images)
#####################################################
################ Finalize #####################
export_video(generated_images)
#####################################################
|
[
"tensorflow.reduce_sum",
"tensorflow.get_collection",
"tensorflow.maximum",
"tensorflow.contrib.layers.flatten",
"tensorflow.reshape",
"tensorflow.InteractiveSession",
"numpy.random.randn",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.contrib.layers.conv2d_transpose",
"tensorflow.gradients",
"tensorflow.name_scope",
"threading.Thread",
"numpy.uint8",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.contrib.layers.conv2d",
"tensorflow.random_uniform",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.train.AdamOptimizer",
"rendering.export_video"
] |
[((2786, 2809), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (2807, 2809), True, 'import tensorflow as tf\n'), ((2942, 2981), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {}), "('MNIST_data')\n", (2967, 2981), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((3196, 3258), 'threading.Thread', 'threading.Thread', ([], {'target': 'draw_figure', 'args': '(generated_images,)'}), '(target=draw_figure, args=(generated_images,))\n', (3212, 3258), False, 'import threading\n'), ((4155, 4185), 'rendering.export_video', 'export_video', (['generated_images'], {}), '(generated_images)\n', (4167, 4185), False, 'from rendering import draw_figure, export_video\n'), ((329, 351), 'tensorflow.maximum', 'tf.maximum', (['x', '(0.2 * x)'], {}), '(x, 0.2 * x)\n', (339, 351), True, 'import tensorflow as tf\n'), ((1477, 1506), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholders"""'], {}), "('placeholders')\n", (1490, 1506), True, 'import tensorflow as tf\n'), ((1521, 1566), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 28, 28, 1]'], {}), '(tf.float32, [None, 28, 28, 1])\n', (1535, 1566), True, 'import tensorflow as tf\n'), ((1575, 1614), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 128]'], {}), '(tf.float32, [None, 128])\n', (1589, 1614), True, 'import tensorflow as tf\n'), ((1746, 1774), 'tensorflow.name_scope', 'tf.name_scope', (['"""regularizer"""'], {}), "('regularizer')\n", (1759, 1774), True, 'import tensorflow as tf\n'), ((1790, 1832), 'tensorflow.random_uniform', 'tf.random_uniform', (['[50, 1, 1, 1]', '(0.0)', '(1.0)'], {}), '([50, 1, 1, 1], 0.0, 1.0)\n', (1807, 1832), True, 'import tensorflow as tf\n'), ((2066, 2098), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((ddx - 1.0) ** 2)'], {}), '((ddx - 1.0) ** 2)\n', (2080, 2098), True, 'import tensorflow as tf\n'), ((2105, 2126), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (2118, 2126), True, 'import tensorflow as tf\n'), ((2141, 2168), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_generated'], {}), '(d_generated)\n', (2155, 2168), True, 'import tensorflow as tf\n'), ((2278, 2304), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (2291, 2304), True, 'import tensorflow as tf\n'), ((2322, 2386), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)', 'beta1': '(0)', 'beta2': '(0.9)'}), '(learning_rate=0.0001, beta1=0, beta2=0.9)\n', (2344, 2386), True, 'import tensorflow as tf\n'), ((2399, 2466), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""generator"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator')\n", (2416, 2466), True, 'import tensorflow as tf\n'), ((2538, 2609), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""discriminator"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')\n", (2555, 2609), True, 'import tensorflow as tf\n'), ((3542, 3566), 'numpy.random.randn', 'np.random.randn', (['(50)', '(128)'], {}), '(50, 128)\n', (3557, 3566), True, 'import numpy as np\n'), ((3764, 3787), 'numpy.random.randn', 'np.random.randn', (['(1)', '(128)'], {}), '(1, 128)\n', (3779, 3787), True, 'import numpy as np\n'), ((3875, 3900), 'numpy.uint8', 'np.uint8', (['(generated * 255)'], {}), '(generated * 255)\n', (3883, 3900), True, 'import numpy as np\n'), 
((380, 410), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generator"""'], {}), "('generator')\n", (397, 410), True, 'import tensorflow as tf\n'), ((424, 467), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['z'], {'num_outputs': '(4096)'}), '(z, num_outputs=4096)\n', (446, 467), False, 'from tensorflow.contrib import layers\n'), ((480, 510), 'tensorflow.reshape', 'tf.reshape', (['z', '[-1, 4, 4, 256]'], {}), '(z, [-1, 4, 4, 256])\n', (490, 510), True, 'import tensorflow as tf\n'), ((524, 592), 'tensorflow.contrib.layers.conv2d_transpose', 'layers.conv2d_transpose', (['z'], {'num_outputs': '(128)', 'kernel_size': '(5)', 'stride': '(2)'}), '(z, num_outputs=128, kernel_size=5, stride=2)\n', (547, 592), False, 'from tensorflow.contrib import layers\n'), ((605, 672), 'tensorflow.contrib.layers.conv2d_transpose', 'layers.conv2d_transpose', (['z'], {'num_outputs': '(64)', 'kernel_size': '(5)', 'stride': '(2)'}), '(z, num_outputs=64, kernel_size=5, stride=2)\n', (628, 672), False, 'from tensorflow.contrib import layers\n'), ((685, 784), 'tensorflow.contrib.layers.conv2d_transpose', 'layers.conv2d_transpose', (['z'], {'num_outputs': '(1)', 'kernel_size': '(5)', 'stride': '(2)', 'activation_fn': 'tf.nn.sigmoid'}), '(z, num_outputs=1, kernel_size=5, stride=2,\n activation_fn=tf.nn.sigmoid)\n', (708, 784), False, 'from tensorflow.contrib import layers\n'), ((892, 939), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""discriminator"""'], {'reuse': 'reuse'}), "('discriminator', reuse=reuse)\n", (909, 939), True, 'import tensorflow as tf\n'), ((953, 1041), 'tensorflow.contrib.layers.conv2d', 'layers.conv2d', (['x'], {'num_outputs': '(64)', 'kernel_size': '(5)', 'stride': '(2)', 'activation_fn': 'leaky_relu'}), '(x, num_outputs=64, kernel_size=5, stride=2, activation_fn=\n leaky_relu)\n', (966, 1041), False, 'from tensorflow.contrib import layers\n'), ((1075, 1164), 'tensorflow.contrib.layers.conv2d', 'layers.conv2d', (['x'], {'num_outputs': '(128)', 'kernel_size': '(5)', 'stride': '(2)', 'activation_fn': 'leaky_relu'}), '(x, num_outputs=128, kernel_size=5, stride=2, activation_fn=\n leaky_relu)\n', (1088, 1164), False, 'from tensorflow.contrib import layers\n'), ((1198, 1287), 'tensorflow.contrib.layers.conv2d', 'layers.conv2d', (['x'], {'num_outputs': '(256)', 'kernel_size': '(5)', 'stride': '(2)', 'activation_fn': 'leaky_relu'}), '(x, num_outputs=256, kernel_size=5, stride=2, activation_fn=\n leaky_relu)\n', (1211, 1287), False, 'from tensorflow.contrib import layers\n'), ((1322, 1339), 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['x'], {}), '(x)\n', (1336, 1339), False, 'from tensorflow.contrib import layers\n'), ((1355, 1415), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['x'], {'num_outputs': '(1)', 'activation_fn': 'None'}), '(x, num_outputs=1, activation_fn=None)\n', (1377, 1415), False, 'from tensorflow.contrib import layers\n'), ((1954, 1980), 'tensorflow.gradients', 'tf.gradients', (['d_hat', 'x_hat'], {}), '(d_hat, x_hat)\n', (1966, 1980), True, 'import tensorflow as tf\n'), ((2002, 2044), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gradients ** 2)'], {'axis': '[1, 2]'}), '(gradients ** 2, axis=[1, 2])\n', (2015, 2044), True, 'import tensorflow as tf\n'), ((2894, 2927), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2925, 2927), True, 'import tensorflow as tf\n'), ((4020, 4050), 'rendering.export_video', 'export_video', (['generated_images'], {}), 
'(generated_images)\n', (4032, 4050), False, 'from rendering import draw_figure, export_video\n'), ((2183, 2205), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_true'], {}), '(d_true)\n', (2197, 2205), True, 'import tensorflow as tf\n'), ((2208, 2235), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_generated'], {}), '(d_generated)\n', (2222, 2235), True, 'import tensorflow as tf\n')]
|
"""Parsing of signal logs from experiments, and version logging."""
import datetime
import importlib
import json
import logging
import os
import pprint
import subprocess
import time
import git
import numpy as np
# these should be moved to other (optional) module
from openpromela import logic
from openpromela import slugs
logger = logging.getLogger(__name__)
CONFIG_FILE = 'config.json'
def git_version(path):
    """Return SHA-dirty for repo under `path`."""
    repo = git.Repo(path)
    sha = repo.head.commit.hexsha
    dirty = repo.is_dirty()
    return sha + ('-dirty' if dirty else '')

def snapshot_versions(check=True):
    """Log versions of software used."""
    d = dict()
    d['slugs'] = slugs_version()
    # versions of python packages
    packages = [
        'dd', 'omega', 'tugs',
        'openpromela', 'promela']
    for s in packages:
        pkg = importlib.import_module(s)
        d[s] = pkg.__version__
    t_now = time.strftime('%Y-%b-%d-%A-%T-%Z')
    d['time'] = t_now
    d['platform'] = os.uname()
    if not check:
        return d
    # existing log ?
    try:
        with open(CONFIG_FILE, 'r') as f:
            d_old = json.load(f)
    except IOError:
        d_old = None
    # check versions
    compare = list(packages)
    compare.append('slugs')
    if d_old is not None:
        for k in compare:
            assert d[k] == d_old[k], (
                ('versions differ from {cfg}:\n\n'
                 'NEW: {d}'
                 '\n -----\n\n'
                 'OLD: {d_old}').format(
                    cfg=CONFIG_FILE,
                    d=pprint.pformat(d),
                    d_old=pprint.pformat(d_old)))
    # dump
    with open(CONFIG_FILE, 'w') as f:
        json.dump(d, f, indent=4)
    return d

def slugs_version():
    cmd = ['slugs', '--version']
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except OSError as e:
        if e.errno == os.errno.ENOENT:
            print('Warning: `slugs` not found on path')
            return
        else:
            raise
    p.wait()
    if p.returncode != 0:
        print('`{cmd}` returned {r}'.format(
            cmd=' '.join(cmd),
            r=p.returncode))
        return
    version = p.stdout.read().strip()
    return version
def add_logfile(fname, logger_name):
    h = logging.FileHandler(fname, mode='w')
    log = logging.getLogger(logger_name)
    log.addHandler(h)
    return h

def close_logfile(h, logger_name):
    log = logging.getLogger(logger_name)
    log.removeHandler(h)
    h.close()

def load_log_file(fname):
    data = dict()
    with open(fname, 'r') as f:
        for line in f:
            if "'time'" not in line:
                continue
            try:
                d = eval(line)
                split_data(d, data)
            except:
                continue
    for k, v in data.iteritems():
        for q, r in v.iteritems():
            try:
                data[k][q] = np.array(r, dtype=float)
            except:
                pass
    return data
def split_data(d, data):
    """Store sample in `d` as a signal in `data`.

    @type d: `dict`
    @type data: `dict(dict(time=list(), value=list()))`
    """
    t = d['time']
    for k, v in d.iteritems():
        if k == 'time':
            continue
        # is a signal
        # new ?
        if k not in data:
            data[k] = dict(time=list(), value=list())
        data[k]['time'].append(t)
        data[k]['value'].append(v)
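# Illustrative behaviour of `split_data` with two hypothetical samples:
#   data = {}
#   split_data({'time': 0.1, 'x': 1.0}, data)
#   split_data({'time': 0.2, 'x': 2.0}, data)
#   # data['x'] == {'time': [0.1, 0.2], 'value': [1.0, 2.0]}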
def get_signal(name, data):
    return data[name]['time'], data[name]['value']

def inspect_data(data):
    for k in data:
        t = data[k]['time']
        v = data[k]['value']
        print(k, len(t), len(v))

def translate_promela_to_slugsin(code):
    """Return SlugsIn code from Promela `code`."""
    t0 = time.time()
    spec = logic.compile_spec(code)
    aut = slugs._symbolic._bitblast(spec)
    s = slugs._to_slugs(aut)
    t1 = time.time()
    dt = datetime.timedelta(seconds=t1 - t0)
    logger.info('translated Promela -> SlugsIn in {dt}.'.format(dt=dt))
    return s
|
[
"json.dump",
"subprocess.Popen",
"json.load",
"pprint.pformat",
"logging.FileHandler",
"importlib.import_module",
"os.uname",
"openpromela.slugs._to_slugs",
"time.strftime",
"git.Repo",
"time.time",
"datetime.timedelta",
"numpy.array",
"openpromela.logic.compile_spec",
"openpromela.slugs._symbolic._bitblast",
"logging.getLogger"
] |
[((334, 361), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (351, 361), False, 'import logging\n'), ((476, 490), 'git.Repo', 'git.Repo', (['path'], {}), '(path)\n', (484, 490), False, 'import git\n'), ((947, 981), 'time.strftime', 'time.strftime', (['"""%Y-%b-%d-%A-%T-%Z"""'], {}), "('%Y-%b-%d-%A-%T-%Z')\n", (960, 981), False, 'import time\n'), ((1024, 1034), 'os.uname', 'os.uname', ([], {}), '()\n', (1032, 1034), False, 'import os\n'), ((2318, 2354), 'logging.FileHandler', 'logging.FileHandler', (['fname'], {'mode': '"""w"""'}), "(fname, mode='w')\n", (2337, 2354), False, 'import logging\n'), ((2365, 2395), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (2382, 2395), False, 'import logging\n'), ((2478, 2508), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (2495, 2508), False, 'import logging\n'), ((3798, 3809), 'time.time', 'time.time', ([], {}), '()\n', (3807, 3809), False, 'import time\n'), ((3821, 3845), 'openpromela.logic.compile_spec', 'logic.compile_spec', (['code'], {}), '(code)\n', (3839, 3845), False, 'from openpromela import logic\n'), ((3856, 3887), 'openpromela.slugs._symbolic._bitblast', 'slugs._symbolic._bitblast', (['spec'], {}), '(spec)\n', (3881, 3887), False, 'from openpromela import slugs\n'), ((3896, 3916), 'openpromela.slugs._to_slugs', 'slugs._to_slugs', (['aut'], {}), '(aut)\n', (3911, 3916), False, 'from openpromela import slugs\n'), ((3926, 3937), 'time.time', 'time.time', ([], {}), '()\n', (3935, 3937), False, 'import time\n'), ((3947, 3982), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(t1 - t0)'}), '(seconds=t1 - t0)\n', (3965, 3982), False, 'import datetime\n'), ((877, 903), 'importlib.import_module', 'importlib.import_module', (['s'], {}), '(s)\n', (900, 903), False, 'import importlib\n'), ((1722, 1747), 'json.dump', 'json.dump', (['d', 'f'], {'indent': '(4)'}), '(d, f, indent=4)\n', (1731, 1747), False, 'import json\n'), ((1838, 1883), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (1854, 1883), False, 'import subprocess\n'), ((1162, 1174), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1171, 1174), False, 'import json\n'), ((2955, 2979), 'numpy.array', 'np.array', (['r'], {'dtype': 'float'}), '(r, dtype=float)\n', (2963, 2979), True, 'import numpy as np\n'), ((1596, 1613), 'pprint.pformat', 'pprint.pformat', (['d'], {}), '(d)\n', (1610, 1613), False, 'import pprint\n'), ((1641, 1662), 'pprint.pformat', 'pprint.pformat', (['d_old'], {}), '(d_old)\n', (1655, 1662), False, 'import pprint\n')]
|
import sklearn.tree
import os
import pandas as pd
import numpy as np
from hydroDL import kPath
from hydroDL.data import usgs, gageII
from hydroDL.post import axplot
import matplotlib.pyplot as plt
dirCQ = os.path.join(kPath.dirWQ, 'C-Q')
dfS = pd.read_csv(os.path.join(dirCQ, 'slope'), dtype={
    'siteNo': str}).set_index('siteNo')
dfN = pd.read_csv(os.path.join(dirCQ, 'nSample'), dtype={
    'siteNo': str}).set_index('siteNo')
siteNoLst = dfS.index.tolist()
codeLst = dfS.columns.tolist()
dropColLst = ['STANAME', 'WR_REPORT_REMARKS',
              'ADR_CITATION', 'SCREENING_COMMENTS']
dfX = gageII.readData(siteNoLst=siteNoLst).drop(columns=dropColLst)
dfX = gageII.updateCode(dfX)
dfCrd = gageII.readData(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
code = '00955'
indValid = np.where((~np.isnan(dfS['00955'].values))
                    & (dfN['00955'].values > 10))[0]
dataAll = dfS[code][indValid]
vr = np.max([np.abs(np.percentile(dataAll, 1)),
               np.abs(np.percentile(dataAll, 99))])
vRange = [-vr, vr]
def subTree(indInput):
    x = dfX.values[indInput, :]
    y = dfS[code].values[indInput]
    x[np.isnan(x)] = -99
    clf = sklearn.tree.DecisionTreeRegressor(max_depth=1)
    clf = clf.fit(x, y)
    tree = clf.tree_
    feat = dfX.columns[tree.feature[0]]
    th = tree.threshold[0]
    indLeft = np.where(x[:, tree.feature[0]] <= tree.threshold[0])[0]
    indRight = np.where(x[:, tree.feature[0]] > tree.threshold[0])[0]
    indLeftG = indInput[indLeft]
    indRightG = indInput[indRight]
    return indLeftG, indRightG, feat, th

def plotCdf(ax, indInput, indLeft, indRight):
    cLst = 'gbr'
    labLst = ['parent', 'left', 'right']
    y0 = dfS[code].values[indInput]
    y1 = dfS[code].values[indLeft]
    y2 = dfS[code].values[indRight]
    dataLst = [y0, y1, y2]
    for k, data in enumerate(dataLst):
        xSort = np.sort(data[~np.isnan(data)])
        yRank = np.arange(1, len(xSort)+1) / float(len(xSort))
        ax.plot(xSort, yRank, color=cLst[k], label=labLst[k])
    ax.set_xlim(vRange)
    ax.legend(loc='best', frameon=False)

def plotMap(ax, indInput):
    lat = dfCrd['LAT_GAGE'][indInput]
    lon = dfCrd['LNG_GAGE'][indInput]
    data = dfS[code][indInput]
    axplot.mapPoint(ax, lat, lon, data, vRange=vRange, s=10)
indInput = indValid
indLeft, indRight, feat, th = subTree(indInput)
fig, ax = plt.subplots(1, 1)
plotCdf(ax, indInput, indLeft, indRight)
fig.show()
fig, axes = plt.subplots(2, 1)
plotMap(axes[0], indLeft)
plotMap(axes[1], indRight)
fig.show()
|
[
"os.path.join",
"numpy.isnan",
"numpy.percentile",
"numpy.where",
"hydroDL.post.axplot.mapPoint",
"hydroDL.data.gageII.updateCode",
"hydroDL.data.gageII.readData",
"matplotlib.pyplot.subplots"
] |
[((206, 238), 'os.path.join', 'os.path.join', (['kPath.dirWQ', '"""C-Q"""'], {}), "(kPath.dirWQ, 'C-Q')\n", (218, 238), False, 'import os\n'), ((682, 704), 'hydroDL.data.gageII.updateCode', 'gageII.updateCode', (['dfX'], {}), '(dfX)\n', (699, 704), False, 'from hydroDL.data import usgs, gageII\n'), ((713, 782), 'hydroDL.data.gageII.readData', 'gageII.readData', ([], {'varLst': "['LAT_GAGE', 'LNG_GAGE']", 'siteNoLst': 'siteNoLst'}), "(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)\n", (728, 782), False, 'from hydroDL.data import usgs, gageII\n'), ((2385, 2403), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2397, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2487), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (2481, 2487), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2304), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['ax', 'lat', 'lon', 'data'], {'vRange': 'vRange', 's': '(10)'}), '(ax, lat, lon, data, vRange=vRange, s=10)\n', (2263, 2304), False, 'from hydroDL.post import axplot\n'), ((614, 650), 'hydroDL.data.gageII.readData', 'gageII.readData', ([], {'siteNoLst': 'siteNoLst'}), '(siteNoLst=siteNoLst)\n', (629, 650), False, 'from hydroDL.data import usgs, gageII\n'), ((1150, 1161), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1158, 1161), True, 'import numpy as np\n'), ((1353, 1405), 'numpy.where', 'np.where', (['(x[:, tree.feature[0]] <= tree.threshold[0])'], {}), '(x[:, tree.feature[0]] <= tree.threshold[0])\n', (1361, 1405), True, 'import numpy as np\n'), ((1424, 1475), 'numpy.where', 'np.where', (['(x[:, tree.feature[0]] > tree.threshold[0])'], {}), '(x[:, tree.feature[0]] > tree.threshold[0])\n', (1432, 1475), True, 'import numpy as np\n'), ((257, 285), 'os.path.join', 'os.path.join', (['dirCQ', '"""slope"""'], {}), "(dirCQ, 'slope')\n", (269, 285), False, 'import os\n'), ((353, 383), 'os.path.join', 'os.path.join', (['dirCQ', '"""nSample"""'], {}), "(dirCQ, 'nSample')\n", (365, 383), False, 'import os\n'), ((955, 980), 'numpy.percentile', 'np.percentile', (['dataAll', '(1)'], {}), '(dataAll, 1)\n', (968, 980), True, 'import numpy as np\n'), ((1003, 1029), 'numpy.percentile', 'np.percentile', (['dataAll', '(99)'], {}), '(dataAll, 99)\n', (1016, 1029), True, 'import numpy as np\n'), ((821, 850), 'numpy.isnan', 'np.isnan', (["dfS['00955'].values"], {}), "(dfS['00955'].values)\n", (829, 850), True, 'import numpy as np\n'), ((1897, 1911), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (1905, 1911), True, 'import numpy as np\n')]
|
from sklearn import svm
import numpy as np
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
model = svm.SVC(kernel='linear', C=1, gamma=1)
model.fit(X, y)
print(model.predict([[-0.8, -1]]))
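# For this toy data the query point (-0.8, -1) lies with the first cluster of
# training points, so the linear SVM should print [1].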
|
[
"numpy.array",
"sklearn.svm.SVC"
] |
[((48, 94), 'numpy.array', 'np.array', (['[[-1, -1], [-2, -1], [1, 1], [2, 1]]'], {}), '([[-1, -1], [-2, -1], [1, 1], [2, 1]])\n', (56, 94), True, 'import numpy as np\n'), ((99, 121), 'numpy.array', 'np.array', (['[1, 1, 2, 2]'], {}), '([1, 1, 2, 2])\n', (107, 121), True, 'import numpy as np\n'), ((132, 170), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'C': '(1)', 'gamma': '(1)'}), "(kernel='linear', C=1, gamma=1)\n", (139, 170), False, 'from sklearn import svm\n')]
|
#################################
# CSI function
#################################
#########################################################
# import libraries
import scipy.spatial.distance as ssd
import numpy as np
import scipy.io as sio
#########################################################
# Function definition
###############################
# Load CSI
def load_csi(num_UAV, location, pthH, SaveFile):
    """
    This function generates the CSI parameters based on the LOS propagation model and the location of nodes at the
    beginning of the problem.
    :param num_UAV: Number of UAVs.
    :param location: A dictionary including all locations.
    :param pthH: The directory to save the CSI parameters on a file.
    :param SaveFile: A flag (True, False) to save or load data.
    :return: Returns a Numpy array including CSI parameters.
    """
    if SaveFile:
        X_U = location.get('X_U')
        X_S = location.get('X_S')
        X_F = location.get('X_F')
        X_GT = location.get('X_GT')
        X_GR = location.get('X_GR')
        Y_U = location.get('Y_U')
        Y_S = location.get('Y_S')
        Y_F = location.get('Y_F')
        Y_GT = location.get('Y_GT')
        Y_GR = location.get('Y_GR')
        Z_U = location.get('Z_U')
        Z_S = location.get('Z_S')
        Z_F = location.get('Z_F')
        Z_GT = location.get('Z_GT')
        Z_GR = location.get('Z_GR')
        dist_S_uav = [ssd.euclidean([X_S, Y_S, Z_S], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
        dist_S_uav = np.asarray(dist_S_uav)
        dist_uav_F = [ssd.euclidean([X_F, Y_F, Z_F], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
        dist_uav_F = np.asarray(dist_uav_F)
        dist_GT_uav = [ssd.euclidean([X_GT, Y_GT, Z_GT], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
        dist_GT_uav = np.asarray(dist_GT_uav)
        dist_uav_GR = [ssd.euclidean([X_GR, Y_GR, Z_GR], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
        dist_uav_GR = np.asarray(dist_uav_GR)
        dist_S_uav_Norm = dist_S_uav/min(dist_S_uav)
        dist_uav_F_Norm = dist_uav_F/min(dist_uav_F)
        dist_GT_uav_Norm = dist_GT_uav/min(dist_GT_uav)
        dist_uav_GR_Norm = dist_uav_GR/min(dist_uav_GR)
        h_S_uav = np.multiply(1/(dist_S_uav_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
        h_S_uav = h_S_uav.T
        h_uav_F = np.multiply(1/(dist_uav_F_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
        h_uav_F = h_uav_F.T
        h_GT_uav = np.multiply(1/(dist_GT_uav_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
        h_GT_uav = h_GT_uav.T
        h_uav_GR = np.multiply(1/(dist_uav_GR_Norm**2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
        h_uav_GR = h_uav_GR.T
        csi_h = np.zeros([num_UAV, 4, 1], dtype=complex)
        csi_h[:, 0, :] = h_S_uav
        csi_h[:, 1, :] = h_uav_F
        csi_h[:, 2, :] = h_GT_uav
        csi_h[:, 3, :] = h_uav_GR
        sio.savemat(pthH, {'csi_h': csi_h})
    else:
        csi_h_dict = sio.loadmat(pthH)
        csi_h = csi_h_dict.get('csi_h')
    return csi_h
###############################
# GET CSI
def get_csi(num_UAV, location, x_u, y_u):
    """
    This function updates the CSI based on the changed locations of the drones.
    :param num_UAV: Number of UAVs.
    :param location: The initial location of drones and the fixed nodes.
    :param x_u: The updated longitude of UAVs.
    :param y_u: The updated latitude of UAVs.
    :return: It returns an updated numpy array for the CSI parameters.
    """
    source_uav = 0
    uav_fusion = 1
    gtuser_uav = 2
    uav_gruser = 3
    X_U = x_u
    X_S = location.get('X_S')
    X_F = location.get('X_F')
    X_GT = location.get('X_GT')
    X_GR = location.get('X_GR')
    Y_U = y_u
    Y_S = location.get('Y_S')
    Y_F = location.get('Y_F')
    Y_GT = location.get('Y_GT')
    Y_GR = location.get('Y_GR')
    Z_U = location.get('Z_U')
    Z_S = location.get('Z_S')
    Z_F = location.get('Z_F')
    Z_GT = location.get('Z_GT')
    Z_GR = location.get('Z_GR')
    dist_S_uav = [ssd.euclidean([X_S, Y_S, Z_S], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
    dist_S_uav = np.asarray(dist_S_uav)
    dist_uav_F = [ssd.euclidean([X_F, Y_F, Z_F], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
    dist_uav_F = np.asarray(dist_uav_F)
    dist_GT_uav = [ssd.euclidean([X_GT, Y_GT, Z_GT], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
    dist_GT_uav = np.asarray(dist_GT_uav)
    dist_uav_GR = [ssd.euclidean([X_GR, Y_GR, Z_GR], [i, j, k]) for i, j, k in zip(X_U, Y_U, Z_U)]
    dist_uav_GR = np.asarray(dist_uav_GR)
    dist_S_uav_Norm = dist_S_uav
    dist_uav_F_Norm = dist_uav_F
    dist_GT_uav_Norm = dist_GT_uav
    dist_uav_GR_Norm = dist_uav_GR
    h_S_uav = np.multiply(1 / (dist_S_uav_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
    h_S_uav = h_S_uav.T
    h_uav_F = np.multiply(1 / (dist_uav_F_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
    h_uav_F = h_uav_F.T
    h_GT_uav = np.multiply(1 / (dist_GT_uav_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
    h_GT_uav = h_GT_uav.T
    h_uav_GR = np.multiply(1 / (dist_uav_GR_Norm ** 2), (np.ones([num_UAV, 1]) + 1j * np.ones([num_UAV, 1])).T)
    h_uav_GR = h_uav_GR.T
    csi_h = np.zeros([num_UAV, 4], dtype=complex)
    csi_h[:, source_uav] = np.squeeze(h_S_uav)
    csi_h[:, uav_fusion] = np.squeeze(h_uav_F)
    csi_h[:, gtuser_uav] = np.squeeze(h_GT_uav)
    csi_h[:, uav_gruser] = np.squeeze(h_uav_GR)
    return csi_h
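# Illustrative call (hypothetical values): `location` is expected to hold per-UAV
# coordinate arrays (X_U, Y_U, Z_U) plus coordinates for the fixed nodes S, F, GT and GR:
#   location = {'X_U': [10., 20.], 'Y_U': [0., 5.], 'Z_U': [100., 100.],
#               'X_S': 0., 'Y_S': 0., 'Z_S': 0., ...}
#   csi = get_csi(2, location, x_u=[12., 22.], y_u=[1., 6.])
# Each row of the returned array holds the four complex channel gains for one UAV.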
|
[
"scipy.spatial.distance.euclidean",
"scipy.io.loadmat",
"numpy.asarray",
"numpy.zeros",
"scipy.io.savemat",
"numpy.ones",
"numpy.squeeze"
] |
[((4331, 4353), 'numpy.asarray', 'np.asarray', (['dist_S_uav'], {}), '(dist_S_uav)\n', (4341, 4353), True, 'import numpy as np\n'), ((4470, 4492), 'numpy.asarray', 'np.asarray', (['dist_uav_F'], {}), '(dist_uav_F)\n', (4480, 4492), True, 'import numpy as np\n'), ((4614, 4637), 'numpy.asarray', 'np.asarray', (['dist_GT_uav'], {}), '(dist_GT_uav)\n', (4624, 4637), True, 'import numpy as np\n'), ((4759, 4782), 'numpy.asarray', 'np.asarray', (['dist_uav_GR'], {}), '(dist_uav_GR)\n', (4769, 4782), True, 'import numpy as np\n'), ((5500, 5537), 'numpy.zeros', 'np.zeros', (['[num_UAV, 4]'], {'dtype': 'complex'}), '([num_UAV, 4], dtype=complex)\n', (5508, 5537), True, 'import numpy as np\n'), ((5566, 5585), 'numpy.squeeze', 'np.squeeze', (['h_S_uav'], {}), '(h_S_uav)\n', (5576, 5585), True, 'import numpy as np\n'), ((5614, 5633), 'numpy.squeeze', 'np.squeeze', (['h_uav_F'], {}), '(h_uav_F)\n', (5624, 5633), True, 'import numpy as np\n'), ((5662, 5682), 'numpy.squeeze', 'np.squeeze', (['h_GT_uav'], {}), '(h_GT_uav)\n', (5672, 5682), True, 'import numpy as np\n'), ((5711, 5731), 'numpy.squeeze', 'np.squeeze', (['h_uav_GR'], {}), '(h_uav_GR)\n', (5721, 5731), True, 'import numpy as np\n'), ((1576, 1598), 'numpy.asarray', 'np.asarray', (['dist_S_uav'], {}), '(dist_S_uav)\n', (1586, 1598), True, 'import numpy as np\n'), ((1723, 1745), 'numpy.asarray', 'np.asarray', (['dist_uav_F'], {}), '(dist_uav_F)\n', (1733, 1745), True, 'import numpy as np\n'), ((1875, 1898), 'numpy.asarray', 'np.asarray', (['dist_GT_uav'], {}), '(dist_GT_uav)\n', (1885, 1898), True, 'import numpy as np\n'), ((2028, 2051), 'numpy.asarray', 'np.asarray', (['dist_uav_GR'], {}), '(dist_uav_GR)\n', (2038, 2051), True, 'import numpy as np\n'), ((2871, 2911), 'numpy.zeros', 'np.zeros', (['[num_UAV, 4, 1]'], {'dtype': 'complex'}), '([num_UAV, 4, 1], dtype=complex)\n', (2879, 2911), True, 'import numpy as np\n'), ((3063, 3098), 'scipy.io.savemat', 'sio.savemat', (['pthH', "{'csi_h': csi_h}"], {}), "(pthH, {'csi_h': csi_h})\n", (3074, 3098), True, 'import scipy.io as sio\n'), ((3132, 3149), 'scipy.io.loadmat', 'sio.loadmat', (['pthH'], {}), '(pthH)\n', (3143, 3149), True, 'import scipy.io as sio\n'), ((4236, 4277), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_S, Y_S, Z_S]', '[i, j, k]'], {}), '([X_S, Y_S, Z_S], [i, j, k])\n', (4249, 4277), True, 'import scipy.spatial.distance as ssd\n'), ((4375, 4416), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_F, Y_F, Z_F]', '[i, j, k]'], {}), '([X_F, Y_F, Z_F], [i, j, k])\n', (4388, 4416), True, 'import scipy.spatial.distance as ssd\n'), ((4515, 4559), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_GT, Y_GT, Z_GT]', '[i, j, k]'], {}), '([X_GT, Y_GT, Z_GT], [i, j, k])\n', (4528, 4559), True, 'import scipy.spatial.distance as ssd\n'), ((4660, 4704), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_GR, Y_GR, Z_GR]', '[i, j, k]'], {}), '([X_GR, Y_GR, Z_GR], [i, j, k])\n', (4673, 4704), True, 'import scipy.spatial.distance as ssd\n'), ((1477, 1518), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_S, Y_S, Z_S]', '[i, j, k]'], {}), '([X_S, Y_S, Z_S], [i, j, k])\n', (1490, 1518), True, 'import scipy.spatial.distance as ssd\n'), ((1624, 1665), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_F, Y_F, Z_F]', '[i, j, k]'], {}), '([X_F, Y_F, Z_F], [i, j, k])\n', (1637, 1665), True, 'import scipy.spatial.distance as ssd\n'), ((1772, 1816), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_GT, Y_GT, Z_GT]', '[i, j, k]'], {}), '([X_GT, Y_GT, 
Z_GT], [i, j, k])\n', (1785, 1816), True, 'import scipy.spatial.distance as ssd\n'), ((1925, 1969), 'scipy.spatial.distance.euclidean', 'ssd.euclidean', (['[X_GR, Y_GR, Z_GR]', '[i, j, k]'], {}), '([X_GR, Y_GR, Z_GR], [i, j, k])\n', (1938, 1969), True, 'import scipy.spatial.distance as ssd\n'), ((4983, 5004), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (4990, 5004), True, 'import numpy as np\n'), ((5121, 5142), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5128, 5142), True, 'import numpy as np\n'), ((5261, 5282), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5268, 5282), True, 'import numpy as np\n'), ((5403, 5424), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5410, 5424), True, 'import numpy as np\n'), ((2334, 2355), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2341, 2355), True, 'import numpy as np\n'), ((2476, 2497), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2483, 2497), True, 'import numpy as np\n'), ((2620, 2641), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2627, 2641), True, 'import numpy as np\n'), ((2766, 2787), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2773, 2787), True, 'import numpy as np\n'), ((5012, 5033), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5019, 5033), True, 'import numpy as np\n'), ((5150, 5171), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5157, 5171), True, 'import numpy as np\n'), ((5290, 5311), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5297, 5311), True, 'import numpy as np\n'), ((5432, 5453), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (5439, 5453), True, 'import numpy as np\n'), ((2363, 2384), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2370, 2384), True, 'import numpy as np\n'), ((2505, 2526), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2512, 2526), True, 'import numpy as np\n'), ((2649, 2670), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2656, 2670), True, 'import numpy as np\n'), ((2795, 2816), 'numpy.ones', 'np.ones', (['[num_UAV, 1]'], {}), '([num_UAV, 1])\n', (2802, 2816), True, 'import numpy as np\n')]
|
# Machine Learning/Data Science Precourse Work
# ###
# LAMBDA SCHOOL
# ###
# MIT LICENSE
# ###
# Free example function definition
# This function passes one of the 11 tests contained inside of test.py. Write the rest, defined in README.md, here,
# and execute python test.py to test. Passing this precourse work will greatly increase your odds of acceptance
# into the program.
import math
import numpy as np
def f(x):
    return x**2

def f_2(x):
    return x**3

def f_3(x):
    return (x**3) + (5*x)

def d_f(x):
    return 2*x

def d_f_2(x):
    return 3*(x**2)

def d_f_3(x):
    return 3*(x**2) + 5

# for all values of x, return x + y
def vector_sum(x, y):
    return [x[num] + y[num] for num in range(len(x))]

# for all values of x, return x - y
def vector_less(x, y):
    return [x[num] - y[num] for num in range(len(x))]

def vector_magnitude(v):
    sqvector = 0
    for vector in v:
        sqvector += vector**2
    return math.sqrt(sqvector)

def vec5():
    return np.array([1, 1, 1, 1, 1])

def vec3():
    return np.array([0, 0, 0])

def vec2_1():
    return np.array([1, 0])

def vec2_2():
    return np.array([0, 1])

def matrix_multiply(vec, matrix):
    return np.dot(vec, matrix)
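# Illustrative use of the helpers above (values chosen arbitrarily):
#   vector_sum([1, 2], [3, 4])                              # -> [4, 6]
#   vector_magnitude([3, 4])                                # -> 5.0
#   matrix_multiply(vec2_1(), np.array([[1, 2], [3, 4]]))   # -> array([1, 2])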
|
[
"numpy.dot",
"numpy.array",
"math.sqrt"
] |
[((968, 987), 'math.sqrt', 'math.sqrt', (['sqvector'], {}), '(sqvector)\n', (977, 987), False, 'import math\n'), ((1013, 1038), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (1021, 1038), True, 'import numpy as np\n'), ((1064, 1083), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1072, 1083), True, 'import numpy as np\n'), ((1111, 1127), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1119, 1127), True, 'import numpy as np\n'), ((1155, 1171), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1163, 1171), True, 'import numpy as np\n'), ((1219, 1238), 'numpy.dot', 'np.dot', (['vec', 'matrix'], {}), '(vec, matrix)\n', (1225, 1238), True, 'import numpy as np\n')]
|
import h5py
import tools.pymus_utils as pymusutil
import numpy as np
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.DEBUG)
class ImageFormatError(Exception):
    pass

class EchoImage(object):
    ''' Echogenicity grayscale image
    '''
    def __init__(self, scan):
        self.scan = scan
        self.data_array = np.zeros((len(scan.z_axis), len(scan.x_axis)))
        self.scan_x_bounds = [self.scan.x_axis.min(), self.scan.x_axis.max()]
        self.scan_z_bounds = [self.scan.z_axis.min(), self.scan.z_axis.max()]
        self.title = ""

    def import_data(self, data):
        try:
            self.data_array = np.abs(np.reshape(data, self.data_array.shape))
        except:
            raise ImageFormatError(" format error - cannot reshape %s to %s " % (str(data.shape), str(self.data_array.shape)))
        return

    def set_title(self, title):
        self.title = title

    def show_image(self, dbScale=True, dynamic_range=60, to_file=None):
        z_m, z_M = self.scan_z_bounds
        x_m, x_M = self.scan_x_bounds
        z_span = z_M - z_m
        x_span = x_M - x_m
        x_ratio = x_span/z_span
        print("X -> %s %s Z -> %s %s / %s %s / %s " % (x_m, x_M, z_m, z_M, z_span, x_span, x_ratio))
        base_sz = 6.
        im_M = self.data_array.max()
        fig, ax = plt.subplots(figsize=(1.0 + x_ratio*base_sz, 0.3 + base_sz))
        xtent = [x_m, x_M, z_m, z_M]
        if dbScale:
            plt_im = 20.*np.log10((1./im_M)*self.data_array)
        else:
            plt_im = self.data_array
        cax = ax.imshow(plt_im, interpolation='none', vmin=-1.*dynamic_range, vmax=0., extent=xtent, cmap='gray')
        ax.set_xlabel(" x [mm] ")
        ax.set_ylabel(" z [mm] ")
        ax.set_title(self.title)
        range_ticks = [-1.*k for k in np.arange(int(dynamic_range + 1))[::-10]]
        fig.colorbar(cax, ticks=range_ticks)
        if to_file is not None:
            plt.savefig(to_file)
        plt.show()

    def write_file(self, filename, prefix=None, overwrite=False):
        data_to_write = {'title': self.title, 'data': self.data_array}
        pymusutil.generic_hdf5_write(filename, prefix, overwrite, data_to_write)

    def read_file(self, filename, prefix):
        data_from_file = {'title': None, 'data': None}
        res = pymusutil.generic_hdf5_read(filename, prefix, data_from_file)
        logging.debug(data_from_file)
        if data_from_file['title'] is None:
            logging.error("title not found in %s:%s " % (filename, prefix))
        else:
            self.title = data_from_file['title'][0]
        if data_from_file['data'] is None:
            logging.error("image data not found in %s:%s " % (filename, prefix))
        else:
            self.data_array = data_from_file['data'][:]
|
[
"logging.error",
"tools.pymus_utils.generic_hdf5_read",
"matplotlib.pyplot.show",
"logging.basicConfig",
"logging.debug",
"numpy.reshape",
"numpy.log10",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"tools.pymus_utils.generic_hdf5_write"
] |
[((117, 157), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (136, 157), False, 'import logging\n'), ((1185, 1247), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(1.0 + x_ratio * base_sz, 0.3 + base_sz)'}), '(figsize=(1.0 + x_ratio * base_sz, 0.3 + base_sz))\n', (1197, 1247), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1738), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1736, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1944), 'tools.pymus_utils.generic_hdf5_write', 'pymusutil.generic_hdf5_write', (['filename', 'prefix', 'overwrite', 'data_to_write'], {}), '(filename, prefix, overwrite, data_to_write)\n', (1900, 1944), True, 'import tools.pymus_utils as pymusutil\n'), ((2040, 2101), 'tools.pymus_utils.generic_hdf5_read', 'pymusutil.generic_hdf5_read', (['filename', 'prefix', 'data_from_file'], {}), '(filename, prefix, data_from_file)\n', (2067, 2101), True, 'import tools.pymus_utils as pymusutil\n'), ((2102, 2131), 'logging.debug', 'logging.debug', (['data_from_file'], {}), '(data_from_file)\n', (2115, 2131), False, 'import logging\n'), ((1705, 1725), 'matplotlib.pyplot.savefig', 'plt.savefig', (['to_file'], {}), '(to_file)\n', (1716, 1725), True, 'import matplotlib.pyplot as plt\n'), ((2173, 2236), 'logging.error', 'logging.error', (["('title not found in %s:%s ' % (filename, prefix))"], {}), "('title not found in %s:%s ' % (filename, prefix))\n", (2186, 2236), False, 'import logging\n'), ((2327, 2395), 'logging.error', 'logging.error', (["('image data not found in %s:%s ' % (filename, prefix))"], {}), "('image data not found in %s:%s ' % (filename, prefix))\n", (2340, 2395), False, 'import logging\n'), ((608, 647), 'numpy.reshape', 'np.reshape', (['data', 'self.data_array.shape'], {}), '(data, self.data_array.shape)\n', (618, 647), True, 'import numpy as np\n'), ((1303, 1341), 'numpy.log10', 'np.log10', (['(1.0 / im_M * self.data_array)'], {}), '(1.0 / im_M * self.data_array)\n', (1311, 1341), True, 'import numpy as np\n')]
|
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.optimizers import Adam
from nets.facenet import facenet
from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss
from utils.callbacks import (ExponentDecayScheduler, LFW_callback, LossHistory,
ModelCheckpoint)
from utils.utils_fit import fit_one_epoch
#------------------------------------------------#
#   Count how many identities there are; this is used for
#   the auxiliary cross-entropy loss that helps convergence
#------------------------------------------------#
def get_num_classes(annotation_path):
    with open(annotation_path) as f:
        dataset_path = f.readlines()
    labels = []
    for path in dataset_path:
        path_split = path.split(";")
        labels.append(int(path_split[0]))
    num_classes = np.max(labels) + 1
    return num_classes
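# Each line of cls_train.txt is assumed to look like "<class_label>;<image_path>"
# (e.g. "0;datasets/person_0/0001.jpg"), so the label is the integer before the first ';'.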
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
if __name__ == "__main__":
    #----------------------------------------------------#
    #   Whether to train in eager mode
    #----------------------------------------------------#
    eager = False
    #--------------------------------------------------------#
    #   Points to cls_train.txt in the root directory,
    #   from which face image paths and labels are read
    #--------------------------------------------------------#
    annotation_path = "cls_train.txt"
    #--------------------------------------------------------#
    #   Input image size; a common setting is [112, 112, 3]
    #--------------------------------------------------------#
    input_shape = [160, 160, 3]
    #--------------------------------------------------------#
    #   Choice of backbone feature-extraction network:
    #   mobilenet or inception_resnetv1
    #--------------------------------------------------------#
    backbone = "mobilenet"
    #----------------------------------------------------------------------------------------------------------------------------#
    #   See the README for how to download the weight file; it can be fetched from the network drive.
    #   The most important part of the pretrained weights is the backbone feature-extraction part, which is used for feature extraction.
    #
    #   If training is interrupted, model_path can be set to a weight file in the logs folder so the partially trained weights are reloaded.
    #   Also adjust the training parameters below to keep the epoch numbering continuous.
    #
    #   When model_path = '' no weights are loaded for the whole model.
    #
    #   To start training from the backbone's pretrained weights, set model_path to the backbone weights; only the backbone is then loaded.
    #   To train from scratch, set model_path = '' and Freeze_Train = False; training then starts from scratch with no frozen-backbone phase.
    #   In general, training from scratch performs poorly because the weights are too random and feature extraction is ineffective.
    #
    #   Networks are usually not trained from scratch; at least the backbone weights are reused. Some papers skip pretraining,
    #   mainly because their datasets are large and their tuning is very good.
    #   If you really must train the backbone, look into the ImageNet dataset: first train a classification model whose backbone
    #   is shared with this model, then train starting from that.
    #----------------------------------------------------------------------------------------------------------------------------#
    model_path = "model_data/facenet_mobilenet.h5"
    #-------------------------------------------------------------------#
    #   Whether to use freeze training; by default the backbone is frozen first and then unfrozen.
    #-------------------------------------------------------------------#
    Freeze_Train = True
    #-------------------------------------------------------------------#
    #   Whether to use multiple workers to read data; 1 disables multiprocessing.
    #   Enabling it speeds up data loading but uses more memory.
    #   Only enable it when I/O is the bottleneck, i.e. the GPU is much faster than image loading.
    #-------------------------------------------------------------------#
    num_workers = 1
    #-------------------------------------------------------------------#
    #   Whether to enable LFW evaluation
    #-------------------------------------------------------------------#
    lfw_eval_flag = True
    #-------------------------------------------------------------------#
    #   Path to the LFW evaluation dataset and the corresponding pairs txt file
    #-------------------------------------------------------------------#
    lfw_dir_path = "lfw"
    lfw_pairs_path = "model_data/lfw_pair.txt"
    num_classes = get_num_classes(annotation_path)
    model = facenet(input_shape, num_classes, backbone=backbone, mode="train")
    model.load_weights(model_path, by_name=True, skip_mismatch=True)
    #-------------------------------------------------------------------------------#
    #   Training-parameter setup
    #   logging is the TensorBoard save directory
    #   checkpoint controls how weights are saved; period sets how many epochs between saves
    #   reduce_lr sets how the learning rate decays
    #   early_stopping configures early stopping: training ends automatically when val_loss
    #   stops improving, which indicates the model has roughly converged
    #-------------------------------------------------------------------------------#
    checkpoint_period = ModelCheckpoint('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                        monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)
    reduce_lr = ExponentDecayScheduler(decay_rate=0.94, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    tensorboard = TensorBoard(log_dir='logs/')
    loss_history = LossHistory('logs/')
    #----------------------#
    #   LFW evaluation
    #----------------------#
    test_loader = LFWDataset(dir=lfw_dir_path, pairs_path=lfw_pairs_path, batch_size=32, input_shape=input_shape) if lfw_eval_flag else None
    lfw_callback = LFW_callback(test_loader)
    #-------------------------------------------------------#
    #   0.05 of the data is used for validation, 0.95 for training
    #-------------------------------------------------------#
    val_split = 0.05
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
    if backbone == "mobilenet":
        freeze_layer = 81
    elif backbone == "inception_resnetv1":
        freeze_layer = 440
    else:
        raise ValueError('Unsupported backbone - `{}`, Use mobilenet, inception_resnetv1.'.format(backbone))
    if Freeze_Train:
        for i in range(freeze_layer):
            model.layers[i].trainable = False
    #---------------------------------------------------------#
    #   Training is split into two phases: a frozen phase and an unfrozen phase.
    #   Running out of GPU memory has nothing to do with dataset size; if you see
    #   an out-of-memory error, reduce batch_size.
    #   Because of the BatchNorm layers, batch_size must be at least 2, never 1.
    #---------------------------------------------------------#
    #---------------------------------------------------------#
    #   Init_Epoch is the starting epoch
    #   Freeze_Epoch is the number of epochs trained with the backbone frozen
    #   Epoch is the total number of training epochs
    #   If you hit OOM or run out of GPU memory, reduce Batch_size
    #---------------------------------------------------------#
    if True:
        #----------------------------------------------------#
        #   Frozen-phase training parameters:
        #   the backbone is frozen, so the feature-extraction
        #   network does not change; this uses less GPU memory
        #   and only fine-tunes the network
        #----------------------------------------------------#
        Batch_size = 32
        Lr = 1e-3
        Init_epoch = 0
        Freeze_epoch = 50
        epoch_step = num_train // Batch_size
        epoch_step_val = num_val // Batch_size
        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('The dataset is too small to train on; please add more data.')
        train_dataset = FacenetDataset(input_shape, lines[:num_train], num_train, num_classes, Batch_size)
        val_dataset = FacenetDataset(input_shape, lines[num_train:], num_val, num_classes, Batch_size)
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))
        if eager:
            gen = tf.data.Dataset.from_generator(partial(train_dataset.generate), (tf.float32, tf.float32))
            gen_val = tf.data.Dataset.from_generator(partial(val_dataset.generate), (tf.float32, tf.float32))
            gen = gen.shuffle(buffer_size=Batch_size).prefetch(buffer_size=Batch_size)
            gen_val = gen_val.shuffle(buffer_size=Batch_size).prefetch(buffer_size=Batch_size)
            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=Lr, decay_steps=epoch_step, decay_rate=0.94, staircase=True)
            optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
            for epoch in range(Init_epoch, Freeze_epoch):
                fit_one_epoch(model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
                              Freeze_epoch, triplet_loss(batch_size=Batch_size), test_loader, lfw_eval_flag)
        else:
            model.compile(
                loss={'Embedding': triplet_loss(batch_size=Batch_size), 'Softmax': 'categorical_crossentropy'},
                optimizer=Adam(lr=Lr), metrics={'Softmax': 'categorical_accuracy'}
            )
            model.fit_generator(
                generator=train_dataset,
                steps_per_epoch=epoch_step,
                validation_data=val_dataset,
                validation_steps=epoch_step_val,
                epochs=Freeze_epoch,
                initial_epoch=Init_epoch,
                use_multiprocessing=True if num_workers > 1 else False,
                workers=num_workers,
                callbacks=[checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history, lfw_callback] if lfw_eval_flag else [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history]
            )
if True:
#----------------------------------------------------#
# 解冻阶段训练参数
# 此时模型的主干不被冻结了,特征提取网络会发生改变
# 占用的显存较大,网络所有的参数都会发生改变
#----------------------------------------------------#
Batch_size = 32
Lr = 1e-4
Freeze_epoch = 50
Epoch = 100
epoch_step = num_train // Batch_size
epoch_step_val = num_val // Batch_size
if epoch_step == 0 or epoch_step_val == 0:
raise ValueError('数据集过小,无法进行训练,请扩充数据集。')
train_dataset = FacenetDataset(input_shape, lines[:num_train], num_train, num_classes, Batch_size)
val_dataset = FacenetDataset(input_shape, lines[num_train:], num_val, num_classes, Batch_size)
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))
if eager:
gen = tf.data.Dataset.from_generator(partial(train_dataset.generate), (tf.float32, tf.float32))
gen_val = tf.data.Dataset.from_generator(partial(val_dataset.generate), (tf.float32, tf.float32))
gen = gen.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
gen_val = gen_val.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate = Lr, decay_steps = epoch_step, decay_rate=0.94, staircase=True)
optimizer = tf.keras.optimizers.Adam(learning_rate = lr_schedule)
for epoch in range(Freeze_epoch, Epoch):
fit_one_epoch(model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
Freeze_epoch, triplet_loss(batch_size=Batch_size), test_loader, lfw_eval_flag)
else:
model.compile(
loss={'Embedding' : triplet_loss(batch_size=Batch_size), 'Softmax' : 'categorical_crossentropy'},
optimizer = Adam(lr=Lr), metrics = {'Softmax' : 'categorical_accuracy'}
)
model.fit_generator(
generator = train_dataset,
steps_per_epoch = epoch_step,
validation_data = val_dataset,
validation_steps = epoch_step_val,
epochs = Epoch,
initial_epoch = Freeze_epoch,
use_multiprocessing = True if num_workers > 1 else False,
workers = num_workers,
callbacks = [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history, lfw_callback] if lfw_eval_flag else [checkpoint_period, reduce_lr, early_stopping, tensorboard, loss_history]
)
|
[
"utils.callbacks.LFW_callback",
"functools.partial",
"numpy.random.seed",
"numpy.random.shuffle",
"nets.facenet_training.triplet_loss",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"nets.facenet_training.LFWDataset",
"nets.facenet.facenet",
"numpy.max",
"nets.facenet_training.FacenetDataset",
"tensorflow.keras.optimizers.Adam",
"utils.callbacks.LossHistory",
"utils.callbacks.ExponentDecayScheduler",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.keras.callbacks.EarlyStopping",
"utils.callbacks.ModelCheckpoint"
] |
[((897, 960), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', ([], {'device_type': '"""GPU"""'}), "(device_type='GPU')\n", (941, 960), True, 'import tensorflow as tf\n'), ((982, 1033), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (1022, 1033), True, 'import tensorflow as tf\n'), ((3776, 3842), 'nets.facenet.facenet', 'facenet', (['input_shape', 'num_classes'], {'backbone': 'backbone', 'mode': '"""train"""'}), "(input_shape, num_classes, backbone=backbone, mode='train')\n", (3783, 3842), False, 'from nets.facenet import facenet\n'), ((4301, 4459), 'utils.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5"""'], {'monitor': '"""val_loss"""', 'save_weights_only': '(True)', 'save_best_only': '(False)', 'period': '(1)'}), "('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\n monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)\n", (4316, 4459), False, 'from utils.callbacks import ExponentDecayScheduler, LFW_callback, LossHistory, ModelCheckpoint\n'), ((4510, 4560), 'utils.callbacks.ExponentDecayScheduler', 'ExponentDecayScheduler', ([], {'decay_rate': '(0.94)', 'verbose': '(1)'}), '(decay_rate=0.94, verbose=1)\n', (4532, 4560), False, 'from utils.callbacks import ExponentDecayScheduler, LFW_callback, LossHistory, ModelCheckpoint\n'), ((4591, 4661), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='val_loss', min_delta=0, patience=10, verbose=1)\n", (4604, 4661), False, 'from tensorflow.keras.callbacks import EarlyStopping, TensorBoard\n'), ((4688, 4716), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""logs/"""'}), "(log_dir='logs/')\n", (4699, 4716), False, 'from tensorflow.keras.callbacks import EarlyStopping, TensorBoard\n'), ((4743, 4763), 'utils.callbacks.LossHistory', 'LossHistory', (['"""logs/"""'], {}), "('logs/')\n", (4754, 4763), False, 'from utils.callbacks import ExponentDecayScheduler, LFW_callback, LossHistory, ModelCheckpoint\n'), ((5011, 5036), 'utils.callbacks.LFW_callback', 'LFW_callback', (['test_loader'], {}), '(test_loader)\n', (5023, 5036), False, 'from utils.callbacks import ExponentDecayScheduler, LFW_callback, LossHistory, ModelCheckpoint\n'), ((5284, 5305), 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), '(10101)\n', (5298, 5305), True, 'import numpy as np\n'), ((5310, 5334), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (5327, 5334), True, 'import numpy as np\n'), ((5339, 5359), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (5353, 5359), True, 'import numpy as np\n'), ((847, 861), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (853, 861), True, 'import numpy as np\n'), ((4862, 4961), 'nets.facenet_training.LFWDataset', 'LFWDataset', ([], {'dir': 'lfw_dir_path', 'pairs_path': 'lfw_pairs_path', 'batch_size': '(32)', 'input_shape': 'input_shape'}), '(dir=lfw_dir_path, pairs_path=lfw_pairs_path, batch_size=32,\n input_shape=input_shape)\n', (4872, 4961), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((6853, 6939), 'nets.facenet_training.FacenetDataset', 'FacenetDataset', (['input_shape', 'lines[:num_train]', 'num_train', 'num_classes', 'Batch_size'], {}), '(input_shape, 
lines[:num_train], num_train, num_classes,\n Batch_size)\n', (6867, 6939), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((6960, 7045), 'nets.facenet_training.FacenetDataset', 'FacenetDataset', (['input_shape', 'lines[num_train:]', 'num_val', 'num_classes', 'Batch_size'], {}), '(input_shape, lines[num_train:], num_val, num_classes, Batch_size\n )\n', (6974, 7045), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((9821, 9907), 'nets.facenet_training.FacenetDataset', 'FacenetDataset', (['input_shape', 'lines[:num_train]', 'num_train', 'num_classes', 'Batch_size'], {}), '(input_shape, lines[:num_train], num_train, num_classes,\n Batch_size)\n', (9835, 9907), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((9928, 10013), 'nets.facenet_training.FacenetDataset', 'FacenetDataset', (['input_shape', 'lines[num_train:]', 'num_val', 'num_classes', 'Batch_size'], {}), '(input_shape, lines[num_train:], num_val, num_classes, Batch_size\n )\n', (9942, 10013), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((7636, 7769), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': 'Lr', 'decay_steps': 'epoch_step', 'decay_rate': '(0.94)', 'staircase': '(True)'}), '(initial_learning_rate=Lr,\n decay_steps=epoch_step, decay_rate=0.94, staircase=True)\n', (7682, 7769), True, 'import tensorflow as tf\n'), ((7824, 7875), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), '(learning_rate=lr_schedule)\n', (7848, 7875), True, 'import tensorflow as tf\n'), ((10604, 10737), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': 'Lr', 'decay_steps': 'epoch_step', 'decay_rate': '(0.94)', 'staircase': '(True)'}), '(initial_learning_rate=Lr,\n decay_steps=epoch_step, decay_rate=0.94, staircase=True)\n', (10650, 10737), True, 'import tensorflow as tf\n'), ((10792, 10843), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), '(learning_rate=lr_schedule)\n', (10816, 10843), True, 'import tensorflow as tf\n'), ((7245, 7276), 'functools.partial', 'partial', (['train_dataset.generate'], {}), '(train_dataset.generate)\n', (7252, 7276), False, 'from functools import partial\n'), ((7357, 7386), 'functools.partial', 'partial', (['val_dataset.generate'], {}), '(val_dataset.generate)\n', (7364, 7386), False, 'from functools import partial\n'), ((10213, 10244), 'functools.partial', 'partial', (['train_dataset.generate'], {}), '(train_dataset.generate)\n', (10220, 10244), False, 'from functools import partial\n'), ((10325, 10354), 'functools.partial', 'partial', (['val_dataset.generate'], {}), '(val_dataset.generate)\n', (10332, 10354), False, 'from functools import partial\n'), ((8091, 8126), 'nets.facenet_training.triplet_loss', 'triplet_loss', ([], {'batch_size': 'Batch_size'}), '(batch_size=Batch_size)\n', (8103, 8126), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((8340, 8351), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'Lr'}), '(lr=Lr)\n', (8344, 8351), False, 'from tensorflow.keras.optimizers import Adam\n'), ((11054, 11089), 'nets.facenet_training.triplet_loss', 'triplet_loss', ([], {'batch_size': 'Batch_size'}), '(batch_size=Batch_size)\n', 
(11066, 11089), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((11303, 11314), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'Lr'}), '(lr=Lr)\n', (11307, 11314), False, 'from tensorflow.keras.optimizers import Adam\n'), ((8233, 8268), 'nets.facenet_training.triplet_loss', 'triplet_loss', ([], {'batch_size': 'Batch_size'}), '(batch_size=Batch_size)\n', (8245, 8268), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n'), ((11196, 11231), 'nets.facenet_training.triplet_loss', 'triplet_loss', ([], {'batch_size': 'Batch_size'}), '(batch_size=Batch_size)\n', (11208, 11231), False, 'from nets.facenet_training import FacenetDataset, LFWDataset, triplet_loss\n')]
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import wta_sparisty as wta
fX = theano.config.floatX
def test_wta_spatial_sparsity_node_serialization():
tn.check_serialization(wta.WTASpatialSparsityNode("a"))
def test_wta_sparsity_node_serialization():
tn.check_serialization(wta.WTASparsityNode("a"))
def test_wta_spatial_sparsity_node():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(2, 2, 2, 2)),
wta.WTASpatialSparsityNode("a")]
).network()
fn = network.function(["i"], ["s"])
x = np.arange(16).reshape(2, 2, 2, 2).astype(fX)
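    # WTA spatial sparsity keeps, per sample and channel, only the maximum over
    # the spatial positions.  With an increasing arange as input that maximum is
    # always the last spatial position, so the expected output zeroes every
    # other location (column 0 and row 0 of each 2x2 map).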
ans = x.copy()
ans[..., 0] = 0
ans[..., 0, :] = 0
np.testing.assert_allclose(fn(x)[0],
ans)
def test_wta_sparsity_node():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(2, 2, 2, 2)),
wta.WTASparsityNode("a", percentile=0.5)]
).network()
fn = network.function(["i"], ["s"])
x = np.arange(16).reshape(2, 2, 2, 2).astype(fX)
ans = x.copy()
ans[..., 0] = 0
ans[..., 0, :] = 0
ans[0] = 0
res = fn(x)[0]
np.testing.assert_allclose(res, ans)
|
[
"treeano.sandbox.nodes.wta_sparisty.WTASparsityNode",
"treeano.sandbox.nodes.wta_sparisty.WTASpatialSparsityNode",
"treeano.nodes.InputNode",
"numpy.arange",
"numpy.testing.assert_allclose"
] |
[((1234, 1270), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'ans'], {}), '(res, ans)\n', (1260, 1270), True, 'import numpy as np\n'), ((290, 321), 'treeano.sandbox.nodes.wta_sparisty.WTASpatialSparsityNode', 'wta.WTASpatialSparsityNode', (['"""a"""'], {}), "('a')\n", (316, 321), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((396, 420), 'treeano.sandbox.nodes.wta_sparisty.WTASparsityNode', 'wta.WTASparsityNode', (['"""a"""'], {}), "('a')\n", (415, 420), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((517, 554), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': '(2, 2, 2, 2)'}), "('i', shape=(2, 2, 2, 2))\n", (529, 554), True, 'import treeano.nodes as tn\n'), ((565, 596), 'treeano.sandbox.nodes.wta_sparisty.WTASpatialSparsityNode', 'wta.WTASpatialSparsityNode', (['"""a"""'], {}), "('a')\n", (591, 596), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((663, 676), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (672, 676), True, 'import numpy as np\n'), ((934, 971), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': '(2, 2, 2, 2)'}), "('i', shape=(2, 2, 2, 2))\n", (946, 971), True, 'import treeano.nodes as tn\n'), ((982, 1022), 'treeano.sandbox.nodes.wta_sparisty.WTASparsityNode', 'wta.WTASparsityNode', (['"""a"""'], {'percentile': '(0.5)'}), "('a', percentile=0.5)\n", (1001, 1022), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((1089, 1102), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (1098, 1102), True, 'import numpy as np\n')]
|
# Copyright (c) 2016-2019 <NAME>
#
# This file is part of XL-mHG.
"""Contains the `mHGResult` class."""
import sys
import hashlib
import logging
import numpy as np
try:
# This is a duct-tape fix for the Google App Engine, on which importing
# the C extension fails.
from . import mhg_cython
except ImportError:
print('Warning (xlmhg): Failed to import "mhg_cython" C extension.',
file=sys.stderr)
from . import mhg as mhg_cython
logger = logging.getLogger(__name__)
class mHGResult(object):
"""The result of an XL-mHG test.
This class is used by the `get_xlmhg_test_result` function to represent the
result of an XL-mHG test.
Parameters
----------
N: int
See :attr:`N` attribute.
indices
See :attr:`indices` attribute.
X: int
See :attr:`X` attribute.
L: int
        See :attr:`L` attribute.
stat: float
See :attr:`stat` attribute.
cutoff: int
See :attr:`cutoff` attribute.
pval: float
See :attr:`pval` attribute.
pval_thresh: float, optional
See :attr:`pval_thresh` attribute.
escore_pval_thresh: float, optional
See :attr:`escore_pval_thresh` attribute.
escore_tol: float, optional
See :attr:`escore_tol` attribute.
Attributes
----------
N: int
The length of the ranked list (i.e., the number of elements in it).
indices: `numpy.ndarray` with ``ndim=1`` and ``dtype=np.uint16``.
A sorted (!) list of indices of all the 1's in the ranked list.
X: int
The XL-mHG X parameter.
L: int
The XL-mHG L parameter.
stat: float
The XL-mHG test statistic.
cutoff: int
The XL-mHG cutoff.
pval: float
The XL-mHG p-value.
pval_thresh: float or None
The user-specified significance (p-value) threshold for this test.
escore_pval_thresh: float or None
The user-specified p-value threshold used in the E-score calculation.
escore_tol: float or None
The floating point tolerance used in the E-score calculation.
"""
def __init__(self, N, indices, X, L, stat, cutoff, pval,
pval_thresh=None, escore_pval_thresh=None, escore_tol=None):
assert isinstance(N, int)
assert isinstance(indices, np.ndarray) and indices.ndim == 1 and \
np.issubdtype(indices.dtype, np.uint16) and \
indices.flags.c_contiguous
assert isinstance(X, int)
assert isinstance(L, int)
assert isinstance(stat, float)
assert isinstance(cutoff, int)
assert isinstance(pval, float)
if pval_thresh is not None:
assert isinstance(pval_thresh, float)
if escore_pval_thresh is not None:
assert isinstance(escore_pval_thresh, float)
if escore_tol is not None:
assert isinstance(escore_tol, float)
self.indices = indices
self.N = N
self.X = X
self.L = L
self.stat = stat
self.cutoff = cutoff
self.pval = pval
self.pval_thresh = pval_thresh
self.escore_pval_thresh = escore_pval_thresh
self.escore_tol = escore_tol
def __repr__(self):
return '<%s object (N=%d, K=%d, pval=%.1e, hash="%s")>' \
% (self.__class__.__name__,
self.N, self.K, self.pval, self.hash)
def __str__(self):
return '<%s object (N=%d, K=%d, X=%d, L=%d, pval=%.1e)>' \
% (self.__class__.__name__,
self.N, self.K, self.X, self.L, self.pval)
def __eq__(self, other):
if self is other:
return True
elif type(self) == type(other):
return self.hash == other.hash
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
@property
def v(self):
"""(property) Returns the list as a `numpy.ndarray`
(with dtype ``np.uint8``).
"""
v = np.zeros(self.N, dtype=np.uint8)
v[self.indices] = 1
return v
@property
def K(self):
"""(property) Returns the number of 1's in the list."""
return self.indices.size
@property
def k(self):
"""(property) Returns the number of 1's above the XL-mHG cutoff."""
return int(np.sum(self.indices < self.cutoff))
@property
def hash(self):
"""(property) Returns a unique hash value for the result."""
data_str = ';'.join(
[str(repr(var)) for var in
[self.N, self.K, self.X, self.L,
self.stat, self.cutoff, self.pval,
self.pval_thresh, self.escore_pval_thresh]])
data_str += ';'
data = data_str.encode('UTF-8') + self.indices.tobytes()
return str(hashlib.md5(data).hexdigest())
@property
def fold_enrichment(self):
"""(property) Returns the fold enrichment at the XL-mHG cutoff."""
return self.k / (self.K*(self.cutoff/float(self.N)))
@property
def escore(self):
"""(property) Returns the E-score associated with the result."""
hg_pval_thresh = self.escore_pval_thresh or self.pval
escore_tol = self.escore_tol or mhg_cython.get_default_tol()
es = mhg_cython.get_xlmhg_escore(
self.indices, self.N, self.K, self.X, self.L,
hg_pval_thresh, escore_tol)
return es
|
[
"hashlib.md5",
"numpy.sum",
"numpy.zeros",
"logging.getLogger",
"numpy.issubdtype"
] |
[((473, 500), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (490, 500), False, 'import logging\n'), ((4035, 4067), 'numpy.zeros', 'np.zeros', (['self.N'], {'dtype': 'np.uint8'}), '(self.N, dtype=np.uint8)\n', (4043, 4067), True, 'import numpy as np\n'), ((2366, 2405), 'numpy.issubdtype', 'np.issubdtype', (['indices.dtype', 'np.uint16'], {}), '(indices.dtype, np.uint16)\n', (2379, 2405), True, 'import numpy as np\n'), ((4369, 4403), 'numpy.sum', 'np.sum', (['(self.indices < self.cutoff)'], {}), '(self.indices < self.cutoff)\n', (4375, 4403), True, 'import numpy as np\n'), ((4839, 4856), 'hashlib.md5', 'hashlib.md5', (['data'], {}), '(data)\n', (4850, 4856), False, 'import hashlib\n')]
|
import numpy as np
import pytest
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
def test_Reshape():
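    # With allowzero=True the ONNX Reshape treats a 0 in the target shape as a
    # genuine zero-sized dimension (rather than "copy the corresponding input
    # dimension"), so the empty (0, 3, 4) tensor is reshaped to an empty
    # (3, 4, 0) tensor.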
original_shape = [0, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([3, 4, 0], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape, allowzero=True)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
op = Reshape(
Input((0, 3, 4), np.dtype(np.float32)),
Input((3,), np.dtype(np.int64)),
allowzero=True,
)
tf_op = TensorflowConverter().visit(op)
result = tf_op(data, new_shape).numpy()
assert np.allclose(result, y)
def test_Reshape_reordered_all_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([4, 2, 3], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_reordered_last_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 4, 3], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_reduced_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 12], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_extended_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 3, 2, 2], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_one_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([24], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_negative_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, -1, 2], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_negative_extended_dims():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([-1, 2, 3, 4], dtype=np.int64)
y = np.reshape(data, new_shape)
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_zero_dim():
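    # Without allowzero, a 0 in the target shape means "keep the corresponding
    # input dimension", so [2, 0, 4, 1] resolves to [2, 3, 4, 1].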
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 0, 4, 1], dtype=np.int64)
y = np.reshape(data, [2, 3, 4, 1])
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
def test_Reshape_zero_and_negative_dim():
original_shape = [2, 3, 4]
data = np.random.random_sample(original_shape).astype(np.float32)
new_shape = np.array([2, 0, 1, -1], dtype=np.int64)
y = np.reshape(data, [2, 3, 1, -1])
op = Reshape(data, new_shape)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
|
[
"numpy.random.random_sample",
"numpy.allclose",
"numpy.dtype",
"numpy.array",
"numpy.reshape"
] |
[((250, 285), 'numpy.array', 'np.array', (['[3, 4, 0]'], {'dtype': 'np.int64'}), '([3, 4, 0], dtype=np.int64)\n', (258, 285), True, 'import numpy as np\n'), ((294, 321), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (304, 321), True, 'import numpy as np\n'), ((457, 479), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (468, 479), True, 'import numpy as np\n'), ((717, 739), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (728, 739), True, 'import numpy as np\n'), ((898, 933), 'numpy.array', 'np.array', (['[4, 2, 3]'], {'dtype': 'np.int64'}), '([4, 2, 3], dtype=np.int64)\n', (906, 933), True, 'import numpy as np\n'), ((942, 969), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (952, 969), True, 'import numpy as np\n'), ((1089, 1111), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1100, 1111), True, 'import numpy as np\n'), ((1271, 1306), 'numpy.array', 'np.array', (['[2, 4, 3]'], {'dtype': 'np.int64'}), '([2, 4, 3], dtype=np.int64)\n', (1279, 1306), True, 'import numpy as np\n'), ((1315, 1342), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (1325, 1342), True, 'import numpy as np\n'), ((1462, 1484), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1473, 1484), True, 'import numpy as np\n'), ((1637, 1670), 'numpy.array', 'np.array', (['[2, 12]'], {'dtype': 'np.int64'}), '([2, 12], dtype=np.int64)\n', (1645, 1670), True, 'import numpy as np\n'), ((1679, 1706), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (1689, 1706), True, 'import numpy as np\n'), ((1826, 1848), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1837, 1848), True, 'import numpy as np\n'), ((2002, 2040), 'numpy.array', 'np.array', (['[2, 3, 2, 2]'], {'dtype': 'np.int64'}), '([2, 3, 2, 2], dtype=np.int64)\n', (2010, 2040), True, 'import numpy as np\n'), ((2049, 2076), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (2059, 2076), True, 'import numpy as np\n'), ((2196, 2218), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (2207, 2218), True, 'import numpy as np\n'), ((2366, 2396), 'numpy.array', 'np.array', (['[24]'], {'dtype': 'np.int64'}), '([24], dtype=np.int64)\n', (2374, 2396), True, 'import numpy as np\n'), ((2405, 2432), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (2415, 2432), True, 'import numpy as np\n'), ((2552, 2574), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (2563, 2574), True, 'import numpy as np\n'), ((2727, 2763), 'numpy.array', 'np.array', (['[2, -1, 2]'], {'dtype': 'np.int64'}), '([2, -1, 2], dtype=np.int64)\n', (2735, 2763), True, 'import numpy as np\n'), ((2772, 2799), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (2782, 2799), True, 'import numpy as np\n'), ((2919, 2941), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (2930, 2941), True, 'import numpy as np\n'), ((3104, 3143), 'numpy.array', 'np.array', (['[-1, 2, 3, 4]'], {'dtype': 'np.int64'}), '([-1, 2, 3, 4], dtype=np.int64)\n', (3112, 3143), True, 'import numpy as np\n'), ((3152, 3179), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (3162, 3179), True, 'import numpy as np\n'), ((3299, 3321), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', 
(3310, 3321), True, 'import numpy as np\n'), ((3470, 3508), 'numpy.array', 'np.array', (['[2, 0, 4, 1]'], {'dtype': 'np.int64'}), '([2, 0, 4, 1], dtype=np.int64)\n', (3478, 3508), True, 'import numpy as np\n'), ((3517, 3547), 'numpy.reshape', 'np.reshape', (['data', '[2, 3, 4, 1]'], {}), '(data, [2, 3, 4, 1])\n', (3527, 3547), True, 'import numpy as np\n'), ((3667, 3689), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (3678, 3689), True, 'import numpy as np\n'), ((3851, 3890), 'numpy.array', 'np.array', (['[2, 0, 1, -1]'], {'dtype': 'np.int64'}), '([2, 0, 1, -1], dtype=np.int64)\n', (3859, 3890), True, 'import numpy as np\n'), ((3899, 3930), 'numpy.reshape', 'np.reshape', (['data', '[2, 3, 1, -1]'], {}), '(data, [2, 3, 1, -1])\n', (3909, 3930), True, 'import numpy as np\n'), ((4050, 4072), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (4061, 4072), True, 'import numpy as np\n'), ((175, 214), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (198, 214), True, 'import numpy as np\n'), ((524, 544), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (532, 544), True, 'import numpy as np\n'), ((567, 585), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (575, 585), True, 'import numpy as np\n'), ((823, 862), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (846, 862), True, 'import numpy as np\n'), ((1196, 1235), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (1219, 1235), True, 'import numpy as np\n'), ((1562, 1601), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (1585, 1601), True, 'import numpy as np\n'), ((1927, 1966), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (1950, 1966), True, 'import numpy as np\n'), ((2291, 2330), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (2314, 2330), True, 'import numpy as np\n'), ((2652, 2691), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (2675, 2691), True, 'import numpy as np\n'), ((3029, 3068), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (3052, 3068), True, 'import numpy as np\n'), ((3395, 3434), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (3418, 3434), True, 'import numpy as np\n'), ((3776, 3815), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (3799, 3815), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import sys, platform, os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib import pyplot
import numpy as np
from matplotlib.patches import Ellipse
import camb
from camb import model, initialpower
from pysm.nominal import models
import healpy as hp
import site
plt.rcParams["figure.facecolor"] = 'w'
plt.rcParams["axes.facecolor"] = 'w'
plt.rcParams["savefig.facecolor"] = 'w'
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
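    # Draws the nstd-standard-deviation ellipse of a 2-D covariance matrix: the
    # axes point along the eigenvectors of `cov`, the rotation angle comes from
    # the leading eigenvector, and the full width/height are 2 * nstd * sqrt(eigenvalue).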
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
def conf_legend():
conf=[99,95,68]
concolor=['orangered','red','maroon']
Ep_handle={}
Ep_label={}
for i in range(0,3):
Ep_handle[i]=[]
Ep_label[i]=[]
Ep_handle[i] = [mpatches.Patch(color=concolor[i], alpha=0.6, linewidth=0)]
Ep_label[i] = [u'${0}\%$ CL'.format(conf[i])]
handles2=[]
labels2=[]
for i in range(0,3):
handles2.extend(Ep_handle[i])
labels2.extend(Ep_label[i])
legend22 = plt.legend(handles2,labels2,loc='center right',bbox_to_anchor = [0.9325, 0.61],
ncol=2,prop={'size':12},numpoints=1)
pyplot.gca().add_artist(legend22)
plt.legend(loc='center right',bbox_to_anchor = [0.99, 0.27])
def quantum_levels_legend(colours,l):
p_handle={}
p_label={}
for i in range(0,5):
p_handle[i]=[]
p_label[i]=[]
p_handle[i] = [mpatches.Patch(color=colours[i], alpha=1.0, linewidth=1.5)]
p_label[i] = [u'$l=m={0}$'.format(l[i])]
plt.text(13.11, 0.34, r'$\mu_{\rm ax}=10^{-11}eV$', fontsize=15,bbox={'facecolor':'white', 'alpha':1.0, 'pad':12})
#handle, label = ax.get_legend_handles_labels()
handles=[]
labels=[]
for i in range(0,5):
handles.extend(p_handle[i])
labels.extend(p_label[i])
legend2 = plt.legend(handles,labels,loc='lower right',
ncol=2,prop={'size':12},numpoints=1)
pyplot.gca().add_artist(legend2)
def regge_plane_plot(x1,y1,colours,sr_spins,sr_masses,sr_spin_up,sr_spin_low,sr_mass_up,sr_mass_low):
fig, ax = plt.subplots(figsize=(10,6))
for i in range(4,-1,-1):
ax.fill_between(x1[i], y1[i], 1,facecolor=colours[i],linewidth=2.0,zorder=2)
labels=(r'$\rm Continuum\ Fit \ Black$'
'\n'
r'$\rm Hole \ Data$')
ax.errorbar(sr_masses, sr_spins, yerr=[sr_spin_up,sr_spin_low], xerr=[sr_mass_up,sr_mass_low], fmt='o',color='k',label=labels)
plt.legend(loc='lower right',prop={'size':12})
plt.xlabel(r'$\rm Black \ Hole \ Mass \ \left(\rm{M_{\rm BH}} \ / M_{\odot} \right)$', ha='center', va='center',size=20,labelpad=15)
plt.ylabel(r'$\rm Black \ Hole \ Spin \ \left( a_{*}\right)$',size=21)
plt.ylim(0,1)
plt.xlim(0,x1[4].max())
def regge_region_plot(fx,fy,blackholes,rt,xtem,ytem,dytem,dxtem,example_mass,example_spin,example_spin_error,example_mass_error,error_ellipse,bmhu):
plt.plot(fx,fy,linestyle='-',color='black')
print(xtem)
plt.fill_between(fx, fy,1, color='deepskyblue',alpha=0.3)
plt.xlim(fx.min(),fx.max())
if blackholes == True:
for i in range(len(ytem)):
plt.errorbar(xtem[i], ytem[i], yerr=dytem[i], xerr=dxtem[i], fmt='o',color='k')
plt.errorbar(example_mass,example_spin,yerr=example_spin_error,xerr=example_mass_error, fmt='o',color='k')
if error_ellipse==True:
for i in range (len(example_mass_error)):
plot_cov_ellipse([[(example_mass_error[i])**2, 0],[0, (example_spin_error[i])**2]],[example_mass[i],example_spin[i]], nstd=3, alpha=0.5, facecolor='none',zorder=1,edgecolor='black',linewidth=0.8)
plot_cov_ellipse([[(example_mass_error[i])**2, 0],[0, (example_spin_error[i])**2]],[example_mass[i],example_spin[i]], nstd=2, alpha=0.5, facecolor='none',zorder=1,edgecolor='black',linewidth=0.8)
plot_cov_ellipse([[(example_mass_error[i])**2, 0],[0, (example_spin_error[i])**2]],[example_mass[i],example_spin[i]], nstd=1, alpha=0.5, facecolor='none',zorder=1,edgecolor='black',linewidth=0.8)
plt.xlabel(r'${\rm M_{BH}} \left( M_{\odot} \right)$', ha='center', va='center',size=20,labelpad=15)
plt.ylabel(r'$ a_{*}$',size=21)
plt.ylim(0,1)
plt.xlim(0,70)
def intersection_plot(nx,ny,indx,indx2):
    plt.plot(nx[4][indx2[3]], ny[4][indx2[3]], 'ro')
plt.plot(nx[0][0:indx[0]],ny[0][0:indx[0]])
plt.plot(nx[1][indx2[0]:indx[1]],ny[1][indx2[0]:indx[1]])
plt.plot(nx[2][indx2[1]:indx[2]],ny[2][indx2[1]:indx[2]])
plt.plot(nx[3][indx2[2]:indx[3]],ny[3][indx2[2]:indx[3]])
plt.plot(nx[4][indx2[3]:-1],ny[4][indx2[3]:-1])
def superradiance_rates_plot(alpha,rates):
for i in range(0,5):
plt.plot(alpha,rates[i]*2,linewidth=2)
plt.yscale('log')
plt.xlabel(r'$\mu_{\rm ax} r_g$', size=24,labelpad=4.15)
plt.ylabel(r'$ \log_{10}(M_{\rm BH} \ IM(\omega))$',size=21,labelpad=2)
plt.xlim(0,2.55)
plt.ylim(10**-16.5,10**-6.5)
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.fill_between",
"numpy.arctan2",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"matplotlib.pyplot.text",
"numpy.linalg.eigh",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.errorbar",
"numpy.sqrt"
] |
[((831, 897), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos', 'width': 'width', 'height': 'height', 'angle': 'theta'}), '(xy=pos, width=width, height=height, angle=theta, **kwargs)\n', (838, 897), False, 'from matplotlib.patches import Ellipse\n'), ((1348, 1473), 'matplotlib.pyplot.legend', 'plt.legend', (['handles2', 'labels2'], {'loc': '"""center right"""', 'bbox_to_anchor': '[0.9325, 0.61]', 'ncol': '(2)', 'prop': "{'size': 12}", 'numpoints': '(1)'}), "(handles2, labels2, loc='center right', bbox_to_anchor=[0.9325, \n 0.61], ncol=2, prop={'size': 12}, numpoints=1)\n", (1358, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1572), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center right"""', 'bbox_to_anchor': '[0.99, 0.27]'}), "(loc='center right', bbox_to_anchor=[0.99, 0.27])\n", (1523, 1572), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1942), 'matplotlib.pyplot.text', 'plt.text', (['(13.11)', '(0.34)', '"""$\\\\mu_{\\\\rm ax}=10^{-11}eV$"""'], {'fontsize': '(15)', 'bbox': "{'facecolor': 'white', 'alpha': 1.0, 'pad': 12}"}), "(13.11, 0.34, '$\\\\mu_{\\\\rm ax}=10^{-11}eV$', fontsize=15, bbox={\n 'facecolor': 'white', 'alpha': 1.0, 'pad': 12})\n", (1826, 1942), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2189), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '"""lower right"""', 'ncol': '(2)', 'prop': "{'size': 12}", 'numpoints': '(1)'}), "(handles, labels, loc='lower right', ncol=2, prop={'size': 12},\n numpoints=1)\n", (2109, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2373), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2356, 2373), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2728), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'prop': "{'size': 12}"}), "(loc='lower right', prop={'size': 12})\n", (2690, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\rm Black \\\\ Hole \\\\ Mass \\\\ \\\\left(\\\\rm{M_{\\\\rm BH}} \\\\ / M_{\\\\odot} \\\\right)$"""'], {'ha': '"""center"""', 'va': '"""center"""', 'size': '(20)', 'labelpad': '(15)'}), "(\n '$\\\\rm Black \\\\ Hole \\\\ Mass \\\\ \\\\left(\\\\rm{M_{\\\\rm BH}} \\\\ / M_{\\\\odot} \\\\right)$'\n , ha='center', va='center', size=20, labelpad=15)\n", (2738, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2862, 2938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rm Black \\\\ Hole \\\\ Spin \\\\ \\\\left( a_{*}\\\\right)$"""'], {'size': '(21)'}), "('$\\\\rm Black \\\\ Hole \\\\ Spin \\\\ \\\\left( a_{*}\\\\right)$', size=21)\n", (2872, 2938), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2949), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (2943, 2949), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3172), 'matplotlib.pyplot.plot', 'plt.plot', (['fx', 'fy'], {'linestyle': '"""-"""', 'color': '"""black"""'}), "(fx, fy, linestyle='-', color='black')\n", (3134, 3172), True, 'import matplotlib.pyplot as plt\n'), ((3184, 3243), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['fx', 'fy', '(1)'], {'color': '"""deepskyblue"""', 'alpha': '(0.3)'}), "(fx, fy, 1, color='deepskyblue', alpha=0.3)\n", (3200, 3243), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4300), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\rm M_{BH}} \\\\left( M_{\\\\odot} \\\\right)$"""'], {'ha': '"""center"""', 'va': '"""center"""', 'size': '(20)', 
'labelpad': '(15)'}), "('${\\\\rm M_{BH}} \\\\left( M_{\\\\odot} \\\\right)$', ha='center', va=\n 'center', size=20, labelpad=15)\n", (4200, 4300), True, 'import matplotlib.pyplot as plt\n'), ((4292, 4324), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ a_{*}$"""'], {'size': '(21)'}), "('$ a_{*}$', size=21)\n", (4302, 4324), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4341), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4335, 4341), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4357), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(70)'], {}), '(0, 70)\n', (4350, 4357), True, 'import matplotlib.pyplot as plt\n'), ((4403, 4451), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[4][indx2[3]]', 'ny[4][indy2[3]]', '"""ro"""'], {}), "(nx[4][indx2[3]], ny[4][indy2[3]], 'ro')\n", (4411, 4451), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4497), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[0][0:indx[0]]', 'ny[0][0:indx[0]]'], {}), '(nx[0][0:indx[0]], ny[0][0:indx[0]])\n', (4461, 4497), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4556), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[1][indx2[0]:indx[1]]', 'ny[1][indx2[0]:indx[1]]'], {}), '(nx[1][indx2[0]:indx[1]], ny[1][indx2[0]:indx[1]])\n', (4506, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4615), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[2][indx2[1]:indx[2]]', 'ny[2][indx2[1]:indx[2]]'], {}), '(nx[2][indx2[1]:indx[2]], ny[2][indx2[1]:indx[2]])\n', (4565, 4615), True, 'import matplotlib.pyplot as plt\n'), ((4616, 4674), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[3][indx2[2]:indx[3]]', 'ny[3][indx2[2]:indx[3]]'], {}), '(nx[3][indx2[2]:indx[3]], ny[3][indx2[2]:indx[3]])\n', (4624, 4674), True, 'import matplotlib.pyplot as plt\n'), ((4675, 4723), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[4][indx2[3]:-1]', 'ny[4][indx2[3]:-1]'], {}), '(nx[4][indx2[3]:-1], ny[4][indx2[3]:-1])\n', (4683, 4723), True, 'import matplotlib.pyplot as plt\n'), ((4835, 4852), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4845, 4852), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu_{\\\\rm ax} r_g$"""'], {'size': '(24)', 'labelpad': '(4.15)'}), "('$\\\\mu_{\\\\rm ax} r_g$', size=24, labelpad=4.15)\n", (4864, 4913), True, 'import matplotlib.pyplot as plt\n'), ((4913, 4989), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ \\\\log_{10}(M_{\\\\rm BH} \\\\ IM(\\\\omega))$"""'], {'size': '(21)', 'labelpad': '(2)'}), "('$ \\\\log_{10}(M_{\\\\rm BH} \\\\ IM(\\\\omega))$', size=21, labelpad=2)\n", (4923, 4989), True, 'import matplotlib.pyplot as plt\n'), ((4986, 5003), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2.55)'], {}), '(0, 2.55)\n', (4994, 5003), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5037), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10 ** -16.5)', '(10 ** -6.5)'], {}), '(10 ** -16.5, 10 ** -6.5)\n', (5012, 5037), True, 'import matplotlib.pyplot as plt\n'), ((528, 547), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (542, 547), True, 'import numpy as np\n'), ((639, 648), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (646, 648), True, 'import matplotlib.pyplot as plt\n'), ((699, 728), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (709, 728), True, 'import numpy as np\n'), ((808, 821), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (815, 821), True, 'import numpy as np\n'), ((3410, 3525), 'matplotlib.pyplot.errorbar', 
'plt.errorbar', (['example_mass', 'example_spin'], {'yerr': 'example_spin_error', 'xerr': 'example_mass_error', 'fmt': '"""o"""', 'color': '"""k"""'}), "(example_mass, example_spin, yerr=example_spin_error, xerr=\n example_mass_error, fmt='o', color='k')\n", (3422, 3525), True, 'import matplotlib.pyplot as plt\n'), ((4795, 4837), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', '(rates[i] * 2)'], {'linewidth': '(2)'}), '(alpha, rates[i] * 2, linewidth=2)\n', (4803, 4837), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1172), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'concolor[i]', 'alpha': '(0.6)', 'linewidth': '(0)'}), '(color=concolor[i], alpha=0.6, linewidth=0)\n', (1129, 1172), True, 'import matplotlib.patches as mpatches\n'), ((1478, 1490), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (1488, 1490), False, 'from matplotlib import pyplot\n'), ((1713, 1771), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[i]', 'alpha': '(1.0)', 'linewidth': '(1.5)'}), '(color=colours[i], alpha=1.0, linewidth=1.5)\n', (1727, 1771), True, 'import matplotlib.patches as mpatches\n'), ((2194, 2206), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (2204, 2206), False, 'from matplotlib import pyplot\n'), ((3328, 3413), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['xtem[i]', 'ytem[i]'], {'yerr': 'dytem[i]', 'xerr': 'dxtem[i]', 'fmt': '"""o"""', 'color': '"""k"""'}), "(xtem[i], ytem[i], yerr=dytem[i], xerr=dxtem[i], fmt='o', color='k'\n )\n", (3340, 3413), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from base.RecommenderUtils import check_matrix
from base.BaseRecommender import RecommenderSystem
from tqdm import tqdm
import models.MF.Cython.MF_RMSE as mf
class IALS_numpy(RecommenderSystem):
    '''
    Implicit-feedback Alternating Least Squares model (a.k.a. Weighted Regularized Matrix Factorization)
    Reference: Collaborative Filtering for Implicit Feedback Datasets (Hu et al., 2008)
    Factorization model for binary/implicit feedback.
    First, it splits the feedback matrix R element-wise into a Preference matrix P and a Confidence matrix C.
    Then it computes their decomposition into the dot product of two latent-factor matrices X and Y.
    X represents the user latent factors, Y the item latent factors.
    The model is learned by minimizing the following regularized least-squares objective with Alternating Least Squares:
    \operatornamewithlimits{argmin}\limits_{x*,y*}\frac{1}{2}\sum_{i,j}{c_{ij}(p_{ij}-x_i^T y_j)^2} + \lambda(\sum_{i}{||x_i||^2} + \sum_{j}{||y_j||^2})
    '''
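    # Each ALS half-step (see _lsq_solver_fast below) fixes one factor matrix and
    # solves a per-row ridge-regression system for the other:
    #
    #     x_u = (Y^T C^u Y + reg * I)^-1  Y^T C^u p(u)
    #
    # where C^u is the diagonal confidence matrix of row u.  Y^T C^u Y is assembled
    # as Y^T Y + Y_j^T (C^u_j - I) Y_j using only the columns j with non-zero
    # feedback, the speed-up proposed in Hu et al. (2008).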
# TODO: Add support for multiple confidence scaling functions (e.g. linear and log scaling)
def __init__(self,
num_factors=50,
reg=0.011,
iters=30,
scaling='log',
alpha=40,
epsilon=1.0,
init_mean=0.0,
init_std=0.1,
rnd_seed=42):
super(IALS_numpy, self).__init__()
assert scaling in ['linear', 'log'], 'Unsupported scaling: {}'.format(scaling)
self.num_factors = num_factors
self.reg = reg
self.iters = iters
self.scaling = scaling
self.alpha = alpha
self.epsilon = epsilon
self.init_mean = init_mean
self.init_std = init_std
self.rnd_seed = rnd_seed
        self.parameters = "num_factors={}, reg={}, iters={}, scaling={}, alpha={}, epsilon={}, init_mean={}, " \
"init_std={}, rnd_seed={}".format(
self.num_factors, self.reg, self.iters, self.scaling, self.alpha, self.epsilon, self.init_mean,
self.init_std, self.rnd_seed)
def __str__(self):
return "WRMF-iALS Implementation"
def _linear_scaling(self, R):
C = R.copy().tocsr()
C.data *= self.alpha
C.data += 1.0
return C
def _log_scaling(self, R):
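        # Log confidence scaling from Hu et al. (2008): c_ij = 1 + alpha * log(1 + r_ij / epsilon)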
C = R.copy().tocsr()
C.data = 1.0 + self.alpha * np.log(1.0 + C.data / self.epsilon)
return C
def fit(self, R):
self.dataset = R
# compute the confidence matrix
if self.scaling == 'linear':
C = self._linear_scaling(R)
else:
C = self._log_scaling(R)
Ct = C.T.tocsr()
M, N = R.shape
# set the seed
np.random.seed(self.rnd_seed)
# initialize the latent factors
self.X = np.random.normal(self.init_mean, self.init_std, size=(M, self.num_factors))
self.Y = np.random.normal(self.init_mean, self.init_std, size=(N, self.num_factors))
for it in tqdm(range(self.iters)):
self.X = self._lsq_solver_fast(C, self.X, self.Y, self.reg)
self.Y = self._lsq_solver_fast(Ct, self.Y, self.X, self.reg)
def recommend(self, playlist_id, n=None, exclude_seen=True,export= False):
scores = np.dot(self.X[playlist_id], self.Y.T)
ranking = scores.argsort()[::-1]
# rank items
if exclude_seen:
ranking = self._filter_seen(playlist_id, ranking)
if not export:
return ranking[:n]
elif export:
return str(ranking[:n]).strip("[]")
def _lsq_solver(self, C, X, Y, reg):
# precompute YtY
rows, factors = X.shape
YtY = np.dot(Y.T, Y)
for i in range(rows):
# accumulate YtCiY + reg*I in A
A = YtY + reg * np.eye(factors)
# accumulate Yt*Ci*p(i) in b
b = np.zeros(factors)
for j, cij in self._nonzeros(C, i):
vj = Y[j]
A += (cij - 1.0) * np.outer(vj, vj)
b += cij * vj
X[i] = np.linalg.solve(A, b)
return X
def _lsq_solver_fast(self, C, X, Y, reg):
# precompute YtY
rows, factors = X.shape
YtY = np.dot(Y.T, Y)
for i in range(rows):
# accumulate YtCiY + reg*I in A
A = YtY + reg * np.eye(factors)
start, end = C.indptr[i], C.indptr[i + 1]
j = C.indices[start:end] # indices of the non-zeros in Ci
ci = C.data[start:end] # non-zeros in Ci
Yj = Y[j] # only the factors with non-zero confidence
# compute Yt(Ci-I)Y
aux = np.dot(Yj.T, np.diag(ci - 1.0))
A += np.dot(aux, Yj)
# compute YtCi
b = np.dot(Yj.T, ci)
X[i] = np.linalg.solve(A, b)
return X
def _nonzeros(self, R, row):
for i in range(R.indptr[row], R.indptr[row + 1]):
yield (R.indices[i], R.data[i])
def _get_user_ratings(self, playlist_id):
self.dataset = check_matrix(self.dataset, "csr")
return self.dataset[playlist_id]
def _get_item_ratings(self, track_id):
self.dataset = check_matrix(self.dataset, "csc")
return self.dataset[:, track_id]
def _filter_seen(self, playlist_id, ranking):
user_profile = self._get_user_ratings(playlist_id)
seen = user_profile.indices
unseen_mask = np.in1d(ranking, seen, assume_unique=True, invert=True)
return ranking[unseen_mask]
|
[
"base.RecommenderUtils.check_matrix",
"numpy.outer",
"numpy.random.seed",
"numpy.log",
"numpy.eye",
"numpy.zeros",
"numpy.random.normal",
"numpy.dot",
"numpy.linalg.solve",
"numpy.diag",
"numpy.in1d"
] |
[((2790, 2819), 'numpy.random.seed', 'np.random.seed', (['self.rnd_seed'], {}), '(self.rnd_seed)\n', (2804, 2819), True, 'import numpy as np\n'), ((2877, 2952), 'numpy.random.normal', 'np.random.normal', (['self.init_mean', 'self.init_std'], {'size': '(M, self.num_factors)'}), '(self.init_mean, self.init_std, size=(M, self.num_factors))\n', (2893, 2952), True, 'import numpy as np\n'), ((2970, 3045), 'numpy.random.normal', 'np.random.normal', (['self.init_mean', 'self.init_std'], {'size': '(N, self.num_factors)'}), '(self.init_mean, self.init_std, size=(N, self.num_factors))\n', (2986, 3045), True, 'import numpy as np\n'), ((3332, 3369), 'numpy.dot', 'np.dot', (['self.X[playlist_id]', 'self.Y.T'], {}), '(self.X[playlist_id], self.Y.T)\n', (3338, 3369), True, 'import numpy as np\n'), ((3755, 3769), 'numpy.dot', 'np.dot', (['Y.T', 'Y'], {}), '(Y.T, Y)\n', (3761, 3769), True, 'import numpy as np\n'), ((4295, 4309), 'numpy.dot', 'np.dot', (['Y.T', 'Y'], {}), '(Y.T, Y)\n', (4301, 4309), True, 'import numpy as np\n'), ((5117, 5150), 'base.RecommenderUtils.check_matrix', 'check_matrix', (['self.dataset', '"""csr"""'], {}), "(self.dataset, 'csr')\n", (5129, 5150), False, 'from base.RecommenderUtils import check_matrix\n'), ((5259, 5292), 'base.RecommenderUtils.check_matrix', 'check_matrix', (['self.dataset', '"""csc"""'], {}), "(self.dataset, 'csc')\n", (5271, 5292), False, 'from base.RecommenderUtils import check_matrix\n'), ((5502, 5557), 'numpy.in1d', 'np.in1d', (['ranking', 'seen'], {'assume_unique': '(True)', 'invert': '(True)'}), '(ranking, seen, assume_unique=True, invert=True)\n', (5509, 5557), True, 'import numpy as np\n'), ((3945, 3962), 'numpy.zeros', 'np.zeros', (['factors'], {}), '(factors)\n', (3953, 3962), True, 'import numpy as np\n'), ((4138, 4159), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (4153, 4159), True, 'import numpy as np\n'), ((4776, 4791), 'numpy.dot', 'np.dot', (['aux', 'Yj'], {}), '(aux, Yj)\n', (4782, 4791), True, 'import numpy as np\n'), ((4835, 4851), 'numpy.dot', 'np.dot', (['Yj.T', 'ci'], {}), '(Yj.T, ci)\n', (4841, 4851), True, 'import numpy as np\n'), ((4872, 4893), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (4887, 4893), True, 'import numpy as np\n'), ((2442, 2477), 'numpy.log', 'np.log', (['(1.0 + C.data / self.epsilon)'], {}), '(1.0 + C.data / self.epsilon)\n', (2448, 2477), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.diag', 'np.diag', (['(ci - 1.0)'], {}), '(ci - 1.0)\n', (4747, 4757), True, 'import numpy as np\n'), ((3872, 3887), 'numpy.eye', 'np.eye', (['factors'], {}), '(factors)\n', (3878, 3887), True, 'import numpy as np\n'), ((4072, 4088), 'numpy.outer', 'np.outer', (['vj', 'vj'], {}), '(vj, vj)\n', (4080, 4088), True, 'import numpy as np\n'), ((4413, 4428), 'numpy.eye', 'np.eye', (['factors'], {}), '(factors)\n', (4419, 4428), True, 'import numpy as np\n')]
|
import numpy as np
import heapq
import math
import time
import pygame
class Node:
def __init__(self, state=None, cost=float('inf'), costToCome=float('inf'), parent=None, collision=None):
self.state = state
self.parent = parent
self.cost = cost
self.costToCome = costToCome
self.collision = collision
class CoupledPlannerNode:
def __init__(self, state=None, collision=None, parent=None, f_score=float('inf'), cost_to_come=float('inf')):
self.state = state
self.parent = parent
self.collision = collision
self.f_score = f_score
self.cost_to_come = cost_to_come
class CoupledNode:
def __init__(self, state=None, collision=None, parent=None, f_score=None, cost_to_go=None, cost_to_come=None):
self.state = state
self.parent = parent
self.collision = collision
self.f_score = f_score
self.cost_to_go = cost_to_go
self.cost_to_come = cost_to_come
def pointInValidWorkspace(point, res, radiusClearance, scale):
x, y = point
# --------------------------------------------------------------------------------
# Checking whether point inside obstacles
# --------------------------------------------------------------------------------
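    # Each block below tests one axis-aligned rectangular obstacle: the corner
    # coordinates are converted to grid units via scale / res and inflated on
    # every side by radiusClearance / res; the point is rejected if it falls
    # inside any inflated rectangle.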
X = np.float32([8, 12.5, 12.5, 8]) * scale / res
Y = np.float32([9, 9, 9.5, 9.5]) * scale / res
ptInRectangle = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([10, 10.5, 10.5, 10]) * scale / res
Y = np.float32([7, 7, 11.5, 11.5]) * scale / res
ptInRectangle1 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([4, 4.25, 4.25, 4]) * scale / res
Y = np.float32([8, 8, 10.5, 10.5]) * scale / res
ptInRectangle2 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([1.5, 3, 3, 1.5]) * scale / res
Y = np.float32([9, 9, 9.25, 9.25]) * scale / res
ptInRectangle3 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([16, 16.25, 16.25, 16]) * scale / res
Y = np.float32([8, 8, 10.5, 10.5]) * scale / res
ptInRectangle4 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([17, 18.5, 18.5, 17]) * scale / res
Y = np.float32([9, 9, 9.25, 9.25]) * scale / res
ptInRectangle5 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([9, 11.5, 11.5, 9]) * scale / res
Y = np.float32([3, 3, 3.25, 3.25]) * scale / res
ptInRectangle6 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([10.15, 10.40, 10.40, 10.15]) * scale / res
Y = np.float32([0.8, 0.8, 2.3, 2.3]) * scale / res
ptInRectangle7 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([9, 11.5, 11.5, 9]) * scale / res
Y = np.float32([15, 15, 15.25, 15.25]) * scale / res
ptInRectangle8 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
X = np.float32([10.15, 10.40, 10.40, 10.15]) * scale / res
Y = np.float32([16, 16, 17.5, 17.5]) * scale / res
ptInRectangle9 = Y[0] - radiusClearance / res <= y <= Y[2] + radiusClearance / res and \
X[1] + radiusClearance / res >= x >= X[3] - radiusClearance / res
if ptInRectangle or ptInRectangle1 or ptInRectangle2 or ptInRectangle3 or ptInRectangle4 or \
ptInRectangle5 or ptInRectangle6 or ptInRectangle7 or ptInRectangle8 or ptInRectangle9:
return False
return True
# checks whether next action is near an obstacle or ill defined
def isSafe(newState, scale, r=1, radiusClearance=0):
col = math.floor(800 / r)
row = math.floor(800 / r)
newState = list(newState)
if not isinstance(newState[0], list):
if newState[0] < 0 or newState[0] > col or newState[1] < 0 or newState[1] > row:
return False
return pointInValidWorkspace(newState[0:2], r, radiusClearance, scale)
else:
check = True
for i in range(len(newState)):
check = check or newState[i][0] < 0 or newState[i][0] > col or newState[i][1] < 0 or newState[i][1] > row
if check:
check = pointInValidWorkspace(newState[i][0:2], r, radiusClearance, scale)
else:
return False
return check
# prints solution path
def printPath(node):
l = []
current = node
while current:
l.append(current.state)
current = current.parent
return l
def normalize(startPosition, startOrientation, threshDistance=0.5, threshAngle=30):
x, y = startPosition
t = startOrientation
x = round(x / threshDistance) * threshDistance
y = round(y / threshDistance) * threshDistance
t = round(t / threshAngle) * threshAngle
return [x, y, t]
# Calculating the Euclidean distance
def distance(startPosition, goalPosition):
sx, sy = startPosition
gx, gy = goalPosition
return math.sqrt((gx - sx) ** 2 + (gy - sy) ** 2)
# generates optimal path for robot
def Astar(q, startPosition, startOrientation, goalPosition, nodesExplored, scale, threshDistance=0.5, threshAngle=30,
radiusClearance=0):
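    # Discretized A*: states are (x, y, theta) snapped to multiples of
    # threshDistance / threshAngle by normalize().  The open list q holds tuples
    # (f = cost-to-come + Euclidean distance to goal, insertion count, Node);
    # nodesExplored maps the string key str(x)+str(y)+str(t) to the best node
    # seen so far, and the search stops once a state comes within 3 * 1.5 units
    # of the goal.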
# normalize goal and start positions
sx, sy, st = normalize(startPosition, startOrientation, threshDistance, threshAngle)
gx, gy, gt = normalize(goalPosition, 0, threshDistance, threshAngle)
# Initializing root node
key = str(sx) + str(sy) + str(st)
root = Node(np.array([sx, sy, st]), 0.0, 0.0, None)
if key not in nodesExplored:
nodesExplored[key] = root
count = 1
heapq.heappush(q, (root.cost, count, root))
while len(q) > 0:
_, _, currentNode = heapq.heappop(q)
if distance(currentNode.state[0:2], goalPosition) <= 3 * 1.5:
sol = printPath(currentNode)
return [True, sol]
angle = 360 // threshAngle
for theta in range(angle):
x, y, t = currentNode.state
newOrientation = math.radians((threshAngle * theta + t) % 360)
newPosX = threshDistance * math.cos(newOrientation) + x
newPosY = threshDistance * math.sin(newOrientation) + y
newState = np.array(normalize([newPosX, newPosY], newOrientation, threshDistance, threshAngle))
s = str(newState[0]) + str(newState[1]) + str(newState[2])
if s not in nodesExplored:
if isSafe(newState, scale, 1, radiusClearance):
newCostToCome = currentNode.costToCome + distance([newState[0], newState[1]], [x, y])
newCost = newCostToCome + distance([newState[0], newState[1]], [gx, gy])
newNode = Node(state=newState, cost=newCost, costToCome=newCostToCome, parent=currentNode)
nodesExplored[s] = newNode
heapq.heappush(q, (newNode.cost, count, newNode))
count += 1
else:
if nodesExplored[s].collision is None or (
isinstance(nodesExplored[s].collision, list) and len(nodesExplored[s].collision) == 0):
if (nodesExplored[s].cost > currentNode.costToCome + distance([newState[0], newState[1]],
[x, y]) + distance(
[newState[0], newState[1]], [gx, gy])):
nodesExplored[s].costToCome = currentNode.costToCome + distance([newState[0], newState[1]],
[x, y])
nodesExplored[s].cost = nodesExplored[s].costToCome + distance([newState[0], newState[1]],
[gx, gy])
nodesExplored[s].parent = currentNode
    return [False, None]
def determineCollision(robotPosition):
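    # Returns a list of index groups; each group holds the robots that currently
    # occupy exactly the same discretized state (i.e. are colliding).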
collisionSet = []
for i in range(len(robotPosition) - 1):
collision = []
for j in range(i + 1, len(robotPosition)):
if list(robotPosition[i]) == list(robotPosition[j]):
collision.append(i)
collision.append(j)
collision = list(set(collision))
if collision:
collisionSet.append(collision)
return collisionSet
def coupledPlanner(collision, startPosition, startOrientation, goalPosition, coupledNodesExplored, nodesExplored,
solPaths1,
iterateSolPaths1, scale, threshDistance=0.5, threshAngle=30, radiusClearance=0):
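    # Re-plans only the robots listed in `collision`; every other robot replays
    # its previously computed path (solPaths1, consumed in reverse through
    # iterateSolPaths1).  Whenever any two robots land on the same discretized
    # state, that node is flagged via its .collision attribute so later
    # expansions avoid it, the corresponding queue is rewound to the node's
    # parent and the search backtracks before continuing.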
nonCollisionRobots = np.array([Node()] * len(startPosition))
goalChecker = {}
solution = {}
solution1 = {}
nodeE = {}
co = 0
currentPos = startPosition.copy()
count = [0] * len(startPosition)
q = {}
col = []
for i in range(len(startPosition)):
q[i] = []
if i not in collision:
s = str(solPaths1[i][iterateSolPaths1[i]][0]) + str(solPaths1[i][iterateSolPaths1[i]][1]) + str(
solPaths1[i][iterateSolPaths1[i]][2])
nonCollisionRobots[i] = coupledNodesExplored[s]
iterateSolPaths1[i] -= 1
else:
goalChecker[i] = False
nodeE[i] = {}
root = Node(startPosition[i], 0.0, 0.0, None)
s = str(startPosition[i][0]) + str(startPosition[i][1]) + str(startPosition[i][2])
nodeE[i][s] = root
count[i] += 1
heapq.heappush(q[i], (root.cost, count[i], root))
while not all(ele for ele in goalChecker.values()):
co += 1
# print(currentPos, determineCollision(currentPos.copy()), len(currentPos), co)
if determineCollision(currentPos.copy()):
col = determineCollision(currentPos.copy())
for i in col[0]:
s = str(currentPos[i][0]) + str(currentPos[i][1]) + str(currentPos[i][2])
q[i].clear()
nodesExplored[i][s].collision = col
heapq.heappush(q[i], (nodesExplored[i][s].parent.cost, count[i], nodesExplored[i][s].parent))
nodesExplored[i][s].parent = None
nodeE[i].clear()
# collision = list(set(collision + col[0]))
# print(collision)
for i in range(len(startPosition)):
if i in collision:
if not goalChecker[i]:
_, _, currentNode = heapq.heappop(q[i])
currentPos[i] = currentNode.state
if distance(currentNode.state[0:2], goalPosition[i][0:2]) <= 3 * 1.5:
solution[i] = printPath(currentNode)
goalChecker[i] = True
continue
angle = 360 // threshAngle
for theta in range(angle):
x, y, t = currentNode.state
newOrientation = math.radians((threshAngle * theta + t) % 360)
newPosX = threshDistance * math.cos(newOrientation) + x
newPosY = threshDistance * math.sin(newOrientation) + y
newState = np.array(normalize([newPosX, newPosY], newOrientation, threshDistance, threshAngle))
s = str(newState[0]) + str(newState[1]) + str(newState[2])
if s not in nodeE[i]:
if (s in nodesExplored[i] and not nodesExplored[i][s].collision) or (
s not in nodesExplored[i]):
if isSafe(newState, scale, 1, radiusClearance):
newCostToCome = currentNode.costToCome + distance([newState[0], newState[1]],
[x, y])
newCost = newCostToCome + distance([newState[0], newState[1]], goalPosition[i][0:2])
newNode = Node(state=newState, cost=newCost, costToCome=newCostToCome,
parent=currentNode)
nodesExplored[i][s] = newNode
nodeE[i][s] = newNode
heapq.heappush(q[i], (newNode.cost, count[i], newNode))
count[i] += 1
else:
if (s in nodesExplored[i] and not nodesExplored[i][s].collision) or (
s not in nodesExplored[i]):
if (nodeE[i][s].cost > currentNode.costToCome + distance([newState[0], newState[1]],
[x, y]) + distance(
[newState[0], newState[1]], goalPosition[i][0:2])):
nodeE[i][s].costToCome = currentNode.costToCome + distance(
[newState[0], newState[1]], [x, y])
nodeE[i][s].cost = nodeE[i][s].costToCome + distance([newState[0], newState[1]],
goalPosition[i][0:2])
nodeE[i][s].parent = currentNode
# print(currentNode.state)
# print(currentPos[i])
else:
if iterateSolPaths1[i] > 0:
s = str(solPaths1[i][iterateSolPaths1[i]][0]) + str(solPaths1[i][iterateSolPaths1[i]][1]) + str(
solPaths1[i][iterateSolPaths1[i]][2])
nonCollisionRobots[i] = nodesExplored[i][s]
currentPos[i] = nonCollisionRobots[i].state.copy()
iterateSolPaths1[i] -= 1
else:
goalChecker[i] = True
return solution, nodesExplored
def updateCollsionPath(colset, previousPos, coupledNodesExplored, nodesExplored, nodesExplored1):
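    # Walks back along each robot's current path (via parent pointers) and tags every
    # visited state with the detected collision set so later searches avoid reusing it.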
for i, pos in enumerate(previousPos):
s = str(pos[0]) + str(pos[1]) + str(pos[2])
while nodesExplored1[i][s].parent is not None:
for collision in colset:
if i in collision:
                    if coupledNodesExplored[s].collision:
                        # add robot i to each existing collision group; the update must be written
                        # back in place (rebinding the loop variable alone has no effect)
                        for idx, col in enumerate(coupledNodesExplored[s].collision):
                            coupledNodesExplored[s].collision[idx] = list(set(col + [i]))
                    else:
                        coupledNodesExplored[s].collision = colset
                    if nodesExplored[s].collision:
                        for idx, col in enumerate(nodesExplored[s].collision):
                            nodesExplored[s].collision[idx] = list(set(col + [i]))
                    else:
                        nodesExplored[s].collision = colset
                    if nodesExplored1[i][s].collision:
                        for idx, col in enumerate(nodesExplored1[i][s].collision):
                            nodesExplored1[i][s].collision[idx] = list(set(col + [i]))
                    else:
                        nodesExplored1[i][s].collision = colset
st = nodesExplored1[i][s].parent.state
s = str(st[0]) + str(st[1]) + str(st[2])
def subdimensionalExpansion(solPaths, nodesExplored, nodesExplored1, iterateSolPaths, scale, threshDistance,
threshAngle,
radiusClearance):
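    # M* subdimensional expansion: robots follow their individually optimal paths until a
    # robot-robot collision is found; the collision set is then back-propagated and a
    # coupled planner is run for just the colliding robots.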
currentPos = []
sol = []
nodeE = []
startPosition = []
goalPosition = []
previousPos = []
colset = []
count = -1
exp = False
previousNode = [Node()] * len(solPaths)
node = [Node()] * len(solPaths)
coupledNodesExplored = {}
solPaths1 = solPaths.copy()
iterateSolPaths1 = iterateSolPaths.copy()
for index, path in enumerate(solPaths):
startPosition.append(list(path[iterateSolPaths[index]]))
goalPosition.append(list(path[0]))
while not all(ele == 0 for ele in iterateSolPaths):
previousPos = currentPos.copy()
currentPos.clear()
for index, path in enumerate(solPaths):
currentPos.append(list(path[iterateSolPaths[index]]))
colset = determineCollision(currentPos)
count += 1
if count == 0:
previousPos = currentPos
if not colset:
for i, pos in enumerate(currentPos):
s = str(pos[0]) + str(pos[1]) + str(pos[2])
if count == 0:
node[i] = Node(state=pos, collision=colset, parent=None)
previousNode[i] = node[i]
else:
previousNode[i] = node[i]
node[i] = Node(state=pos, collision=colset, parent=previousNode[i])
if s not in nodesExplored:
nodesExplored[s] = node[i]
coupledNodesExplored[s] = node[i]
if iterateSolPaths[i] > 0:
iterateSolPaths[i] -= 1
else:
exp = True
# print(currentPos)
break
for i, pos in enumerate(currentPos):
s = str(pos[0]) + str(pos[1]) + str(pos[2])
for collision in colset:
if i in collision:
node[i] = Node(state=pos, collision=colset, parent=None)
coupledNodesExplored[s] = node[i]
nodesExplored[s].collision = colset
nodesExplored1[i][s].collision = colset
else:
while iterateSolPaths[i] > 0:
s = str(pos[0]) + str(pos[1]) + str(pos[2])
previousNode[i] = node[i]
node[i] = Node(state=pos, collision=[], parent=previousNode[i])
coupledNodesExplored[s] = node[i]
iterateSolPaths[i] -= 1
pos = solPaths[i][iterateSolPaths[i]]
break
if exp:
print('Collision found')
# print(colset)
updateCollsionPath(colset, previousPos, coupledNodesExplored, nodesExplored, nodesExplored1)
for collision in colset:
a = time.time()
sol, nodeE = coupledPlanner(collision, startPosition, 0, goalPosition, coupledNodesExplored, nodesExplored1,
solPaths1,
iterateSolPaths1, scale, threshDistance,
30, radiusClearance)
b = time.time()
print(b - a)
return exp, sol, colset[0], nodeE, currentPos
return exp, sol, colset, nodeE, currentPos
def triangleCoordinates(start, end, triangleSize=5):
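    # returns the three vertices of a small arrowhead at the end point, oriented from
    # start towards end (used when drawing explored edges)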
rotation = (math.atan2(start[1] - end[1], end[0] - start[0])) + math.pi / 2
rad = math.pi / 180
coordinateList = np.array([[end[0], end[1]],
[end[0] + triangleSize * math.sin(rotation - 165 * rad),
end[1] + triangleSize * math.cos(rotation - 165 * rad)],
[end[0] + triangleSize * math.sin(rotation + 165 * rad),
end[1] + triangleSize * math.cos(rotation + 165 * rad)]])
return coordinateList
def visualizeMStar():
###################################################
# Parameters
###################################################
clearance = 10
radius = 0
stepSize = 11
threshDistance = stepSize # Step size of movement
res = 1 # resolution of grid
scale = 40 # scale of grid
# 1 Robot
# start = [[1 * scale, 16 * scale]] # Starting position of the robots
# goal = [[16 * scale, 1 * scale]] # Goal position of the robots
# 2 Robots
# start = [[1 * scale, 6 * scale], [6 * scale, 1 * scale]] # Starting position of the robots
# goal = [[14 * scale, 10 * scale], [9 * scale, 14 * scale]] # Goal position of the robots
# 3 Robots
start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
[6 * scale, 1 * scale]] # Starting position of the robots
goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
[9 * scale, 14 * scale]] # Goal position of the robots
# 4 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale]] # Goal position of the robots
# 5 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale], [17 * scale, 4 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale], [19 * scale, 19 * scale]] # Goal position of the robots
# 6 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale], [17 * scale, 4 * scale], [1 * scale, 19 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale], [19 * scale, 19 * scale], [14 * scale, 16 * scale]] # Goal position of the robots
# 7 Robots
# start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale],
# [6 * scale, 1 * scale], [19 * scale, 1 * scale],
# [17 * scale, 4 * scale], [1 * scale, 19 * scale], [2 * scale, 2 * scale]] # Starting position of the robots
# goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale],
# [9 * scale, 14 * scale], [5 * scale, 16 * scale],
# [19 * scale, 19 * scale], [14 * scale, 16 * scale], [14 * scale, 3 * scale]] # Goal position of the robots
drawing = True
threshAngle = 90 # Angle between actions
startOrientation = 0
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
lred = (255, 102, 102)
green = (0, 102, 0)
lgreen = (153, 255, 153)
orange = (255, 165, 0)
dorange = (240, 94, 35)
blue = (0, 0, 255)
lblue = (153, 204, 255)
purple = (75, 0, 130)
yellow = (255, 255, 0)
    pink = (255, 192, 203)
    dpink = (199, 21, 133)
    gray = (220, 220, 220)
    dgray = (105, 105, 105)
    cyan = (0, 255, 255)
    maroon = (255, 160, 122)
    dmaroon = (128, 0, 0)
pathColours = [blue, red, green, dmaroon, orange, dpink, dgray]
colors = [lblue, lred, lgreen, maroon, dorange, pink, gray]
solutionPaths = []
size_x = 20
size_y = 20
TotalNodesExplored = {}
TotalNodesExplored1 = {}
totalTime = 0
if drawing:
pygame.init()
gameDisplay = pygame.display.set_mode((size_x * scale, size_y * scale))
gameDisplay.fill(white)
pygame.display.set_caption("M* Algorithm Implementation")
basicfont = pygame.font.SysFont('timesnewroman', 20, bold=True)
############################################################
# Display Obstacles
############################################################
pygame.draw.rect(gameDisplay, black,
[int(scale * 8), int(scale * 9), int(scale * 4.5), int(scale * 0.5)]) # plus
pygame.draw.rect(gameDisplay, black,
[int(scale * 10), int(scale * 7), int(scale * 0.5), int(scale * 4.5)]) # plus
pygame.draw.rect(gameDisplay, black, [int(scale * 4), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 1.5), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 16), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 17), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black, [int(scale * 9), int(scale * 3), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 0.8), int(scale * 0.25), int(scale * 1.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 9), int(scale * 15), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 16), int(scale * 0.25), int(scale * 1.5)]) # |
############################################################
# Display start and end points of the robots
############################################################
for i in range(len(start)):
pygame.draw.circle(gameDisplay, pathColours[i], start[i], 0.1 * scale)
pygame.draw.circle(gameDisplay, pathColours[i], goal[i], 0.1 * scale)
text = basicfont.render('s' + str(i + 1), False, pathColours[i])
text1 = basicfont.render('g' + str(i + 1), False, pathColours[i])
gameDisplay.blit(text, (start[i][0] + 5, start[i][1] + 5))
gameDisplay.blit(text1, (goal[i][0] + 5, goal[i][1] + 5))
pygame.display.update()
pygame.time.delay(500)
############################################################
# Draw Explored Nodes and solution path
############################################################
for i in range(len(start)):
nodesExplored = {}
q = []
startPosition = np.round((np.array(start[i])) / res)
goalPosition = np.round((np.array(goal[i])) / res)
if not isSafe(startPosition, scale, res, clearance + radius) or not isSafe(goalPosition, scale, res,
clearance + radius):
print('Start or goal configuration of robot ' + str(i + 1) + ' is not in a valid workspace')
else:
print('Exploring workspace for robot ' + str(i + 1))
startTime = time.time() # Start time of simulation
success, solution = Astar(q, startPosition, startOrientation, goalPosition, nodesExplored, scale,
threshDistance,
threshAngle, clearance + radius)
endTime = time.time()
TotalNodesExplored.update(nodesExplored)
TotalNodesExplored1[i] = nodesExplored
#############################################
# Drawing
#############################################
if success:
solutionPaths.append(solution)
print('Optimal path found for robot ' + str(i + 1))
print("Total time taken for exploring nodes " + str(endTime - startTime) + " seconds.")
totalTime += endTime - startTime
print('-------------------------')
if drawing:
draw = True
while draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# draw nodesExplored
for s in nodesExplored:
if nodesExplored[s].parent:
pt = nodesExplored[s].state[0:2]
ptParent = nodesExplored[s].parent.state[0:2]
x, y = pt * res
x2, y2 = ptParent * res
# draw explored nodes
pygame.draw.line(gameDisplay, colors[i], (x2, y2), (x, y), 1)
triangle = triangleCoordinates([x2, y2], [x, y], 5)
pygame.draw.polygon(gameDisplay, colors[i],
[tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
# draw start and goal locations
pygame.draw.rect(gameDisplay, colors[i],
(int(startPosition[0] * res * scale), int(startPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.draw.circle(gameDisplay, colors[i],
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale)),
math.floor(3 * 1.5 * res * scale))
pygame.draw.rect(gameDisplay, white,
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.display.update()
draw = False
else:
solutionPaths.append(success)
print("Total time " + str(totalTime))
print("solution Paths " + str(len(solutionPaths)))
print('Robots following their own individual optimal Paths')
print()
print()
iterateSolutionPaths = []
for i in range(len(solutionPaths)):
if solutionPaths[i]:
iterateSolutionPaths.append(len(solutionPaths[i]) - 1)
else:
iterateSolutionPaths.append(-1)
iterateSolutionPathsCopy = iterateSolutionPaths.copy()
iterateSolutionPathsCopy1 = iterateSolutionPaths.copy()
solutionPathsCopy = solutionPaths.copy()
failure, sol, collision, nodeE, currentPos = subdimensionalExpansion(solutionPathsCopy, TotalNodesExplored,
TotalNodesExplored1,
iterateSolutionPathsCopy,
scale,
threshDistance, 45,
radius + clearance)
if drawing:
temp = [True] * len(iterateSolutionPaths)
while not all(ele == -2 for ele in iterateSolutionPaths) and not all(not p for p in temp):
for i in range(len(solutionPaths)):
if list(solutionPaths[i][iterateSolutionPaths[i]]) == currentPos[i] and failure:
pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
x, y = pt[0] * res, pt[1] * res
pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
pygame.display.update()
iterateSolutionPaths[i] -= 1
temp[i] = False
else:
if iterateSolutionPaths[i] != -2:
if iterateSolutionPaths[i] == -1:
print("There is no Path for Robot " + str(i + 1))
iterateSolutionPaths[i] = -2
elif iterateSolutionPaths[i] >= 0:
pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
x, y = pt[0] * res, pt[1] * res
pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
iterateSolutionPaths[i] -= 1
if iterateSolutionPaths[i] == 0:
print("Robot " + str(i + 1) + " reached its goal")
iterateSolutionPaths[i] = -2
pygame.display.update()
pygame.time.delay(1000)
if failure:
s = ''
for i in collision:
s += str(i + 1) + ' '
print("--------------------")
print('Robot - Robot collision detected between robots ' + s)
        print('Starting subdimensional expansion')
        print('Running back-propagation and updating the collision list')
temp = []
for i in range(len(iterateSolutionPaths)):
if i in collision:
temp.append(False)
else:
temp.append(True)
if drawing:
while not all(ele for ele in temp):
for i in range(len(iterateSolutionPaths)):
if i in collision:
if iterateSolutionPaths[i] != iterateSolutionPathsCopy1[i]:
pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
x, y = pt[0] * res, pt[1] * res
iterateSolutionPaths[i] += 1
pygame.draw.circle(gameDisplay, yellow, (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
pygame.display.update()
else:
temp[i] = True
pygame.time.delay(500)
# pygame.quit()
print()
print('Implementing coupled planner for robots ' + s)
print()
print('Robots following collision free path')
if drawing:
gameDisplay.fill(white)
pygame.display.set_caption("M* Algorithm Implementation")
basicfont = pygame.font.SysFont('timesnewroman', 20, bold=True)
############################################################
# Display Obstacles
############################################################
pygame.draw.rect(gameDisplay, black,
[int(scale * 8), int(scale * 9), int(scale * 4.5), int(scale * 0.5)]) # plus
pygame.draw.rect(gameDisplay, black,
[int(scale * 10), int(scale * 7), int(scale * 0.5), int(scale * 4.5)]) # plus
pygame.draw.rect(gameDisplay, black,
[int(scale * 4), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 1.5), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 16), int(scale * 8), int(scale * 0.25), int(scale * 2.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 17), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 9), int(scale * 3), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 0.8), int(scale * 0.25), int(scale * 1.5)]) # |
pygame.draw.rect(gameDisplay, black,
[int(scale * 9), int(scale * 15), int(scale * 2.5), int(scale * 0.25)]) # -
pygame.draw.rect(gameDisplay, black,
[int(scale * 10.15), int(scale * 16), int(scale * 0.25), int(scale * 1.5)]) # |
pygame.display.update()
solutionPaths2 = solutionPathsCopy.copy()
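    # load a coupled-planner solution previously saved to sol.npy; this replaces the
    # sol returned by subdimensionalExpansion above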
sol = list(np.load('sol.npy'))
sol.reverse()
for i in range(len(solutionPaths2)):
if i in collision:
solutionPaths2[i] = sol.pop(0)
iterateSolutionPaths2 = []
if drawing:
for i in range(len(start)):
pygame.draw.circle(gameDisplay, black, start[i], 0.1 * scale)
pygame.draw.circle(gameDisplay, black, goal[i], 0.1 * scale)
text = basicfont.render('s' + str(i + 1), False, black)
text1 = basicfont.render('g' + str(i + 1), False, black)
gameDisplay.blit(text, (start[i][0] + 5, start[i][1] + 5))
gameDisplay.blit(text1, (goal[i][0] + 5, goal[i][1] + 5))
pygame.display.update()
pygame.time.delay(500)
for i in range(len(start)):
# if i not in collision:
startPosition = np.round((np.array(start[i])) / res)
goalPosition = np.round((np.array(goal[i])) / res)
draw = True
while draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# draw nodesExplored
for s in nodeE[i]:
if nodeE[i][s].parent:
pt = nodeE[i][s].state[0:2]
ptParent = nodeE[i][s].parent.state[0:2]
x, y = pt * res
x2, y2 = ptParent * res
# draw explored nodes
if i in collision:
pygame.draw.line(gameDisplay, yellow, (x2, y2), (x, y), 1)
triangle = triangleCoordinates([x2, y2], [x, y], 5)
pygame.draw.polygon(gameDisplay, yellow,
[tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
else:
pygame.draw.line(gameDisplay, colors[i], (x2, y2), (x, y), 1)
triangle = triangleCoordinates([x2, y2], [x, y], 5)
pygame.draw.polygon(gameDisplay, colors[i],
[tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
# draw start and goal locations
pygame.draw.rect(gameDisplay, colors[i],
(int(startPosition[0] * res * scale), int(startPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.draw.circle(gameDisplay, colors[i],
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale)),
math.floor(3 * 1.5 * res * scale))
pygame.draw.rect(gameDisplay, white,
(int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale),
int(res * scale), int(res * scale)))
pygame.display.update()
draw = False
for i in range(len(solutionPaths2)):
iterateSolutionPaths2.append(len(solutionPaths2[i]) - 1)
print(iterateSolutionPaths2)
# draw solution path
while not all(ele == -2 for ele in iterateSolutionPaths2):
for i in range(len(solutionPaths2)):
if iterateSolutionPaths2[i] != -2:
if iterateSolutionPaths2[i] == -1:
print("There is no Path for Robot " + str(i + 1))
iterateSolutionPaths2[i] = -2
elif iterateSolutionPaths2[i] >= 0:
pt = solutionPaths2[i][iterateSolutionPaths2[i]][0:2]
x, y = pt[0] * res, pt[1] * res
pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
math.floor(3 * 1.5 * res))
pygame.time.delay(50)
iterateSolutionPaths2[i] -= 1
if iterateSolutionPaths2[i] == 0:
print("Robot " + str(i + 1) + " reached its goal")
iterateSolutionPaths2[i] = -2
pygame.display.update()
pygame.time.delay(4000)
pygame.quit()
def main():
visualizeMStar()
if __name__ == "__main__":
main()
|
[
"numpy.load",
"pygame.draw.line",
"heapq.heappush",
"math.atan2",
"pygame.event.get",
"pygame.display.update",
"pygame.font.SysFont",
"math.radians",
"pygame.display.set_mode",
"math.cos",
"pygame.display.set_caption",
"pygame.quit",
"math.sqrt",
"pygame.init",
"math.sin",
"pygame.draw.circle",
"numpy.float32",
"pygame.time.delay",
"math.floor",
"heapq.heappop",
"time.time",
"numpy.array"
] |
[((4574, 4593), 'math.floor', 'math.floor', (['(800 / r)'], {}), '(800 / r)\n', (4584, 4593), False, 'import math\n'), ((4604, 4623), 'math.floor', 'math.floor', (['(800 / r)'], {}), '(800 / r)\n', (4614, 4623), False, 'import math\n'), ((5885, 5927), 'math.sqrt', 'math.sqrt', (['((gx - sx) ** 2 + (gy - sy) ** 2)'], {}), '((gx - sx) ** 2 + (gy - sy) ** 2)\n', (5894, 5927), False, 'import math\n'), ((6526, 6569), 'heapq.heappush', 'heapq.heappush', (['q', '(root.cost, count, root)'], {}), '(q, (root.cost, count, root))\n', (6540, 6569), False, 'import heapq\n'), ((33090, 33113), 'pygame.time.delay', 'pygame.time.delay', (['(1000)'], {}), '(1000)\n', (33107, 33113), False, 'import pygame\n'), ((6400, 6422), 'numpy.array', 'np.array', (['[sx, sy, st]'], {}), '([sx, sy, st])\n', (6408, 6422), True, 'import numpy as np\n'), ((6621, 6637), 'heapq.heappop', 'heapq.heappop', (['q'], {}), '(q)\n', (6634, 6637), False, 'import heapq\n'), ((19612, 19660), 'math.atan2', 'math.atan2', (['(start[1] - end[1])', '(end[0] - start[0])'], {}), '(start[1] - end[1], end[0] - start[0])\n', (19622, 19660), False, 'import math\n'), ((23677, 23690), 'pygame.init', 'pygame.init', ([], {}), '()\n', (23688, 23690), False, 'import pygame\n'), ((23713, 23770), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(size_x * scale, size_y * scale)'], {}), '((size_x * scale, size_y * scale))\n', (23736, 23770), False, 'import pygame\n'), ((23811, 23868), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""M* Algorithm Implementation"""'], {}), "('M* Algorithm Implementation')\n", (23837, 23868), False, 'import pygame\n'), ((23889, 23940), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""timesnewroman"""', '(20)'], {'bold': '(True)'}), "('timesnewroman', 20, bold=True)\n", (23908, 23940), False, 'import pygame\n'), ((26261, 26284), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (26282, 26284), False, 'import pygame\n'), ((26293, 26315), 'pygame.time.delay', 'pygame.time.delay', (['(500)'], {}), '(500)\n', (26310, 26315), False, 'import pygame\n'), ((1318, 1348), 'numpy.float32', 'np.float32', (['[8, 12.5, 12.5, 8]'], {}), '([8, 12.5, 12.5, 8])\n', (1328, 1348), True, 'import numpy as np\n'), ((1371, 1399), 'numpy.float32', 'np.float32', (['[9, 9, 9.5, 9.5]'], {}), '([9, 9, 9.5, 9.5])\n', (1381, 1399), True, 'import numpy as np\n'), ((1601, 1633), 'numpy.float32', 'np.float32', (['[10, 10.5, 10.5, 10]'], {}), '([10, 10.5, 10.5, 10])\n', (1611, 1633), True, 'import numpy as np\n'), ((1656, 1686), 'numpy.float32', 'np.float32', (['[7, 7, 11.5, 11.5]'], {}), '([7, 7, 11.5, 11.5])\n', (1666, 1686), True, 'import numpy as np\n'), ((1890, 1920), 'numpy.float32', 'np.float32', (['[4, 4.25, 4.25, 4]'], {}), '([4, 4.25, 4.25, 4])\n', (1900, 1920), True, 'import numpy as np\n'), ((1943, 1973), 'numpy.float32', 'np.float32', (['[8, 8, 10.5, 10.5]'], {}), '([8, 8, 10.5, 10.5])\n', (1953, 1973), True, 'import numpy as np\n'), ((2177, 2205), 'numpy.float32', 'np.float32', (['[1.5, 3, 3, 1.5]'], {}), '([1.5, 3, 3, 1.5])\n', (2187, 2205), True, 'import numpy as np\n'), ((2228, 2258), 'numpy.float32', 'np.float32', (['[9, 9, 9.25, 9.25]'], {}), '([9, 9, 9.25, 9.25])\n', (2238, 2258), True, 'import numpy as np\n'), ((2462, 2496), 'numpy.float32', 'np.float32', (['[16, 16.25, 16.25, 16]'], {}), '([16, 16.25, 16.25, 16])\n', (2472, 2496), True, 'import numpy as np\n'), ((2519, 2549), 'numpy.float32', 'np.float32', (['[8, 8, 10.5, 10.5]'], {}), '([8, 8, 10.5, 10.5])\n', (2529, 2549), True, 'import numpy 
as np\n'), ((2753, 2785), 'numpy.float32', 'np.float32', (['[17, 18.5, 18.5, 17]'], {}), '([17, 18.5, 18.5, 17])\n', (2763, 2785), True, 'import numpy as np\n'), ((2808, 2838), 'numpy.float32', 'np.float32', (['[9, 9, 9.25, 9.25]'], {}), '([9, 9, 9.25, 9.25])\n', (2818, 2838), True, 'import numpy as np\n'), ((3042, 3072), 'numpy.float32', 'np.float32', (['[9, 11.5, 11.5, 9]'], {}), '([9, 11.5, 11.5, 9])\n', (3052, 3072), True, 'import numpy as np\n'), ((3095, 3125), 'numpy.float32', 'np.float32', (['[3, 3, 3.25, 3.25]'], {}), '([3, 3, 3.25, 3.25])\n', (3105, 3125), True, 'import numpy as np\n'), ((3329, 3367), 'numpy.float32', 'np.float32', (['[10.15, 10.4, 10.4, 10.15]'], {}), '([10.15, 10.4, 10.4, 10.15])\n', (3339, 3367), True, 'import numpy as np\n'), ((3392, 3424), 'numpy.float32', 'np.float32', (['[0.8, 0.8, 2.3, 2.3]'], {}), '([0.8, 0.8, 2.3, 2.3])\n', (3402, 3424), True, 'import numpy as np\n'), ((3628, 3658), 'numpy.float32', 'np.float32', (['[9, 11.5, 11.5, 9]'], {}), '([9, 11.5, 11.5, 9])\n', (3638, 3658), True, 'import numpy as np\n'), ((3681, 3715), 'numpy.float32', 'np.float32', (['[15, 15, 15.25, 15.25]'], {}), '([15, 15, 15.25, 15.25])\n', (3691, 3715), True, 'import numpy as np\n'), ((3919, 3957), 'numpy.float32', 'np.float32', (['[10.15, 10.4, 10.4, 10.15]'], {}), '([10.15, 10.4, 10.4, 10.15])\n', (3929, 3957), True, 'import numpy as np\n'), ((3982, 4014), 'numpy.float32', 'np.float32', (['[16, 16, 17.5, 17.5]'], {}), '([16, 16, 17.5, 17.5])\n', (3992, 4014), True, 'import numpy as np\n'), ((6919, 6964), 'math.radians', 'math.radians', (['((threshAngle * theta + t) % 360)'], {}), '((threshAngle * theta + t) % 360)\n', (6931, 6964), False, 'import math\n'), ((10473, 10522), 'heapq.heappush', 'heapq.heappush', (['q[i]', '(root.cost, count[i], root)'], {}), '(q[i], (root.cost, count[i], root))\n', (10487, 10522), False, 'import heapq\n'), ((19061, 19072), 'time.time', 'time.time', ([], {}), '()\n', (19070, 19072), False, 'import time\n'), ((19403, 19414), 'time.time', 'time.time', ([], {}), '()\n', (19412, 19414), False, 'import time\n'), ((25803, 25873), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'pathColours[i]', 'start[i]', '(0.1 * scale)'], {}), '(gameDisplay, pathColours[i], start[i], 0.1 * scale)\n', (25821, 25873), False, 'import pygame\n'), ((25886, 25955), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'pathColours[i]', 'goal[i]', '(0.1 * scale)'], {}), '(gameDisplay, pathColours[i], goal[i], 0.1 * scale)\n', (25904, 25955), False, 'import pygame\n'), ((27115, 27126), 'time.time', 'time.time', ([], {}), '()\n', (27124, 27126), False, 'import time\n'), ((27412, 27423), 'time.time', 'time.time', ([], {}), '()\n', (27421, 27423), False, 'import time\n'), ((34431, 34453), 'pygame.time.delay', 'pygame.time.delay', (['(500)'], {}), '(500)\n', (34448, 34453), False, 'import pygame\n'), ((34694, 34751), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""M* Algorithm Implementation"""'], {}), "('M* Algorithm Implementation')\n", (34720, 34751), False, 'import pygame\n'), ((34776, 34827), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""timesnewroman"""', '(20)'], {'bold': '(True)'}), "('timesnewroman', 20, bold=True)\n", (34795, 34827), False, 'import pygame\n'), ((36596, 36619), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (36617, 36619), False, 'import pygame\n'), ((36690, 36708), 'numpy.load', 'np.load', (['"""sol.npy"""'], {}), "('sol.npy')\n", (36697, 36708), True, 'import numpy as np\n'), ((37414, 
37437), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (37435, 37437), False, 'import pygame\n'), ((37450, 37472), 'pygame.time.delay', 'pygame.time.delay', (['(500)'], {}), '(500)\n', (37467, 37472), False, 'import pygame\n'), ((41394, 41417), 'pygame.time.delay', 'pygame.time.delay', (['(4000)'], {}), '(4000)\n', (41411, 41417), False, 'import pygame\n'), ((41430, 41443), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (41441, 41443), False, 'import pygame\n'), ((11006, 11103), 'heapq.heappush', 'heapq.heappush', (['q[i]', '(nodesExplored[i][s].parent.cost, count[i], nodesExplored[i][s].parent)'], {}), '(q[i], (nodesExplored[i][s].parent.cost, count[i],\n nodesExplored[i][s].parent))\n', (11020, 11103), False, 'import heapq\n'), ((26607, 26625), 'numpy.array', 'np.array', (['start[i]'], {}), '(start[i])\n', (26615, 26625), True, 'import numpy as np\n'), ((26667, 26684), 'numpy.array', 'np.array', (['goal[i]'], {}), '(goal[i])\n', (26675, 26684), True, 'import numpy as np\n'), ((36968, 37029), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'black', 'start[i]', '(0.1 * scale)'], {}), '(gameDisplay, black, start[i], 0.1 * scale)\n', (36986, 37029), False, 'import pygame\n'), ((37046, 37106), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'black', 'goal[i]', '(0.1 * scale)'], {}), '(gameDisplay, black, goal[i], 0.1 * scale)\n', (37064, 37106), False, 'import pygame\n'), ((7004, 7028), 'math.cos', 'math.cos', (['newOrientation'], {}), '(newOrientation)\n', (7012, 7028), False, 'import math\n'), ((7072, 7096), 'math.sin', 'math.sin', (['newOrientation'], {}), '(newOrientation)\n', (7080, 7096), False, 'import math\n'), ((7762, 7811), 'heapq.heappush', 'heapq.heappush', (['q', '(newNode.cost, count, newNode)'], {}), '(q, (newNode.cost, count, newNode))\n', (7776, 7811), False, 'import heapq\n'), ((11416, 11435), 'heapq.heappop', 'heapq.heappop', (['q[i]'], {}), '(q[i])\n', (11429, 11435), False, 'import heapq\n'), ((31922, 31943), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (31939, 31943), False, 'import pygame\n'), ((31964, 31987), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (31985, 31987), False, 'import pygame\n'), ((37780, 37798), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (37796, 37798), False, 'import pygame\n'), ((11909, 11954), 'math.radians', 'math.radians', (['((threshAngle * theta + t) % 360)'], {}), '((threshAngle * theta + t) % 360)\n', (11921, 11954), False, 'import math\n'), ((19805, 19835), 'math.sin', 'math.sin', (['(rotation - 165 * rad)'], {}), '(rotation - 165 * rad)\n', (19813, 19835), False, 'import math\n'), ((19893, 19923), 'math.cos', 'math.cos', (['(rotation - 165 * rad)'], {}), '(rotation - 165 * rad)\n', (19901, 19923), False, 'import math\n'), ((19982, 20012), 'math.sin', 'math.sin', (['(rotation + 165 * rad)'], {}), '(rotation + 165 * rad)\n', (19990, 20012), False, 'import math\n'), ((20070, 20100), 'math.cos', 'math.cos', (['(rotation + 165 * rad)'], {}), '(rotation + 165 * rad)\n', (20078, 20100), False, 'import math\n'), ((28144, 28162), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (28160, 28162), False, 'import pygame\n'), ((31875, 31900), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (31885, 31900), False, 'import math\n'), ((37596, 37614), 'numpy.array', 'np.array', (['start[i]'], {}), '(start[i])\n', (37604, 37614), True, 'import numpy as np\n'), ((37664, 37681), 'numpy.array', 'np.array', (['goal[i]'], 
{}), '(goal[i])\n', (37672, 37681), True, 'import numpy as np\n'), ((40000, 40023), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (40021, 40023), False, 'import pygame\n'), ((30029, 30052), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (30050, 30052), False, 'import pygame\n'), ((34271, 34292), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (34288, 34292), False, 'import pygame\n'), ((34321, 34344), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (34342, 34344), False, 'import pygame\n'), ((37882, 37895), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (37893, 37895), False, 'import pygame\n'), ((39685, 39718), 'math.floor', 'math.floor', (['(3 * 1.5 * res * scale)'], {}), '(3 * 1.5 * res * scale)\n', (39695, 39718), False, 'import math\n'), ((12006, 12030), 'math.cos', 'math.cos', (['newOrientation'], {}), '(newOrientation)\n', (12014, 12030), False, 'import math\n'), ((12086, 12110), 'math.sin', 'math.sin', (['newOrientation'], {}), '(newOrientation)\n', (12094, 12110), False, 'import math\n'), ((28254, 28267), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (28265, 28267), False, 'import pygame\n'), ((28790, 28851), 'pygame.draw.line', 'pygame.draw.line', (['gameDisplay', 'colors[i]', '(x2, y2)', '(x, y)', '(1)'], {}), '(gameDisplay, colors[i], (x2, y2), (x, y), 1)\n', (28806, 28851), False, 'import pygame\n'), ((29698, 29731), 'math.floor', 'math.floor', (['(3 * 1.5 * res * scale)'], {}), '(3 * 1.5 * res * scale)\n', (29708, 29731), False, 'import math\n'), ((32749, 32770), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (32766, 32770), False, 'import pygame\n'), ((33061, 33084), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (33082, 33084), False, 'import pygame\n'), ((34216, 34241), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (34226, 34241), False, 'import math\n'), ((38409, 38467), 'pygame.draw.line', 'pygame.draw.line', (['gameDisplay', 'yellow', '(x2, y2)', '(x, y)', '(1)'], {}), '(gameDisplay, yellow, (x2, y2), (x, y), 1)\n', (38425, 38467), False, 'import pygame\n'), ((38805, 38866), 'pygame.draw.line', 'pygame.draw.line', (['gameDisplay', 'colors[i]', '(x2, y2)', '(x, y)', '(1)'], {}), '(gameDisplay, colors[i], (x2, y2), (x, y), 1)\n', (38821, 38866), False, 'import pygame\n'), ((41042, 41063), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (41059, 41063), False, 'import pygame\n'), ((41357, 41380), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (41378, 41380), False, 'import pygame\n'), ((13275, 13330), 'heapq.heappush', 'heapq.heappush', (['q[i]', '(newNode.cost, count[i], newNode)'], {}), '(q[i], (newNode.cost, count[i], newNode))\n', (13289, 13330), False, 'import heapq\n'), ((32694, 32719), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (32704, 32719), False, 'import math\n'), ((40987, 41012), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (40997, 41012), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 9 20:00:55 2021
@author: oscar
"""
import numpy as np
import math
def bin_MUA_data(MUA,bin_res):
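    # Sums the multi-unit activity into non-overlapping time bins of bin_res samples
    # (rows are time samples, columns are channels); the final bin may be partial.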
counter = 0
binned_MUA = np.zeros([math.ceil(len(MUA[:,1])/bin_res),len(MUA[1,:])])
for bin in range(math.ceil(len(MUA[:,1])/bin_res)):
        # the last (possibly partial) bin has index ceil(N/bin_res) - 1
        if bin != math.ceil(len(MUA[:,1])/bin_res) - 1:
temp = np.sum(MUA[counter:counter+bin_res,:],0)
else:
temp = np.sum(MUA[counter:len(MUA[:,1]),:],0)
binned_MUA[bin,:] = temp
counter = counter + bin_res
binned_MUA = binned_MUA.astype(int)
return binned_MUA
def online_histogram_w_sat_based_nb_of_samples(data_in,sample_val_cutoff, max_firing_rate):
# We consider the histogram to be full when "sample_val_cutoff" values have
# been entered into it.
# Inputs:
# data_in = 1d vector of MUA data from 1 channel.
    # sample_val_cutoff = how many values the histogram will measure until we
# consider the histogram training period to have ended.
# max_firing_rate: S-1, max value that we consider in the MUA data.
# Outputs:
# approx sorted histogram, how many samples we measure (just for testing purposes)
hist = {'0':0}
flag_1 = False
i = 0
while not flag_1: # the histogram isn't full yet
# Saturate the histogram at the max firing rate
if data_in[i] >= max_firing_rate:
data_in[i] = max_firing_rate
symbol = str(data_in[i])
if symbol in hist: # If this symbol is represented in the histogram
hist[symbol] += 1
else: # If this symbol is new in the histogram
hist[symbol] = 1
# If the histogram is full, end the while loop
hist_count = 0
for symbol_hist in hist:
hist_count += int(hist.get(str(symbol_hist)))
if hist_count > sample_val_cutoff-1:
flag_1 = True
# If we've exceeded the number of samples in the data, end the while loop
if i+1 == len(data_in):
flag_1 = True
i += 1 # Increment counter
return hist, i
# Approx sort used in the work, where the histogram is assumed to follow a
# unimodal distribution. The peak in the histogram is identified and given an
# index of 0, and values on either side are iteratively assigned the next
# indices.
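# For example, with 6 bins and the peak at bin 3, the bins are returned in the order
# 3, 2, 4, 1, 5, 0 (peak first, then alternating sides moving outwards).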
def approx_sort(hist):
idx = np.arange(0,len(hist))
p_idx = np.argmax(hist)
if (p_idx>len(hist)/2): # peak shows on right half
right = np.arange(2,(len(hist)-1-p_idx)*2+1,2) #idx on the right (even or odd doesn't matter)
idx = np.delete(idx,right) # remove used idx
left = idx
else: # peak shows on left half
left = np.arange(1,(2*p_idx-1)+1,2)
idx = np.delete(idx,left)
right = idx
idx = np.hstack((np.flip(left),right))
idx = np.argsort(idx)
return idx.astype(int), hist[idx.astype(int)]
|
[
"numpy.sum",
"numpy.flip",
"numpy.argmax",
"numpy.argsort",
"numpy.arange",
"numpy.delete"
] |
[((2571, 2586), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (2580, 2586), True, 'import numpy as np\n'), ((3054, 3069), 'numpy.argsort', 'np.argsort', (['idx'], {}), '(idx)\n', (3064, 3069), True, 'import numpy as np\n'), ((2778, 2799), 'numpy.delete', 'np.delete', (['idx', 'right'], {}), '(idx, right)\n', (2787, 2799), True, 'import numpy as np\n'), ((2908, 2942), 'numpy.arange', 'np.arange', (['(1)', '(2 * p_idx - 1 + 1)', '(2)'], {}), '(1, 2 * p_idx - 1 + 1, 2)\n', (2917, 2942), True, 'import numpy as np\n'), ((2952, 2972), 'numpy.delete', 'np.delete', (['idx', 'left'], {}), '(idx, left)\n', (2961, 2972), True, 'import numpy as np\n'), ((382, 426), 'numpy.sum', 'np.sum', (['MUA[counter:counter + bin_res, :]', '(0)'], {}), '(MUA[counter:counter + bin_res, :], 0)\n', (388, 426), True, 'import numpy as np\n'), ((3021, 3034), 'numpy.flip', 'np.flip', (['left'], {}), '(left)\n', (3028, 3034), True, 'import numpy as np\n')]
|
from collections import OrderedDict
import math
from numbers import Number
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from rlkit.core import logger
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_meta_irl_algorithm import TorchMetaIRLAlgorithm
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.core.train_util import linear_schedule
from rlkit.torch.core import PyTorchModule
from rlkit.torch.sac.policies import PostCondMLPPolicyWrapper
from rlkit.data_management.path_builder import PathBuilder
from gym.spaces import Dict
from rlkit.torch.irl.encoders.aggregators import sum_aggregator
from rlkit.torch.distributions import ReparamMultivariateNormalDiag
OUTER_RADIUS = 2.0
TASK_RADIUS = 2.0
SAME_COLOUR_RADIUS = 1.0
def concat_trajs(trajs):
new_dict = {}
for k in trajs[0].keys():
if isinstance(trajs[0][k], dict):
new_dict[k] = concat_trajs([t[k] for t in trajs])
else:
new_dict[k] = np.concatenate([t[k] for t in trajs], axis=0)
return new_dict
def subsample_traj(traj, num_samples):
traj_len = traj['observations'].shape[0]
idxs = np.random.choice(traj_len, size=num_samples, replace=traj_len<num_samples)
new_traj = {k: traj[k][idxs,...] for k in traj}
return new_traj
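# Maps the aggregated context representation r to the mean and log-std of a diagonal
# Gaussian posterior over the latent task variable z.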
class R2ZMap(PyTorchModule):
def __init__(
self,
r_dim,
z_dim,
hid_dim,
        # this keeps the posterior close to deterministic, which makes it easier to train
# before we turn on the KL regularization
LOG_STD_SUBTRACT_VALUE=2.0
):
self.save_init_params(locals())
super().__init__()
self.trunk = nn.Sequential(
nn.Linear(r_dim, hid_dim),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Linear(hid_dim, hid_dim),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
)
self.mean_fc = nn.Linear(hid_dim, z_dim)
self.log_sig_fc = nn.Linear(hid_dim, z_dim)
self.LOG_STD_SUBTRACT_VALUE = LOG_STD_SUBTRACT_VALUE
        print('LOG STD SUBTRACT VALUE FOR APPROX POSTERIOR IS %f' % LOG_STD_SUBTRACT_VALUE)
def forward(self, r):
trunk_output = self.trunk(r)
mean = self.mean_fc(trunk_output)
log_sig = self.log_sig_fc(trunk_output) - self.LOG_STD_SUBTRACT_VALUE
return mean, log_sig
class Encoder(PyTorchModule):
def __init__(self, z_dim):
self.save_init_params(locals())
super().__init__()
HID_DIM = 64
self.encoder_mlp = nn.Sequential(
nn.Linear(6, HID_DIM),
nn.BatchNorm1d(HID_DIM),
nn.ReLU(),
nn.Linear(HID_DIM, HID_DIM),
nn.BatchNorm1d(HID_DIM),
nn.ReLU(),
nn.Linear(HID_DIM, HID_DIM)
)
self.agg = sum_aggregator
self.r2z_map = R2ZMap(HID_DIM, z_dim, HID_DIM)
def forward(self, context, mask):
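        # context is N_tasks x N_max_context x 6 (one good/bad colour pair per context point);
        # each pair is embedded, aggregated per task under the mask, and mapped to a posterior over z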
N_tasks, N_max_cont, N_dim = context.size(0), context.size(1), context.size(2)
context = context.view(-1, N_dim)
embedded_context = self.encoder_mlp(context)
embed_dim = embedded_context.size(1)
embedded_context = embedded_context.view(N_tasks, N_max_cont, embed_dim)
agg = self.agg(embedded_context, mask)
post_mean, post_log_sig = self.r2z_map(agg)
return ReparamMultivariateNormalDiag(post_mean, post_log_sig)
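# Trains the context encoder together with a classifier MLP on a synthetic colour-matching
# task: conditioned on the latent z inferred from the context pairs, the MLP must predict
# which of the two candidate colours in each example lies close to the task colour.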
class FetchTaskDesign():
def __init__(
self,
mlp,
num_tasks_used_per_update=5,
min_context_size=1,
max_context_size=5,
classification_batch_size_per_task=32,
encoder_lr=1e-3,
encoder_optimizer_class=optim.Adam,
mlp_lr=1e-3,
mlp_optimizer_class=optim.Adam,
num_update_loops_per_train_call=1000,
num_epochs=10000,
z_dim=16,
**kwargs
):
self.mlp = mlp
self.encoder = Encoder(z_dim)
self.num_tasks_used_per_update = num_tasks_used_per_update
self.min_context_size = min_context_size
self.max_context_size = max_context_size
self.classification_batch_size_per_task = classification_batch_size_per_task
self.encoder_optimizer = encoder_optimizer_class(
self.encoder.parameters(),
lr=encoder_lr,
betas=(0.9, 0.999)
)
self.mlp_optimizer = mlp_optimizer_class(
self.mlp.parameters(),
lr=mlp_lr,
betas=(0.9, 0.999)
)
self.bce = nn.BCEWithLogitsLoss()
self.num_update_loops_per_train_call = num_update_loops_per_train_call
self.num_epochs = num_epochs
def _sample_color_within_radius(self, center, radius):
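        # rejection-sample a colour within `radius` of `center` that also stays inside the
        # outer sphere of radius OUTER_RADIUS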
new_color = self._uniform_sample_from_sphere(radius) + center
while np.linalg.norm(new_color) > OUTER_RADIUS:
new_color = self._uniform_sample_from_sphere(radius) + center
return new_color
def _uniform_sample_from_sphere(self, radius):
x = np.random.normal(size=3)
x /= np.linalg.norm(x, axis=-1)
r = radius
u = np.random.uniform()
sampled_color = r * (u**(1.0/3.0)) * x
return sampled_color
def _sample_color_with_min_dist(self, color, min_dist):
new_color = self._uniform_sample_from_sphere(OUTER_RADIUS)
while np.linalg.norm(new_color - color, axis=-1) < min_dist:
new_color = self._uniform_sample_from_sphere(OUTER_RADIUS)
return new_color
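    # Builds one training batch: for each sampled task colour, every classification example
    # pairs a colour drawn near the task with a freely drawn colour (the random slot order
    # gives the label), and the per-task context holds up to max_context_size such pairs
    # together with a mask over the used slots.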
def _get_training_batch(self):
task_colors = []
for _ in range(self.num_tasks_used_per_update):
task_colors.append(self._uniform_sample_from_sphere(TASK_RADIUS))
task_colors = np.array(task_colors)
input_batch = []
labels = []
for task in task_colors:
for _ in range(self.classification_batch_size_per_task):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
bad = self._sample_color_with_min_dist(task, 0.0) # HERE
if np.random.uniform() > 0.5:
input_batch.append(np.concatenate((good, bad)))
labels.append([1.0])
else:
input_batch.append(np.concatenate((bad, good)))
labels.append([0.0])
input_batch = Variable(ptu.from_numpy(np.array(input_batch)))
labels = Variable(ptu.from_numpy(np.array(labels)))
context = []
mask = Variable(ptu.from_numpy(np.zeros((self.num_tasks_used_per_update, self.max_context_size, 1))))
for task_num, task in enumerate(task_colors):
task_context = []
for _ in range(self.max_context_size):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
bad = self._sample_color_with_min_dist(task, 0.0) # HERE
# always the same order because it's the context
task_context.append(np.concatenate((good, bad)))
context.append(task_context)
con_size = np.random.randint(self.min_context_size, self.max_context_size+1)
mask[task_num,:con_size,:] = 1.0
context = Variable(ptu.from_numpy(np.array(context)))
return context, mask, input_batch, labels
def _get_eval_batch(self):
task_colors = []
for _ in range(self.num_tasks_used_per_update):
task_colors.append(self._uniform_sample_from_sphere(TASK_RADIUS))
task_colors = np.array(task_colors)
# task_colors = np.zeros((self.num_tasks_used_per_update, 3)) # THIS
# task_colors[:,0] = -1.0 # THIS
input_batch = []
labels = []
for task in task_colors:
for _ in range(self.classification_batch_size_per_task):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
bad = self._sample_color_with_min_dist(task, SAME_COLOUR_RADIUS)
if np.random.uniform() > 0.5:
input_batch.append(np.concatenate((good, bad)))
labels.append([1.0])
else:
input_batch.append(np.concatenate((bad, good)))
labels.append([0.0])
input_batch = Variable(ptu.from_numpy(np.array(input_batch)))
labels = Variable(ptu.from_numpy(np.array(labels)))
context = []
mask = Variable(ptu.from_numpy(np.zeros((self.num_tasks_used_per_update, self.max_context_size, 1))))
for task_num, task in enumerate(task_colors):
task_context = []
for _ in range(self.max_context_size):
good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS)
# good = np.zeros(3) # THIS
bad = self._sample_color_with_min_dist(task, 0.0) # HERE
# bad = np.array([2.0, 0.0, 0.0]) # THIS
# always the same order because it's the context
task_context.append(np.concatenate((good, bad)))
context.append(task_context)
con_size = np.random.randint(self.min_context_size, self.max_context_size+1)
mask[task_num,:con_size,:] = 1.0
context = Variable(ptu.from_numpy(np.array(context)))
return context, mask, input_batch, labels
def train(self):
for e in range(self.num_epochs):
self._do_training(e, self.num_update_loops_per_train_call)
self.evaluate()
def _do_training(self, epoch, num_updates):
        '''
        Train the encoder and the classifier MLP
        '''
self.mlp.train()
self.encoder.train()
for _ in range(num_updates):
self.encoder_optimizer.zero_grad()
self.mlp_optimizer.zero_grad()
# prep the batches
context, mask, input_batch, labels = self._get_training_batch()
post_dist = self.encoder(context, mask)
# z = post_dist.sample() # N_tasks x Dim
z = post_dist.mean
repeated_z = z.repeat(1, self.classification_batch_size_per_task).view(-1, z.size(1))
mlp_input = torch.cat([input_batch, repeated_z], dim=-1)
preds = self.mlp(mlp_input)
loss = self.bce(preds, labels)
loss.backward()
self.mlp_optimizer.step()
self.encoder_optimizer.step()
def evaluate(self):
eval_statistics = OrderedDict()
self.mlp.eval()
self.encoder.eval()
for i in range(1, 12):
# prep the batches
# context, mask, input_batch, labels = self._get_training_batch()
context, mask, input_batch, labels = self._get_eval_batch()
post_dist = self.encoder(context, mask)
# z = post_dist.sample() # N_tasks x Dim
z = post_dist.mean
repeated_z = z.repeat(1, self.classification_batch_size_per_task).view(-1, z.size(1))
mlp_input = torch.cat([input_batch, repeated_z], dim=-1)
preds = self.mlp(mlp_input)
class_preds = (preds > 0).type(preds.data.type())
accuracy = (class_preds == labels).type(torch.FloatTensor).mean()
eval_statistics['Acc for %d' % i] = np.mean(ptu.get_numpy(accuracy))
# for key, value in eval_statistics.items():
# logger.record_tabular(key, value)
# logger.dump_tabular(with_prefix=False, with_timestamp=False)
print(np.mean(list(eval_statistics.values())))
def cuda(self):
self.encoder.cuda()
self.mlp.cuda()
def cpu(self):
self.encoder.cpu()
self.mlp.cpu()
def _elem_or_tuple_to_variable(elem_or_tuple):
if isinstance(elem_or_tuple, tuple):
return tuple(
_elem_or_tuple_to_variable(e) for e in elem_or_tuple
)
return Variable(ptu.from_numpy(elem_or_tuple).float(), requires_grad=False)
def _filter_batch(np_batch):
for k, v in np_batch.items():
if v.dtype == np.bool:
yield k, v.astype(int)
else:
yield k, v
def np_to_pytorch_batch(np_batch):
return {
k: _elem_or_tuple_to_variable(x)
for k, x in _filter_batch(np_batch)
if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)
}
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
if isinstance(sum_exp, Number):
return m + math.log(sum_exp)
else:
return m + torch.log(sum_exp)
|
[
"torch.cat",
"rlkit.torch.pytorch_util.from_numpy",
"numpy.random.randint",
"numpy.linalg.norm",
"numpy.random.normal",
"torch.exp",
"rlkit.torch.distributions.ReparamMultivariateNormalDiag",
"numpy.random.choice",
"torch.nn.Linear",
"torch.log",
"torch.nn.BCEWithLogitsLoss",
"rlkit.torch.pytorch_util.get_numpy",
"torch.nn.BatchNorm1d",
"torch.max",
"numpy.concatenate",
"numpy.random.uniform",
"torch.nn.ReLU",
"numpy.dtype",
"numpy.zeros",
"numpy.array",
"collections.OrderedDict"
] |
[((1297, 1373), 'numpy.random.choice', 'np.random.choice', (['traj_len'], {'size': 'num_samples', 'replace': '(traj_len < num_samples)'}), '(traj_len, size=num_samples, replace=traj_len < num_samples)\n', (1313, 1373), True, 'import numpy as np\n'), ((2060, 2085), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'z_dim'], {}), '(hid_dim, z_dim)\n', (2069, 2085), True, 'from torch import nn as nn\n'), ((2112, 2137), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'z_dim'], {}), '(hid_dim, z_dim)\n', (2121, 2137), True, 'from torch import nn as nn\n'), ((3509, 3563), 'rlkit.torch.distributions.ReparamMultivariateNormalDiag', 'ReparamMultivariateNormalDiag', (['post_mean', 'post_log_sig'], {}), '(post_mean, post_log_sig)\n', (3538, 3563), False, 'from rlkit.torch.distributions import ReparamMultivariateNormalDiag\n'), ((4729, 4751), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (4749, 4751), True, 'from torch import nn as nn\n'), ((5223, 5247), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3)'}), '(size=3)\n', (5239, 5247), True, 'import numpy as np\n'), ((5261, 5287), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (5275, 5287), True, 'import numpy as np\n'), ((5319, 5338), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5336, 5338), True, 'import numpy as np\n'), ((5931, 5952), 'numpy.array', 'np.array', (['task_colors'], {}), '(task_colors)\n', (5939, 5952), True, 'import numpy as np\n'), ((7739, 7760), 'numpy.array', 'np.array', (['task_colors'], {}), '(task_colors)\n', (7747, 7760), True, 'import numpy as np\n'), ((10688, 10701), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10699, 10701), False, 'from collections import OrderedDict\n'), ((12832, 12871), 'torch.max', 'torch.max', (['value'], {'dim': 'dim', 'keepdim': '(True)'}), '(value, dim=dim, keepdim=True)\n', (12841, 12871), False, 'import torch\n'), ((13105, 13121), 'torch.max', 'torch.max', (['value'], {}), '(value)\n', (13114, 13121), False, 'import torch\n'), ((1134, 1179), 'numpy.concatenate', 'np.concatenate', (['[t[k] for t in trajs]'], {'axis': '(0)'}), '([t[k] for t in trajs], axis=0)\n', (1148, 1179), True, 'import numpy as np\n'), ((1839, 1864), 'torch.nn.Linear', 'nn.Linear', (['r_dim', 'hid_dim'], {}), '(r_dim, hid_dim)\n', (1848, 1864), True, 'from torch import nn as nn\n'), ((1878, 1901), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hid_dim'], {}), '(hid_dim)\n', (1892, 1901), True, 'from torch import nn as nn\n'), ((1915, 1924), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1922, 1924), True, 'from torch import nn as nn\n'), ((1938, 1965), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'hid_dim'], {}), '(hid_dim, hid_dim)\n', (1947, 1965), True, 'from torch import nn as nn\n'), ((1979, 2002), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hid_dim'], {}), '(hid_dim)\n', (1993, 2002), True, 'from torch import nn as nn\n'), ((2016, 2025), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2023, 2025), True, 'from torch import nn as nn\n'), ((2715, 2736), 'torch.nn.Linear', 'nn.Linear', (['(6)', 'HID_DIM'], {}), '(6, HID_DIM)\n', (2724, 2736), True, 'from torch import nn as nn\n'), ((2750, 2773), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['HID_DIM'], {}), '(HID_DIM)\n', (2764, 2773), True, 'from torch import nn as nn\n'), ((2787, 2796), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2794, 2796), True, 'from torch import nn as nn\n'), ((2810, 2837), 'torch.nn.Linear', 'nn.Linear', (['HID_DIM', 'HID_DIM'], {}), '(HID_DIM, 
HID_DIM)\n', (2819, 2837), True, 'from torch import nn as nn\n'), ((2851, 2874), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['HID_DIM'], {}), '(HID_DIM)\n', (2865, 2874), True, 'from torch import nn as nn\n'), ((2888, 2897), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2895, 2897), True, 'from torch import nn as nn\n'), ((2911, 2938), 'torch.nn.Linear', 'nn.Linear', (['HID_DIM', 'HID_DIM'], {}), '(HID_DIM, HID_DIM)\n', (2920, 2938), True, 'from torch import nn as nn\n'), ((5013, 5038), 'numpy.linalg.norm', 'np.linalg.norm', (['new_color'], {}), '(new_color)\n', (5027, 5038), True, 'import numpy as np\n'), ((5562, 5604), 'numpy.linalg.norm', 'np.linalg.norm', (['(new_color - color)'], {'axis': '(-1)'}), '(new_color - color, axis=-1)\n', (5576, 5604), True, 'import numpy as np\n'), ((7297, 7364), 'numpy.random.randint', 'np.random.randint', (['self.min_context_size', '(self.max_context_size + 1)'], {}), '(self.min_context_size, self.max_context_size + 1)\n', (7314, 7364), True, 'import numpy as np\n'), ((9353, 9420), 'numpy.random.randint', 'np.random.randint', (['self.min_context_size', '(self.max_context_size + 1)'], {}), '(self.min_context_size, self.max_context_size + 1)\n', (9370, 9420), True, 'import numpy as np\n'), ((10399, 10443), 'torch.cat', 'torch.cat', (['[input_batch, repeated_z]'], {'dim': '(-1)'}), '([input_batch, repeated_z], dim=-1)\n', (10408, 10443), False, 'import torch\n'), ((11226, 11270), 'torch.cat', 'torch.cat', (['[input_batch, repeated_z]'], {'dim': '(-1)'}), '([input_batch, repeated_z], dim=-1)\n', (11235, 11270), False, 'import torch\n'), ((13150, 13170), 'torch.exp', 'torch.exp', (['(value - m)'], {}), '(value - m)\n', (13159, 13170), False, 'import torch\n'), ((6588, 6609), 'numpy.array', 'np.array', (['input_batch'], {}), '(input_batch)\n', (6596, 6609), True, 'import numpy as np\n'), ((6653, 6669), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6661, 6669), True, 'import numpy as np\n'), ((6741, 6809), 'numpy.zeros', 'np.zeros', (['(self.num_tasks_used_per_update, self.max_context_size, 1)'], {}), '((self.num_tasks_used_per_update, self.max_context_size, 1))\n', (6749, 6809), True, 'import numpy as np\n'), ((7450, 7467), 'numpy.array', 'np.array', (['context'], {}), '(context)\n', (7458, 7467), True, 'import numpy as np\n'), ((8539, 8560), 'numpy.array', 'np.array', (['input_batch'], {}), '(input_batch)\n', (8547, 8560), True, 'import numpy as np\n'), ((8604, 8620), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8612, 8620), True, 'import numpy as np\n'), ((8692, 8760), 'numpy.zeros', 'np.zeros', (['(self.num_tasks_used_per_update, self.max_context_size, 1)'], {}), '((self.num_tasks_used_per_update, self.max_context_size, 1))\n', (8700, 8760), True, 'import numpy as np\n'), ((9506, 9523), 'numpy.array', 'np.array', (['context'], {}), '(context)\n', (9514, 9523), True, 'import numpy as np\n'), ((11507, 11530), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['accuracy'], {}), '(accuracy)\n', (11520, 11530), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12120, 12149), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['elem_or_tuple'], {}), '(elem_or_tuple)\n', (12134, 12149), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12505, 12518), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (12513, 12518), True, 'import numpy as np\n'), ((13290, 13308), 'torch.log', 'torch.log', (['sum_exp'], {}), '(sum_exp)\n', (13299, 13308), False, 'import torch\n'), ((6275, 6294), 'numpy.random.uniform', 
'np.random.uniform', ([], {}), '()\n', (6292, 6294), True, 'import numpy as np\n'), ((7203, 7230), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (7217, 7230), True, 'import numpy as np\n'), ((8226, 8245), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8243, 8245), True, 'import numpy as np\n'), ((9259, 9286), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (9273, 9286), True, 'import numpy as np\n'), ((12998, 13015), 'torch.exp', 'torch.exp', (['value0'], {}), '(value0)\n', (13007, 13015), False, 'import torch\n'), ((6341, 6368), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (6355, 6368), True, 'import numpy as np\n'), ((6472, 6499), 'numpy.concatenate', 'np.concatenate', (['(bad, good)'], {}), '((bad, good))\n', (6486, 6499), True, 'import numpy as np\n'), ((8292, 8319), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (8306, 8319), True, 'import numpy as np\n'), ((8423, 8450), 'numpy.concatenate', 'np.concatenate', (['(bad, good)'], {}), '((bad, good))\n', (8437, 8450), True, 'import numpy as np\n')]
|
import numpy as np
from plantcv.plantcv.visualize import colorize_label_img
def test_colorize_label_img():
"""Test for PlantCV."""
label_img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
colored_img = colorize_label_img(label_img)
assert (colored_img.shape[0:-1] == label_img.shape) and colored_img.shape[-1] == 3
|
[
"numpy.array",
"plantcv.plantcv.visualize.colorize_label_img"
] |
[((153, 196), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (161, 196), True, 'import numpy as np\n'), ((215, 244), 'plantcv.plantcv.visualize.colorize_label_img', 'colorize_label_img', (['label_img'], {}), '(label_img)\n', (233, 244), False, 'from plantcv.plantcv.visualize import colorize_label_img\n')]
|
from matplotlib import pyplot
import numpy
x = numpy.linspace(-1, 1, 1000)
y1 = numpy.exp(-x**2 * 10) * (1 + 0.05 * numpy.random.rand(len(x)))
y2 = (numpy.exp(10*(-(x-0.3)**2 - 0.75*x**4 - 0.25*x**6)) + numpy.piecewise(x, [x < 0.3, x >= 0.3], [lambda x: -(x-0.3)*numpy.sqrt(1+x), 0])) * (1 + 0.05 * numpy.random.rand(len(x)))
def plot_max(x, y):
x_c = numpy.argmax(y) / len(x)
ax_lim = (x_c - 0.1, 0.2, 0.2, 0.2)
f = pyplot.plot(x, y)
pyplot.xlim(-1, 1)
pyplot.arrow(2 * x_c - 1, 0.4, 0, 0.5, head_width=0.025, head_length=0.05)
ax = pyplot.axes(ax_lim)
ax.plot(x, y)
ax.set_xlim(2 * (x_c - 0.56), 2 * (x_c - 0.44))
ax.set_ylim(0.9, 1.1)
return f
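# Usage sketch with the arrays built above: plot_max draws the curve over [-1, 1], marks the
# peak with a vertical arrow and adds an inset axes zoomed in around the maximum, e.g.
#     plot_max(x, y2)
#     pyplot.show()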
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"numpy.argmax",
"matplotlib.pyplot.axes",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.arrow",
"numpy.sqrt"
] |
[((48, 75), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (62, 75), False, 'import numpy\n'), ((81, 104), 'numpy.exp', 'numpy.exp', (['(-x ** 2 * 10)'], {}), '(-x ** 2 * 10)\n', (90, 104), False, 'import numpy\n'), ((431, 448), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {}), '(x, y)\n', (442, 448), False, 'from matplotlib import pyplot\n'), ((453, 471), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (464, 471), False, 'from matplotlib import pyplot\n'), ((476, 550), 'matplotlib.pyplot.arrow', 'pyplot.arrow', (['(2 * x_c - 1)', '(0.4)', '(0)', '(0.5)'], {'head_width': '(0.025)', 'head_length': '(0.05)'}), '(2 * x_c - 1, 0.4, 0, 0.5, head_width=0.025, head_length=0.05)\n', (488, 550), False, 'from matplotlib import pyplot\n'), ((560, 579), 'matplotlib.pyplot.axes', 'pyplot.axes', (['ax_lim'], {}), '(ax_lim)\n', (571, 579), False, 'from matplotlib import pyplot\n'), ((150, 215), 'numpy.exp', 'numpy.exp', (['(10 * (-(x - 0.3) ** 2 - 0.75 * x ** 4 - 0.25 * x ** 6))'], {}), '(10 * (-(x - 0.3) ** 2 - 0.75 * x ** 4 - 0.25 * x ** 6))\n', (159, 215), False, 'import numpy\n'), ((358, 373), 'numpy.argmax', 'numpy.argmax', (['y'], {}), '(y)\n', (370, 373), False, 'import numpy\n'), ((264, 281), 'numpy.sqrt', 'numpy.sqrt', (['(1 + x)'], {}), '(1 + x)\n', (274, 281), False, 'import numpy\n')]
|
import numpy as np
def cgls(A, b):
height, width = A.shape
x = np.zeros((height))
while(True):
sumA = A.sum()
if (sumA < 100):
break
if (np.linalg.det(A) < 1):
A = A + np.eye(height, width) * sumA * 0.000000005
else:
x = np.linalg.inv(A).dot(b)
break
return x
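# Note: if A.sum() < 100 the zero vector is returned unchanged; otherwise a crude ridge term
# np.eye(height, width) * A.sum() * 5e-9 is added to A on each pass until np.linalg.det(A) >= 1,
# at which point x = np.linalg.inv(A).dot(b) is returned.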
|
[
"numpy.linalg.det",
"numpy.linalg.inv",
"numpy.zeros",
"numpy.eye"
] |
[((72, 88), 'numpy.zeros', 'np.zeros', (['height'], {}), '(height)\n', (80, 88), True, 'import numpy as np\n'), ((186, 202), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (199, 202), True, 'import numpy as np\n'), ((302, 318), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (315, 318), True, 'import numpy as np\n'), ((229, 250), 'numpy.eye', 'np.eye', (['height', 'width'], {}), '(height, width)\n', (235, 250), True, 'import numpy as np\n')]
|
'''
Created on 19 Nov 2017
@author: Simon
'''
from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm
import numpy as np
import datetime
import pickle
from scipy.interpolate import interp1d
from boundary import BoundaryConditionCollection1D
from diagnostic import DiagnosticModule
class ThawSlump(object): # 1D
# time_initial only works when forcing is provided
def __init__(
self, tsmesh, time_step_module=None, output_step_module=None,
forcing_module=None, thermal_properties=None, time_initial=None):
self.mesh = tsmesh
self.variables = {}
self.variables_store = []
self.diagnostic_modules = {}
self.diagnostic_update_order = []
self.eq = None
self.boundary_condition_collection = None
self._time = Variable(value=0)
self._time_step_module = time_step_module
self._timeref = None # will generally be set by forcing_module; otherwise manually
if forcing_module is not None:
self.initializeForcing(forcing_module)
if time_initial is not None:
self.time = time_initial
if thermal_properties is not None:
self.initializeThermalProperties(thermal_properties)
self._output_step_module = output_step_module
self._output_module = SlumpOutput()
if output_step_module is None:
self._output_step_module = OutputStep()
@property
def time(self):
return float(self._time.value)
@time.setter
def time(self, t): # can also handle date objects
try:
self.date = t
except:
self._time.setValue(t)
@property
def timeStep(self):
return self._time_step_module.calculate(self)
@property
def date(self):
return self._internal_time_to_date(self.time)
def _internal_time_to_date(self, internal_time):
return self._timeref + datetime.timedelta(seconds=internal_time)
@date.setter
def date(self, d):
dtsec = self._date_to_internal_time(d)
self._time.setValue(dtsec)
def _date_to_internal_time(self, d):
dt = d - self._timeref
dtsec = dt.days * 24 * 3600 + dt.seconds + dt.microseconds * 1e-6
return dtsec
def initializeTimeReference(self, timeref):
# timeref is a datetime object
self._timeref = timeref
def initializePDE(self, tseq=None):
self.eq = tseq
def initializeTimeStepModule(self, time_step_module):
self._time_step_module = time_step_module
def _initializeSourcesZero(self, source_name='S'):
self.variables[source_name] = CellVariable(
name=source_name, mesh=self.mesh.mesh, value=0.0)
def initializeDiagnostic(
self, variable, funpointer, default=0.0, face_variable=False,
output_variable=True):
if not face_variable:
self.variables[variable] = CellVariable(
name=variable, mesh=self.mesh.mesh, value=default)
else:
self.variables[variable] = FaceVariable(
name=variable, mesh=self.mesh.mesh, value=default)
self.diagnostic_modules[variable] = DiagnosticModule(funpointer, self)
if output_variable:
self.variables_store.append(variable)
self.diagnostic_update_order.append(variable)
def initializeOutputStepModule(self, output_step_module):
self._output_step_module = output_step_module
def initializeThermalProperties(self, thermal_properties):
self.thermal_properties = thermal_properties
self.thermal_properties.initializeVariables(self)
self.initializeTright()
def initializeForcing(self, forcing_module):
self.forcing_module = forcing_module
for varj in self.forcing_module.variables:
assert varj not in self.variables
self.variables[varj] = self.forcing_module.variables[varj]
self.initializeTimeReference(self.forcing_module._timeref)
def initializeEnthalpyTemperature(self, T_initial, proportion_frozen=None,
time=None):
# time can be internal time or also a datetime object
pf = 0.0 if proportion_frozen is None else proportion_frozen
assert pf >= 0.0 and pf <= 1.0
self.variables['T'].setValue(T_initial)
self.variables['h'].setValue(self.thermal_properties.enthalpyFromTemperature(
self, T=T_initial, proportion_frozen=pf))
self.updateDiagnostics()
if time is not None:
self.time = time
def updateDiagnostic(self, variable):
self.variables[variable].setValue(self.diagnostic_modules[variable].evaluate())
def updateDiagnostics(self, variables=None):
if variables is not None:
variablesorder = variables
else:
variablesorder = self.diagnostic_update_order
for variable in variablesorder:
self.updateDiagnostic(variable)
def specifyBoundaryConditions(self, boundary_condition_collection):
self.boundary_condition_collection = boundary_condition_collection
self.updateGeometryBoundaryConditions()
self.invokeBoundaryConditions()
self.initializePDE()
def updateGeometryBoundaryConditions(self):
self.boundary_condition_collection.updateGeometry(self)
def updateBoundaryConditions(self, bc_data, invoke=True):
self.boundary_condition_collection.update(bc_data)
if invoke:
self.invokeBoundaryConditions()
def invokeBoundaryConditions(self):
self.boundary_condition_collection.invoke(self)
def updateGeometry(self):
self.boundary_condition_collection.updateGeometry(self)
def nextOutput(self):
return self._output_step_module.next(self)
def updateOutput(self, datanew={}):
for v in self.variables_store:
datanew[v] = np.copy(self.variables[v].value)
# boundary condition outputs:
# separate routine: total source, source components, or for basic b.c. just value)
datanew.update(self.boundary_condition_collection.output())
self._output_module.update(self.date, datanew)
def exportOutput(self, fn):
self._output_module.export(fn)
def addStoredVariable(self, varname):
# varname can also be list
if isinstance(varname, str):
if varname not in self.variables_store:
self.variables_store.append(varname)
else: # tuple/list,etc.
for varnamej in varname:
self.addStoredVariable(varnamej)
class ThawSlumpEnthalpy(ThawSlump):
# both boundary conditions bc_inside and bc_headwall have to be provided,
# and they are only activated when forcing and thermal_properties are also given
def __init__(
self, tsmesh, time_step_module=None, output_step_module=None, h_initial=0.0,
T_initial=None, time_initial=None, proportion_frozen_initial=None,
forcing_module=None, thermal_properties=None, bc_inside=None, bc_headwall=None):
# T_initial only works if thermal_properties are provided
ThawSlump.__init__(
self, tsmesh, time_step_module=time_step_module,
output_step_module=output_step_module, time_initial=time_initial,
forcing_module=forcing_module, thermal_properties=thermal_properties)
self._initializeSourcesZero(source_name='S')
self._initializeSourcesZero(source_name='S_inside')
self._initializeSourcesZero(source_name='S_headwall')
# specific volumetric enthalpy
self.variables['h'] = CellVariable(
name='h', mesh=self.mesh.mesh, value=h_initial, hasOld=True)
self.addStoredVariable('h')
if T_initial is not None: # essentially overrides h_initial
self.initializeEnthalpyTemperature(
T_initial, proportion_frozen=proportion_frozen_initial)
if (bc_inside is not None and bc_headwall is not None
and self.thermal_properties is not None and self.forcing_module is not None):
bcc = BoundaryConditionCollection1D(
bc_headwall=bc_headwall, bc_inside=bc_inside)
self.specifyBoundaryConditions(bcc)
self._output_module.storeInitial(self)
def initializePDE(self):
self.eq = (TransientTerm(var=self.variables['h']) ==
DiffusionTerm(coeff=self.variables['k'], var=self.variables['T']) +
self.variables['S'] + self.variables['S_headwall'] +
self.variables['S_inside'])
def initializeTright(self):
extrapol_dist = (self.mesh.mesh.faceCenters[0, self.mesh.mesh.facesRight()][0]
-self.mesh.cell_mid_points)
self.dxf = CellVariable(mesh=self.mesh.mesh, value=extrapol_dist)
self.variables['T_right'] = (
self.variables['T'] + self.variables['T'].grad[0] * self.dxf)
def updateGeometry(self):
ThawSlump.updateGeometry(self)
self.initializeTright()
def _integrate(
self, time_step, max_time_step=None, residual_threshold=1e-3, max_steps=20):
apply_max_time_step = False
if time_step is None:
time_step = self.timeStep
if max_time_step is not None and time_step > max_time_step:
time_step = max_time_step
apply_max_time_step = True
residual = residual_threshold + 1
steps = 0
assert self._timeref == self.forcing_module._timeref
self.forcing_module.evaluateToVariable(t=self.time)
while residual > residual_threshold:
residual = self.eq.sweep(var=self.variables['h'], dt=time_step)
steps = steps + 1
if steps >= max_steps:
raise RuntimeError('Sweep did not converge')
self.time = self.time + time_step
self.variables['h'].updateOld()
self.updateDiagnostics()
return time_step, apply_max_time_step
def integrate(
self, time_end, time_step=None, residual_threshold=1e-2, max_steps=10,
time_start=None, viewer=None):
# time_end can also be date
if time_start is not None:
self.time = time_start
self.variables['h'].updateOld()
try:
interval = time_end - self.time
time_end_internal = time_end
except:
time_end_internal = self._date_to_internal_time(time_end)
time_output = self.nextOutput()
write_output = False
write_output_limit = False
time_steps = []
while self.time < time_end_internal:
max_time_step = time_end_internal - self.time
if time_output is not None and time_output < time_end_internal:
max_time_step = time_output - self.time
write_output_limit = True
time_step_actual, apply_max_time_step = self._integrate(
time_step, max_time_step=max_time_step)
time_steps.append(time_step_actual)
if apply_max_time_step and write_output_limit:
write_output = True
if viewer is not None:
viewer.plot()
viewer.axes.set_title(self.date)
if write_output:
time_output = self.nextOutput()
write_output = False
write_output_limit = False
# actually write output
datanew = {'nsteps':len(time_steps), 'mean_time_step':np.mean(time_steps)}
self.updateOutput(datanew=datanew)
time_steps = []
class SlumpOutput(object):
def __init__(self):
self.dates = []
self.data = {}
self.initial = {}
def update(self, date, datanew):
records = set(self.data.keys() + datanew.keys())
for record in records:
if record in self.data and record in datanew:
self.data[record].append(datanew[record])
elif record in self.data:
self.data[record].append(None)
else:
# new record; fill with Nones
self.data[record] = [None] * len(self.dates)
self.data[record].append(datanew[record])
self.dates.append(date)
def storeInitial(self, ts):
self.initial['mesh_mid_points'] = ts.mesh.cell_mid_points
self.initial['mesh_face_left'] = ts.mesh.face_left_position
self.initial['mesh_face_right'] = ts.mesh.face_right_position
self.initial['mesh_cell_volumes'] = ts.mesh.cell_volumes
self.initial['T_initial'] = np.copy(ts.variables['T'].value)
self.initial.update(ts.thermal_properties.output())
def export(self, fn):
with open(fn, 'wb') as f:
pickle.dump(self.read(), f)
def read(self):
return (self.dates, self.data, self.initial)
# nice way to read pickled SlumpOutput data (read method)
class SlumpResults(object):
def __init__(self, dates, data, initial, timeref=None):
self.dates = dates
self.data = data
self.initial = initial
if timeref is not None:
self._timeref = timeref
else:
self._timeref = self.dates[0]
@classmethod
def fromFile(cls, fn):
dates, data, initial = pickle.load(open(fn, 'rb'))
return cls(dates, data, initial)
def _date_to_internal_time(self, ds):
# ds is list
dts = [d - self._timeref for d in ds]
dtsec = [dt.days * 24 * 3600 + dt.seconds + dt.microseconds * 1e-6 for dt in dts]
return np.array(dtsec)
@property
def _depths(self):
return self.initial['mesh_face_right'] - self.initial['mesh_mid_points']
def readVariable(self, variable_name='T', interp_dates=None, interp_depths=None):
vararr = np.array(self.data[variable_name])
if interp_dates is not None:
dates_int = self._date_to_internal_time(self.dates)
interp_dates_int = self._date_to_internal_time(interp_dates)
interpolator_dates = interp1d(dates_int, vararr, axis=0)
vararr = interpolator_dates(interp_dates_int)
if interp_depths is not None:
# check dimensions
assert len(vararr.shape) == 2
assert vararr.shape[1] == self.initial['mesh_mid_points'].shape[0]
# interpolate
interpolator_depths = interp1d(self._depths, vararr, axis=1)
vararr = interpolator_depths(interp_depths)
return vararr
class TimeStep(object):
def __init__(self):
pass
def calculate(self, ts):
pass
class TimeStepConstant(TimeStep):
def __init__(self, step=1.0):
self.step = step
def calculate(self, ts):
return self.step
class TimeStepCFL(TimeStep):
def __init__(self, safety=0.9):
self.safety = safety
def calculate(self, ts):
K = np.array(ts.variables['K'])
K = 0.5 * (K[1::] + K[:-1:])
CFL = np.min(0.5 * (ts.mesh.cell_volumes) ** 2 / np.array((K / ts.variables['C'])))
step = self.safety * CFL
return step
class TimeStepCFLSources(TimeStep):
def __init__(
self, safety=0.9, relative_enthalpy_change=0.01,
slow_time_scale=3600 * 24 * 30):
self.safety = safety
self.relative_enthalpy_change = relative_enthalpy_change
        # internal time scale, should be >> process time scale; avoids division by zero
self.slow_time_scale = slow_time_scale
def calculate(self, ts):
K = np.array(ts.variables['k'])
# hack, only works in 1D and is insufficient for highly irregular grids
K = 0.5 * (K[1::] + K[:-1:])
CFL = np.min(0.5 * (ts.mesh.cell_volumes) ** 2 / np.array((K / ts.variables['c'])))
step = self.safety * CFL
S_total = np.abs(
ts.variables['S'] + ts.variables['S_headwall'] + ts.variables['S_inside'])
denom = (np.abs(ts.variables['h']) / self.slow_time_scale + S_total)
step_sources = (self.relative_enthalpy_change
* np.min(np.abs(np.array(ts.variables['h'] / denom))))
if step_sources < step:
step = step_sources
return step
class OutputStep(object):
def __init__(self):
pass
def next(self, ts):
return None
class OutputStepHourly(OutputStep):
def __init__(self):
pass
def next(self, ts):
d0 = ts.date
datenext = (datetime.datetime(d0.year, d0.month, d0.day, d0.hour)
+ datetime.timedelta(seconds=3600))
return ts._date_to_internal_time(datenext)
class Forcing(object):
def __init__(self, values_inp, timeref=datetime.datetime(2012, 1, 1), variables=None):
if variables is None:
self.variables = [vj for vj in values_inp]
else:
self.variables = variables
self._timeref = timeref
self.variables = {vj:Variable(value=values_inp[vj]) for vj in self.variables}
self.values = {vj: values_inp[vj] for vj in self.variables}
def evaluate(self, t=None):
return self.values
def evaluateToVariable(self, t=None):
for vj, ij in self.evaluate(t=t).iteritems():
self.variables[vj].setValue(ij)
class ForcingInterpolation(Forcing):
def __init__(self, values_inp, t_inp=None, variables=None, key_time='time'):
if t_inp is None:
t_inp_int = values_inp[key_time]
else:
t_inp_int = t_inp
self.t_inp = t_inp_int
self._timeref = t_inp_int[0]
t_inp_rel = [tj - self._timeref for tj in self.t_inp]
try:
self.t_inp_rel = np.array([tj.total_seconds() for tj in t_inp_rel])
except:
self.t_inp_rel = np.array(t_inp_rel)
if variables is None:
self.variables = [vj for vj in values_inp if vj != key_time]
else:
self.variables = variables
self.variables = {vj:Variable(value=values_inp[vj][0]) for vj in self.variables}
self.values = {vj: values_inp[vj] for vj in self.variables}
def evaluate(self, t=0):
try:
t_rel = (t - self._timeref).total_seconds() # datetime object
except:
t_rel = t # slump-internal time
vals = {vj:np.interp(t_rel, self.t_inp_rel, self.values[vj])
for vj in self.variables}
return vals
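# Note: ThawSlumpEnthalpy integrates the enthalpy form of the heat equation,
# TransientTerm(h) == DiffusionTerm(k, T) + S + S_headwall + S_inside, sweeping each step
# until the residual falls below residual_threshold; an output record is written whenever
# the time step is capped so that it lands exactly on the next requested output time.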
|
[
"fipy.CellVariable",
"numpy.abs",
"numpy.copy",
"fipy.DiffusionTerm",
"boundary.BoundaryConditionCollection1D",
"numpy.interp",
"datetime.datetime",
"numpy.mean",
"diagnostic.DiagnosticModule",
"numpy.array",
"fipy.Variable",
"datetime.timedelta",
"fipy.FaceVariable",
"scipy.interpolate.interp1d",
"fipy.TransientTerm"
] |
[((858, 875), 'fipy.Variable', 'Variable', ([], {'value': '(0)'}), '(value=0)\n', (866, 875), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((2770, 2832), 'fipy.CellVariable', 'CellVariable', ([], {'name': 'source_name', 'mesh': 'self.mesh.mesh', 'value': '(0.0)'}), '(name=source_name, mesh=self.mesh.mesh, value=0.0)\n', (2782, 2832), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((3326, 3360), 'diagnostic.DiagnosticModule', 'DiagnosticModule', (['funpointer', 'self'], {}), '(funpointer, self)\n', (3342, 3360), False, 'from diagnostic import DiagnosticModule\n'), ((7912, 7985), 'fipy.CellVariable', 'CellVariable', ([], {'name': '"""h"""', 'mesh': 'self.mesh.mesh', 'value': 'h_initial', 'hasOld': '(True)'}), "(name='h', mesh=self.mesh.mesh, value=h_initial, hasOld=True)\n", (7924, 7985), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((9095, 9149), 'fipy.CellVariable', 'CellVariable', ([], {'mesh': 'self.mesh.mesh', 'value': 'extrapol_dist'}), '(mesh=self.mesh.mesh, value=extrapol_dist)\n', (9107, 9149), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((13068, 13100), 'numpy.copy', 'np.copy', (["ts.variables['T'].value"], {}), "(ts.variables['T'].value)\n", (13075, 13100), True, 'import numpy as np\n'), ((14118, 14133), 'numpy.array', 'np.array', (['dtsec'], {}), '(dtsec)\n', (14126, 14133), True, 'import numpy as np\n'), ((14372, 14406), 'numpy.array', 'np.array', (['self.data[variable_name]'], {}), '(self.data[variable_name])\n', (14380, 14406), True, 'import numpy as np\n'), ((15526, 15553), 'numpy.array', 'np.array', (["ts.variables['K']"], {}), "(ts.variables['K'])\n", (15534, 15553), True, 'import numpy as np\n'), ((16187, 16214), 'numpy.array', 'np.array', (["ts.variables['k']"], {}), "(ts.variables['k'])\n", (16195, 16214), True, 'import numpy as np\n'), ((16482, 16568), 'numpy.abs', 'np.abs', (["(ts.variables['S'] + ts.variables['S_headwall'] + ts.variables['S_inside'])"], {}), "(ts.variables['S'] + ts.variables['S_headwall'] + ts.variables[\n 'S_inside'])\n", (16488, 16568), True, 'import numpy as np\n'), ((17407, 17436), 'datetime.datetime', 'datetime.datetime', (['(2012)', '(1)', '(1)'], {}), '(2012, 1, 1)\n', (17424, 17436), False, 'import datetime\n'), ((2027, 2068), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'internal_time'}), '(seconds=internal_time)\n', (2045, 2068), False, 'import datetime\n'), ((3062, 3125), 'fipy.CellVariable', 'CellVariable', ([], {'name': 'variable', 'mesh': 'self.mesh.mesh', 'value': 'default'}), '(name=variable, mesh=self.mesh.mesh, value=default)\n', (3074, 3125), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((3199, 3262), 'fipy.FaceVariable', 'FaceVariable', ([], {'name': 'variable', 'mesh': 'self.mesh.mesh', 'value': 'default'}), '(name=variable, mesh=self.mesh.mesh, value=default)\n', (3211, 3262), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((6142, 6174), 'numpy.copy', 'np.copy', (['self.variables[v].value'], {}), '(self.variables[v].value)\n', (6149, 6174), True, 'import numpy as np\n'), ((8402, 8477), 'boundary.BoundaryConditionCollection1D', 'BoundaryConditionCollection1D', ([], {'bc_headwall': 'bc_headwall', 'bc_inside': 'bc_inside'}), '(bc_headwall=bc_headwall, bc_inside=bc_inside)\n', (8431, 8477), False, 'from boundary 
import BoundaryConditionCollection1D\n'), ((8647, 8685), 'fipy.TransientTerm', 'TransientTerm', ([], {'var': "self.variables['h']"}), "(var=self.variables['h'])\n", (8660, 8685), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((14618, 14653), 'scipy.interpolate.interp1d', 'interp1d', (['dates_int', 'vararr'], {'axis': '(0)'}), '(dates_int, vararr, axis=0)\n', (14626, 14653), False, 'from scipy.interpolate import interp1d\n'), ((14969, 15007), 'scipy.interpolate.interp1d', 'interp1d', (['self._depths', 'vararr'], {'axis': '(1)'}), '(self._depths, vararr, axis=1)\n', (14977, 15007), False, 'from scipy.interpolate import interp1d\n'), ((17166, 17219), 'datetime.datetime', 'datetime.datetime', (['d0.year', 'd0.month', 'd0.day', 'd0.hour'], {}), '(d0.year, d0.month, d0.day, d0.hour)\n', (17183, 17219), False, 'import datetime\n'), ((17243, 17275), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (17261, 17275), False, 'import datetime\n'), ((17660, 17690), 'fipy.Variable', 'Variable', ([], {'value': 'values_inp[vj]'}), '(value=values_inp[vj])\n', (17668, 17690), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((18740, 18773), 'fipy.Variable', 'Variable', ([], {'value': 'values_inp[vj][0]'}), '(value=values_inp[vj][0])\n', (18748, 18773), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((19082, 19131), 'numpy.interp', 'np.interp', (['t_rel', 'self.t_inp_rel', 'self.values[vj]'], {}), '(t_rel, self.t_inp_rel, self.values[vj])\n', (19091, 19131), True, 'import numpy as np\n'), ((15650, 15681), 'numpy.array', 'np.array', (["(K / ts.variables['C'])"], {}), "(K / ts.variables['C'])\n", (15658, 15681), True, 'import numpy as np\n'), ((16394, 16425), 'numpy.array', 'np.array', (["(K / ts.variables['c'])"], {}), "(K / ts.variables['c'])\n", (16402, 16425), True, 'import numpy as np\n'), ((16596, 16621), 'numpy.abs', 'np.abs', (["ts.variables['h']"], {}), "(ts.variables['h'])\n", (16602, 16621), True, 'import numpy as np\n'), ((18530, 18549), 'numpy.array', 'np.array', (['t_inp_rel'], {}), '(t_inp_rel)\n', (18538, 18549), True, 'import numpy as np\n'), ((11913, 11932), 'numpy.mean', 'np.mean', (['time_steps'], {}), '(time_steps)\n', (11920, 11932), True, 'import numpy as np\n'), ((16752, 16787), 'numpy.array', 'np.array', (["(ts.variables['h'] / denom)"], {}), "(ts.variables['h'] / denom)\n", (16760, 16787), True, 'import numpy as np\n'), ((8709, 8774), 'fipy.DiffusionTerm', 'DiffusionTerm', ([], {'coeff': "self.variables['k']", 'var': "self.variables['T']"}), "(coeff=self.variables['k'], var=self.variables['T'])\n", (8722, 8774), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n')]
|
#!/usr/bin/env python
import numpy as np
from typing import Callable
def rectangle(a: float, b: float, f: Callable[[np.array], np.array],
h: float) -> float:
return h * np.sum(f(np.arange(a + h / 2, b + h / 2, h)))
def trapezoid(a: float, b: float, f: Callable[[np.array], np.array],
h: float) -> float:
return h / 2 * (f(a) + 2 * np.sum(f(np.arange(a + h, b, h))) + f(b))
def simpson(a: float, b: float, f: Callable[[np.array], np.array],
h: float) -> float:
return h / 6 * (f(a) + 2 * np.sum(f(np.arange(a + h, b, h))) +
4 * np.sum(f(np.arange(a + h / 2, b + h / 2, h))) + f(b))
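# These are the composite midpoint (rectangle), trapezoidal and Simpson rules on a uniform
# grid of width h; e.g. simpson(0.0, 1.0, np.exp, 0.01) is close to np.e - 1.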
|
[
"numpy.arange"
] |
[((193, 227), 'numpy.arange', 'np.arange', (['(a + h / 2)', '(b + h / 2)', 'h'], {}), '(a + h / 2, b + h / 2, h)\n', (202, 227), True, 'import numpy as np\n'), ((370, 392), 'numpy.arange', 'np.arange', (['(a + h)', 'b', 'h'], {}), '(a + h, b, h)\n', (379, 392), True, 'import numpy as np\n'), ((590, 624), 'numpy.arange', 'np.arange', (['(a + h / 2)', '(b + h / 2)', 'h'], {}), '(a + h / 2, b + h / 2, h)\n', (599, 624), True, 'import numpy as np\n'), ((541, 563), 'numpy.arange', 'np.arange', (['(a + h)', 'b', 'h'], {}), '(a + h, b, h)\n', (550, 563), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
class Packing(object):
def getMappedFitness(self, chromosome):
mappedChromosome = self.items[chromosome]
spaces = np.zeros(len(mappedChromosome), dtype=int)
result = np.cumsum(mappedChromosome) - self.BIN_CAPACITY
index_of_old_bin = 0
binsRequired = 0
spacesLeftOpen = []
consumedSpaces = []
itemsInBin = []
while True:
binsRequired += 1
max_accumulate = np.maximum.accumulate(np.flipud(result <= 0))
index_of_new_bin = self.PROBLEM_SIZE - next((idx for idx, val in np.ndenumerate(max_accumulate) if val == True))[0] - 1
space_left_open = np.abs(result[index_of_new_bin])
spaces[index_of_new_bin] = space_left_open
result += space_left_open
spacesLeftOpen.append(space_left_open)
consumedSpaces.append(self.BIN_CAPACITY - space_left_open)
itemsInBin.append(index_of_new_bin - index_of_old_bin)
index_of_old_bin = index_of_new_bin
if np.max(result) <= 0:
break
result -= self.BIN_CAPACITY
exec_result = self.fitTree.execute([spacesLeftOpen, consumedSpaces, itemsInBin], [binsRequired, self.BIN_CAPACITY, 1, 2])
return exec_result, binsRequired
def toStringMappedFitness(self, chromosome):
result = np.cumsum(self.problemSet[chromosome]) - self.BIN_CAPACITY
output = ''
while True:
max_accumulate = np.maximum.accumulate(np.flipud(result <= 0))
index_of_new_bin = self.PROBLEM_SIZE - next((idx for idx, val in np.ndenumerate(max_accumulate) if val == True))[
0] - 1
space_left_open = np.abs(result[index_of_new_bin])
result += space_left_open
output += '|'
output += (self.BIN_CAPACITY - space_left_open - 2) * 'X'
output += '|'
output += '_' * space_left_open
output += '\n'
if np.max(result) <= 0:
break
result -= self.BIN_CAPACITY
return output
def tournamentSelector(self, population, reverse=False):
random_indicies = np.random.randint(self.POPULATION_SIZE, size=self.TOURNAMENT_SIZE).tolist()
tournament = []
for idx, val in np.ndenumerate(random_indicies):
tournament.append(population[val])
results = []
for val in tournament:
result, bin = self.getMappedFitness(val)
results.append(result)
results = np.array(results)
if not reverse:
pos = np.argmin(results)
else:
pos = np.argmax(results)
return population[random_indicies[pos]], random_indicies[pos], results[pos]
def multipleSwapCrossover(self, p1, p2, swaps=4):
draws = np.random.randint(self.PROBLEM_SIZE, size=swaps)
c1 = p1.copy()
c2 = p2.copy()
for i, val in enumerate(draws):
c1item = c1[val]
c2item = c2[val]
c1 = np.delete(c1, np.where(c1 == c2item))
c2 = np.delete(c2, np.where(c2 == c1item))
c1 = np.insert(c1, val, c2item)
c2 = np.insert(c2, val, c1item)
return c1, c2
def multipleMutator(self, p, swaps=4):
draws = np.random.randint(self.PROBLEM_SIZE, size=(swaps, 2))
child = p.copy()
for i, val in enumerate(draws):
tmp = child[val[0]]
child = np.delete(child, val[0])
child = np.insert(child, val[1], tmp)
return child
def tryMutate(self, population):
draw = np.random.rand()
if draw < self.MUTATION_RATE:
p, pos, fit = self.tournamentSelector(population)
_, kpos, _ = self.tournamentSelector(population, reverse=True)
c = self.multipleMutator(p, 1)
population[kpos] = c
return population
def tryCrossover(self, population):
draw = np.random.rand()
if draw < self.CROSSOVER_RATE:
p1, p1pos, p1fit = self.tournamentSelector(population)
p2, p2pos, p2fit = self.tournamentSelector(population)
if any(p1 != p2):
_, k1pos, _ = self.tournamentSelector(population, reverse=True)
_, k2pos, _ = self.tournamentSelector(population, reverse=True)
c1, c2 = self.multipleSwapCrossover(p1, p2, 3)
population[k1pos] = c1
population[k2pos] = c2
else:
p1 = self.multipleMutator(p1, swaps=int(self.PROBLEM_SIZE / 5))
population[p1pos] = p1
return population
def run(self, fitTree, binFile, minBins):
self.problemSet = pd.read_csv(binFile, header=None).values.tolist()
self.PROBLEM_SIZE = self.problemSet.pop(0)[0]
self.BIN_CAPACITY = self.problemSet.pop(0)[0]
self.POPULATION_SIZE = 50
self.TOURNAMENT_SIZE = 4
self.GENERATIONS = 250
self.SAMPLES = 1
self.SAMPLE_RATE = 50
self.MUTATION_RATE = 0.3
self.CROSSOVER_RATE = 1
self.items = pd.DataFrame(self.problemSet)
self.items = np.array(self.items[0])
self.organisedChromosome = np.arange(self.items.size)
assert self.PROBLEM_SIZE == len(self.items)
self.fitTree = fitTree
population = []
chromosome = np.arange(self.PROBLEM_SIZE)
for i in range(self.POPULATION_SIZE):
np.random.shuffle(chromosome)
population.append(chromosome.copy())
foundMin = False
# Mutate and crossover for each generation
for idx, generation in enumerate(range(self.GENERATIONS)):
if foundMin == False:
population = self.tryMutate(population)
population = self.tryCrossover(population)
if idx % self.SAMPLE_RATE == 0:
bins = []
fitness = []
for chromosome in population:
result, bin = self.getMappedFitness(chromosome)
bins.append(bin)
fitness.append(result)
position = int(np.argmin(fitness))
if bins[position] == minBins:
foundMin = True
bins = []
fitness = []
for chromosome in population:
result, bin = self.getMappedFitness(chromosome)
bins.append(bin)
fitness.append(np.array(result))
position = int(np.argmin(fitness))
return fitness[position], bins[position]
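# Note: Packing.run evolves item orderings for one-dimensional bin packing using tournament
# selection, a multiple-swap crossover and a shift mutation; a chromosome's fitness is the
# evolved tree self.fitTree applied to per-bin statistics (space left open, consumed space,
# items per bin) alongside the number of bins required.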
|
[
"pandas.DataFrame",
"numpy.abs",
"numpy.ndenumerate",
"numpy.argmax",
"pandas.read_csv",
"numpy.flipud",
"numpy.argmin",
"numpy.insert",
"numpy.cumsum",
"numpy.max",
"numpy.random.randint",
"numpy.array",
"numpy.arange",
"numpy.where",
"numpy.random.rand",
"numpy.delete",
"numpy.random.shuffle"
] |
[((2352, 2383), 'numpy.ndenumerate', 'np.ndenumerate', (['random_indicies'], {}), '(random_indicies)\n', (2366, 2383), True, 'import numpy as np\n'), ((2591, 2608), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (2599, 2608), True, 'import numpy as np\n'), ((2876, 2924), 'numpy.random.randint', 'np.random.randint', (['self.PROBLEM_SIZE'], {'size': 'swaps'}), '(self.PROBLEM_SIZE, size=swaps)\n', (2893, 2924), True, 'import numpy as np\n'), ((3352, 3405), 'numpy.random.randint', 'np.random.randint', (['self.PROBLEM_SIZE'], {'size': '(swaps, 2)'}), '(self.PROBLEM_SIZE, size=(swaps, 2))\n', (3369, 3405), True, 'import numpy as np\n'), ((3675, 3691), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3689, 3691), True, 'import numpy as np\n'), ((4027, 4043), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4041, 4043), True, 'import numpy as np\n'), ((5189, 5218), 'pandas.DataFrame', 'pd.DataFrame', (['self.problemSet'], {}), '(self.problemSet)\n', (5201, 5218), True, 'import pandas as pd\n'), ((5240, 5263), 'numpy.array', 'np.array', (['self.items[0]'], {}), '(self.items[0])\n', (5248, 5263), True, 'import numpy as np\n'), ((5300, 5326), 'numpy.arange', 'np.arange', (['self.items.size'], {}), '(self.items.size)\n', (5309, 5326), True, 'import numpy as np\n'), ((5458, 5486), 'numpy.arange', 'np.arange', (['self.PROBLEM_SIZE'], {}), '(self.PROBLEM_SIZE)\n', (5467, 5486), True, 'import numpy as np\n'), ((234, 261), 'numpy.cumsum', 'np.cumsum', (['mappedChromosome'], {}), '(mappedChromosome)\n', (243, 261), True, 'import numpy as np\n'), ((703, 735), 'numpy.abs', 'np.abs', (['result[index_of_new_bin]'], {}), '(result[index_of_new_bin])\n', (709, 735), True, 'import numpy as np\n'), ((1403, 1441), 'numpy.cumsum', 'np.cumsum', (['self.problemSet[chromosome]'], {}), '(self.problemSet[chromosome])\n', (1412, 1441), True, 'import numpy as np\n'), ((1756, 1788), 'numpy.abs', 'np.abs', (['result[index_of_new_bin]'], {}), '(result[index_of_new_bin])\n', (1762, 1788), True, 'import numpy as np\n'), ((2651, 2669), 'numpy.argmin', 'np.argmin', (['results'], {}), '(results)\n', (2660, 2669), True, 'import numpy as np\n'), ((2702, 2720), 'numpy.argmax', 'np.argmax', (['results'], {}), '(results)\n', (2711, 2720), True, 'import numpy as np\n'), ((3198, 3224), 'numpy.insert', 'np.insert', (['c1', 'val', 'c2item'], {}), '(c1, val, c2item)\n', (3207, 3224), True, 'import numpy as np\n'), ((3242, 3268), 'numpy.insert', 'np.insert', (['c2', 'val', 'c1item'], {}), '(c2, val, c1item)\n', (3251, 3268), True, 'import numpy as np\n'), ((3525, 3549), 'numpy.delete', 'np.delete', (['child', 'val[0]'], {}), '(child, val[0])\n', (3534, 3549), True, 'import numpy as np\n'), ((3570, 3599), 'numpy.insert', 'np.insert', (['child', 'val[1]', 'tmp'], {}), '(child, val[1], tmp)\n', (3579, 3599), True, 'import numpy as np\n'), ((5545, 5574), 'numpy.random.shuffle', 'np.random.shuffle', (['chromosome'], {}), '(chromosome)\n', (5562, 5574), True, 'import numpy as np\n'), ((6582, 6600), 'numpy.argmin', 'np.argmin', (['fitness'], {}), '(fitness)\n', (6591, 6600), True, 'import numpy as np\n'), ((517, 539), 'numpy.flipud', 'np.flipud', (['(result <= 0)'], {}), '(result <= 0)\n', (526, 539), True, 'import numpy as np\n'), ((1082, 1096), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (1088, 1096), True, 'import numpy as np\n'), ((1553, 1575), 'numpy.flipud', 'np.flipud', (['(result <= 0)'], {}), '(result <= 0)\n', (1562, 1575), True, 'import numpy as np\n'), ((2035, 2049), 'numpy.max', 'np.max', 
(['result'], {}), '(result)\n', (2041, 2049), True, 'import numpy as np\n'), ((2228, 2294), 'numpy.random.randint', 'np.random.randint', (['self.POPULATION_SIZE'], {'size': 'self.TOURNAMENT_SIZE'}), '(self.POPULATION_SIZE, size=self.TOURNAMENT_SIZE)\n', (2245, 2294), True, 'import numpy as np\n'), ((3102, 3124), 'numpy.where', 'np.where', (['(c1 == c2item)'], {}), '(c1 == c2item)\n', (3110, 3124), True, 'import numpy as np\n'), ((3157, 3179), 'numpy.where', 'np.where', (['(c2 == c1item)'], {}), '(c2 == c1item)\n', (3165, 3179), True, 'import numpy as np\n'), ((6540, 6556), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (6548, 6556), True, 'import numpy as np\n'), ((4789, 4822), 'pandas.read_csv', 'pd.read_csv', (['binFile'], {'header': 'None'}), '(binFile, header=None)\n', (4800, 4822), True, 'import pandas as pd\n'), ((6244, 6262), 'numpy.argmin', 'np.argmin', (['fitness'], {}), '(fitness)\n', (6253, 6262), True, 'import numpy as np\n'), ((618, 648), 'numpy.ndenumerate', 'np.ndenumerate', (['max_accumulate'], {}), '(max_accumulate)\n', (632, 648), True, 'import numpy as np\n'), ((1654, 1684), 'numpy.ndenumerate', 'np.ndenumerate', (['max_accumulate'], {}), '(max_accumulate)\n', (1668, 1684), True, 'import numpy as np\n')]
|
'''
Multivariate statistics exercises
================================
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
np.random.seed(seed=42) # make the example reproducible
'''
### Dot product and Euclidean norm
'''
a = np.array([2,1])
b = np.array([1,1])
def euclidian(x):
return np.sqrt(np.dot(x, x))
euclidian(a)
euclidian(a - b)
np.dot(b, a / euclidian(a))
X = np.random.randn(100, 2)
np.dot(X, a / euclidian(a))
'''
### Covariance matrix and Mahalanobis norm
'''
N = 100
mu = np.array([1, 1])
Cov = np.array([[1, .8],
[.8, 1]])
X = np.random.multivariate_normal(mu, Cov, N)
xbar = np.mean(X, axis=0)
print(xbar)
Xc = (X - xbar)
np.mean(Xc, axis=0)
S = 1 / (N - 1) * np.dot(Xc.T, Xc)
print(S)
#import scipy
Sinv = np.linalg.inv(S)
def mahalanobis(x, xbar, Sinv):
xc = x - xbar
return np.sqrt(np.dot(np.dot(xc, Sinv), xc))
dists = pd.DataFrame(
[[mahalanobis(X[i, :], xbar, Sinv),
euclidian(X[i, :] - xbar)] for i in range(X.shape[0])],
columns = ['Mahalanobis', 'Euclidean'])
print(dists[:10])
x = X[0, :]
import scipy.spatial
assert(mahalanobis(X[0, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[0, :], Sinv))
assert(mahalanobis(X[1, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[1, :], Sinv))
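# Mahalanobis distance: d_M(x) = sqrt((x - xbar)^T Sinv (x - xbar)); unlike the Euclidean
# norm it accounts for the correlation structure encoded in the sample covariance S.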
|
[
"numpy.random.seed",
"numpy.random.randn",
"numpy.mean",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.linalg.inv",
"numpy.dot"
] |
[((165, 188), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(42)'}), '(seed=42)\n', (179, 188), True, 'import numpy as np\n'), ((271, 287), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (279, 287), True, 'import numpy as np\n'), ((291, 307), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (299, 307), True, 'import numpy as np\n'), ((425, 448), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (440, 448), True, 'import numpy as np\n'), ((543, 559), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (551, 559), True, 'import numpy as np\n'), ((566, 596), 'numpy.array', 'np.array', (['[[1, 0.8], [0.8, 1]]'], {}), '([[1, 0.8], [0.8, 1]])\n', (574, 596), True, 'import numpy as np\n'), ((616, 657), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'Cov', 'N'], {}), '(mu, Cov, N)\n', (645, 657), True, 'import numpy as np\n'), ((666, 684), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (673, 684), True, 'import numpy as np\n'), ((715, 734), 'numpy.mean', 'np.mean', (['Xc'], {'axis': '(0)'}), '(Xc, axis=0)\n', (722, 734), True, 'import numpy as np\n'), ((803, 819), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (816, 819), True, 'import numpy as np\n'), ((754, 770), 'numpy.dot', 'np.dot', (['Xc.T', 'Xc'], {}), '(Xc.T, Xc)\n', (760, 770), True, 'import numpy as np\n'), ((345, 357), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (351, 357), True, 'import numpy as np\n'), ((898, 914), 'numpy.dot', 'np.dot', (['xc', 'Sinv'], {}), '(xc, Sinv)\n', (904, 914), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from memory import State
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DRRN(torch.nn.Module):
"""
Deep Reinforcement Relevance Network - He et al. '16
"""
def __init__(self, vocab_size, embedding_dim, hidden_dim):
super(DRRN, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.obs_encoder = nn.GRU(embedding_dim, hidden_dim)
self.look_encoder = nn.GRU(embedding_dim, hidden_dim)
self.inv_encoder = nn.GRU(embedding_dim, hidden_dim)
self.act_encoder = nn.GRU(embedding_dim, hidden_dim)
self.hidden = nn.Linear(4 * hidden_dim, hidden_dim)
self.act_scorer = nn.Linear(hidden_dim, 1)
def packed_rnn(self, x, rnn):
""" Runs the provided rnn on the input x. Takes care of packing/unpacking.
x: list of unpadded input sequences
Returns a tensor of size: len(x) x hidden_dim
"""
lengths = torch.tensor([len(n) for n in x], dtype=torch.long, device=device)
# Sort this batch in descending order by seq length
lengths, idx_sort = torch.sort(lengths, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
idx_sort = torch.autograd.Variable(idx_sort)
idx_unsort = torch.autograd.Variable(idx_unsort)
padded_x = pad_sequences(x)
x_tt = torch.from_numpy(padded_x).type(torch.long).to(device)
x_tt = x_tt.index_select(0, idx_sort)
# Run the embedding layer
embed = self.embedding(x_tt).permute(1, 0, 2) # Time x Batch x EncDim
# Pack padded batch of sequences for RNN module
packed = nn.utils.rnn.pack_padded_sequence(embed, lengths.cpu())
# Run the RNN
out, _ = rnn(packed)
# Unpack
out, _ = nn.utils.rnn.pad_packed_sequence(out)
# Get the last step of each sequence
idx = (lengths - 1).view(-1, 1).expand(len(lengths), out.size(2)).unsqueeze(0)
out = out.gather(0, idx).squeeze(0)
# Unsort
out = out.index_select(0, idx_unsort)
return out
def forward(self, state_batch, act_batch, poss_acts, detach=False, cond_weight=0,
cclm=None, cond_threshold=0, args=None, testing_flag=False):
"""
Batched forward pass.
obs_id_batch: iterable of unpadded sequence ids
act_batch: iterable of lists of unpadded admissible command ids
Returns a tuple of tensors containing q-values for each item in the batch
"""
# Zip the state_batch into an easy access format
state = State(*zip(*state_batch))
# This is number of admissible commands in each element of the batch
act_sizes = [len(a) for a in act_batch]
# Combine next actions into one long list
act_batch = list(itertools.chain.from_iterable(act_batch))
act_out = self.packed_rnn(act_batch, self.act_encoder)
# Encode the various aspects of the state
obs_out = self.packed_rnn(state.obs, self.obs_encoder)
look_out = self.packed_rnn(state.description, self.look_encoder)
inv_out = self.packed_rnn(state.inventory, self.inv_encoder)
state_out = torch.cat((obs_out, look_out, inv_out), dim=1)
# Expand the state to match the batches of actions
state_out = torch.cat([state_out[i].repeat(j, 1) for i, j in enumerate(act_sizes)], dim=0)
z = torch.cat((state_out, act_out), dim=1) # Concat along hidden_dim
z = F.relu(self.hidden(z))
act_values = self.act_scorer(z).squeeze(-1)
if detach:
act_values = act_values.detach().cpu()
q_values = act_values.split(act_sizes)
if cclm:
cond_probs = cclm.get_probs(state_batch, poss_acts, cond_threshold=cond_threshold, testing_flag=testing_flag)
if cond_weight > 0:
if args.adaptive_cond_weight:
new_q_values = []
for vals, probs in zip(q_values, cond_probs):
probs = probs.int()
scaling = (cond_weight * probs) + (1 - probs)
new_q_values.append(torch.log(F.softmax(vals, dim=0) * scaling) + torch.logsumexp(vals, dim=0))
q_values = new_q_values
elif args.adaptive_cond_weight2:
new_q_values = []
for vals, probs in zip(q_values, cond_probs):
probs = probs.int() # basically an indicator for whether the action is immoral
mult_weight = torch.minimum(2 * F.softmax(vals, dim=0).pow(0.5), torch.ones_like(vals))
scaling = (mult_weight * probs) + (1 - probs)
new_q_values.append(torch.log(F.softmax(vals, dim=0) * scaling) + torch.logsumexp(vals, dim=0))
q_values = new_q_values
else:
q_values = [vals - cond_weight * probs for vals, probs in zip(q_values, cond_probs)]
else:
raise NotImplementedError
else:
cond_probs = q_values
if cond_weight > 0:
raise NotImplementedError
q_values = [vals - cond_weight * torch.rand(vals.shape, device=device) for vals, probs in zip(q_values, cond_probs)]
# Split up the q-values by batch
return q_values, cond_probs
@torch.no_grad()
def act(self, states, poss_acts_tokenized, poss_acts, lm=None, eps=None, alpha=0, k=-1, argmax=False, cond_weight=0, cclm=None, cond_threshold=0, args=None):
""" Returns an action-string, optionally sampling from the distribution
of Q-Values.
"""
valid_ids = poss_acts_tokenized
q_values, cond_probs = self.forward(states, valid_ids, poss_acts, detach=False, cond_weight=cond_weight, cclm=cclm,
cond_threshold=cond_threshold, args=args) # detach only when using two GPUs
if alpha > 0 or (eps is not None and k != -1): # need to use lm_values
lm_values = [torch.tensor(lm.score(state.obs, act_ids), device=device) for state, act_ids in
zip(states, valid_ids)]
act_values = [q_value * (1 - alpha) + bert_value * alpha
for q_value, bert_value in zip(q_values, lm_values)]
else:
act_values = q_values
if eps is None: # sample ~ softmax(act_values)
if argmax:
sampling_func = torch.argmax
else:
sampling_func = lambda vals: torch.multinomial(F.softmax(vals, dim=0), num_samples=1)
act_idxs = [sampling_func(vals).item() for vals in act_values]
else: # w.p. eps, ~ softmax(act_values) | uniform(top_k(act_values)), w.p. (1-eps) arg max q_values
raise NotImplementedError
if k == 0: # soft sampling
act_idxs = [torch.multinomial(F.softmax(vals, dim=0), num_samples=1).item() for vals in lm_values]
elif k == -1:
act_idxs = [np.random.choice(range(len(vals))) for vals in q_values]
else: # hard (uniform) sampling
act_idxs = [np.random.choice(vals.topk(k=min(k, len(vals)), dim=0).indices.tolist()) for vals in
lm_values]
act_idxs = [vals.argmax(dim=0).item() if np.random.rand() > eps else idx for idx, vals in
zip(act_idxs, q_values)]
return act_idxs, act_values, cond_probs
def pad_sequences(sequences, maxlen=None, dtype='int32', value=0.):
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
# pre truncating
trunc = s[-maxlen:]
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
# post padding
x[idx, :len(trunc)] = trunc
return x
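# Note: pad_sequences keeps the last `maxlen` tokens of each sequence (pre-truncation) and
# post-pads shorter sequences with `value`, so every row of the returned array has length maxlen.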
|
[
"torch.nn.Embedding",
"torch.cat",
"numpy.ones",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.no_grad",
"numpy.max",
"torch.nn.Linear",
"torch.nn.GRU",
"torch.logsumexp",
"torch.autograd.Variable",
"numpy.asarray",
"torch.cuda.is_available",
"torch.rand",
"torch.sort",
"torch.from_numpy",
"torch.ones_like",
"torch.nn.functional.softmax",
"numpy.random.rand",
"itertools.chain.from_iterable"
] |
[((5580, 5595), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5593, 5595), False, 'import torch\n'), ((162, 187), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (185, 187), False, 'import torch\n'), ((434, 473), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (446, 473), True, 'import torch.nn as nn\n'), ((501, 534), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (507, 534), True, 'import torch.nn as nn\n'), ((563, 596), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (569, 596), True, 'import torch.nn as nn\n'), ((624, 657), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (630, 657), True, 'import torch.nn as nn\n'), ((685, 718), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (691, 718), True, 'import torch.nn as nn\n'), ((741, 778), 'torch.nn.Linear', 'nn.Linear', (['(4 * hidden_dim)', 'hidden_dim'], {}), '(4 * hidden_dim, hidden_dim)\n', (750, 778), True, 'import torch.nn as nn\n'), ((805, 829), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (814, 829), True, 'import torch.nn as nn\n'), ((1239, 1282), 'torch.sort', 'torch.sort', (['lengths'], {'dim': '(0)', 'descending': '(True)'}), '(lengths, dim=0, descending=True)\n', (1249, 1282), False, 'import torch\n'), ((1307, 1334), 'torch.sort', 'torch.sort', (['idx_sort'], {'dim': '(0)'}), '(idx_sort, dim=0)\n', (1317, 1334), False, 'import torch\n'), ((1354, 1387), 'torch.autograd.Variable', 'torch.autograd.Variable', (['idx_sort'], {}), '(idx_sort)\n', (1377, 1387), False, 'import torch\n'), ((1409, 1444), 'torch.autograd.Variable', 'torch.autograd.Variable', (['idx_unsort'], {}), '(idx_unsort)\n', (1432, 1444), False, 'import torch\n'), ((1924, 1961), 'torch.nn.utils.rnn.pad_packed_sequence', 'nn.utils.rnn.pad_packed_sequence', (['out'], {}), '(out)\n', (1956, 1961), True, 'import torch.nn as nn\n'), ((3343, 3389), 'torch.cat', 'torch.cat', (['(obs_out, look_out, inv_out)'], {'dim': '(1)'}), '((obs_out, look_out, inv_out), dim=1)\n', (3352, 3389), False, 'import torch\n'), ((3560, 3598), 'torch.cat', 'torch.cat', (['(state_out, act_out)'], {'dim': '(1)'}), '((state_out, act_out), dim=1)\n', (3569, 3598), False, 'import torch\n'), ((7911, 7926), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (7917, 7926), True, 'import numpy as np\n'), ((8485, 8515), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (8495, 8515), True, 'import numpy as np\n'), ((2963, 3003), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['act_batch'], {}), '(act_batch)\n', (2992, 3003), False, 'import itertools\n'), ((8196, 8240), 'numpy.ones', 'np.ones', (['((nb_samples, maxlen) + sample_shape)'], {}), '((nb_samples, maxlen) + sample_shape)\n', (8203, 8240), True, 'import numpy as np\n'), ((8145, 8158), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (8155, 8158), True, 'import numpy as np\n'), ((1496, 1522), 'torch.from_numpy', 'torch.from_numpy', (['padded_x'], {}), '(padded_x)\n', (1512, 1522), False, 'import torch\n'), ((6804, 6826), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (6813, 6826), True, 'import torch.nn.functional as F\n'), ((7581, 7597), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7595, 7597), True, 
'import numpy as np\n'), ((5412, 5449), 'torch.rand', 'torch.rand', (['vals.shape'], {'device': 'device'}), '(vals.shape, device=device)\n', (5422, 5449), False, 'import torch\n'), ((4356, 4384), 'torch.logsumexp', 'torch.logsumexp', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4371, 4384), False, 'import torch\n'), ((4776, 4797), 'torch.ones_like', 'torch.ones_like', (['vals'], {}), '(vals)\n', (4791, 4797), False, 'import torch\n'), ((7151, 7173), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (7160, 7173), True, 'import torch.nn.functional as F\n'), ((4959, 4987), 'torch.logsumexp', 'torch.logsumexp', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4974, 4987), False, 'import torch\n'), ((4320, 4342), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4329, 4342), True, 'import torch.nn.functional as F\n'), ((4743, 4765), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4752, 4765), True, 'import torch.nn.functional as F\n'), ((4923, 4945), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4932, 4945), True, 'import torch.nn.functional as F\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import itertools
import numpy as np
from absl.testing import parameterized
from tensorflow.python.client import session as sl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python import ipu
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
# Error threshold for forward pass test.
THRESHOLD = 0.03
# Dimensions of the random data tensor.
DIMS = (1024, 1024, 4)
# Initialise with a random seed.
SEED = np.random.randint(np.iinfo(np.int32).max, size=[2], dtype=np.int32)
# Number of times to verify output for a given seed.
SEED_TEST_REPETITIONS = 6
def build_test_cases(exhaustive=False):
# Dropout rate(s) to test.
rate = [0.1, 0.5, 0.9] if exhaustive else [0.5]
# User specified and non-specified cases.
seed = [SEED, None]
# Shape of the dropout.
# Note that shaping the dropout such that a very large portion of
# the input weights are dropped will fail the test criteria, as expected.
noise_shape = [[], [DIMS[0], DIMS[1], 1]]
if exhaustive:
noise_shape.append([DIMS[0], 1, DIMS[2]])
noise_shape.append([1, DIMS[1], DIMS[2]])
# Get the cartesian product (can get very large).
prod = itertools.product(rate, seed, noise_shape)
test_cases = []
for n, perm in enumerate(prod):
test = {
'testcase_name': ' Case: %3d' % n,
'rate': perm[0],
'seed': perm[1],
'noise_shape': perm[2]
}
test_cases.append(test)
return test_cases
# Default is not to test every combination.
TEST_CASES = build_test_cases()
class PopnnRandomDropoutTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@staticmethod
def _ipu_dropout(w, rate, seed, noise_shape):
output = ipu.ops.rand_ops.dropout(w,
rate=rate,
seed=seed,
noise_shape=noise_shape)
return [output]
@staticmethod
def _setup_test(f):
with ops.device('cpu'):
input_data = array_ops.placeholder(np.float32, DIMS)
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(f, inputs=[input_data])
cfg = ipu.utils.create_ipu_config()
cfg = ipu.utils.set_ipu_model_options(cfg, compile_ipu_code=False)
ipu.utils.configure_ipu_system(cfg)
return r, input_data
@test_util.deprecated_graph_mode_only
def testInvalidNoiseShape(self):
in_data = np.random.rand(16, 8, 16)
print(in_data.shape)
seed = np.array([12, 34], dtype=np.int32)
with sl.Session() as sess:
with self.assertRaisesRegex(ValueError, "must equal the rank of x."):
def _wrong_length(w):
return self._ipu_dropout(w, 0.5, seed, [1])
r, input_data = self._setup_test(_wrong_length)
_ = sess.run(r, {input_data: in_data})
with self.assertRaisesRegex(ValueError, "Dimension mismatch"):
def _wrong_dims(w):
return self._ipu_dropout(w, 0.5, seed, [8, 1, 16])
r, input_data = self._setup_test(_wrong_dims)
_ = sess.run(r, {input_data: in_data})
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testDropout(self, rate, seed, noise_shape):
def _run_dropout(w):
return self._ipu_dropout(w, rate, seed, noise_shape)
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.random.rand(*DIMS)
result = sess.run(r, {input_data: in_data})
percent_kept = np.count_nonzero(result) / np.count_nonzero(in_data)
      # There's a considerable amount of randomness so we have a reasonably
      # large dimensionality of test data to make sure the error is small.
is_roughly_close = abs(percent_kept - (1.0 - rate))
# The observed error is actually a lot less than this (>1%) but we don't
# want to cause random regressions and 3% is probably still acceptable
# for any outlier randoms.
self.assertTrue(is_roughly_close < THRESHOLD)
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testUserSeed(self, rate, seed, noise_shape):
def _run_dropout(w):
return self._ipu_dropout(w, rate, seed, noise_shape)
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.random.rand(*DIMS)
# For a given output, verify that each subsequent output is equal to it.
first_result = None
for _ in range(SEED_TEST_REPETITIONS):
result = sess.run(r, {input_data: in_data})
if first_result is None:
first_result = result
continue
self.assertAllEqual(first_result, result)
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testDropoutBackwardPass(self, rate, seed, noise_shape):
def _run_dropout(w):
output = self._ipu_dropout(w, rate, seed, noise_shape)
largest = output
cost = math_ops.square(largest)
opt = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
gradients = opt.compute_gradients(cost, w)
return [output, gradients]
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.random.rand(*DIMS)
result = sess.run(r, {input_data: in_data})
dropout_out = result[0]
gradients = result[1][0][0]
# Check we have the same number of zeros.
self.assertAllEqual(np.count_nonzero(dropout_out),
np.count_nonzero(gradients))
@parameterized.named_parameters(*TEST_CASES)
@test_util.deprecated_graph_mode_only
def testScaling(self, rate, seed, noise_shape):
def _run_dropout(w):
return self._ipu_dropout(w, rate, seed, noise_shape)
r, input_data = self._setup_test(_run_dropout)
with sl.Session() as sess:
in_data = np.ones(DIMS)
[result] = sess.run(r, {input_data: in_data})
kept_values = result[np.nonzero(result)]
expected_kept_values = 1 / (1 - rate) * np.ones(kept_values.shape)
self.assertAllClose(kept_values, expected_kept_values)
if __name__ == "__main__":
googletest.main()
|
[
"numpy.iinfo",
"numpy.ones",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ipu.utils.set_ipu_model_options",
"tensorflow.python.client.session.Session",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.array_ops.placeholder",
"itertools.product",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ipu.utils.configure_ipu_system",
"absl.testing.parameterized.named_parameters",
"tensorflow.python.ipu.scopes.ipu_scope",
"numpy.count_nonzero",
"tensorflow.python.ipu.ops.rand_ops.dropout",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"numpy.nonzero",
"numpy.array",
"numpy.random.rand",
"tensorflow.python.ipu.utils.create_ipu_config"
] |
[((2023, 2065), 'itertools.product', 'itertools.product', (['rate', 'seed', 'noise_shape'], {}), '(rate, seed, noise_shape)\n', (2040, 2065), False, 'import itertools\n'), ((3961, 4004), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TEST_CASES'], {}), '(*TEST_CASES)\n', (3991, 4004), False, 'from absl.testing import parameterized\n'), ((4884, 4927), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TEST_CASES'], {}), '(*TEST_CASES)\n', (4914, 4927), False, 'from absl.testing import parameterized\n'), ((5568, 5611), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TEST_CASES'], {}), '(*TEST_CASES)\n', (5598, 5611), False, 'from absl.testing import parameterized\n'), ((6421, 6464), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TEST_CASES'], {}), '(*TEST_CASES)\n', (6451, 6464), False, 'from absl.testing import parameterized\n'), ((7019, 7036), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (7034, 7036), False, 'from tensorflow.python.platform import googletest\n'), ((1319, 1337), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1327, 1337), True, 'import numpy as np\n'), ((2582, 2656), 'tensorflow.python.ipu.ops.rand_ops.dropout', 'ipu.ops.rand_ops.dropout', (['w'], {'rate': 'rate', 'seed': 'seed', 'noise_shape': 'noise_shape'}), '(w, rate=rate, seed=seed, noise_shape=noise_shape)\n', (2606, 2656), False, 'from tensorflow.python import ipu\n'), ((3301, 3326), 'numpy.random.rand', 'np.random.rand', (['(16)', '(8)', '(16)'], {}), '(16, 8, 16)\n', (3315, 3326), True, 'import numpy as np\n'), ((3363, 3397), 'numpy.array', 'np.array', (['[12, 34]'], {'dtype': 'np.int32'}), '([12, 34], dtype=np.int32)\n', (3371, 3397), True, 'import numpy as np\n'), ((2839, 2856), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2849, 2856), False, 'from tensorflow.python.framework import ops\n'), ((2877, 2916), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.float32', 'DIMS'], {}), '(np.float32, DIMS)\n', (2898, 2916), False, 'from tensorflow.python.ops import array_ops\n'), ((2927, 2964), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2947, 2964), False, 'from tensorflow.python import ipu\n'), ((2976, 3024), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['f'], {'inputs': '[input_data]'}), '(f, inputs=[input_data])\n', (3000, 3024), False, 'from tensorflow.python import ipu\n'), ((3038, 3067), 'tensorflow.python.ipu.utils.create_ipu_config', 'ipu.utils.create_ipu_config', ([], {}), '()\n', (3065, 3067), False, 'from tensorflow.python import ipu\n'), ((3080, 3140), 'tensorflow.python.ipu.utils.set_ipu_model_options', 'ipu.utils.set_ipu_model_options', (['cfg'], {'compile_ipu_code': '(False)'}), '(cfg, compile_ipu_code=False)\n', (3111, 3140), False, 'from tensorflow.python import ipu\n'), ((3147, 3182), 'tensorflow.python.ipu.utils.configure_ipu_system', 'ipu.utils.configure_ipu_system', (['cfg'], {}), '(cfg)\n', (3177, 3182), False, 'from tensorflow.python import ipu\n'), ((3408, 3420), 'tensorflow.python.client.session.Session', 'sl.Session', ([], {}), '()\n', (3418, 3420), True, 'from tensorflow.python.client import session as sl\n'), ((4241, 4253), 'tensorflow.python.client.session.Session', 'sl.Session', ([], {}), '()\n', (4251, 4253), True, 
'from tensorflow.python.client import session as sl\n'), ((4279, 4300), 'numpy.random.rand', 'np.random.rand', (['*DIMS'], {}), '(*DIMS)\n', (4293, 4300), True, 'import numpy as np\n'), ((5165, 5177), 'tensorflow.python.client.session.Session', 'sl.Session', ([], {}), '()\n', (5175, 5177), True, 'from tensorflow.python.client import session as sl\n'), ((5203, 5224), 'numpy.random.rand', 'np.random.rand', (['*DIMS'], {}), '(*DIMS)\n', (5217, 5224), True, 'import numpy as np\n'), ((5837, 5861), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['largest'], {}), '(largest)\n', (5852, 5861), False, 'from tensorflow.python.ops import math_ops\n'), ((5875, 5935), 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer', 'gradient_descent.GradientDescentOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (5916, 5935), False, 'from tensorflow.python.training import gradient_descent\n'), ((6081, 6093), 'tensorflow.python.client.session.Session', 'sl.Session', ([], {}), '()\n', (6091, 6093), True, 'from tensorflow.python.client import session as sl\n'), ((6119, 6140), 'numpy.random.rand', 'np.random.rand', (['*DIMS'], {}), '(*DIMS)\n', (6133, 6140), True, 'import numpy as np\n'), ((6701, 6713), 'tensorflow.python.client.session.Session', 'sl.Session', ([], {}), '()\n', (6711, 6713), True, 'from tensorflow.python.client import session as sl\n'), ((6739, 6752), 'numpy.ones', 'np.ones', (['DIMS'], {}), '(DIMS)\n', (6746, 6752), True, 'import numpy as np\n'), ((4372, 4396), 'numpy.count_nonzero', 'np.count_nonzero', (['result'], {}), '(result)\n', (4388, 4396), True, 'import numpy as np\n'), ((4399, 4424), 'numpy.count_nonzero', 'np.count_nonzero', (['in_data'], {}), '(in_data)\n', (4415, 4424), True, 'import numpy as np\n'), ((6331, 6360), 'numpy.count_nonzero', 'np.count_nonzero', (['dropout_out'], {}), '(dropout_out)\n', (6347, 6360), True, 'import numpy as np\n'), ((6388, 6415), 'numpy.count_nonzero', 'np.count_nonzero', (['gradients'], {}), '(gradients)\n', (6404, 6415), True, 'import numpy as np\n'), ((6833, 6851), 'numpy.nonzero', 'np.nonzero', (['result'], {}), '(result)\n', (6843, 6851), True, 'import numpy as np\n'), ((6899, 6925), 'numpy.ones', 'np.ones', (['kept_values.shape'], {}), '(kept_values.shape)\n', (6906, 6925), True, 'import numpy as np\n')]
|
from static import *
from lib import map_value
from point import Point
from ray import Ray
import numpy as np
import random
import math
class Source:
def __init__(self, x, y, fov, pg, screen):
self.pos = Point(x, y)
self.angle = np.random.randint(0, 360)
self.view_mode = 0
self.pg = pg
self.screen = screen
self.fov = fov
return
def generate_rays(self):
''' list to store all light ray objects emerging from light source '''
self.rays = []
self.ray_color = BLUE
self.point_color = GREEN
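        # Spread N rays evenly across the field of view (angles converted to radians).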
for i in range(0, N):
angle = i*self.fov/N * np.pi/180
self.rays.append(Ray(self.pos.x, self.pos.y, self.ray_color, self.point_color, self.pg, self.screen, angle))
return
def change_ray_colors(self):
self.ray_color = random.choice(COLORS)
self.point_color = random.choice(COLORS)
for ray in self.rays:
ray.change_color(self.ray_color, self.point_color)
return
def move(self, x, y):
self.pos.move(x, y)
for ray in self.rays:
ray.move(x, y)
return
def dist(self, ip):
return np.sqrt(np.sum([(self.pos.x-ip[0])**2, (self.pos.y-ip[1])**2]))
def draw(self):
self.pg.draw.rect(self.screen, BLACK, (0, 0, SWIDTH, HEIGHT))
if (self.pos.x < WIDTH):
self.pg.draw.circle(self.screen, GREEN, (self.pos.x, self.pos.y), 10)
return
''' 3D Rendering of ray-casting process '''
''' There are dozens of other ways to map 2D info to 3D, '''
''' which affects how the rendering process looks like to our eyes. '''
    ''' parameters i and distance refer to the index of a ray and its distance to the nearest wall '''
''' '''
def draw3D(self, i, distance, color):
if distance==0:
return
''' width of rectangle being rendered in 3D '''
dx = int(WIDTH/N)
''' height of rectangle being rendered in 3D '''
if VIEW_MODES[self.view_mode] == 'tangent':
dy = int(DISTORTION_ANGLE/distance)
elif VIEW_MODES[self.view_mode] == 'cosine':
dy = int((N*HEIGHT/distance)*math.cos(abs(i*(self.fov/N)-self.fov)*math.pi/180))
elif VIEW_MODES[self.view_mode] == 'fisheye':
dy = int(HEIGHT-distance)
''' color value provides an effect in which wall's color being altered '''
''' depending on its distance to the light source '''
#color = 255-map_value(distance)
color = tuple([v-map_value(distance, v) for v in color])
try:
self.pg.draw.rect(self.screen, color, (WIDTH + (i*dx), int((HEIGHT-dy)/2), dx, dy))
except:
pass
return
|
[
"numpy.sum",
"lib.map_value",
"random.choice",
"numpy.random.randint",
"ray.Ray",
"point.Point"
] |
[((220, 231), 'point.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (225, 231), False, 'from point import Point\n'), ((253, 278), 'numpy.random.randint', 'np.random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (270, 278), True, 'import numpy as np\n'), ((876, 897), 'random.choice', 'random.choice', (['COLORS'], {}), '(COLORS)\n', (889, 897), False, 'import random\n'), ((925, 946), 'random.choice', 'random.choice', (['COLORS'], {}), '(COLORS)\n', (938, 946), False, 'import random\n'), ((1243, 1305), 'numpy.sum', 'np.sum', (['[(self.pos.x - ip[0]) ** 2, (self.pos.y - ip[1]) ** 2]'], {}), '([(self.pos.x - ip[0]) ** 2, (self.pos.y - ip[1]) ** 2])\n', (1249, 1305), True, 'import numpy as np\n'), ((706, 801), 'ray.Ray', 'Ray', (['self.pos.x', 'self.pos.y', 'self.ray_color', 'self.point_color', 'self.pg', 'self.screen', 'angle'], {}), '(self.pos.x, self.pos.y, self.ray_color, self.point_color, self.pg, self\n .screen, angle)\n', (709, 801), False, 'from ray import Ray\n'), ((2611, 2633), 'lib.map_value', 'map_value', (['distance', 'v'], {}), '(distance, v)\n', (2620, 2633), False, 'from lib import map_value\n')]
|
"""Simple client to the Channel Archiver using xmlrpc."""
import logging as log
from xmlrpc.client import ServerProxy
import numpy
from . import data, utils
from .fetcher import Fetcher
__all__ = [
"CaClient",
"CaFetcher",
]
class CaClient(object):
"""Class to handle XMLRPC interaction with a channel archiver."""
def __init__(self, url):
"""
Args:
url: url for the channel archiver
"""
self._proxy = ServerProxy(url)
@staticmethod
def _create_archive_event(pv, ca_event):
"""Create ArchiveEvent from the objects received over XMLRPC.
Args:
pv: PV name to add to the event
ca_event: object received over XMLRPC
Returns:
ArchiveEvent object
"""
value = ca_event["value"]
timestamp = ca_event["secs"] + 1e-9 * ca_event["nano"]
severity = ca_event["sevr"]
return data.ArchiveEvent(pv, value, timestamp, severity)
def get(self, pv, start, end, count):
"""Request events over XMLRPC.
Args:
pv: PV name to request events for
start: datetime of start of requested period
end: datetime of end of requested period
count: maximum number of events to retrieve
Returns:
List of ArchiveEvent objects
"""
start_secs = utils.datetime_to_epoch(start)
end_secs = utils.datetime_to_epoch(end)
response = self._proxy.archiver.values(
1, [pv], start_secs, 0, end_secs, 0, count, 0
)
return [
CaClient._create_archive_event(pv, val) for val in response[0]["values"]
]
class CaFetcher(Fetcher):
"""Class to retrieve data from a channel archiver."""
def __init__(self, url):
"""
Args:
url: url for the channel archiver
"""
self._client = CaClient(url)
def _get_values(self, pv, start, end=None, count=None, request_params=None):
# Make count a large number if not specified to ensure we get all
# data.
count = 2 ** 31 if count is None else count
empty_array = numpy.zeros((0,))
all_data = data.ArchiveData(pv, empty_array, empty_array, empty_array)
last_timestamp = -1
done = False
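        # Page through the archive in chunks (up to 10000 events per request),
        # restarting each request from the last timestamp received; stop once a
        # request returns fewer events than asked for or `count` is reached.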
while done is not True and len(all_data) < count:
requested = min(count - len(all_data), 10000)
if all_data.timestamps.size:
last_timestamp = all_data.timestamps[-1]
start = utils.epoch_to_datetime(last_timestamp)
log.info("Request PV {} for {} samples.".format(pv, requested))
log.info("Request start {} end {}".format(start, end))
events = self._client.get(pv, start, end, requested)
done = len(events) < requested
# Drop any events that are earlier than ones already fetched.
events = [e for e in events if e.timestamp > last_timestamp]
new_data = data.data_from_events(pv, events)
all_data = all_data.concatenate(new_data, zero_pad=True)
return all_data
|
[
"numpy.zeros",
"xmlrpc.client.ServerProxy"
] |
[((468, 484), 'xmlrpc.client.ServerProxy', 'ServerProxy', (['url'], {}), '(url)\n', (479, 484), False, 'from xmlrpc.client import ServerProxy\n'), ((2181, 2198), 'numpy.zeros', 'numpy.zeros', (['(0,)'], {}), '((0,))\n', (2192, 2198), False, 'import numpy\n')]
|
import random as rn
import numpy as np
import matplotlib.pyplot as plt
import math
from matplotlib import patches
from matplotlib.patches import Polygon
def random_population(_nv, n, _lb, _ub):
_pop = np.zeros((n, 2 * nv))
for i in range(n):
_pop[i, :] = np.random.uniform(lb, ub)
for j in range(int(_pop[i, :].size / 2)):
if _pop[i, j * 2] < 0:
_pop[i, j * 2] = int(-1)
else:
_pop[i, j * 2] = int(1)
return _pop
def crossover(_pop, crossover_rate):
next_gen = np.zeros((crossover_rate, _pop.shape[1]))
for i in range(int(crossover_rate / 2)):
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
while r1 == r2:
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
cutting_point = np.random.randint(1, _pop.shape[1])
next_gen[2 * i, 0:cutting_point] = _pop[r1, 0:cutting_point]
next_gen[2 * i, cutting_point:] = _pop[r2, cutting_point:]
next_gen[2 * i + 1, 0:cutting_point] = _pop[r2, 0:cutting_point]
next_gen[2 * i + 1, cutting_point:] = _pop[r1, cutting_point:]
return next_gen
def mutation(_pop, mutation_rate):
next_gen = np.zeros((mutation_rate, _pop.shape[1]))
for i in range(int(mutation_rate / 2)):
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
while r1 == r2:
r1 = np.random.randint(0, _pop.shape[0])
r2 = np.random.randint(0, _pop.shape[0])
cutting_point = np.random.randint(0, _pop.shape[1])
next_gen[2 * i] = _pop[r1]
next_gen[2 * i, cutting_point] = _pop[r2, cutting_point]
next_gen[2 * i + 1] = _pop[r2]
next_gen[2 * i + 1, cutting_point] = _pop[r1, cutting_point]
return next_gen
def local_search(_pop, n, _step_size):
next_gen = np.zeros((n, _pop.shape[1]))
for i in range(n):
r1 = np.random.randint(0, _pop.shape[0])
unit = _pop[r1, :]
unit[1] += np.random.uniform(-_step_size, _step_size)
if unit[1] < lb[1]:
unit[1] = lb[1]
if unit[1] > ub[1]:
unit[1] = ub[1]
next_gen[i, :] = unit
return next_gen
def evaluation(_pop, x_s, y_s, alfa_s, _done):
_fitness_values = np.zeros((_pop.shape[0], 2))
_flipped_fitness_values = np.zeros((_pop.shape[0], 2))
i = 0
_trajectory = []
V = np.zeros(nv)
angle = np.zeros(nv)
for individual in _pop:
for n in range(nv):
V[n] = individual[2 * n]
angle[n] = individual[2 * n + 1]
x = x_s - ds * math.cos(alfa_s)
y = y_s - ds * math.sin(alfa_s)
alfa_n = alfa_s
for u in range(nv):
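            # Kinematic update per control step: near-zero steering moves the car
            # straight along its heading, otherwise it follows a circular arc of
            # radius Ro set by the steering angle (bicycle-style model).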
if abs(angle[u]) < 0.0001:
x_n = x + V[u] * math.cos(alfa_n)
y_n = y + V[u] * math.sin(alfa_n)
else:
a = dist_between_axles / math.tan(angle[u])
Ro = math.sqrt(dist_between_axles ** 2 / 4 + (abs(a) + car_width / 2) ** 2)
tau = math.copysign(1, angle[u]) * alfa_n + a * math.sin(dist_between_axles / 2 * Ro)
gama = V[u] * dt / Ro
x_n = x + Ro * (math.sin(gama + tau) - math.sin(tau))
y_n = y + math.copysign(1, angle[u]) * Ro * (math.cos(tau) - math.cos(gama + tau))
alfa_n = alfa_n + math.copysign(1, angle[u]) * gama
if abs(alfa_n) > math.pi:
alfa_n = alfa_n - math.copysign(1, alfa_n) * math.pi * 2
x = x_n + ds * math.cos(alfa_n)
y = y_n + ds * math.sin(alfa_n)
for j in range(2):
if j == 0: # objective 1
                    if x < -5 or x > parking_length or y < -5 or y > parking_width:  # out-of-bounds penalty
_fitness_values[i, j] = 1000
else:
_fitness_values[i, j] = math.sqrt(x ** 2 + y ** 2)
elif j == 1: # objective 2
_fitness_values[i, j] = beta - alfa_n
_flipped_fitness_values[i, 0] = 1 / _fitness_values[i, 0]
_flipped_fitness_values[i, 1] = 1 / _fitness_values[i, 1]
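            # Success test: within 0.8 m of the spot centre and heading within
            # ~10 deg (0.1745 rad) of the target angle, parked either nose-in or
            # reversed (pi - 0.1745 ~= 2.9671 rad).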
if _fitness_values[i, 0] <= 0.8 and \
(abs(_fitness_values[i, 1]) <= 0.1745 or abs(_fitness_values[i, 1]) >= 2.9671):
_done = True
if final is True:
_trajectory = np.append(_trajectory, [individual])
i = i + 1
return _fitness_values, _trajectory, _done, _flipped_fitness_values
def best_individuals_visualization(best, x_s, y_s, alfa_s):
_positions_x = []
_positions_y = []
_car_angle = []
i = 0
C = nv * 2
V = np.zeros(nv)
angle = np.zeros(nv)
best_units = np.array_split(best, len(best) / C)
for individual in best_units:
for n in range(nv):
V[n] = individual[2 * n]
angle[n] = individual[2 * n + 1]
x = x_s - ds * math.cos(alfa_s)
y = y_s - ds * math.sin(alfa_s)
alfa_n = alfa_s
for u in range(nv):
if abs(angle[u]) < 0.0001:
x_n = x + V[u] * dt * math.cos(alfa_n)
y_n = y + V[u] * dt * math.sin(alfa_n)
else:
a = dist_between_axles / math.tan(angle[u])
Ro = math.sqrt(dist_between_axles ** 2 / 4 + (abs(a) + car_width / 2) ** 2)
tau = math.copysign(1, angle[u]) * alfa_n + a * math.sin(dist_between_axles / 2 * Ro)
gama = V[u] * dt / Ro
x_n = x + Ro * (math.sin(gama + tau) - math.sin(tau))
y_n = y + math.copysign(1, angle[u]) * Ro * (math.cos(tau) - math.cos(gama + tau))
alfa_n = alfa_n + math.copysign(1, angle[u]) * gama
if abs(alfa_n) > math.pi:
alfa_n = alfa_n - math.copysign(1, alfa_n) * math.pi * 2
x = x_n + ds * math.cos(alfa_n)
y = y_n + ds * math.sin(alfa_n)
_positions_x = np.append(_positions_x, [x])
_positions_y = np.append(_positions_y, [y])
_car_angle = np.append(_car_angle, [alfa_n])
i = i + 1
position_x_arr = _positions_x
position_y_arr = _positions_y
car_angles_arr = _car_angle
return position_x_arr, position_y_arr, car_angles_arr
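# NSGA-II-style crowding distance: per objective, sort the normalized fitness
# values and credit each solution with the spread between its two neighbours;
# the boundary solutions are assigned the maximum (1) so they are always favoured.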
def crowding_calculation(_fitness_values):
_pop_size = len(_fitness_values[:, 0])
fitness_value_number = len(_fitness_values[0, :])
matrix_for_crowding = np.zeros((_pop_size, fitness_value_number))
normalize_fitness_values = (_fitness_values - _fitness_values.min(0)) / _fitness_values.ptp(0) # normalize fit val
for i in range(fitness_value_number):
crowding_results = np.zeros(_pop_size)
crowding_results[0] = 1 # extreme point has the max crowding distance
crowding_results[_pop_size - 1] = 1 # extreme point has the max crowding distance
sorting_normalize_fitness_values = np.sort(normalize_fitness_values[:, i])
sorting_normalized_values_index = np.argsort(normalize_fitness_values[:, i])
# crowding distance calculation
crowding_results[1:_pop_size - 1] = (
sorting_normalize_fitness_values[2:_pop_size] - sorting_normalize_fitness_values[0:_pop_size - 2])
re_sorting = np.argsort(sorting_normalized_values_index) # re_sorting to the original order
matrix_for_crowding[:, i] = crowding_results[re_sorting]
crowding_distance = np.sum(matrix_for_crowding, axis=1) # crowding distance of each solution
return crowding_distance
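# Trim an over-full Pareto front to the required size: repeatedly draw two
# random candidates and keep the one with the larger crowding distance
# (better diversity), removing it from the remaining pool.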
def remove_using_crowding(_fitness_values, number_solutions_needed):
pop_index = np.arange(_fitness_values.shape[0])
crowding_distance = crowding_calculation(_fitness_values)
selected_pop_index = np.zeros(number_solutions_needed)
selected_fitness_values = np.zeros((number_solutions_needed, len(_fitness_values[0, :])))
for i in range(number_solutions_needed):
_pop_size = pop_index.shape[0]
solution_1 = rn.randint(0, _pop_size - 1)
solution_2 = rn.randint(0, _pop_size - 1)
if crowding_distance[solution_1] >= crowding_distance[solution_2]:
selected_pop_index[i] = pop_index[solution_1]
selected_fitness_values[i, :] = _fitness_values[solution_1, :]
pop_index = np.delete(pop_index, solution_1, axis=0)
            _fitness_values = np.delete(_fitness_values, solution_1, axis=0)
crowding_distance = np.delete(crowding_distance, solution_1, axis=0)
else:
selected_pop_index[i] = pop_index[solution_2]
selected_fitness_values[i, :] = _fitness_values[solution_2, :]
pop_index = np.delete(pop_index, solution_2, axis=0)
            _fitness_values = np.delete(_fitness_values, solution_2, axis=0)
crowding_distance = np.delete(crowding_distance, solution_2, axis=0)
selected_pop_index = np.asarray(selected_pop_index, dtype=int)
return selected_pop_index
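# Pareto dominance test: a solution is dropped from the front if another
# solution is at least as good in every objective and strictly better in one.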
def pareto_front_finding(_fitness_values, pop_index):
_pop_size = _fitness_values.shape[0]
_pareto_front = np.ones(_pop_size, dtype=bool)
for i in range(_pop_size):
for j in range(_pop_size):
if all(_fitness_values[j] <= _fitness_values[i]) and any(_fitness_values[j] < _fitness_values[i]):
_pareto_front[i] = 0
break
return pop_index[_pareto_front]
def selection(_pop, _fitness_values, _pop_size):
pop_index_0 = np.arange(pop.shape[0])
pop_index = np.arange(pop.shape[0])
_pareto_front_index = []
while len(_pareto_front_index) < _pop_size:
new_pareto_front = pareto_front_finding(fitness_values[pop_index_0, :], pop_index_0)
total_pareto_size = len(_pareto_front_index) + len(new_pareto_front)
if total_pareto_size > _pop_size:
number_solutions_needed = pop_size - len(_pareto_front_index)
selected_solutions = (remove_using_crowding(_fitness_values[new_pareto_front], number_solutions_needed))
new_pareto_front = new_pareto_front[selected_solutions]
_pareto_front_index = np.hstack((_pareto_front_index, new_pareto_front)) # add to pareto
remaining_index = set(pop_index) - set(_pareto_front_index)
pop_index_0 = np.array(list(remaining_index))
selected_pop = _pop[_pareto_front_index.astype(int)]
return selected_pop
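# GOL progress metric: for every individual take the weaker (smaller) of its two
# scaled, inverted objective values, then report the best such score in the
# population for the current iteration.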
def GOL(_flipped_fitness_values, _fitness_values):
gol = []
max_fitness_val_pos = max(_fitness_values[:, 0])
max_fitness_val_ang = max(_fitness_values[:, 1])
for k in range(pop_summed):
if _flipped_fitness_values[k, 0] / max_fitness_val_pos < _flipped_fitness_values[k, 1] / max_fitness_val_ang:
gol = np.append(gol, _flipped_fitness_values[k, 0] / max_fitness_val_pos)
else:
gol = np.append(gol, _flipped_fitness_values[k, 1] / max_fitness_val_ang)
best_gol = max(gol)
return best_gol
########################
# Parameters #
########################
starting_x = 50.0  # values from 10.0 to 55.0
starting_y = 35.0  # values from 10.0 to 35.0
car_rotation = -math.pi/3  # values from -math.pi to math.pi
number_of_controls = 60
population_size = 160
########################
# Parameters #
########################
stan = [starting_x, starting_y, car_rotation]
nv = number_of_controls
lb = []
ub = []
for _ in range(nv):
lb = np.append(lb, [-1, -math.pi / 6])
ub = np.append(ub, [1, math.pi / 6])
pop_size = population_size
rate_crossover = 30
rate_mutation = 20
rate_local_search = 30
pop_summed = int(population_size + rate_crossover + rate_mutation + rate_local_search)
step_size = 0.1
pop = random_population(nv, pop_size, lb, ub)
best_gols = []
final = False
done = False
parking_spot_length = 6.0
parking_spot_width = 3.0
beta = 0
parking_length = 60.0
parking_width = 40.0
car_width = 1.8
car_length = 4.0
front_axle = 1.2
rear_axle = 0.34
ds = (front_axle - rear_axle / 2)
dist_between_axles = car_length - front_axle - rear_axle
dt = 1
iterations = 0
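# Main GA loop: breed offspring via crossover, mutation and local search,
# evaluate the enlarged population, record the GOL metric, then select the
# next generation by Pareto ranking with crowding-distance truncation.
# The loop ends once evaluation() reports a successfully parked individual.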
while not done:
offspring_from_crossover = crossover(pop, rate_crossover)
offspring_from_mutation = mutation(pop, rate_mutation)
offspring_from_local_search = local_search(pop, rate_local_search, step_size)
pop = np.append(pop, offspring_from_crossover, axis=0)
pop = np.append(pop, offspring_from_mutation, axis=0)
pop = np.append(pop, offspring_from_local_search, axis=0)
fitness_values, trajectory, done, flipped_fitness_values = evaluation(pop, stan[0], stan[1], stan[2], done)
best_gols = np.append(best_gols, GOL(flipped_fitness_values, fitness_values))
pop = selection(pop, fitness_values, pop_size)
print('iteration', iterations)
iterations = iterations + 1
final = True
fitness_values, final_trajectory, done, final_flipped_fitness_values = evaluation(pop, stan[0], stan[1], stan[2], done)
positions_x, positions_y, car_angles = best_individuals_visualization(final_trajectory, stan[0], stan[1], stan[2])
index = np.arange(pop.shape[0]).astype(int)
pareto_front_index = pareto_front_finding(fitness_values, index)
pop = pop[pareto_front_index, :]
pareto_front = fitness_values[pareto_front_index]
print("______________")
print("Kryteria optymalizacji:")
print("Odl. od miejsca | Różnica kąta wzgl.")
print("parkingowego | miejsca parkingowego")
print(fitness_values)
plt.scatter(fitness_values[:, 0], abs(abs(fitness_values[:, 1] * (180 / math.pi)) - 180), marker='x', c='r')
plt.scatter(pareto_front[:, 0], abs(abs(pareto_front[:, 1] * (180 / math.pi)) - 180), marker='x', c='b')
blue_patch = patches.Patch(color='blue', label='Osobniki Pareto Optymalne')
red_patch = patches.Patch(color='red', label='Reszta populacji')
plt.legend(handles=[blue_patch, red_patch])
plt.xlabel('Odległość od miejsca parkingowego w linii prostej [m]')
plt.ylabel('Różnica kąta względem miejsca parkingowego [stopnie]')
plt.show()
fig = plt.figure()
ax = fig.add_subplot()
ax.set_title('Route driven by the optimal individual')
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
ax.set_xlim(-10, parking_length)
ax.set_ylim(-10, parking_width)
ax.add_patch(patches.Rectangle((0 - parking_spot_length / 2, 0 - parking_spot_width / 2), parking_spot_length,
parking_spot_width, edgecolor='black', fill=False))
fig.show()
for m in range(nv):
xA = positions_x[m] - car_length / 2 * math.cos(car_angles[m]) - car_width / 2 * math.sin(car_angles[m])
yA = positions_y[m] - car_length / 2 * math.sin(car_angles[m]) + car_width / 2 * math.cos(car_angles[m])
xB = xA + car_width * math.sin(car_angles[m])
yB = yA - car_width * math.cos(car_angles[m])
xD = xA + car_length * math.cos(car_angles[m])
yD = yA + car_length * math.sin(car_angles[m])
xC = xB + car_length * math.cos(car_angles[m])
yC = yB + car_length * math.sin(car_angles[m])
points = [[xA, yA], [xB, yB], [xC, yC], [xD, yD]]
car = Polygon(points, fill=None, edgecolor='r')
ax.add_patch(car)
plt.show()
plot_iterations = np.arange(iterations)
plt.scatter(plot_iterations, best_gols, marker='o', c='g')
plt.title('Najlepszy parametr GOL dla każdej iteracji')
plt.xlabel('Numer iteracji')
plt.ylabel('Parametr GOL')
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"numpy.ones",
"numpy.argsort",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.arange",
"math.copysign",
"matplotlib.patches.Patch",
"random.randint",
"matplotlib.patches.Rectangle",
"numpy.append",
"math.cos",
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.legend",
"numpy.asarray",
"math.sin",
"numpy.hstack",
"numpy.sort",
"matplotlib.pyplot.ylabel",
"numpy.delete",
"numpy.random.uniform",
"matplotlib.pyplot.scatter",
"math.tan",
"numpy.zeros",
"matplotlib.pyplot.xlabel"
] |
[((14013, 14075), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""blue"""', 'label': '"""Osobniki Pareto Optymalne"""'}), "(color='blue', label='Osobniki Pareto Optymalne')\n", (14026, 14075), False, 'from matplotlib import patches\n'), ((14089, 14141), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""red"""', 'label': '"""Reszta populacji"""'}), "(color='red', label='Reszta populacji')\n", (14102, 14141), False, 'from matplotlib import patches\n'), ((14143, 14186), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[blue_patch, red_patch]'}), '(handles=[blue_patch, red_patch])\n', (14153, 14186), True, 'import matplotlib.pyplot as plt\n'), ((14188, 14255), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Odległość od miejsca parkingowego w linii prostej [m]"""'], {}), "('Odległość od miejsca parkingowego w linii prostej [m]')\n", (14198, 14255), True, 'import matplotlib.pyplot as plt\n'), ((14257, 14323), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Różnica kąta względem miejsca parkingowego [stopnie]"""'], {}), "('Różnica kąta względem miejsca parkingowego [stopnie]')\n", (14267, 14323), True, 'import matplotlib.pyplot as plt\n'), ((14325, 14335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14333, 14335), True, 'import matplotlib.pyplot as plt\n'), ((14345, 14357), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14355, 14357), True, 'import matplotlib.pyplot as plt\n'), ((15452, 15462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15460, 15462), True, 'import matplotlib.pyplot as plt\n'), ((15484, 15505), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (15493, 15505), True, 'import numpy as np\n'), ((15507, 15565), 'matplotlib.pyplot.scatter', 'plt.scatter', (['plot_iterations', 'best_gols'], {'marker': '"""o"""', 'c': '"""g"""'}), "(plot_iterations, best_gols, marker='o', c='g')\n", (15518, 15565), True, 'import matplotlib.pyplot as plt\n'), ((15567, 15622), 'matplotlib.pyplot.title', 'plt.title', (['"""Najlepszy parametr GOL dla każdej iteracji"""'], {}), "('Najlepszy parametr GOL dla każdej iteracji')\n", (15576, 15622), True, 'import matplotlib.pyplot as plt\n'), ((15624, 15652), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Numer iteracji"""'], {}), "('Numer iteracji')\n", (15634, 15652), True, 'import matplotlib.pyplot as plt\n'), ((15654, 15680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Parametr GOL"""'], {}), "('Parametr GOL')\n", (15664, 15680), True, 'import matplotlib.pyplot as plt\n'), ((15682, 15692), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15690, 15692), True, 'import matplotlib.pyplot as plt\n'), ((216, 237), 'numpy.zeros', 'np.zeros', (['(n, 2 * nv)'], {}), '((n, 2 * nv))\n', (224, 237), True, 'import numpy as np\n'), ((576, 617), 'numpy.zeros', 'np.zeros', (['(crossover_rate, _pop.shape[1])'], {}), '((crossover_rate, _pop.shape[1]))\n', (584, 617), True, 'import numpy as np\n'), ((1321, 1361), 'numpy.zeros', 'np.zeros', (['(mutation_rate, _pop.shape[1])'], {}), '((mutation_rate, _pop.shape[1]))\n', (1329, 1361), True, 'import numpy as np\n'), ((1996, 2024), 'numpy.zeros', 'np.zeros', (['(n, _pop.shape[1])'], {}), '((n, _pop.shape[1]))\n', (2004, 2024), True, 'import numpy as np\n'), ((2435, 2463), 'numpy.zeros', 'np.zeros', (['(_pop.shape[0], 2)'], {}), '((_pop.shape[0], 2))\n', (2443, 2463), True, 'import numpy as np\n'), ((2495, 2523), 'numpy.zeros', 'np.zeros', (['(_pop.shape[0], 2)'], {}), '((_pop.shape[0], 2))\n', (2503, 2523), True, 'import 
numpy as np\n'), ((2566, 2578), 'numpy.zeros', 'np.zeros', (['nv'], {}), '(nv)\n', (2574, 2578), True, 'import numpy as np\n'), ((2592, 2604), 'numpy.zeros', 'np.zeros', (['nv'], {}), '(nv)\n', (2600, 2604), True, 'import numpy as np\n'), ((4847, 4859), 'numpy.zeros', 'np.zeros', (['nv'], {}), '(nv)\n', (4855, 4859), True, 'import numpy as np\n'), ((4873, 4885), 'numpy.zeros', 'np.zeros', (['nv'], {}), '(nv)\n', (4881, 4885), True, 'import numpy as np\n'), ((6685, 6728), 'numpy.zeros', 'np.zeros', (['(_pop_size, fitness_value_number)'], {}), '((_pop_size, fitness_value_number))\n', (6693, 6728), True, 'import numpy as np\n'), ((7682, 7717), 'numpy.sum', 'np.sum', (['matrix_for_crowding'], {'axis': '(1)'}), '(matrix_for_crowding, axis=1)\n', (7688, 7717), True, 'import numpy as np\n'), ((7879, 7914), 'numpy.arange', 'np.arange', (['_fitness_values.shape[0]'], {}), '(_fitness_values.shape[0])\n', (7888, 7914), True, 'import numpy as np\n'), ((8004, 8037), 'numpy.zeros', 'np.zeros', (['number_solutions_needed'], {}), '(number_solutions_needed)\n', (8012, 8037), True, 'import numpy as np\n'), ((9162, 9203), 'numpy.asarray', 'np.asarray', (['selected_pop_index'], {'dtype': 'int'}), '(selected_pop_index, dtype=int)\n', (9172, 9203), True, 'import numpy as np\n'), ((9359, 9389), 'numpy.ones', 'np.ones', (['_pop_size'], {'dtype': 'bool'}), '(_pop_size, dtype=bool)\n', (9366, 9389), True, 'import numpy as np\n'), ((9745, 9768), 'numpy.arange', 'np.arange', (['pop.shape[0]'], {}), '(pop.shape[0])\n', (9754, 9768), True, 'import numpy as np\n'), ((9786, 9809), 'numpy.arange', 'np.arange', (['pop.shape[0]'], {}), '(pop.shape[0])\n', (9795, 9809), True, 'import numpy as np\n'), ((11762, 11795), 'numpy.append', 'np.append', (['lb', '[-1, -math.pi / 6]'], {}), '(lb, [-1, -math.pi / 6])\n', (11771, 11795), True, 'import numpy as np\n'), ((11806, 11837), 'numpy.append', 'np.append', (['ub', '[1, math.pi / 6]'], {}), '(ub, [1, math.pi / 6])\n', (11815, 11837), True, 'import numpy as np\n'), ((12666, 12714), 'numpy.append', 'np.append', (['pop', 'offspring_from_crossover'], {'axis': '(0)'}), '(pop, offspring_from_crossover, axis=0)\n', (12675, 12714), True, 'import numpy as np\n'), ((12726, 12773), 'numpy.append', 'np.append', (['pop', 'offspring_from_mutation'], {'axis': '(0)'}), '(pop, offspring_from_mutation, axis=0)\n', (12735, 12773), True, 'import numpy as np\n'), ((12785, 12836), 'numpy.append', 'np.append', (['pop', 'offspring_from_local_search'], {'axis': '(0)'}), '(pop, offspring_from_local_search, axis=0)\n', (12794, 12836), True, 'import numpy as np\n'), ((14565, 14717), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(0 - parking_spot_length / 2, 0 - parking_spot_width / 2)', 'parking_spot_length', 'parking_spot_width'], {'edgecolor': '"""black"""', 'fill': '(False)'}), "((0 - parking_spot_length / 2, 0 - parking_spot_width / 2),\n parking_spot_length, parking_spot_width, edgecolor='black', fill=False)\n", (14582, 14717), False, 'from matplotlib import patches\n'), ((15386, 15427), 'matplotlib.patches.Polygon', 'Polygon', (['points'], {'fill': 'None', 'edgecolor': '"""r"""'}), "(points, fill=None, edgecolor='r')\n", (15393, 15427), False, 'from matplotlib.patches import Polygon\n'), ((284, 309), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {}), '(lb, ub)\n', (301, 309), True, 'import numpy as np\n'), ((678, 713), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (695, 713), True, 'import numpy as np\n'), ((728, 763), 
'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (745, 763), True, 'import numpy as np\n'), ((922, 957), 'numpy.random.randint', 'np.random.randint', (['(1)', '_pop.shape[1]'], {}), '(1, _pop.shape[1])\n', (939, 957), True, 'import numpy as np\n'), ((1421, 1456), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (1438, 1456), True, 'import numpy as np\n'), ((1471, 1506), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (1488, 1506), True, 'import numpy as np\n'), ((1665, 1700), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[1]'], {}), '(0, _pop.shape[1])\n', (1682, 1700), True, 'import numpy as np\n'), ((2063, 2098), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (2080, 2098), True, 'import numpy as np\n'), ((2147, 2189), 'numpy.random.uniform', 'np.random.uniform', (['(-_step_size)', '_step_size'], {}), '(-_step_size, _step_size)\n', (2164, 2189), True, 'import numpy as np\n'), ((6921, 6940), 'numpy.zeros', 'np.zeros', (['_pop_size'], {}), '(_pop_size)\n', (6929, 6940), True, 'import numpy as np\n'), ((7157, 7196), 'numpy.sort', 'np.sort', (['normalize_fitness_values[:, i]'], {}), '(normalize_fitness_values[:, i])\n', (7164, 7196), True, 'import numpy as np\n'), ((7240, 7282), 'numpy.argsort', 'np.argsort', (['normalize_fitness_values[:, i]'], {}), '(normalize_fitness_values[:, i])\n', (7250, 7282), True, 'import numpy as np\n'), ((7509, 7552), 'numpy.argsort', 'np.argsort', (['sorting_normalized_values_index'], {}), '(sorting_normalized_values_index)\n', (7519, 7552), True, 'import numpy as np\n'), ((8243, 8271), 'random.randint', 'rn.randint', (['(0)', '(_pop_size - 1)'], {}), '(0, _pop_size - 1)\n', (8253, 8271), True, 'import random as rn\n'), ((8294, 8322), 'random.randint', 'rn.randint', (['(0)', '(_pop_size - 1)'], {}), '(0, _pop_size - 1)\n', (8304, 8322), True, 'import random as rn\n'), ((10399, 10449), 'numpy.hstack', 'np.hstack', (['(_pareto_front_index, new_pareto_front)'], {}), '((_pareto_front_index, new_pareto_front))\n', (10408, 10449), True, 'import numpy as np\n'), ((13418, 13441), 'numpy.arange', 'np.arange', (['pop.shape[0]'], {}), '(pop.shape[0])\n', (13427, 13441), True, 'import numpy as np\n'), ((807, 842), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (824, 842), True, 'import numpy as np\n'), ((861, 896), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (878, 896), True, 'import numpy as np\n'), ((1550, 1585), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (1567, 1585), True, 'import numpy as np\n'), ((1604, 1639), 'numpy.random.randint', 'np.random.randint', (['(0)', '_pop.shape[0]'], {}), '(0, _pop.shape[0])\n', (1621, 1639), True, 'import numpy as np\n'), ((6180, 6208), 'numpy.append', 'np.append', (['_positions_x', '[x]'], {}), '(_positions_x, [x])\n', (6189, 6208), True, 'import numpy as np\n'), ((6237, 6265), 'numpy.append', 'np.append', (['_positions_y', '[y]'], {}), '(_positions_y, [y])\n', (6246, 6265), True, 'import numpy as np\n'), ((6292, 6323), 'numpy.append', 'np.append', (['_car_angle', '[alfa_n]'], {}), '(_car_angle, [alfa_n])\n', (6301, 6323), True, 'import numpy as np\n'), ((8559, 8599), 'numpy.delete', 'np.delete', (['pop_index', 'solution_1'], {'axis': '(0)'}), 
'(pop_index, solution_1, axis=0)\n', (8568, 8599), True, 'import numpy as np\n'), ((8631, 8676), 'numpy.delete', 'np.delete', (['fitness_values', 'solution_1'], {'axis': '(0)'}), '(fitness_values, solution_1, axis=0)\n', (8640, 8676), True, 'import numpy as np\n'), ((8710, 8758), 'numpy.delete', 'np.delete', (['crowding_distance', 'solution_1'], {'axis': '(0)'}), '(crowding_distance, solution_1, axis=0)\n', (8719, 8758), True, 'import numpy as np\n'), ((8934, 8974), 'numpy.delete', 'np.delete', (['pop_index', 'solution_2'], {'axis': '(0)'}), '(pop_index, solution_2, axis=0)\n', (8943, 8974), True, 'import numpy as np\n'), ((9006, 9051), 'numpy.delete', 'np.delete', (['fitness_values', 'solution_2'], {'axis': '(0)'}), '(fitness_values, solution_2, axis=0)\n', (9015, 9051), True, 'import numpy as np\n'), ((9085, 9133), 'numpy.delete', 'np.delete', (['crowding_distance', 'solution_2'], {'axis': '(0)'}), '(crowding_distance, solution_2, axis=0)\n', (9094, 9133), True, 'import numpy as np\n'), ((11025, 11092), 'numpy.append', 'np.append', (['gol', '(_flipped_fitness_values[k, 0] / max_fitness_val_pos)'], {}), '(gol, _flipped_fitness_values[k, 0] / max_fitness_val_pos)\n', (11034, 11092), True, 'import numpy as np\n'), ((11127, 11194), 'numpy.append', 'np.append', (['gol', '(_flipped_fitness_values[k, 1] / max_fitness_val_ang)'], {}), '(gol, _flipped_fitness_values[k, 1] / max_fitness_val_ang)\n', (11136, 11194), True, 'import numpy as np\n'), ((14868, 14891), 'math.sin', 'math.sin', (['car_angles[m]'], {}), '(car_angles[m])\n', (14876, 14891), False, 'import math\n'), ((14978, 15001), 'math.cos', 'math.cos', (['car_angles[m]'], {}), '(car_angles[m])\n', (14986, 15001), False, 'import math\n'), ((15031, 15054), 'math.sin', 'math.sin', (['car_angles[m]'], {}), '(car_angles[m])\n', (15039, 15054), False, 'import math\n'), ((15082, 15105), 'math.cos', 'math.cos', (['car_angles[m]'], {}), '(car_angles[m])\n', (15090, 15105), False, 'import math\n'), ((15136, 15159), 'math.cos', 'math.cos', (['car_angles[m]'], {}), '(car_angles[m])\n', (15144, 15159), False, 'import math\n'), ((15188, 15211), 'math.sin', 'math.sin', (['car_angles[m]'], {}), '(car_angles[m])\n', (15196, 15211), False, 'import math\n'), ((15242, 15265), 'math.cos', 'math.cos', (['car_angles[m]'], {}), '(car_angles[m])\n', (15250, 15265), False, 'import math\n'), ((15294, 15317), 'math.sin', 'math.sin', (['car_angles[m]'], {}), '(car_angles[m])\n', (15302, 15317), False, 'import math\n'), ((2773, 2789), 'math.cos', 'math.cos', (['alfa_s'], {}), '(alfa_s)\n', (2781, 2789), False, 'import math\n'), ((2814, 2830), 'math.sin', 'math.sin', (['alfa_s'], {}), '(alfa_s)\n', (2822, 2830), False, 'import math\n'), ((4546, 4582), 'numpy.append', 'np.append', (['_trajectory', '[individual]'], {}), '(_trajectory, [individual])\n', (4555, 4582), True, 'import numpy as np\n'), ((5114, 5130), 'math.cos', 'math.cos', (['alfa_s'], {}), '(alfa_s)\n', (5122, 5130), False, 'import math\n'), ((5155, 5171), 'math.sin', 'math.sin', (['alfa_s'], {}), '(alfa_s)\n', (5163, 5171), False, 'import math\n'), ((14826, 14849), 'math.cos', 'math.cos', (['car_angles[m]'], {}), '(car_angles[m])\n', (14834, 14849), False, 'import math\n'), ((14936, 14959), 'math.sin', 'math.sin', (['car_angles[m]'], {}), '(car_angles[m])\n', (14944, 14959), False, 'import math\n'), ((3090, 3108), 'math.tan', 'math.tan', (['angle[u]'], {}), '(angle[u])\n', (3098, 3108), False, 'import math\n'), ((3733, 3749), 'math.cos', 'math.cos', (['alfa_n'], {}), '(alfa_n)\n', (3741, 3749), False, 
'import math\n'), ((3778, 3794), 'math.sin', 'math.sin', (['alfa_n'], {}), '(alfa_n)\n', (3786, 3794), False, 'import math\n'), ((4053, 4079), 'math.sqrt', 'math.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4062, 4079), False, 'import math\n'), ((5445, 5463), 'math.tan', 'math.tan', (['angle[u]'], {}), '(angle[u])\n', (5453, 5463), False, 'import math\n'), ((6090, 6106), 'math.cos', 'math.cos', (['alfa_n'], {}), '(alfa_n)\n', (6098, 6106), False, 'import math\n'), ((6135, 6151), 'math.sin', 'math.sin', (['alfa_n'], {}), '(alfa_n)\n', (6143, 6151), False, 'import math\n'), ((2961, 2977), 'math.cos', 'math.cos', (['alfa_n'], {}), '(alfa_n)\n', (2969, 2977), False, 'import math\n'), ((3012, 3028), 'math.sin', 'math.sin', (['alfa_n'], {}), '(alfa_n)\n', (3020, 3028), False, 'import math\n'), ((3225, 3251), 'math.copysign', 'math.copysign', (['(1)', 'angle[u]'], {}), '(1, angle[u])\n', (3238, 3251), False, 'import math\n'), ((3267, 3304), 'math.sin', 'math.sin', (['(dist_between_axles / 2 * Ro)'], {}), '(dist_between_axles / 2 * Ro)\n', (3275, 3304), False, 'import math\n'), ((3550, 3576), 'math.copysign', 'math.copysign', (['(1)', 'angle[u]'], {}), '(1, angle[u])\n', (3563, 3576), False, 'import math\n'), ((5309, 5325), 'math.cos', 'math.cos', (['alfa_n'], {}), '(alfa_n)\n', (5317, 5325), False, 'import math\n'), ((5365, 5381), 'math.sin', 'math.sin', (['alfa_n'], {}), '(alfa_n)\n', (5373, 5381), False, 'import math\n'), ((5580, 5606), 'math.copysign', 'math.copysign', (['(1)', 'angle[u]'], {}), '(1, angle[u])\n', (5593, 5606), False, 'import math\n'), ((5622, 5659), 'math.sin', 'math.sin', (['(dist_between_axles / 2 * Ro)'], {}), '(dist_between_axles / 2 * Ro)\n', (5630, 5659), False, 'import math\n'), ((5905, 5931), 'math.copysign', 'math.copysign', (['(1)', 'angle[u]'], {}), '(1, angle[u])\n', (5918, 5931), False, 'import math\n'), ((3377, 3397), 'math.sin', 'math.sin', (['(gama + tau)'], {}), '(gama + tau)\n', (3385, 3397), False, 'import math\n'), ((3400, 3413), 'math.sin', 'math.sin', (['tau'], {}), '(tau)\n', (3408, 3413), False, 'import math\n'), ((3442, 3468), 'math.copysign', 'math.copysign', (['(1)', 'angle[u]'], {}), '(1, angle[u])\n', (3455, 3468), False, 'import math\n'), ((3477, 3490), 'math.cos', 'math.cos', (['tau'], {}), '(tau)\n', (3485, 3490), False, 'import math\n'), ((3493, 3513), 'math.cos', 'math.cos', (['(gama + tau)'], {}), '(gama + tau)\n', (3501, 3513), False, 'import math\n'), ((5732, 5752), 'math.sin', 'math.sin', (['(gama + tau)'], {}), '(gama + tau)\n', (5740, 5752), False, 'import math\n'), ((5755, 5768), 'math.sin', 'math.sin', (['tau'], {}), '(tau)\n', (5763, 5768), False, 'import math\n'), ((5797, 5823), 'math.copysign', 'math.copysign', (['(1)', 'angle[u]'], {}), '(1, angle[u])\n', (5810, 5823), False, 'import math\n'), ((5832, 5845), 'math.cos', 'math.cos', (['tau'], {}), '(tau)\n', (5840, 5845), False, 'import math\n'), ((5848, 5868), 'math.cos', 'math.cos', (['(gama + tau)'], {}), '(gama + tau)\n', (5856, 5868), False, 'import math\n'), ((3666, 3690), 'math.copysign', 'math.copysign', (['(1)', 'alfa_n'], {}), '(1, alfa_n)\n', (3679, 3690), False, 'import math\n'), ((6021, 6045), 'math.copysign', 'math.copysign', (['(1)', 'alfa_n'], {}), '(1, alfa_n)\n', (6034, 6045), False, 'import math\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
outbursts --- Lightcurve and outburst analysis
==============================================
"""
__all__ = [
'CometaryTrends'
]
from collections import namedtuple
import logging
import numpy as np
from scipy.cluster import hierarchy
from scipy.optimize import leastsq
import astropy.units as u
from astropy.time import Time
from astropy.stats import sigma_clip
from ..util import linefit
dmdtFit = namedtuple(
'dmdtFit', ['m0', 'dmdt', 'm0_unc', 'dmdt_unc', 'rms', 'rchisq']
)
ExpFit = namedtuple(
'ExpFit', ['dm', 'tau', 'dm_unc', 'tau_unc', 'rms', 'rchisq']
)
Color = namedtuple(
'Color', ['t', 'clusters', 'm_filter', 'm',
'm_unc', 'c', 'c_unc', 'avg', 'avg_unc']
)
Color.__doc__ = 'Color estimate.'
Color.t.__doc__ = 'Average observation date for each color estimate. [astropy Time]'
Color.clusters.__doc__ = 'Observation clusters used to define color; 0 for unused.'
Color.m_filter.__doc__ = 'Filter for m.'
Color.m.__doc__ = 'Apparent mag for each date in given filter. [mag]'
Color.m_unc.__doc__ = 'Uncertainty on m. [mag]'
Color.c.__doc__ = 'Individual colors. [mag]'
Color.c_unc.__doc__ = 'Uncertainty on c. [mag]'
Color.avg.__doc__ = 'Weighted average color. [mag]'
Color.avg_unc.__doc__ = 'Uncertainty on avg. [mag]'
class CometaryTrends:
"""Define lightcurve trends designed for identifying cometary outbursts.
Parameters
----------
eph : sbpy Ephem
Ephemeris of the target. Field requirements depend on the trend
fitting methods to be used. Generally provide date, rh, delta, phase.
m, m_unc : Quantity
Photometry and uncertainty in magnitudes.
filt : array, optional
Filters for each ``m``.
fit_mask : array, optional
``True`` for elements to ignore when fitting (e.g., outbursts).
logger : Logger, optional
Use this logger for messaging.
**kwargs
Any ``CometaryTrends`` property.
Properties
----------
m_original : Quantity
Unmodified (input) photometry.
m : Quantity
Apparent magnitude, possibly limited to one filter (see ``fit_filter``)
or filter transformed (see ``color_transform``).
colors : dict of Quantity
Use these colors when transforming between filters. Key by filter
tuple in wavelength order, e.g., to set g-r use:
`{('g', 'r'): 0.5 * u.mag}`
``colors`` is also set when ``self.color`` is used.
fit_filter : str or None
Set to a filter in ``self.filt`` to limit fitting to this filter.
color_transform : bool
Set to ``True`` to transform observations to that specified in
``fit_filter`` via ``colors``.
"""
def __init__(self, eph, m, m_unc, filt=None, fit_mask=None, logger=None,
**kwargs):
# store parameters and properties
self.eph = eph
self.m = m
self.m_unc = m_unc
self.filt = np.array(filt)
self.fit_mask = (
np.zeros(len(m), bool) if fit_mask is None
else np.array(fit_mask)
)
self.colors = kwargs.get('colors', {})
self.fit_filter = kwargs.get('fit_filter')
self.color_transform = kwargs.get('color_transform', False)
if logger is None:
self.logger = logging.getLogger('CometaryTrends')
else:
self.logger = logger
# parameter check
if not all((isinstance(m, u.Quantity), isinstance(m_unc, u.Quantity))):
raise ValueError(
'm, m_unc must be Quantity in units of magnitude.')
n = [len(x) for x in (eph, m, m_unc, self.fit_mask)]
if filt is not None:
n += [len(filt)]
if len(np.unique(n)) != 1:
raise ValueError('all arrays must have the same length')
@property
def m_original(self):
return self._m
@property
def m(self):
"""Apparent magnitude.
Possibly limited to one filter (see ``fit_filter``) or filter
transformed (see ``color_transform``).
"""
m = np.ma.MaskedArray(self._m.copy(),
mask=np.zeros(len(self._m), bool))
if (self.filt is not None) and (self.fit_filter is not None):
for i in range(len(m)):
if self.filt[i] != self.fit_filter:
if self.color_transform:
# try to color transform
color = (self.filt[i], self.fit_filter)
if color in self.colors:
m[i] -= self.colors[color]
elif color[::-1] in self.colors:
m[i] += self.colors[color[::-1]]
else:
# not possible
m.mask[i] = True
else:
# not color transforming this filter
m.mask[i] = True
return m
@m.setter
def m(self, _m):
self._m = _m
@property
def fit_m(self):
"""Magnitude array masked for fitting."""
m = self.m
m.mask += self.fit_mask
return m
@property
def fit_filter(self):
"""Filter to fit.
        Set to ``None`` to fit all data (without color transformations).
"""
return self._fit_filter
@fit_filter.setter
def fit_filter(self, filt):
if not isinstance(filt, (str, type(None))):
raise ValueError('fit filter must be a string or ``None``')
self._fit_filter = filt
@property
def color_transform(self):
"""Color transformation flag.
If fitting only one filter, set to ``True`` to allow
color transformations via ``self.color``.
"""
return self._color_transform
@color_transform.setter
def color_transform(self, flag):
self._color_transform = bool(flag)
def color(self, blue, red, max_dt=16 / 24, max_unc=0.25 * u.mag,
m_filter=None):
"""Estimate the color, blue - red, using weighted averages.
``eph`` requires ``'date'``.
Masked data is excluded.
Data is not nucleus subtracted.
Parameters
----------
blue: string
The name of the bluer filter.
red: string
The name of the redder filter.
max_dt: float, optional
Maximum time difference to consider when clustering observations.
max_unc: Quantity, optional
Ignore results with uncertainty > ``max_unc``.
m_filter : string, optional
Report mean apparent magnitude in this filter. Default is the
redder filter.
Returns
-------
color: Color
The color results or ``None`` if it cannot be calculated.
"""
if len(self.filt) < 2:
self.logger.info('Not enough filters.')
return None
b = self.filt == blue
r = self.filt == red
if m_filter is None:
m_filter = red
elif m_filter not in [blue, red]:
raise ValueError("m_filter must be one of blue or red")
clusters = hierarchy.fclusterdata(
self.eph['date'].mjd[:, np.newaxis],
max_dt, criterion='distance'
)
self.logger.info(f'{clusters.max()} clusters found.')
mjd = []
m_mean = []
m_mean_unc = []
bmr = []
bmr_unc = []
for cluster in np.unique(clusters):
i = (clusters == cluster) * ~self.fit_mask
# require both filters in this cluster
if (not np.any(b[i])) or (not np.any(r[i])):
clusters[i] = 0
continue
# estimate weighted averages and compute color
wb, sw = np.average(self.m_original[b * i],
weights=self.m_unc[b * i]**-2,
returned=True)
wb_unc = sw**-0.5
wr, sw = np.average(self.m_original[r * i],
weights=self.m_unc[r * i]**-2,
returned=True)
wr_unc = sw**-0.5
if np.hypot(wb_unc, wr_unc) > max_unc:
continue
mjd.append(self.eph['date'].mjd[i].mean())
            if m_filter == blue:
m_mean.append(wb)
m_mean_unc.append(wb_unc)
else:
m_mean.append(wr)
m_mean_unc.append(wr_unc)
bmr.append(wb - wr)
bmr_unc.append(np.hypot(wb_unc, wr_unc))
if len(bmr) == 0:
self.logger.info('No colors measured.')
return None
m_mean = u.Quantity(m_mean)
m_mean_unc = u.Quantity(m_mean_unc)
bmr = u.Quantity(bmr)
bmr_unc = u.Quantity(bmr_unc)
avg, sw = np.average(bmr, weights=bmr_unc**-2, returned=True)
avg_unc = sw**-0.5
self.colors[(blue, red)] = avg
return Color(Time(mjd, format='mjd'), clusters, m_filter,
m_mean, m_mean_unc, bmr, bmr_unc, avg, avg_unc)
@staticmethod
def linear_add(a, b):
"""The sum a+b computed in linear space."""
return -np.log(np.exp(-a.value) + np.exp(-b.to_value(a.unit))) * a.unit
@staticmethod
def linear_subtract(a, b):
"""The difference a-b computed in linear space."""
return -np.log(np.exp(-a.value) - np.exp(-b.to_value(a.unit))) * a.unit
def H(self, fixed_angular_size=False, Phi=None, nucleus=None):
"""Absolute magnitude.
Parameters
----------
fixed_angular_size: bool
``True`` if the photometric aperture is measured with a fixed
            angular size. If so, the target-observer distance (Δ) correction
will be Δ**-1.
Phi: function, optional
Phase function.
nucleus : Quantity
Subtract this nucleus before scaling.
"""
m = self.m.copy()
unit = m.data.unit
if nucleus is not None:
m = np.ma.MaskedArray(self.linear_subtract(m.data, nucleus),
mask=m.mask)
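        # For a fixed angular aperture on an extended coma the collected flux
        # scales roughly as 1/Delta, so only 2.5 log10(Delta) is removed here;
        # otherwise the usual inverse-square 5 log10(Delta) term applies.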
d = 2.5 if fixed_angular_size else 5
H = (m - 5 * np.log10(self.eph['rh'].to_value('au')) * unit
- d * np.log10(self.eph['delta'].to_value('au')) * unit)
if Phi is not None:
H += 2.5 * np.log10(Phi(self.eph['phase'])) * unit
return H
def ostat(self, k=4, dt=14, sigma=2, **kwargs):
"""Compute the outburst statistic for each photometry point.
        ostat is calculated for every point, including masked ones, but the
        masked points are not included in the photometric baseline calculation.
Parameters
----------
k : float, optional
Heliocentric distance slope on apparent magnitude for the baseline
estimate.
dt : float, optional
Number of days of history to use for the baseline estimate.
sigma : float, optional
Number of sigmas to clip the data.
**kwargs
Additional keyword arguments are passed to ``H()``.
Returns
-------
o : array
The outburst statistic.
"""
Hy = (
self.H(**kwargs)
- 2.5 * (k - 2) * np.log10(self.eph['rh'].to_value('au')) * u.mag
)
o = np.ma.zeros(len(Hy))
for i in range(len(Hy)):
j = (
(self.eph['date'] < self.eph['date'][i])
* (self.eph['date'] > (self.eph['date'][i] - dt * u.day))
)
if j.sum() < 1:
o[i] = np.ma.masked
continue
# reject outliers, calculate weighted mean
good = j * ~Hy.mask * np.isfinite(Hy.data)
if np.sum(good) > 2:
m = sigma_clip(Hy[good].data, sigma=sigma)
else:
m = Hy[good]
m -= Hy[i] # normalize to data point being tested
m_unc = self.m_unc[good]
baseline, sw = np.ma.average(m, weights=m_unc**-2,
returned=True)
baseline_unc = sw**-0.5
unc = max(np.sqrt(baseline_unc**2 + self.m_unc[i]**2).value, 0.1)
o[i] = np.round(baseline.value / unc, 1)
return o
def _fit_setup(self, nucleus=None, absolute=False, **kwargs):
dt = self.eph['date'].mjd * u.day
dt -= dt.min()
if absolute:
m = self.H(nucleus=nucleus, **kwargs)
m.mask = self.fit_m.mask
else:
m = self.fit_m
if nucleus is not None:
m = np.ma.MaskedArray(
self.linear_subtract(m.data, nucleus),
mask=m.mask
)
# subtraction may introduce nans
m.mask += ~np.isfinite(m)
return dt, m
def dmdt(self, nucleus=None, guess=None, k=1, absolute=False, **kwargs):
"""Fit magnitude versus time as a function of ``t**k``.
``eph`` requires ``'date'``.
``absolute`` requires ``'rh'``, ``'delta'``, and ``'phase'`` in
``eph``.
Parameters
----------
nucleus : Quantity
Subtract this nucleus before fitting, assumed to be in the same
filter as ``self.m``.
guess : tuple of floats
Initial fit guess: (m0, slope).
k : float, optional
Scale time by ``t^k``.
        absolute : bool, optional
Fix absolute magnitude via ``self.H()``.
**kwargs
            Additional keyword arguments are passed to ``self.H()``.
Returns
-------
dt: np.array
trend: np.array
Including the nucleus.
fit_mask: np.array
Data points used in the fit.
fit: dmdtFit
Fit results.
"""
dt, m = self._fit_setup(nucleus=nucleus, absolute=absolute, **kwargs)
unit = m.data.unit
mask = m.mask
guess = (0.05, 15) if guess is None else guess
r = linefit(dt.value[~mask]**k, m.data.value[~mask],
self.m_unc.value[~mask], guess)
trend = (r[0][1] + r[0][0] * dt.value**k) * unit
fit_unc = r[1] if r[1] is not None else (0, 0)
# restore nucleus?
if nucleus is not None:
trend = self.linear_add(trend, nucleus)
residuals = m - trend
fit = dmdtFit(r[0][1] * unit, r[0][0] * unit / u.day**k,
fit_unc[1] * unit, fit_unc[0] * unit / u.day**k,
np.std(residuals[~mask].data),
np.sum((residuals[~mask].data / self.m_unc[~mask])**2)
/ np.sum(~mask))
return dt, trend, ~mask, fit
def exp(self, baseline, absolute=False, **kwargs):
"""Fit magnitude versus time as a function of ``e**(k*t)``.
``eph`` requires ``'date'``.
``absolute`` requires ``'rh'``, ``'delta'``, and ``'phase'`` in
``eph``.
Parameters
----------
baseline : Quantity
Fit the exponential with respect to this baseline trend (may
include the nucleus). Must be absolute magnitude if ``absolute``
is true.
        absolute : bool, optional
Fix absolute magnitude via ``self.H()``.
**kwargs
            Additional keyword arguments are passed to ``self.H()``.
Returns
-------
dt: np.array
trend: np.array
Including the nucleus.
fit_mask: np.array
Data points used in the fit.
fit: ExpFit
Fit results.
"""
dt, m = self._fit_setup(absolute=absolute, **kwargs)
dm = m - baseline
unit = m.data.unit
mask = m.mask
print(m)
def model(dt, peak, tau):
lc = peak * np.exp(-dt / tau)
lc[dt < 0] = 0
return lc
def chi(p, dt, dm, m_unc):
m = model(dt, *p)
return (dm - m) / m_unc
args = (dt.value[~mask], dm.data.value[~mask], self.m_unc.value[~mask])
guess = (dm.compressed().min().value, 10)
r = leastsq(chi, guess, args=args, full_output=True)
fit_unc = np.sqrt(np.diag(r[1]))
trend = model(dt.value, *r[0]) * unit
# restore baseline
trend = trend + baseline
residuals = m - trend
fit = ExpFit(r[0][0] * unit, r[0][1] * u.day,
fit_unc[0] * unit, fit_unc[1] * u.day,
np.std(residuals[~mask].data),
np.sum((residuals[~mask].data / self.m_unc[~mask])**2)
/ np.sum(~mask))
return dt, trend, ~mask, fit
# def mrh(self, fixed_angular_size, filt=None, color_transform=True,
# Phi=phase_HalleyMarcus):
# """Fit magnitude as a function of rh.
# ``eph`` requires rh, delta, phase.
# m = M - k log10(rh) - d log10(Delta) + 2.5 log10(Phi(phase))
# d = 2.5 for fixed_angular_size == True, 5 otherwise.
# Parameters
# ----------
# fixed_angular_size: bool
# Aperture is fixed in angular size.
# filt: str, optional
# Fit only this filter.
# color_transformation: bool, optional
# If fitting only one filter, set to ``True`` to allow
# color transformations via ``self.color``.
# Phi: function, optional
# Use this phase function.
# Returns
# -------
# trend: np.array
# fit_mask: np.array
# Data points used in the fit.
# fit: mrhFit
# """
# m = self.coma(filt)
# if filt is not None and not color_transform:
# m[self.filt != filt] = np.nan
# if fixed_angular_size:
# d = 2.5
# else:
# d = 5
# dm = (-d * np.log10(self.eph['delta'].to_value('au'))
# + 2.5 * np.log10(Phi(self.eph['phase']))) * u.mag
# i = ~self.fit_mask * np.isfinite(m)
# r = linefit(self.eph['rh'][i].value, (m - dm)[i].value,
# self.m_unc[i].value, (0.05, 15))
# trend = (r[0][1] + r[0][0] * self.eph['rh'].value) * m.unit + dm
# residuals = m - trend
# # restore nucleus?
# if self.nucleus is not None:
# trend = -np.log(np.exp(-trend.value) +
# np.exp(-self.nucleus.value)) * u.mag
# fit = mrhFit(r[0][1] * m.unit, r[0][0] * m.unit / u.day,
# r[1][1] * m.unit, r[1][0] * m.unit / u.day,
# np.std(residuals[i]),
# np.sum((residuals[i] / self.m_unc[i])**2) / np.sum(i))
# return trend, i, fit
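    # Illustrative usage sketch (names are placeholders; the constructor of the
    # surrounding class is not shown in this excerpt).  Given an instance
    # ``trends`` of this class:
    #
    #     H = trends.H(fixed_angular_size=True)              # absolute magnitudes
    #     o = trends.ostat(k=4, dt=14)                       # outburst statistic
    #     dt, trend, used, fit = trends.dmdt(absolute=True)  # linear dm/dt fit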
|
[
"numpy.sum",
"scipy.optimize.leastsq",
"numpy.exp",
"numpy.diag",
"numpy.round",
"numpy.unique",
"astropy.stats.sigma_clip",
"numpy.std",
"numpy.isfinite",
"astropy.units.Quantity",
"numpy.average",
"astropy.time.Time",
"scipy.cluster.hierarchy.fclusterdata",
"numpy.hypot",
"numpy.ma.average",
"numpy.any",
"numpy.array",
"collections.namedtuple",
"logging.getLogger",
"numpy.sqrt"
] |
[((475, 551), 'collections.namedtuple', 'namedtuple', (['"""dmdtFit"""', "['m0', 'dmdt', 'm0_unc', 'dmdt_unc', 'rms', 'rchisq']"], {}), "('dmdtFit', ['m0', 'dmdt', 'm0_unc', 'dmdt_unc', 'rms', 'rchisq'])\n", (485, 551), False, 'from collections import namedtuple\n'), ((567, 640), 'collections.namedtuple', 'namedtuple', (['"""ExpFit"""', "['dm', 'tau', 'dm_unc', 'tau_unc', 'rms', 'rchisq']"], {}), "('ExpFit', ['dm', 'tau', 'dm_unc', 'tau_unc', 'rms', 'rchisq'])\n", (577, 640), False, 'from collections import namedtuple\n'), ((656, 756), 'collections.namedtuple', 'namedtuple', (['"""Color"""', "['t', 'clusters', 'm_filter', 'm', 'm_unc', 'c', 'c_unc', 'avg', 'avg_unc']"], {}), "('Color', ['t', 'clusters', 'm_filter', 'm', 'm_unc', 'c',\n 'c_unc', 'avg', 'avg_unc'])\n", (666, 756), False, 'from collections import namedtuple\n'), ((3012, 3026), 'numpy.array', 'np.array', (['filt'], {}), '(filt)\n', (3020, 3026), True, 'import numpy as np\n'), ((7309, 7402), 'scipy.cluster.hierarchy.fclusterdata', 'hierarchy.fclusterdata', (["self.eph['date'].mjd[:, np.newaxis]", 'max_dt'], {'criterion': '"""distance"""'}), "(self.eph['date'].mjd[:, np.newaxis], max_dt,\n criterion='distance')\n", (7331, 7402), False, 'from scipy.cluster import hierarchy\n'), ((7618, 7637), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (7627, 7637), True, 'import numpy as np\n'), ((8858, 8876), 'astropy.units.Quantity', 'u.Quantity', (['m_mean'], {}), '(m_mean)\n', (8868, 8876), True, 'import astropy.units as u\n'), ((8898, 8920), 'astropy.units.Quantity', 'u.Quantity', (['m_mean_unc'], {}), '(m_mean_unc)\n', (8908, 8920), True, 'import astropy.units as u\n'), ((8935, 8950), 'astropy.units.Quantity', 'u.Quantity', (['bmr'], {}), '(bmr)\n', (8945, 8950), True, 'import astropy.units as u\n'), ((8969, 8988), 'astropy.units.Quantity', 'u.Quantity', (['bmr_unc'], {}), '(bmr_unc)\n', (8979, 8988), True, 'import astropy.units as u\n'), ((9007, 9060), 'numpy.average', 'np.average', (['bmr'], {'weights': '(bmr_unc ** -2)', 'returned': '(True)'}), '(bmr, weights=bmr_unc ** -2, returned=True)\n', (9017, 9060), True, 'import numpy as np\n'), ((16435, 16483), 'scipy.optimize.leastsq', 'leastsq', (['chi', 'guess'], {'args': 'args', 'full_output': '(True)'}), '(chi, guess, args=args, full_output=True)\n', (16442, 16483), False, 'from scipy.optimize import leastsq\n'), ((3125, 3143), 'numpy.array', 'np.array', (['fit_mask'], {}), '(fit_mask)\n', (3133, 3143), True, 'import numpy as np\n'), ((3374, 3409), 'logging.getLogger', 'logging.getLogger', (['"""CometaryTrends"""'], {}), "('CometaryTrends')\n", (3391, 3409), False, 'import logging\n'), ((7941, 8027), 'numpy.average', 'np.average', (['self.m_original[b * i]'], {'weights': '(self.m_unc[b * i] ** -2)', 'returned': '(True)'}), '(self.m_original[b * i], weights=self.m_unc[b * i] ** -2,\n returned=True)\n', (7951, 8027), True, 'import numpy as np\n'), ((8138, 8224), 'numpy.average', 'np.average', (['self.m_original[r * i]'], {'weights': '(self.m_unc[r * i] ** -2)', 'returned': '(True)'}), '(self.m_original[r * i], weights=self.m_unc[r * i] ** -2,\n returned=True)\n', (8148, 8224), True, 'import numpy as np\n'), ((9148, 9171), 'astropy.time.Time', 'Time', (['mjd'], {'format': '"""mjd"""'}), "(mjd, format='mjd')\n", (9152, 9171), False, 'from astropy.time import Time\n'), ((12255, 12307), 'numpy.ma.average', 'np.ma.average', (['m'], {'weights': '(m_unc ** -2)', 'returned': '(True)'}), '(m, weights=m_unc ** -2, returned=True)\n', (12268, 12307), True, 'import numpy as np\n'), 
((12480, 12513), 'numpy.round', 'np.round', (['(baseline.value / unc)', '(1)'], {}), '(baseline.value / unc, 1)\n', (12488, 12513), True, 'import numpy as np\n'), ((14822, 14851), 'numpy.std', 'np.std', (['residuals[~mask].data'], {}), '(residuals[~mask].data)\n', (14828, 14851), True, 'import numpy as np\n'), ((16510, 16523), 'numpy.diag', 'np.diag', (['r[1]'], {}), '(r[1])\n', (16517, 16523), True, 'import numpy as np\n'), ((16799, 16828), 'numpy.std', 'np.std', (['residuals[~mask].data'], {}), '(residuals[~mask].data)\n', (16805, 16828), True, 'import numpy as np\n'), ((3797, 3809), 'numpy.unique', 'np.unique', (['n'], {}), '(n)\n', (3806, 3809), True, 'import numpy as np\n'), ((8329, 8353), 'numpy.hypot', 'np.hypot', (['wb_unc', 'wr_unc'], {}), '(wb_unc, wr_unc)\n', (8337, 8353), True, 'import numpy as np\n'), ((8711, 8735), 'numpy.hypot', 'np.hypot', (['wb_unc', 'wr_unc'], {}), '(wb_unc, wr_unc)\n', (8719, 8735), True, 'import numpy as np\n'), ((11967, 11987), 'numpy.isfinite', 'np.isfinite', (['Hy.data'], {}), '(Hy.data)\n', (11978, 11987), True, 'import numpy as np\n'), ((12003, 12015), 'numpy.sum', 'np.sum', (['good'], {}), '(good)\n', (12009, 12015), True, 'import numpy as np\n'), ((12041, 12079), 'astropy.stats.sigma_clip', 'sigma_clip', (['Hy[good].data'], {'sigma': 'sigma'}), '(Hy[good].data, sigma=sigma)\n', (12051, 12079), False, 'from astropy.stats import sigma_clip\n'), ((14875, 14931), 'numpy.sum', 'np.sum', (['((residuals[~mask].data / self.m_unc[~mask]) ** 2)'], {}), '((residuals[~mask].data / self.m_unc[~mask]) ** 2)\n', (14881, 14931), True, 'import numpy as np\n'), ((14954, 14967), 'numpy.sum', 'np.sum', (['(~mask)'], {}), '(~mask)\n', (14960, 14967), True, 'import numpy as np\n'), ((16123, 16140), 'numpy.exp', 'np.exp', (['(-dt / tau)'], {}), '(-dt / tau)\n', (16129, 16140), True, 'import numpy as np\n'), ((16851, 16907), 'numpy.sum', 'np.sum', (['((residuals[~mask].data / self.m_unc[~mask]) ** 2)'], {}), '((residuals[~mask].data / self.m_unc[~mask]) ** 2)\n', (16857, 16907), True, 'import numpy as np\n'), ((16929, 16942), 'numpy.sum', 'np.sum', (['(~mask)'], {}), '(~mask)\n', (16935, 16942), True, 'import numpy as np\n'), ((7766, 7778), 'numpy.any', 'np.any', (['b[i]'], {}), '(b[i])\n', (7772, 7778), True, 'import numpy as np\n'), ((7788, 7800), 'numpy.any', 'np.any', (['r[i]'], {}), '(r[i])\n', (7794, 7800), True, 'import numpy as np\n'), ((12405, 12452), 'numpy.sqrt', 'np.sqrt', (['(baseline_unc ** 2 + self.m_unc[i] ** 2)'], {}), '(baseline_unc ** 2 + self.m_unc[i] ** 2)\n', (12412, 12452), True, 'import numpy as np\n'), ((13074, 13088), 'numpy.isfinite', 'np.isfinite', (['m'], {}), '(m)\n', (13085, 13088), True, 'import numpy as np\n'), ((9382, 9398), 'numpy.exp', 'np.exp', (['(-a.value)'], {}), '(-a.value)\n', (9388, 9398), True, 'import numpy as np\n'), ((9571, 9587), 'numpy.exp', 'np.exp', (['(-a.value)'], {}), '(-a.value)\n', (9577, 9587), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
import chainer
from siam_rpn.general.eval_sot_vot import eval_sot_vot
from siam_rpn.siam_rpn import SiamRPN
from siam_rpn.siam_rpn_tracker import SiamRPNTracker
from siam_rpn.siam_mask_tracker import SiamMaskTracker
from siam_rpn.general.vot_tracking_dataset import VOTTrackingDataset
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from chainer import iterators
from siam_rpn.general.predictor_with_gt import PredictorWithGT
def collate_images_from_same_video(data, used_ids=None):
imgs = data.slice[:, 'img']
polys = data.slice[:, 'poly']
video_ids = data.slice[:, 'video_id']
frame_ids = data.slice[:, 'frame_id']
if used_ids is None:
used_ids = np.unique(video_ids)
        used_ids = np.sort(used_ids)
videos = []
video_polys = []
for video_id in used_ids:
indices = np.where(video_ids == video_id)[0]
the_frame_ids = list(frame_ids.slice[indices])
assert all(list(the_frame_ids) == np.arange(len(the_frame_ids)))
videos.append(imgs.slice[indices])
video_polys.append(polys[indices])
return videos, video_polys
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pretrained-model', type=str)
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--mask', action='store_true')
args = parser.parse_args()
data = VOTTrackingDataset('data')
if args.mask:
model = SiamRPN(multi_scale=False, mask=True)
chainer.serializers.load_npz(args.pretrained_model, model)
tracker = SiamMaskTracker(model)
else:
model = SiamRPN()
chainer.serializers.load_npz(args.pretrained_model, model)
tracker = SiamRPNTracker(model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
tracker.to_gpu()
videos, video_polys = collate_images_from_same_video(
data, used_ids=None)
video_dataset = chainer.datasets.TupleDataset(videos, video_polys)
it = iterators.SerialIterator(video_dataset, 1, False, False)
in_values, out_values, rest_values = apply_to_iterator(
PredictorWithGT(tracker, mask=args.mask), it,
n_input=2, hook=ProgressHook(len(video_dataset)))
# delete unused iterators explicitly
imgs, video_polys = in_values
pred_bboxes, pred_statuses, sizes = out_values
del imgs
video_polys = list(video_polys)
pred_bboxes = list(pred_bboxes)
pred_statuses = list(pred_statuses)
sizes = list(sizes)
np.savez(
'eval_sot_out.npz',
pred_bboxes=pred_bboxes, pred_statuses=pred_statuses,
gt_polys=video_polys, sizes=sizes)
result = eval_sot_vot(pred_bboxes, pred_statuses, video_polys, sizes)
print(result['eao'], result['accuracy'], result['robustness'])
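    # Illustrative invocation (the script name is a placeholder):
    #   python eval_sot.py --pretrained-model siam_rpn_pretrained.npz --gpu 0 --mask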
|
[
"siam_rpn.siam_rpn.SiamRPN",
"chainer.datasets.TupleDataset",
"argparse.ArgumentParser",
"chainer.serializers.load_npz",
"siam_rpn.general.predictor_with_gt.PredictorWithGT",
"siam_rpn.siam_mask_tracker.SiamMaskTracker",
"siam_rpn.general.eval_sot_vot.eval_sot_vot",
"numpy.sort",
"numpy.where",
"chainer.iterators.SerialIterator",
"numpy.savez",
"chainer.cuda.get_device_from_id",
"siam_rpn.siam_rpn_tracker.SiamRPNTracker",
"numpy.unique",
"siam_rpn.general.vot_tracking_dataset.VOTTrackingDataset"
] |
[((1213, 1238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1236, 1238), False, 'import argparse\n'), ((1448, 1474), 'siam_rpn.general.vot_tracking_dataset.VOTTrackingDataset', 'VOTTrackingDataset', (['"""data"""'], {}), "('data')\n", (1466, 1474), False, 'from siam_rpn.general.vot_tracking_dataset import VOTTrackingDataset\n'), ((2011, 2061), 'chainer.datasets.TupleDataset', 'chainer.datasets.TupleDataset', (['videos', 'video_polys'], {}), '(videos, video_polys)\n', (2040, 2061), False, 'import chainer\n'), ((2072, 2128), 'chainer.iterators.SerialIterator', 'iterators.SerialIterator', (['video_dataset', '(1)', '(False)', '(False)'], {}), '(video_dataset, 1, False, False)\n', (2096, 2128), False, 'from chainer import iterators\n'), ((2582, 2704), 'numpy.savez', 'np.savez', (['"""eval_sot_out.npz"""'], {'pred_bboxes': 'pred_bboxes', 'pred_statuses': 'pred_statuses', 'gt_polys': 'video_polys', 'sizes': 'sizes'}), "('eval_sot_out.npz', pred_bboxes=pred_bboxes, pred_statuses=\n pred_statuses, gt_polys=video_polys, sizes=sizes)\n", (2590, 2704), True, 'import numpy as np\n'), ((2740, 2800), 'siam_rpn.general.eval_sot_vot.eval_sot_vot', 'eval_sot_vot', (['pred_bboxes', 'pred_statuses', 'video_polys', 'sizes'], {}), '(pred_bboxes, pred_statuses, video_polys, sizes)\n', (2752, 2800), False, 'from siam_rpn.general.eval_sot_vot import eval_sot_vot\n'), ((758, 778), 'numpy.unique', 'np.unique', (['video_ids'], {}), '(video_ids)\n', (767, 778), True, 'import numpy as np\n'), ((787, 804), 'numpy.sort', 'np.sort', (['used_ids'], {}), '(used_ids)\n', (794, 804), True, 'import numpy as np\n'), ((1510, 1547), 'siam_rpn.siam_rpn.SiamRPN', 'SiamRPN', ([], {'multi_scale': '(False)', 'mask': '(True)'}), '(multi_scale=False, mask=True)\n', (1517, 1547), False, 'from siam_rpn.siam_rpn import SiamRPN\n'), ((1556, 1614), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.pretrained_model', 'model'], {}), '(args.pretrained_model, model)\n', (1584, 1614), False, 'import chainer\n'), ((1633, 1655), 'siam_rpn.siam_mask_tracker.SiamMaskTracker', 'SiamMaskTracker', (['model'], {}), '(model)\n', (1648, 1655), False, 'from siam_rpn.siam_mask_tracker import SiamMaskTracker\n'), ((1682, 1691), 'siam_rpn.siam_rpn.SiamRPN', 'SiamRPN', ([], {}), '()\n', (1689, 1691), False, 'from siam_rpn.siam_rpn import SiamRPN\n'), ((1700, 1758), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.pretrained_model', 'model'], {}), '(args.pretrained_model, model)\n', (1728, 1758), False, 'import chainer\n'), ((1777, 1798), 'siam_rpn.siam_rpn_tracker.SiamRPNTracker', 'SiamRPNTracker', (['model'], {}), '(model)\n', (1791, 1798), False, 'from siam_rpn.siam_rpn_tracker import SiamRPNTracker\n'), ((2198, 2238), 'siam_rpn.general.predictor_with_gt.PredictorWithGT', 'PredictorWithGT', (['tracker'], {'mask': 'args.mask'}), '(tracker, mask=args.mask)\n', (2213, 2238), False, 'from siam_rpn.general.predictor_with_gt import PredictorWithGT\n'), ((891, 922), 'numpy.where', 'np.where', (['(video_ids == video_id)'], {}), '(video_ids == video_id)\n', (899, 922), True, 'import numpy as np\n'), ((1830, 1871), 'chainer.cuda.get_device_from_id', 'chainer.cuda.get_device_from_id', (['args.gpu'], {}), '(args.gpu)\n', (1861, 1871), False, 'import chainer\n')]
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
AMCT_CAFFE sample of accuracy_based_auto_calibration based on MobileNet V2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
from pathlib import Path
import numpy as np
import cv2 # pylint: disable=E0401
import datasets
MODEL_INPUT_BLOB_NAME = 'data'
MODEL_OUTPUT_BLOB_NAME = 'prob'
PATH = os.path.split(os.path.realpath(__file__))[0]
PATH = os.path.realpath(os.path.join(PATH, '..'))
TMP = os.path.join(PATH, 'tmp')
RESULT = os.path.join(PATH, 'results')
BATCH_SIZE = 32
SCALE = 0.017
CROP_SIZE = 224
MEAN_FILE = None
MEAN_VALUE = [103.94, 116.78, 123.68]
DATA_DIR = os.path.join(PATH, 'data/images')
LABEL_FILE = os.path.join(DATA_DIR, 'image_label.txt')
# Need to specify the dir of caffe and dataset (ImageNet)
CAFFE_DIR = ''
LMDB_DATASET_DIR = ''
CALIBRATION_BATCH_NUM = 2
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Mobilenet_v2 demo')
parser.add_argument('--model_file', dest='model_file',
help='Specify the model file of caffe model.',
default='./model/mobilenet_v2_deploy.prototxt',
type=str)
parser.add_argument('--weights_file', dest='weights_file',
help='Specify the weights file of caffe model.',
default="./model/mobilenet_v2.caffemodel",
type=str)
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=None, type=int)
parser.add_argument('--iterations', dest='iterations',
help='Specify iterations of test',
default=1000, type=int)
parser.add_argument('--caffe_dir', dest='caffe_dir',
help='Specify the dir of caffe',
default=CAFFE_DIR,
type=str)
parser.add_argument('--pre_test', dest='pre_test',
help='Do test with amct caffe calibration or not',
action='store_false')
parser.add_argument('--dataset',
dest='dataset',
help='The path of benchmark dataset.',
default=LMDB_DATASET_DIR,
type=str)
args = parser.parse_args()
return args
def args_check(args):
"""check args"""
# --model_file
if args.model_file is None:
raise RuntimeError('Must specify a caffe deploy prototxt file')
model_file = os.path.realpath(args.model_file)
if not Path(model_file).exists():
raise RuntimeError('Must specify a caffe deploy prototxt file')
# --weights_file
if args.weights_file is None:
raise RuntimeError('Must specify a caffe caffemodel file')
weights_file = os.path.realpath(args.weights_file)
if not Path(weights_file).exists():
raise RuntimeError('Must specify a caffe caffemodel file')
# --iterations
if args.iterations > 1500:
raise RuntimeError('Max iterations on sample dataset is 1500')
def args_check_caffe_dir(args):
"""check args of caffe dir"""
if args.caffe_dir is None:
raise RuntimeError('Must specify a caffe framework dir')
caffe_dir = os.path.realpath(args.caffe_dir)
if not Path(caffe_dir).exists():
raise RuntimeError('Must specify a caffe framework dir')
caffe_exec_bin = os.path.join(caffe_dir, 'build/tools/caffe')
if not Path(caffe_exec_bin).exists():
raise RuntimeError('Must make caffe before execute demo')
pycaffe_file = os.path.join(caffe_dir, 'python/caffe/pycaffe.py')
if not Path(pycaffe_file).exists():
raise RuntimeError('Must make pycaffe before execute demo')
def add_path(path):
"""Add path to env"""
if path not in sys.path:
sys.path.insert(0, path)
QUANT_ARGS = parse_args()
args_check(QUANT_ARGS)
args_check_caffe_dir(QUANT_ARGS)
add_path(os.path.join(QUANT_ARGS.caffe_dir, 'python'))
import caffe # pylint: disable=E0401, C0413
import amct_caffe as amct # pylint: disable=E0401, C0413
from amct_caffe.common.auto_calibration import \
AutoCalibrationEvaluatorBase # pylint: disable=E0401, C0413
def get_blobs_from_im(data_dir, imgs, batch_size):
"""Read image files to blobs [3, 256, 256]"""
if batch_size != len(imgs):
raise RuntimeError('batch_size:{} != len(imgs):{}'.format(
batch_size, len(imgs)))
blobs_data = np.zeros((batch_size, 3, 256, 256), np.uint8)
for index in range(batch_size):
im_file = os.path.join(data_dir, imgs[index])
im_data = cv2.imread(im_file)
im_data = cv2.resize(
im_data, (256, 256), interpolation=cv2.INTER_CUBIC)
im_data = im_data.swapaxes(0, 2)
im_data = im_data.swapaxes(1, 2)
blobs_data[index, :, :, :] = im_data
return blobs_data
def get_labels_from_txt():
"""Read all images' name and label from label_file"""
images = []
labels = []
with open(LABEL_FILE, 'r') as label_file:
lines = label_file.readlines()
for line in lines:
images.append(line.split(' ')[0])
labels.append(int(line.split(' ')[1]))
return images, labels
def img_preprocess(blobs_data, mean_value, crop_size):
"""Do image data pre-process"""
# crop image[height, width] to [crop_size, crop_size]
height = blobs_data.shape[2]
width = blobs_data.shape[3]
h_off = int((height - crop_size) / 2)
w_off = int((width - crop_size) / 2)
crop_data = blobs_data[:, :, h_off:(height - h_off), w_off:(width - w_off)]
# trans uint8 image data to float
crop_data = crop_data.astype(np.float32, copy=False)
# do channel-wise reduce mean value
for channel in range(crop_data.shape[1]):
crop_data[:, channel, :, :] -= mean_value[channel]
    # multiply by the scale value
crop_data *= SCALE
return crop_data
def img_postprocess(probs, labels):
"""Do image post-process"""
# calculate top1 and top5 accuracy
top1_get = 0
top5_get = 0
if len(probs.shape) == 4:
probs = probs.reshape((probs.shape[0], probs.shape[1]))
prob_size = probs.shape[1]
for index, label in enumerate(labels):
top5_record = (probs[index, :].argsort())[prob_size - 5:prob_size]
if label == top5_record[-1]:
top1_get += 1
top5_get += 1
elif label in top5_record:
top5_get += 1
return float(top1_get) / len(labels), float(top5_get) / len(labels)
def run_caffe_model(model_file, weights_file, iterations):
"""run caffe model forward"""
net = caffe.Net(model_file, weights_file, caffe.TEST)
top1_total = 0
top5_total = 0
images, labels = get_labels_from_txt()
for iter_num in range(iterations):
blobs_data = get_blobs_from_im(
DATA_DIR,
images[iter_num * BATCH_SIZE:(iter_num + 1) * BATCH_SIZE],
BATCH_SIZE)
blobs_data = img_preprocess(blobs_data, [104, 117, 123], 224)
forward_kwargs = {MODEL_INPUT_BLOB_NAME: blobs_data}
blobs_out = net.forward(**forward_kwargs)
top1, top5 = img_postprocess(
blobs_out[MODEL_OUTPUT_BLOB_NAME],
labels[iter_num * BATCH_SIZE:(iter_num + 1) * BATCH_SIZE])
top1_total += top1
top5_total += top5
print('****************iteration:{}*****************'.format(iter_num))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
def do_benchmark_test(args, model_file, weights_file, iterations=1000):
""" Calc the accuracy on the lmdb dataset"""
net = caffe.Net(model_file, weights_file, caffe.TEST)
top1_total = 0
top5_total = 0
lmdb_data = datasets.LMDBData(args.dataset)
lmdb_data.set_scale(SCALE)
lmdb_data.set_crop_size(CROP_SIZE)
if MEAN_FILE is not None:
lmdb_data.set_mean_file(MEAN_FILE)
else:
lmdb_data.set_mean_value(MEAN_VALUE)
for index in range(iterations):
data, labels = lmdb_data.get_blobs(BATCH_SIZE)
forward_kwargs = {MODEL_INPUT_BLOB_NAME: data}
blobs_out = net.forward(**forward_kwargs)
top1, top5 = img_postprocess(blobs_out[MODEL_OUTPUT_BLOB_NAME], labels)
top1_total += top1
top5_total += top5
print('*****************iteration:{}******************'.format(index))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
return top1_total / iterations
class AutoCalibrationEvaluator(AutoCalibrationEvaluatorBase):
"""auto calibration evaluator"""
def __init__(self, target_loss, batch_num, args):
"""
evaluate_batch_num is the needed batch num for evaluating
the model. Larger evaluate_batch_num is recommended, because
the evaluation metric of input model can be more precise
with larger eval dataset.
"""
self.target_loss = target_loss
self.batch_num = batch_num
self.args = args
super().__init__()
def calibration(self, model_file, weights_file):
""""
Function:
do the calibration with model
Parameter:
model_file: the prototxt model define file of caffe model
weights_file: the binary caffemodel file of caffe model
"""
run_caffe_model(model_file, weights_file, self.batch_num)
def evaluate(self, model_file, weights_file):
""""
Function:
evaluate the model with batch_num of data, return the eval
metric of the input model, such as top1 for classification
model, mAP for detection model and so on.
Parameter:
model_file: the prototxt model define file of caffe model
weights_file: the binary caffemodel file of caffe model
"""
return do_benchmark_test(self.args, model_file, weights_file,
self.args.iterations)
def metric_eval(self, original_metric, new_metric):
"""
Function:
whether the metric of new fake quant model can satisfy the
requirement
Parameter:
original_metric: the metric of non quantized model
new_metric: the metric of new quantized model
"""
        # the loss of top1 acc needs to be less than 0.2%
loss = original_metric - new_metric
if loss * 100 < self.target_loss:
return True, loss
return False, loss
def main(args):
"""main function"""
if args.gpu_id is not None:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
amct.set_gpu_mode()
else:
caffe.set_mode_cpu()
# User model files
model_file = os.path.realpath(args.model_file)
weights_file = os.path.realpath(args.weights_file)
# Run pre model test
if not args.pre_test:
do_benchmark_test(args, model_file, weights_file, args.iterations)
print('[AMCT][INFO]Run Mobilenet_v2 without quantize success!')
return
# step 1: create the quant config file
config_json_file = './config.json'
skip_layers = []
batch_num = CALIBRATION_BATCH_NUM
activation_offset = True
amct.create_quant_config(config_json_file, model_file, weights_file,
skip_layers, batch_num, activation_offset)
scale_offset_record_file = os.path.join(TMP, 'scale_offset_record.txt')
result_path = os.path.join(RESULT, 'MobileNetV2')
evaluator = AutoCalibrationEvaluator(target_loss=0.2, batch_num=batch_num,
args=args)
# step 2: start the accuracy_based_auto_calibration process
amct.accuracy_based_auto_calibration(
args.model_file,
args.weights_file,
evaluator,
config_json_file,
scale_offset_record_file,
result_path)
if __name__ == '__main__':
main(QUANT_ARGS)
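# Illustrative invocation (script name and paths are placeholders):
#   python mobilenet_v2_auto_calibration.py --gpu 0 \
#       --caffe_dir /path/to/caffe --dataset /path/to/imagenet_lmdb \
#       --iterations 1000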
|
[
"caffe.set_mode_gpu",
"argparse.ArgumentParser",
"amct_caffe.set_gpu_mode",
"amct_caffe.create_quant_config",
"os.path.realpath",
"numpy.zeros",
"sys.path.insert",
"caffe.set_mode_cpu",
"amct_caffe.accuracy_based_auto_calibration",
"cv2.imread",
"caffe.set_device",
"pathlib.Path",
"datasets.LMDBData",
"caffe.Net",
"os.path.join",
"cv2.resize"
] |
[((1070, 1095), 'os.path.join', 'os.path.join', (['PATH', '"""tmp"""'], {}), "(PATH, 'tmp')\n", (1082, 1095), False, 'import os\n'), ((1105, 1134), 'os.path.join', 'os.path.join', (['PATH', '"""results"""'], {}), "(PATH, 'results')\n", (1117, 1134), False, 'import os\n'), ((1248, 1281), 'os.path.join', 'os.path.join', (['PATH', '"""data/images"""'], {}), "(PATH, 'data/images')\n", (1260, 1281), False, 'import os\n'), ((1295, 1336), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""image_label.txt"""'], {}), "(DATA_DIR, 'image_label.txt')\n", (1307, 1336), False, 'import os\n'), ((1038, 1062), 'os.path.join', 'os.path.join', (['PATH', '""".."""'], {}), "(PATH, '..')\n", (1050, 1062), False, 'import os\n'), ((1525, 1581), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mobilenet_v2 demo"""'}), "(description='Mobilenet_v2 demo')\n", (1548, 1581), False, 'import argparse\n'), ((3173, 3206), 'os.path.realpath', 'os.path.realpath', (['args.model_file'], {}), '(args.model_file)\n', (3189, 3206), False, 'import os\n'), ((3458, 3493), 'os.path.realpath', 'os.path.realpath', (['args.weights_file'], {}), '(args.weights_file)\n', (3474, 3493), False, 'import os\n'), ((3902, 3934), 'os.path.realpath', 'os.path.realpath', (['args.caffe_dir'], {}), '(args.caffe_dir)\n', (3918, 3934), False, 'import os\n'), ((4058, 4102), 'os.path.join', 'os.path.join', (['caffe_dir', '"""build/tools/caffe"""'], {}), "(caffe_dir, 'build/tools/caffe')\n", (4070, 4102), False, 'import os\n'), ((4230, 4280), 'os.path.join', 'os.path.join', (['caffe_dir', '"""python/caffe/pycaffe.py"""'], {}), "(caffe_dir, 'python/caffe/pycaffe.py')\n", (4242, 4280), False, 'import os\n'), ((4592, 4636), 'os.path.join', 'os.path.join', (['QUANT_ARGS.caffe_dir', '"""python"""'], {}), "(QUANT_ARGS.caffe_dir, 'python')\n", (4604, 4636), False, 'import os\n'), ((5111, 5156), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, 256, 256)', 'np.uint8'], {}), '((batch_size, 3, 256, 256), np.uint8)\n', (5119, 5156), True, 'import numpy as np\n'), ((7289, 7336), 'caffe.Net', 'caffe.Net', (['model_file', 'weights_file', 'caffe.TEST'], {}), '(model_file, weights_file, caffe.TEST)\n', (7298, 7336), False, 'import caffe\n'), ((8433, 8480), 'caffe.Net', 'caffe.Net', (['model_file', 'weights_file', 'caffe.TEST'], {}), '(model_file, weights_file, caffe.TEST)\n', (8442, 8480), False, 'import caffe\n'), ((8536, 8567), 'datasets.LMDBData', 'datasets.LMDBData', (['args.dataset'], {}), '(args.dataset)\n', (8553, 8567), False, 'import datasets\n'), ((11696, 11729), 'os.path.realpath', 'os.path.realpath', (['args.model_file'], {}), '(args.model_file)\n', (11712, 11729), False, 'import os\n'), ((11749, 11784), 'os.path.realpath', 'os.path.realpath', (['args.weights_file'], {}), '(args.weights_file)\n', (11765, 11784), False, 'import os\n'), ((12173, 12288), 'amct_caffe.create_quant_config', 'amct.create_quant_config', (['config_json_file', 'model_file', 'weights_file', 'skip_layers', 'batch_num', 'activation_offset'], {}), '(config_json_file, model_file, weights_file,\n skip_layers, batch_num, activation_offset)\n', (12197, 12288), True, 'import amct_caffe as amct\n'), ((12346, 12390), 'os.path.join', 'os.path.join', (['TMP', '"""scale_offset_record.txt"""'], {}), "(TMP, 'scale_offset_record.txt')\n", (12358, 12390), False, 'import os\n'), ((12409, 12444), 'os.path.join', 'os.path.join', (['RESULT', '"""MobileNetV2"""'], {}), "(RESULT, 'MobileNetV2')\n", (12421, 12444), False, 'import os\n'), ((12645, 12789), 
'amct_caffe.accuracy_based_auto_calibration', 'amct.accuracy_based_auto_calibration', (['args.model_file', 'args.weights_file', 'evaluator', 'config_json_file', 'scale_offset_record_file', 'result_path'], {}), '(args.model_file, args.weights_file,\n evaluator, config_json_file, scale_offset_record_file, result_path)\n', (12681, 12789), True, 'import amct_caffe as amct\n'), ((983, 1009), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (999, 1009), False, 'import os\n'), ((4474, 4498), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (4489, 4498), False, 'import sys\n'), ((5211, 5246), 'os.path.join', 'os.path.join', (['data_dir', 'imgs[index]'], {}), '(data_dir, imgs[index])\n', (5223, 5246), False, 'import os\n'), ((5265, 5284), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (5275, 5284), False, 'import cv2\n'), ((5303, 5365), 'cv2.resize', 'cv2.resize', (['im_data', '(256, 256)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(im_data, (256, 256), interpolation=cv2.INTER_CUBIC)\n', (5313, 5365), False, 'import cv2\n'), ((11529, 11549), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (11547, 11549), False, 'import caffe\n'), ((11558, 11587), 'caffe.set_device', 'caffe.set_device', (['args.gpu_id'], {}), '(args.gpu_id)\n', (11574, 11587), False, 'import caffe\n'), ((11596, 11615), 'amct_caffe.set_gpu_mode', 'amct.set_gpu_mode', ([], {}), '()\n', (11613, 11615), True, 'import amct_caffe as amct\n'), ((11634, 11654), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (11652, 11654), False, 'import caffe\n'), ((3218, 3234), 'pathlib.Path', 'Path', (['model_file'], {}), '(model_file)\n', (3222, 3234), False, 'from pathlib import Path\n'), ((3505, 3523), 'pathlib.Path', 'Path', (['weights_file'], {}), '(weights_file)\n', (3509, 3523), False, 'from pathlib import Path\n'), ((3946, 3961), 'pathlib.Path', 'Path', (['caffe_dir'], {}), '(caffe_dir)\n', (3950, 3961), False, 'from pathlib import Path\n'), ((4114, 4134), 'pathlib.Path', 'Path', (['caffe_exec_bin'], {}), '(caffe_exec_bin)\n', (4118, 4134), False, 'from pathlib import Path\n'), ((4292, 4310), 'pathlib.Path', 'Path', (['pycaffe_file'], {}), '(pycaffe_file)\n', (4296, 4310), False, 'from pathlib import Path\n')]
|
from pywim.utils.stats import iqr
import numpy as np
import pandas as pd
import peakutils
def sensors_estimation(
signal_data: pd.DataFrame, sensors_delta_distance: list
) -> [np.array]:
"""
:param signal_data:
:param sensors_delta_distance:
:return:
"""
# x axis: time
x = signal_data.index.values
sensors_peak_time = []
sensors_delta_time = [None]
for k in signal_data.keys():
# y axis: volts
y = signal_data[k].values
indexes = peakutils.indexes(y, thres=0.5, min_dist=30)
sensors_peak_time.append(x[indexes])
for i in range(1, len(sensors_peak_time)):
sensors_delta_time.append(
sensors_peak_time[i] - sensors_peak_time[i - 1]
)
# the information about first sensor should be equal to the second sensor
sensors_delta_time[0] = sensors_delta_time[1]
sensors_delta_speed = []
for i in range(len(sensors_delta_distance)):
sensors_delta_speed.append(
sensors_delta_distance[i] / sensors_delta_time[i]
)
# the information about first sensor should be equal to the second sensor
sensors_delta_speed[0] = sensors_delta_speed[1]
return sensors_delta_speed
def average_estimation(
signal_data: pd.DataFrame=None,
sensors_delta_distance: list=None,
sensors_delta_speed: list=None
) -> float:
"""
:param signal_data:
:param sensors_delta_distance:
:param sensors_delta_speed:
:return:
"""
if not sensors_delta_speed:
sensors_delta_speed = sensors_estimation(
signal_data, sensors_delta_distance
)
speed_values = np.array([])
for sensor_speeds in sensors_delta_speed[1:]:
speed_values = np.concatenate((speed_values, sensor_speeds))
return iqr.reject_outliers(pd.Series(speed_values)).mean()
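# Illustrative usage (column names and the sensor spacing are made up):
#   signals = pd.DataFrame({'sensor1': y1, 'sensor2': y2}, index=t)
#   speed = average_estimation(signal_data=signals,
#                              sensors_delta_distance=[3.0, 3.0])
# The first distance entry only serves as a placeholder for the first sensor.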
|
[
"peakutils.indexes",
"numpy.array",
"pandas.Series",
"numpy.concatenate"
] |
[((1659, 1671), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1667, 1671), True, 'import numpy as np\n'), ((506, 550), 'peakutils.indexes', 'peakutils.indexes', (['y'], {'thres': '(0.5)', 'min_dist': '(30)'}), '(y, thres=0.5, min_dist=30)\n', (523, 550), False, 'import peakutils\n'), ((1746, 1791), 'numpy.concatenate', 'np.concatenate', (['(speed_values, sensor_speeds)'], {}), '((speed_values, sensor_speeds))\n', (1760, 1791), True, 'import numpy as np\n'), ((1824, 1847), 'pandas.Series', 'pd.Series', (['speed_values'], {}), '(speed_values)\n', (1833, 1847), True, 'import pandas as pd\n')]
|
"""@package MuSCADeT
"""
from scipy import signal as scp
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pf
import scipy.ndimage.filters as med
import MuSCADeT.pca_ring_spectrum as pcas
import MuSCADeT.wave_transform as mw
NOISE_TAB = np.array([ 0.8907963 , 0.20066385, 0.08550751, 0.04121745, 0.02042497,
0.01018976, 0.00504662, 0.00368314])
NOISE_TAB_2G = np.array([ 0.94288346, 0.22998949, 0.10029194, 0.04860995, 0.02412084,
0.01498695])
def mMCA(img, A,kmax, niter,mode = 'PCA', PCA = [2,40], harder = 0, pos = False,threshmode = 'mom',lvl = 0, PSF = None,
soft = False, reweighting = 'none', alpha = [0,0], npca = 64, mask = [0,0], plot = False, noise_map = [0,0],
newwave=1):
"""
mMCA runs the MuSCADeT algorithm over a cube of multi-band images.
INPUTS:
img: multiband cube with size nbxn1xn2 where nb is the number of bands and n1xn2,
the size of the images
A: the mixing matrix. if mode is set to 'PCA', A will be ignored and can be set to 0
kmax: detection threshold in units of noise standard deviation usually chosen between 3 and 5
niter: number of iterations of the MuSCADeT algorithm
OUTPUTS:
S: extracted sources
        A: mixing matrix, either given by the user or estimated by PCA with option mode='PCA'
alpha: angles in PCA space to identify pixels with same SEDs
OPTIONS:
mode: if set to 'PCA', the mixing matrix A will be estimated from PCA decomposition of the SEDs
        PCA: parameters for PCA sensitivity. If mode is set to 'PCA', the PCA estimator will take PCA[0]
        as the number of sources to be extracted and PCA[1] as a sensitivity parameter to discriminate between
        sources. Values between 5 and 30 are usually recommended.
        harder: if set to 1, threshold levels are raised, which helps compensate for correlated noise
        pos: if set to True, the output of the hard thresholding procedure is constrained to be positive
threshmode: if set to 'mom', adaptive method of moments is used at every iteration to decrease the threshold
lvl: number of wavelet levels to use in the decompositions, default is 6.
soft: if set to True, soft thresholding is used
        alpha: angles in degrees to feed the PCA finder. If set, the PCA finder will use pixels along the directions pointed by these angles in PCA space to estimate SEDs.
That option is particularly useful if automated PCA fails at clearly identifying different SEDs. This happens in case of high degrees of blending.
        mask: if parts of the band images are to be masked (e.g. stars in the FOV), the user can provide a mask with size n1xn2
with all pixels at one except for the masked pixels that should be set to 0.
npca: number of pixels in which images are downsampled to perform a fast PCA.
plot: set to true to display PCA coefficients of the SEDs. Set to False for automated mode
EXAMPLE:
        S, A = wine.MCA.mMCA(cube, A, 5, 10, PCA=[2,80], mode='PCA', harder=1)
"""
nb, n1, n2 = np.shape(img)
if lvl == 0:
lvl = int(np.log2(n1))
print("using lvl (including coarse scale !)", lvl)
if np.sum(mask) == 0:
mask = np.ones((n1,n2))
img = np.multiply(img,mask)
print("mode", mode)
if mode == 'PCA':
Apca = PCA_initialise(img.T, PCA[0], angle = PCA[1], alpha = alpha, npca = npca, plot = plot, newwave=newwave)
Apca = np.multiply(Apca,[1./np.sum(Apca,0)])
A = Apca
nb,ns = np.shape(A)
X = np.zeros((ns,n1*n2))
A = np.multiply(A,[1./np.sum(A,0)])
AT = A.T
mu = 2. / linorm(A, 10)
Y = np.reshape(img,(nb,n1*n2))
Ri = np.dot(AT,Y)
sigma_y = np.zeros(nb)
for i in range(nb):
sigma_y[i] = MAD(np.reshape(Y[i,:],(n1,n2)))
if PSF is not None:
PSFT = np.copy(PSF)
for ind in range(nb):
PSFT[ind,:,:] = PSF[ind,:,:].T
def PSF_apply(x):
y = np.copy(x)*0
for i in range(nb):
y[i,:,:] = scp.fftconvolve(x[i,:,:],PSF[i,:,:],mode = 'same')
return y
def PSFT_apply(x):
y = np.copy(x)*0
for i in range(nb):
y[i,:,:] = scp.fftconvolve(x[i,:,:],PSFT[i,:,:],mode = 'same')
return y
for i in range(nb):
sigma_y[i] = sigma_y[i]*np.sqrt(np.sum(PSFT[i,:,:]**2))
sigma = np.zeros(ns)
for i in range(ns):
sigma[i] = np.sqrt(np.sum( (AT[i,:]**2)*(sigma_y**2)))
    kmas = MOM(np.reshape(Ri,(ns,n1,n2)),sigma,lvl,newwave)#15#np.max(np.dot(1/(mu*np.dot(AT,Y),1),mu*np.dot(AT,Y)))
print(kmas)
step = (kmas-kmax)/(niter-5)
k = kmas
################FOR PLOT#############
th = np.ones((lvl,n1,n2))
th0 = np.multiply(th.T, NOISE_TAB[:lvl]).T * sigma[0]
th1 = np.multiply(th.T, NOISE_TAB[:lvl]).T * sigma[1]
per= np.zeros((ns,niter))
w = np.zeros((ns,lvl,n1,n2))
wmap = np.zeros((ns,lvl,n1,n2))
S = np.zeros((ns,n1*n2))
thmap = np.zeros((ns,lvl,n1,n2))
ks = np.zeros(niter)
sub = 0
reweight = 0
weight2 = 1
if np.sum(noise_map) != 0:
sig_map = np.dot(AT,np.reshape(noise_map,(nb,n1*n2)))
sigma = np.reshape(sig_map,(ns,n1,n2))
for i in range(niter):
if i % 10 == 0:
print(i)
AX = np.dot(A,X)
if PSF is not None:
AX = PSF_apply(AX.reshape((nb,n1,n2))).reshape((nb,n1*n2))
R = mu*np.dot(AT, PSFT_apply(np.reshape(Y-AX,(nb,n1,n2))).reshape(nb,n1*n2))
else:
R = mu*np.dot(AT, Y-AX)
X = np.real(X+R)
S = X
if threshmode == 'mom':
kmas = MOM(np.reshape(R,(ns,n1,n2)),sigma,lvl=lvl)
threshmom = np.max([kmas,kmax])
if threshmom < k:
k = threshmom
step = ((k-kmax)/(niter-i-6))
print('threshold from MOM',threshmom)
for j in range(ns):
kthr = np.max([kmax, k])
Sj,wmap = mr_filter(np.reshape(S[j,:],(n1,n2)),20,kthr,sigma[j],harder = harder, lvl = lvl,pos = pos,soft = soft, newwave=newwave)
S[j,:] = np.reshape(Sj,(n1*n2))
X = np.multiply(S,np.reshape(mask,(n1*n2)))
a = 1
ks[i] = kthr
k = k-step
S = np.reshape(S,(ns,n1,n2))
plt.plot(ks, linewidth = 5)
plt.xlabel('Iterations', fontsize=30)
plt.ylabel('k', fontsize=30)
plt.title('k = f(it)', fontsize = 50)
plt.show()
return S,A
def MOM(R, sigma, lvl=6 , newwave=1):
"""
    Estimates the best guess for a threshold from the method of moments
INPUTS:
R: multi-sources cube with size nsxn1xn2 where ns is the number of sources
and n1xn2, the size of an image
sigma: noise standard deviation
OUTPUTS:
k: threshold level
OPTIONS:
lvl: number of wavelet levels used in the decomposition, default is 6.
EXAMPLES
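      (illustrative) k = MOM(residual_cube, sigma, lvl=6), where residual_cube
      is a placeholder name for an array of shape (ns, n, n) and sigma holds
      one noise level per source, as in the main mMCA loop.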
"""
ns,n1,n2 = np.shape(R)
wmax = np.zeros((ns))
wm = np.zeros((ns,lvl))
w = np.zeros((ns,lvl,n1,n2))
for j in range(ns):
w[j,:,:,:], _ = mw.wave_transform(R[j,:,:],lvl, newwave=newwave, verbose=False)
for j in range(ns):
for l in range(lvl-1):
wm[j,l] = np.max(np.abs(w[j,l,:,:]))/NOISE_TAB[l]
wmax[j] = np.max(wm[j,:])
wmax[j] = wmax[j]/np.mean(sigma[j])
k = np.min(wmax)+(np.max(wmax)-np.min(wmax))/100
return k
def MM(R, sigma, lvl=6, newwave=1):
n1,n2 = np.shape(R)
wm = np.zeros((lvl))
w = np.zeros((lvl,n1,n2))
w[:,:,:], _ = mw.wave_transform(R,lvl, newwave=newwave, verbose=False)
for l in range(lvl-1):
wm[l] = np.max(np.abs(w[l,:,:]))/NOISE_TAB[l]
wmax = np.max(wm)/sigma
k = (wmax)-(wmax)/100
return k
def MAD(x):
"""
Estimates noise level in an image from Median Absolute Deviation
INPUTS:
x: image
OUTPUTS:
sigma: noise standard deviation
EXAMPLES
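      (illustrative) sigma = MAD(image) returns a noise estimate for a single
      2-D band; mMCA uses it on each reshaped band of the input cube.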
"""
meda = med.median_filter(x,size = (3,3))
medfil = np.abs(x-meda)
sh = np.shape(x)
sigma = 1.48*np.median((medfil))
return sigma
def mr_filter(img, niter, k, sigma,lvl = 6, pos = False, harder = 0,mulweight = 1, subweight = 0, addweight = 0, soft = False, newwave=1):
"""
Computes wavelet iterative filtering on an image.
INPUTS:
img: image to be filtered
niter: number of iterations (10 is usually recommended)
k: threshold level in units of sigma
sigma: noise standard deviation
OUTPUTS:
imnew: filtered image
wmap: weight map
OPTIONS:
lvl: number of wavelet levels used in the decomposition, default is 6.
      pos: if set to True, a positivity constraint is applied to the output image
      harder: if set to one, threshold levels are raised. This is used to compensate for correlated noise
for instance
mulweight: multiplicative weight (default is 1)
subweight: weight map derived from other sources applied to diminish the impact of a given set of coefficient (default is 0)
addweight: weight map used to enhance previously detected features in an iterative process (default is 0)
soft: if set to True, soft thresholding is used
EXAMPLES
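      (illustrative) filtered, wmap = mr_filter(image, 10, 3, MAD(image))
      mirrors the 3-sigma wavelet denoising applied per band in PCA_initialise.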
"""
shim = np.shape(img)
n1 = shim[0]
n2 = shim[1]
M = np.zeros((lvl,n1,n2))
M[-1,:,:] = 1
th = np.ones_like(M) * k
##A garder
th[0,:,:] = k+1
####################
th = np.multiply(th.T, NOISE_TAB[:lvl]).T * sigma
th[np.where(th<0)] = 0
th[-1,:,:] = 0
imnew = 0
i = 0
R = img
# here, always 1st gen transform (apparently better ?)
alpha, _ = mw.wave_transform(R, lvl, newwave=0, verbose=False)
if pos == True :
M[np.where(alpha-np.abs(addweight)+np.abs(subweight)-np.abs(th)*mulweight > 0)] = 1
else:
M[np.where(np.abs(alpha)-np.abs(addweight)+np.abs(subweight)-np.abs(th)*mulweight > 0)] = 1
while i < niter:
R = img-imnew
alpha, pysap_transform = mw.wave_transform(R,lvl, newwave=newwave, verbose=False)
if soft == True and i>0:
alpha= np.sign(alpha)*(np.abs(alpha)-np.abs(addweight)+np.abs(subweight)-(th*mulweight))
Rnew = mw.iuwt(M*alpha, newwave=newwave, convol2d=0,
pysap_transform=pysap_transform, verbose=False)
imnew = imnew+Rnew
imnew[(imnew < 0)] = 0
i = i+1
wmap, _ = mw.wave_transform(imnew,lvl, newwave=newwave, verbose=False)
return imnew,wmap
def linorm(A,nit):
"""
Estimates the maximal eigen value of a matrix A
INPUTS:
A: matrix
nit: number of iterations
OUTPUTS:
xn: maximal eigen value
EXAMPLES
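      (illustrative) mu = 2. / linorm(A, 10) is how mMCA sets its gradient
      step from the largest eigenvalue of the mixing matrix A.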
"""
ns,nb = np.shape(A)
x0 = np.random.rand(nb)
x0 = x0/np.sqrt(np.sum(x0**2))
for i in range(nit):
x = np.dot(A,x0)
xn = np.sqrt(np.sum(x**2))
xp = x/xn
y = np.dot(A.T,xp)
yn = np.sqrt(np.sum(y**2))
if yn < np.dot(y.T,x0) :
break
x0 = y/yn
return xn
def PCA_initialise(cube, ns, angle = 15,npca = 32, alpha = [0,0], plot = 0, newwave=1):
"""
Estimates the mixing matrix of of two sources in a multi band set of images
INPUTS:
cube: multi-band cube from which to extract mixing coefficients
ns: number of mixed sources
OUTPUTS:
A0: mixing matrix
OPTIONS:
angle: sensitivity parameter. The angular resolution at which the algorithm has to look for PCA coefficients clustering
      npca: square root of the number of pixels to be used. Since large images lead to long computation times,
         the image is downsampled so that the PCA runs in a reasonable time
EXAMPLES
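      (illustrative) A0 = PCA_initialise(cube.T, 2, angle=40) mirrors the call
      made by mMCA for mode='PCA' with PCA=[2, 40]; cube.T has shape (n, n, nband).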
"""
n,n,nband = np.shape(cube)
cubep = cube+0.
lvl = int(np.log2(n))
s = np.zeros(nband)
for i in range(nband):
s[i] = MAD(cube[:,:,i])
cubep[:,:,i] = mr_filter(cube[:,:,i],10,3,s[i],harder = 0, lvl=lvl, newwave=newwave)[0]
cubepca = np.zeros((np.min([n,npca]),np.min([n,npca]),nband))
xk, yk = np.where(cubepca[:,:,0]==0)
cubepca[xk, yk, :] = cubep[xk*int(n/npca), yk*int(n/npca), :]
lines = np.reshape(cubep,(n**2, nband))
alphas, basis, sig= pcas.pca_ring_spectrum(cubepca[:,:,:].T,std = s)
ims0 = pcas.pca_lines(alphas,sig,angle, ns, alpha0 = alpha, plot = plot)
vals = np.array(list(set(np.reshape(ims0,(npca*npca)))))
vals = vals[np.where(vals>=0)]
nsp = np.size(vals)
spectras = np.ones([ns, nband])
rank = nsp
S_prior = np.zeros((n,n,np.size(vals)))
xs,ys = np.where(S_prior[:,:,0]==0)
count = 0
for k in vals:
x,y = np.where(ims0 == k)
im = np.zeros((npca, npca))
im[x,y] = 1
S_prior[xs,ys,count] = im[np.int_(xs*(npca/n)), np.int_(ys*(npca/n))]#/(k+1)
vecube = np.reshape(cubepca,(nband,npca*npca))
######Essai norm#####
xcol,ycol=np.where(ims0==k)
specs = np.reshape(cubepca[xcol,ycol,:],(len(xcol),nband))
s1 =np.multiply(np.mean(specs,0),
1/np.sum(np.reshape(cubepca,(npca**2,nband),0)))
spectras[count,:]=s1/np.sum(s1,0)
S_prior[:,:,count] = S_prior[:,:,count]*np.dot(cube,spectras[count,:])
count = count+1
S0 = np.reshape(S_prior[:,:,::-1],(ns,n*n))
A0 = spectras.T
return A0
|
[
"matplotlib.pyplot.title",
"MuSCADeT.pca_ring_spectrum.pca_lines",
"numpy.abs",
"numpy.sum",
"numpy.ones",
"numpy.shape",
"numpy.mean",
"scipy.signal.fftconvolve",
"numpy.int_",
"numpy.multiply",
"numpy.copy",
"numpy.max",
"numpy.reshape",
"MuSCADeT.wave_transform.wave_transform",
"numpy.real",
"scipy.ndimage.filters.median_filter",
"numpy.size",
"matplotlib.pyplot.show",
"numpy.ones_like",
"numpy.median",
"numpy.log2",
"numpy.min",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"MuSCADeT.wave_transform.iuwt",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.sign",
"numpy.random.rand",
"matplotlib.pyplot.xlabel",
"MuSCADeT.pca_ring_spectrum.pca_ring_spectrum"
] |
[((269, 379), 'numpy.array', 'np.array', (['[0.8907963, 0.20066385, 0.08550751, 0.04121745, 0.02042497, 0.01018976, \n 0.00504662, 0.00368314]'], {}), '([0.8907963, 0.20066385, 0.08550751, 0.04121745, 0.02042497, \n 0.01018976, 0.00504662, 0.00368314])\n', (277, 379), True, 'import numpy as np\n'), ((407, 494), 'numpy.array', 'np.array', (['[0.94288346, 0.22998949, 0.10029194, 0.04860995, 0.02412084, 0.01498695]'], {}), '([0.94288346, 0.22998949, 0.10029194, 0.04860995, 0.02412084, \n 0.01498695])\n', (415, 494), True, 'import numpy as np\n'), ((3108, 3121), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (3116, 3121), True, 'import numpy as np\n'), ((3299, 3321), 'numpy.multiply', 'np.multiply', (['img', 'mask'], {}), '(img, mask)\n', (3310, 3321), True, 'import numpy as np\n'), ((3576, 3587), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (3584, 3587), True, 'import numpy as np\n'), ((3596, 3619), 'numpy.zeros', 'np.zeros', (['(ns, n1 * n2)'], {}), '((ns, n1 * n2))\n', (3604, 3619), True, 'import numpy as np\n'), ((3709, 3739), 'numpy.reshape', 'np.reshape', (['img', '(nb, n1 * n2)'], {}), '(img, (nb, n1 * n2))\n', (3719, 3739), True, 'import numpy as np\n'), ((3746, 3759), 'numpy.dot', 'np.dot', (['AT', 'Y'], {}), '(AT, Y)\n', (3752, 3759), True, 'import numpy as np\n'), ((3773, 3785), 'numpy.zeros', 'np.zeros', (['nb'], {}), '(nb)\n', (3781, 3785), True, 'import numpy as np\n'), ((4478, 4490), 'numpy.zeros', 'np.zeros', (['ns'], {}), '(ns)\n', (4486, 4490), True, 'import numpy as np\n'), ((4809, 4831), 'numpy.ones', 'np.ones', (['(lvl, n1, n2)'], {}), '((lvl, n1, n2))\n', (4816, 4831), True, 'import numpy as np\n'), ((4956, 4977), 'numpy.zeros', 'np.zeros', (['(ns, niter)'], {}), '((ns, niter))\n', (4964, 4977), True, 'import numpy as np\n'), ((4985, 5012), 'numpy.zeros', 'np.zeros', (['(ns, lvl, n1, n2)'], {}), '((ns, lvl, n1, n2))\n', (4993, 5012), True, 'import numpy as np\n'), ((5021, 5048), 'numpy.zeros', 'np.zeros', (['(ns, lvl, n1, n2)'], {}), '((ns, lvl, n1, n2))\n', (5029, 5048), True, 'import numpy as np\n'), ((5054, 5077), 'numpy.zeros', 'np.zeros', (['(ns, n1 * n2)'], {}), '((ns, n1 * n2))\n', (5062, 5077), True, 'import numpy as np\n'), ((5087, 5114), 'numpy.zeros', 'np.zeros', (['(ns, lvl, n1, n2)'], {}), '((ns, lvl, n1, n2))\n', (5095, 5114), True, 'import numpy as np\n'), ((5121, 5136), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (5129, 5136), True, 'import numpy as np\n'), ((6387, 6414), 'numpy.reshape', 'np.reshape', (['S', '(ns, n1, n2)'], {}), '(S, (ns, n1, n2))\n', (6397, 6414), True, 'import numpy as np\n'), ((6416, 6441), 'matplotlib.pyplot.plot', 'plt.plot', (['ks'], {'linewidth': '(5)'}), '(ks, linewidth=5)\n', (6424, 6441), True, 'import matplotlib.pyplot as plt\n'), ((6448, 6485), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {'fontsize': '(30)'}), "('Iterations', fontsize=30)\n", (6458, 6485), True, 'import matplotlib.pyplot as plt\n'), ((6490, 6518), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""k"""'], {'fontsize': '(30)'}), "('k', fontsize=30)\n", (6500, 6518), True, 'import matplotlib.pyplot as plt\n'), ((6523, 6558), 'matplotlib.pyplot.title', 'plt.title', (['"""k = f(it)"""'], {'fontsize': '(50)'}), "('k = f(it)', fontsize=50)\n", (6532, 6558), True, 'import matplotlib.pyplot as plt\n'), ((6565, 6575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6573, 6575), True, 'import matplotlib.pyplot as plt\n'), ((7072, 7083), 'numpy.shape', 'np.shape', (['R'], {}), '(R)\n', (7080, 7083), True, 'import numpy 
as np\n'), ((7100, 7112), 'numpy.zeros', 'np.zeros', (['ns'], {}), '(ns)\n', (7108, 7112), True, 'import numpy as np\n'), ((7124, 7143), 'numpy.zeros', 'np.zeros', (['(ns, lvl)'], {}), '((ns, lvl))\n', (7132, 7143), True, 'import numpy as np\n'), ((7151, 7178), 'numpy.zeros', 'np.zeros', (['(ns, lvl, n1, n2)'], {}), '((ns, lvl, n1, n2))\n', (7159, 7178), True, 'import numpy as np\n'), ((7620, 7631), 'numpy.shape', 'np.shape', (['R'], {}), '(R)\n', (7628, 7631), True, 'import numpy as np\n'), ((7650, 7663), 'numpy.zeros', 'np.zeros', (['lvl'], {}), '(lvl)\n', (7658, 7663), True, 'import numpy as np\n'), ((7674, 7697), 'numpy.zeros', 'np.zeros', (['(lvl, n1, n2)'], {}), '((lvl, n1, n2))\n', (7682, 7697), True, 'import numpy as np\n'), ((7740, 7797), 'MuSCADeT.wave_transform.wave_transform', 'mw.wave_transform', (['R', 'lvl'], {'newwave': 'newwave', 'verbose': '(False)'}), '(R, lvl, newwave=newwave, verbose=False)\n', (7757, 7797), True, 'import MuSCADeT.wave_transform as mw\n'), ((8171, 8204), 'scipy.ndimage.filters.median_filter', 'med.median_filter', (['x'], {'size': '(3, 3)'}), '(x, size=(3, 3))\n', (8188, 8204), True, 'import scipy.ndimage.filters as med\n'), ((8218, 8234), 'numpy.abs', 'np.abs', (['(x - meda)'], {}), '(x - meda)\n', (8224, 8234), True, 'import numpy as np\n'), ((8242, 8253), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (8250, 8253), True, 'import numpy as np\n'), ((9520, 9533), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (9528, 9533), True, 'import numpy as np\n'), ((9576, 9599), 'numpy.zeros', 'np.zeros', (['(lvl, n1, n2)'], {}), '((lvl, n1, n2))\n', (9584, 9599), True, 'import numpy as np\n'), ((9921, 9972), 'MuSCADeT.wave_transform.wave_transform', 'mw.wave_transform', (['R', 'lvl'], {'newwave': '(0)', 'verbose': '(False)'}), '(R, lvl, newwave=0, verbose=False)\n', (9938, 9972), True, 'import MuSCADeT.wave_transform as mw\n'), ((11051, 11062), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (11059, 11062), True, 'import numpy as np\n'), ((11072, 11090), 'numpy.random.rand', 'np.random.rand', (['nb'], {}), '(nb)\n', (11086, 11090), True, 'import numpy as np\n'), ((12129, 12143), 'numpy.shape', 'np.shape', (['cube'], {}), '(cube)\n', (12137, 12143), True, 'import numpy as np\n'), ((12198, 12213), 'numpy.zeros', 'np.zeros', (['nband'], {}), '(nband)\n', (12206, 12213), True, 'import numpy as np\n'), ((12453, 12484), 'numpy.where', 'np.where', (['(cubepca[:, :, 0] == 0)'], {}), '(cubepca[:, :, 0] == 0)\n', (12461, 12484), True, 'import numpy as np\n'), ((12559, 12593), 'numpy.reshape', 'np.reshape', (['cubep', '(n ** 2, nband)'], {}), '(cubep, (n ** 2, nband))\n', (12569, 12593), True, 'import numpy as np\n'), ((12620, 12669), 'MuSCADeT.pca_ring_spectrum.pca_ring_spectrum', 'pcas.pca_ring_spectrum', (['cubepca[:, :, :].T'], {'std': 's'}), '(cubepca[:, :, :].T, std=s)\n', (12642, 12669), True, 'import MuSCADeT.pca_ring_spectrum as pcas\n'), ((12684, 12747), 'MuSCADeT.pca_ring_spectrum.pca_lines', 'pcas.pca_lines', (['alphas', 'sig', 'angle', 'ns'], {'alpha0': 'alpha', 'plot': 'plot'}), '(alphas, sig, angle, ns, alpha0=alpha, plot=plot)\n', (12698, 12747), True, 'import MuSCADeT.pca_ring_spectrum as pcas\n'), ((12858, 12871), 'numpy.size', 'np.size', (['vals'], {}), '(vals)\n', (12865, 12871), True, 'import numpy as np\n'), ((12892, 12912), 'numpy.ones', 'np.ones', (['[ns, nband]'], {}), '([ns, nband])\n', (12899, 12912), True, 'import numpy as np\n'), ((12990, 13021), 'numpy.where', 'np.where', (['(S_prior[:, :, 0] == 0)'], {}), '(S_prior[:, :, 0] == 0)\n', 
(12998, 13021), True, 'import numpy as np\n'), ((13708, 13752), 'numpy.reshape', 'np.reshape', (['S_prior[:, :, ::-1]', '(ns, n * n)'], {}), '(S_prior[:, :, ::-1], (ns, n * n))\n', (13718, 13752), True, 'import numpy as np\n'), ((3238, 3250), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (3244, 3250), True, 'import numpy as np\n'), ((3272, 3289), 'numpy.ones', 'np.ones', (['(n1, n2)'], {}), '((n1, n2))\n', (3279, 3289), True, 'import numpy as np\n'), ((3906, 3918), 'numpy.copy', 'np.copy', (['PSF'], {}), '(PSF)\n', (3913, 3918), True, 'import numpy as np\n'), ((4596, 4624), 'numpy.reshape', 'np.reshape', (['Ri', '(ns, n1, n1)'], {}), '(Ri, (ns, n1, n1))\n', (4606, 4624), True, 'import numpy as np\n'), ((5189, 5206), 'numpy.sum', 'np.sum', (['noise_map'], {}), '(noise_map)\n', (5195, 5206), True, 'import numpy as np\n'), ((5291, 5324), 'numpy.reshape', 'np.reshape', (['sig_map', '(ns, n1, n2)'], {}), '(sig_map, (ns, n1, n2))\n', (5301, 5324), True, 'import numpy as np\n'), ((5408, 5420), 'numpy.dot', 'np.dot', (['A', 'X'], {}), '(A, X)\n', (5414, 5420), True, 'import numpy as np\n'), ((5672, 5686), 'numpy.real', 'np.real', (['(X + R)'], {}), '(X + R)\n', (5679, 5686), True, 'import numpy as np\n'), ((7229, 7295), 'MuSCADeT.wave_transform.wave_transform', 'mw.wave_transform', (['R[j, :, :]', 'lvl'], {'newwave': 'newwave', 'verbose': '(False)'}), '(R[j, :, :], lvl, newwave=newwave, verbose=False)\n', (7246, 7295), True, 'import MuSCADeT.wave_transform as mw\n'), ((7428, 7444), 'numpy.max', 'np.max', (['wm[j, :]'], {}), '(wm[j, :])\n', (7434, 7444), True, 'import numpy as np\n'), ((7513, 7525), 'numpy.min', 'np.min', (['wmax'], {}), '(wmax)\n', (7519, 7525), True, 'import numpy as np\n'), ((7889, 7899), 'numpy.max', 'np.max', (['wm'], {}), '(wm)\n', (7895, 7899), True, 'import numpy as np\n'), ((8271, 8288), 'numpy.median', 'np.median', (['medfil'], {}), '(medfil)\n', (8280, 8288), True, 'import numpy as np\n'), ((9626, 9641), 'numpy.ones_like', 'np.ones_like', (['M'], {}), '(M)\n', (9638, 9641), True, 'import numpy as np\n'), ((9769, 9785), 'numpy.where', 'np.where', (['(th < 0)'], {}), '(th < 0)\n', (9777, 9785), True, 'import numpy as np\n'), ((10280, 10337), 'MuSCADeT.wave_transform.wave_transform', 'mw.wave_transform', (['R', 'lvl'], {'newwave': 'newwave', 'verbose': '(False)'}), '(R, lvl, newwave=newwave, verbose=False)\n', (10297, 10337), True, 'import MuSCADeT.wave_transform as mw\n'), ((10494, 10594), 'MuSCADeT.wave_transform.iuwt', 'mw.iuwt', (['(M * alpha)'], {'newwave': 'newwave', 'convol2d': '(0)', 'pysap_transform': 'pysap_transform', 'verbose': '(False)'}), '(M * alpha, newwave=newwave, convol2d=0, pysap_transform=\n pysap_transform, verbose=False)\n', (10501, 10594), True, 'import MuSCADeT.wave_transform as mw\n'), ((10724, 10785), 'MuSCADeT.wave_transform.wave_transform', 'mw.wave_transform', (['imnew', 'lvl'], {'newwave': 'newwave', 'verbose': '(False)'}), '(imnew, lvl, newwave=newwave, verbose=False)\n', (10741, 10785), True, 'import MuSCADeT.wave_transform as mw\n'), ((11169, 11182), 'numpy.dot', 'np.dot', (['A', 'x0'], {}), '(A, x0)\n', (11175, 11182), True, 'import numpy as np\n'), ((11247, 11262), 'numpy.dot', 'np.dot', (['A.T', 'xp'], {}), '(A.T, xp)\n', (11253, 11262), True, 'import numpy as np\n'), ((12178, 12188), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (12185, 12188), True, 'import numpy as np\n'), ((12829, 12848), 'numpy.where', 'np.where', (['(vals >= 0)'], {}), '(vals >= 0)\n', (12837, 12848), True, 'import numpy as np\n'), ((13071, 13090), 
'numpy.where', 'np.where', (['(ims0 == k)'], {}), '(ims0 == k)\n', (13079, 13090), True, 'import numpy as np\n'), ((13104, 13126), 'numpy.zeros', 'np.zeros', (['(npca, npca)'], {}), '((npca, npca))\n', (13112, 13126), True, 'import numpy as np\n'), ((13251, 13292), 'numpy.reshape', 'np.reshape', (['cubepca', '(nband, npca * npca)'], {}), '(cubepca, (nband, npca * npca))\n', (13261, 13292), True, 'import numpy as np\n'), ((13338, 13357), 'numpy.where', 'np.where', (['(ims0 == k)'], {}), '(ims0 == k)\n', (13346, 13357), True, 'import numpy as np\n'), ((3158, 3169), 'numpy.log2', 'np.log2', (['n1'], {}), '(n1)\n', (3165, 3169), True, 'import numpy as np\n'), ((3835, 3864), 'numpy.reshape', 'np.reshape', (['Y[i, :]', '(n1, n2)'], {}), '(Y[i, :], (n1, n2))\n', (3845, 3864), True, 'import numpy as np\n'), ((4542, 4578), 'numpy.sum', 'np.sum', (['(AT[i, :] ** 2 * sigma_y ** 2)'], {}), '(AT[i, :] ** 2 * sigma_y ** 2)\n', (4548, 4578), True, 'import numpy as np\n'), ((4840, 4874), 'numpy.multiply', 'np.multiply', (['th.T', 'NOISE_TAB[:lvl]'], {}), '(th.T, NOISE_TAB[:lvl])\n', (4851, 4874), True, 'import numpy as np\n'), ((4898, 4932), 'numpy.multiply', 'np.multiply', (['th.T', 'NOISE_TAB[:lvl]'], {}), '(th.T, NOISE_TAB[:lvl])\n', (4909, 4932), True, 'import numpy as np\n'), ((5241, 5277), 'numpy.reshape', 'np.reshape', (['noise_map', '(nb, n1 * n2)'], {}), '(noise_map, (nb, n1 * n2))\n', (5251, 5277), True, 'import numpy as np\n'), ((5818, 5838), 'numpy.max', 'np.max', (['[kmas, kmax]'], {}), '([kmas, kmax])\n', (5824, 5838), True, 'import numpy as np\n'), ((6054, 6071), 'numpy.max', 'np.max', (['[kmax, k]'], {}), '([kmax, k])\n', (6060, 6071), True, 'import numpy as np\n'), ((6236, 6259), 'numpy.reshape', 'np.reshape', (['Sj', '(n1 * n2)'], {}), '(Sj, n1 * n2)\n', (6246, 6259), True, 'import numpy as np\n'), ((6287, 6312), 'numpy.reshape', 'np.reshape', (['mask', '(n1 * n2)'], {}), '(mask, n1 * n2)\n', (6297, 6312), True, 'import numpy as np\n'), ((7470, 7487), 'numpy.mean', 'np.mean', (['sigma[j]'], {}), '(sigma[j])\n', (7477, 7487), True, 'import numpy as np\n'), ((9717, 9751), 'numpy.multiply', 'np.multiply', (['th.T', 'NOISE_TAB[:lvl]'], {}), '(th.T, NOISE_TAB[:lvl])\n', (9728, 9751), True, 'import numpy as np\n'), ((11111, 11126), 'numpy.sum', 'np.sum', (['(x0 ** 2)'], {}), '(x0 ** 2)\n', (11117, 11126), True, 'import numpy as np\n'), ((11203, 11217), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (11209, 11217), True, 'import numpy as np\n'), ((11283, 11297), 'numpy.sum', 'np.sum', (['(y ** 2)'], {}), '(y ** 2)\n', (11289, 11297), True, 'import numpy as np\n'), ((11314, 11329), 'numpy.dot', 'np.dot', (['y.T', 'x0'], {}), '(y.T, x0)\n', (11320, 11329), True, 'import numpy as np\n'), ((12398, 12415), 'numpy.min', 'np.min', (['[n, npca]'], {}), '([n, npca])\n', (12404, 12415), True, 'import numpy as np\n'), ((12415, 12432), 'numpy.min', 'np.min', (['[n, npca]'], {}), '([n, npca])\n', (12421, 12432), True, 'import numpy as np\n'), ((12962, 12975), 'numpy.size', 'np.size', (['vals'], {}), '(vals)\n', (12969, 12975), True, 'import numpy as np\n'), ((13447, 13464), 'numpy.mean', 'np.mean', (['specs', '(0)'], {}), '(specs, 0)\n', (13454, 13464), True, 'import numpy as np\n'), ((13581, 13594), 'numpy.sum', 'np.sum', (['s1', '(0)'], {}), '(s1, 0)\n', (13587, 13594), True, 'import numpy as np\n'), ((13642, 13674), 'numpy.dot', 'np.dot', (['cube', 'spectras[count, :]'], {}), '(cube, spectras[count, :])\n', (13648, 13674), True, 'import numpy as np\n'), ((3644, 3656), 'numpy.sum', 'np.sum', 
(['A', '(0)'], {}), '(A, 0)\n', (3650, 3656), True, 'import numpy as np\n'), ((4035, 4045), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4042, 4045), True, 'import numpy as np\n'), ((4107, 4161), 'scipy.signal.fftconvolve', 'scp.fftconvolve', (['x[i, :, :]', 'PSF[i, :, :]'], {'mode': '"""same"""'}), "(x[i, :, :], PSF[i, :, :], mode='same')\n", (4122, 4161), True, 'from scipy import signal as scp\n'), ((4222, 4232), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4229, 4232), True, 'import numpy as np\n'), ((4294, 4349), 'scipy.signal.fftconvolve', 'scp.fftconvolve', (['x[i, :, :]', 'PSFT[i, :, :]'], {'mode': '"""same"""'}), "(x[i, :, :], PSFT[i, :, :], mode='same')\n", (4309, 4349), True, 'from scipy import signal as scp\n'), ((5643, 5661), 'numpy.dot', 'np.dot', (['AT', '(Y - AX)'], {}), '(AT, Y - AX)\n', (5649, 5661), True, 'import numpy as np\n'), ((5754, 5781), 'numpy.reshape', 'np.reshape', (['R', '(ns, n1, n2)'], {}), '(R, (ns, n1, n2))\n', (5764, 5781), True, 'import numpy as np\n'), ((6104, 6133), 'numpy.reshape', 'np.reshape', (['S[j, :]', '(n1, n2)'], {}), '(S[j, :], (n1, n2))\n', (6114, 6133), True, 'import numpy as np\n'), ((7527, 7539), 'numpy.max', 'np.max', (['wmax'], {}), '(wmax)\n', (7533, 7539), True, 'import numpy as np\n'), ((7540, 7552), 'numpy.min', 'np.min', (['wmax'], {}), '(wmax)\n', (7546, 7552), True, 'import numpy as np\n'), ((7847, 7865), 'numpy.abs', 'np.abs', (['w[l, :, :]'], {}), '(w[l, :, :])\n', (7853, 7865), True, 'import numpy as np\n'), ((10390, 10404), 'numpy.sign', 'np.sign', (['alpha'], {}), '(alpha)\n', (10397, 10404), True, 'import numpy as np\n'), ((12780, 12809), 'numpy.reshape', 'np.reshape', (['ims0', '(npca * npca)'], {}), '(ims0, npca * npca)\n', (12790, 12809), True, 'import numpy as np\n'), ((13182, 13206), 'numpy.int_', 'np.int_', (['(xs * (npca / n))'], {}), '(xs * (npca / n))\n', (13189, 13206), True, 'import numpy as np\n'), ((13204, 13228), 'numpy.int_', 'np.int_', (['(ys * (npca / n))'], {}), '(ys * (npca / n))\n', (13211, 13228), True, 'import numpy as np\n'), ((3523, 3538), 'numpy.sum', 'np.sum', (['Apca', '(0)'], {}), '(Apca, 0)\n', (3529, 3538), True, 'import numpy as np\n'), ((4440, 4466), 'numpy.sum', 'np.sum', (['(PSFT[i, :, :] ** 2)'], {}), '(PSFT[i, :, :] ** 2)\n', (4446, 4466), True, 'import numpy as np\n'), ((7377, 7398), 'numpy.abs', 'np.abs', (['w[j, l, :, :]'], {}), '(w[j, l, :, :])\n', (7383, 7398), True, 'import numpy as np\n'), ((13512, 13554), 'numpy.reshape', 'np.reshape', (['cubepca', '(npca ** 2, nband)', '(0)'], {}), '(cubepca, (npca ** 2, nband), 0)\n', (13522, 13554), True, 'import numpy as np\n'), ((10438, 10455), 'numpy.abs', 'np.abs', (['subweight'], {}), '(subweight)\n', (10444, 10455), True, 'import numpy as np\n'), ((10039, 10056), 'numpy.abs', 'np.abs', (['subweight'], {}), '(subweight)\n', (10045, 10056), True, 'import numpy as np\n'), ((10057, 10067), 'numpy.abs', 'np.abs', (['th'], {}), '(th)\n', (10063, 10067), True, 'import numpy as np\n'), ((10151, 10168), 'numpy.abs', 'np.abs', (['subweight'], {}), '(subweight)\n', (10157, 10168), True, 'import numpy as np\n'), ((10169, 10179), 'numpy.abs', 'np.abs', (['th'], {}), '(th)\n', (10175, 10179), True, 'import numpy as np\n'), ((10406, 10419), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (10412, 10419), True, 'import numpy as np\n'), ((10420, 10437), 'numpy.abs', 'np.abs', (['addweight'], {}), '(addweight)\n', (10426, 10437), True, 'import numpy as np\n'), ((5562, 5594), 'numpy.reshape', 'np.reshape', (['(Y - AX)', '(nb, n1, n2)'], {}), '(Y - 
AX, (nb, n1, n2))\n', (5572, 5594), True, 'import numpy as np\n'), ((10021, 10038), 'numpy.abs', 'np.abs', (['addweight'], {}), '(addweight)\n', (10027, 10038), True, 'import numpy as np\n'), ((10119, 10132), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (10125, 10132), True, 'import numpy as np\n'), ((10133, 10150), 'numpy.abs', 'np.abs', (['addweight'], {}), '(addweight)\n', (10139, 10150), True, 'import numpy as np\n')]
|
import os
import typing as T
import warnings
import fsspec # type: ignore
import numpy as np
import numpy.typing as NT
import pandas as pd # type: ignore
import rioxarray # type: ignore
import xarray as xr
from xarray_sentinel import conventions, esa_safe
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
calibration_vectors = esa_safe.parse_tag_list(
calibration, ".//calibrationVector", "calibration"
)
azimuth_time_list = []
pixel_list = []
line_list = []
sigmaNought_list = []
betaNought_list = []
gamma_list = []
dn_list = []
for vector in calibration_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
sigmaNought = np.fromstring(vector["sigmaNought"]["$"], dtype=float, sep=" ") # type: ignore
sigmaNought_list.append(sigmaNought)
betaNought = np.fromstring(vector["betaNought"]["$"], dtype=float, sep=" ") # type: ignore
betaNought_list.append(betaNought)
gamma = np.fromstring(vector["gamma"]["$"], dtype=float, sep=" ") # type: ignore
gamma_list.append(gamma)
dn = np.fromstring(vector["dn"]["$"], dtype=float, sep=" ") # type: ignore
dn_list.append(dn)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise calibration vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"sigmaNought": (("line", "pixel"), sigmaNought_list),
"betaNought": (("line", "pixel"), betaNought_list),
"gamma": (("line", "pixel"), gamma_list),
"dn": (("line", "pixel"), dn_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinateConversion_dataset(annotation_path: esa_safe.PathType) -> xr.Dataset:
coordinate_conversion = esa_safe.parse_tag(
annotation_path, ".//coordinateConversionList"
)
gr0 = []
sr0 = []
azimuth_time = []
slant_range_time = []
srgrCoefficients: T.List[NT.NDArray[np.float_]] = []
grsrCoefficients: T.List[NT.NDArray[np.float_]] = []
for values in coordinate_conversion["coordinateConversion"]:
sr0.append(values["sr0"])
gr0.append(values["gr0"])
azimuth_time.append(values["azimuthTime"])
slant_range_time.append(values["slantRangeTime"])
srgrCoefficients.append(
np.fromstring(values["srgrCoefficients"]["$"], dtype=float, sep=" ")
)
grsrCoefficients.append(
np.fromstring(values["grsrCoefficients"]["$"], dtype=float, sep=" ")
)
coords = {
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"degree": list(range(len(srgrCoefficients[0]))),
}
data_vars = {
"gr0": ("azimuth_time", gr0),
"sr0": ("azimuth_time", sr0),
"slant_range_time": ("azimuth_time", slant_range_time),
"srgr_coefficients": (("azimuth_time", "degree"), srgrCoefficients),
"grsr_coefficients": (("azimuth_time", "degree"), grsrCoefficients),
}
return xr.Dataset(data_vars=data_vars, coords=coords)
def get_fs_path(
urlpath_or_path: esa_safe.PathType, fs: T.Optional[fsspec.AbstractFileSystem] = None
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
if fs is None:
fs, _, paths = fsspec.get_fs_token_paths(urlpath_or_path)
if len(paths) == 0:
raise ValueError(f"file or object not found {urlpath_or_path!r}")
elif len(paths) > 1:
raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
path = paths[0]
else:
path = urlpath_or_path
return fs, path
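# Hedged usage sketch (the product names below are illustrative, not taken from this
# repository): with the default fs=None, fsspec resolves the urlpath itself, so both a
# plain local path and a remote urlpath work, provided the target exists:
#   fs, path = get_fs_path("S1B_IW_SLC__1SDV_20210401T052622.SAFE")
#   fs, path = get_fs_path("s3://my-bucket/S1B_IW_SLC__1SDV_20210401T052622.SAFE")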
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
geolocation_grid_points = esa_safe.parse_tag_list(
annotation, ".//geolocationGridPoint"
)
azimuth_time = []
slant_range_time = []
line_set = set()
pixel_set = set()
for ggp in geolocation_grid_points:
if ggp["line"] not in line_set:
azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
line_set.add(ggp["line"])
if ggp["pixel"] not in pixel_set:
slant_range_time.append(ggp["slantRangeTime"])
pixel_set.add(ggp["pixel"])
shape = (len(azimuth_time), len(slant_range_time))
dims = ("azimuth_time", "slant_range_time")
data_vars = {
"latitude": (dims, np.full(shape, np.nan)),
"longitude": (dims, np.full(shape, np.nan)),
"height": (dims, np.full(shape, np.nan)),
"incidenceAngle": (dims, np.full(shape, np.nan)),
"elevationAngle": (dims, np.full(shape, np.nan)),
}
line = sorted(line_set)
pixel = sorted(pixel_set)
for ggp in geolocation_grid_points:
for var in data_vars:
j = line.index(ggp["line"])
i = pixel.index(ggp["pixel"])
data_vars[var][1][j, i] = ggp[var]
ds = xr.Dataset(
data_vars=data_vars,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"slant_range_time": slant_range_time,
"line": ("azimuth_time", line),
"pixel": ("slant_range_time", pixel),
},
)
return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
attitudes = esa_safe.parse_tag_list(annotation, ".//attitude")
variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
azimuth_time: T.List[T.Any] = []
data_vars: T.Dict[str, T.Any] = {var: ("azimuth_time", []) for var in variables}
for attitude in attitudes:
azimuth_time.append(attitude["time"])
for var in variables:
data_vars[var][1].append(attitude[var])
ds = xr.Dataset(
data_vars=data_vars,
coords={"azimuth_time": [np.datetime64(dt) for dt in azimuth_time]},
)
return ds
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
orbits = esa_safe.parse_tag_list(annotation, ".//orbit")
reference_system = orbits[0]["frame"]
variables = ["position", "velocity"]
data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}
azimuth_time: T.List[T.Any] = []
for orbit in orbits:
azimuth_time.append(orbit["time"])
data["position"][0].append(orbit["position"]["x"])
data["position"][1].append(orbit["position"]["y"])
data["position"][2].append(orbit["position"]["z"])
data["velocity"][0].append(orbit["velocity"]["x"])
data["velocity"][1].append(orbit["velocity"]["y"])
data["velocity"][2].append(orbit["velocity"]["z"])
if orbit["frame"] != reference_system:
warnings.warn(
"reference_system is not consistent in all the state vectors. "
)
reference_system = None
position = xr.Variable(data=data["position"], dims=("axis", "azimuth_time")) # type: ignore
velocity = xr.Variable(data=data["velocity"], dims=("axis", "azimuth_time")) # type: ignore
attrs = {}
if reference_system is not None:
attrs.update({"reference_system": reference_system})
ds = xr.Dataset(
data_vars={"position": position, "velocity": velocity},
attrs=attrs,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"axis": [0, 1, 2],
},
)
return ds
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
dc_estimates = esa_safe.parse_dc_estimate(annotation)
azimuth_time = []
t0 = []
data_dc_poly = []
for dc_estimate in dc_estimates:
azimuth_time.append(dc_estimate["azimuthTime"])
t0.append(dc_estimate["t0"])
data_dc_poly.append(dc_estimate["dataDcPolynomial"])
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(data_dc_poly[0]))),
},
)
return ds
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
azimuth_fm_rates = esa_safe.parse_azimuth_fm_rate(annotation)
azimuth_time = []
t0 = []
azimuth_fm_rate_poly = []
for azimuth_fm_rate in azimuth_fm_rates:
azimuth_time.append(azimuth_fm_rate["azimuthTime"])
t0.append(azimuth_fm_rate["t0"])
azimuth_fm_rate_poly.append(azimuth_fm_rate["azimuthFmRatePolynomial"])
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"azimuth_fm_rate_polynomial": (
("azimuth_time", "degree"),
azimuth_fm_rate_poly,
),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(azimuth_fm_rate_poly[0]))),
},
)
return ds
def find_avalable_groups(
ancillary_data_paths: T.Dict[str, T.Dict[str, T.Dict[str, str]]],
product_attrs: T.Dict[str, T.Any],
fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, str]:
groups: T.Dict[str, str] = {}
for subswath_id, subswath_data_path in ancillary_data_paths.items():
for pol_id, pol_data_paths in subswath_data_path.items():
try:
with fs.open(pol_data_paths["s1Level1ProductSchema"]):
pass
except FileNotFoundError:
continue
groups[subswath_id] = ""
groups[f"{subswath_id}/{pol_id}"] = pol_data_paths["s1Level1ProductSchema"]
for metadata_group in [
"gcp",
"orbit",
"attitude",
"dc_estimate",
"azimuth_fm_rate",
]:
groups[f"{subswath_id}/{pol_id}/{metadata_group}"] = pol_data_paths[
"s1Level1ProductSchema"
]
try:
with fs.open(pol_data_paths["s1Level1CalibrationSchema"]):
pass
except FileNotFoundError:
continue
groups[f"{subswath_id}/{pol_id}/calibration"] = pol_data_paths[
"s1Level1CalibrationSchema"
]
return groups
def open_pol_dataset(
measurement: esa_safe.PathType,
annotation: esa_safe.PathType,
chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
) -> xr.Dataset:
image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
product_information = esa_safe.parse_tag(annotation, ".//productInformation")
swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")
number_of_samples = image_information["numberOfSamples"]
first_slant_range_time = image_information["slantRangeTime"]
slant_range_sampling = 1 / product_information["rangeSamplingRate"]
slant_range_time = np.linspace(
first_slant_range_time,
first_slant_range_time + slant_range_sampling * (number_of_samples - 1),
number_of_samples,
)
number_of_lines = image_information["numberOfLines"]
first_azimuth_time = image_information["productFirstLineUtcTime"]
azimuth_time_interval = image_information["azimuthTimeInterval"]
azimuth_time = pd.date_range(
start=first_azimuth_time,
periods=number_of_lines,
freq=pd.to_timedelta(azimuth_time_interval, "s"),
).values
attrs = {
"azimuth_steering_rate": product_information["azimuthSteeringRate"],
"sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
}
number_of_bursts = swath_timing["burstList"]["@count"]
if number_of_bursts:
lines_per_burst = swath_timing["linesPerBurst"]
attrs.update(
{
"number_of_bursts": number_of_bursts,
"lines_per_burst": lines_per_burst,
}
)
for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
first_azimuth_time_burst = burst["azimuthTime"]
azimuth_time_burst = pd.date_range(
start=first_azimuth_time_burst,
periods=lines_per_burst,
freq=pd.to_timedelta(azimuth_time_interval, "s"),
)
azimuth_time[
lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
] = azimuth_time_burst
if chunks is None:
chunks = {"y": lines_per_burst}
arr = rioxarray.open_rasterio(measurement, chunks=chunks)
arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
arr = arr.rename({"y": "line", "x": "pixel"})
arr = arr.assign_coords(
{
"pixel": np.arange(0, arr["pixel"].size, dtype=int),
"line": np.arange(0, arr["line"].size, dtype=int),
"slant_range_time": ("pixel", slant_range_time),
"azimuth_time": ("line", azimuth_time),
}
)
if number_of_bursts == 0:
arr = arr.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
def crop_burst_dataset(pol_dataset: xr.Dataset, burst_index: int) -> xr.Dataset:
if burst_index < 0 or burst_index >= pol_dataset.attrs["number_of_bursts"]:
raise IndexError(f"{burst_index=} out of bounds")
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
ds = pol_dataset.sel(
line=slice(
lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1
)
)
ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
ds.attrs["burst_index"] = burst_index
return ds
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
lat = int(round(lat * 10))
lon = int(round(lon * 10))
n_or_s = "N" if lat >= 0 else "S"
e_or_w = "E" if lon >= 0 else "W"
burst_id = f"R{relative_orbit:03}" f"-{n_or_s}{lat:03}" f"-{e_or_w}{lon:04}"
return burst_id
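# Worked example with illustrative values: build_burst_id(45.67, 122.34, 8) rounds the
# coordinates to tenths of a degree (lat -> 457, lon -> 1223) and returns "R008-N457-E1223".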
def compute_burst_centres(
gcp: xr.Dataset,
) -> T.Tuple[NT.NDArray[np.float_], NT.NDArray[np.float_]]:
gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
centre = gc_az_win.mean(["az_win", "slant_range_time"])
centre = centre.isel(azimuth_time=slice(1, None))
return centre.latitude.values, centre.longitude.values
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
if group is None:
group = ""
if group.startswith("/"):
group = group[1:]
burst_index = None
parent_group, _, last_name = group.rpartition("/")
if parent_group.count("/") == 1 and last_name.isnumeric():
burst_index = int(last_name)
group = parent_group
return group, burst_index
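# Examples of how group strings are normalised (derived from the logic above):
#   normalise_group(None)       -> ("", None)
#   normalise_group("/IW1/VV")  -> ("IW1/VV", None)
#   normalise_group("IW1/VV/3") -> ("IW1/VV", 3)   # a trailing number selects a burst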
def open_dataset(
product_urlpath: esa_safe.PathType,
*,
drop_variables: T.Optional[T.Tuple[str]] = None,
group: T.Optional[str] = None,
chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
group, burst_index = normalise_group(group)
absgroup = f"/{group}"
fs, manifest_path = get_fs_path(product_urlpath, fs)
if fs.isdir(manifest_path):
manifest_path = os.path.join(manifest_path, "manifest.safe")
base_path = os.path.dirname(manifest_path)
with fs.open(manifest_path) as file:
product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
ancillary_data_paths = esa_safe.get_ancillary_data_paths(base_path, product_files)
if drop_variables is not None:
warnings.warn("'drop_variables' is currently ignored")
groups = find_avalable_groups(ancillary_data_paths, product_attrs, fs=fs)
if group != "" and group not in groups:
raise ValueError(
f"Invalid group {group!r}, please select one of the following groups:"
f"\n{list(groups.keys())}"
)
metadata = ""
if group == "":
ds = xr.Dataset()
subgroups = list(groups)
else:
subgroups = [
g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
]
if "/" not in group:
ds = xr.Dataset()
elif group.count("/") == 1:
subswath, pol = group.split("/", 1)
ds = open_pol_dataset(
ancillary_data_paths[subswath][pol]["s1Level1MeasurementSchema"],
ancillary_data_paths[subswath][pol]["s1Level1ProductSchema"],
chunks=chunks,
)
if burst_index is not None:
ds = crop_burst_dataset(ds, burst_index=burst_index)
else:
subswath, pol, metadata = group.split("/", 2)
with fs.open(groups[group]) as file:
ds = METADATA_OPENERS[metadata](file)
product_attrs["group"] = absgroup
if len(subgroups):
product_attrs["subgroups"] = subgroups
ds.attrs.update(product_attrs) # type: ignore
conventions.update_attributes(ds, group=metadata)
return ds
class Sentinel1Backend(xr.backends.common.BackendEntrypoint):
def open_dataset( # type: ignore
self,
filename_or_obj: str,
drop_variables: T.Optional[T.Tuple[str]] = None,
group: T.Optional[str] = None,
) -> xr.Dataset:
return open_dataset(filename_or_obj, drop_variables=drop_variables, group=group)
def guess_can_open(self, filename_or_obj: T.Any) -> bool:
try:
_, ext = os.path.splitext(filename_or_obj)
except TypeError:
return False
return ext.lower() in {".safe", ".safe/"}
METADATA_OPENERS = {
"gcp": open_gcp_dataset,
"orbit": open_orbit_dataset,
"attitude": open_attitude_dataset,
"dc_estimate": open_dc_estimate_dataset,
"azimuth_fm_rate": open_azimuth_fm_rate_dataset,
"calibration": open_calibration_dataset,
}
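# Usage sketch (the SAFE name and group labels are hypothetical): the root group lists
# the available subswaths, "<subswath>/<pol>" opens the measurement, and deeper groups
# dispatch to the METADATA_OPENERS above:
#   root = open_dataset("S1B_IW_SLC__1SDV_20210401T052622.SAFE")
#   vv = open_dataset("S1B_IW_SLC__1SDV_20210401T052622.SAFE", group="IW1/VV")
#   gcp = open_dataset("S1B_IW_SLC__1SDV_20210401T052622.SAFE", group="IW1/VV/gcp")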
|
[
"numpy.allclose",
"xarray.Variable",
"numpy.arange",
"os.path.join",
"numpy.full",
"rioxarray.open_rasterio",
"fsspec.get_fs_token_paths",
"os.path.dirname",
"numpy.linspace",
"numpy.fromstring",
"xarray_sentinel.esa_safe.get_ancillary_data_paths",
"xarray_sentinel.conventions.update_attributes",
"xarray_sentinel.esa_safe.parse_azimuth_fm_rate",
"xarray.Dataset",
"xarray_sentinel.esa_safe.parse_tag",
"pandas.to_timedelta",
"xarray_sentinel.esa_safe.parse_manifest_sentinel1",
"numpy.datetime64",
"xarray_sentinel.esa_safe.parse_dc_estimate",
"numpy.array",
"os.path.splitext",
"xarray_sentinel.esa_safe.parse_tag_list",
"warnings.warn",
"fsspec.filesystem"
] |
[((365, 440), 'xarray_sentinel.esa_safe.parse_tag_list', 'esa_safe.parse_tag_list', (['calibration', '""".//calibrationVector"""', '"""calibration"""'], {}), "(calibration, './/calibrationVector', 'calibration')\n", (388, 440), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((1404, 1424), 'numpy.array', 'np.array', (['pixel_list'], {}), '(pixel_list)\n', (1412, 1424), True, 'import numpy as np\n'), ((1976, 2022), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': 'data_vars', 'coords': 'coords'}), '(data_vars=data_vars, coords=coords)\n', (1986, 2022), True, 'import xarray as xr\n'), ((2142, 2208), 'xarray_sentinel.esa_safe.parse_tag', 'esa_safe.parse_tag', (['annotation_path', '""".//coordinateConversionList"""'], {}), "(annotation_path, './/coordinateConversionList')\n", (2160, 2208), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((3379, 3425), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': 'data_vars', 'coords': 'coords'}), '(data_vars=data_vars, coords=coords)\n', (3389, 3425), True, 'import xarray as xr\n'), ((4075, 4137), 'xarray_sentinel.esa_safe.parse_tag_list', 'esa_safe.parse_tag_list', (['annotation', '""".//geolocationGridPoint"""'], {}), "(annotation, './/geolocationGridPoint')\n", (4098, 4137), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((5636, 5686), 'xarray_sentinel.esa_safe.parse_tag_list', 'esa_safe.parse_tag_list', (['annotation', '""".//attitude"""'], {}), "(annotation, './/attitude')\n", (5659, 5686), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((6291, 6338), 'xarray_sentinel.esa_safe.parse_tag_list', 'esa_safe.parse_tag_list', (['annotation', '""".//orbit"""'], {}), "(annotation, './/orbit')\n", (6314, 6338), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((7182, 7247), 'xarray.Variable', 'xr.Variable', ([], {'data': "data['position']", 'dims': "('axis', 'azimuth_time')"}), "(data=data['position'], dims=('axis', 'azimuth_time'))\n", (7193, 7247), True, 'import xarray as xr\n'), ((7279, 7344), 'xarray.Variable', 'xr.Variable', ([], {'data': "data['velocity']", 'dims': "('axis', 'azimuth_time')"}), "(data=data['velocity'], dims=('axis', 'azimuth_time'))\n", (7290, 7344), True, 'import xarray as xr\n'), ((7836, 7874), 'xarray_sentinel.esa_safe.parse_dc_estimate', 'esa_safe.parse_dc_estimate', (['annotation'], {}), '(annotation)\n', (7862, 7874), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((8581, 8623), 'xarray_sentinel.esa_safe.parse_azimuth_fm_rate', 'esa_safe.parse_azimuth_fm_rate', (['annotation'], {}), '(annotation)\n', (8611, 8623), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((9507, 9532), 'fsspec.filesystem', 'fsspec.filesystem', (['"""file"""'], {}), "('file')\n", (9524, 9532), False, 'import fsspec\n'), ((10904, 10957), 'xarray_sentinel.esa_safe.parse_tag', 'esa_safe.parse_tag', (['annotation', '""".//imageInformation"""'], {}), "(annotation, './/imageInformation')\n", (10922, 10957), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((10984, 11039), 'xarray_sentinel.esa_safe.parse_tag', 'esa_safe.parse_tag', (['annotation', '""".//productInformation"""'], {}), "(annotation, './/productInformation')\n", (11002, 11039), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((11059, 11107), 'xarray_sentinel.esa_safe.parse_tag', 'esa_safe.parse_tag', (['annotation', '""".//swathTiming"""'], {}), "(annotation, './/swathTiming')\n", (11077, 11107), False, 'from xarray_sentinel import conventions, esa_safe\n'), 
((11330, 11462), 'numpy.linspace', 'np.linspace', (['first_slant_range_time', '(first_slant_range_time + slant_range_sampling * (number_of_samples - 1))', 'number_of_samples'], {}), '(first_slant_range_time, first_slant_range_time + \n slant_range_sampling * (number_of_samples - 1), number_of_samples)\n', (11341, 11462), True, 'import numpy as np\n'), ((12928, 12979), 'rioxarray.open_rasterio', 'rioxarray.open_rasterio', (['measurement'], {'chunks': 'chunks'}), '(measurement, chunks=chunks)\n', (12951, 12979), False, 'import rioxarray\n'), ((13517, 13572), 'xarray.Dataset', 'xr.Dataset', ([], {'attrs': 'attrs', 'data_vars': "{'measurement': arr}"}), "(attrs=attrs, data_vars={'measurement': arr})\n", (13527, 13572), True, 'import xarray as xr\n'), ((15807, 15837), 'os.path.dirname', 'os.path.dirname', (['manifest_path'], {}), '(manifest_path)\n', (15822, 15837), False, 'import os\n'), ((15987, 16046), 'xarray_sentinel.esa_safe.get_ancillary_data_paths', 'esa_safe.get_ancillary_data_paths', (['base_path', 'product_files'], {}), '(base_path, product_files)\n', (16020, 16046), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((17486, 17535), 'xarray_sentinel.conventions.update_attributes', 'conventions.update_attributes', (['ds'], {'group': 'metadata'}), '(ds, group=metadata)\n', (17515, 17535), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((762, 817), 'numpy.fromstring', 'np.fromstring', (["vector['pixel']['$']"], {'dtype': 'int', 'sep': '""" """'}), "(vector['pixel']['$'], dtype=int, sep=' ')\n", (775, 817), True, 'import numpy as np\n'), ((889, 952), 'numpy.fromstring', 'np.fromstring', (["vector['sigmaNought']['$']"], {'dtype': 'float', 'sep': '""" """'}), "(vector['sigmaNought']['$'], dtype=float, sep=' ')\n", (902, 952), True, 'import numpy as np\n'), ((1035, 1097), 'numpy.fromstring', 'np.fromstring', (["vector['betaNought']['$']"], {'dtype': 'float', 'sep': '""" """'}), "(vector['betaNought']['$'], dtype=float, sep=' ')\n", (1048, 1097), True, 'import numpy as np\n'), ((1173, 1230), 'numpy.fromstring', 'np.fromstring', (["vector['gamma']['$']"], {'dtype': 'float', 'sep': '""" """'}), "(vector['gamma']['$'], dtype=float, sep=' ')\n", (1186, 1230), True, 'import numpy as np\n'), ((1293, 1347), 'numpy.fromstring', 'np.fromstring', (["vector['dn']['$']"], {'dtype': 'float', 'sep': '""" """'}), "(vector['dn']['$'], dtype=float, sep=' ')\n", (1306, 1347), True, 'import numpy as np\n'), ((1436, 1464), 'numpy.allclose', 'np.allclose', (['pixel', 'pixel[0]'], {}), '(pixel, pixel[0])\n', (1447, 1464), True, 'import numpy as np\n'), ((3622, 3664), 'fsspec.get_fs_token_paths', 'fsspec.get_fs_token_paths', (['urlpath_or_path'], {}), '(urlpath_or_path)\n', (3647, 3664), False, 'import fsspec\n'), ((15745, 15789), 'os.path.join', 'os.path.join', (['manifest_path', '"""manifest.safe"""'], {}), "(manifest_path, 'manifest.safe')\n", (15757, 15789), False, 'import os\n'), ((15919, 15958), 'xarray_sentinel.esa_safe.parse_manifest_sentinel1', 'esa_safe.parse_manifest_sentinel1', (['file'], {}), '(file)\n', (15952, 15958), False, 'from xarray_sentinel import conventions, esa_safe\n'), ((16090, 16144), 'warnings.warn', 'warnings.warn', (['"""\'drop_variables\' is currently ignored"""'], {}), '("\'drop_variables\' is currently ignored")\n', (16103, 16144), False, 'import warnings\n'), ((16480, 16492), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (16490, 16492), True, 'import xarray as xr\n'), ((2699, 2767), 'numpy.fromstring', 'np.fromstring', 
(["values['srgrCoefficients']['$']"], {'dtype': 'float', 'sep': '""" """'}), "(values['srgrCoefficients']['$'], dtype=float, sep=' ')\n", (2712, 2767), True, 'import numpy as np\n'), ((2823, 2891), 'numpy.fromstring', 'np.fromstring', (["values['grsrCoefficients']['$']"], {'dtype': 'float', 'sep': '""" """'}), "(values['grsrCoefficients']['$'], dtype=float, sep=' ')\n", (2836, 2891), True, 'import numpy as np\n'), ((2943, 2960), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (2956, 2960), True, 'import numpy as np\n'), ((4718, 4740), 'numpy.full', 'np.full', (['shape', 'np.nan'], {}), '(shape, np.nan)\n', (4725, 4740), True, 'import numpy as np\n'), ((4771, 4793), 'numpy.full', 'np.full', (['shape', 'np.nan'], {}), '(shape, np.nan)\n', (4778, 4793), True, 'import numpy as np\n'), ((4821, 4843), 'numpy.full', 'np.full', (['shape', 'np.nan'], {}), '(shape, np.nan)\n', (4828, 4843), True, 'import numpy as np\n'), ((4879, 4901), 'numpy.full', 'np.full', (['shape', 'np.nan'], {}), '(shape, np.nan)\n', (4886, 4901), True, 'import numpy as np\n'), ((4937, 4959), 'numpy.full', 'np.full', (['shape', 'np.nan'], {}), '(shape, np.nan)\n', (4944, 4959), True, 'import numpy as np\n'), ((7021, 7099), 'warnings.warn', 'warnings.warn', (['"""reference_system is not consistent in all the state vectors. """'], {}), "('reference_system is not consistent in all the state vectors. ')\n", (7034, 7099), False, 'import warnings\n'), ((13155, 13197), 'numpy.arange', 'np.arange', (['(0)', "arr['pixel'].size"], {'dtype': 'int'}), "(0, arr['pixel'].size, dtype=int)\n", (13164, 13197), True, 'import numpy as np\n'), ((13219, 13260), 'numpy.arange', 'np.arange', (['(0)', "arr['line'].size"], {'dtype': 'int'}), "(0, arr['line'].size, dtype=int)\n", (13228, 13260), True, 'import numpy as np\n'), ((16701, 16713), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (16711, 16713), True, 'import xarray as xr\n'), ((18001, 18034), 'os.path.splitext', 'os.path.splitext', (['filename_or_obj'], {}), '(filename_or_obj)\n', (18017, 18034), False, 'import os\n'), ((1636, 1653), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (1649, 1653), True, 'import numpy as np\n'), ((4356, 4389), 'numpy.datetime64', 'np.datetime64', (["ggp['azimuthTime']"], {}), "(ggp['azimuthTime'])\n", (4369, 4389), True, 'import numpy as np\n'), ((11800, 11843), 'pandas.to_timedelta', 'pd.to_timedelta', (['azimuth_time_interval', '"""s"""'], {}), "(azimuth_time_interval, 's')\n", (11815, 11843), True, 'import pandas as pd\n'), ((5322, 5339), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (5335, 5339), True, 'import numpy as np\n'), ((6136, 6153), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (6149, 6153), True, 'import numpy as np\n'), ((7628, 7645), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (7641, 7645), True, 'import numpy as np\n'), ((8340, 8357), 'numpy.datetime64', 'np.datetime64', (['at'], {}), '(at)\n', (8353, 8357), True, 'import numpy as np\n'), ((9195, 9212), 'numpy.datetime64', 'np.datetime64', (['at'], {}), '(at)\n', (9208, 9212), True, 'import numpy as np\n'), ((12642, 12685), 'pandas.to_timedelta', 'pd.to_timedelta', (['azimuth_time_interval', '"""s"""'], {}), "(azimuth_time_interval, 's')\n", (12657, 12685), True, 'import pandas as pd\n')]
|
"""
Authors: <NAME>, <NAME>
Helper functions:
1. Overall scores that combine performance and explainability, normalized to the 0-1 range (logaritmic_power, sigmoid_power).
2. An explainability objective to minimize (smaller is better), combining the classification error with an additive penalty on the size of the tree, used during optimization.
3. Accuracy score.
"""
from sklearn.metrics import accuracy_score
from sklearn.tree import _tree
import numpy as np
import math
def get_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def logaritmic_power(x, y):
'''
Parameters:
----------
input:
x: performance (scalar)
y: explainability (scalar)
output:
factor: Normalized overall score
----------
'''
z = 1-x
l = np.log2(y ** z)
factor = x ** l
return factor
def sigmoid_power(x, y):
'''
Parameters:
----------
input:
x: performance (scalar)
y: explainability (scalar)
output:
factor: Normalized overall score
----------
'''
sigmoid = 1/(1 + math.exp(-y))
factor = x ** sigmoid
return factor
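# Worked examples with illustrative numbers, for performance x = 0.9:
#   logaritmic_power(0.9, 4) -> exponent l = (1 - 0.9) * log2(4) = 0.2, so 0.9 ** 0.2 ~= 0.979
#   sigmoid_power(0.9, 3)    -> sigmoid(3) ~= 0.953,                  so 0.9 ** 0.953 ~= 0.905
# Both collapse the (performance, explainability) pair into a single scalar score.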
def explainability_metric(clf, x):
'''
Parameters:
----------
input:
x: performance
clf: object of decision tree
output:
minimize: explainable (scalar)
----------
'''
    size_leaf = clf.tree_.n_leaves
    # count internal (decision) nodes; leaf entries in tree_.feature are TREE_UNDEFINED
    size_node = len([z for z in clf.tree_.feature if z != _tree.TREE_UNDEFINED])
    _lambda = 1
    error = 1.0 - x
    # smaller is better: misclassification error plus a penalty proportional to tree size
    minimize = error + _lambda * size_node
return minimize
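# Minimal usage sketch (assumes scikit-learn is installed; the dataset and variable
# names are illustrative only):
#   from sklearn.datasets import load_iris
#   from sklearn.tree import DecisionTreeClassifier
#   X, y = load_iris(return_X_y=True)
#   clf = DecisionTreeClassifier(max_depth=3).fit(X, y)
#   acc = get_score(y, clf.predict(X))
#   overall = sigmoid_power(acc, explainability_metric(clf, acc))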
|
[
"numpy.log2",
"sklearn.metrics.accuracy_score",
"math.exp"
] |
[((500, 530), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (514, 530), False, 'from sklearn.metrics import accuracy_score\n'), ((796, 811), 'numpy.log2', 'np.log2', (['(y ** z)'], {}), '(y ** z)\n', (803, 811), True, 'import numpy as np\n'), ((1104, 1116), 'math.exp', 'math.exp', (['(-y)'], {}), '(-y)\n', (1112, 1116), False, 'import math\n')]
|
# Author : <NAME>
# Contact : <EMAIL>
# Date : Feb 16, 2020
import random
import time

import numpy as np
try:
from CS5313_Localization_Env import maze
except:
print(
'Problem finding CS5313_Localization_Env.maze... Trying to "import maze" only...'
)
try:
import maze
print("Successfully imported maze")
except Exception as ex:
print("Could not import maze")
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
print(ex)
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
try:
from CS5313_Localization_Env import RobotLocalization as viz
except:
print(
'Problem finding CS5313_Localization_Env.RobotLocalization... Trying to "import RobotLocalization" only...'
)
try:
import RobotLocalization as viz
print("Successfully imported RobotLocalization")
except Exception as ex:
print("Could not import RobotLocalization")
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
print(ex)
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
try:
from CS5313_Localization_Env import localization_env as le
except:
print(
'Problem finding CS5313_Localization_Env.localization_env... Trying to "import localization_env" only...'
)
try:
import localization_env as le
print("Successfully imported localization_env")
except Exception as ex:
print("Could not import localization_env")
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
print(ex)
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
from enum import Enum
# Change this to true to print out information on the robot location and heading
printouts = True
# Change this to true in order to print out the map as a dataframe to console every time move() is called, as well as the Transition Tables to csv files named "heading.csv" and "location.csv". Won't do anything if printouts is false except import pandas
df = False
if df:
from pandas import DataFrame
class Directions(Enum):
"""An Enum containing the directions S, E, N, W, and St (stationary) and their respective (x, y) movement tuples. Ex. S = (0, 1) meaning down one row, and stationary in the columns."""
S = (0, 1)
E = (1, 0)
N = (0, -1)
W = (-1, 0)
St = (0, 0)
def get_ortho(self, value):
""" Return the Direction Enums orthogonal to the given direction
        Arguments:\n
value -- The given direction for which the orthogonal directions will be based on.\n
Returns:\n
A list of directions orthogonal to the given direction.
"""
if value in [self.N, self.S]:
return [self.W, self.E]
return [self.N, self.S]
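    # Call sketch mirroring how create_locations_table below uses this method: it is
    # invoked on the class itself, passing the enum class as `self`, e.g.
    #   Directions.get_ortho(Directions, Directions.N) -> [Directions.W, Directions.E]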
class Headings(Enum):
"""An enum containing the headings S, E, N, W and their respective (x, y) movement tuples"""
S = (0, 1)
E = (1, 0)
N = (0, -1)
W = (-1, 0)
def get_ortho(self, value):
""" Return the Headings Enums orthogonal to the given heading
        Arguments:\n
value -- The given heading for which the orthogonal heading will be based on.\n
Returns:\n
A list of headings orthogonal to the given heading.
"""
if value in [self.N, self.S]:
return [self.W, self.E]
return [self.N, self.S]
class Environment:
""" An environment for testing a randomly moving robot around a maze.
Important Class Variables\n
    map -- The map of the maze. A 2d list of lists in the form list[x][y] where a value of 1 signifies there is a wall, 0 signifies the cell is traversable, and 'x' denotes the robot location.\n
location_transitions -- The table of transition probabilities for each cell. Format is [x][y][heading][direction] which will return the probabilities of moving the direction, given the robot's current x, y, and heading.\n
    headings_transitions -- The table of transition probabilities for the headings given each cell. Format is [x][y][heading][heading] which will return the probabilities of each heading for the next time step given the robot's current x, y, and heading.\n
robot_location -- The current location of the robot, given as a tuple in the for (x, y).
robot_heading -- The current heading of the robot, given as a Headings enum.
"""
def __init__(
self,
action_bias,
observation_noise,
action_noise,
dimensions,
seed=None,
window_size=[750, 750],
):
"""Initializes the environment. The robot starts in a random traversable cell.
        Arguments:\n
action_bias -- Provides a bias for the robots actions. Positive values increase the likelihood of South and East movements, and negative favor North and West. (float in range -1-1)\n
observation_noise -- The probability that any given observation value will flip values erroneously. (float in range 0-1)\n
        action_noise -- The probability that an action will move either direction perpendicular to the intended direction. (float in range 0-1)\n
dimensions -- The dimensions of the map, given in the form (x,y). (tuple in range (1+, 1+))\n
        seed (optional) -- The random seed value. (int) default=None, in which case a random seed is chosen\n
window_size(optional) -- The [x, y] size of the display. Default is [750, 750]. Should be the same aspect ratio as the maze to avoid strange looking graphics.
Return:\n
No return
"""
# the pygame state
self.running = True
# Step counter
self.steps = 0
# save the bias, noise, and map sizze parameters
self.action_bias = action_bias
self.observation_noise = observation_noise
self.action_noise = action_noise
self.dimensions = dimensions
# set the random seed and display it
        self.seed = seed if seed is not None else random.randint(1, 10000)
random.seed(self.seed)
        # create the map and list of free cells
self.map = maze.make_maze(dimensions[0], dimensions[1], seed)
self.free_cells = [
(x, y)
for x in range(dimensions[0])
for y in range(dimensions[1])
if self.map[x][y] == 0
]
        # create the transition table
self.location_transitions = self.create_locations_table()
self.headings_transitions = self.create_headings_table()
if df:
DataFrame(self.location_transitions).transpose().to_csv("location.csv")
DataFrame(self.headings_transitions).transpose().to_csv("heading.csv")
# set the robot location and print
self.robot_location = self.free_cells[
random.randint(0, len(self.free_cells) - 1)
]
self.location_priors, self.heading_priors = self.compute_prior_probabilities()
self.observation_tables = self.create_observation_tables()
self.map[self.robot_location[0]][self.robot_location[1]] = "x"
# Set the robot heading
self.robot_heading = random.choice(
[
h
for h in Headings
if self.traversable(self.robot_location[0], self.robot_location[1], h)
]
)
# gen initial headings probs
probs = {}
# prob_sum = 0
for h in le.Headings:
# num = random.random()
probs[h] = 1
# prob_sum += num
# for h in le.Headings:
# probs[h] /= prob_sum
# init viz
self.window_size = window_size
self.game = viz.Game()
self.game.init_pygame(self.window_size)
self.game.update(
self.map,
self.robot_location,
self.robot_heading,
[[0] * self.dimensions[1]] * self.dimensions[0],
probs,
)
self.game.display()
if printouts:
print("Random seed:", self.seed)
print("Robot starting location:", self.robot_location)
print("Robot starting heading:", self.robot_heading)
if df:
print(DataFrame(self.map).transpose())
def compute_prior_probabilities(self):
location_priors = {}
for cell in self.free_cells:
location_priors[cell] = 1 / len(self.free_cells)
heading_priors = {}
for heading in Headings:
heading_priors[heading] = 0
for cell in self.free_cells:
for heading2 in Headings:
heading_priors[heading] += self.headings_transitions[cell[0]][
cell[1]
][heading2][heading]
heading_priors[heading] /= len(self.free_cells) * 4
return location_priors, heading_priors
def random_dictionary_sample(self, probs):
sample = random.random()
prob_sum = 0
for key in probs.keys():
prob_sum += probs[key]
if prob_sum > sample:
return key
def move(self):
"""Updates the robots heading and moves the robot to a new position based off of the transistion table and its current location and new heading.
Return:\n
A list of the observations modified by the observation noise, where 1 signifies a wall and 0 signifies an empty cell. The order of the list is [S, E, N, W]
"""
# get the new location
self.map[self.robot_location[0]][self.robot_location[1]] = 0
probs = self.location_transitions[self.robot_location[0]][
self.robot_location[1]
][self.robot_heading]
direction = self.random_dictionary_sample(probs)
self.robot_location = (
self.robot_location[0] + direction.value[0],
self.robot_location[1] + direction.value[1],
)
self.map[self.robot_location[0]][self.robot_location[1]] = "x"
# Get the new heading
h_probs = self.headings_transitions[self.robot_location[0]][
self.robot_location[1]
][self.robot_heading]
self.robot_heading = self.random_dictionary_sample(h_probs)
# # get the new location
# self.map[self.robot_location[0]][self.robot_location[1]] = 0
# probs = self.location_transitions[self.robot_location[0]][
# self.robot_location[1]
# ][self.robot_heading]
self.steps += 1
# return the new observation
if printouts:
print()
print(
"---------------------------Steps: "
+ str(self.steps)
+ " ---------------------------------"
)
print(self.robot_location)
print(self.robot_heading)
print(direction)
if df:
print(DataFrame(self.map).transpose())
# if self.running:
# self.game.update(
# self.map,
# self.robot_location,
# self.robot_heading,
# location_probs,
# headings_probs,
# )
# self.running = self.game.display()
# else:
# print("Pygame closed. Quiting...")
# self.game.quit()
return self.observe()
def update(self, location_probs, headings_probs):
"""Updates the visualizer to represent where your filtering method estimates the robot to be, and where it estimates the robot is heading.
Arguments:\n
location_probs: The probability of the robot being in any (x, y) cell in the map. Created from your project code. Format list[x][y] = float\n
headings_probs: The probability of the robot's current heading being any given heading. Created from your project code. Format dict{<Headings enum> : float, <Headings enum> : float,... }\n
"""
if self.running:
self.game.update(
self.map,
self.robot_location,
self.robot_heading,
location_probs,
headings_probs,
)
self.running = self.game.display()
else:
print("Pygame closed. Quiting...")
self.game.quit()
def observe(self):
"""Observes the walls at the current robot location
Return:\n
A list of the observations modified by the observation noise, where 1 signifies a wall and 0 signifies an empty cell. The order of the list is [S, E, N, W]
"""
# get the neighboring walls to create the true observation table
observations = [
0
if self.traversable(
self.robot_location[0], self.robot_location[1], direction
)
else 1
for direction in Directions
if direction != Directions.St
]
# apply observation noise
observations = [
1 - x if random.random() < self.observation_noise else x
for x in observations
]
return observations
def create_observation_tables(self):
observation_table = []
for x in range(self.dimensions[0]):
observation_table.append({})
for y in range(self.dimensions[1]):
if self.map[x][y] == 1:
observation_table[x][y] = -1
continue
observation_table[x][y] = {}
observations = [
0
if self.traversable(
x, y, direction
)
else 1
for direction in Directions
if direction != Directions.St
]
for a in [0, 1]:
for b in [0, 1]:
for c in [0, 1]:
for d in [0, 1]:
potential_obs = (a, b, c, d)
num_wrong = 0
for i in range(len(observations)):
if observations[i] != potential_obs[i]:
num_wrong += 1
prob = (1 - self.observation_noise) ** (len(
observations
)-num_wrong) * self.observation_noise ** num_wrong
observation_table[x][y][potential_obs] = prob
return observation_table
def create_locations_table(self):
temp = []
# loop through the x dim
for x in range(self.dimensions[0]):
temp.append([])
# loop through the y dim
for y in range(self.dimensions[1]):
# If the cell is not traversable than set its value in the transition table to -1
if self.map[x][y] == 1:
temp[x].append(-1)
continue
temp[x].append({})
for heading in list(Headings):
probs = {}
                    # Compute transition probabilities ignoring walls
for direction in Directions:
if direction.name == heading.name:
probs[direction] = 1 - self.action_noise
elif direction in Directions.get_ortho(
Directions, Directions[heading.name]
):
probs[direction] = self.action_noise / 2
else:
probs[direction] = 0
# init stationary probability
probs[Directions.St] = 0
                    # account for walls. If there is a wall for one of the transition probabilities add the probability to the stationary probability and set the transition probability to 0
for direction in Directions:
if not self.traversable(x, y, direction):
probs[Directions.St] += probs[direction]
probs[direction] = 0
                    # add the new transition probabilities
temp[x][y].update({heading: probs})
return temp
def create_headings_table(self):
temp = []
# loop through the x dim
for x in range(self.dimensions[0]):
temp.append([])
# loop through the y dim
for y in range(self.dimensions[1]):
# If the cell is not traversable than set its value in the transition table to -1
if self.map[x][y] == 1:
temp[x].append(-1)
continue
temp[x].append({})
for heading in Headings:
probs = {}
# Handle case when the current heading is traversable
if self.traversable(x, y, heading):
for new_heading in Headings:
if heading == new_heading:
probs[new_heading] = 1
else:
probs[new_heading] = 0
temp[x][y].update({heading: probs})
continue
# If the current heading is not traversable
# Find which headings are available
headings_traversablity = {}
for new_heading in Headings:
if self.traversable(x, y, new_heading):
headings_traversablity[new_heading] = 1
else:
headings_traversablity[new_heading] = 0
# Sum these values for later arithmetic
total_traversable = sum(list(headings_traversablity.values()))
se_traversable = (
headings_traversablity[Headings.S]
+ headings_traversablity[Headings.E]
)
nw_traversable = (
headings_traversablity[Headings.N]
+ headings_traversablity[Headings.W]
)
# Compute the heading probabilities for traversable headings
for new_heading in Headings:
if self.traversable(x, y, new_heading):
if new_heading in [Headings.S, Headings.E]:
probs[new_heading] = (
1 / total_traversable
+ self.action_bias / se_traversable
)
else:
probs[new_heading] = (
1 / total_traversable
- self.action_bias / nw_traversable
)
else:
probs[new_heading] = 0
# normalize heading probabilities
probs_sum = sum([probs[x] for x in Headings])
for h in Headings:
probs[h] /= probs_sum
                    # add the new transition probabilities
temp[x][y].update({heading: probs})
return temp
def traversable(self, x, y, direction):
"""
Returns true if the cell to the given direction of (x,y) is traversable, otherwise returns false.
        Arguments:\n
        x -- the x coordinate of the initial cell\n
        y -- the y coordinate of the initial cell\n
        direction -- the direction of the cell to check for traversability. Type: localization_env.Directions enum or localization_env.Headings\n
Return:\n
A boolean signifying whether the cell to the given direction is traversable or not
"""
# see if the cell in the direction is traversable. If statement to handle out of bounds errors
if (
x + direction.value[0] >= 0
and x + direction.value[0] < self.dimensions[0]
            and y + direction.value[1] >= 0
            and y + direction.value[1] < self.dimensions[1]
):
if self.map[x + direction.value[0]][y + direction.value[1]] == 0:
return True
return False
def dummy_location_and_heading_probs(self):
"""
Returns a dummy location probability table and a dummy heading probability dictionary for testing purposes
Returns:\n
location probability table: Format is list[x][y] = float between (0-1)\n
Headings probability table: Format is dict{<Heading enum> : float between (0-1)}
"""
loc_probs = list()
sum_probs = 0
for x in range(self.dimensions[0]):
loc_probs.append([])
for y in range(self.dimensions[1]):
if self.map[x][y] == 1:
loc_probs[x].append(0.0)
else:
num = random.random()
loc_probs[x].append(num)
sum_probs += num
for x in range(self.dimensions[0]):
for y in range(self.dimensions[1]):
loc_probs[x][y] /= sum_probs
hed_probs = {}
sample = np.random.rand(4)
sample = (sample / np.sum(sample)).tolist()
i = 0
for heading in le.Headings:
hed_probs[heading] = sample[i]
i += 1
return loc_probs, hed_probs
if __name__ == "__main__":
env = Environment(0.1, 0.1, 0.2, (10, 10), window_size=[1000, 1000])
# print("Starting test. Press <enter> to make move")
location, heading = env.dummy_location_and_heading_probs()
done = False
while env.running:
        observation = env.move()
        env.update(location, heading)
if printouts:
print(observation)
time.sleep(0.25)
|
[
"pandas.DataFrame",
"numpy.sum",
"random.randint",
"RobotLocalization.Game",
"maze.make_maze",
"time.sleep",
"random.random",
"random.seed",
"numpy.random.rand"
] |
[((6124, 6146), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (6135, 6146), False, 'import random\n'), ((6214, 6264), 'maze.make_maze', 'maze.make_maze', (['dimensions[0]', 'dimensions[1]', 'seed'], {}), '(dimensions[0], dimensions[1], seed)\n', (6228, 6264), False, 'import maze\n'), ((7779, 7789), 'RobotLocalization.Game', 'viz.Game', ([], {}), '()\n', (7787, 7789), True, 'import RobotLocalization as viz\n'), ((9033, 9048), 'random.random', 'random.random', ([], {}), '()\n', (9046, 9048), False, 'import random\n'), ((21621, 21638), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (21635, 21638), True, 'import numpy as np\n'), ((22216, 22232), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (22226, 22232), False, 'import time\n'), ((6091, 6115), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (6105, 6115), False, 'import random\n'), ((13103, 13118), 'random.random', 'random.random', ([], {}), '()\n', (13116, 13118), False, 'import random\n'), ((21345, 21360), 'random.random', 'random.random', ([], {}), '()\n', (21358, 21360), False, 'import random\n'), ((21666, 21680), 'numpy.sum', 'np.sum', (['sample'], {}), '(sample)\n', (21672, 21680), True, 'import numpy as np\n'), ((6640, 6676), 'pandas.DataFrame', 'DataFrame', (['self.location_transitions'], {}), '(self.location_transitions)\n', (6649, 6676), False, 'from pandas import DataFrame\n'), ((6724, 6760), 'pandas.DataFrame', 'DataFrame', (['self.headings_transitions'], {}), '(self.headings_transitions)\n', (6733, 6760), False, 'from pandas import DataFrame\n'), ((8311, 8330), 'pandas.DataFrame', 'DataFrame', (['self.map'], {}), '(self.map)\n', (8320, 8330), False, 'from pandas import DataFrame\n'), ((10990, 11009), 'pandas.DataFrame', 'DataFrame', (['self.map'], {}), '(self.map)\n', (10999, 11009), False, 'from pandas import DataFrame\n')]
|
#############################START LICENSE##########################################
# Copyright (C) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################END LICENSE##########################################
###########################################################################################
#
# Script name: qc-lightrad
#
# Description: This script performs automated EPID QC of the QC-3 phantom developed in Manitoba.
# There are other tools out there that do this but generally the ROI are fixed whereas this script
# aims to dynamically identify them using machine vision and the bibs in the phantom.
#
# Example usage: python qc-lightrad "/file/"
#
# Using MED-TEC MT-IAD-1 phantom
#
# Author: <NAME>
# <EMAIL>
# 5877000722
# Date:2019-04-09
#
###########################################################################################
import argparse
import os
from datetime import datetime
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from PIL import Image
from skimage.feature import blob_log
import pydicom
from pymedphys.labs.pedromartinez.utils import utils as u
def point_detect(imcirclist):
k = 0
detCenterXRegion = []
detCenterYRegion = []
print("Finding bibs in phantom...")
for img in tqdm(imcirclist):
grey_img = np.array(img, dtype=np.uint8) # converting the image to grayscale
blobs_log = blob_log(
grey_img, min_sigma=15, max_sigma=40, num_sigma=10, threshold=0.05
)
centerXRegion = []
centerYRegion = []
centerRRegion = []
grey_ampRegion = []
for blob in blobs_log:
y, x, r = blob
# center = (int(x), int(y))
centerXRegion.append(x)
centerYRegion.append(y)
centerRRegion.append(r)
grey_ampRegion.append(grey_img[int(y), int(x)])
# radius = int(r)
# print('center=', center, 'radius=', radius, 'value=', img[center], grey_img[center])
xindx = int(centerXRegion[np.argmin(grey_ampRegion)])
yindx = int(centerYRegion[np.argmin(grey_ampRegion)])
# rindx = int(centerRRegion[np.argmin(grey_ampRegion)])
detCenterXRegion.append(xindx)
detCenterYRegion.append(yindx)
k = k + 1
return detCenterXRegion, detCenterYRegion
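# Usage sketch (variable names are illustrative): point_detect expects a list of PIL
# images, one cropped ROI per bib, and returns the pixel coordinates of the blob with
# the darkest centre in each ROI:
#   xs, ys = point_detect([imcirc1, imcirc2])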
def read_dicom(filenm, ioptn):
dataset = pydicom.dcmread(filenm)
now = datetime.now()
ArrayDicom = np.zeros(
(dataset.Rows, dataset.Columns), dtype=dataset.pixel_array.dtype
)
ArrayDicom = dataset.pixel_array
SID = dataset.RTImageSID
print("array_shape=", np.shape(ArrayDicom))
height = np.shape(ArrayDicom)[0]
width = np.shape(ArrayDicom)[1]
dx = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[0]) / 1000)
dy = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[1]) / 1000)
print("pixel spacing row [mm]=", dx)
print("pixel spacing col [mm]=", dy)
# creating the figure extent based on the image dimensions, we divide by 10 to get the units in cm
extent = (
0,
0 + (ArrayDicom.shape[1] * dx / 10),
0 + (ArrayDicom.shape[0] * dy / 10),
0,
)
# creating the figure extent list for the bib images
list_extent = []
# plt.figure()
# plt.imshow(ArrayDicom, extent=extent, origin='upper')
# plt.imshow(ArrayDicom)
# plt.xlabel('x distance [cm]')
# plt.ylabel('y distance [cm]')
# plt.show()
if ioptn.startswith(("y", "yeah", "yes")):
height, width = ArrayDicom.shape
ArrayDicom_mod = ArrayDicom[
:, width // 2 - height // 2 : width // 2 + height // 2
]
else:
ArrayDicom_mod = ArrayDicom
# we take a diagonal profile to avoid phantom artifacts
# im_profile = ArrayDicom_mod.diagonal()
# test to make sure image is displayed correctly bibs are high amplitude against dark background
ctr_pixel = ArrayDicom_mod[height // 2, width // 2]
corner_pixel = ArrayDicom_mod[0, 0]
if ctr_pixel > corner_pixel:
ArrayDicom = u.range_invert(ArrayDicom)
ArrayDicom = u.norm01(ArrayDicom)
# working on transforming the full image and invert it first and go from there.
if ioptn.startswith(("y", "yeah", "yes")):
ROI1 = {"edge_top": 70, "edge_bottom": 130, "edge_left": 270, "edge_right": 350}
ROI2 = {"edge_top": 70, "edge_bottom": 130, "edge_left": 680, "edge_right": 760}
ROI3 = {
"edge_top": 150,
"edge_bottom": 210,
"edge_left": 760,
"edge_right": 830,
}
ROI4 = {
"edge_top": 560,
"edge_bottom": 620,
"edge_left": 760,
"edge_right": 830,
}
ROI5 = {
"edge_top": 640,
"edge_bottom": 700,
"edge_left": 680,
"edge_right": 760,
}
ROI6 = {
"edge_top": 640,
"edge_bottom": 700,
"edge_left": 270,
"edge_right": 350,
}
ROI7 = {
"edge_top": 560,
"edge_bottom": 620,
"edge_left": 200,
"edge_right": 270,
}
ROI8 = {
"edge_top": 150,
"edge_bottom": 210,
"edge_left": 200,
"edge_right": 270,
}
else:
ROI1 = {
"edge_top": 280,
"edge_bottom": 360,
"edge_left": 360,
"edge_right": 440,
}
ROI2 = {
"edge_top": 280,
"edge_bottom": 360,
"edge_left": 830,
"edge_right": 910,
}
ROI3 = {
"edge_top": 360,
"edge_bottom": 440,
"edge_left": 940,
"edge_right": 1020,
}
ROI4 = {
"edge_top": 840,
"edge_bottom": 920,
"edge_left": 940,
"edge_right": 1020,
}
ROI5 = {
"edge_top": 930,
"edge_bottom": 1000,
"edge_left": 830,
"edge_right": 910,
}
ROI6 = {
"edge_top": 930,
"edge_bottom": 1000,
"edge_left": 360,
"edge_right": 440,
}
ROI7 = {
"edge_top": 840,
"edge_bottom": 920,
"edge_left": 280,
"edge_right": 360,
}
ROI8 = {
"edge_top": 360,
"edge_bottom": 440,
"edge_left": 280,
"edge_right": 360,
}
# images for object detection
imcirclist = []
imcirc1 = Image.fromarray(
255
* ArrayDicom[
ROI1["edge_top"] : ROI1["edge_bottom"],
ROI1["edge_left"] : ROI1["edge_right"],
]
)
imcirc1 = imcirc1.resize((imcirc1.width * 10, imcirc1.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI1["edge_left"] * dx / 10),
(ROI1["edge_right"] * dx / 10),
(ROI1["edge_bottom"] * dy / 10),
(ROI1["edge_top"] * dy / 10),
)
)
imcirc2 = Image.fromarray(
255
* ArrayDicom[
ROI2["edge_top"] : ROI2["edge_bottom"],
ROI2["edge_left"] : ROI2["edge_right"],
]
)
imcirc2 = imcirc2.resize((imcirc2.width * 10, imcirc2.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI2["edge_left"] * dx / 10),
(ROI2["edge_right"] * dx / 10),
(ROI2["edge_bottom"] * dy / 10),
(ROI2["edge_top"] * dy / 10),
)
)
imcirc3 = Image.fromarray(
255
* ArrayDicom[
ROI3["edge_top"] : ROI3["edge_bottom"],
ROI3["edge_left"] : ROI3["edge_right"],
]
)
imcirc3 = imcirc3.resize((imcirc3.width * 10, imcirc3.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI3["edge_left"] * dx / 10),
(ROI3["edge_right"] * dx / 10),
(ROI3["edge_bottom"] * dy / 10),
(ROI3["edge_top"] * dy / 10),
)
)
imcirc4 = Image.fromarray(
255
* ArrayDicom[
ROI4["edge_top"] : ROI4["edge_bottom"],
ROI4["edge_left"] : ROI4["edge_right"],
]
)
imcirc4 = imcirc4.resize((imcirc4.width * 10, imcirc4.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI4["edge_left"] * dx / 10),
(ROI4["edge_right"] * dx / 10),
(ROI4["edge_bottom"] * dy / 10),
(ROI4["edge_top"] * dy / 10),
)
)
imcirc5 = Image.fromarray(
255
* ArrayDicom[
ROI5["edge_top"] : ROI5["edge_bottom"],
ROI5["edge_left"] : ROI5["edge_right"],
]
)
imcirc5 = imcirc5.resize((imcirc5.width * 10, imcirc5.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI5["edge_left"] * dx / 10),
(ROI5["edge_right"] * dx / 10),
(ROI5["edge_bottom"] * dy / 10),
(ROI5["edge_top"] * dy / 10),
)
)
imcirc6 = Image.fromarray(
255
* ArrayDicom[
ROI6["edge_top"] : ROI6["edge_bottom"],
ROI6["edge_left"] : ROI6["edge_right"],
]
)
imcirc6 = imcirc6.resize((imcirc6.width * 10, imcirc6.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI6["edge_left"] * dx / 10),
(ROI6["edge_right"] * dx / 10),
(ROI6["edge_bottom"] * dy / 10),
(ROI6["edge_top"] * dy / 10),
)
)
imcirc7 = Image.fromarray(
255
* ArrayDicom[
ROI7["edge_top"] : ROI7["edge_bottom"],
ROI7["edge_left"] : ROI7["edge_right"],
]
)
imcirc7 = imcirc7.resize((imcirc7.width * 10, imcirc7.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI7["edge_left"] * dx / 10),
(ROI7["edge_right"] * dx / 10),
(ROI7["edge_bottom"] * dy / 10),
(ROI7["edge_top"] * dy / 10),
)
)
imcirc8 = Image.fromarray(
255
* ArrayDicom[
ROI8["edge_top"] : ROI8["edge_bottom"],
ROI8["edge_left"] : ROI8["edge_right"],
]
)
imcirc8 = imcirc8.resize((imcirc8.width * 10, imcirc8.height * 10), Image.LANCZOS)
list_extent.append(
(
(ROI8["edge_left"] * dx / 10),
(ROI8["edge_right"] * dx / 10),
(ROI8["edge_bottom"] * dy / 10),
(ROI8["edge_top"] * dy / 10),
)
)
imcirclist.append(imcirc1)
imcirclist.append(imcirc2)
imcirclist.append(imcirc3)
imcirclist.append(imcirc4)
imcirclist.append(imcirc5)
imcirclist.append(imcirc6)
imcirclist.append(imcirc7)
imcirclist.append(imcirc8)
xdet, ydet = point_detect(imcirclist)
profiles = []
profile1 = np.array(imcirc1, dtype=np.uint8)[:, xdet[0]] / 255
profile2 = np.array(imcirc2, dtype=np.uint8)[:, xdet[1]] / 255
profile3 = np.array(imcirc3, dtype=np.uint8)[ydet[2], :] / 255
profile4 = np.array(imcirc4, dtype=np.uint8)[ydet[3], :] / 255
profile5 = np.array(imcirc5, dtype=np.uint8)[:, xdet[4]] / 255
profile6 = np.array(imcirc6, dtype=np.uint8)[:, xdet[5]] / 255
profile7 = np.array(imcirc7, dtype=np.uint8)[ydet[6], :] / 255
profile8 = np.array(imcirc8, dtype=np.uint8)[ydet[7], :] / 255
profiles.append(profile1)
profiles.append(profile2)
profiles.append(profile3)
profiles.append(profile4)
profiles.append(profile5)
profiles.append(profile6)
profiles.append(profile7)
profiles.append(profile8)
k = 0
fig = plt.figure(figsize=(8, 12)) # this figure will hold the bibs
plt.subplots_adjust(hspace=0.35)
# creating the page to write the results
dirname = os.path.dirname(filenm)
# tolerance levels to change at will
    tol = 1.0  # tolerance level
act = 2.0 # action level
phantom_distance = 3.0 # distance from the bib to the edge of the phantom
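    # In the report generated below, offsets within tol are printed in green, offsets between
    # tol and act in yellow, and offsets beyond act in red.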
with PdfPages(
dirname
+ "/"
+ now.strftime("%d-%m-%Y_%H:%M_")
+ dataset[0x0008, 0x1010].value
+ "_Lightrad_report.pdf"
) as pdf:
Page = plt.figure(figsize=(4, 5))
Page.text(0.45, 0.9, "Report", size=18)
kk = 0 # counter for data points
for profile in profiles:
_, index = u.find_nearest(profile, 0.5) # find the 50% amplitude point
# value_near, index = find_nearest(profile, 0.5) # find the 50% amplitude point
if ( # pylint: disable = consider-using-in
k == 0 or k == 1 or k == 4 or k == 5
): # there are the bibs in the horizontal
offset_value_y = round(
abs((ydet[k] - index) * (dy / 10)) - phantom_distance, 2
)
txt = str(offset_value_y)
# print('offset_value_y=', offset_value_y)
if abs(offset_value_y) <= tol:
Page.text(
0.1,
0.8 - kk / 10,
"Point" + str(kk + 1) + " offset=" + txt + " mm",
color="g",
)
elif abs(offset_value_y) > tol and abs(offset_value_y) <= act:
Page.text(
0.1,
0.8 - kk / 10,
"Point" + str(kk + 1) + " offset=" + txt + " mm",
color="y",
)
else:
Page.text(
0.1,
0.8 - kk / 10,
"Point" + str(kk + 1) + " offset=" + txt + " mm",
color="r",
)
kk = kk + 1
ax = fig.add_subplot(
4, 2, k + 1
) # plotting all the figures in a single plot
ax.imshow(
np.array(imcirclist[k], dtype=np.uint8) / 255,
extent=list_extent[k],
origin="upper",
)
ax.scatter(
list_extent[k][0] + xdet[k] * dx / 100,
list_extent[k][3] + ydet[k] * dy / 100,
s=30,
marker="P",
color="y",
)
ax.set_title("Bib=" + str(k + 1))
ax.axhline(
list_extent[k][3] + index * dy / 100, color="r", linestyle="--"
)
ax.set_xlabel("x distance [cm]")
ax.set_ylabel("y distance [cm]")
else:
offset_value_x = round(
abs((xdet[k] - index) * (dx / 10)) - phantom_distance, 2
)
txt = str(offset_value_x)
if abs(offset_value_x) <= tol:
# print('1')
Page.text(
0.1,
0.8 - kk / 10,
"Point" + str(kk + 1) + " offset=" + txt + " mm",
color="g",
)
elif abs(offset_value_x) > tol and abs(offset_value_x) <= act:
# print('2')
Page.text(
0.1,
0.8 - kk / 10,
"Point" + str(kk + 1) + " offset=" + txt + " mm",
color="y",
)
else:
# print('3')
Page.text(
0.1,
0.8 - kk / 10,
"Point" + str(kk + 1) + " offset=" + txt + " mm",
color="r",
)
kk = kk + 1
ax = fig.add_subplot(
4, 2, k + 1
) # plotting all the figures in a single plot
ax.imshow(
np.array(imcirclist[k], dtype=np.uint8) / 255,
extent=list_extent[k],
origin="upper",
)
ax.scatter(
list_extent[k][0] + xdet[k] * dx / 100,
list_extent[k][3] + ydet[k] * dy / 100,
s=30,
marker="P",
color="y",
)
ax.set_title("Bib=" + str(k + 1))
ax.axvline(
list_extent[k][0] + index * dx / 100, color="r", linestyle="--"
)
ax.set_xlabel("x distance [cm]")
ax.set_ylabel("y distance [cm]")
k = k + 1
pdf.savefig()
pdf.savefig(fig)
# we now need to select a horizontal and a vertical profile to find the edge of the field from an image
# for the field size calculation
im = Image.fromarray(255 * ArrayDicom)
if ioptn.startswith(("y", "yeah", "yes")):
PROFILE = {
"horizontal": 270,
"vertical": 430,
} # location to extract the horizontal and vertical profiles if this is a linac
else:
PROFILE = {
"horizontal": 470,
"vertical": 510,
} # location to extract the horizontal and vertical profiles if this is a true beam
profilehorz = (
np.array(im, dtype=np.uint8)[PROFILE["horizontal"], :] / 255
) # we need to change these limits on a less specific criteria
profilevert = np.array(im, dtype=np.uint8)[:, PROFILE["vertical"]] / 255
# top_edge, index_top = find_nearest(profilevert[0:height//2], 0.5) # finding the edge of the field on the top
# bot_edge, index_bot = find_nearest(profilevert[height//2:height], 0.5) # finding the edge of the field on the bottom
_, index_top = u.find_nearest(
profilevert[0 : height // 2], 0.5
) # finding the edge of the field on the top
_, index_bot = u.find_nearest(
profilevert[height // 2 : height], 0.5
) # finding the edge of the field on the bottom
# l_edge, index_l = find_nearest(profilehorz[0:width//2], 0.5) #finding the edge of the field on the bottom
# r_edge, index_r = find_nearest(profilehorz[width//2:width], 0.5) #finding the edge of the field on the right
_, index_l = u.find_nearest(
profilehorz[0 : width // 2], 0.5
) # finding the edge of the field on the bottom
_, index_r = u.find_nearest(
profilehorz[width // 2 : width], 0.5
) # finding the edge of the field on the right
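        # The field size is the distance between the two 50% amplitude crossings found above:
        # Vfs ~ (height//2 + index_bot - index_top) * dy and Hfs ~ (width//2 + index_r - index_l) * dx, in mm.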
fig2 = plt.figure(
figsize=(7, 5)
) # this figure will show the vertical and horizontal calculated field size
ax = fig2.subplots()
ax.imshow(ArrayDicom, extent=extent, origin="upper")
ax.set_xlabel("x distance [cm]")
ax.set_ylabel("y distance [cm]")
# adding a vertical arrow
ax.annotate(
s="",
xy=(PROFILE["vertical"] * dx / 10, index_top * dy / 10),
xytext=(PROFILE["vertical"] * dx / 10, (height // 2 + index_bot) * dy / 10),
arrowprops=dict(arrowstyle="<->", color="r"),
) # example on how to plot a double headed arrow
ax.text(
(PROFILE["vertical"] + 10) * dx / 10,
(height // 1.25) * dy / 10,
"Vfs="
+ str(round((height // 2 + index_bot - index_top) * dy / 10, 2))
+ "cm",
rotation=90,
fontsize=14,
color="r",
)
# adding a horizontal arrow
# print(index_l*dx, index_l, PROFILE['horizontal']*dy, PROFILE['horizontal'])
ax.annotate(
s="",
xy=(index_l * dx / 10, PROFILE["horizontal"] * dy / 10),
xytext=((width // 2 + index_r) * dx / 10, PROFILE["horizontal"] * dy / 10),
arrowprops=dict(arrowstyle="<->", color="r"),
) # example on how to plot a double headed arrow
ax.text(
(width // 2) * dx / 10,
(PROFILE["horizontal"] - 10) * dy / 10,
"Hfs=" + str(round((width // 2 + index_r - index_l) * dx / 10, 2)) + "cm",
rotation=0,
fontsize=14,
color="r",
)
pdf.savefig(fig2)
if __name__ == "__main__":
    while True:  # loop until the user enters a valid yes/no answer
line = input("Are these files from a clinac [yes(y)/no(n)]> ")
try:
## if line == 'done':
## break
ioption = str(line.lower())
if ioption.startswith(("y", "yeah", "yes", "n", "no", "nope")):
break
except: # pylint: disable = bare-except
print("Please enter a valid option:")
parser = argparse.ArgumentParser()
parser.add_argument("file", type=str, help="Input the Light/Rad file")
args = parser.parse_args()
filename = args.file
read_dicom(filename, ioption)
|
[
"pymedphys.labs.pedromartinez.utils.utils.range_invert",
"tqdm.tqdm",
"pydicom.dcmread",
"argparse.ArgumentParser",
"os.path.dirname",
"numpy.zeros",
"skimage.feature.blob_log",
"numpy.argmin",
"pymedphys.labs.pedromartinez.utils.utils.norm01",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.array",
"PIL.Image.fromarray",
"pymedphys.labs.pedromartinez.utils.utils.find_nearest",
"datetime.datetime.now",
"matplotlib.pyplot.subplots_adjust"
] |
[((1874, 1890), 'tqdm.tqdm', 'tqdm', (['imcirclist'], {}), '(imcirclist)\n', (1878, 1890), False, 'from tqdm import tqdm\n'), ((2983, 3006), 'pydicom.dcmread', 'pydicom.dcmread', (['filenm'], {}), '(filenm)\n', (2998, 3006), False, 'import pydicom\n'), ((3017, 3031), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3029, 3031), False, 'from datetime import datetime\n'), ((3050, 3124), 'numpy.zeros', 'np.zeros', (['(dataset.Rows, dataset.Columns)'], {'dtype': 'dataset.pixel_array.dtype'}), '((dataset.Rows, dataset.Columns), dtype=dataset.pixel_array.dtype)\n', (3058, 3124), True, 'import numpy as np\n'), ((4711, 4731), 'pymedphys.labs.pedromartinez.utils.utils.norm01', 'u.norm01', (['ArrayDicom'], {}), '(ArrayDicom)\n', (4719, 4731), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((7211, 7325), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI1['edge_top']:ROI1['edge_bottom'], ROI1['edge_left']:\n ROI1['edge_right']])"], {}), "(255 * ArrayDicom[ROI1['edge_top']:ROI1['edge_bottom'], ROI1\n ['edge_left']:ROI1['edge_right']])\n", (7226, 7325), False, 'from PIL import Image\n'), ((7709, 7823), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI2['edge_top']:ROI2['edge_bottom'], ROI2['edge_left']:\n ROI2['edge_right']])"], {}), "(255 * ArrayDicom[ROI2['edge_top']:ROI2['edge_bottom'], ROI2\n ['edge_left']:ROI2['edge_right']])\n", (7724, 7823), False, 'from PIL import Image\n'), ((8207, 8321), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI3['edge_top']:ROI3['edge_bottom'], ROI3['edge_left']:\n ROI3['edge_right']])"], {}), "(255 * ArrayDicom[ROI3['edge_top']:ROI3['edge_bottom'], ROI3\n ['edge_left']:ROI3['edge_right']])\n", (8222, 8321), False, 'from PIL import Image\n'), ((8704, 8818), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI4['edge_top']:ROI4['edge_bottom'], ROI4['edge_left']:\n ROI4['edge_right']])"], {}), "(255 * ArrayDicom[ROI4['edge_top']:ROI4['edge_bottom'], ROI4\n ['edge_left']:ROI4['edge_right']])\n", (8719, 8818), False, 'from PIL import Image\n'), ((9202, 9316), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI5['edge_top']:ROI5['edge_bottom'], ROI5['edge_left']:\n ROI5['edge_right']])"], {}), "(255 * ArrayDicom[ROI5['edge_top']:ROI5['edge_bottom'], ROI5\n ['edge_left']:ROI5['edge_right']])\n", (9217, 9316), False, 'from PIL import Image\n'), ((9700, 9814), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI6['edge_top']:ROI6['edge_bottom'], ROI6['edge_left']:\n ROI6['edge_right']])"], {}), "(255 * ArrayDicom[ROI6['edge_top']:ROI6['edge_bottom'], ROI6\n ['edge_left']:ROI6['edge_right']])\n", (9715, 9814), False, 'from PIL import Image\n'), ((10198, 10312), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI7['edge_top']:ROI7['edge_bottom'], ROI7['edge_left']:\n ROI7['edge_right']])"], {}), "(255 * ArrayDicom[ROI7['edge_top']:ROI7['edge_bottom'], ROI7\n ['edge_left']:ROI7['edge_right']])\n", (10213, 10312), False, 'from PIL import Image\n'), ((10696, 10810), 'PIL.Image.fromarray', 'Image.fromarray', (["(255 * ArrayDicom[ROI8['edge_top']:ROI8['edge_bottom'], ROI8['edge_left']:\n ROI8['edge_right']])"], {}), "(255 * ArrayDicom[ROI8['edge_top']:ROI8['edge_bottom'], ROI8\n ['edge_left']:ROI8['edge_right']])\n", (10711, 10810), False, 'from PIL import Image\n'), ((12288, 12315), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 12)'}), '(figsize=(8, 12))\n', (12298, 12315), True, 'import matplotlib.pyplot as plt\n'), 
((12354, 12386), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.35)'}), '(hspace=0.35)\n', (12373, 12386), True, 'import matplotlib.pyplot as plt\n'), ((12447, 12470), 'os.path.dirname', 'os.path.dirname', (['filenm'], {}), '(filenm)\n', (12462, 12470), False, 'import os\n'), ((21564, 21589), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21587, 21589), False, 'import argparse\n'), ((1911, 1940), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1919, 1940), True, 'import numpy as np\n'), ((1998, 2074), 'skimage.feature.blob_log', 'blob_log', (['grey_img'], {'min_sigma': '(15)', 'max_sigma': '(40)', 'num_sigma': '(10)', 'threshold': '(0.05)'}), '(grey_img, min_sigma=15, max_sigma=40, num_sigma=10, threshold=0.05)\n', (2006, 2074), False, 'from skimage.feature import blob_log\n'), ((3231, 3251), 'numpy.shape', 'np.shape', (['ArrayDicom'], {}), '(ArrayDicom)\n', (3239, 3251), True, 'import numpy as np\n'), ((3266, 3286), 'numpy.shape', 'np.shape', (['ArrayDicom'], {}), '(ArrayDicom)\n', (3274, 3286), True, 'import numpy as np\n'), ((3302, 3322), 'numpy.shape', 'np.shape', (['ArrayDicom'], {}), '(ArrayDicom)\n', (3310, 3322), True, 'import numpy as np\n'), ((4666, 4692), 'pymedphys.labs.pedromartinez.utils.utils.range_invert', 'u.range_invert', (['ArrayDicom'], {}), '(ArrayDicom)\n', (4680, 4692), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((12850, 12876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 5)'}), '(figsize=(4, 5))\n', (12860, 12876), True, 'import matplotlib.pyplot as plt\n'), ((17561, 17594), 'PIL.Image.fromarray', 'Image.fromarray', (['(255 * ArrayDicom)'], {}), '(255 * ArrayDicom)\n', (17576, 17594), False, 'from PIL import Image\n'), ((18556, 18603), 'pymedphys.labs.pedromartinez.utils.utils.find_nearest', 'u.find_nearest', (['profilevert[0:height // 2]', '(0.5)'], {}), '(profilevert[0:height // 2], 0.5)\n', (18570, 18603), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((18695, 18747), 'pymedphys.labs.pedromartinez.utils.utils.find_nearest', 'u.find_nearest', (['profilevert[height // 2:height]', '(0.5)'], {}), '(profilevert[height // 2:height], 0.5)\n', (18709, 18747), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((19076, 19122), 'pymedphys.labs.pedromartinez.utils.utils.find_nearest', 'u.find_nearest', (['profilehorz[0:width // 2]', '(0.5)'], {}), '(profilehorz[0:width // 2], 0.5)\n', (19090, 19122), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((19215, 19265), 'pymedphys.labs.pedromartinez.utils.utils.find_nearest', 'u.find_nearest', (['profilehorz[width // 2:width]', '(0.5)'], {}), '(profilehorz[width // 2:width], 0.5)\n', (19229, 19265), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((19352, 19378), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (19362, 19378), True, 'import matplotlib.pyplot as plt\n'), ((11505, 11538), 'numpy.array', 'np.array', (['imcirc1'], {'dtype': 'np.uint8'}), '(imcirc1, dtype=np.uint8)\n', (11513, 11538), True, 'import numpy as np\n'), ((11572, 11605), 'numpy.array', 'np.array', (['imcirc2'], {'dtype': 'np.uint8'}), '(imcirc2, dtype=np.uint8)\n', (11580, 11605), True, 'import numpy as np\n'), ((11639, 11672), 'numpy.array', 'np.array', (['imcirc3'], {'dtype': 'np.uint8'}), '(imcirc3, dtype=np.uint8)\n', (11647, 11672), True, 'import numpy as np\n'), ((11706, 11739), 
'numpy.array', 'np.array', (['imcirc4'], {'dtype': 'np.uint8'}), '(imcirc4, dtype=np.uint8)\n', (11714, 11739), True, 'import numpy as np\n'), ((11773, 11806), 'numpy.array', 'np.array', (['imcirc5'], {'dtype': 'np.uint8'}), '(imcirc5, dtype=np.uint8)\n', (11781, 11806), True, 'import numpy as np\n'), ((11840, 11873), 'numpy.array', 'np.array', (['imcirc6'], {'dtype': 'np.uint8'}), '(imcirc6, dtype=np.uint8)\n', (11848, 11873), True, 'import numpy as np\n'), ((11907, 11940), 'numpy.array', 'np.array', (['imcirc7'], {'dtype': 'np.uint8'}), '(imcirc7, dtype=np.uint8)\n', (11915, 11940), True, 'import numpy as np\n'), ((11974, 12007), 'numpy.array', 'np.array', (['imcirc8'], {'dtype': 'np.uint8'}), '(imcirc8, dtype=np.uint8)\n', (11982, 12007), True, 'import numpy as np\n'), ((13023, 13051), 'pymedphys.labs.pedromartinez.utils.utils.find_nearest', 'u.find_nearest', (['profile', '(0.5)'], {}), '(profile, 0.5)\n', (13037, 13051), True, 'from pymedphys.labs.pedromartinez.utils import utils as u\n'), ((2637, 2662), 'numpy.argmin', 'np.argmin', (['grey_ampRegion'], {}), '(grey_ampRegion)\n', (2646, 2662), True, 'import numpy as np\n'), ((2699, 2724), 'numpy.argmin', 'np.argmin', (['grey_ampRegion'], {}), '(grey_ampRegion)\n', (2708, 2724), True, 'import numpy as np\n'), ((18072, 18100), 'numpy.array', 'np.array', (['im'], {'dtype': 'np.uint8'}), '(im, dtype=np.uint8)\n', (18080, 18100), True, 'import numpy as np\n'), ((18227, 18255), 'numpy.array', 'np.array', (['im'], {'dtype': 'np.uint8'}), '(im, dtype=np.uint8)\n', (18235, 18255), True, 'import numpy as np\n'), ((14626, 14665), 'numpy.array', 'np.array', (['imcirclist[k]'], {'dtype': 'np.uint8'}), '(imcirclist[k], dtype=np.uint8)\n', (14634, 14665), True, 'import numpy as np\n'), ((16646, 16685), 'numpy.array', 'np.array', (['imcirclist[k]'], {'dtype': 'np.uint8'}), '(imcirclist[k], dtype=np.uint8)\n', (16654, 16685), True, 'import numpy as np\n')]
|
import nltk
import json
import numpy as np
from nltk import word_tokenize
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
the model to intialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args["model_config"])
# Get OUTPUT0 configuration
output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0")
# Get OUTPUT1 configuration
output1_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT1")
# Get OUTPUT2 configuration
output2_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT2")
# Get OUTPUT3 configuration
output3_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT3")
# Convert Triton types to numpy types
self.output0_dtype = pb_utils.triton_string_to_numpy(
output0_config["data_type"]
)
self.output1_dtype = pb_utils.triton_string_to_numpy(
output1_config["data_type"]
)
self.output2_dtype = pb_utils.triton_string_to_numpy(
output2_config["data_type"]
)
self.output3_dtype = pb_utils.triton_string_to_numpy(
output3_config["data_type"]
)
# Get model repository path to read labels
self.model_repository = model_repository = args["model_repository"]
print(model_repository)
# Initialize tokenizer
nltk.download("punkt")
def tokenize(self, text):
tokens = word_tokenize(text)
# split into lower-case word tokens, in numpy array with shape of (seq, 1)
words = np.array([w.lower() for w in tokens], dtype=np.object_).reshape(-1, 1)
# split words into chars, in numpy array with shape of (seq, 1, 1, 16)
chars = [[c for c in t][:16] for t in tokens]
chars = [cs + [""] * (16 - len(cs)) for cs in chars]
chars = np.array(chars, dtype=np.object_).reshape(-1, 1, 1, 16)
return words, chars
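    # Illustrative example: tokenize("Hello world") returns words with shape (2, 1) and
    # chars with shape (2, 1, 1, 16), each character row padded with "" up to 16 entries.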
def execute(self, requests):
"""
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
output0_dtype = self.output0_dtype
output1_dtype = self.output1_dtype
output2_dtype = self.output2_dtype
output3_dtype = self.output3_dtype
responses = []
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get INPUT0
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
context = in_0.as_numpy().astype(str)
print(context)
# Get INPUT1
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT1")
query = in_0.as_numpy().astype(str)
print(query)
cw, cc = self.tokenize(context[0])
qw, qc = self.tokenize(query[0])
out_0 = np.array(qw, dtype=output0_dtype)
out_1 = np.array(cc, dtype=output1_dtype)
out_2 = np.array(qc, dtype=output2_dtype)
out_3 = np.array(cw, dtype=output3_dtype)
# Create output tensors. You need pb_utils.Tensor objects to create pb_utils.InferenceResponse.
out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0)
out_tensor_1 = pb_utils.Tensor("OUTPUT1", out_1)
out_tensor_2 = pb_utils.Tensor("OUTPUT2", out_2)
out_tensor_3 = pb_utils.Tensor("OUTPUT3", out_3)
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor_0, out_tensor_1, out_tensor_2, out_tensor_3]
)
responses.append(inference_response)
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print("Cleaning up...")
|
[
"triton_python_backend_utils.get_output_config_by_name",
"json.loads",
"triton_python_backend_utils.Tensor",
"triton_python_backend_utils.get_input_tensor_by_name",
"numpy.array",
"triton_python_backend_utils.InferenceResponse",
"triton_python_backend_utils.triton_string_to_numpy",
"nltk.download",
"nltk.word_tokenize"
] |
[((1180, 1212), 'json.loads', 'json.loads', (["args['model_config']"], {}), "(args['model_config'])\n", (1190, 1212), False, 'import json\n'), ((1275, 1334), 'triton_python_backend_utils.get_output_config_by_name', 'pb_utils.get_output_config_by_name', (['model_config', '"""OUTPUT0"""'], {}), "(model_config, 'OUTPUT0')\n", (1309, 1334), True, 'import triton_python_backend_utils as pb_utils\n'), ((1397, 1456), 'triton_python_backend_utils.get_output_config_by_name', 'pb_utils.get_output_config_by_name', (['model_config', '"""OUTPUT1"""'], {}), "(model_config, 'OUTPUT1')\n", (1431, 1456), True, 'import triton_python_backend_utils as pb_utils\n'), ((1519, 1578), 'triton_python_backend_utils.get_output_config_by_name', 'pb_utils.get_output_config_by_name', (['model_config', '"""OUTPUT2"""'], {}), "(model_config, 'OUTPUT2')\n", (1553, 1578), True, 'import triton_python_backend_utils as pb_utils\n'), ((1641, 1700), 'triton_python_backend_utils.get_output_config_by_name', 'pb_utils.get_output_config_by_name', (['model_config', '"""OUTPUT3"""'], {}), "(model_config, 'OUTPUT3')\n", (1675, 1700), True, 'import triton_python_backend_utils as pb_utils\n'), ((1777, 1837), 'triton_python_backend_utils.triton_string_to_numpy', 'pb_utils.triton_string_to_numpy', (["output0_config['data_type']"], {}), "(output0_config['data_type'])\n", (1808, 1837), True, 'import triton_python_backend_utils as pb_utils\n'), ((1889, 1949), 'triton_python_backend_utils.triton_string_to_numpy', 'pb_utils.triton_string_to_numpy', (["output1_config['data_type']"], {}), "(output1_config['data_type'])\n", (1920, 1949), True, 'import triton_python_backend_utils as pb_utils\n'), ((2001, 2061), 'triton_python_backend_utils.triton_string_to_numpy', 'pb_utils.triton_string_to_numpy', (["output2_config['data_type']"], {}), "(output2_config['data_type'])\n", (2032, 2061), True, 'import triton_python_backend_utils as pb_utils\n'), ((2113, 2173), 'triton_python_backend_utils.triton_string_to_numpy', 'pb_utils.triton_string_to_numpy', (["output3_config['data_type']"], {}), "(output3_config['data_type'])\n", (2144, 2173), True, 'import triton_python_backend_utils as pb_utils\n'), ((2396, 2418), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (2409, 2418), False, 'import nltk\n'), ((2468, 2487), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2481, 2487), False, 'from nltk import word_tokenize\n'), ((3697, 3749), 'triton_python_backend_utils.get_input_tensor_by_name', 'pb_utils.get_input_tensor_by_name', (['request', '"""INPUT0"""'], {}), "(request, 'INPUT0')\n", (3730, 3749), True, 'import triton_python_backend_utils as pb_utils\n'), ((3872, 3924), 'triton_python_backend_utils.get_input_tensor_by_name', 'pb_utils.get_input_tensor_by_name', (['request', '"""INPUT1"""'], {}), "(request, 'INPUT1')\n", (3905, 3924), True, 'import triton_python_backend_utils as pb_utils\n'), ((4112, 4145), 'numpy.array', 'np.array', (['qw'], {'dtype': 'output0_dtype'}), '(qw, dtype=output0_dtype)\n', (4120, 4145), True, 'import numpy as np\n'), ((4166, 4199), 'numpy.array', 'np.array', (['cc'], {'dtype': 'output1_dtype'}), '(cc, dtype=output1_dtype)\n', (4174, 4199), True, 'import numpy as np\n'), ((4220, 4253), 'numpy.array', 'np.array', (['qc'], {'dtype': 'output2_dtype'}), '(qc, dtype=output2_dtype)\n', (4228, 4253), True, 'import numpy as np\n'), ((4274, 4307), 'numpy.array', 'np.array', (['cw'], {'dtype': 'output3_dtype'}), '(cw, dtype=output3_dtype)\n', (4282, 4307), True, 'import numpy as np\n'), ((4444, 4477), 
'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""OUTPUT0"""', 'out_0'], {}), "('OUTPUT0', out_0)\n", (4459, 4477), True, 'import triton_python_backend_utils as pb_utils\n'), ((4505, 4538), 'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""OUTPUT1"""', 'out_1'], {}), "('OUTPUT1', out_1)\n", (4520, 4538), True, 'import triton_python_backend_utils as pb_utils\n'), ((4566, 4599), 'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""OUTPUT2"""', 'out_2'], {}), "('OUTPUT2', out_2)\n", (4581, 4599), True, 'import triton_python_backend_utils as pb_utils\n'), ((4627, 4660), 'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""OUTPUT3"""', 'out_3'], {}), "('OUTPUT3', out_3)\n", (4642, 4660), True, 'import triton_python_backend_utils as pb_utils\n'), ((4695, 4798), 'triton_python_backend_utils.InferenceResponse', 'pb_utils.InferenceResponse', ([], {'output_tensors': '[out_tensor_0, out_tensor_1, out_tensor_2, out_tensor_3]'}), '(output_tensors=[out_tensor_0, out_tensor_1,\n out_tensor_2, out_tensor_3])\n', (4721, 4798), True, 'import triton_python_backend_utils as pb_utils\n'), ((2870, 2903), 'numpy.array', 'np.array', (['chars'], {'dtype': 'np.object_'}), '(chars, dtype=np.object_)\n', (2878, 2903), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
"""
Train the neural network and save its parameters (weights) to an HDF5 file
"""
import numpy as np
import tensorflow as tf
from utils import *
from network import *
"""
==== Some terminology ====
# Batch size : number of samples per batch, i.e. the number of samples used in one iteration (a forward pass to compute the loss plus a back-propagation pass to update the network parameters). The larger the batch size, the more memory is needed.
# Iteration : each iteration updates the weights (network parameters) once; each weight update runs a forward pass and a BP pass over Batch size samples.
# Epoch : one epoch means the whole set of training samples has been iterated over once.
# Example : if the training set has 1000 samples and Batch_size=10,
# then : one pass over the whole set takes 100 iterations, i.e. 1 epoch.
# In practice we usually train for more than one epoch.
"""
# Train the neural network
def train():
notes = get_notes()
    # Get the number of distinct pitches (duplicates removed because a set is used)
num_pitch = len(set(notes))
network_input, network_output = prepare_sequences(notes, num_pitch)
model = network_model(network_input, num_pitch)
filepath = "weights-{epoch:02d}-{loss:.4f}.hdf5"
    # Use checkpoint files to save the model parameters (weights) at the end of every epoch,
    # so the parameters are never lost during training and we can stop whenever the loss is satisfactory.
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath,  # path of the saved file
        monitor='loss',  # the monitored quantity is the loss
        verbose=0,
        save_best_only=True,  # only keep the file for the best value of the monitored quantity
        mode='min'  # "best" means the smallest loss
)
callbacks_list = [checkpoint]
    # Train the model with the fit method
model.fit(network_input, network_output, epochs=100, batch_size=64, callbacks=callbacks_list)
def prepare_sequences(notes, num_pitch):
"""
    Prepare the sequences used to train the neural network
"""
    sequence_length = 100  # sequence length
    # Get the names of all pitches
pitch_names = sorted(set(item for item in notes))
    # Build a dictionary that maps pitches to integers
pitch_to_int = dict((pitch, num) for num, pitch in enumerate(pitch_names))
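    # Illustrative: pitch_names = ['A4', 'C5', 'E5'] would give {'A4': 0, 'C5': 1, 'E5': 2}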
    # Build the input and output sequences for the network
network_input = []
network_output = []
for i in range(0, len(notes) - sequence_length, 1):
sequence_in = notes[i: i + sequence_length]
sequence_out = notes[i + sequence_length]
network_input.append([pitch_to_int[char] for char in sequence_in])
network_output.append(pitch_to_int[sequence_out])
n_patterns = len(network_input)
    # Reshape the input into a shape the network model can accept
network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))
    # Standardize / normalize the input
    # Normalization helps the optimizer find the error minimum faster and more reliably
network_input = network_input / float(num_pitch)
    # Convert the expected output into a {0, 1} boolean matrix, as required by the categorical_crossentropy loss
network_output = tf.keras.utils.to_categorical(network_output)
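    # Illustrative: with 3 classes, outputs [0, 2, 1] become [[1, 0, 0], [0, 0, 1], [0, 1, 0]]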
return network_input, network_output
if __name__ == '__main__':
train()
|
[
"tensorflow.keras.utils.to_categorical",
"numpy.reshape",
"tensorflow.keras.callbacks.ModelCheckpoint"
] |
[((917, 1025), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=0,\n save_best_only=True, mode='min')\n", (951, 1025), True, 'import tensorflow as tf\n'), ((2025, 2084), 'numpy.reshape', 'np.reshape', (['network_input', '(n_patterns, sequence_length, 1)'], {}), '(network_input, (n_patterns, sequence_length, 1))\n', (2035, 2084), True, 'import numpy as np\n'), ((2291, 2336), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['network_output'], {}), '(network_output)\n', (2320, 2336), True, 'import tensorflow as tf\n')]
|
import numpy as np
import re
import pandas as pd
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
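# Illustrative: clean_str("I can't believe it!") returns "i ca n't believe it !"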
def load_data_sarc(input_file, training, sample_percent=1.0):
reddit = pd.read_csv(input_file)
sample_index = int(len(reddit) * sample_percent)
labels = reddit['label'].values
labels = labels[:sample_index]
labels = [[0, 1] if l == 1 else [1, 0] for l in labels]
split_index = int(len(labels) * 0.7)
train_labels, test_labels = labels[:split_index], labels[split_index:]
sarcastic = 0
for label in test_labels:
if label == [0, 1]: sarcastic += 1
# Process data
text = reddit['comment'].values
text = [str(x) for x in text]
text = text[:sample_index]
train_text, test_text = text[:split_index], text[split_index:]
return [train_text, np.array(train_labels)] if training else [test_text, np.array(test_labels)]
def load_data_ghosh(input_file):
with open(input_file) as f:
twitter = f.readlines()
twitter = [x.strip() for x in twitter]
twitter = pd.DataFrame(twitter)
new = twitter[0].str.split("\t", n = 2, expand = True)
twitter_labels = new[1]
twitter_text = new[2]
twitter_text = [tweet for tweet in twitter_text]
    twitter_labels = [[0, 1] if l == '1' else [1, 0] for l in twitter_labels]
sarcastic = 0
for label in twitter_labels:
if label == [0, 1]: sarcastic += 1
#print("Sarcastic Count: %d" % sarcastic)
#print("Not Sarcastic Count: %d" % (len(twitter_labels)-sarcastic))
twitter_labels = np.array(twitter_labels)
return [twitter_text, twitter_labels]
def load_data_and_labels(positive_data_file, negative_data_file):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def batch_iter_one_epoch(data, batch_size, shuffle=True):
data = np.array(data)
data_size = len(data)
num_batches = int((len(data)-1)/batch_size) + 1
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
print("Epoch: %d" % epoch)
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
#load_data_sarc('data/train-balanced-sarcasm.csv', True)
#load_data_ghosh('data/ghosh/train.txt')
|
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"re.sub",
"numpy.concatenate"
] |
[((260, 306), 're.sub', 're.sub', (['"""[^A-Za-z0-9(),!?\\\\\'\\\\`]"""', '""" """', 'string'], {}), '("[^A-Za-z0-9(),!?\\\\\'\\\\`]", \' \', string)\n', (266, 306), False, 'import re\n'), ((319, 348), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (325, 348), False, 'import re\n'), ((363, 394), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (369, 394), False, 'import re\n'), ((409, 440), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (415, 440), False, 'import re\n'), ((455, 486), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (461, 486), False, 'import re\n'), ((501, 530), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (507, 530), False, 'import re\n'), ((545, 576), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', (551, 576), False, 'import re\n'), ((591, 617), 're.sub', 're.sub', (['""","""', '""" , """', 'string'], {}), "(',', ' , ', string)\n", (597, 617), False, 'import re\n'), ((632, 658), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'string'], {}), "('!', ' ! ', string)\n", (638, 658), False, 'import re\n'), ((673, 703), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 'string'], {}), "('\\\\(', ' \\\\( ', string)\n", (679, 703), False, 'import re\n'), ((716, 746), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 'string'], {}), "('\\\\)', ' \\\\) ', string)\n", (722, 746), False, 'import re\n'), ((759, 789), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 'string'], {}), "('\\\\?', ' \\\\? ', string)\n", (765, 789), False, 'import re\n'), ((802, 832), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (808, 832), False, 'import re\n'), ((943, 966), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (954, 966), True, 'import pandas as pd\n'), ((1814, 1835), 'pandas.DataFrame', 'pd.DataFrame', (['twitter'], {}), '(twitter)\n', (1826, 1835), True, 'import pandas as pd\n'), ((2319, 2343), 'numpy.array', 'np.array', (['twitter_labels'], {}), '(twitter_labels)\n', (2327, 2343), True, 'import numpy as np\n'), ((3199, 3252), 'numpy.concatenate', 'np.concatenate', (['[positive_labels, negative_labels]', '(0)'], {}), '([positive_labels, negative_labels], 0)\n', (3213, 3252), True, 'import numpy as np\n'), ((3346, 3360), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3354, 3360), True, 'import numpy as np\n'), ((3957, 3971), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3965, 3971), True, 'import numpy as np\n'), ((1582, 1604), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (1590, 1604), True, 'import numpy as np\n'), ((1635, 1656), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (1643, 1656), True, 'import numpy as np\n'), ((3508, 3528), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (3517, 3528), True, 'import numpy as np\n'), ((4244, 4264), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (4253, 4264), True, 'import numpy as np\n')]
|
"""
Modified From https://github.com/OpenNMT/OpenNMT-tf/blob/r1/examples/library/minimal_transformer_training.py
MIT License
Copyright (c) 2017-present The OpenNMT Authors.
This example demonstrates how to train a standard Transformer model using
OpenNMT-tf as a library in about 200 lines of code. While relatively short,
this example contains some advanced concepts such as dataset bucketing and
prefetching, token-based batching, gradients accumulation, beam search, etc.
Currently, the beam search part is not easily customizable. This is expected to
be improved for TensorFlow 2.0 which is eager first.
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Use opennmt-tf-1.25.1
import argparse
import copy
from datetime import datetime
import numpy as np
import os
import sys
import tensorflow as tf
import opennmt as onmt
from opennmt import constants
from opennmt.utils import misc
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding
from examples.tensorflow.decoding.utils.bleu_score import bleu_score
from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
NUM_HEADS = 8
NUM_LAYERS = 6
HIDDEN_UNITS = 512
SIZE_PER_HEAD = 64
FFN_INNER_DIM = 2048
encoder = onmt.encoders.SelfAttentionEncoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
decoder = onmt.decoders.SelfAttentionDecoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
def translate(args_dict):
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
model_dir = args_dict["model_dir"]
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
sampling_topk = args_dict['sampling_topk']
sampling_topp = args_dict['sampling_topp']
tf_datatype = tf.float32
max_ite = args_dict['max_iteration']
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
# Define the "base" Transformer model.
source_inputter = onmt.inputters.WordEmbedder("source_vocabulary", embedding_size=512, dtype=tf_datatype)
target_inputter = onmt.inputters.WordEmbedder("target_vocabulary", embedding_size=512, dtype=tf_datatype)
inputter = onmt.inputters.ExampleInputter(source_inputter, target_inputter)
inputter.initialize({
"source_vocabulary": args_dict["source_vocabulary"],
"target_vocabulary": args_dict["target_vocabulary"]
})
mode = tf.estimator.ModeKeys.PREDICT
np.random.seed(1)
tf.set_random_seed(1)
# Create the inference dataset.
dataset = inputter.make_inference_dataset(source_file, batch_size)
iterator = dataset.make_initializable_iterator()
source = iterator.get_next()
encoder_args = TransformerArgument(beam_width=1,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
remove_padding=True,
allow_gemm_test=False)
# Encode the source.
with tf.variable_scope("transformer/encoder"):
source_embedding = source_inputter.make_inputs(source)
source_embedding = tf.cast(source_embedding, tf_datatype)
# Using onmt fp16 for encoder.encode leads to significant accuracy drop
# So, we rewrite the encoder
# memory, _, _ = encoder.encode(source_embedding, source["length"], mode=mode)
memory = tf_encoder_opennmt(source_embedding, encoder_args, source["length"])
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = tf.cast(v, tf_datatype)
ft_encoder_result = ft_encoder_opennmt(inputs=source_embedding,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=source["length"])
# Generate the target.
with tf.variable_scope("transformer/decoder", reuse=tf.AUTO_REUSE):
target_inputter.build()
batch_size = tf.shape(memory)[0]
start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
end_token = constants.END_OF_SENTENCE_ID
target_embedding = tf.cast(target_inputter.embedding, tf_datatype)
target_ids, _, target_length, _ = decoder.dynamic_decode_and_search(
target_embedding,
start_tokens,
end_token,
vocab_size=target_inputter.vocabulary_size,
beam_width=beam_size,
memory=memory,
memory_sequence_length=source["length"],
maximum_iterations=max_seq_len)
target_vocab_rev = target_inputter.vocabulary_lookup_reverse()
target_tokens = target_vocab_rev.lookup(tf.cast(target_ids, tf.int64))
decoder_args = TransformerArgument(beam_width=beam_size,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
kernel_init_range=0.00,
bias_init_range=0.00)
decoder_args_2 = copy.deepcopy(decoder_args) # for beam search
decoder_args_2.__dict__ = copy.deepcopy(decoder_args.__dict__)
decoder_args_2.beam_width = 1 # for sampling
ft_decoder_beamsearch_args = DecodingBeamsearchArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args,
beam_search_diversity_rate)
ft_decoder_sampling_args = DecodingSamplingArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args_2,
sampling_topk,
sampling_topp)
decoding_beamsearch_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
beam_search_diversity_rate,
0,
0.0,
decoder_args)
decoding_sampling_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
0.0,
sampling_topk,
sampling_topp,
decoder_args_2)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ft_target_ids, ft_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_beamsearch_args)
ft_target_tokens = target_vocab_rev.lookup(tf.cast(ft_target_ids, tf.int64))
ft_sampling_target_ids, ft_sampling_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_sampling_args)
ft_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_sampling_target_ids, tf.int64))
# ### TF Sampling Decoding ###
tf_sampling_target_ids, tf_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=0)
# tf_sampling_target_tokens: [batch_size, seq_len]
tf_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(tf_sampling_target_ids, tf.int64))
# ### end of TF BeamSearch Decoding ###
### OP BeamSearch Decoder ###
ft_decoder_beamsearch_target_ids, ft_decoder_beamsearch_target_length, _, _, _ = tf_beamsearch_decoding(memory,
source["length"],
target_embedding,
ft_decoder_beamsearch_args,
decoder_type=1)
# ft_decoder_beamsearch_target_tokens: [batch_size, beam_width, seq_len]
ft_decoder_beamsearch_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_beamsearch_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
### OP Sampling Decoder ###
ft_decoder_sampling_target_ids, ft_decoder_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=1)
ft_decoder_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_sampling_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
class TranslationResult(object):
def __init__(self, token_op, length_op, name):
self.token_op = token_op
self.length_op = length_op
self.name = name
self.file_name = name + ".txt"
self.token_list = []
self.length_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.bleu_score = None
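        # Each TranslationResult accumulates the decoded sentences, batch count and wall-clock time
        # for one decoding path; its BLEU score is computed later against the reference translations.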
translation_result_list = []
if time_args != "":
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling-for-warmup"))
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult(
target_tokens, target_length, "tf-decoding-beamsearch"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_beamsearch_target_tokens, ft_decoder_beamsearch_target_length, "ft-decoder-beamsearch"))
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult(
ft_target_tokens, ft_target_length, "ft-decoding-beamsearch"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling"))
if time_args.find("4") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_sampling_target_tokens, ft_decoder_sampling_target_length, "ft-decoder-sampling"))
if time_args.find("5") != -1:
translation_result_list.append(TranslationResult(
ft_sampling_target_tokens, ft_sampling_target_length, "ft-decoding-sampling"))
# Iterates on the dataset.
float_checkpoint_path = tf.train.latest_checkpoint(model_dir)
half_checkpoint_path = tf.train.latest_checkpoint(model_dir + "_fp16")
float_var_list = []
half_var_list = []
for var in tf.global_variables():
if var.dtype.base_dtype == tf.float32:
float_var_list.append(var)
elif var.dtype.base_dtype == tf.float16:
half_var_list.append(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
for i in range(len(translation_result_list)):
with tf.Session(config=config) as sess:
if(len(float_var_list) > 0):
float_saver = tf.train.Saver(float_var_list)
float_saver.restore(sess, float_checkpoint_path)
if(len(half_var_list) > 0):
half_saver = tf.train.Saver(half_var_list)
half_saver.restore(sess, half_checkpoint_path)
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
t1 = datetime.now()
while True:
try:
batch_tokens, batch_length = sess.run([translation_result_list[i].token_op,
translation_result_list[i].length_op])
for tokens, length in zip(batch_tokens, batch_length):
# misc.print_bytes(b" ".join(tokens[0][:length[0] - 1]))
if translation_result_list[i].name.find("beamsearch") != -1:
translation_result_list[i].token_list.append(
b" ".join(tokens[0][:length[0] - 1]).decode("UTF-8"))
else:
translation_result_list[i].token_list.append(b" ".join(tokens[:length - 1]).decode("UTF-8"))
translation_result_list[i].batch_num += 1
if translation_result_list[i].name == "tf-decoding-sampling-for-warmup" and translation_result_list[i].batch_num > 20:
break
if translation_result_list[i].batch_num >= max_ite:
break
except tf.errors.OutOfRangeError:
break
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
translation_result_list[i].execution_time = time_sum
with open(translation_result_list[i].file_name, "w") as file_b:
for s in translation_result_list[i].token_list:
file_b.write(s)
file_b.write("\n")
ref_file_path = "./.ref_file.txt"
os.system("head -n %d %s > %s" % (len(translation_result_list[i].token_list), tgt_file, ref_file_path))
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].file_name, ref_file_path)
os.system("rm {}".format(ref_file_path))
for t in translation_result_list:
if t.name == "tf-decoding-sampling-for-warmup":
continue
print("[INFO] {} translates {} batches taking {:.2f} sec to translate {} tokens, BLEU score: {:.2f}, {:.0f} tokens/sec.".format(
t.name, t.batch_num, t.execution_time, t.bleu_score.sys_len, t.bleu_score.score, t.bleu_score.sys_len / t.execution_time))
return translation_result_list
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/tensorflow/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/tensorflow/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument("--source_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the source vocabulary.")
parser.add_argument("--target_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the target vocabulary.")
parser.add_argument("--model_dir", default="../translation/ckpt",
help="Directory where checkpoint are written.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
Test the time of which one (default: '' (not test anyone) );
'': not test anyone
'0': test tf_decoding_beamsearch
'1': test op_decoder_beamsearch
'2': test op_decoding_beamsearch
'3': test tf_decoding_sampling
'4': test op_decoder_sampling
'5': test op_decoding_sampling
                            e.g., if you want to test op_decoder_beamsearch and op_decoding_sampling,
                            then you need to use -time '15' ''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. Default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
                        help='Maximum iteration for translation, default is 100000 (as large as possible to run the whole test set).')
args = parser.parse_args()
translate(vars(args))
# example script
# python ../examples/tensorflow/decoding/translate_example.py --source ../examples/tensorflow/decoding/utils/translation/test.en --target ../examples/tensorflow/decoding/utils/translation/test.de --source_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --target_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --model_dir ../translation/ckpt/ -time 02
if __name__ == "__main__":
main()
|
[
"opennmt.encoders.SelfAttentionEncoder",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.get_collection",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"tensorflow.train.latest_checkpoint",
"examples.tensorflow.decoder.utils.common.DecodingArgumentNew",
"tensorflow.tables_initializer",
"examples.tensorflow.decoding.utils.bleu_score.bleu_score",
"examples.tensorflow.decoder.utils.decoding.tf_beamsearch_decoding",
"sys.path.append",
"tensorflow.variable_scope",
"tensorflow.set_random_seed",
"tensorflow.cast",
"datetime.datetime.now",
"copy.deepcopy",
"examples.tensorflow.encoder.utils.encoder.ft_encoder_opennmt",
"tensorflow.train.Saver",
"os.path.realpath",
"opennmt.inputters.WordEmbedder",
"tensorflow.Session",
"examples.tensorflow.decoder.utils.common.DecodingBeamsearchArgument",
"opennmt.inputters.ExampleInputter",
"examples.tensorflow.decoder.utils.decoding.tf_sampling_decoding",
"examples.tensorflow.encoder.utils.encoder.tf_encoder_opennmt",
"examples.tensorflow.decoder.utils.common.TransformerArgument",
"tensorflow.fill",
"tensorflow.shape",
"examples.tensorflow.decoding.utils.ft_decoding.ft_decoding",
"examples.tensorflow.decoder.utils.common.DecodingSamplingArgument",
"opennmt.decoders.SelfAttentionDecoder"
] |
[((1522, 1561), 'sys.path.append', 'sys.path.append', (["(dir_path + '/../../..')"], {}), "(dir_path + '/../../..')\n", (1537, 1561), False, 'import sys\n'), ((2406, 2601), 'opennmt.encoders.SelfAttentionEncoder', 'onmt.encoders.SelfAttentionEncoder', ([], {'num_layers': 'NUM_LAYERS', 'num_units': 'HIDDEN_UNITS', 'num_heads': 'NUM_HEADS', 'ffn_inner_dim': 'FFN_INNER_DIM', 'dropout': '(0.1)', 'attention_dropout': '(0.1)', 'relu_dropout': '(0.1)'}), '(num_layers=NUM_LAYERS, num_units=\n HIDDEN_UNITS, num_heads=NUM_HEADS, ffn_inner_dim=FFN_INNER_DIM, dropout\n =0.1, attention_dropout=0.1, relu_dropout=0.1)\n', (2440, 2601), True, 'import opennmt as onmt\n'), ((2631, 2826), 'opennmt.decoders.SelfAttentionDecoder', 'onmt.decoders.SelfAttentionDecoder', ([], {'num_layers': 'NUM_LAYERS', 'num_units': 'HIDDEN_UNITS', 'num_heads': 'NUM_HEADS', 'ffn_inner_dim': 'FFN_INNER_DIM', 'dropout': '(0.1)', 'attention_dropout': '(0.1)', 'relu_dropout': '(0.1)'}), '(num_layers=NUM_LAYERS, num_units=\n HIDDEN_UNITS, num_heads=NUM_HEADS, ffn_inner_dim=FFN_INNER_DIM, dropout\n =0.1, attention_dropout=0.1, relu_dropout=0.1)\n', (2665, 2826), True, 'import opennmt as onmt\n'), ((1494, 1520), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1510, 1520), False, 'import os\n'), ((3719, 3811), 'opennmt.inputters.WordEmbedder', 'onmt.inputters.WordEmbedder', (['"""source_vocabulary"""'], {'embedding_size': '(512)', 'dtype': 'tf_datatype'}), "('source_vocabulary', embedding_size=512, dtype=\n tf_datatype)\n", (3746, 3811), True, 'import opennmt as onmt\n'), ((3829, 3921), 'opennmt.inputters.WordEmbedder', 'onmt.inputters.WordEmbedder', (['"""target_vocabulary"""'], {'embedding_size': '(512)', 'dtype': 'tf_datatype'}), "('target_vocabulary', embedding_size=512, dtype=\n tf_datatype)\n", (3856, 3921), True, 'import opennmt as onmt\n'), ((3933, 3997), 'opennmt.inputters.ExampleInputter', 'onmt.inputters.ExampleInputter', (['source_inputter', 'target_inputter'], {}), '(source_inputter, target_inputter)\n', (3963, 3997), True, 'import opennmt as onmt\n'), ((4199, 4216), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (4213, 4216), True, 'import numpy as np\n'), ((4221, 4242), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (4239, 4242), True, 'import tensorflow as tf\n'), ((4457, 4676), 'examples.tensorflow.decoder.utils.common.TransformerArgument', 'TransformerArgument', ([], {'beam_width': '(1)', 'head_num': 'NUM_HEADS', 'size_per_head': 'SIZE_PER_HEAD', 'inter_size': '(NUM_HEADS * SIZE_PER_HEAD * 4)', 'num_layer': 'NUM_LAYERS', 'dtype': 'tf_datatype', 'remove_padding': '(True)', 'allow_gemm_test': '(False)'}), '(beam_width=1, head_num=NUM_HEADS, size_per_head=\n SIZE_PER_HEAD, inter_size=NUM_HEADS * SIZE_PER_HEAD * 4, num_layer=\n NUM_LAYERS, dtype=tf_datatype, remove_padding=True, allow_gemm_test=False)\n', (4476, 4676), False, 'from examples.tensorflow.decoder.utils.common import TransformerArgument\n'), ((11134, 11244), 'examples.tensorflow.decoder.utils.decoding.tf_sampling_decoding', 'tf_sampling_decoding', (['memory', "source['length']", 'target_embedding', 'ft_decoder_sampling_args'], {'decoder_type': '(0)'}), "(memory, source['length'], target_embedding,\n ft_decoder_sampling_args, decoder_type=0)\n", (11154, 11244), False, 'from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding\n'), ((11868, 11982), 'examples.tensorflow.decoder.utils.decoding.tf_beamsearch_decoding', 'tf_beamsearch_decoding', (['memory', "source['length']", 
'target_embedding', 'ft_decoder_beamsearch_args'], {'decoder_type': '(1)'}), "(memory, source['length'], target_embedding,\n ft_decoder_beamsearch_args, decoder_type=1)\n", (11890, 11982), False, 'from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding\n'), ((12754, 12864), 'examples.tensorflow.decoder.utils.decoding.tf_sampling_decoding', 'tf_sampling_decoding', (['memory', "source['length']", 'target_embedding', 'ft_decoder_sampling_args'], {'decoder_type': '(1)'}), "(memory, source['length'], target_embedding,\n ft_decoder_sampling_args, decoder_type=1)\n", (12774, 12864), False, 'from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding\n'), ((15223, 15260), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (15249, 15260), True, 'import tensorflow as tf\n'), ((15288, 15335), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["(model_dir + '_fp16')"], {}), "(model_dir + '_fp16')\n", (15314, 15335), True, 'import tensorflow as tf\n'), ((15399, 15420), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (15418, 15420), True, 'import tensorflow as tf\n'), ((15609, 15625), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (15623, 15625), True, 'import tensorflow as tf\n'), ((18566, 18645), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (18589, 18645), False, 'import argparse\n'), ((4971, 5011), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transformer/encoder"""'], {}), "('transformer/encoder')\n", (4988, 5011), True, 'import tensorflow as tf\n'), ((5103, 5141), 'tensorflow.cast', 'tf.cast', (['source_embedding', 'tf_datatype'], {}), '(source_embedding, tf_datatype)\n', (5110, 5141), True, 'import tensorflow as tf\n'), ((5364, 5432), 'examples.tensorflow.encoder.utils.encoder.tf_encoder_opennmt', 'tf_encoder_opennmt', (['source_embedding', 'encoder_args', "source['length']"], {}), "(source_embedding, encoder_args, source['length'])\n", (5382, 5432), False, 'from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt\n'), ((5457, 5505), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (5474, 5505), True, 'import tensorflow as tf\n'), ((5670, 5820), 'examples.tensorflow.encoder.utils.encoder.ft_encoder_opennmt', 'ft_encoder_opennmt', ([], {'inputs': 'source_embedding', 'encoder_args': 'encoder_args', 'encoder_vars_dict': 'encoder_variables_dict', 'sequence_length': "source['length']"}), "(inputs=source_embedding, encoder_args=encoder_args,\n encoder_vars_dict=encoder_variables_dict, sequence_length=source['length'])\n", (5688, 5820), False, 'from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt\n'), ((5995, 6056), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transformer/decoder"""'], {'reuse': 'tf.AUTO_REUSE'}), "('transformer/decoder', reuse=tf.AUTO_REUSE)\n", (6012, 6056), True, 'import tensorflow as tf\n'), ((6154, 6207), 'tensorflow.fill', 'tf.fill', (['[batch_size]', 'constants.START_OF_SENTENCE_ID'], {}), '([batch_size], constants.START_OF_SENTENCE_ID)\n', (6161, 6207), True, 'import tensorflow as tf\n'), ((6284, 6331), 'tensorflow.cast', 'tf.cast', (['target_inputter.embedding', 'tf_datatype'], {}), '(target_inputter.embedding, tf_datatype)\n', (6291, 6331), True, 'import 
tensorflow as tf\n'), ((6876, 7103), 'examples.tensorflow.decoder.utils.common.TransformerArgument', 'TransformerArgument', ([], {'beam_width': 'beam_size', 'head_num': 'NUM_HEADS', 'size_per_head': 'SIZE_PER_HEAD', 'inter_size': '(NUM_HEADS * SIZE_PER_HEAD * 4)', 'num_layer': 'NUM_LAYERS', 'dtype': 'tf_datatype', 'kernel_init_range': '(0.0)', 'bias_init_range': '(0.0)'}), '(beam_width=beam_size, head_num=NUM_HEADS, size_per_head\n =SIZE_PER_HEAD, inter_size=NUM_HEADS * SIZE_PER_HEAD * 4, num_layer=\n NUM_LAYERS, dtype=tf_datatype, kernel_init_range=0.0, bias_init_range=0.0)\n', (6895, 7103), False, 'from examples.tensorflow.decoder.utils.common import TransformerArgument\n'), ((7419, 7446), 'copy.deepcopy', 'copy.deepcopy', (['decoder_args'], {}), '(decoder_args)\n', (7432, 7446), False, 'import copy\n'), ((7500, 7536), 'copy.deepcopy', 'copy.deepcopy', (['decoder_args.__dict__'], {}), '(decoder_args.__dict__)\n', (7513, 7536), False, 'import copy\n'), ((7629, 7814), 'examples.tensorflow.decoder.utils.common.DecodingBeamsearchArgument', 'DecodingBeamsearchArgument', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', 'decoder_args', 'beam_search_diversity_rate'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len,\n decoder_args, beam_search_diversity_rate)\n', (7655, 7814), False, 'from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument\n'), ((8162, 8349), 'examples.tensorflow.decoder.utils.common.DecodingSamplingArgument', 'DecodingSamplingArgument', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', 'decoder_args_2', 'sampling_topk', 'sampling_topp'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len,\n decoder_args_2, sampling_topk, sampling_topp)\n', (8186, 8349), False, 'from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument\n'), ((8737, 8923), 'examples.tensorflow.decoder.utils.common.DecodingArgumentNew', 'DecodingArgumentNew', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', 'beam_search_diversity_rate', '(0)', '(0.0)', 'decoder_args'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len,\n beam_search_diversity_rate, 0, 0.0, decoder_args)\n', (8756, 8923), False, 'from examples.tensorflow.decoder.utils.common import DecodingArgumentNew\n'), ((9334, 9521), 'examples.tensorflow.decoder.utils.common.DecodingArgumentNew', 'DecodingArgumentNew', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', '(0.0)', 'sampling_topk', 'sampling_topp', 'decoder_args_2'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len, 0.0,\n sampling_topk, sampling_topp, decoder_args_2)\n', (9353, 9521), False, 'from examples.tensorflow.decoder.utils.common import DecodingArgumentNew\n'), ((9904, 9952), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (9921, 9952), True, 'import tensorflow as tf\n'), ((10004, 10110), 'examples.tensorflow.decoding.utils.ft_decoding.ft_decoding', 'ft_decoding', (['ft_encoder_result', "source['length']", 'target_embedding', 
'all_vars', 'decoding_beamsearch_args'], {}), "(ft_encoder_result, source['length'], target_embedding, all_vars,\n decoding_beamsearch_args)\n", (10015, 10110), False, 'from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding\n'), ((10514, 10618), 'examples.tensorflow.decoding.utils.ft_decoding.ft_decoding', 'ft_decoding', (['ft_encoder_result', "source['length']", 'target_embedding', 'all_vars', 'decoding_sampling_args'], {}), "(ft_encoder_result, source['length'], target_embedding, all_vars,\n decoding_sampling_args)\n", (10525, 10618), False, 'from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding\n'), ((11661, 11702), 'tensorflow.cast', 'tf.cast', (['tf_sampling_target_ids', 'tf.int64'], {}), '(tf_sampling_target_ids, tf.int64)\n', (11668, 11702), True, 'import tensorflow as tf\n'), ((12555, 12606), 'tensorflow.cast', 'tf.cast', (['ft_decoder_beamsearch_target_ids', 'tf.int64'], {}), '(ft_decoder_beamsearch_target_ids, tf.int64)\n', (12562, 12606), True, 'import tensorflow as tf\n'), ((13298, 13347), 'tensorflow.cast', 'tf.cast', (['ft_decoder_sampling_target_ids', 'tf.int64'], {}), '(ft_decoder_sampling_target_ids, tf.int64)\n', (13305, 13347), True, 'import tensorflow as tf\n'), ((5618, 5641), 'tensorflow.cast', 'tf.cast', (['v', 'tf_datatype'], {}), '(v, tf_datatype)\n', (5625, 5641), True, 'import tensorflow as tf\n'), ((6111, 6127), 'tensorflow.shape', 'tf.shape', (['memory'], {}), '(memory)\n', (6119, 6127), True, 'import tensorflow as tf\n'), ((6821, 6850), 'tensorflow.cast', 'tf.cast', (['target_ids', 'tf.int64'], {}), '(target_ids, tf.int64)\n', (6828, 6850), True, 'import tensorflow as tf\n'), ((10410, 10442), 'tensorflow.cast', 'tf.cast', (['ft_target_ids', 'tf.int64'], {}), '(ft_target_ids, tf.int64)\n', (10417, 10442), True, 'import tensorflow as tf\n'), ((10999, 11040), 'tensorflow.cast', 'tf.cast', (['ft_sampling_target_ids', 'tf.int64'], {}), '(ft_sampling_target_ids, tf.int64)\n', (11006, 11040), True, 'import tensorflow as tf\n'), ((15732, 15757), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (15742, 15757), True, 'import tensorflow as tf\n'), ((16204, 16218), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16216, 16218), False, 'from datetime import datetime\n'), ((17439, 17453), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17451, 17453), False, 'from datetime import datetime\n'), ((17999, 18062), 'examples.tensorflow.decoding.utils.bleu_score.bleu_score', 'bleu_score', (['translation_result_list[i].file_name', 'ref_file_path'], {}), '(translation_result_list[i].file_name, ref_file_path)\n', (18009, 18062), False, 'from examples.tensorflow.decoding.utils.bleu_score import bleu_score\n'), ((15838, 15868), 'tensorflow.train.Saver', 'tf.train.Saver', (['float_var_list'], {}), '(float_var_list)\n', (15852, 15868), True, 'import tensorflow as tf\n'), ((16003, 16032), 'tensorflow.train.Saver', 'tf.train.Saver', (['half_var_list'], {}), '(half_var_list)\n', (16017, 16032), True, 'import tensorflow as tf\n'), ((16118, 16141), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (16139, 16141), True, 'import tensorflow as tf\n')]
|
import numpy as np
from Kuru import QuadratureRule, FunctionSpace , Mesh
from Kuru.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from Kuru.VariationalPrinciple._GeometricStiffness_ import GeometricStiffnessIntegrand as GetGeomStiffness
from .DisplacementApproachIndices import FillGeometricB
#from ._MassIntegrand_ import __MassIntegrand__, __ConstantMassIntegrand__
__all__ = ["VariationalPrinciple"]
class VariationalPrinciple(object):
energy_dissipation = []
internal_energy = []
kinetic_energy = []
external_energy = []
power_dissipation = []
internal_power = []
kinetic_power = []
external_power = []
def __init__(self, mesh, variables_order=(1,0),
analysis_type='static', analysis_nature='nonlinear', fields='mechanics',
quadrature_rules=None, median=None, quadrature_type=None,
function_spaces=None, compute_post_quadrature=True):
self.variables_order = variables_order
self.nvar = None
self.ndim = mesh.points.shape[1]
if isinstance(self.variables_order,int):
self.variables_order = tuple(self.variables_order)
self.quadrature_rules = quadrature_rules
self.quadrature_type = quadrature_type
self.function_spaces = function_spaces
self.median = median
self.analysis_type = analysis_type
self.analysis_nature = analysis_nature
self.fields = fields
self.compute_post_quadrature = compute_post_quadrature
# GET NUMBER OF VARIABLES
self.GetNumberOfVariables()
def GetQuadratureOrder(self, C, element_type, quadrature_degree=None):
"""Finds quadrature degree/strength for a given polynomial order C=p-1 [where p is polynomial degree]"""
if quadrature_degree is None:
if element_type == "tri" or element_type == "tet":
norder = 2*C if C > 0 else 1
norder_post = 2*(C+1)
else:
norder = C+2
# ACTUAL
# norder_post = 2*(C+2)
# ALTHOUGH THIS INTEGRATES EXACTLY
norder_post = C+2
else:
norder = quadrature_degree
if element_type == "tri" or element_type == "tet":
norder_post = 2*quadrature_degree
else:
norder_post = quadrature_degree
return norder, norder_post
def GetQuadraturesAndFunctionSpaces(self, mesh, variables_order=(1,),
quadrature_rules=None, quadrature_type=None, function_spaces=None, compute_post_quadrature=True,
            equally_spaced_bases=False, quadrature_degree=None):
        """The default function for computing quadrature rules and function spaces for equal-order single
            and multi-physics/fields problems"""
C = mesh.InferPolynomialDegree() - 1
mesh.InferBoundaryElementType()
        if quadrature_rules is None and self.quadrature_rules is None:
# OPTION FOR QUADRATURE TECHNIQUE FOR TRIS AND TETS
optimal_quadrature = 3
if mesh.element_type == "quad" or mesh.element_type == "hex":
if quadrature_type == "wv":
optimal_quadrature = 4
norder, norder_post = self.GetQuadratureOrder(C, mesh.element_type, quadrature_degree=quadrature_degree)
# GET QUADRATURE
quadrature = QuadratureRule(optimal=optimal_quadrature, norder=norder, mesh_type=mesh.element_type)
if self.compute_post_quadrature:
# COMPUTE INTERPOLATION FUNCTIONS AT ALL INTEGRATION POINTS FOR POST-PROCESSING
post_quadrature = QuadratureRule(optimal=optimal_quadrature, norder=norder_post, mesh_type=mesh.element_type)
else:
post_quadrature = None
# BOUNDARY QUADRATURE
bquadrature = QuadratureRule(optimal=optimal_quadrature, norder=C+2, mesh_type=mesh.boundary_element_type)
self.quadrature_rules = (quadrature,post_quadrature,bquadrature)
else:
self.quadrature_rules = quadrature_rules
        if function_spaces is None and self.function_spaces is None:
# CREATE FUNCTIONAL SPACES
function_space = FunctionSpace(mesh, self.quadrature_rules[0], p=C+1, equally_spaced=equally_spaced_bases)
if self.compute_post_quadrature:
post_function_space = FunctionSpace(mesh, self.quadrature_rules[1], p=C+1, equally_spaced=equally_spaced_bases)
else:
post_function_space = None
# CREATE BOUNDARY FUNCTIONAL SPACES
bfunction_space = FunctionSpace(mesh.CreateDummyLowerDimensionalMesh(),
self.quadrature_rules[2], p=C+1, equally_spaced=equally_spaced_bases)
self.function_spaces = (function_space,post_function_space,bfunction_space)
else:
self.function_spaces = function_spaces
local_size = self.function_spaces[0].Bases.shape[0]*self.nvar
self.local_rows = np.repeat(np.arange(0,local_size),local_size,axis=0)
self.local_columns = np.tile(np.arange(0,local_size),local_size)
self.local_size = local_size
# FOR MASS
local_size_m = self.function_spaces[0].Bases.shape[0]*self.ndim
self.local_rows_mass = np.repeat(np.arange(0,local_size_m),local_size_m,axis=0)
self.local_columns_mass = np.tile(np.arange(0,local_size_m),local_size_m)
self.local_size_m = local_size_m
    def GetNumberOfVariables(self):
        """Returns (self.nvar), i.e. the number of variables/unknowns per node for the formulation.
            Note that self.nvar does not take into account the unknowns which get condensed out.
        """
# nvar = 0
# for i in self.variables_order:
# # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT
# if i!=0:
# if mesh.element_type == "tri":
# nvar += (i+1)*(i+2) // 2
# elif mesh.element_type == "tet":
# nvar += (i+1)*(i+2)*(i+3) // 6
# elif mesh.element_type == "quad":
# nvar += (i+1)**2
# elif mesh.element_type == "hex":
# nvar += (i+1)**3
# nvar = sum(self.variables_order)
        if self.nvar is None:
self.nvar = self.ndim
return self.nvar
def FindIndices(self,A):
return self.local_rows, self.local_columns, A.ravel()
    def GeometricStiffnessIntegrand(self, SpatialGradient, CauchyStressTensor):
        """Applies to displacement-based, displacement-potential-based and all mixed
        formulations that involve static condensation"""
ndim = self.ndim
nvar = self.nvar
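        # Assemble the geometric (initial-stress) stiffness contribution: B collects the spatial
        # gradients in block form and S the Cauchy stress components, so that BDB = (B S) B^T.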
B = np.zeros((nvar*SpatialGradient.shape[0],ndim*ndim))
S = np.zeros((ndim*ndim,ndim*ndim))
SpatialGradient = SpatialGradient.T.copy('c')
FillGeometricB(B,SpatialGradient,S,CauchyStressTensor,ndim,nvar)
BDB = np.dot(np.dot(B,S),B.T)
return BDB
def __GeometricStiffnessIntegrand__(self, SpatialGradient, CauchyStressTensor, detJ):
"""Applies to displacement based formulation"""
return GetGeomStiffness(np.ascontiguousarray(SpatialGradient),CauchyStressTensor, detJ, self.nvar)
    def VolumetricStiffnessIntegrand(self, material, SpatialGradient, detJ, dV):
        """Computes the volumetric stiffness using the Hu-Washizu mean dilatation method"""
if material.has_low_level_dispatcher:
from ._VolumetricStiffness_ import _VolumetricStiffnessIntegrand_
stiffness, MeanVolume = _VolumetricStiffnessIntegrand_(material,
np.ascontiguousarray(SpatialGradient), np.ascontiguousarray(detJ),
np.ascontiguousarray(dV), self.nvar)
else:
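            # Pure NumPy fallback: build the rank-one volumetric stiffness from volume-averaged
            # spatial gradients and the relative volume change (mean dilatation).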
MaterialVolume = np.sum(dV)
if material.has_state_variables and material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,material.id_growth])
CurrentElasticVolume = np.sum(dve)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageDeformationv = np.einsum('i,ijk,i->jk',material.StateVariables[:,material.id_density],SpatialGradient,dve)
AverageDeformationv = AverageDeformationv.flatten()
AverageDeformationu = np.einsum('ijk,i->jk',SpatialGradient,dve)
AverageDeformationu = AverageDeformationu.flatten()
stiffness = np.einsum('i,j->ij',AverageDeformationv,AverageDeformationu)
MeanVolume = (CurrentElasticVolume-MaterialVolume)/MaterialVolume
elif material.has_state_variables and not material.has_growth_remodeling:
CurrentElasticVolume = np.sum(detJ)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageDeformationv = np.einsum('i,ijk,i->jk',material.StateVariables[:,material.id_density],SpatialGradient,detJ)
AverageDeformationv = AverageDeformationv.flatten()
AverageDeformationu = np.einsum('ijk,i->jk',SpatialGradient,detJ)
AverageDeformationu = AverageDeformationu.flatten()
stiffness = np.einsum('i,j->ij',AverageDeformationv,AverageDeformationu)
MeanVolume = (CurrentElasticVolume-MaterialVolume)/MaterialVolume
elif not material.has_state_variables and not material.has_growth_remodeling:
CurrentVolume = np.sum(detJ)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageSpatialGradient = np.einsum('ijk,i->jk',SpatialGradient,detJ)
AverageSpatialGradient = AverageSpatialGradient.flatten()
stiffness = np.einsum('i,j->ij',AverageSpatialGradient,AverageSpatialGradient)
MeanVolume = (CurrentVolume-MaterialVolume)/MaterialVolume
stiffness = np.true_divide(stiffness,MaterialVolume)
material.pressure = material.kappa*MeanVolume
stiffness *= material.kappa
return stiffness
|
[
"numpy.sum",
"numpy.true_divide",
"numpy.zeros",
"Kuru.FunctionSpace",
"numpy.einsum",
"Kuru.QuadratureRule",
"numpy.arange",
"numpy.dot",
"numpy.ascontiguousarray"
] |
[((6804, 6860), 'numpy.zeros', 'np.zeros', (['(nvar * SpatialGradient.shape[0], ndim * ndim)'], {}), '((nvar * SpatialGradient.shape[0], ndim * ndim))\n', (6812, 6860), True, 'import numpy as np\n'), ((6868, 6904), 'numpy.zeros', 'np.zeros', (['(ndim * ndim, ndim * ndim)'], {}), '((ndim * ndim, ndim * ndim))\n', (6876, 6904), True, 'import numpy as np\n'), ((3416, 3507), 'Kuru.QuadratureRule', 'QuadratureRule', ([], {'optimal': 'optimal_quadrature', 'norder': 'norder', 'mesh_type': 'mesh.element_type'}), '(optimal=optimal_quadrature, norder=norder, mesh_type=mesh.\n element_type)\n', (3430, 3507), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((3888, 3987), 'Kuru.QuadratureRule', 'QuadratureRule', ([], {'optimal': 'optimal_quadrature', 'norder': '(C + 2)', 'mesh_type': 'mesh.boundary_element_type'}), '(optimal=optimal_quadrature, norder=C + 2, mesh_type=mesh.\n boundary_element_type)\n', (3902, 3987), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((4265, 4361), 'Kuru.FunctionSpace', 'FunctionSpace', (['mesh', 'self.quadrature_rules[0]'], {'p': '(C + 1)', 'equally_spaced': 'equally_spaced_bases'}), '(mesh, self.quadrature_rules[0], p=C + 1, equally_spaced=\n equally_spaced_bases)\n', (4278, 4361), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((5070, 5094), 'numpy.arange', 'np.arange', (['(0)', 'local_size'], {}), '(0, local_size)\n', (5079, 5094), True, 'import numpy as np\n'), ((5150, 5174), 'numpy.arange', 'np.arange', (['(0)', 'local_size'], {}), '(0, local_size)\n', (5159, 5174), True, 'import numpy as np\n'), ((5356, 5382), 'numpy.arange', 'np.arange', (['(0)', 'local_size_m'], {}), '(0, local_size_m)\n', (5365, 5382), True, 'import numpy as np\n'), ((5445, 5471), 'numpy.arange', 'np.arange', (['(0)', 'local_size_m'], {}), '(0, local_size_m)\n', (5454, 5471), True, 'import numpy as np\n'), ((7050, 7062), 'numpy.dot', 'np.dot', (['B', 'S'], {}), '(B, S)\n', (7056, 7062), True, 'import numpy as np\n'), ((7267, 7304), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['SpatialGradient'], {}), '(SpatialGradient)\n', (7287, 7304), True, 'import numpy as np\n'), ((7896, 7906), 'numpy.sum', 'np.sum', (['dV'], {}), '(dV)\n', (7902, 7906), True, 'import numpy as np\n'), ((10128, 10169), 'numpy.true_divide', 'np.true_divide', (['stiffness', 'MaterialVolume'], {}), '(stiffness, MaterialVolume)\n', (10142, 10169), True, 'import numpy as np\n'), ((3678, 3774), 'Kuru.QuadratureRule', 'QuadratureRule', ([], {'optimal': 'optimal_quadrature', 'norder': 'norder_post', 'mesh_type': 'mesh.element_type'}), '(optimal=optimal_quadrature, norder=norder_post, mesh_type=\n mesh.element_type)\n', (3692, 3774), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((4438, 4534), 'Kuru.FunctionSpace', 'FunctionSpace', (['mesh', 'self.quadrature_rules[1]'], {'p': '(C + 1)', 'equally_spaced': 'equally_spaced_bases'}), '(mesh, self.quadrature_rules[1], p=C + 1, equally_spaced=\n equally_spaced_bases)\n', (4451, 4534), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((7733, 7770), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['SpatialGradient'], {}), '(SpatialGradient)\n', (7753, 7770), True, 'import numpy as np\n'), ((7772, 7798), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['detJ'], {}), '(detJ)\n', (7792, 7798), True, 'import numpy as np\n'), ((7816, 7840), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dV'], {}), '(dV)\n', (7836, 7840), True, 'import numpy as np\n'), ((8009, 8077), 
'numpy.true_divide', 'np.true_divide', (['detJ', 'material.StateVariables[:, material.id_growth]'], {}), '(detJ, material.StateVariables[:, material.id_growth])\n', (8023, 8077), True, 'import numpy as np\n'), ((8115, 8126), 'numpy.sum', 'np.sum', (['dve'], {}), '(dve)\n', (8121, 8126), True, 'import numpy as np\n'), ((8277, 8376), 'numpy.einsum', 'np.einsum', (['"""i,ijk,i->jk"""', 'material.StateVariables[:, material.id_density]', 'SpatialGradient', 'dve'], {}), "('i,ijk,i->jk', material.StateVariables[:, material.id_density],\n SpatialGradient, dve)\n", (8286, 8376), True, 'import numpy as np\n'), ((8475, 8519), 'numpy.einsum', 'np.einsum', (['"""ijk,i->jk"""', 'SpatialGradient', 'dve'], {}), "('ijk,i->jk', SpatialGradient, dve)\n", (8484, 8519), True, 'import numpy as np\n'), ((8614, 8676), 'numpy.einsum', 'np.einsum', (['"""i,j->ij"""', 'AverageDeformationv', 'AverageDeformationu'], {}), "('i,j->ij', AverageDeformationv, AverageDeformationu)\n", (8623, 8676), True, 'import numpy as np\n'), ((8882, 8894), 'numpy.sum', 'np.sum', (['detJ'], {}), '(detJ)\n', (8888, 8894), True, 'import numpy as np\n'), ((9045, 9145), 'numpy.einsum', 'np.einsum', (['"""i,ijk,i->jk"""', 'material.StateVariables[:, material.id_density]', 'SpatialGradient', 'detJ'], {}), "('i,ijk,i->jk', material.StateVariables[:, material.id_density],\n SpatialGradient, detJ)\n", (9054, 9145), True, 'import numpy as np\n'), ((9244, 9289), 'numpy.einsum', 'np.einsum', (['"""ijk,i->jk"""', 'SpatialGradient', 'detJ'], {}), "('ijk,i->jk', SpatialGradient, detJ)\n", (9253, 9289), True, 'import numpy as np\n'), ((9384, 9446), 'numpy.einsum', 'np.einsum', (['"""i,j->ij"""', 'AverageDeformationv', 'AverageDeformationu'], {}), "('i,j->ij', AverageDeformationv, AverageDeformationu)\n", (9393, 9446), True, 'import numpy as np\n'), ((9649, 9661), 'numpy.sum', 'np.sum', (['detJ'], {}), '(detJ)\n', (9655, 9661), True, 'import numpy as np\n'), ((9815, 9860), 'numpy.einsum', 'np.einsum', (['"""ijk,i->jk"""', 'SpatialGradient', 'detJ'], {}), "('ijk,i->jk', SpatialGradient, detJ)\n", (9824, 9860), True, 'import numpy as np\n'), ((9961, 10029), 'numpy.einsum', 'np.einsum', (['"""i,j->ij"""', 'AverageSpatialGradient', 'AverageSpatialGradient'], {}), "('i,j->ij', AverageSpatialGradient, AverageSpatialGradient)\n", (9970, 10029), True, 'import numpy as np\n')]
|
import exrex
import logging
import os
import multiprocessing
import numpy as np
from scipy.stats import genlogistic
from scipy.ndimage.filters import median_filter, uniform_filter1d
from functools import partial
from patteRNA.LBC import LBC
from patteRNA import rnalib, filelib, timelib, misclib, viennalib
from tqdm import tqdm
LOCK = multiprocessing.Lock()
logger = logging.getLogger(__name__)
clock = timelib.Clock()
class ScoringManager:
def __init__(self, model, run_config):
self.model = model
self.run_config = run_config
self.mp_tasks = run_config['n_tasks']
self.mp_pool = None
self.motifs = []
self.cscore_dists = None
self.dataset = None
self.no_vienna = run_config['no_vienna']
self.lbc = LBC()
if run_config['motif'] is not None:
self.parse_motifs()
def parse_motifs(self):
expression = self.run_config['motif']
expression = expression.replace('(', r'\(')
expression = expression.replace('.', r'\.')
expression = expression.replace(')', r'\)')
motifs = exrex.generate(expression)
self.motifs = list(filter(rnalib.valid_db, motifs))
def import_data(self, dataset):
self.dataset = dataset
def execute_scoring(self):
# Compile scoring configuration parameters
scoring_config = {'posteriors': self.run_config['posteriors'],
'hdsl': self.run_config['HDSL'],
'spp': self.run_config['SPP'],
'viterbi': self.run_config['viterbi'],
'suppress_nan': True,
'fp_posteriors': os.path.join(self.run_config['output'], 'posteriors.txt'),
'fp_scores_pre': os.path.join(self.run_config['output'], 'scores_pre'),
'fp_scores': os.path.join(self.run_config['output'], 'scores.txt'),
'fp_hdsl': os.path.join(self.run_config['output'], 'hdsl.txt'),
'fp_spp': os.path.join(self.run_config['output'], 'spp.txt'),
'fp_viterbi': os.path.join(self.run_config['output'], 'viterbi.txt'),
'no_cscores': self.run_config['no_cscores'],
'min_cscores': self.run_config['min_cscores'],
'batch_size': self.run_config['batch_size'],
'motifs': self.motifs,
'path': self.run_config['path'],
'context': self.run_config['context'],
'cscore_dists': None,
'no_vienna': self.no_vienna,
'energy': ~np.any([self.no_vienna,
self.run_config['no_cscores'],
not viennalib.vienna_imported]),
'lbc': self.lbc,
'hdsl_params': self.run_config['hdsl_params']}
self.pool_init() # Initialize parallelized pool
# Prepare score distributions for c-score normalization
if not scoring_config['no_cscores']:
logger.info('Sampling null sites for c-score normalization')
clock.tick()
self.cscore_dists = dict.fromkeys(self.motifs)
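            # One null score distribution is fit per motif (generalized logistic), using scores from
            # sites that violate the motif's sequence constraints.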
cscore_batch = self.make_cscore_batch(scoring_config['min_cscores'])
cscore_batch.pre_process(self.model, scoring=True)
with tqdm(total=len(self.motifs),
leave=False,
unit='motif') as pb_samples:
try:
if scoring_config['path']:
path = np.array(list(scoring_config['path']), dtype=int)
else:
path = None
worker = partial(self.sample_worker, path=path, batch=cscore_batch)
samples_pool = self.mp_pool.imap_unordered(worker, self.motifs)
for (motif, samples) in samples_pool:
params = genlogistic.fit(samples)
self.cscore_dists[motif] = genlogistic(c=params[0], loc=params[1], scale=params[2])
pb_samples.update()
self.mp_pool.close()
self.mp_pool.join()
except Exception:
self.mp_pool.terminate()
raise
scoring_config['cscore_dists'] = self.cscore_dists
logger.info(' ... done in {}'.format(misclib.seconds_to_hms(clock.tock())))
# Begin formal scoring phase by making batches to save on memory
batches = self.make_batches(scoring_config['batch_size'])
n_batches = len(self.dataset.rnas) // scoring_config['batch_size'] + 1 # Number of batches
if self.motifs:
header = "transcript\tstart score c-score BCE MEL Prob(motif) motif path seq\n"
with open(scoring_config['fp_scores_pre'], 'w') as f:
f.write(header)
logger.info("Executing scoring")
clock.tick()
with tqdm(total=n_batches,
leave=False,
unit='batch',
desc=' Overall') as pbar_batches:
# Process batches sequentially
for i, batch in enumerate(batches):
self.pool_init()
batch.pre_process(self.model)
with tqdm(total=len(batch.rnas),
leave=False,
unit="transcript",
desc="Current batch") as pbar_transcripts:
try:
worker = partial(self.score_worker, model=self.model, config=scoring_config)
jobs = self.mp_pool.imap_unordered(worker, batch.rnas.values())
for _ in jobs:
pbar_transcripts.update()
self.mp_pool.close()
self.mp_pool.join()
except Exception:
self.mp_pool.terminate()
raise
batch.clear()
pbar_batches.update()
# Sort score file
if self.motifs:
scores = filelib.read_score_file(scoring_config['fp_scores_pre'])
if not scores:
os.rename(scoring_config['fp_scores_pre'], scoring_config['fp_scores'])
else:
if scoring_config['no_cscores']:
filelib.write_score_file(sorted(scores, key=lambda score: score['score'], reverse=True),
scoring_config['fp_scores'])
else:
if scoring_config['energy']:
filelib.write_score_file(sorted(scores, key=lambda score: score['Prob(motif)'], reverse=True),
scoring_config['fp_scores'])
else:
filelib.write_score_file(sorted(scores, key=lambda score: score['c-score'], reverse=True),
scoring_config['fp_scores'])
os.remove(scoring_config['fp_scores_pre']) # Clean-up
logger.info(' ... done in {}'.format(misclib.seconds_to_hms(clock.tock())))
@staticmethod
def sample_worker(motif, path, batch):
if path is None:
path = rnalib.dot2states(motif)
scores = []
for transcript in batch.rnas.values():
scores.extend(get_null_scores(transcript, motif, path))
return motif, scores
@staticmethod
def score_worker(transcript, model, config):
model.e_step(transcript) # Apply model to transcripts
outputs = compute_outputs(transcript, model, config)
with LOCK as _:
write_outputs(outputs, config)
def make_cscore_batch(self, min_sample_size):
"""
        Scan through the RNAs in the provided data and determine how many are needed to reliably
        estimate the null score distributions for c-score normalization. Return a new Dataset with just
        the RNAs to use for score sampling.
Args:
min_sample_size: Minimum number of samples to estimate the null score distribution for a single motif.
Returns:
Dataset of RNAs which is a subset of the provided data and meets the criteria needed for score sampling.
"""
motif_samples = {motif: 0 for motif in self.motifs}
cscore_rnas = []
for rna in self.dataset.rnas.values():
cscore_rnas.append(rna.name)
for motif in self.motifs:
null_sites = count_null_sites(rna, motif)
motif_samples[motif] += null_sites
if np.all([motif_samples[motif] >= min_sample_size for motif in motif_samples]):
break # No more sites needed
return self.dataset.spawn_set(rnas=cscore_rnas)
def make_batches(self, size):
rnas = list(self.dataset.rnas.keys())
while rnas:
rnas_batch = rnas[:size]
rnas[:size] = []
yield self.dataset.spawn_set(rnas=rnas_batch)
def pool_init(self):
self.mp_pool = multiprocessing.Pool(processes=self.mp_tasks,
maxtasksperchild=1000)
def count_null_sites(transcript, motif):
if motif not in transcript.valid_sites.keys():
transcript.find_valid_sites(motif)
if motif not in transcript.nan_sites.keys():
transcript.find_nan_sites(len(motif))
non_null_sites = transcript.nan_sites[len(motif)] | transcript.valid_sites[motif]
count = transcript.T - len(motif) + 1 - len(non_null_sites)
return count
def get_null_scores(transcript, motif, path):
# Get sites which violate sequence constraints
invalid_sites = np.where(~np.in1d(range(transcript.T - len(motif) + 1), transcript.valid_sites[motif]))[0]
null_scores = list(filter(lambda score: ~np.isnan(score['score']),
map(lambda start: score_path(transcript, start, path, motif, None, lbc=False),
invalid_sites)))
return [null_score['score'] for null_score in null_scores]
def compute_cscores(scores, dists):
list(map(lambda score: apply_cscore(score, dists[score['dot-bracket']]), scores))
def apply_cscore(score, dist):
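    # The c-score is the negative log10 of the survival probability (p-value) of the raw score
    # under the motif's null distribution.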
pv = dist.sf(score['score'])
if pv == 0:
log_c = np.Inf
elif np.isnan(pv):
log_c = np.nan
else:
log_c = -np.log10(pv)
score['c-score'] = log_c
def score_path(transcript, start, path, motif, pt, lbc=True, context=40):
m = len(path)
end = start + m - 1
bce = np.nan
mel = np.nan
if np.all(np.isnan(transcript.obs[start:end + 1])):
score = np.nan
else:
score = 0
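        # Log-odds of the candidate path: forward-variable ratio at the start site, emission
        # log-ratios along the interior of the path, and backward-variable ratio at the end site.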
score += np.log(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])
score += np.sum((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])
score += np.log(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])
if lbc:
rstart = int(np.max((0, start - context)))
rend = int(np.min((len(transcript.seq), end + context)))
start_shift = start - rstart
hcs = rnalib.compile_motif_constraints(pt[0], pt[1], start_shift)
lmfe = viennalib.fold(transcript.seq[rstart:rend])
lcmfe = viennalib.hc_fold(transcript.seq[rstart:rend], hcs=hcs)
mel = lmfe - lcmfe
bce = bce_loss(transcript.gamma[1, start:end + 1], path)
return {'score': score,
'c-score': None,
'start': start,
'transcript': transcript.name,
'dot-bracket': motif,
'path': "".join([str(a) for a in path]),
'BCE': bce,
'MEL': mel,
'Prob(motif)': np.nan,
'seq': transcript.seq[start:start + m]}
def bce_loss(yhat, y):
assert len(yhat) == len(y)
return sum(
-yi * np.log(yhi + 1e-20) if yi == 1 else -(1 - yi) * np.log(1 - yhi + 1e-20) for yhi, yi in zip(yhat, y))
def compute_outputs(transcript, model, config):
outputs = {'name': transcript.name,
'viterbi': '',
'posteriors': '',
'spp': '',
'scores_pre': '',
'hdsl': ''} # Initialize outputs dictionary
if config['viterbi']:
vp = model.viterbi_decoding(transcript) # Viterbi algorithm
outputs['viterbi'] = "> {}\n{}\n".format(transcript.name, "".join([str(i) for i in vp]))
# Posterior pairing probabilities
if config['posteriors']:
transcript.gamma /= np.sum(transcript.gamma, axis=0)[np.newaxis, :]
outputs['posteriors'] = "> {}\n{}\n".format(transcript.name,
" ".join(["{:1.3f}".format(p) for p in transcript.gamma[0, :]]))
# Smoothed P(paired) measure --> HDSL without augmentation
if config['spp']:
spp_tmp = transcript.gamma[1, :] # Raw pairing probabilities
spp_tmp = uniform_filter1d(spp_tmp, size=5) # Local mean
spp = median_filter(spp_tmp, size=15) # Local median
outputs['spp'] = "> {}\n{}\n".format(transcript.name,
" ".join(["{:1.3f}".format(p) for p in spp]))
if config['motifs']:
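        # Score every valid site of every motif; scores are optionally normalized to c-scores and,
        # when folding energies are available, passed through the LBC classifier.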
transcript.compute_log_B_ratios()
scores = []
for motif in config['motifs']:
if config['path'] is not None:
path = np.array(list(config['path']), dtype=int)
else:
path = rnalib.dot2states(motif)
pt = transcript.find_valid_sites(motif) # Returns motif base pairing list
scores_tmp = list(map(lambda start: score_path(transcript, start, path, motif, pt, lbc=config['energy']),
transcript.valid_sites[motif]))
if config['suppress_nan']:
scores_tmp = list(filter(lambda s: ~np.isnan(s['score']), scores_tmp))
if config['cscore_dists'] is not None:
compute_cscores(scores_tmp, config['cscore_dists'])
scores += scores_tmp
if config['energy']:
config['lbc'].apply_classifier(scores)
outputs['scores_pre'] = format_scores(scores)
# Hairpin-derived structure level measure
if config['hdsl']:
hdsl_tmp = transcript.gamma[1, :] # Pairing probabilities
for score in scores:
# Profile augmentation with hairpin scores
if score['c-score'] > config['hdsl_params'][1]:
end = score['start'] + len(score['dot-bracket'])
boost = config['hdsl_params'][0] * (score['c-score'] - config['hdsl_params'][1])
hdsl_tmp[score['start']:end] += boost
# Clipping to [0, 1]
hdsl_tmp[hdsl_tmp < 0] = 0
hdsl_tmp[hdsl_tmp > 1] = 1
# Smoothing steps
hdsl_tmp = uniform_filter1d(hdsl_tmp, size=5) # Local mean
hdsl = median_filter(hdsl_tmp, size=15) # Local median
outputs['hdsl'] = "> {}\n{}\n".format(transcript.name, " ".join(["{:1.3f}".format(p) for p in hdsl]))
return outputs
def format_scores(scores):
return "".join(["{} {} {:1.2f} {:1.2f} {:1.2f} {:1.2f} {:1.3g} {} {} {}\n".format(
score['transcript'],
score['start'] + 1,
score['score'],
score['c-score'],
score['BCE'],
score['MEL'],
score['Prob(motif)'],
score['dot-bracket'],
score['path'],
score['seq']) for score in scores])
def write_outputs(outputs, config):
output_types = ['viterbi', 'posteriors', 'spp', 'scores_pre', 'hdsl']
for output_type in output_types:
if outputs[output_type]:
with open(config[f'fp_{output_type}'], 'a') as f:
f.write(outputs[output_type])
|
[
"os.remove",
"numpy.sum",
"multiprocessing.Lock",
"numpy.isnan",
"patteRNA.rnalib.compile_motif_constraints",
"scipy.ndimage.filters.uniform_filter1d",
"os.path.join",
"patteRNA.viennalib.hc_fold",
"scipy.stats.genlogistic.fit",
"numpy.max",
"patteRNA.LBC.LBC",
"numpy.log10",
"scipy.ndimage.filters.median_filter",
"functools.partial",
"patteRNA.timelib.Clock",
"tqdm.tqdm",
"os.rename",
"multiprocessing.Pool",
"exrex.generate",
"patteRNA.filelib.read_score_file",
"patteRNA.rnalib.dot2states",
"numpy.all",
"numpy.log",
"patteRNA.viennalib.fold",
"numpy.any",
"scipy.stats.genlogistic",
"logging.getLogger"
] |
[((337, 359), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (357, 359), False, 'import multiprocessing\n'), ((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((405, 420), 'patteRNA.timelib.Clock', 'timelib.Clock', ([], {}), '()\n', (418, 420), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((780, 785), 'patteRNA.LBC.LBC', 'LBC', ([], {}), '()\n', (783, 785), False, 'from patteRNA.LBC import LBC\n'), ((1112, 1138), 'exrex.generate', 'exrex.generate', (['expression'], {}), '(expression)\n', (1126, 1138), False, 'import exrex\n'), ((9336, 9404), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'self.mp_tasks', 'maxtasksperchild': '(1000)'}), '(processes=self.mp_tasks, maxtasksperchild=1000)\n', (9356, 9404), False, 'import multiprocessing\n'), ((10590, 10602), 'numpy.isnan', 'np.isnan', (['pv'], {}), '(pv)\n', (10598, 10602), True, 'import numpy as np\n'), ((10864, 10903), 'numpy.isnan', 'np.isnan', (['transcript.obs[start:end + 1]'], {}), '(transcript.obs[start:end + 1])\n', (10872, 10903), True, 'import numpy as np\n'), ((10974, 11053), 'numpy.log', 'np.log', (['(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])'], {}), '(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])\n', (10980, 11053), True, 'import numpy as np\n'), ((11071, 11142), 'numpy.sum', 'np.sum', (['((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])'], {}), '((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])\n', (11077, 11142), True, 'import numpy as np\n'), ((11160, 11235), 'numpy.log', 'np.log', (['(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])'], {}), '(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])\n', (11166, 11235), True, 'import numpy as np\n'), ((1694, 1751), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""posteriors.txt"""'], {}), "(self.run_config['output'], 'posteriors.txt')\n", (1706, 1751), False, 'import os\n'), ((1796, 1849), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""scores_pre"""'], {}), "(self.run_config['output'], 'scores_pre')\n", (1808, 1849), False, 'import os\n'), ((1890, 1943), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""scores.txt"""'], {}), "(self.run_config['output'], 'scores.txt')\n", (1902, 1943), False, 'import os\n'), ((1982, 2033), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""hdsl.txt"""'], {}), "(self.run_config['output'], 'hdsl.txt')\n", (1994, 2033), False, 'import os\n'), ((2071, 2121), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""spp.txt"""'], {}), "(self.run_config['output'], 'spp.txt')\n", (2083, 2121), False, 'import os\n'), ((2163, 2217), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""viterbi.txt"""'], {}), "(self.run_config['output'], 'viterbi.txt')\n", (2175, 2217), False, 'import os\n'), ((5173, 5243), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_batches', 'leave': '(False)', 'unit': '"""batch"""', 'desc': '""" Overall"""'}), "(total=n_batches, leave=False, unit='batch', desc=' Overall')\n", (5177, 5243), False, 'from tqdm import tqdm\n'), ((6347, 6403), 'patteRNA.filelib.read_score_file', 'filelib.read_score_file', (["scoring_config['fp_scores_pre']"], {}), "(scoring_config['fp_scores_pre'])\n", (6370, 6403), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((7518, 
7542), 'patteRNA.rnalib.dot2states', 'rnalib.dot2states', (['motif'], {}), '(motif)\n', (7535, 7542), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((8880, 8958), 'numpy.all', 'np.all', (['[(motif_samples[motif] >= min_sample_size) for motif in motif_samples]'], {}), '([(motif_samples[motif] >= min_sample_size) for motif in motif_samples])\n', (8886, 8958), True, 'import numpy as np\n'), ((11436, 11495), 'patteRNA.rnalib.compile_motif_constraints', 'rnalib.compile_motif_constraints', (['pt[0]', 'pt[1]', 'start_shift'], {}), '(pt[0], pt[1], start_shift)\n', (11468, 11495), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((11515, 11558), 'patteRNA.viennalib.fold', 'viennalib.fold', (['transcript.seq[rstart:rend]'], {}), '(transcript.seq[rstart:rend])\n', (11529, 11558), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((11579, 11634), 'patteRNA.viennalib.hc_fold', 'viennalib.hc_fold', (['transcript.seq[rstart:rend]'], {'hcs': 'hcs'}), '(transcript.seq[rstart:rend], hcs=hcs)\n', (11596, 11634), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((12834, 12866), 'numpy.sum', 'np.sum', (['transcript.gamma'], {'axis': '(0)'}), '(transcript.gamma, axis=0)\n', (12840, 12866), True, 'import numpy as np\n'), ((13258, 13291), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['spp_tmp'], {'size': '(5)'}), '(spp_tmp, size=5)\n', (13274, 13291), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((13324, 13355), 'scipy.ndimage.filters.median_filter', 'median_filter', (['spp_tmp'], {'size': '(15)'}), '(spp_tmp, size=15)\n', (13337, 13355), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((15220, 15254), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['hdsl_tmp'], {'size': '(5)'}), '(hdsl_tmp, size=5)\n', (15236, 15254), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((15288, 15320), 'scipy.ndimage.filters.median_filter', 'median_filter', (['hdsl_tmp'], {'size': '(15)'}), '(hdsl_tmp, size=15)\n', (15301, 15320), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((2747, 2838), 'numpy.any', 'np.any', (["[self.no_vienna, self.run_config['no_cscores'], not viennalib.vienna_imported]"], {}), "([self.no_vienna, self.run_config['no_cscores'], not viennalib.\n vienna_imported])\n", (2753, 2838), True, 'import numpy as np\n'), ((6447, 6518), 'os.rename', 'os.rename', (["scoring_config['fp_scores_pre']", "scoring_config['fp_scores']"], {}), "(scoring_config['fp_scores_pre'], scoring_config['fp_scores'])\n", (6456, 6518), False, 'import os\n'), ((7272, 7314), 'os.remove', 'os.remove', (["scoring_config['fp_scores_pre']"], {}), "(scoring_config['fp_scores_pre'])\n", (7281, 7314), False, 'import os\n'), ((10654, 10666), 'numpy.log10', 'np.log10', (['pv'], {}), '(pv)\n', (10662, 10666), True, 'import numpy as np\n'), ((11278, 11306), 'numpy.max', 'np.max', (['(0, start - context)'], {}), '((0, start - context))\n', (11284, 11306), True, 'import numpy as np\n'), ((13812, 13836), 'patteRNA.rnalib.dot2states', 'rnalib.dot2states', (['motif'], {}), '(motif)\n', (13829, 13836), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((3888, 3946), 'functools.partial', 'partial', (['self.sample_worker'], {'path': 'path', 'batch': 'cscore_batch'}), '(self.sample_worker, path=path, batch=cscore_batch)\n', (3895, 3946), False, 'from 
functools import partial\n'), ((10103, 10127), 'numpy.isnan', 'np.isnan', (["score['score']"], {}), "(score['score'])\n", (10111, 10127), True, 'import numpy as np\n'), ((12172, 12191), 'numpy.log', 'np.log', (['(yhi + 1e-20)'], {}), '(yhi + 1e-20)\n', (12178, 12191), True, 'import numpy as np\n'), ((12220, 12243), 'numpy.log', 'np.log', (['(1 - yhi + 1e-20)'], {}), '(1 - yhi + 1e-20)\n', (12226, 12243), True, 'import numpy as np\n'), ((4123, 4147), 'scipy.stats.genlogistic.fit', 'genlogistic.fit', (['samples'], {}), '(samples)\n', (4138, 4147), False, 'from scipy.stats import genlogistic\n'), ((4199, 4255), 'scipy.stats.genlogistic', 'genlogistic', ([], {'c': 'params[0]', 'loc': 'params[1]', 'scale': 'params[2]'}), '(c=params[0], loc=params[1], scale=params[2])\n', (4210, 4255), False, 'from scipy.stats import genlogistic\n'), ((5749, 5816), 'functools.partial', 'partial', (['self.score_worker'], {'model': 'self.model', 'config': 'scoring_config'}), '(self.score_worker, model=self.model, config=scoring_config)\n', (5756, 5816), False, 'from functools import partial\n'), ((14201, 14221), 'numpy.isnan', 'np.isnan', (["s['score']"], {}), "(s['score'])\n", (14209, 14221), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from numpy.testing import assert_array_equal,\
assert_array_almost_equal, assert_almost_equal
from .image_generation import binary_circle_border
from ..spim import Spim, SpimStage
from ..process_opencv import ContourFinderSimple, FeatureFormFilter
class FeatureFilterTestCase(unittest.TestCase):
seed = 0
repetitions = 20
def test_binary_circle_left_border_filter(self):
h, w = [1000, 2000]
contour_finder = ContourFinderSimple()
feature_filter = FeatureFormFilter(size=0,
solidity=0.9,
remove_on_edge=True)
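        # Each generated circle touches one image border, so the edge-aware form filter should
        # remove it and leave no contours behind.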
for i in range(self.repetitions):
            # randomly select one of the four borders (high is exclusive)
            j = np.random.randint(low=0, high=4)
border = ["left", "right", "top", "bottom"][j]
circ_im, exp_pos, exp_radius = binary_circle_border(
border,
shape=(h, w),
val_type=np.uint8,
seed=self.seed)
assert_array_equal(np.sort(np.unique(circ_im)), np.array([0, 255]))
# make spim, assuming image is already binary
bin_spim = Spim(image=circ_im,
metadata={},
stage=SpimStage.binarized,
cached=False,
predecessors=[])
cont_spim = bin_spim\
.extract_features(contour_finder)\
.filter_features(feature_filter)
blobs = cont_spim.metadata["contours"]
self.assertEqual(len(blobs), 0)
|
[
"numpy.random.randint",
"numpy.array",
"numpy.unique"
] |
[((802, 834), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(3)'}), '(low=0, high=3)\n', (819, 834), True, 'import numpy as np\n'), ((1149, 1167), 'numpy.array', 'np.array', (['[0, 255]'], {}), '([0, 255])\n', (1157, 1167), True, 'import numpy as np\n'), ((1128, 1146), 'numpy.unique', 'np.unique', (['circ_im'], {}), '(circ_im)\n', (1137, 1146), True, 'import numpy as np\n')]
|
import numpy as np
import modeling.collision_model as cm
import visualization.panda.world as wd
if __name__ == '__main__':
base = wd.World(cam_pos=np.array([.7, .05, .3]), lookat_pos=np.zeros(3))
# object
object_ref = cm.CollisionModel(initor="./objects/bunnysim.stl",
cdprimit_type="box",
cdmesh_type="triangles")
object_ref.set_rgba([.9, .75, .35, 1])
# object 1
object1 = object_ref.copy()
object1.set_pos(np.array([0, -.18, 0]))
# object 2
object2 = object_ref.copy()
object2.set_pos(np.array([0, -.09, 0]))
# object 3
object3 = object_ref.copy()
object3.change_cdprimitive_type(cdprimitive_type="surface_balls")
object3.set_pos(np.array([0, .0, 0]))
# object 4
object4 = object_ref.copy()
object4.set_pos(np.array([0, .09, 0]))
# object 5
object5 = object_ref.copy()
object5.change_cdmesh_type(cdmesh_type="convex_hull")
object5.set_pos(np.array([0, .18, 0]))
# object 1 show
object1.attach_to(base)
# object 2 show
object2.attach_to(base)
object2.show_cdprimit()
# object 3 show
object3.attach_to(base)
object3.show_cdprimit()
# object 4 show
object4.attach_to(base)
object4.show_cdmesh()
# object 5 show
object5.attach_to(base)
object5.show_cdmesh()
base.run()
|
[
"numpy.array",
"numpy.zeros",
"modeling.collision_model.CollisionModel"
] |
[((231, 331), 'modeling.collision_model.CollisionModel', 'cm.CollisionModel', ([], {'initor': '"""./objects/bunnysim.stl"""', 'cdprimit_type': '"""box"""', 'cdmesh_type': '"""triangles"""'}), "(initor='./objects/bunnysim.stl', cdprimit_type='box',\n cdmesh_type='triangles')\n", (248, 331), True, 'import modeling.collision_model as cm\n'), ((508, 531), 'numpy.array', 'np.array', (['[0, -0.18, 0]'], {}), '([0, -0.18, 0])\n', (516, 531), True, 'import numpy as np\n'), ((599, 622), 'numpy.array', 'np.array', (['[0, -0.09, 0]'], {}), '([0, -0.09, 0])\n', (607, 622), True, 'import numpy as np\n'), ((760, 781), 'numpy.array', 'np.array', (['[0, 0.0, 0]'], {}), '([0, 0.0, 0])\n', (768, 781), True, 'import numpy as np\n'), ((849, 871), 'numpy.array', 'np.array', (['[0, 0.09, 0]'], {}), '([0, 0.09, 0])\n', (857, 871), True, 'import numpy as np\n'), ((997, 1019), 'numpy.array', 'np.array', (['[0, 0.18, 0]'], {}), '([0, 0.18, 0])\n', (1005, 1019), True, 'import numpy as np\n'), ((152, 178), 'numpy.array', 'np.array', (['[0.7, 0.05, 0.3]'], {}), '([0.7, 0.05, 0.3])\n', (160, 178), True, 'import numpy as np\n'), ((188, 199), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (196, 199), True, 'import numpy as np\n')]
|
# --------------------------------------------------------
# DenseFusion 6D Object Pose Estimation by Iterative Dense Fusion
# Licensed under The MIT License [see LICENSE for details]
# Written by Chen
# --------------------------------------------------------
import argparse
import os
import random
import time
import numpy as np
import torch
from pathlib import Path
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset
from DenseFusion.lib.network import PoseNet, PoseRefineNet
from DenseFusion.lib.loss import Loss
from DenseFusion.lib.loss_refiner import Loss_refine
#import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
import pc_reconstruction.open3d_utils as pc_utils
import json
from DenseFusion.tools.utils import *
from DenseFusion.lib.transformations import quaternion_matrix
def main(data_set_name, root, save_extra='', load_pretrained=True, load_trained=False, load_name='',
label_mode='new_pred', p_extra_data=0.0, p_viewpoints=1.0, show_sample=False, plot_train=False, device_num=0):
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--workers', type=int, default=8, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help='learning rate')
parser.add_argument('--lr_rate', default=0.3, help='learning rate decay rate')
    parser.add_argument('--w', default=0.015, help='balancing weight w used in the pose loss')
    parser.add_argument('--w_rate', default=0.3, help='decay rate for the loss weight w')
parser.add_argument('--decay_margin', default=0.016, help='margin to decay lr & w')
parser.add_argument('--refine_margin', default=0.010, help='margin to start the training of iterative refinement')
parser.add_argument('--noise_trans', default=0.03,
help='range of the random noise of translation added to the training data')
parser.add_argument('--iteration', type=int, default=2, help='number of refinement iterations')
parser.add_argument('--nepoch', type=int, default=500, help='max number of epochs to train')
    parser.add_argument('--refine_epoch_margin', type=int, default=400, help='epoch after which refiner training starts regardless of the test distance')
parser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')
opt = parser.parse_args()
opt.manualSeed = random.randint(1, 10000)
torch.cuda.set_device(device_num)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
print('bs', opt.batch_size, 'it', opt.iteration)
opt.refine_start = False
opt.num_points = 1000 #number of points on the input pointcloud
opt.outf = os.path.join(root, 'DenseFusion/trained_models', data_set_name+save_extra) #folder to save trained models
if not os.path.exists(opt.outf):
os.makedirs(opt.outf)
opt.log_dir = os.path.join(root, 'DenseFusion/experiments/logs', data_set_name+save_extra) #folder to save logs
opt.log_dir_images = os.path.join(root, 'DenseFusion/experiments/logs', data_set_name+save_extra, 'images')
if not os.path.exists(opt.log_dir):
os.makedirs(opt.log_dir)
if not os.path.exists(opt.log_dir_images):
os.makedirs(opt.log_dir_images)
opt.repeat_epoch = 1 #number of repeat times for one epoch training
print('create datasets')
dataset = PoseDataset('train',
opt.num_points,
True,
0.0,
opt.refine_start,
data_set_name,
root,
show_sample=show_sample,
label_mode=label_mode,
p_extra_data=p_extra_data,
p_viewpoints=p_viewpoints)
test_dataset = PoseDataset('test',
opt.num_points,
False,
0.0,
opt.refine_start,
data_set_name,
root,
show_sample=show_sample,
label_mode=label_mode,
p_extra_data=p_extra_data,
p_viewpoints=p_viewpoints)
opt.num_objects = dataset.num_classes #number of object classes in the dataset
print('n classes: {}'.format(dataset.num_classes))
print('create models')
estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
estimator.cuda()
refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
refiner.cuda()
if load_pretrained:
# load the pretrained estimator model on the ycb dataset, leave the last layer due to mismatch
init_state_dict = estimator.state_dict()
pretrained_dict = torch.load(os.path.join(root, 'DenseFusion/trained_models/pose_model.pth'))
pretrained_dict['conv4_r.weight'] = init_state_dict['conv4_r.weight']
pretrained_dict['conv4_r.bias'] = init_state_dict['conv4_r.bias']
pretrained_dict['conv4_t.weight'] = init_state_dict['conv4_t.weight']
pretrained_dict['conv4_t.bias'] = init_state_dict['conv4_t.bias']
pretrained_dict['conv4_c.weight'] = init_state_dict['conv4_c.weight']
pretrained_dict['conv4_c.bias'] = init_state_dict['conv4_c.bias']
estimator.load_state_dict(pretrained_dict)
del init_state_dict
del pretrained_dict
# load the pretrained refiner model on the ycb dataset, leave the last layer due to mismatch
init_state_dict = refiner.state_dict()
pretrained_dict = torch.load(os.path.join(root, 'DenseFusion/trained_models/pose_refine_model.pth'))
pretrained_dict['conv3_r.weight'] = init_state_dict['conv3_r.weight']
pretrained_dict['conv3_r.bias'] = init_state_dict['conv3_r.bias']
pretrained_dict['conv3_t.weight'] = init_state_dict['conv3_t.weight']
pretrained_dict['conv3_t.bias'] = init_state_dict['conv3_t.bias']
refiner.load_state_dict(pretrained_dict)
del init_state_dict
del pretrained_dict
elif load_trained:
loading_path = os.path.join(root, 'DenseFusion/trained_models/{}/pose_model.pth'.format(load_name))
pretrained_dict = torch.load(loading_path)
estimator.load_state_dict(pretrained_dict)
loading_path = os.path.join(root, 'DenseFusion/trained_models/{}/pose_refine_model.pth'.format(load_name))
pretrained_dict = torch.load(loading_path)
refiner.load_state_dict(pretrained_dict)
del pretrained_dict
print('create optimizer and dataloader')
#opt.refine_start = False
opt.decay_start = False
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=opt.workers,
# collate_fn=collate_fn)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
opt.sym_list = dataset.get_sym_list()
opt.num_points_mesh = dataset.get_num_points_mesh()
print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}'
'\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(
len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))
criterion = Loss(opt.num_points_mesh, opt.sym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
best_test = np.Inf
best_test_epoch = 0
best_train = np.Inf
best_train_epoch = 0
if opt.start_epoch == 1:
for log in os.listdir(opt.log_dir):
if log !='images':
os.remove(os.path.join(opt.log_dir, log))
for img in os.listdir(opt.log_dir_images):
os.remove(os.path.join(opt.log_dir_images, img))
train_dists = []
test_dists = []
losses = []
refiner_losses = []
best_loss = np.inf
best_loss_epoch = 0
elapsed_times = 0.0
for epoch in range(opt.start_epoch, opt.nepoch):
start_time = time.time()
train_count = 0
train_dis_avg = 0.0
if opt.refine_start:
estimator.eval()
refiner.train()
else:
estimator.train()
optimizer.zero_grad()
epoch_losses = []
epoch_losses_refiner = []
for rep in range(opt.repeat_epoch):
#for batch in dataloader:
#points, choose, img, target, model_points, idx = batch
#print(points.shape, choose.shape, img.shape, target.shape, model_points.shape)
for i, data in enumerate(dataloader, 0):
points, choose, img, target, model_points, idx = data
#print(points.shape, choose.shape, img.shape, target.shape, model_points.shape)
points, choose, img, target, model_points, idx = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target).cuda(), \
Variable(model_points).cuda(), \
Variable(idx).cuda()
pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
loss, dis, new_points, new_target, pred = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
epoch_losses.append(loss.item())
if opt.refine_start:
for ite in range(0, opt.iteration):
pred_r, pred_t = refiner(new_points, emb, idx)
dis, new_points, new_target, pred = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)
dis.backward()
epoch_losses_refiner.append(dis.item())
else:
loss.backward()
epoch_losses_refiner.append(0)
train_dis_avg += dis.item()
train_count += 1
                # gradient accumulation: take an optimizer step once opt.batch_size samples have been processed
if train_count % opt.batch_size == 0:
optimizer.step()
optimizer.zero_grad()
# make last step of epoch if something is remaining
if train_count % opt.batch_size != 0:
optimizer.step()
optimizer.zero_grad()
refiner_losses.append(np.mean(epoch_losses_refiner))
losses.append(np.mean(epoch_losses))
if losses[-1] < best_loss:
best_loss = losses[-1]
best_loss_epoch = epoch
train_dists.append(train_dis_avg/train_count)
if train_dists[-1] < best_train:
best_train_epoch = epoch
best_train = train_dists[-1]
test_dis = 0.0
test_count = 0
estimator.eval()
refiner.eval()
if plot_train:
# plot randomly selected validation preds
jj = 0
x_axis = 0
fig_x = 4
fig_y = 4
log_indexes = sorted(list(np.random.choice(list(range(len(testdataloader))), int(fig_x*(fig_y/2)), replace=False)))
plt.cla()
plt.close('all')
fig, axs = plt.subplots(fig_x, fig_y, constrained_layout=True, figsize=(25, 15))
for j, data in enumerate(testdataloader, 0):
points, choose, img, target, model_points, idx, intr, np_img = data
points, choose, img, target, model_points, idx = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target).cuda(), \
Variable(model_points).cuda(), \
Variable(idx).cuda()
pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
if plot_train:
if j in log_indexes:
my_pred, my_r, my_t = my_estimator_prediction(pred_r, pred_t, pred_c, opt.num_points, 1, points)
_, dis, new_points, new_target, pred = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
if opt.refine_start:
for ite in range(0, opt.iteration):
pred_r, pred_t = refiner(new_points, emb, idx)
if plot_train:
if j in log_indexes:
my_pred, my_r, my_t = my_refined_prediction(pred_r, pred_t, my_r, my_t)
dis, new_points, new_target, pred = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)
if plot_train:
if j in log_indexes:
if jj == 4:
jj = 0
x_axis += 1
my_r = quaternion_matrix(my_r)[:3, :3]
np_pred = np.dot(model_points[0].data.cpu().numpy(), my_r.T)
np_pred = np.add(np_pred, my_t)
np_target = target[0].data.cpu().numpy()
np_img = np_img[0].data.numpy()
image_target = pc_utils.pointcloud2image(np_img.copy(), np_target, 3, intr)
image_prediction = pc_utils.pointcloud2image(np_img.copy(), np_pred, 3, intr)
axs[x_axis, jj].imshow(image_target)
axs[x_axis, jj].set_title('target {}'.format(j))
axs[x_axis, jj].set_axis_off()
jj += 1
axs[x_axis, jj].imshow(image_prediction)
axs[x_axis, jj].set_title('prediction {}'.format(j))
axs[x_axis, jj].set_axis_off()
jj += 1
test_dis += dis.item()
test_count += 1
test_dis = test_dis / test_count
test_dists.append(test_dis)
if plot_train:
fig.suptitle('epoch {}, with a average dist: {}'.format(epoch, test_dis), fontsize=16)
plt.savefig(os.path.join(opt.log_dir_images, 'test_images_epoch_{}.png'.format(epoch)))
if epoch > 1:
plt.close('all')
plt.cla()
fig, axs = plt.subplots(2, 2, constrained_layout=True, figsize=(30, 20))
axs[0, 0].plot(losses)
axs[0, 0].set_title('Training estimator loss')
axs[0, 0].set_xlabel('Epochs')
axs[0, 0].set_ylabel('Loss')
axs[0, 1].plot(refiner_losses)
axs[0, 1].set_title('Training refiner loss')
axs[0, 1].set_xlabel('Epochs')
axs[0, 1].set_ylabel('Loss')
axs[1, 0].plot(train_dists)
axs[1, 0].set_title('Training Avg. distance')
axs[1, 0].set_xlabel('Epochs')
axs[1, 0].set_ylabel('Avg. distance [m]')
axs[1, 1].plot(test_dists)
axs[1, 1].set_title('Test Avg. distance')
axs[1, 1].set_xlabel('Epochs')
axs[1, 1].set_ylabel('Avg. distance [m]')
plt.savefig(os.path.join(opt.log_dir_images, 'losses.png'))
out_dict = {
'losses': losses,
'refiner_losses': refiner_losses,
'train_dists': train_dists,
'test_dists': test_dists
}
with open(os.path.join(opt.log_dir, 'losses.json'), 'w') as outfile:
json.dump(out_dict, outfile)
del out_dict
print('>>>>>>>>----------Epoch {0} finished---------<<<<<<<<'.format(epoch))
if test_dis <= best_test:
best_test = test_dis
best_test_epoch = epoch
if opt.refine_start:
state_dict = refiner.state_dict()
torch.save(state_dict, '{0}/pose_refine_model.pth'.format(opt.outf))
del state_dict
else:
state_dict = estimator.state_dict()
torch.save(state_dict, '{0}/pose_model.pth'.format(opt.outf))
del state_dict
print('>>>>>>>>----------MODEL SAVED---------<<<<<<<<')
t_elapsed = time.time() - start_time
elapsed_times += t_elapsed/3600
print('elapsed time: {} min, total elapsed time: {} hours'.format(
            np.round(t_elapsed/60, 2), np.round(elapsed_times, 2)))
print('Train loss : {}'.format(losses[-1]))
print('Best train loss {} : {}'.format(best_loss_epoch, best_loss))
print('Train dist : {}'.format(train_dists[-1]))
print('Best train dist {} : {}'.format(best_train_epoch, best_train))
print('Test dist : {}'.format(test_dists[-1]))
print('Best test dist {} : {}'.format(best_test_epoch, best_test))
        # adjust the training schedule once the thresholds below are reached (lr/w decay, refiner start)
if best_test < opt.decay_margin and not opt.decay_start:
print('decay lr')
opt.decay_start = True
opt.lr *= opt.lr_rate
opt.w *= opt.w_rate
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
if (best_test < opt.refine_margin or epoch >= opt.refine_epoch_margin) and not opt.refine_start:
#print('train refiner')
opt.refine_start = True
print('bs', opt.batch_size, 'it', opt.iteration)
opt.batch_size = int(opt.batch_size / opt.iteration)
print('new bs', opt.batch_size)
optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
#testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
#opt.sym_list = dataset.get_sym_list()
#opt.num_points_mesh = dataset.get_num_points_mesh()
print('>>>>>>>>----------train refiner!---------<<<<<<<<')
criterion = Loss(opt.num_points_mesh, opt.sym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
if __name__ == '__main__':
data_set_name = 'bluedude_solo'
save_extra = '_test4'
root = Path(__file__).resolve().parent.parent.parent
main(data_set_name, root, save_extra=save_extra)
|
[
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.mean",
"os.path.join",
"numpy.round",
"random.randint",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"torch.load",
"os.path.exists",
"DenseFusion.lib.network.PoseNet",
"random.seed",
"matplotlib.pyplot.cla",
"DenseFusion.datasets.myDatasetAugmented.dataset.PoseDataset",
"torch.cuda.set_device",
"numpy.add",
"matplotlib.pyplot.subplots",
"json.dump",
"DenseFusion.lib.loss.Loss",
"torch.manual_seed",
"torch.autograd.Variable",
"DenseFusion.lib.network.PoseRefineNet",
"DenseFusion.lib.transformations.quaternion_matrix",
"os.listdir",
"DenseFusion.lib.loss_refiner.Loss_refine",
"os.makedirs",
"time.time"
] |
[((1184, 1209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1207, 1209), False, 'import argparse\n'), ((2502, 2526), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2516, 2526), False, 'import random\n'), ((2532, 2565), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device_num'], {}), '(device_num)\n', (2553, 2565), False, 'import torch\n'), ((2571, 2598), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2582, 2598), False, 'import random\n'), ((2603, 2636), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2620, 2636), False, 'import torch\n'), ((2804, 2880), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/trained_models"""', '(data_set_name + save_extra)'], {}), "(root, 'DenseFusion/trained_models', data_set_name + save_extra)\n", (2816, 2880), False, 'import os\n'), ((2995, 3073), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/experiments/logs"""', '(data_set_name + save_extra)'], {}), "(root, 'DenseFusion/experiments/logs', data_set_name + save_extra)\n", (3007, 3073), False, 'import os\n'), ((3118, 3210), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/experiments/logs"""', '(data_set_name + save_extra)', '"""images"""'], {}), "(root, 'DenseFusion/experiments/logs', data_set_name +\n save_extra, 'images')\n", (3130, 3210), False, 'import os\n'), ((3482, 3678), 'DenseFusion.datasets.myDatasetAugmented.dataset.PoseDataset', 'PoseDataset', (['"""train"""', 'opt.num_points', '(True)', '(0.0)', 'opt.refine_start', 'data_set_name', 'root'], {'show_sample': 'show_sample', 'label_mode': 'label_mode', 'p_extra_data': 'p_extra_data', 'p_viewpoints': 'p_viewpoints'}), "('train', opt.num_points, True, 0.0, opt.refine_start,\n data_set_name, root, show_sample=show_sample, label_mode=label_mode,\n p_extra_data=p_extra_data, p_viewpoints=p_viewpoints)\n", (3493, 3678), False, 'from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset\n'), ((3951, 4147), 'DenseFusion.datasets.myDatasetAugmented.dataset.PoseDataset', 'PoseDataset', (['"""test"""', 'opt.num_points', '(False)', '(0.0)', 'opt.refine_start', 'data_set_name', 'root'], {'show_sample': 'show_sample', 'label_mode': 'label_mode', 'p_extra_data': 'p_extra_data', 'p_viewpoints': 'p_viewpoints'}), "('test', opt.num_points, False, 0.0, opt.refine_start,\n data_set_name, root, show_sample=show_sample, label_mode=label_mode,\n p_extra_data=p_extra_data, p_viewpoints=p_viewpoints)\n", (3962, 4147), False, 'from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset\n'), ((4634, 4693), 'DenseFusion.lib.network.PoseNet', 'PoseNet', ([], {'num_points': 'opt.num_points', 'num_obj': 'opt.num_objects'}), '(num_points=opt.num_points, num_obj=opt.num_objects)\n', (4641, 4693), False, 'from DenseFusion.lib.network import PoseNet, PoseRefineNet\n'), ((4729, 4794), 'DenseFusion.lib.network.PoseRefineNet', 'PoseRefineNet', ([], {'num_points': 'opt.num_points', 'num_obj': 'opt.num_objects'}), '(num_points=opt.num_points, num_obj=opt.num_objects)\n', (4742, 4794), False, 'from DenseFusion.lib.network import PoseNet, PoseRefineNet\n'), ((7164, 7257), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': 'opt.workers'}), '(dataset, batch_size=1, shuffle=True,\n num_workers=opt.workers)\n', (7191, 7257), False, 'import torch\n'), ((7276, 7375), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'opt.workers'}), '(test_dataset, batch_size=1, shuffle=False,\n num_workers=opt.workers)\n', (7303, 7375), False, 'import torch\n'), ((7784, 7823), 'DenseFusion.lib.loss.Loss', 'Loss', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (7788, 7823), False, 'from DenseFusion.lib.loss import Loss\n'), ((7847, 7893), 'DenseFusion.lib.loss_refiner.Loss_refine', 'Loss_refine', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (7858, 7893), False, 'from DenseFusion.lib.loss_refiner import Loss_refine\n'), ((2921, 2945), 'os.path.exists', 'os.path.exists', (['opt.outf'], {}), '(opt.outf)\n', (2935, 2945), False, 'import os\n'), ((2955, 2976), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (2966, 2976), False, 'import os\n'), ((3216, 3243), 'os.path.exists', 'os.path.exists', (['opt.log_dir'], {}), '(opt.log_dir)\n', (3230, 3243), False, 'import os\n'), ((3253, 3277), 'os.makedirs', 'os.makedirs', (['opt.log_dir'], {}), '(opt.log_dir)\n', (3264, 3277), False, 'import os\n'), ((3289, 3323), 'os.path.exists', 'os.path.exists', (['opt.log_dir_images'], {}), '(opt.log_dir_images)\n', (3303, 3323), False, 'import os\n'), ((3333, 3364), 'os.makedirs', 'os.makedirs', (['opt.log_dir_images'], {}), '(opt.log_dir_images)\n', (3344, 3364), False, 'import os\n'), ((8039, 8062), 'os.listdir', 'os.listdir', (['opt.log_dir'], {}), '(opt.log_dir)\n', (8049, 8062), False, 'import os\n'), ((8172, 8202), 'os.listdir', 'os.listdir', (['opt.log_dir_images'], {}), '(opt.log_dir_images)\n', (8182, 8202), False, 'import os\n'), ((8493, 8504), 'time.time', 'time.time', ([], {}), '()\n', (8502, 8504), False, 'import time\n'), ((5028, 5091), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/trained_models/pose_model.pth"""'], {}), "(root, 'DenseFusion/trained_models/pose_model.pth')\n", (5040, 5091), False, 'import os\n'), ((5843, 5913), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/trained_models/pose_refine_model.pth"""'], {}), "(root, 'DenseFusion/trained_models/pose_refine_model.pth')\n", (5855, 5913), False, 'import os\n'), ((6482, 6506), 'torch.load', 'torch.load', (['loading_path'], {}), '(loading_path)\n', (6492, 6506), False, 'import torch\n'), ((6700, 6724), 'torch.load', 'torch.load', (['loading_path'], {}), '(loading_path)\n', (6710, 6724), False, 'import torch\n'), ((11062, 11091), 'numpy.mean', 'np.mean', (['epoch_losses_refiner'], {}), '(epoch_losses_refiner)\n', (11069, 11091), True, 'import numpy as np\n'), ((11115, 11136), 'numpy.mean', 'np.mean', (['epoch_losses'], {}), '(epoch_losses)\n', (11122, 11136), True, 'import numpy as np\n'), ((11818, 11827), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (11825, 11827), True, 'from matplotlib import pyplot as plt\n'), ((11840, 11856), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11849, 11856), True, 'from matplotlib import pyplot as plt\n'), ((11880, 11949), 'matplotlib.pyplot.subplots', 'plt.subplots', (['fig_x', 'fig_y'], {'constrained_layout': '(True)', 'figsize': '(25, 15)'}), '(fig_x, fig_y, constrained_layout=True, figsize=(25, 15))\n', (11892, 11949), True, 'from matplotlib import pyplot as plt\n'), ((16272, 16300), 'json.dump', 'json.dump', (['out_dict', 'outfile'], {}), '(out_dict, outfile)\n', (16281, 16300), False, 'import json\n'), ((16980, 16991), 'time.time', 'time.time', ([], {}), '()\n', 
(16989, 16991), False, 'import time\n'), ((18821, 18860), 'DenseFusion.lib.loss.Loss', 'Loss', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (18825, 18860), False, 'from DenseFusion.lib.loss import Loss\n'), ((18892, 18938), 'DenseFusion.lib.loss_refiner.Loss_refine', 'Loss_refine', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (18903, 18938), False, 'from DenseFusion.lib.loss_refiner import Loss_refine\n'), ((8226, 8263), 'os.path.join', 'os.path.join', (['opt.log_dir_images', 'img'], {}), '(opt.log_dir_images, img)\n', (8238, 8263), False, 'import os\n'), ((14975, 14991), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (14984, 14991), True, 'from matplotlib import pyplot as plt\n'), ((15008, 15017), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (15015, 15017), True, 'from matplotlib import pyplot as plt\n'), ((15045, 15106), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'constrained_layout': '(True)', 'figsize': '(30, 20)'}), '(2, 2, constrained_layout=True, figsize=(30, 20))\n', (15057, 15106), True, 'from matplotlib import pyplot as plt\n'), ((16201, 16241), 'os.path.join', 'os.path.join', (['opt.log_dir', '"""losses.json"""'], {}), "(opt.log_dir, 'losses.json')\n", (16213, 16241), False, 'import os\n'), ((17132, 17159), 'numpy.round', 'np.round', (['(t_elapsed / 60)', '(2)'], {}), '(t_elapsed / 60, 2)\n', (17140, 17159), True, 'import numpy as np\n'), ((17159, 17182), 'numpy.round', 'np.round', (['elapsed_times'], {}), '(elapsed_times)\n', (17167, 17182), True, 'import numpy as np\n'), ((8121, 8151), 'os.path.join', 'os.path.join', (['opt.log_dir', 'log'], {}), '(opt.log_dir, log)\n', (8133, 8151), False, 'import os\n'), ((13816, 13837), 'numpy.add', 'np.add', (['np_pred', 'my_t'], {}), '(np_pred, my_t)\n', (13822, 13837), True, 'import numpy as np\n'), ((15950, 15996), 'os.path.join', 'os.path.join', (['opt.log_dir_images', '"""losses.png"""'], {}), "(opt.log_dir_images, 'losses.png')\n", (15962, 15996), False, 'import os\n'), ((12145, 12161), 'torch.autograd.Variable', 'Variable', (['points'], {}), '(points)\n', (12153, 12161), False, 'from torch.autograd import Variable\n'), ((12233, 12249), 'torch.autograd.Variable', 'Variable', (['choose'], {}), '(choose)\n', (12241, 12249), False, 'from torch.autograd import Variable\n'), ((12321, 12334), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (12329, 12334), False, 'from torch.autograd import Variable\n'), ((12406, 12422), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (12414, 12422), False, 'from torch.autograd import Variable\n'), ((12494, 12516), 'torch.autograd.Variable', 'Variable', (['model_points'], {}), '(model_points)\n', (12502, 12516), False, 'from torch.autograd import Variable\n'), ((12588, 12601), 'torch.autograd.Variable', 'Variable', (['idx'], {}), '(idx)\n', (12596, 12601), False, 'from torch.autograd import Variable\n'), ((13673, 13696), 'DenseFusion.lib.transformations.quaternion_matrix', 'quaternion_matrix', (['my_r'], {}), '(my_r)\n', (13690, 13696), False, 'from DenseFusion.lib.transformations import quaternion_matrix\n'), ((19041, 19055), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (19045, 19055), False, 'from pathlib import Path\n'), ((9315, 9331), 'torch.autograd.Variable', 'Variable', (['points'], {}), '(points)\n', (9323, 9331), False, 'from torch.autograd import Variable\n'), ((9407, 9423), 'torch.autograd.Variable', 
'Variable', (['choose'], {}), '(choose)\n', (9415, 9423), False, 'from torch.autograd import Variable\n'), ((9499, 9512), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (9507, 9512), False, 'from torch.autograd import Variable\n'), ((9588, 9604), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (9596, 9604), False, 'from torch.autograd import Variable\n'), ((9680, 9702), 'torch.autograd.Variable', 'Variable', (['model_points'], {}), '(model_points)\n', (9688, 9702), False, 'from torch.autograd import Variable\n'), ((9778, 9791), 'torch.autograd.Variable', 'Variable', (['idx'], {}), '(idx)\n', (9786, 9791), False, 'from torch.autograd import Variable\n')]
|
import numpy as np
import pandas as pd
from ncls import NCLS
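# Count, for each interval in scdf, how many intervals in ocdf overlap it,
# using an NCLS interval tree built on ocdf for the overlap queries.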
def _number_overlapping(scdf, ocdf, **kwargs):
keep_nonoverlapping = kwargs.get("keep_nonoverlapping", True)
column_name = kwargs.get("overlap_col", True)
if scdf.empty:
return None
if ocdf.empty:
if keep_nonoverlapping:
df = scdf.copy()
df.insert(df.shape[1], column_name, 0)
return df
else:
return None
oncls = NCLS(ocdf.Start.values, ocdf.End.values, ocdf.index.values)
starts = scdf.Start.values
ends = scdf.End.values
indexes = scdf.index.values
_self_indexes, _other_indexes = oncls.all_overlaps_both(
starts, ends, indexes)
s = pd.Series(_self_indexes)
counts_per_read = s.value_counts()[s.unique()].reset_index()
counts_per_read.columns = ["Index", "Count"]
df = scdf.copy()
if keep_nonoverlapping:
_missing_indexes = np.setdiff1d(scdf.index, _self_indexes)
missing = pd.DataFrame(data={"Index": _missing_indexes, "Count": 0}, index=_missing_indexes)
counts_per_read = pd.concat([counts_per_read, missing])
else:
df = df.loc[_self_indexes]
counts_per_read = counts_per_read.set_index("Index")
df.insert(df.shape[1], column_name, counts_per_read)
return df
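# Compute, for each interval in scdf, the fraction of its length that is
# covered by the intervals in ocdf, and store it in fraction_col.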
def _coverage(scdf, ocdf, **kwargs):
fraction_col = kwargs["fraction_col"]
if scdf.empty:
return None
if ocdf.empty:
df = scdf.copy()
df.insert(df.shape[1], fraction_col, 0.0)
return df
oncls = NCLS(ocdf.Start.values, ocdf.End.values, ocdf.index.values)
starts = scdf.Start.values
ends = scdf.End.values
indexes = scdf.index.values
_lengths = oncls.coverage(starts, ends, indexes)
_lengths = _lengths / (ends - starts)
_fractions = _lengths
_fractions = _fractions.astype("float64")
_fractions = np.nan_to_num(_fractions)
scdf = scdf.copy()
scdf.insert(scdf.shape[1], fraction_col, _fractions)
return scdf
|
[
"pandas.DataFrame",
"numpy.nan_to_num",
"ncls.NCLS",
"numpy.setdiff1d",
"pandas.Series",
"pandas.concat"
] |
[((471, 530), 'ncls.NCLS', 'NCLS', (['ocdf.Start.values', 'ocdf.End.values', 'ocdf.index.values'], {}), '(ocdf.Start.values, ocdf.End.values, ocdf.index.values)\n', (475, 530), False, 'from ncls import NCLS\n'), ((724, 748), 'pandas.Series', 'pd.Series', (['_self_indexes'], {}), '(_self_indexes)\n', (733, 748), True, 'import pandas as pd\n'), ((1570, 1629), 'ncls.NCLS', 'NCLS', (['ocdf.Start.values', 'ocdf.End.values', 'ocdf.index.values'], {}), '(ocdf.Start.values, ocdf.End.values, ocdf.index.values)\n', (1574, 1629), False, 'from ncls import NCLS\n'), ((1907, 1932), 'numpy.nan_to_num', 'np.nan_to_num', (['_fractions'], {}), '(_fractions)\n', (1920, 1932), True, 'import numpy as np\n'), ((941, 980), 'numpy.setdiff1d', 'np.setdiff1d', (['scdf.index', '_self_indexes'], {}), '(scdf.index, _self_indexes)\n', (953, 980), True, 'import numpy as np\n'), ((999, 1086), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Index': _missing_indexes, 'Count': 0}", 'index': '_missing_indexes'}), "(data={'Index': _missing_indexes, 'Count': 0}, index=\n _missing_indexes)\n", (1011, 1086), True, 'import pandas as pd\n'), ((1108, 1145), 'pandas.concat', 'pd.concat', (['[counts_per_read, missing]'], {}), '([counts_per_read, missing])\n', (1117, 1145), True, 'import pandas as pd\n')]
|
import os
import sys
import platform
import numpy
import threading
import ctypes
import string
import random
import requests
import json
from colorama import Fore
VALID = 0
INVALID = 0
BOOST_LENGTH = 24
CLASSIC_LENGTH = 16
CODESET = []
BASEURL = "https://discord.gift/"
CODESET[:0] = string.ascii_letters + string.digits
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: 0 | Invalid: 0")
os.system("cls")
NOIRGEN = """[40;35m
[40;34mv 1.0.0[40;35m
/$$ /$$ /$$ /$$$$$$
| $$$ | $$ |__/ /$$__ $$
| $$$$| $$ /$$$$$$ /$$ /$$$$$$ | $$ \__/ /$$$$$$ /$$$$$$$
| $$ $$ $$ /$$__ $$| $$ /$$__ $$| $$ /$$$$ /$$__ $$| $$__ $$
| $$ $$$$| $$ \ $$| $$| $$ \__/| $$|_ $$| $$$$$$$$| $$ \ $$
| $$\ $$$| $$ | $$| $$| $$ | $$ \ $$| $$_____/| $$ | $$
| $$ \ $$| $$$$$$/| $$| $$ | $$$$$$/| $$$$$$$| $$ | $$
|__/ \__/ \______/ |__/|__/ \______/ \_______/|__/ |__/
"""
print(NOIRGEN)
for i in range(3):
print('')
CODE_AMOUNT = int(input(" [40;36mCodes to Generate => "))
for i in range(2):
print('')
BOOST_CLASSIC = str(input(" [40;32mBoost or Classic => "))
for i in range(2):
print('')
THREAD_COUNT = int(input(" [40;31mThreads => "))
for i in range(5):
print('')
def checkBoost(boostURL):
global VALID
global INVALID
CHECKURL = f"https://discordapp.com/api/v9/entitlements/gift-codes/{boostURL}?with_application=false&with_subscription_plan=true"
resp = requests.get(CHECKURL)
if resp.status_code == 200:
VALID += 1
return True
else:
INVALID += 1
return False
def genBoost():
global VALID
global INVALID
for i in range(CODE_AMOUNT):
code = numpy.random.choice(CODESET, size=[CODE_AMOUNT, BOOST_LENGTH])
for i in code:
try:
boostCode = ''.join(e for e in i)
boostURL = BASEURL + boostCode
if checkBoost(boostURL):
with open("valid.txt", "w") as f:
f.write(boostURL + "\n")
print(Fore.GREEN + f"[!] VALID | {boostURL}")
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
else:
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
print(Fore.RED + f"[!] INVALID | {boostURL}")
except Exception as e:
print(e)
print(Fore.RED + "[!] An Error has Occured!")
def checkClassic(classicURL):
global VALID
global INVALID
CHECKURL = f"https://discordapp.com/api/v9/entitlements/gift-codes/{classicURL}?with_application=false&with_subscription_plan=true"
resp = requests.get(CHECKURL)
if resp.status_code == 200:
VALID += 1
return True
else:
INVALID += 1
return False
def genClassic():
global VALID
global INVALID
for i in range(CODE_AMOUNT):
code = numpy.random.choice(CODESET, size=[CODE_AMOUNT, CLASSIC_LENGTH])
for i in code:
try:
classicCode = ''.join(e for e in i)
classicURL = BASEURL + classicCode
if checkClassic(classicURL):
with open("valid.txt", "w") as f:
f.write(classicURL + "\n")
print(Fore.GREEN + f"[!] VALID | {classicURL}")
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
else:
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
print(Fore.RED + f"[!] INVALID | {classicURL}")
except Exception as e:
print(e)
print(Fore.RED + "[!] An Error has Occured!")
if BOOST_CLASSIC == "Boost" or "B" or "b" or "boost":
for i in range(THREAD_COUNT):
threading.Thread(target=genBoost).start()
elif BOOST_CLASSIC == "Classic" or "C" or "c" or "classic":
for i in range(THREAD_COUNT):
threading.Thread(target=genClassic).start()
|
[
"threading.Thread",
"os.system",
"ctypes.windll.kernel32.SetConsoleTitleW",
"requests.get",
"numpy.random.choice"
] |
[((330, 422), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: 0 | Invalid: 0"""'], {}), "(\n f'NoirGen and Checker | Valid: 0 | Invalid: 0')\n", (369, 422), False, 'import ctypes\n'), ((418, 434), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (427, 434), False, 'import os\n'), ((2097, 2119), 'requests.get', 'requests.get', (['CHECKURL'], {}), '(CHECKURL)\n', (2109, 2119), False, 'import requests\n'), ((3453, 3475), 'requests.get', 'requests.get', (['CHECKURL'], {}), '(CHECKURL)\n', (3465, 3475), False, 'import requests\n'), ((2355, 2417), 'numpy.random.choice', 'numpy.random.choice', (['CODESET'], {'size': '[CODE_AMOUNT, BOOST_LENGTH]'}), '(CODESET, size=[CODE_AMOUNT, BOOST_LENGTH])\n', (2374, 2417), False, 'import numpy\n'), ((3713, 3777), 'numpy.random.choice', 'numpy.random.choice', (['CODESET'], {'size': '[CODE_AMOUNT, CLASSIC_LENGTH]'}), '(CODESET, size=[CODE_AMOUNT, CLASSIC_LENGTH])\n', (3732, 3777), False, 'import numpy\n'), ((4708, 4741), 'threading.Thread', 'threading.Thread', ([], {'target': 'genBoost'}), '(target=genBoost)\n', (4724, 4741), False, 'import threading\n'), ((2795, 2901), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (2834, 2901), False, 'import ctypes\n'), ((2940, 3046), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (2979, 3046), False, 'import ctypes\n'), ((4169, 4275), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (4208, 4275), False, 'import ctypes\n'), ((4314, 4420), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (4353, 4420), False, 'import ctypes\n'), ((4852, 4887), 'threading.Thread', 'threading.Thread', ([], {'target': 'genClassic'}), '(target=genClassic)\n', (4868, 4887), False, 'import threading\n')]
|
import numpy as np
class RidgeRegression:
def __init__(self, bias=True, weight_l2=1e-3, scale=True):
self.bias = bias
self.weight_l2 = weight_l2
self.weights = None
self.scale = scale
def _scale(self, X):
return (X - self._min) / (self._max - self._min)
def fit(self, X, y):
if self.scale:
self._min = X.min(axis=0)
self._max = X.max(axis=0)
X = self._scale(X)
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
n_samples, n_features = X.shape
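        # closed-form ridge solution w = (X^T X + lambda*I)^(-1) X^T y,
        # computed via the pseudoinverse for numerical stability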
self.weights = np.linalg.pinv(X.T @ X + self.weight_l2 * np.eye(n_features)) @ X.T @ y
def predict(self, X):
if self.scale:
X = self._scale(X)
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
return X @ self.weights
class LogisticRegression:
def __init__(self, lr=1e-2, bias=True, weight_l2=1e-3):
self.lr = lr
self.bias = bias
self.weight_l2 = weight_l2
self.weights = None
def _sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def fit(self, X, y, max_iter=100):
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
n_samples, n_features = X.shape
self.weights = np.zeros(n_features)
for _ in range(max_iter):
y_hat = self._sigmoid(X @ self.weights)
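            # gradient step on the L2-regularized cross-entropy:
            # grad = 2*lambda*w + (1/n) * X^T (y_hat - y)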
self.weights -= self.lr * (self.weight_l2 * 2 * self.weights + (1 / n_samples) * X.T @ (y_hat - y))
def predict(self, X):
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
return self._sigmoid(X @ self.weights)
|
[
"numpy.eye",
"numpy.ones",
"numpy.zeros",
"numpy.exp"
] |
[((1402, 1422), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (1410, 1422), True, 'import numpy as np\n'), ((1195, 1205), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1201, 1205), True, 'import numpy as np\n'), ((541, 565), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (548, 565), True, 'import numpy as np\n'), ((858, 882), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (865, 882), True, 'import numpy as np\n'), ((1300, 1324), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1307, 1324), True, 'import numpy as np\n'), ((1710, 1734), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1717, 1734), True, 'import numpy as np\n'), ((685, 703), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (691, 703), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from tensorflow.keras.utils import Sequence
from core.dataset import augment
from core.image import read_image, preprocess_image
from core.utils import decode_annotation, decode_name
class Dataset(Sequence):
def __init__(self, cfg, verbose=0):
self.verbose = verbose
self.mask = cfg["yolo"]["mask"]
self.anchors = cfg["yolo"]["anchors"]
self.max_boxes = cfg["yolo"]["max_boxes"]
self.strides = cfg["yolo"]["strides"]
self.name_path = cfg['yolo']['name_path']
self.anno_path = cfg["train"]["anno_path"]
self.image_size = cfg["train"]["image_size"]
self.batch_size = cfg["train"]["batch_size"]
self.normal_method = cfg['train']["normal_method"]
self.mosaic = cfg['train']['mosaic']
self.label_smoothing = cfg['train']["label_smoothing"]
self.annotation = decode_annotation(anno_path=self.anno_path)
self.num_anno = len(self.annotation)
self.name = decode_name(name_path=self.name_path)
self.num_classes = len(self.name)
# init
self._image_size = np.random.choice(self.image_size)
self._grid_size = self._image_size // self.strides
def __len__(self):
return int(np.ceil(float(len(self.annotation)) / self.batch_size))
def __getitem__(self, idx):
l_bound = idx * self.batch_size
r_bound = (idx + 1) * self.batch_size
if r_bound > len(self.annotation):
r_bound = len(self.annotation)
l_bound = r_bound - self.batch_size
self._on_batch_start(idx)
batch_image = np.zeros((r_bound - l_bound, self._image_size, self._image_size, 3), dtype=np.float32)
batch_label = [np.zeros((r_bound - l_bound, size, size, len(mask_per_layer) * (5 + self.num_classes)),
dtype=np.float32)
for size, mask_per_layer in zip(self._grid_size, self.mask)]
for i, sub_idx in enumerate(range(l_bound, r_bound)):
image, bboxes, labels = self._getitem(sub_idx)
if self.mosaic:
sub_idx = np.random.choice(np.delete(np.arange(self.num_anno), idx), 3, False)
image2, bboxes2, labels2 = self._getitem(sub_idx[0])
image3, bboxes3, labels3 = self._getitem(sub_idx[1])
image4, bboxes4, labels4 = self._getitem(sub_idx[2])
image, bboxes, labels = augment.mosic(image, bboxes, labels,
image2, bboxes2, labels2,
image3, bboxes3, labels3,
image4, bboxes4, labels4)
if self.normal_method:
image = augment.random_distort(image)
image = augment.random_grayscale(image)
image, bboxes = augment.random_flip_lr(image, bboxes)
image, bboxes = augment.random_rotate(image, bboxes)
image, bboxes, labels = augment.random_crop_and_zoom(image, bboxes, labels,
(self._image_size, self._image_size))
image, bboxes, labels = augment.bbox_filter(image, bboxes, labels)
labels = self._preprocess_true_boxes(bboxes, labels)
batch_image[i] = image
for j in range(len(self.mask)):
batch_label[j][i, :, :, :] = labels[j][:, :, :]
return batch_image, batch_label
def _getitem(self, sub_idx):
path, bboxes, labels = self.annotation[sub_idx]
image = read_image(path)
if len(bboxes) != 0:
bboxes, labels = np.array(bboxes), np.array(labels)
else:
bboxes, labels = np.zeros((0, 4)), np.zeros((0,))
image, bboxes = preprocess_image(image, (self._image_size, self._image_size), bboxes)
labels = augment.onehot(labels, self.num_classes, self.label_smoothing)
return image, bboxes, labels
def _preprocess_true_boxes(self, bboxes, labels):
bboxes_label = [np.zeros((size, size, len(mask_per_layer), 5 + self.num_classes), np.float32)
for size, mask_per_layer in zip(self._grid_size, self.mask)]
bboxes = np.array(bboxes, dtype=np.float32)
# calculate anchor index for true boxes
anchor_area = self.anchors[:, 0] * self.anchors[:, 1]
bboxes_wh = bboxes[:, 2:4] - bboxes[:, 0:2]
bboxes_wh_exp = np.tile(np.expand_dims(bboxes_wh, 1), (1, self.anchors.shape[0], 1))
boxes_area = bboxes_wh_exp[..., 0] * bboxes_wh_exp[..., 1]
intersection = np.minimum(bboxes_wh_exp[..., 0], self.anchors[:, 0]) * np.minimum(bboxes_wh_exp[..., 1],
self.anchors[:, 1])
iou = intersection / (boxes_area + anchor_area - intersection + 1e-8) # (N, A)
best_anchor_idxs = np.argmax(iou, axis=-1) # (N,)
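        # assign each box to the output layer / anchor slot whose prior has the highest
        # IoU with the box's width-height; self.mask maps the flat anchor index
        # to (detection layer, anchor-within-layer)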
for i, bbox in enumerate(bboxes):
search = np.where(self.mask == best_anchor_idxs[i])
best_detect = search[0][0]
best_anchor = search[1][0]
coord_xy = (bbox[0:2] + bbox[2:4]) * 0.5
coord_xy /= self.strides[best_detect]
coord_xy = coord_xy.astype(np.int)
bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, :4] = bbox
bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, 4:5] = 1.
bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, 5:] = labels[i, :]
return [layer.reshape([layer.shape[0], layer.shape[1], -1]) for layer in bboxes_label]
def _on_batch_start(self, idx, patience=10):
if idx % patience == 0:
self._image_size = np.random.choice(self.image_size)
self._grid_size = self._image_size // self.strides
if self.verbose:
print('Change image size to', self._image_size)
def on_epoch_end(self):
np.random.shuffle(self.annotation) # shuffle
from core.utils import decode_cfg, load_weights
cfg = decode_cfg("cfgs/custom.yaml")
train_dataset = Dataset(cfg)
|
[
"core.dataset.augment.bbox_filter",
"numpy.argmax",
"core.dataset.augment.random_distort",
"numpy.arange",
"core.image.preprocess_image",
"core.utils.decode_annotation",
"core.dataset.augment.random_rotate",
"core.dataset.augment.random_flip_lr",
"numpy.random.choice",
"numpy.random.shuffle",
"core.dataset.augment.random_grayscale",
"numpy.minimum",
"core.utils.decode_cfg",
"core.dataset.augment.random_crop_and_zoom",
"core.dataset.augment.mosic",
"core.image.read_image",
"core.utils.decode_name",
"core.dataset.augment.onehot",
"numpy.zeros",
"numpy.expand_dims",
"numpy.where",
"numpy.array"
] |
[((6246, 6276), 'core.utils.decode_cfg', 'decode_cfg', (['"""cfgs/custom.yaml"""'], {}), "('cfgs/custom.yaml')\n", (6256, 6276), False, 'from core.utils import decode_cfg, load_weights\n'), ((913, 956), 'core.utils.decode_annotation', 'decode_annotation', ([], {'anno_path': 'self.anno_path'}), '(anno_path=self.anno_path)\n', (930, 956), False, 'from core.utils import decode_annotation, decode_name\n'), ((1022, 1059), 'core.utils.decode_name', 'decode_name', ([], {'name_path': 'self.name_path'}), '(name_path=self.name_path)\n', (1033, 1059), False, 'from core.utils import decode_annotation, decode_name\n'), ((1145, 1178), 'numpy.random.choice', 'np.random.choice', (['self.image_size'], {}), '(self.image_size)\n', (1161, 1178), True, 'import numpy as np\n'), ((1650, 1741), 'numpy.zeros', 'np.zeros', (['(r_bound - l_bound, self._image_size, self._image_size, 3)'], {'dtype': 'np.float32'}), '((r_bound - l_bound, self._image_size, self._image_size, 3), dtype=\n np.float32)\n', (1658, 1741), True, 'import numpy as np\n'), ((3671, 3687), 'core.image.read_image', 'read_image', (['path'], {}), '(path)\n', (3681, 3687), False, 'from core.image import read_image, preprocess_image\n'), ((3883, 3952), 'core.image.preprocess_image', 'preprocess_image', (['image', '(self._image_size, self._image_size)', 'bboxes'], {}), '(image, (self._image_size, self._image_size), bboxes)\n', (3899, 3952), False, 'from core.image import read_image, preprocess_image\n'), ((3970, 4032), 'core.dataset.augment.onehot', 'augment.onehot', (['labels', 'self.num_classes', 'self.label_smoothing'], {}), '(labels, self.num_classes, self.label_smoothing)\n', (3984, 4032), False, 'from core.dataset import augment\n'), ((4332, 4366), 'numpy.array', 'np.array', (['bboxes'], {'dtype': 'np.float32'}), '(bboxes, dtype=np.float32)\n', (4340, 4366), True, 'import numpy as np\n'), ((5028, 5051), 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(-1)'}), '(iou, axis=-1)\n', (5037, 5051), True, 'import numpy as np\n'), ((6118, 6152), 'numpy.random.shuffle', 'np.random.shuffle', (['self.annotation'], {}), '(self.annotation)\n', (6135, 6152), True, 'import numpy as np\n'), ((3272, 3314), 'core.dataset.augment.bbox_filter', 'augment.bbox_filter', (['image', 'bboxes', 'labels'], {}), '(image, bboxes, labels)\n', (3291, 3314), False, 'from core.dataset import augment\n'), ((4562, 4590), 'numpy.expand_dims', 'np.expand_dims', (['bboxes_wh', '(1)'], {}), '(bboxes_wh, 1)\n', (4576, 4590), True, 'import numpy as np\n'), ((4713, 4766), 'numpy.minimum', 'np.minimum', (['bboxes_wh_exp[..., 0]', 'self.anchors[:, 0]'], {}), '(bboxes_wh_exp[..., 0], self.anchors[:, 0])\n', (4723, 4766), True, 'import numpy as np\n'), ((4769, 4822), 'numpy.minimum', 'np.minimum', (['bboxes_wh_exp[..., 1]', 'self.anchors[:, 1]'], {}), '(bboxes_wh_exp[..., 1], self.anchors[:, 1])\n', (4779, 4822), True, 'import numpy as np\n'), ((5137, 5179), 'numpy.where', 'np.where', (['(self.mask == best_anchor_idxs[i])'], {}), '(self.mask == best_anchor_idxs[i])\n', (5145, 5179), True, 'import numpy as np\n'), ((5890, 5923), 'numpy.random.choice', 'np.random.choice', (['self.image_size'], {}), '(self.image_size)\n', (5906, 5923), True, 'import numpy as np\n'), ((2475, 2593), 'core.dataset.augment.mosic', 'augment.mosic', (['image', 'bboxes', 'labels', 'image2', 'bboxes2', 'labels2', 'image3', 'bboxes3', 'labels3', 'image4', 'bboxes4', 'labels4'], {}), '(image, bboxes, labels, image2, bboxes2, labels2, image3,\n bboxes3, labels3, image4, bboxes4, labels4)\n', (2488, 2593), False, 'from 
core.dataset import augment\n'), ((2811, 2840), 'core.dataset.augment.random_distort', 'augment.random_distort', (['image'], {}), '(image)\n', (2833, 2840), False, 'from core.dataset import augment\n'), ((2865, 2896), 'core.dataset.augment.random_grayscale', 'augment.random_grayscale', (['image'], {}), '(image)\n', (2889, 2896), False, 'from core.dataset import augment\n'), ((2929, 2966), 'core.dataset.augment.random_flip_lr', 'augment.random_flip_lr', (['image', 'bboxes'], {}), '(image, bboxes)\n', (2951, 2966), False, 'from core.dataset import augment\n'), ((2999, 3035), 'core.dataset.augment.random_rotate', 'augment.random_rotate', (['image', 'bboxes'], {}), '(image, bboxes)\n', (3020, 3035), False, 'from core.dataset import augment\n'), ((3076, 3170), 'core.dataset.augment.random_crop_and_zoom', 'augment.random_crop_and_zoom', (['image', 'bboxes', 'labels', '(self._image_size, self._image_size)'], {}), '(image, bboxes, labels, (self._image_size, self\n ._image_size))\n', (3104, 3170), False, 'from core.dataset import augment\n'), ((3747, 3763), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (3755, 3763), True, 'import numpy as np\n'), ((3765, 3781), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3773, 3781), True, 'import numpy as np\n'), ((3825, 3841), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (3833, 3841), True, 'import numpy as np\n'), ((3843, 3857), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (3851, 3857), True, 'import numpy as np\n'), ((2186, 2210), 'numpy.arange', 'np.arange', (['self.num_anno'], {}), '(self.num_anno)\n', (2195, 2210), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Simple model of receptors diffusing in and out of synapses.
# Simulation of the Dynamics with the Euler method.
# This simulates the effect of a sudden change in the pool size
#
# <NAME>, January-April 2017
import numpy as np
from matplotlib import pyplot as plt
# parameters
N = 3 # number of synapses
steps = 10000 # number of time steps to simulate
duration = 10.0 # duration in minutes
change_time = 2.0 # time at which number of pool size changes in minutes
ts = duration/steps # time step of the simulation
beta = 60.0/43.0 # transition rate out of slots in 1/min
delta = 1.0/14.0 # removal rate in 1/min
phi = 2.67 # relative pool size
F = 0.9 # set desired filling fraction
# initializations: the w_i and p are set to their steady state values
s = np.zeros(N)
for i in range(0,N):
s[i] = 40.0 + i*20.0
S = sum(s)
gamma = delta*F*S*phi # production rate set to achieve desired p*
alpha = beta/(phi*S*(1-F)) # set alpha accordingly
P = gamma/delta # total number of receptors in steady state
# variables we want to keep track of to plot them at the end:
# 'u' stands for up-regulation and 'd' stands for down-regulation.
# Up- and down-regulation are simulated simultaneously.
pu = np.zeros(steps) # pool size
pd = np.zeros(steps)
wu = np.zeros([N,steps]) # synaptic weights
wd = np.zeros([N,steps])
ru = np.zeros(steps) # relative change of synaptic weights
rd = np.zeros(steps)
times = np.zeros(steps)
pu[0] = P
pd[0] = P
ru[0] = 1.0
rd[0] = 1.0
for i in range(0,N):
wu[i,0] = F*s[i]
wd[i,0] = F*s[i]
# simulation loop
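# Euler discretization of the mean-field equations:
#   dw_i/dt = alpha*p*(s_i - w_i) - beta*w_i
#   dp/dt   = beta*W - alpha*p*(S - W) - delta*p + gamma,  with W = sum_i w_i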
for t in range(0, steps-1):
if t==round(change_time/ts): # change pool size after some time
pu[t]=2.0*P # double number of receptors in the pool
pd[t]=0.0*P # set number of receptors in the pool to zero
Wu = sum(wu[:,t])
Wd = sum(wd[:,t])
wu[:,t+1] = wu[:,t] + ts * (alpha*pu[t] * (s-wu[:,t]) - beta*wu[:,t])
wd[:,t+1] = wd[:,t] + ts * (alpha*pd[t] * (s-wd[:,t]) - beta*wd[:,t])
pu[t+1] = pu[t] + ts * (beta*Wu - alpha*pu[t]*(S-Wu) - delta*pu[t] + gamma)
pd[t+1] = pd[t] + ts * (beta*Wd - alpha*pd[t]*(S-Wd) - delta*pd[t] + gamma)
ru[t+1] = wu[0,t+1]/wu[0,0]*100.0
rd[t+1] = wd[0,t+1]/wd[0,0]*100.0
times[t+1] = ts*(t+1)
# show results
f = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.gca().set_prop_cycle(plt.cycler('color', ['blue', 'green', 'red']))
[line1, line2, line3] = plt.plot(times, np.transpose(wu))
plt.plot(times, np.transpose(wd), ls='dotted')
plt.legend((line3, line2, line1), (r'$w_3$', r'$w_2$', r'$w_1$'), loc=1, fontsize=12)
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel(r'$w_i$', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f.savefig("Fig4A.pdf", bbox_inches='tight')
f2 = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.plot(times, pu, "k")
plt.plot(times, pd, "k", ls='dotted')
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel('pool size', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f2.savefig("Fig4C.pdf", bbox_inches='tight')
f3 = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.plot(times, ru, "k")
plt.plot(times, rd, "k", ls='dotted')
plt.axis((0.0, 10.0, 40.0, 140.0))
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel(r'$w_i(t)/w_i(0) \quad [\%]$', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f3.savefig("Fig4B.pdf", bbox_inches='tight')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.cycler",
"numpy.transpose",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((837, 848), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (845, 848), True, 'import numpy as np\n'), ((1294, 1309), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1302, 1309), True, 'import numpy as np\n'), ((1334, 1349), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1342, 1349), True, 'import numpy as np\n'), ((1355, 1375), 'numpy.zeros', 'np.zeros', (['[N, steps]'], {}), '([N, steps])\n', (1363, 1375), True, 'import numpy as np\n'), ((1402, 1422), 'numpy.zeros', 'np.zeros', (['[N, steps]'], {}), '([N, steps])\n', (1410, 1422), True, 'import numpy as np\n'), ((1427, 1442), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1435, 1442), True, 'import numpy as np\n'), ((1493, 1508), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1501, 1508), True, 'import numpy as np\n'), ((1517, 1532), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1525, 1532), True, 'import numpy as np\n'), ((2359, 2385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (2369, 2385), True, 'from matplotlib import pyplot as plt\n'), ((2465, 2487), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (2471, 2487), True, 'from matplotlib import pyplot as plt\n'), ((2488, 2527), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (2494, 2527), True, 'from matplotlib import pyplot as plt\n'), ((2706, 2792), 'matplotlib.pyplot.legend', 'plt.legend', (['(line3, line2, line1)', "('$w_3$', '$w_2$', '$w_1$')"], {'loc': '(1)', 'fontsize': '(12)'}), "((line3, line2, line1), ('$w_3$', '$w_2$', '$w_1$'), loc=1,\n fontsize=12)\n", (2716, 2792), True, 'from matplotlib import pyplot as plt\n'), ((2792, 2839), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\; [{\\\\rm min}]$"""'], {'fontsize': '(12)'}), "('$t \\\\; [{\\\\rm min}]$', fontsize=12)\n", (2802, 2839), True, 'from matplotlib import pyplot as plt\n'), ((2839, 2871), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w_i$"""'], {'fontsize': '(12)'}), "('$w_i$', fontsize=12)\n", (2849, 2871), True, 'from matplotlib import pyplot as plt\n'), ((2873, 2906), 'matplotlib.pyplot.title', 'plt.title', (['"""$F=0.9$"""'], {'fontsize': '(12)'}), "('$F=0.9$', fontsize=12)\n", (2882, 2906), True, 'from matplotlib import pyplot as plt\n'), ((2908, 2918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2916, 2918), True, 'from matplotlib import pyplot as plt\n'), ((2969, 2995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (2979, 2995), True, 'from matplotlib import pyplot as plt\n'), ((3075, 3097), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (3081, 3097), True, 'from matplotlib import pyplot as plt\n'), ((3098, 3137), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (3104, 3137), True, 'from matplotlib import pyplot as plt\n'), ((3138, 3162), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'pu', '"""k"""'], {}), "(times, pu, 'k')\n", (3146, 3162), True, 'from matplotlib import pyplot as plt\n'), ((3163, 3200), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'pd', '"""k"""'], {'ls': '"""dotted"""'}), "(times, pd, 'k', ls='dotted')\n", (3171, 3200), True, 'from matplotlib import pyplot as plt\n'), ((3201, 3248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\; [{\\\\rm min}]$"""'], {'fontsize': '(12)'}), "('$t \\\\; 
[{\\\\rm min}]$', fontsize=12)\n", (3211, 3248), True, 'from matplotlib import pyplot as plt\n'), ((3248, 3284), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pool size"""'], {'fontsize': '(12)'}), "('pool size', fontsize=12)\n", (3258, 3284), True, 'from matplotlib import pyplot as plt\n'), ((3285, 3318), 'matplotlib.pyplot.title', 'plt.title', (['"""$F=0.9$"""'], {'fontsize': '(12)'}), "('$F=0.9$', fontsize=12)\n", (3294, 3318), True, 'from matplotlib import pyplot as plt\n'), ((3320, 3330), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3328, 3330), True, 'from matplotlib import pyplot as plt\n'), ((3382, 3408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (3392, 3408), True, 'from matplotlib import pyplot as plt\n'), ((3488, 3510), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (3494, 3510), True, 'from matplotlib import pyplot as plt\n'), ((3511, 3550), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (3517, 3550), True, 'from matplotlib import pyplot as plt\n'), ((3552, 3576), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'ru', '"""k"""'], {}), "(times, ru, 'k')\n", (3560, 3576), True, 'from matplotlib import pyplot as plt\n'), ((3577, 3614), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'rd', '"""k"""'], {'ls': '"""dotted"""'}), "(times, rd, 'k', ls='dotted')\n", (3585, 3614), True, 'from matplotlib import pyplot as plt\n'), ((3615, 3649), 'matplotlib.pyplot.axis', 'plt.axis', (['(0.0, 10.0, 40.0, 140.0)'], {}), '((0.0, 10.0, 40.0, 140.0))\n', (3623, 3649), True, 'from matplotlib import pyplot as plt\n'), ((3650, 3697), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\; [{\\\\rm min}]$"""'], {'fontsize': '(12)'}), "('$t \\\\; [{\\\\rm min}]$', fontsize=12)\n", (3660, 3697), True, 'from matplotlib import pyplot as plt\n'), ((3697, 3752), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w_i(t)/w_i(0) \\\\quad [\\\\%]$"""'], {'fontsize': '(12)'}), "('$w_i(t)/w_i(0) \\\\quad [\\\\%]$', fontsize=12)\n", (3707, 3752), True, 'from matplotlib import pyplot as plt\n'), ((3752, 3785), 'matplotlib.pyplot.title', 'plt.title', (['"""$F=0.9$"""'], {'fontsize': '(12)'}), "('$F=0.9$', fontsize=12)\n", (3761, 3785), True, 'from matplotlib import pyplot as plt\n'), ((3787, 3797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3795, 3797), True, 'from matplotlib import pyplot as plt\n'), ((2554, 2599), 'matplotlib.pyplot.cycler', 'plt.cycler', (['"""color"""', "['blue', 'green', 'red']"], {}), "('color', ['blue', 'green', 'red'])\n", (2564, 2599), True, 'from matplotlib import pyplot as plt\n'), ((2641, 2657), 'numpy.transpose', 'np.transpose', (['wu'], {}), '(wu)\n', (2653, 2657), True, 'import numpy as np\n'), ((2675, 2691), 'numpy.transpose', 'np.transpose', (['wd'], {}), '(wd)\n', (2687, 2691), True, 'import numpy as np\n'), ((2529, 2538), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2536, 2538), True, 'from matplotlib import pyplot as plt\n')]
|
import subprocess
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import scipy
from scipy.sparse.linalg import lsqr
import time
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.widgets import Slider, RadioButtons
from .geomtools import *
from .emcoords import *
from ripser import ripser
import warnings
"""#########################################
Main Circular Coordinates Class
#########################################"""
SCATTER_SIZE = 50
class CircularCoords(EMCoords):
def __init__(self, X, n_landmarks, distance_matrix=False, prime=41, maxdim=1, verbose=False):
"""
Parameters
----------
X: ndarray(N, d)
A point cloud with N points in d dimensions
n_landmarks: int
Number of landmarks to use
distance_matrix: boolean
If true, treat X as a distance matrix instead of a point cloud
prime : int
Field coefficient with which to compute rips on landmarks
maxdim : int
Maximum dimension of homology. Only dimension 1 is needed for circular coordinates,
but it may be of interest to see other dimensions (e.g. for a torus)
"""
EMCoords.__init__(self, X, n_landmarks, distance_matrix, prime, maxdim, verbose)
self.type_ = "circ"
def get_coordinates(self, perc = 0.99, do_weighted = False, cocycle_idx = [0], partunity_fn = partunity_linear):
"""
Perform circular coordinates via persistent cohomology of
sparse filtrations (<NAME> 2018)
Parameters
----------
perc : float
Percent coverage
do_weighted : boolean
Whether to make a weighted cocycle on the representatives
cocycle_idx : list
Add the cocycles together in this list
partunity_fn: (dist_land_data, r_cover) -> phi
A function from the distances of each landmark to a bump function
"""
## Step 1: Come up with the representative cocycle as a formal sum
## of the chosen cocycles
n_landmarks = self.n_landmarks_
n_data = self.X_.shape[0]
        dgm1 = self.dgms_[1]/2.0 # Divide by 2 so that the Cech filtration is contained in the Rips filtration
cohomdeath = -np.inf
cohombirth = np.inf
cocycle = np.zeros((0, 3))
prime = self.prime_
for k in range(len(cocycle_idx)):
cocycle = add_cocycles(cocycle, self.cocycles_[1][cocycle_idx[k]], p=prime)
cohomdeath = max(cohomdeath, dgm1[cocycle_idx[k], 0])
cohombirth = min(cohombirth, dgm1[cocycle_idx[k], 1])
## Step 2: Determine radius for balls
dist_land_data = self.dist_land_data_
dist_land_land = self.dist_land_land_
coverage = np.max(np.min(dist_land_data, 1))
r_cover = (1-perc)*max(cohomdeath, coverage) + perc*cohombirth
self.r_cover_ = r_cover # Store covering radius for reference
if self.verbose:
print("r_cover = %.3g"%r_cover)
## Step 3: Setup coboundary matrix, delta_0, for Cech_{r_cover }
## and use it to find a projection of the cocycle
## onto the image of delta0
#Lift to integer cocycle
val = np.array(cocycle[:, 2])
val[val > (prime-1)/2] -= prime
Y = np.zeros((n_landmarks, n_landmarks))
Y[cocycle[:, 0], cocycle[:, 1]] = val
Y = Y + Y.T
#Select edges that are under the threshold
[I, J] = np.meshgrid(np.arange(n_landmarks), np.arange(n_landmarks))
I = I[np.triu_indices(n_landmarks, 1)]
J = J[np.triu_indices(n_landmarks, 1)]
Y = Y[np.triu_indices(n_landmarks, 1)]
idx = np.arange(len(I))
idx = idx[dist_land_land[I, J] < 2*r_cover]
I = I[idx]
J = J[idx]
Y = Y[idx]
NEdges = len(I)
R = np.zeros((NEdges, 2))
R[:, 0] = J
R[:, 1] = I
#Make a flat array of NEdges weights parallel to the rows of R
if do_weighted:
W = dist_land_land[I, J]
else:
W = np.ones(NEdges)
delta0 = make_delta0(R)
wSqrt = np.sqrt(W).flatten()
WSqrt = scipy.sparse.spdiags(wSqrt, 0, len(W), len(W))
A = WSqrt*delta0
b = WSqrt.dot(Y)
tau = lsqr(A, b)[0]
theta = np.zeros((NEdges, 3))
theta[:, 0] = J
theta[:, 1] = I
theta[:, 2] = -delta0.dot(tau)
theta = add_cocycles(cocycle, theta, real=True)
## Step 4: Create the open covering U = {U_1,..., U_{s+1}} and partition of unity
U = dist_land_data < r_cover
phi = np.zeros_like(dist_land_data)
phi[U] = partunity_fn(dist_land_data[U], r_cover)
# Compute the partition of unity
# varphi_j(b) = phi_j(b)/(phi_1(b) + ... + phi_{n_landmarks}(b))
denom = np.sum(phi, 0)
nzero = np.sum(denom == 0)
if nzero > 0:
warnings.warn("There are %i point not covered by a landmark"%nzero)
denom[denom == 0] = 1
varphi = phi / denom[None, :]
# To each data point, associate the index of the first open set it belongs to
ball_indx = np.argmax(U, 0)
## Step 5: From U_1 to U_{s+1} - (U_1 \cup ... \cup U_s), apply classifying map
# compute all transition functions
theta_matrix = np.zeros((n_landmarks, n_landmarks))
I = np.array(theta[:, 0], dtype = np.int64)
J = np.array(theta[:, 1], dtype = np.int64)
theta = theta[:, 2]
theta = np.mod(theta + 0.5, 1) - 0.5
theta_matrix[I, J] = theta
theta_matrix[J, I] = -theta
class_map = -tau[ball_indx]
for i in range(n_data):
class_map[i] += theta_matrix[ball_indx[i], :].dot(varphi[:, i])
thetas = np.mod(2*np.pi*class_map, 2*np.pi)
return thetas
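    # Usage sketch (illustrative, not part of the original class): given a point
    # cloud X, the coordinates are typically obtained as
    #   cc = CircularCoords(X, n_landmarks=100, prime=41)
    #   thetas = cc.get_coordinates(perc=0.99, cocycle_idx=[0])
    # where thetas holds one angle in [0, 2*pi) per data point.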
def update_colors(self):
if len(self.selected) > 0:
idxs = np.array(list(self.selected))
self.selected_plot.set_offsets(self.dgm1_lifetime[idxs, :])
## Step 2: Update circular coordinates on point cloud
thetas = self.coords
c = plt.get_cmap('magma_r')
thetas -= np.min(thetas)
thetas /= np.max(thetas)
thetas = np.array(np.round(thetas*255), dtype=int)
C = c(thetas)
if self.Y.shape[1] == 2:
self.coords_scatter.set_color(C)
else:
self.coords_scatter._facecolor3d = C
self.coords_scatter._edgecolor3d = C
else:
self.selected_plot.set_offsets(np.zeros((0, 2)))
if self.Y.shape[1] == 2:
self.coords_scatter.set_color('C0')
else:
self.coords_scatter._facecolor3d = 'C0'
self.coords_scatter._edgecolor3d = 'C0'
def recompute_coords_dimred(self, clicked = []):
"""
Toggle including a cocycle from a set of points in the
persistence diagram, and update the circular coordinates
colors accordingly
Parameters
----------
clicked: list of int
Indices to toggle
"""
EMCoords.recompute_coords(self, clicked)
self.update_colors()
def onpick_dimred(self, evt):
if evt.artist == self.dgmplot:
## Step 1: Highlight point on persistence diagram
clicked = set(evt.ind.tolist())
self.recompute_coords_dimred(clicked)
self.ax_persistence.figure.canvas.draw()
self.ax_coords.figure.canvas.draw()
return True
def on_perc_slider_move_dimred(self, evt):
self.recompute_coords_dimred()
def on_partunity_selector_change_dimred(self, evt):
self.recompute_coords_dimred()
def plot_dimreduced(self, Y, using_jupyter = True, init_params = {'cocycle_idxs':[], 'perc':0.99, 'partunity_fn':partunity_linear, 'azim':-60, 'elev':30}, dpi=None):
"""
Do an interactive plot of circular coordinates, coloring a dimension
reduced version of the point cloud by the circular coordinates
Parameters
----------
Y: ndarray(N, d)
            A 2D or 3D point cloud with the same number of points as X
using_jupyter: boolean
Whether this is an interactive plot in jupyter
init_params: dict
            The initial parameters. Optional fields of the dictionary are as follows:
{
cocycle_idxs: list of int
A list of cocycles to start with
u: ndarray(3, float)
The initial stereographic north pole
perc: float
The percent coverage to start with
partunity_fn: (dist_land_data, r_cover) -> phi
The partition of unity function to start with
azim: float
Initial azimuth for 3d plots
elev: float
Initial elevation for 3d plots
}
dpi: int
Dot pixels per inch
"""
if Y.shape[1] < 2 or Y.shape[1] > 3:
raise Exception("Dimension reduced version must be in 2D or 3D")
self.Y = Y
if using_jupyter and in_notebook():
import matplotlib
matplotlib.use("nbAgg")
if not dpi:
dpi = compute_dpi(2, 1)
fig = plt.figure(figsize=(DREIMAC_FIG_RES*2, DREIMAC_FIG_RES), dpi=dpi)
## Step 1: Plot H1
self.ax_persistence = fig.add_subplot(121)
self.setup_ax_persistence(y_compress=1.37)
fig.canvas.mpl_connect('pick_event', self.onpick_dimred)
self.selected = set([])
## Step 2: Setup window for choosing coverage / partition of unity type
## and for displaying the chosen cocycle
self.perc_slider, self.partunity_selector, self.selected_cocycle_text, _ = EMCoords.setup_param_chooser_gui(self, fig, 0.25, 0.75, 0.4, 0.5, init_params)
self.perc_slider.on_changed(self.on_perc_slider_move_dimred)
self.partunity_selector.on_clicked(self.on_partunity_selector_change_dimred)
## Step 3: Setup axis for coordinates
if Y.shape[1] == 3:
self.ax_coords = fig.add_subplot(122, projection='3d')
self.coords_scatter = self.ax_coords.scatter(Y[:, 0], Y[:, 1], Y[:, 2], s=SCATTER_SIZE, cmap='magma_r')
set_3dplot_equalaspect(self.ax_coords, Y)
if 'azim' in init_params:
self.ax_coords.azim = init_params['azim']
if 'elev' in init_params:
self.ax_coords.elev = init_params['elev']
else:
self.ax_coords = fig.add_subplot(122)
self.coords_scatter = self.ax_coords.scatter(Y[:, 0], Y[:, 1], s=SCATTER_SIZE, cmap='magma_r')
self.ax_coords.set_aspect('equal')
self.ax_coords.set_title("Dimension Reduced Point Cloud")
if len(init_params['cocycle_idxs']) > 0:
# If some initial cocycle indices were chosen, update
# the plot
self.recompute_coords_dimred(init_params['cocycle_idxs'])
plt.show()
def get_selected_dimreduced_info(self):
"""
Return information about what the user selected and their viewpoint in
the interactive dimension reduced plot
Returns
-------
{
'partunity_fn': (dist_land_data, r_cover) -> phi
The selected function handle for the partition of unity
'cocycle_idxs':ndarray(dtype = int)
Indices of the selected cocycles,
'perc': float
The selected percent coverage,
'azim':float
                Azimuth if viewing in 3D
'elev':float
Elevation if viewing in 3D
}
"""
ret = EMCoords.get_selected_info(self)
if self.Y.shape[1] == 3:
ret['azim'] = self.ax_coords.azim
ret['elev'] = self.ax_coords.elev
return ret
def update_plot_torii(self, circ_idx):
"""
Update a joint plot of circular coordinates, switching between
2D and 3D modes if necessary
Parameters
----------
circ_idx: int
Index of the circular coordinates that have
been updated
"""
N = self.plots_in_one
n_plots = len(self.plots)
## Step 1: Figure out the index of the involved plot
plot_idx = int(np.floor(circ_idx/N))
plot = self.plots[plot_idx]
## Step 2: Extract the circular coordinates from all
## plots that have at least one cochain representative selected
labels = []
coords = []
for i in range(N):
idx = plot_idx*N + i
c_info = self.coords_info[idx]
if len(c_info['selected']) > 0:
# Only include circular coordinates that have at least
# one persistence dot selected
coords.append(c_info['coords'])
labels.append("Coords {}".format(idx))
## Step 3: Adjust the plot accordingly
if len(labels) > 0:
X = np.array([])
if len(labels) == 1:
# Just a single coordinate; put it on a circle
coords = np.array(coords).flatten()
X = np.array([np.cos(coords), np.sin(coords)]).T
else:
X = np.array(coords).T
updating_axes = False
if X.shape[1] == 3 and plot['axis_2d']:
# Need to switch from 2D to 3D coordinates
self.fig.delaxes(plot['ax'])
plot['axis_2d'] = False
updating_axes = True
elif X.shape[1] == 2 and not plot['axis_2d']:
# Need to switch from 3D to 2D coordinates
self.fig.delaxes(plot['ax'])
plot['axis_2d'] = True
updating_axes = True
if X.shape[1] == 3:
if updating_axes:
plot['ax'] = self.fig.add_subplot(2, n_plots+1, n_plots+3+plot_idx, projection='3d')
plot['coords_scatter'] = plot['ax'].scatter(X[:, 0], X[:, 1], X[:, 2], s=SCATTER_SIZE, c=self.coords_colors)
plot['ax'].set_title('Joint 3D Plot')
else:
plot['coords_scatter'].set_offsets(X)
set_pi_axis_labels(plot['ax'], labels)
else:
if updating_axes:
plot['ax'] = self.fig.add_subplot(2, n_plots+1, n_plots+3+plot_idx)
plot['coords_scatter'] = plot['ax'].scatter(X[:, 0], X[:, 1], s=SCATTER_SIZE, c=self.coords_colors)
else:
plot['coords_scatter'].set_offsets(X)
if len(labels) > 1:
set_pi_axis_labels(plot['ax'], labels)
plot['ax'].set_title('Joint 2D Plot')
else:
plot['ax'].set_xlabel('')
plot['ax'].set_xlim([-1.1, 1.1])
plot['ax'].set_ylabel('')
plot['ax'].set_ylim([-1.1, 1.1])
plot['ax'].set_title(labels[0])
else:
X = np.array([])
if plot['axis_2d']:
X = -2*np.ones((self.X_.shape[0], 2))
else:
X = -2*np.ones((self.X_.shape[0], 3))
plot['coords_scatter'].set_offsets(X)
def recompute_coords_torii(self, clicked = []):
"""
Toggle including a cocycle from a set of points in the
persistence diagram, and update the circular coordinates
joint torii plots accordingly
Parameters
----------
clicked: list of int
Indices to toggle
"""
EMCoords.recompute_coords(self, clicked)
# Save away circular coordinates
self.coords_info[self.selected_coord_idx]['selected'] = self.selected
self.coords_info[self.selected_coord_idx]['coords'] = self.coords
self.update_plot_torii(self.selected_coord_idx)
def onpick_torii(self, evt):
"""
Handle a pick even for the torii plot
"""
if evt.artist == self.dgmplot:
## Step 1: Highlight point on persistence diagram
clicked = set(evt.ind.tolist())
self.recompute_coords_torii(clicked)
self.ax_persistence.figure.canvas.draw()
self.fig.canvas.draw()
return True
def select_torii_coord(self, idx):
"""
Select a particular circular coordinate plot and un-select others
Parameters
----------
idx: int
Index of the plot to select
"""
for i, coordsi in enumerate(self.coords_info):
if i == idx:
self.selected_coord_idx = idx
coordsi = self.coords_info[idx]
# Swap in the appropriate GUI objects for selection
self.selected = coordsi['selected']
self.selected_cocycle_text = coordsi['selected_cocycle_text']
self.perc_slider = coordsi['perc_slider']
self.partunity_selector = coordsi['partunity_selector']
self.persistence_text_labels = coordsi['persistence_text_labels']
self.coords = coordsi['coords']
coordsi['button'].color = 'red'
for j in np.array(list(self.selected)):
self.persistence_text_labels[j].set_text("%i"%j)
idxs = np.array(list(self.selected), dtype=int)
if idxs.size > 0:
self.selected_plot.set_offsets(self.dgm1_lifetime[idxs, :])
else:
self.selected_plot.set_offsets(np.array([[np.nan]*2]))
else:
coordsi['button'].color = 'gray'
self.ax_persistence.set_title("H1 Cocycle Selection: Coordinate {}".format(idx))
def on_perc_slider_move_torii(self, evt, idx):
"""
        React to a change in coverage for
a particular circular coordinate, and recompute the
coordinates if they aren't trivial
"""
if not self.selected_coord_idx == idx:
self.select_torii_coord(idx)
if len(self.selected) > 0:
self.recompute_coords_torii()
def on_partunity_selector_change_torii(self, evt, idx):
"""
React to a change in partition of unity type for
a particular circular coordinate, and recompute the
coordinates if they aren't trivial
"""
if not self.selected_coord_idx == idx:
self.select_torii_coord(idx)
        if len(self.selected) > 0:
self.recompute_coords_torii()
def on_click_torii_button(self, evt, idx):
"""
React to a click event, and change the selected
circular coordinate if necessary
"""
if not self.selected_coord_idx == idx:
self.select_torii_coord(idx)
def plot_torii(self, f, using_jupyter=True, zoom=1, dpi=None, coords_info=2, plots_in_one = 2, lowerleft_plot = None, lowerleft_3d=False):
"""
Do an interactive plot of circular coordinates, where points are drawn on S1,
on S1 x S1, or S1 x S1 x S1
Parameters
----------
f: Display information for the points
            One of three options:
1) A scalar function with which to color the points, represented
as a 1D array
2) A list of colors with which to color the points, specified as
an Nx3 array
3) A list of images to place at each location
using_jupyter: boolean
Whether this is an interactive plot in jupyter
zoom: int
If using patches, the factor by which to zoom in on them
dpi: int
            Dots per inch for the figure
coords_info: Information about how to perform circular coordinates. There will
            be as many plots as the ceiling of the number of circular coordinates
            divided by plots_in_one, and the coordinates in each plot are shown jointly.
This parameter is one of two options
1) An int specifying the number of different circular coordinate
functions to compute
2) A list of dictionaries with pre-specified initial parameters for
each circular coordinate. Each dictionary has the following keys:
{
'cocycle_reps': dictionary
A dictionary of cocycle representatives, with the key
as the cocycle index, and the value as the coefficient
TODO: Finish update to support this instead of a set
'perc': float
The percent coverage to start with,
'partunity_fn': (dist_land_data, r_cover) -> phi
The partition of unity function to start with
}
plots_in_one: int
The max number of circular coordinates to put in one plot
lowerleft_plot: function(matplotlib axis)
A function that plots something in the lower left
lowerleft_3d: boolean
Whether the lower left plot is 3D
"""
if plots_in_one < 2 or plots_in_one > 3:
raise Exception("Cannot be fewer than 2 or more than 3 circular coordinates in one plot")
self.plots_in_one = plots_in_one
self.f = f
## Step 1: Figure out how many plots are needed to accommodate all
## circular coordinates
n_plots = 1
if type(coords_info) is int:
n_plots = int(np.ceil(coords_info/plots_in_one))
coords_info = []
else:
n_plots = int(np.ceil(len(coords_info)/plots_in_one))
while len(coords_info) < n_plots*plots_in_one:
coords_info.append({'selected':set([]), 'perc':0.99, 'partunity_fn':partunity_linear})
self.selecting_idx = 0 # Index of circular coordinate which is currently being selected
if using_jupyter and in_notebook():
import matplotlib
matplotlib.use("nbAgg")
if not dpi:
dpi = compute_dpi(n_plots+1, 2)
fig = plt.figure(figsize=(DREIMAC_FIG_RES*(n_plots+1), DREIMAC_FIG_RES*2), dpi=dpi)
self.dpi = dpi
self.fig = fig
## Step 2: Setup H1 plot, along with initially empty text labels
## for each persistence point
self.ax_persistence = fig.add_subplot(2, n_plots+1, 1)
self.setup_ax_persistence()
fig.canvas.mpl_connect('pick_event', self.onpick_torii)
## Step 2: Setup windows for choosing coverage / partition of unity type
## and for displaying the chosen cocycle for each circular coordinate.
## Also store variables for selecting cocycle representatives
width = 1/(n_plots+1)
height = 1/plots_in_one
partunity_keys = tuple(PARTUNITY_FNS.keys())
for i in range(n_plots):
xstart = width*(i+1.4)
for j in range(plots_in_one):
idx = i*plots_in_one+j
# Setup plots and state for a particular circular coordinate
ystart = 0.8 - 0.4*height*j
coords_info[idx]['perc_slider'], coords_info[idx]['partunity_selector'], coords_info[idx]['selected_cocycle_text'], coords_info[idx]['button'] = self.setup_param_chooser_gui(fig, xstart, ystart, width, height, coords_info[idx], idx)
coords_info[idx]['perc_slider'].on_changed(callback_factory(self.on_perc_slider_move_torii, idx))
coords_info[idx]['partunity_selector'].on_clicked = callback_factory(self.on_partunity_selector_change_torii, idx)
coords_info[idx]['button'].on_clicked(callback_factory(self.on_click_torii_button, idx))
dgm = self.dgm1_lifetime
coords_info[idx]['persistence_text_labels'] = [self.ax_persistence.text(dgm[i, 0], dgm[i, 1], '') for i in range(dgm.shape[0])]
coords_info[idx]['idx'] = idx
coords_info[idx]['coords'] = np.zeros(self.X_.shape[0])
self.coords_info = coords_info
## Step 3: Figure out colors of coordinates
self.coords_colors = None
if not (type(f) is list):
# Figure out colormap if images aren't passed along
self.coords_colors = f
if f.size == self.X_.shape[0]:
# Scalar function, so need to apply colormap
c = plt.get_cmap('magma_r')
fscaled = f - np.min(f)
fscaled = fscaled/np.max(fscaled)
C = c(np.array(np.round(fscaled*255), dtype=np.int32))
self.coords_colors = C[:, 0:3]
## Step 4: Setup plots
plots = []
self.n_plots = n_plots
for i in range(n_plots):
# 2D by default, but can change to 3D later
ax = fig.add_subplot(2, n_plots+1, n_plots+3+i)
pix = -2*np.ones(self.X_.shape[0])
plot = {}
plot['ax'] = ax
plot['coords_scatter'] = ax.scatter(pix, pix, s=SCATTER_SIZE, c=self.coords_colors) # Scatterplot for circular coordinates
ax.set_xlim([-1.1, 1.1])
ax.set_ylim([-1.1, 1.1])
plot['axis_2d'] = True
plot['patch_boxes'] = [] # Array of image patch display objects
plots.append(plot)
self.plots = plots
## Step 5: Initialize plots with information passed along
for i in reversed(range(len(coords_info))):
self.select_torii_coord(i)
self.recompute_coords_torii([])
## Step 6: Plot something in the lower left corner if desired
if lowerleft_plot:
if lowerleft_3d:
ax = fig.add_subplot(2, n_plots+1, n_plots+2, projection='3d')
else:
ax = fig.add_subplot(2, n_plots+1, n_plots+2)
lowerleft_plot(ax)
plt.show()
def do_two_circle_test():
"""
Test interactive plotting with two noisy circles of different sizes
"""
prime = 41
np.random.seed(2)
N = 500
X = np.zeros((N*2, 2))
t = np.linspace(0, 1, N+1)[0:N]**1.2
t = 2*np.pi*t
X[0:N, 0] = np.cos(t)
X[0:N, 1] = np.sin(t)
X[N::, 0] = 2*np.cos(t) + 4
X[N::, 1] = 2*np.sin(t) + 4
perm = np.random.permutation(X.shape[0])
X = X[perm, :]
X = X + 0.2*np.random.randn(X.shape[0], 2)
f = np.concatenate((t, t + np.max(t)))
f = f[perm]
fscaled = f - np.min(f)
fscaled = fscaled/np.max(fscaled)
c = plt.get_cmap('magma_r')
C = c(np.array(np.round(fscaled*255), dtype=np.int32))[:, 0:3]
#plt.scatter(X[:, 0], X[:, 1], s=SCATTER_SIZE, c=C)
cc = CircularCoords(X, 100, prime = prime)
#cc.plot_dimreduced(X, using_jupyter=False)
cc.plot_torii(f, coords_info=2, plots_in_one=3)
def do_torus_test():
"""
Test interactive plotting with a torus
"""
prime = 41
np.random.seed(2)
N = 10000
R = 5
r = 2
X = np.zeros((N, 3))
s = np.random.rand(N)*2*np.pi
t = np.random.rand(N)*2*np.pi
X[:, 0] = (R + r*np.cos(s))*np.cos(t)
X[:, 1] = (R + r*np.cos(s))*np.sin(t)
X[:, 2] = r*np.sin(s)
cc = CircularCoords(X, 100, prime=prime)
f = s
def plot_torus(ax):
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=f, cmap='magma_r')
set_3dplot_equalaspect(ax, X)
cc.plot_torii(f, coords_info=2, plots_in_one=2, lowerleft_plot=plot_torus, lowerleft_3d=True)
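
# Minimal entry point (sketch): run one of the interactive demos above when this
# module is executed directly. Requires an interactive matplotlib backend.
if __name__ == '__main__':
    do_two_circle_test()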
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"numpy.floor",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.round",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.ceil",
"numpy.triu_indices",
"numpy.mod",
"numpy.min",
"matplotlib.use",
"numpy.cos",
"numpy.random.permutation",
"scipy.sparse.linalg.lsqr",
"numpy.zeros",
"numpy.array",
"numpy.random.rand",
"warnings.warn",
"numpy.sqrt"
] |
[((26405, 26422), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (26419, 26422), True, 'import numpy as np\n'), ((26443, 26463), 'numpy.zeros', 'np.zeros', (['(N * 2, 2)'], {}), '((N * 2, 2))\n', (26451, 26463), True, 'import numpy as np\n'), ((26537, 26546), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (26543, 26546), True, 'import numpy as np\n'), ((26563, 26572), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (26569, 26572), True, 'import numpy as np\n'), ((26648, 26681), 'numpy.random.permutation', 'np.random.permutation', (['X.shape[0]'], {}), '(X.shape[0])\n', (26669, 26681), True, 'import numpy as np\n'), ((26882, 26905), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (26894, 26905), True, 'import matplotlib.pyplot as plt\n'), ((27281, 27298), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (27295, 27298), True, 'import numpy as np\n'), ((27341, 27357), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (27349, 27357), True, 'import numpy as np\n'), ((2406, 2422), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (2414, 2422), True, 'import numpy as np\n'), ((3340, 3363), 'numpy.array', 'np.array', (['cocycle[:, 2]'], {}), '(cocycle[:, 2])\n', (3348, 3363), True, 'import numpy as np\n'), ((3416, 3452), 'numpy.zeros', 'np.zeros', (['(n_landmarks, n_landmarks)'], {}), '((n_landmarks, n_landmarks))\n', (3424, 3452), True, 'import numpy as np\n'), ((3966, 3987), 'numpy.zeros', 'np.zeros', (['(NEdges, 2)'], {}), '((NEdges, 2))\n', (3974, 3987), True, 'import numpy as np\n'), ((4432, 4453), 'numpy.zeros', 'np.zeros', (['(NEdges, 3)'], {}), '((NEdges, 3))\n', (4440, 4453), True, 'import numpy as np\n'), ((4748, 4777), 'numpy.zeros_like', 'np.zeros_like', (['dist_land_data'], {}), '(dist_land_data)\n', (4761, 4777), True, 'import numpy as np\n'), ((4967, 4981), 'numpy.sum', 'np.sum', (['phi', '(0)'], {}), '(phi, 0)\n', (4973, 4981), True, 'import numpy as np\n'), ((4998, 5016), 'numpy.sum', 'np.sum', (['(denom == 0)'], {}), '(denom == 0)\n', (5004, 5016), True, 'import numpy as np\n'), ((5298, 5313), 'numpy.argmax', 'np.argmax', (['U', '(0)'], {}), '(U, 0)\n', (5307, 5313), True, 'import numpy as np\n'), ((5478, 5514), 'numpy.zeros', 'np.zeros', (['(n_landmarks, n_landmarks)'], {}), '((n_landmarks, n_landmarks))\n', (5486, 5514), True, 'import numpy as np\n'), ((5527, 5564), 'numpy.array', 'np.array', (['theta[:, 0]'], {'dtype': 'np.int64'}), '(theta[:, 0], dtype=np.int64)\n', (5535, 5564), True, 'import numpy as np\n'), ((5579, 5616), 'numpy.array', 'np.array', (['theta[:, 1]'], {'dtype': 'np.int64'}), '(theta[:, 1], dtype=np.int64)\n', (5587, 5616), True, 'import numpy as np\n'), ((5928, 5968), 'numpy.mod', 'np.mod', (['(2 * np.pi * class_map)', '(2 * np.pi)'], {}), '(2 * np.pi * class_map, 2 * np.pi)\n', (5934, 5968), True, 'import numpy as np\n'), ((9551, 9618), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(DREIMAC_FIG_RES * 2, DREIMAC_FIG_RES)', 'dpi': 'dpi'}), '(figsize=(DREIMAC_FIG_RES * 2, DREIMAC_FIG_RES), dpi=dpi)\n', (9561, 9618), True, 'import matplotlib.pyplot as plt\n'), ((11293, 11303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11301, 11303), True, 'import matplotlib.pyplot as plt\n'), ((22477, 22564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(DREIMAC_FIG_RES * (n_plots + 1), DREIMAC_FIG_RES * 2)', 'dpi': 'dpi'}), '(figsize=(DREIMAC_FIG_RES * (n_plots + 1), DREIMAC_FIG_RES * 2),\n dpi=dpi)\n', (22487, 22564), True, 'import matplotlib.pyplot 
as plt\n'), ((26260, 26270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26268, 26270), True, 'import matplotlib.pyplot as plt\n'), ((26826, 26835), 'numpy.min', 'np.min', (['f'], {}), '(f)\n', (26832, 26835), True, 'import numpy as np\n'), ((26858, 26873), 'numpy.max', 'np.max', (['fscaled'], {}), '(fscaled)\n', (26864, 26873), True, 'import numpy as np\n'), ((27458, 27467), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (27464, 27467), True, 'import numpy as np\n'), ((27500, 27509), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (27506, 27509), True, 'import numpy as np\n'), ((27526, 27535), 'numpy.sin', 'np.sin', (['s'], {}), '(s)\n', (27532, 27535), True, 'import numpy as np\n'), ((2878, 2903), 'numpy.min', 'np.min', (['dist_land_data', '(1)'], {}), '(dist_land_data, 1)\n', (2884, 2903), True, 'import numpy as np\n'), ((3599, 3621), 'numpy.arange', 'np.arange', (['n_landmarks'], {}), '(n_landmarks)\n', (3608, 3621), True, 'import numpy as np\n'), ((3623, 3645), 'numpy.arange', 'np.arange', (['n_landmarks'], {}), '(n_landmarks)\n', (3632, 3645), True, 'import numpy as np\n'), ((3661, 3692), 'numpy.triu_indices', 'np.triu_indices', (['n_landmarks', '(1)'], {}), '(n_landmarks, 1)\n', (3676, 3692), True, 'import numpy as np\n'), ((3708, 3739), 'numpy.triu_indices', 'np.triu_indices', (['n_landmarks', '(1)'], {}), '(n_landmarks, 1)\n', (3723, 3739), True, 'import numpy as np\n'), ((3755, 3786), 'numpy.triu_indices', 'np.triu_indices', (['n_landmarks', '(1)'], {}), '(n_landmarks, 1)\n', (3770, 3786), True, 'import numpy as np\n'), ((4190, 4205), 'numpy.ones', 'np.ones', (['NEdges'], {}), '(NEdges)\n', (4197, 4205), True, 'import numpy as np\n'), ((4402, 4412), 'scipy.sparse.linalg.lsqr', 'lsqr', (['A', 'b'], {}), '(A, b)\n', (4406, 4412), False, 'from scipy.sparse.linalg import lsqr\n'), ((5051, 5120), 'warnings.warn', 'warnings.warn', (["('There are %i point not covered by a landmark' % nzero)"], {}), "('There are %i point not covered by a landmark' % nzero)\n", (5064, 5120), False, 'import warnings\n'), ((5663, 5685), 'numpy.mod', 'np.mod', (['(theta + 0.5)', '(1)'], {}), '(theta + 0.5, 1)\n', (5669, 5685), True, 'import numpy as np\n'), ((6287, 6310), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (6299, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6347), 'numpy.min', 'np.min', (['thetas'], {}), '(thetas)\n', (6339, 6347), True, 'import numpy as np\n'), ((6370, 6384), 'numpy.max', 'np.max', (['thetas'], {}), '(thetas)\n', (6376, 6384), True, 'import numpy as np\n'), ((9457, 9480), 'matplotlib.use', 'matplotlib.use', (['"""nbAgg"""'], {}), "('nbAgg')\n", (9471, 9480), False, 'import matplotlib\n'), ((12651, 12673), 'numpy.floor', 'np.floor', (['(circ_idx / N)'], {}), '(circ_idx / N)\n', (12659, 12673), True, 'import numpy as np\n'), ((13342, 13354), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13350, 13354), True, 'import numpy as np\n'), ((15418, 15430), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15426, 15430), True, 'import numpy as np\n'), ((22375, 22398), 'matplotlib.use', 'matplotlib.use', (['"""nbAgg"""'], {}), "('nbAgg')\n", (22389, 22398), False, 'import matplotlib\n'), ((26470, 26494), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(N + 1)'], {}), '(0, 1, N + 1)\n', (26481, 26494), True, 'import numpy as np\n'), ((26591, 26600), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (26597, 26600), True, 'import numpy as np\n'), ((26623, 26632), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (26629, 26632), 
True, 'import numpy as np\n'), ((26717, 26747), 'numpy.random.randn', 'np.random.randn', (['X.shape[0]', '(2)'], {}), '(X.shape[0], 2)\n', (26732, 26747), True, 'import numpy as np\n'), ((27366, 27383), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (27380, 27383), True, 'import numpy as np\n'), ((27400, 27417), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (27414, 27417), True, 'import numpy as np\n'), ((4254, 4264), 'numpy.sqrt', 'np.sqrt', (['W'], {}), '(W)\n', (4261, 4264), True, 'import numpy as np\n'), ((6415, 6437), 'numpy.round', 'np.round', (['(thetas * 255)'], {}), '(thetas * 255)\n', (6423, 6437), True, 'import numpy as np\n'), ((6741, 6757), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (6749, 6757), True, 'import numpy as np\n'), ((21895, 21930), 'numpy.ceil', 'np.ceil', (['(coords_info / plots_in_one)'], {}), '(coords_info / plots_in_one)\n', (21902, 21930), True, 'import numpy as np\n'), ((24368, 24394), 'numpy.zeros', 'np.zeros', (['self.X_.shape[0]'], {}), '(self.X_.shape[0])\n', (24376, 24394), True, 'import numpy as np\n'), ((24778, 24801), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (24790, 24801), True, 'import matplotlib.pyplot as plt\n'), ((25270, 25295), 'numpy.ones', 'np.ones', (['self.X_.shape[0]'], {}), '(self.X_.shape[0])\n', (25277, 25295), True, 'import numpy as np\n'), ((26780, 26789), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (26786, 26789), True, 'import numpy as np\n'), ((26925, 26948), 'numpy.round', 'np.round', (['(fscaled * 255)'], {}), '(fscaled * 255)\n', (26933, 26948), True, 'import numpy as np\n'), ((27447, 27456), 'numpy.cos', 'np.cos', (['s'], {}), '(s)\n', (27453, 27456), True, 'import numpy as np\n'), ((27489, 27498), 'numpy.cos', 'np.cos', (['s'], {}), '(s)\n', (27495, 27498), True, 'import numpy as np\n'), ((13606, 13622), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (13614, 13622), True, 'import numpy as np\n'), ((15486, 15516), 'numpy.ones', 'np.ones', (['(self.X_.shape[0], 2)'], {}), '((self.X_.shape[0], 2))\n', (15493, 15516), True, 'import numpy as np\n'), ((15558, 15588), 'numpy.ones', 'np.ones', (['(self.X_.shape[0], 3)'], {}), '((self.X_.shape[0], 3))\n', (15565, 15588), True, 'import numpy as np\n'), ((24832, 24841), 'numpy.min', 'np.min', (['f'], {}), '(f)\n', (24838, 24841), True, 'import numpy as np\n'), ((24876, 24891), 'numpy.max', 'np.max', (['fscaled'], {}), '(fscaled)\n', (24882, 24891), True, 'import numpy as np\n'), ((13476, 13492), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (13484, 13492), True, 'import numpy as np\n'), ((17992, 18016), 'numpy.array', 'np.array', (['[[np.nan] * 2]'], {}), '([[np.nan] * 2])\n', (18000, 18016), True, 'import numpy as np\n'), ((24923, 24946), 'numpy.round', 'np.round', (['(fscaled * 255)'], {}), '(fscaled * 255)\n', (24931, 24946), True, 'import numpy as np\n'), ((13533, 13547), 'numpy.cos', 'np.cos', (['coords'], {}), '(coords)\n', (13539, 13547), True, 'import numpy as np\n'), ((13549, 13563), 'numpy.sin', 'np.sin', (['coords'], {}), '(coords)\n', (13555, 13563), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 09:52:31 2021
@author: HaoLI
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 8 11:48:41 2021
@author: HaoLI
"""
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data.sampler import WeightedRandomSampler
import torch.utils.data as data_utils
import pandas as pd
import numpy as np
import os #for working directory
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score # 计算roc和auc
import time
import datetime
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import random
use_gpu = torch.cuda.is_available()
print("GPU",use_gpu)
list_rec = [] # record hyperparameters and results for each run
randomseed = 22
random.seed(randomseed)
layer1=196
layer2=196
oversample_ratio=0.5
training_epochs = 80
minibatch_size = 5000
learning_rate=2e-4
penalty=2 #p=1 for L1; p=0 for L2, weight_decay only for L2; p=2 for default. The exponent of the norm (2-norm by default); p=0 gives L2 regularization, p=1 gives L1 regularization
weight_decay=0.0125 #weight_decay is the L2 regularization term
dropout=0.0
#os.getcwd()
os.chdir('/Users/HaoLI/Stata/credit/data')
df = pd.read_csv('data1210rename_use.csv')
col_names = list(df.columns.values[3:30])
col_names.remove('default_geq_1') # X must not contain the target variable y
col_names.remove('default_geq_2')
col_names.remove('default_geq_3')
base_col_names = col_names[0:13] # for baseline model: bank data plus morning/noon/evening consumption data
df_fillna = df.fillna(0) # fill NA with 0; missing consumption is counted as 0
X = df_fillna[col_names]
y = df_fillna.default_geq_1 # Target variable
X_base = df_fillna[base_col_names]
y_base = df_fillna.default_geq_1 # Target variable
layer0=len(X.columns) # number of neurons in the input layer
#min_max_scaler = MinMaxScaler()
#X = min_max_scaler.fit_transform(X)
sc = StandardScaler()# transform X into standard normal distribution for each column. X from dataframe to array
X = sc.fit_transform(X)
ros = RandomOverSampler(random_state=0)
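# The nested loops below sweep hyperparameter values; with the current one-element
# lists the grid collapses to a single configuration, but more values can be added
# to each list to run a fuller grid search.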
for layer1 in [196]:
for layer2 in [196]:
for weight_decay in [0.0125]:
for training_epochs in [80]:
for minibatch_size in [5000]:
for random_state in [18]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=random_state) # data types are dataframe
X_train, y_train = ros.fit_resample(X_train, y_train)
y_train = y_train.values
y_test = np.array(y_test)
# construct NN
class CreditNet(nn.Module):
                            def __init__(self): #p=1 for L1; p=0 for L2, weight_decay only for L2; p=2 for default. The exponent of the norm (2-norm by default); p=0 gives L2 regularization, p=1 gives L1 regularization
super().__init__()
self.fc1 = nn.Linear(layer0, layer1) # fc: fully connected
#self.bn1 = nn.BatchNorm1d(num_features=64, momentum=0.1) #default momentum = 0.1
self.fc2 = nn.Linear(layer1, layer2)
#self.fc3 = nn.Linear(layer2, layer3)
#self.bn3 = nn.BatchNorm1d(num_features=32)
#self.fc4 = nn.Linear(28, 24)
self.fc5 = nn.Linear(layer2, 1)
# x represents our data
def forward(self, x): # x is the data
x = F.relu(self.fc1(x)) # first x pass through
#x = self.bn1(x)
x = F.dropout(x, p=dropout)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=dropout)
#x = F.relu(self.fc3(x))
#x = self.bn3(x)
#x = F.dropout(x, p=0.25)
#x = F.relu(self.fc4(x))
#x = F.softmax(self.fc5(x),dim=0)
x = torch.sigmoid(self.fc5(x))
return x
                        net = CreditNet().double() # .double() makes the data type float; in PyTorch only floating-point tensors can carry gradients
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
                        #or device = torch.device("cuda:0")
device1 = torch.device("cuda:1")
if torch.cuda.is_available():
#net = net.cuda()
                            net = net.to(device1) # move the net to the GPU with index 1
                            #or model.to(device1) # to use the GPU with index 1
########### Train #################
#loss_fn = nn.CrossEntropyLoss()
#loss_fn = nn.BCELoss() # binary cross entropy loss
#optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate) # auto adjust lr, better than sgd
#optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum = 0.9) # auto adjust lr, better than sgd; sgd stable
                        # Use the Adam optimizer with weight_decay=0.0 for no regularization
                        # Use the Adam optimizer with weight_decay=10.0 for a regularization weight lambda = 10.0
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=weight_decay) # auto adjust lr, better than sgd
# if we use L2 regularization, apply the following line
#optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=weight_decay)
X_train = torch.from_numpy(X_train) # transfer to Tensor, no need to add .double(), because it is already float data type
                        y_train = torch.from_numpy(y_train).double() # .double() makes the data type float; in PyTorch only floating-point tensors can carry gradients
#weights_tensor = torch.from_numpy(overwt_arr_y_lossfn)
if torch.cuda.is_available():
X_train = X_train.to(device1)
y_train = y_train.to(device1)
#weights_tensor = weights_tensor.to(device1)
                        train = data_utils.TensorDataset(X_train, y_train) # adjust format: pack X and Y together for training
                        train_loader = data_utils.DataLoader(train, batch_size=minibatch_size, shuffle=True) # DataLoader is the usual way to feed training data in PyTorch; batch_size sets how many rows are fed to the network at a time, and shuffle (default False) reshuffles the data each epoch so the samples are more independent
                        # !tensorboard --logdir './runs' # using this magic command in a remote notebook may prevent the TensorBoard HTTP service from starting
from tensorboardX import SummaryWriter
writer = SummaryWriter()
#%reload_ext tensorboard
# Load the TensorBoard notebook extension
for epoch in range(training_epochs):
y_train_labels = [] # create an empty array
y_train_pred = []
                            for b, data in enumerate(train_loader, 0): # iterate over minibatches
inputs, labels = data#.cuda() # inputs and labels follows that when loaded
if torch.cuda.is_available():
inputs = inputs.to(device1)
labels = labels.to(device1)
#weights = weights.to(device1)
#print("inputs shape", inputs.shape, labels.shape)
#print("inputs", inputs)
#print("labels", labels)
optimizer.zero_grad() #reset gradients, i.e. zero the gradient buffers
y_pred = net(inputs) # obtain the predicted values, a Tensor
y_pred = y_pred.view(y_pred.size()[0])
#print("y_pred", y_pred)
y_train_labels = np.append(y_train_labels, labels.cpu().numpy())
y_train_pred = np.append(y_train_pred,y_pred.detach().cpu().numpy())
loss_fn = nn.BCELoss() # binary cross entropy loss, with weights
if torch.cuda.is_available():
loss_fn = loss_fn.to(device1)
loss = loss_fn(y_pred, labels) # 2 tensors in, 1 value out
loss.backward() # backward pass
optimizer.step() # update weights
                                if b % 100 == 0: # every 100 batches, log the loss
#print('Epochs: {}, batch: {} loss: {}'.format(epoch, b, loss))
writer.add_scalar('NN_oversample',loss, epoch)
writer.close()
                        #%tensorboard --logdir # point TensorBoard at the directory to read logs from
X_test = torch.from_numpy(X_test) # check the tested results
y_test = torch.from_numpy(y_test).double()
if torch.cuda.is_available():
X_test = X_test.to(device1)
y_test = y_test.to(device1)
test = data_utils.TensorDataset(X_test, y_test)
test_loader = data_utils.DataLoader(test, batch_size=minibatch_size, shuffle=True)
y_test_labels = []
y_test_pred = []
                        with torch.no_grad(): # context manager: gradients are not tracked inside this block
for data in test_loader:
inputs, labels = data
#inputs = inputs.to(device1)
#labels = labels.to(device1)
#print("inputs", inputs)
#print("labels", labels)
outputs = net(inputs)
outputs = outputs.view(outputs.size()[0])
#print("outputs", outputs)
#print("predicted", predicted.numpy())
y_test_labels = np.append(y_test_labels,labels.cpu().numpy())
y_test_pred = np.append(y_test_pred,outputs.cpu().numpy())
#print("Y_test_labels", Y_test_labels)
#print("Y_test_pred", Y_test_pred)
#### plot ROC, compute AUC ###
                        # y_true holds the ground-truth labels, y_score the predicted probabilities produced by the network
test_fpr, test_tpr, te_thresholds = roc_curve(y_true = y_test_labels, y_score = y_test_pred)
#print("AUC TEST = ", auc(test_fpr, test_tpr))
train_fpr, train_tpr, tr_thresholds = roc_curve(y_true = y_train_labels, y_score = y_train_pred) # /w_ytrain, such that return the array to 0,1 array
#print("AUC TRAIN = ", auc(train_fpr, train_tpr))
#print('resample: {}, Epochs: {}, batch size: {}, '.format(oversample_ratio, training_epochs, minibatch_size))
#print(net)
plt.grid()
plt.plot(train_fpr, train_tpr, label=" AUC TRAIN ="+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label=" AUC TEST ="+str(auc(test_fpr, test_tpr)))
plt.plot([0,1],[0,1],'g--')
plt.legend()
plt.xlabel("True Positive Rate")
plt.ylabel("False Positive Rate")
t='''
training_epochs=%s, minibatch_size=%s,
learning_rate=%s, penalty=L%s, weight_decay=%s,
dropout=%s, 24=>%s=>%s=>1, myoversampling, random_state=%s,
randomseed=%s
'''%(training_epochs,minibatch_size,learning_rate,
penalty, weight_decay, dropout, layer1, layer2, random_state,randomseed)
plt.title("AUC(Neural Network ROC curve)"+t)
plt.grid(color='black', linestyle='-', linewidth=0.5)
time1 = datetime.datetime.now()
                        # format the current time and use it as part of the file name
time2 = time1.strftime('%Y-%m-%d-%H%M%S')
plt.savefig("/Users/HaoLI/Stata/credit/out/ROC figure/Figure_"+time2+".png", bbox_inches = 'tight')
plt.show()
list_rec.append([auc(train_fpr, train_tpr), auc(test_fpr, test_tpr),
training_epochs,minibatch_size,learning_rate,
penalty, weight_decay, dropout, layer1, layer2,
random_state, randomseed
])
list_rec_1 = list_rec
df = pd.DataFrame(list_rec, columns = ['IS_AUC','OOS_AUC','training_epochs',
'minibatch_size','learning_rate',
'penalty', 'weight_decay', 'dropout',
'layer1', 'layer2', 'random_state','randomseed'])
df.to_csv('NN_adj.csv')
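# Optional follow-up (sketch): rank the recorded runs by out-of-sample AUC to choose
# hyperparameters. Column names follow the DataFrame constructed above.
best = df.sort_values('OOS_AUC', ascending=False).head()
print(best[['IS_AUC', 'OOS_AUC', 'layer1', 'layer2', 'weight_decay']])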
|
[
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"torch.nn.functional.dropout",
"torch.utils.data.TensorDataset",
"torch.device",
"torch.no_grad",
"os.chdir",
"pandas.DataFrame",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"random.seed",
"torch.nn.Linear",
"datetime.datetime.now",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"torch.from_numpy",
"tensorboardX.SummaryWriter",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"imblearn.over_sampling.RandomOverSampler",
"sklearn.metrics.auc",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((851, 876), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (874, 876), False, 'import torch, torch.nn as nn\n'), ((934, 957), 'random.seed', 'random.seed', (['randomseed'], {}), '(randomseed)\n', (945, 957), False, 'import random\n'), ((1253, 1295), 'os.chdir', 'os.chdir', (['"""/Users/HaoLI/Stata/credit/data"""'], {}), "('/Users/HaoLI/Stata/credit/data')\n", (1261, 1295), False, 'import os\n'), ((1301, 1338), 'pandas.read_csv', 'pd.read_csv', (['"""data1210rename_use.csv"""'], {}), "('data1210rename_use.csv')\n", (1312, 1338), True, 'import pandas as pd\n'), ((1885, 1901), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1899, 1901), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2024, 2057), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2041, 2057), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((13406, 13609), 'pandas.DataFrame', 'pd.DataFrame', (['list_rec'], {'columns': "['IS_AUC', 'OOS_AUC', 'training_epochs', 'minibatch_size', 'learning_rate',\n 'penalty', 'weight_decay', 'dropout', 'layer1', 'layer2',\n 'random_state', 'randomseed']"}), "(list_rec, columns=['IS_AUC', 'OOS_AUC', 'training_epochs',\n 'minibatch_size', 'learning_rate', 'penalty', 'weight_decay', 'dropout',\n 'layer1', 'layer2', 'random_state', 'randomseed'])\n", (13418, 13609), True, 'import pandas as pd\n'), ((2334, 2398), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': 'random_state'}), '(X, y, test_size=0.3, random_state=random_state)\n', (2350, 2398), False, 'from sklearn.model_selection import train_test_split\n'), ((2586, 2602), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2594, 2602), True, 'import numpy as np\n'), ((4590, 4612), 'torch.device', 'torch.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (4602, 4612), False, 'import torch, torch.nn as nn\n'), ((4641, 4666), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4664, 4666), False, 'import torch, torch.nn as nn\n'), ((5838, 5863), 'torch.from_numpy', 'torch.from_numpy', (['X_train'], {}), '(X_train)\n', (5854, 5863), False, 'import torch, torch.nn as nn\n'), ((6189, 6214), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6212, 6214), False, 'import torch, torch.nn as nn\n'), ((6438, 6480), 'torch.utils.data.TensorDataset', 'data_utils.TensorDataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (6462, 6480), True, 'import torch.utils.data as data_utils\n'), ((6549, 6618), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['train'], {'batch_size': 'minibatch_size', 'shuffle': '(True)'}), '(train, batch_size=minibatch_size, shuffle=True)\n', (6570, 6618), True, 'import torch.utils.data as data_utils\n'), ((6934, 6949), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (6947, 6949), False, 'from tensorboardX import SummaryWriter\n'), ((9267, 9291), 'torch.from_numpy', 'torch.from_numpy', (['X_test'], {}), '(X_test)\n', (9283, 9291), False, 'import torch, torch.nn as nn\n'), ((9414, 9439), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9437, 9439), False, 'import torch, torch.nn as nn\n'), ((9585, 9625), 'torch.utils.data.TensorDataset', 'data_utils.TensorDataset', (['X_test', 'y_test'], {}), '(X_test, y_test)\n', (9609, 9625), True, 'import torch.utils.data as data_utils\n'), ((9664, 9732), 
'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['test'], {'batch_size': 'minibatch_size', 'shuffle': '(True)'}), '(test, batch_size=minibatch_size, shuffle=True)\n', (9685, 9732), True, 'import torch.utils.data as data_utils\n'), ((11063, 11115), 'sklearn.metrics.roc_curve', 'roc_curve', ([], {'y_true': 'y_test_labels', 'y_score': 'y_test_pred'}), '(y_true=y_test_labels, y_score=y_test_pred)\n', (11072, 11115), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((11253, 11307), 'sklearn.metrics.roc_curve', 'roc_curve', ([], {'y_true': 'y_train_labels', 'y_score': 'y_train_pred'}), '(y_true=y_train_labels, y_score=y_train_pred)\n', (11262, 11307), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((11636, 11646), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (11644, 11646), True, 'import matplotlib.pyplot as plt\n'), ((11882, 11913), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""g--"""'], {}), "([0, 1], [0, 1], 'g--')\n", (11890, 11913), True, 'import matplotlib.pyplot as plt\n'), ((11934, 11946), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11944, 11946), True, 'import matplotlib.pyplot as plt\n'), ((11971, 12003), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (11981, 12003), True, 'import matplotlib.pyplot as plt\n'), ((12028, 12061), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (12038, 12061), True, 'import matplotlib.pyplot as plt\n'), ((12548, 12594), 'matplotlib.pyplot.title', 'plt.title', (["('AUC(Neural Network ROC curve)' + t)"], {}), "('AUC(Neural Network ROC curve)' + t)\n", (12557, 12594), True, 'import matplotlib.pyplot as plt\n'), ((12617, 12670), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(0.5)'}), "(color='black', linestyle='-', linewidth=0.5)\n", (12625, 12670), True, 'import matplotlib.pyplot as plt\n'), ((12703, 12726), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12724, 12726), False, 'import datetime\n'), ((12863, 12968), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/HaoLI/Stata/credit/out/ROC figure/Figure_' + time2 + '.png')"], {'bbox_inches': '"""tight"""'}), "('/Users/HaoLI/Stata/credit/out/ROC figure/Figure_' + time2 +\n '.png', bbox_inches='tight')\n", (12874, 12968), True, 'import matplotlib.pyplot as plt\n'), ((13011, 13021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13019, 13021), True, 'import matplotlib.pyplot as plt\n'), ((9847, 9862), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9860, 9862), False, 'import torch, torch.nn as nn\n'), ((2968, 2993), 'torch.nn.Linear', 'nn.Linear', (['layer0', 'layer1'], {}), '(layer0, layer1)\n', (2977, 2993), True, 'import torch, torch.nn as nn\n'), ((3173, 3198), 'torch.nn.Linear', 'nn.Linear', (['layer1', 'layer2'], {}), '(layer1, layer2)\n', (3182, 3198), True, 'import torch, torch.nn as nn\n'), ((3450, 3470), 'torch.nn.Linear', 'nn.Linear', (['layer2', '(1)'], {}), '(layer2, 1)\n', (3459, 3470), True, 'import torch, torch.nn as nn\n'), ((3754, 3777), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'dropout'}), '(x, p=dropout)\n', (3763, 3777), True, 'import torch.nn.functional as F\n'), ((3870, 3893), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'dropout'}), '(x, p=dropout)\n', (3879, 3893), True, 'import torch.nn.functional as F\n'), ((4460, 4485), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4483, 4485), False, 'import torch, torch.nn as nn\n'), ((5984, 6009), 'torch.from_numpy', 'torch.from_numpy', (['y_train'], {}), '(y_train)\n', (6000, 6009), False, 'import torch, torch.nn as nn\n'), ((7473, 7498), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7496, 7498), False, 'import torch, torch.nn as nn\n'), ((8457, 8469), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (8467, 8469), True, 'import torch, torch.nn as nn\n'), ((8547, 8572), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8570, 8572), False, 'import torch, torch.nn as nn\n'), ((9352, 9376), 'torch.from_numpy', 'torch.from_numpy', (['y_test'], {}), '(y_test)\n', (9368, 9376), False, 'import torch, torch.nn as nn\n'), ((13063, 13088), 'sklearn.metrics.auc', 'auc', (['train_fpr', 'train_tpr'], {}), '(train_fpr, train_tpr)\n', (13066, 13088), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((13090, 13113), 'sklearn.metrics.auc', 'auc', (['test_fpr', 'test_tpr'], {}), '(test_fpr, test_tpr)\n', (13093, 13113), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((11727, 11752), 'sklearn.metrics.auc', 'auc', (['train_fpr', 'train_tpr'], {}), '(train_fpr, train_tpr)\n', (11730, 11752), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((11832, 11855), 'sklearn.metrics.auc', 'auc', (['test_fpr', 'test_tpr'], {}), '(test_fpr, test_tpr)\n', (11835, 11855), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
import cv2
import numpy as np
import common
import imgpheno as ft
def main():
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
parser = argparse.ArgumentParser(description='Test image segmentation and splitting')
parser.add_argument('files', metavar='FILE', nargs='+', help='Input images')
parser.add_argument('-o', '--output', metavar='PATH', default=".", help='Path for output files.')
parser.add_argument('-i', '--iters', metavar='N', type=int, default=5, help="The number of grabCut iterations. Default is 5.")
parser.add_argument('-m', '--margin', metavar='N', type=int, default=1, help="The margin of the foreground rectangle from the edges. Default is 1.")
parser.add_argument('--max-size', metavar='N', type=float, help="Scale the input image down if its perimeter exceeds N. Default is no scaling.")
parser.add_argument('--min-size-out', metavar='N', type=int, default=200, help="Set the minimum perimeter for output images. Smaller images are ignored. Default is 200.")
args = parser.parse_args()
for f in args.files:
split_image(f, args)
sys.stderr.write("Output was saved to %s\n" % args.output)
return 0
def split_image(path, args):
img = cv2.imread(path)
    if img is None or img.size == 0:
sys.stderr.write("Failed to read %s. Skipping.\n" % path)
return -1
logging.info("Processing %s ..." % path)
# Scale the image down if its perimeter exceeds the maximum (if set).
img = common.scale_max_perimeter(img, args.max_size)
logging.info("Segmenting...")
# Perform segmentation.
mask = common.grabcut(img, args.iters, None, args.margin)
# Create a binary mask. Foreground is made white, background black.
bin_mask = np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')
# Split the image into segments.
segments = ft.split_by_mask(img, bin_mask)
logging.info("Exporting segments...")
for i, im in enumerate(segments):
if sum(im.shape[:2]) < args.min_size_out:
continue
name = os.path.basename(path)
name = os.path.splitext(name)
out_path = "%s_%d%s" % (name[0], i, name[1])
out_path = os.path.join(args.output, out_path)
logging.info("\t%s" % out_path)
cv2.imwrite(out_path, im)
return 0
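
# Example invocation (sketch; the script file name is assumed, flags match the
# argparse options defined in main() above):
#   python split.py --output ./out --iters 5 --max-size 1000 images/*.jpg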
if __name__ == "__main__":
main()
|
[
"os.path.abspath",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.path.basename",
"common.scale_max_perimeter",
"cv2.imwrite",
"common.grabcut",
"imgpheno.split_by_mask",
"cv2.imread",
"logging.info",
"numpy.where",
"os.path.splitext",
"sys.stderr.write",
"os.path.join"
] |
[((119, 140), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (134, 140), False, 'import os\n'), ((161, 181), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (176, 181), False, 'import os\n'), ((268, 343), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s %(message)s"""'}), "(level=logging.INFO, format='%(levelname)s %(message)s')\n", (287, 343), False, 'import logging\n'), ((358, 434), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test image segmentation and splitting"""'}), "(description='Test image segmentation and splitting')\n", (381, 434), False, 'import argparse\n'), ((1317, 1375), 'sys.stderr.write', 'sys.stderr.write', (["('Output was saved to %s\\n' % args.output)"], {}), "('Output was saved to %s\\n' % args.output)\n", (1333, 1375), False, 'import sys\n'), ((1430, 1446), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1440, 1446), False, 'import cv2\n'), ((1573, 1613), 'logging.info', 'logging.info', (["('Processing %s ...' % path)"], {}), "('Processing %s ...' % path)\n", (1585, 1613), False, 'import logging\n'), ((1699, 1745), 'common.scale_max_perimeter', 'common.scale_max_perimeter', (['img', 'args.max_size'], {}), '(img, args.max_size)\n', (1725, 1745), False, 'import common\n'), ((1751, 1780), 'logging.info', 'logging.info', (['"""Segmenting..."""'], {}), "('Segmenting...')\n", (1763, 1780), False, 'import logging\n'), ((1821, 1871), 'common.grabcut', 'common.grabcut', (['img', 'args.iters', 'None', 'args.margin'], {}), '(img, args.iters, None, args.margin)\n', (1835, 1871), False, 'import common\n'), ((2090, 2121), 'imgpheno.split_by_mask', 'ft.split_by_mask', (['img', 'bin_mask'], {}), '(img, bin_mask)\n', (2106, 2121), True, 'import imgpheno as ft\n'), ((2127, 2164), 'logging.info', 'logging.info', (['"""Exporting segments..."""'], {}), "('Exporting segments...')\n", (2139, 2164), False, 'import logging\n'), ((1492, 1549), 'sys.stderr.write', 'sys.stderr.write', (["('Failed to read %s. Skipping.\\n' % path)"], {}), "('Failed to read %s. Skipping.\\n' % path)\n", (1508, 1549), False, 'import sys\n'), ((2290, 2312), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2306, 2312), False, 'import os\n'), ((2328, 2350), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (2344, 2350), False, 'import os\n'), ((2423, 2458), 'os.path.join', 'os.path.join', (['args.output', 'out_path'], {}), '(args.output, out_path)\n', (2435, 2458), False, 'import os\n'), ((2467, 2498), 'logging.info', 'logging.info', (["('\\t%s' % out_path)"], {}), "('\\t%s' % out_path)\n", (2479, 2498), False, 'import logging\n'), ((2507, 2532), 'cv2.imwrite', 'cv2.imwrite', (['out_path', 'im'], {}), '(out_path, im)\n', (2518, 2532), False, 'import cv2\n'), ((1960, 2024), 'numpy.where', 'np.where', (['((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD))', '(255)', '(0)'], {}), '((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255, 0)\n', (1968, 2024), True, 'import numpy as np\n')]
|
import os, glob
import numpy as np
import pandas as pd
from multiprocessing import Pool
from PIL import Image
from tqdm import tqdm
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
import warnings
warnings.filterwarnings("ignore")
import torch
from torchvision import transforms
from utils import get_next_day, mkdirs, psd2im
from utils import get_instance_segmentation_model
from utils import reshape_mask
from utils import get_GR, get_SE
class NPFDetection(object):
"""Class for NPF detection."""
def __init__(self, opt):
super().__init__()
self.opt = opt
self.cpu_count = os.cpu_count() // 2 + 1
self.dataroot = os.path.join(opt.dataroot, opt.station)
self.station = opt.station
self.vmax = None if opt.dynamic_vmax else opt.vmax
self.tm_res = opt.time_res
self.df = pd.read_csv(os.path.join(self.dataroot, self.station+'.csv'), parse_dates=[0], index_col=0)
self.days = sorted(np.unique(self.df.index.date.astype(str)).tolist())
print(f'There are {len(self.days)} days of data to be processed.')
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.key_index = 0
def draw_one_day_images(self):
"""Draw NPF images with one-day unit"""
self.savefp = os.path.join(self.dataroot, 'images', 'one_day')
mkdirs(self.savefp)
self.dimg = 1
if self.cpu_count >= 8:
with Pool(self.cpu_count) as p:
p.map(self.draw_image, self.days)
else:
for day in tqdm(self.days):
self.draw_image(day)
def draw_two_day_images(self):
"""Draw NPF images with two-day unit"""
self.savefp = os.path.join(self.dataroot, 'images', 'two_day')
mkdirs(self.savefp)
self.dimg = 2
if self.cpu_count >= 8:
with Pool(self.cpu_count) as p:
p.map(self.draw_image, self.days)
else:
for day in tqdm(self.days):
self.draw_image(day)
def draw_image(self, day):
"""Draw an NPF image"""
if self.dimg == 1:
if not os.path.exists(os.path.join(self.savefp, day+'.png')):
try:
psd2im(self.df.loc[day], use_xaxis=False, use_yaxis=False, vmax=self.vmax, savefp=self.savefp, show_figure=False)
except Exception:
print(f'Cannot draw the NPF image for current day {day}.')
elif self.dimg == 2:
day_ = get_next_day(day)
if day_ in self.days and not os.path.exists(os.path.join(self.savefp, day+'_'+day_+'.png')):
try:
psd2im(self.df.loc[day:day_], use_xaxis=False, use_yaxis=False, vmax=self.vmax, savefp=self.savefp, show_figure=False)
except Exception:
print(f'Cannot draw the NPF image for current day {day}_{day_}.')
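    # Note: draw_image skips any day whose PNG already exists under images/one_day or
    # images/two_day, so interrupted runs of draw_one_day_images/draw_two_day_images can
    # simply be restarted without redrawing finished days.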
def detect_one_day_masks(self):
"""Detect masks for one-day NPF images"""
self.load_model()
size = (self.opt.im_size, self.opt.im_size)
res = {}
for im_path in glob.glob(os.path.join(self.dataroot, 'images/one_day')+'/*.png'):
mask = self.detect_mask(im_path, size)
if mask is not None:
res.update(mask)
print(f'Detected {len(res)} one-day masks whose scores are higher than {self.opt.scores:.2f}.')
savefp = os.path.join(self.dataroot, 'masks')
mkdirs(savefp)
np.save(os.path.join(savefp, 'one_day.npy'), res)
def detect_two_day_masks(self):
"""Detect masks for two-day NPF images"""
self.load_model()
size = (self.opt.im_size*2, self.opt.im_size)
res = {}
for im_path in glob.glob(os.path.join(self.dataroot, 'images/two_day')+'/*.png'):
mask = self.detect_mask(im_path, size)
if mask is not None:
res.update(mask)
print(f'Detected {len(res)} two-day masks whose scores are higher than {self.opt.scores:.2f}.')
savefp = os.path.join(self.dataroot, 'masks')
mkdirs(savefp)
np.save(os.path.join(savefp, 'two_day.npy'), res)
def load_model(self):
# load the pre-trained Mask R-CNN model
self.model = get_instance_segmentation_model()
self.model.load_state_dict(torch.load(f'{self.opt.ckpt_dir}/{self.opt.model_name}'))
self.model.to(self.device)
self.model.eval()
@torch.no_grad()
def detect_mask(self, im_path, size):
"""Detect valid masks for NPF images"""
# get mask
im = Image.open(im_path).convert('RGB').resize(size, Image.ANTIALIAS)
ts = transforms.ToTensor()(im)
out = self.model([ts.to(self.device)])[0]
if len(out['scores']) == 0:
return None
else:
idx_bool = out['scores'].cpu().numpy() >= self.opt.scores
index = [i for i, item in enumerate(idx_bool) if item]
if len(index) == 0:
return None
else:
masks = out['masks'][index].squeeze(1).cpu().numpy() >= self.opt.mask_thres
day = im_path.split(os.sep)[-1].split('.')[0].split('_')[0]
return {day: masks}
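    # detect_mask returns {day: masks}, where masks is a boolean array of shape
    # (n_objects, H, W): only detections scoring at least opt.scores are kept and each
    # soft mask is binarised with opt.mask_thres; the day key is parsed from the file name.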
def visualize_masks(self):
self.masks_oneday = np.load(os.path.join(self.dataroot, 'masks', 'one_day.npy'), allow_pickle=True).tolist()
self.masks_twoday = np.load(os.path.join(self.dataroot, 'masks', 'two_day.npy'), allow_pickle=True).tolist()
self.keys = sorted(list(self.masks_oneday.keys()))
self.keys_ = sorted(list(self.masks_twoday.keys()))
self.len_keys = len(self.keys)
self.win = tk.Tk()
self.win.title('NPF Detection')
self.fig = Figure(dpi=100)
self.canvas = FigureCanvasTkAgg(self.fig, master=self.win)
graph_widget = self.canvas.get_tk_widget()
graph_widget.grid(row=0, column=0, rowspan=2, columnspan=4, ipadx=200, sticky = tk.NW)
self.fig1 = Figure(dpi=100)
self.canvas1 = FigureCanvasTkAgg(self.fig1, master=self.win)
graph_widget1 = self.canvas1.get_tk_widget()
graph_widget1.grid(row=2, column=0, rowspan=2, columnspan=4, ipadx=200, sticky = tk.NW)
tk.Label(self.win, text='Select the one-day mask (select only one mask currently)').grid(row=0, column=5, columnspan=5, ipadx=50)
tk.Label(self.win, text='Select the two-day mask (select only one mask currently)').grid(row=2, column=5, columnspan=5, ipadx=50)
self.plot_next()
tk.Button(self.win,text="Prev",command=self.plot_prev).grid(row=5,column=3, columnspan=5, sticky=tk.W)
tk.Button(self.win,text="Next",command=self.plot_next).grid(row=5,column=7, columnspan=5, sticky=tk.W)
self.win.mainloop()
def plot(self):
self.fig.clear()
self.fig1.clear()
self.key = self.keys[self.key_index]
self.visualize_oneday_mask(self.fig, self.key)
if self.key in self.keys_:
self.visualize_twoday_mask(self.fig1, self.key)
self.canvas.draw_idle()
self.canvas1.draw_idle()
def plot_prev(self):
self.plot()
self.key_index -= 1
tk.Label(self.win, text=f'{self.key_index}/{self.len_keys}', fg='blue').grid(row=4, column=7, ipadx=50)
if self.key_index < 0:
            tk.messagebox.showerror(title='Warning', message='You are at the beginning, please click the Next button.')
def plot_next(self):
self.plot()
self.key_index += 1
tk.Label(self.win, text=f'{self.key_index}/{self.len_keys}', fg='blue').grid(row=4, column=7, ipadx=50)
if self.key_index == self.len_keys - 1:
tk.messagebox.showinfo(title='Warning', message='Good job! All masks have been checked!')
def visualize_oneday_mask(self, fig, day):
masks = self.masks_oneday[day]
num_masks = masks.shape[0]
ax = fig.add_subplot(1, num_masks+1, 1)
im = Image.open(os.path.join(self.dataroot, 'images/one_day', day+'.png'))
im = im.resize((self.opt.im_size, self.opt.im_size), Image.ANTIALIAS)
ax.imshow(np.array(im))
ax.set_title(day)
ax.axis('off')
# plot masks
for i in range(masks.shape[0]):
ax = fig.add_subplot(1, num_masks+1, i+2)
ax.imshow(masks[i], cmap='gray')
ax.set_title(f'mask {i}')
ax.axis('off')
for i in range(5):
ck_btn = tk.Checkbutton(self.win, text=f'one-day mask {i}')
ck_btn.grid(row=1, column=5+i, ipadx=10, ipady=5)
ck_btn.config(command=lambda btn=ck_btn:self.save_mask(btn))
def visualize_twoday_mask(self, fig, day):
day_ = get_next_day(day)
masks_ = self.masks_twoday[day]
num_masks = masks_.shape[0]
ax = fig.add_subplot(1, num_masks+1, 1)
im_ = Image.open(os.path.join(self.dataroot, 'images/two_day', day+'_'+day_+'.png'))
im_ = im_.resize((self.opt.im_size*2, self.opt.im_size), Image.ANTIALIAS)
ax.imshow(np.array(im_))
ax.set_title(day+'_'+day_)
ax.axis('off')
for i in range(masks_.shape[0]):
ax = fig.add_subplot(1, num_masks+1, i+2)
ax.imshow(masks_[i], cmap='gray')
ax.set_title(f'mask {i}')
ax.axis('off')
for i in range(5):
ck_btn_ = tk.Checkbutton(self.win, text=f'two-day mask {i}')
ck_btn_.grid(row=3, column=5+i, ipadx=10, ipady=5)
ck_btn_.config(command=lambda btn=ck_btn_:self.save_mask(btn))
def save_mask(self, btn):
text = btn.cget('text')
idx = int(text[-1])
if 'one-day' in text:
savefp = os.path.join(self.dataroot, 'masks/one_day')
mkdirs(savefp)
np.save(os.path.join(savefp, f'{self.key}.npy'), self.masks_oneday[self.key][idx])
elif 'two-day' in text:
savefp = os.path.join(self.dataroot, 'masks/two_day')
mkdirs(savefp)
np.save(os.path.join(savefp, f'{self.key}.npy'), self.masks_twoday[self.key][idx])
def get_SE_GR(self, day):
df = self.df.loc[day]
mask = np.load(os.path.join(self.dataroot, 'masks/one_day', day+'.npy'), allow_pickle=True)
mask = reshape_mask(mask, df.shape)
try:
st, et = get_SE(df, mask)
gr_dict = get_GR(df, mask, self.tm_res, savefp=self.savefp, vmax=self.vmax)
        except Exception:
# print(day)
return
try:
mask_ = np.load(os.path.join(self.dataroot, 'masks/two_day', day+'.npy'), allow_pickle=True)
df_ = self.df.loc[day:get_next_day(day)]
mask_ = reshape_mask(mask_, df_.shape)
st_two, et_two = get_SE(df_, mask_)
        except Exception:
st_two, et_two = st, et
save_dict = {**{
'date': [day],
'start_time_one': [st],
'end_time_one': [et],
'start_time_two': [st_two],
'end_time_two': [et_two]
}, **gr_dict}
pd.DataFrame(save_dict).to_csv(os.path.join(self.savefp, f'{day}.csv'), index=False)
def save_SE_GR(self):
r"""
obtain and save the start time, end time and the growth rates.
"""
files = sorted(glob.glob(os.path.join(self.dataroot, 'masks/one_day')+'/*.npy'))
days = [file.split(os.sep)[-1].split('.')[0] for file in files]
print(f'Calculating growth rates for {len(days)} days.')
self.savefp = os.path.join(self.dataroot, 'GR')
mkdirs(self.savefp)
if self.cpu_count >= 8:
with Pool(self.cpu_count) as p:
p.map(self.get_SE_GR, days)
else:
for day in tqdm(days):
self.get_SE_GR(day)
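# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes an ``opt``
# namespace exposing the attributes the class reads above; every value below is
# a placeholder shown for illustration, not a recommended setting.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from argparse import Namespace
    opt = Namespace(dataroot='data', station='STATION', dynamic_vmax=False, vmax=1e4,
                    time_res=10, im_size=256, scores=0.5, mask_thres=0.5,
                    ckpt_dir='checkpoints', model_name='mask_rcnn.pth')
    npf = NPFDetection(opt)
    npf.draw_one_day_images()      # render one-day PSD images
    npf.draw_two_day_images()      # render two-day PSD images
    npf.detect_one_day_masks()     # Mask R-CNN detection on one-day images
    npf.detect_two_day_masks()     # Mask R-CNN detection on two-day images
    npf.visualize_masks()          # tkinter GUI to inspect and save the correct masks
    npf.save_SE_GR()               # write start/end times and growth rates to CSV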
|
[
"utils.get_SE",
"utils.get_GR",
"utils.psd2im",
"torch.device",
"torch.no_grad",
"os.path.join",
"utils.get_next_day",
"tkinter.Label",
"utils.mkdirs",
"tkinter.Checkbutton",
"pandas.DataFrame",
"tkinter.Button",
"torch.load",
"matplotlib.figure.Figure",
"tkinter.Tk",
"tqdm.tqdm",
"utils.get_instance_segmentation_model",
"tkinter.messagebox.showinfo",
"torch.cuda.is_available",
"multiprocessing.Pool",
"tkinter.messagebox.showerror",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"utils.reshape_mask",
"warnings.filterwarnings",
"PIL.Image.open",
"os.cpu_count",
"numpy.array",
"torchvision.transforms.ToTensor"
] |
[((286, 319), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (309, 319), False, 'import warnings\n'), ((4704, 4719), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4717, 4719), False, 'import torch\n'), ((765, 804), 'os.path.join', 'os.path.join', (['opt.dataroot', 'opt.station'], {}), '(opt.dataroot, opt.station)\n', (777, 804), False, 'import os, glob\n'), ((1457, 1505), 'os.path.join', 'os.path.join', (['self.dataroot', '"""images"""', '"""one_day"""'], {}), "(self.dataroot, 'images', 'one_day')\n", (1469, 1505), False, 'import os, glob\n'), ((1515, 1534), 'utils.mkdirs', 'mkdirs', (['self.savefp'], {}), '(self.savefp)\n', (1521, 1534), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((1893, 1941), 'os.path.join', 'os.path.join', (['self.dataroot', '"""images"""', '"""two_day"""'], {}), "(self.dataroot, 'images', 'two_day')\n", (1905, 1941), False, 'import os, glob\n'), ((1951, 1970), 'utils.mkdirs', 'mkdirs', (['self.savefp'], {}), '(self.savefp)\n', (1957, 1970), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((3641, 3677), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks"""'], {}), "(self.dataroot, 'masks')\n", (3653, 3677), False, 'import os, glob\n'), ((3687, 3701), 'utils.mkdirs', 'mkdirs', (['savefp'], {}), '(savefp)\n', (3693, 3701), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((4285, 4321), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks"""'], {}), "(self.dataroot, 'masks')\n", (4297, 4321), False, 'import os, glob\n'), ((4331, 4345), 'utils.mkdirs', 'mkdirs', (['savefp'], {}), '(savefp)\n', (4337, 4345), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((4505, 4538), 'utils.get_instance_segmentation_model', 'get_instance_segmentation_model', ([], {}), '()\n', (4536, 4538), False, 'from utils import get_instance_segmentation_model\n'), ((5965, 5972), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (5970, 5972), True, 'import tkinter as tk\n'), ((6034, 6049), 'matplotlib.figure.Figure', 'Figure', ([], {'dpi': '(100)'}), '(dpi=100)\n', (6040, 6049), False, 'from matplotlib.figure import Figure\n'), ((6073, 6117), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.fig'], {'master': 'self.win'}), '(self.fig, master=self.win)\n', (6090, 6117), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((6289, 6304), 'matplotlib.figure.Figure', 'Figure', ([], {'dpi': '(100)'}), '(dpi=100)\n', (6295, 6304), False, 'from matplotlib.figure import Figure\n'), ((6329, 6374), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.fig1'], {'master': 'self.win'}), '(self.fig1, master=self.win)\n', (6346, 6374), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((9084, 9101), 'utils.get_next_day', 'get_next_day', (['day'], {}), '(day)\n', (9096, 9101), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((10679, 10707), 'utils.reshape_mask', 'reshape_mask', (['mask', 'df.shape'], {}), '(mask, df.shape)\n', (10691, 10707), False, 'from utils import reshape_mask\n'), ((11957, 11990), 'os.path.join', 'os.path.join', (['self.dataroot', '"""GR"""'], {}), "(self.dataroot, 'GR')\n", (11969, 11990), False, 'import os, glob\n'), ((12000, 12019), 'utils.mkdirs', 'mkdirs', (['self.savefp'], {}), '(self.savefp)\n', (12006, 12019), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((968, 1018), 'os.path.join', 'os.path.join', (['self.dataroot', "(self.station + 
'.csv')"], {}), "(self.dataroot, self.station + '.csv')\n", (980, 1018), False, 'import os, glob\n'), ((1268, 1293), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1291, 1293), False, 'import torch\n'), ((1244, 1264), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1256, 1264), False, 'import torch\n'), ((1299, 1318), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1311, 1318), False, 'import torch\n'), ((1728, 1743), 'tqdm.tqdm', 'tqdm', (['self.days'], {}), '(self.days)\n', (1732, 1743), False, 'from tqdm import tqdm\n'), ((2164, 2179), 'tqdm.tqdm', 'tqdm', (['self.days'], {}), '(self.days)\n', (2168, 2179), False, 'from tqdm import tqdm\n'), ((3719, 3754), 'os.path.join', 'os.path.join', (['savefp', '"""one_day.npy"""'], {}), "(savefp, 'one_day.npy')\n", (3731, 3754), False, 'import os, glob\n'), ((4363, 4398), 'os.path.join', 'os.path.join', (['savefp', '"""two_day.npy"""'], {}), "(savefp, 'two_day.npy')\n", (4375, 4398), False, 'import os, glob\n'), ((4575, 4631), 'torch.load', 'torch.load', (['f"""{self.opt.ckpt_dir}/{self.opt.model_name}"""'], {}), "(f'{self.opt.ckpt_dir}/{self.opt.model_name}')\n", (4585, 4631), False, 'import torch\n'), ((4925, 4946), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4944, 4946), False, 'from torchvision import transforms\n'), ((7671, 7782), 'tkinter.messagebox.showerror', 'tk.messagebox.showerror', ([], {'title': '"""Warning"""', 'message': '"""You are at the begining, please click the Next button."""'}), "(title='Warning', message=\n 'You are at the begining, please click the Next button.')\n", (7694, 7782), True, 'import tkinter as tk\n'), ((8031, 8125), 'tkinter.messagebox.showinfo', 'tk.messagebox.showinfo', ([], {'title': '"""Warning"""', 'message': '"""Good job! All masks have been checked!"""'}), "(title='Warning', message=\n 'Good job! 
All masks have been checked!')\n", (8053, 8125), True, 'import tkinter as tk\n'), ((8323, 8382), 'os.path.join', 'os.path.join', (['self.dataroot', '"""images/one_day"""', "(day + '.png')"], {}), "(self.dataroot, 'images/one_day', day + '.png')\n", (8335, 8382), False, 'import os, glob\n'), ((8480, 8492), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (8488, 8492), True, 'import numpy as np\n'), ((8830, 8880), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.win'], {'text': 'f"""one-day mask {i}"""'}), "(self.win, text=f'one-day mask {i}')\n", (8844, 8880), True, 'import tkinter as tk\n'), ((9255, 9327), 'os.path.join', 'os.path.join', (['self.dataroot', '"""images/two_day"""', "(day + '_' + day_ + '.png')"], {}), "(self.dataroot, 'images/two_day', day + '_' + day_ + '.png')\n", (9267, 9327), False, 'import os, glob\n'), ((9425, 9438), 'numpy.array', 'np.array', (['im_'], {}), '(im_)\n', (9433, 9438), True, 'import numpy as np\n'), ((9766, 9816), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.win'], {'text': 'f"""two-day mask {i}"""'}), "(self.win, text=f'two-day mask {i}')\n", (9780, 9816), True, 'import tkinter as tk\n'), ((10105, 10149), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks/one_day"""'], {}), "(self.dataroot, 'masks/one_day')\n", (10117, 10149), False, 'import os, glob\n'), ((10163, 10177), 'utils.mkdirs', 'mkdirs', (['savefp'], {}), '(savefp)\n', (10169, 10177), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((10586, 10644), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks/one_day"""', "(day + '.npy')"], {}), "(self.dataroot, 'masks/one_day', day + '.npy')\n", (10598, 10644), False, 'import os, glob\n'), ((10744, 10760), 'utils.get_SE', 'get_SE', (['df', 'mask'], {}), '(df, mask)\n', (10750, 10760), False, 'from utils import get_GR, get_SE\n'), ((10784, 10849), 'utils.get_GR', 'get_GR', (['df', 'mask', 'self.tm_res'], {'savefp': 'self.savefp', 'vmax': 'self.vmax'}), '(df, mask, self.tm_res, savefp=self.savefp, vmax=self.vmax)\n', (10790, 10849), False, 'from utils import get_GR, get_SE\n'), ((11110, 11140), 'utils.reshape_mask', 'reshape_mask', (['mask_', 'df_.shape'], {}), '(mask_, df_.shape)\n', (11122, 11140), False, 'from utils import reshape_mask\n'), ((11171, 11189), 'utils.get_SE', 'get_SE', (['df_', 'mask_'], {}), '(df_, mask_)\n', (11177, 11189), False, 'from utils import get_GR, get_SE\n'), ((11521, 11560), 'os.path.join', 'os.path.join', (['self.savefp', 'f"""{day}.csv"""'], {}), "(self.savefp, f'{day}.csv')\n", (11533, 11560), False, 'import os, glob\n'), ((12184, 12194), 'tqdm.tqdm', 'tqdm', (['days'], {}), '(days)\n', (12188, 12194), False, 'from tqdm import tqdm\n'), ((716, 730), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (728, 730), False, 'import os, glob\n'), ((1611, 1631), 'multiprocessing.Pool', 'Pool', (['self.cpu_count'], {}), '(self.cpu_count)\n', (1615, 1631), False, 'from multiprocessing import Pool\n'), ((2047, 2067), 'multiprocessing.Pool', 'Pool', (['self.cpu_count'], {}), '(self.cpu_count)\n', (2051, 2067), False, 'from multiprocessing import Pool\n'), ((2711, 2728), 'utils.get_next_day', 'get_next_day', (['day'], {}), '(day)\n', (2723, 2728), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((3341, 3386), 'os.path.join', 'os.path.join', (['self.dataroot', '"""images/one_day"""'], {}), "(self.dataroot, 'images/one_day')\n", (3353, 3386), False, 'import os, glob\n'), ((3985, 4030), 'os.path.join', 'os.path.join', (['self.dataroot', '"""images/two_day"""'], {}), "(self.dataroot, 
'images/two_day')\n", (3997, 4030), False, 'import os, glob\n'), ((6537, 6625), 'tkinter.Label', 'tk.Label', (['self.win'], {'text': '"""Select the one-day mask (select only one mask currently)"""'}), "(self.win, text=\n 'Select the one-day mask (select only one mask currently)')\n", (6545, 6625), True, 'import tkinter as tk\n'), ((6676, 6764), 'tkinter.Label', 'tk.Label', (['self.win'], {'text': '"""Select the two-day mask (select only one mask currently)"""'}), "(self.win, text=\n 'Select the two-day mask (select only one mask currently)')\n", (6684, 6764), True, 'import tkinter as tk\n'), ((6843, 6899), 'tkinter.Button', 'tk.Button', (['self.win'], {'text': '"""Prev"""', 'command': 'self.plot_prev'}), "(self.win, text='Prev', command=self.plot_prev)\n", (6852, 6899), True, 'import tkinter as tk\n'), ((6955, 7011), 'tkinter.Button', 'tk.Button', (['self.win'], {'text': '"""Next"""', 'command': 'self.plot_next'}), "(self.win, text='Next', command=self.plot_next)\n", (6964, 7011), True, 'import tkinter as tk\n'), ((7522, 7593), 'tkinter.Label', 'tk.Label', (['self.win'], {'text': 'f"""{self.key_index}/{self.len_keys}"""', 'fg': '"""blue"""'}), "(self.win, text=f'{self.key_index}/{self.len_keys}', fg='blue')\n", (7530, 7593), True, 'import tkinter as tk\n'), ((7865, 7936), 'tkinter.Label', 'tk.Label', (['self.win'], {'text': 'f"""{self.key_index}/{self.len_keys}"""', 'fg': '"""blue"""'}), "(self.win, text=f'{self.key_index}/{self.len_keys}', fg='blue')\n", (7873, 7936), True, 'import tkinter as tk\n'), ((10199, 10238), 'os.path.join', 'os.path.join', (['savefp', 'f"""{self.key}.npy"""'], {}), "(savefp, f'{self.key}.npy')\n", (10211, 10238), False, 'import os, glob\n'), ((10329, 10373), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks/two_day"""'], {}), "(self.dataroot, 'masks/two_day')\n", (10341, 10373), False, 'import os, glob\n'), ((10387, 10401), 'utils.mkdirs', 'mkdirs', (['savefp'], {}), '(savefp)\n', (10393, 10401), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((10958, 11016), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks/two_day"""', "(day + '.npy')"], {}), "(self.dataroot, 'masks/two_day', day + '.npy')\n", (10970, 11016), False, 'import os, glob\n'), ((11490, 11513), 'pandas.DataFrame', 'pd.DataFrame', (['save_dict'], {}), '(save_dict)\n', (11502, 11513), True, 'import pandas as pd\n'), ((12073, 12093), 'multiprocessing.Pool', 'Pool', (['self.cpu_count'], {}), '(self.cpu_count)\n', (12077, 12093), False, 'from multiprocessing import Pool\n'), ((2349, 2388), 'os.path.join', 'os.path.join', (['self.savefp', "(day + '.png')"], {}), "(self.savefp, day + '.png')\n", (2361, 2388), False, 'import os, glob\n'), ((2432, 2549), 'utils.psd2im', 'psd2im', (['self.df.loc[day]'], {'use_xaxis': '(False)', 'use_yaxis': '(False)', 'vmax': 'self.vmax', 'savefp': 'self.savefp', 'show_figure': '(False)'}), '(self.df.loc[day], use_xaxis=False, use_yaxis=False, vmax=self.vmax,\n savefp=self.savefp, show_figure=False)\n', (2438, 2549), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((5577, 5628), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks"""', '"""one_day.npy"""'], {}), "(self.dataroot, 'masks', 'one_day.npy')\n", (5589, 5628), False, 'import os, glob\n'), ((5695, 5746), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks"""', '"""two_day.npy"""'], {}), "(self.dataroot, 'masks', 'two_day.npy')\n", (5707, 5746), False, 'import os, glob\n'), ((10423, 10462), 'os.path.join', 'os.path.join', (['savefp', 
'f"""{self.key}.npy"""'], {}), "(savefp, f'{self.key}.npy')\n", (10435, 10462), False, 'import os, glob\n'), ((11070, 11087), 'utils.get_next_day', 'get_next_day', (['day'], {}), '(day)\n', (11082, 11087), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((11737, 11781), 'os.path.join', 'os.path.join', (['self.dataroot', '"""masks/one_day"""'], {}), "(self.dataroot, 'masks/one_day')\n", (11749, 11781), False, 'import os, glob\n'), ((2878, 3001), 'utils.psd2im', 'psd2im', (['self.df.loc[day:day_]'], {'use_xaxis': '(False)', 'use_yaxis': '(False)', 'vmax': 'self.vmax', 'savefp': 'self.savefp', 'show_figure': '(False)'}), '(self.df.loc[day:day_], use_xaxis=False, use_yaxis=False, vmax=self.\n vmax, savefp=self.savefp, show_figure=False)\n', (2884, 3001), False, 'from utils import get_next_day, mkdirs, psd2im\n'), ((4846, 4865), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', (4856, 4865), False, 'from PIL import Image\n'), ((2786, 2838), 'os.path.join', 'os.path.join', (['self.savefp', "(day + '_' + day_ + '.png')"], {}), "(self.savefp, day + '_' + day_ + '.png')\n", (2798, 2838), False, 'import os, glob\n')]
|
"""
Copied from WRF_SPC.py Sep 20, 2019.
Given a model initialization time and a valid time, plot crefuh around hagelslag objects.
"""
import argparse
import datetime
import pdb
import os
import sys
import pandas as pd
import numpy as np
import fieldinfo # levels and color tables - Adapted from /glade/u/home/wrfrt/wwe/python_scripts/fieldinfo.py 20190125.
from wrf import to_np, getvar, get_cartopy, latlon_coords
from metpy.units import units
from netCDF4 import Dataset
import cartopy
import matplotlib
matplotlib.use("Agg") # allows dav slurm jobs
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# =============Arguments===================
parser = argparse.ArgumentParser(description = "Plot WRF and SPC storm reports",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--fill", type=str, default= 'crefuh', help='netCDF variable name for contour fill field')
parser.add_argument("-b", "--barb", choices=["shr06", "wind10m",""], type=str, default="wind10m", help='wind barbs')
parser.add_argument("-c", "--contour", type=str, default=None, help='contour field')
parser.add_argument("-o", "--outdir", type=str, default='.', help="name of output path")
parser.add_argument("-p", "--padding", type=float, nargs=4, help="padding on west, east, south and north side in km",
default=[175.,175.,175.,175.])
parser.add_argument("--timeshift", type=int, default=0, help="hours to shift background field")
parser.add_argument("--arrow", action='store_true', help="Add storm motion vector from hagelslag")
parser.add_argument("--no-fineprint", action='store_true', help="Don't write image details at bottom")
parser.add_argument("--force_new", action='store_true', help="overwrite any old outfile, if it exists")
parser.add_argument("--no-counties", action='store_true', help="Don't draw county borders (can be slow)")
parser.add_argument("--no-mask", action='store_true', help="Don't draw object mask")
parser.add_argument('-i', "--idir", type=str, default="/glade/p/mmm/parc/sobash/NSC/3KM_WRF_POST_12sec_ts",
help="path to WRF output files")
parser.add_argument('-s', "--stride", type=int, default=1, help="plot every stride points. speed things up with stride>1")
parser.add_argument('-t', "--trackdir", type=str, default="/glade/scratch/ahijevyc/track_data_ncarstorm_3km_REFL_1KM_AGL_csv",
help="path to hagelslag track-step files")
parser.add_argument("--patchdir", type=str, default="/glade/scratch/ahijevyc/track_data_ncarstorm_3km_REFL_1KM_AGL_nc",
help="path to hagelslag netCDF patches")
parser.add_argument("initial_time", type=lambda d: datetime.datetime.strptime(d, '%Y%m%d%H'),
help="model initialization date and hour, yyyymmddhh")
parser.add_argument("valid_time", type=lambda d: datetime.datetime.strptime(d, '%Y%m%d%H'),
help="model valid date and hour, yyyymmddhh")
parser.add_argument("-d", "--debug", action='store_true')
# Assign arguments to simply-named variables
args = parser.parse_args()
barb = args.barb
contour = args.contour
fill = args.fill
odir = args.outdir
padding = args.padding
timeshift = args.timeshift
arrow = args.arrow
no_fineprint = args.no_fineprint
force_new = args.force_new
no_counties = args.no_counties
no_mask = args.no_mask
idir = args.idir
stride = args.stride
patchdir = args.patchdir
trackdir = args.trackdir
initial_time = args.initial_time
valid_time = args.valid_time
debug = args.debug
if debug:
print(args)
# Derive lead time and make sure it is between 7 and 36 hours (matching the check below).
lead_time = valid_time - initial_time
if lead_time < datetime.timedelta(hours=7) or lead_time > datetime.timedelta(hours=36):
print("lead_time:",lead_time, "not between 7 and 36 hours")
#sys.exit(1)
def update_scale_labels(scale_xy):
# Update labels on axes with the distance along each axis.
# Cartopy axes do not have a set_xlabel() or set_ylabel() method. Add labels manually.
xspan = ax.get_xlim()
yspan = ax.get_ylim()
xlabel = "%dkm" % (round((xspan[1]-xspan[0])/1000.))
ylabel = "%dkm" % (round((yspan[1]-yspan[0])/1000.))
x, y = scale_xy
x.set_text(xlabel)
y.set_text(ylabel)
# Read hagelslag track_step csv file into pandas DataFrame.
mysterious_suffix = '' # '_13' or '_12'
tracks = trackdir + '/' + initial_time.strftime('track_step_NCARSTORM_d01_%Y%m%d-%H%M')+mysterious_suffix+'.csv'
if debug:
print("reading csv file",tracks)
df = pd.read_csv(tracks, parse_dates=['Run_Date', 'Valid_Date'])
# Throw out everything except requested valid times.
df = df[df.Valid_Date == valid_time]
if df.empty:
print("csv track step file", tracks, " has no objects at requested valid time",valid_time,". That is probably fine.")
sys.exit(0)
# Throw out weak UH objects
good_UH = 25
igood_UH = df['UP_HELI_MAX_max'] >= good_UH
if 'UP_HELI_MIN_min' in df.columns:
igood_UH = igood_UH | (df['UP_HELI_MIN_min'].abs() >= good_UH)
print("ignoring",(~igood_UH).sum(),"object with abs(UH) <",good_UH)
if debug:
if 'UP_HELI_MIN_min' in df.columns:
print(df[~igood_UH][["Step_ID","UP_HELI_MAX_max","UP_HELI_MIN_min"]])
else:
print(df[~igood_UH][["Step_ID","UP_HELI_MAX_max"]])
df = df[igood_UH]
if df.empty:
print("csv track step file", tracks, " has no good UH objects at requested valid time",valid_time,". That is probably fine.")
sys.exit(0)
# List of all png files that will be created.
pngfiles = odir + '/' + df.Step_ID + "_" + "{:+1.0f}".format(timeshift) + ".png"
if all([os.path.isfile(p) for p in pngfiles]) and not force_new:
# Exit if pngs all already exist and force_new option was not used.
print(initial_time, valid_time, "{:+1.0f}".format(timeshift) +"h",fill,"finished. Moving on.")
sys.exit(0)
if not no_mask:
# Read netCDF patches
patches = patchdir + '/' + initial_time.strftime('NCARSTORM_%Y%m%d-%H%M_d01_model_patches.nc')
pnc = Dataset(patches,'r')
masks = pnc.variables["masks"][:]
mlons = pnc.variables["lon"][:]
mlats = pnc.variables["lat"][:]
mtrack_ids = pnc.variables["track_id"][:]
mtrack_steps = pnc.variables["track_step"][:]
mask_centroid_lats = pnc.variables["centroid_lat"][:]
mask_centroid_lons = pnc.variables["centroid_lon"][:]
pnc.close()
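    # Each patch in the netCDF file pairs a (track_id, track_step) with an object mask on
    # its own lon/lat grid plus the object centroid; these are matched against the csv
    # track steps further below when the mask outline is contoured.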
# Get color map, levels, and netCDF variable name appropriate for requested variable (from fieldinfo module).
info = fieldinfo.nsc[fill]
if debug:
print("found nsc in fieldinfo.py. Using",info)
cmap = colors.ListedColormap(info['cmap'])
levels = info['levels']
fill = info['fname'][0]
# Get wrfout filename
history_time = valid_time + datetime.timedelta(hours=timeshift)
wrfout = idir + '/' + initial_time.strftime('%Y%m%d%H') + '/' + history_time.strftime('diags_d01_%Y-%m-%d_%H_%M_%S.nc')
if debug: print("About to open "+wrfout)
wrfnc = Dataset(wrfout,"r")
if fill not in wrfnc.variables:
print("variable "+ fill + " not found")
print("choices:", wrfnc.variables.keys())
sys.exit(1)
# Get a 2D var from wrfout file. It has projection info.
if debug:
print("getvar...")
cvar = getvar(wrfnc,fill)
wrflat, wrflon = latlon_coords(cvar)
# get cartopy mapping object
if debug: print("get_cartopy...")
WRF_proj = get_cartopy(cvar)
fineprint0 = 'fill '+fill+" ("+cvar.units+") "
if 'units' in info.keys():
cvar.metpy.convert_units(info['units'])
if hasattr(cvar, 'long_name'):
    label = cvar.long_name
elif hasattr(cvar, 'description'):
    label = cvar.description
else:
    label = fill  # fall back to the netCDF variable name so the colorbar label is always defined
# convert WRF lat/lons to x,y
pts = WRF_proj.transform_points(cartopy.crs.PlateCarree(), to_np(wrflon[::stride,::stride]), to_np(wrflat[::stride,::stride])) # Transform lon/lat to x and y (in meters) in WRF projection.
x, y, z = pts[:,:,0], pts[:,:,1], pts[:,:,2]
fig = plt.figure(figsize=(10,10))
if debug: print("plt.axes()")
ax = plt.axes(projection=WRF_proj)
ax.add_feature(cartopy.feature.STATES.with_scale('10m'), linewidth=0.35, alpha=0.55)
# Set title (month and hour)
ax.set_title(history_time.strftime("%b %HZ"))
# Empty fineprint placeholder in lower left corner of image.
fineprint_obj = plt.annotate(text=fineprint0, xy=(0,5), xycoords=('axes fraction', 'figure pixels'), va="bottom", fontsize=4)
if cvar.min() > levels[-1] or cvar.max() < levels[0]:
print('levels',levels,'out of range of cvar', cvar.values.min(), cvar.values.max())
sys.exit(1)
if debug:
print('levels:',levels, 'cmap:', cmap.colors)
if debug:
print("plotting filled contour",cvar.name,"...")
cfill = ax.contourf(x, y, to_np(cvar[::stride,::stride]), levels=levels, cmap=cmap)
# Color bar
cb = plt.colorbar(cfill, ax=ax, format='%.0f', shrink=0.52, orientation='horizontal')
if hasattr(cvar,"units"):
cb.set_label(label+" ("+cvar.units+")", fontsize="small")
if len(levels) < 10:
# label every level if there is room.
cb.set_ticks(levels)
cb.ax.tick_params(labelsize='xx-small')
cb.outline.set_linewidth(0.5)
# Create 2 annotation object placeholders for spatial scale. Will be updated with each set_extent().
scale_kw = {"ha":"center","rotation_mode":"anchor","xycoords":"axes fraction","textcoords":"offset points"}
scale_xy = ( ax.annotate("", (0.5, 0), xytext=(0,-5), va='top', rotation='horizontal', **scale_kw),
ax.annotate("", (0, 0.5), xytext=(-5,0), va='bottom', rotation='vertical', **scale_kw) )
# Special case of composite reflectivity, UH overlay
if args.fill == 'crefuh':
max_uh = getvar(wrfnc,info['fname'][1])
min_uh = getvar(wrfnc,info['fname'][2])
max_uh_threshold = info['max_threshold']
min_uh_threshold = info['min_threshold']
print("UH max:", max_uh.max().values)
print("UH min:", min_uh.min().values)
if max_uh.max() > max_uh_threshold:
print("Filled contour UH >",max_uh_threshold)
# Don't use contourf if the data fall outside the levels range. You will get ValueError: 'bboxes' cannot be empty.
# See https://github.com/SciTools/cartopy/issues/1290
cs1 = ax.contourf(x, y, to_np(max_uh), levels=[max_uh_threshold,1000], colors='black',
alpha=0.3 )
if debug: print("solid contour UH >",max_uh_threshold)
cs2 = ax.contour(x, y, to_np(max_uh), levels=max_uh_threshold*np.arange(1,6), colors='black',
linestyles='solid', linewidths=0.4 )
fineprint0 += "UH>"+str(max_uh_threshold) +" "+ max_uh.units + " "
# Oddly, the zero contour is plotted if there are no other valid contours
if 0.0 in cs2.levels:
print("uh has zero contour for some reason. Hide it")
if debug:
pdb.set_trace()
for i in cs2.collections: i.remove()
if min_uh.min() < min_uh_threshold:
print("Filled UH contour <",min_uh_threshold)
# Don't use contourf if the data fall outside the levels range. You will get ValueError: 'bboxes' cannot be empty.
# See https://github.com/SciTools/cartopy/issues/1290
negUH1 = ax.contourf(x, y, to_np(min_uh), levels=[-1000, min_uh_threshold], colors='black',
alpha=0.3 )
if debug: print("dashed contour UH <",min_uh_threshold)
negUH2 = ax.contour(x, y, to_np(min_uh), levels=min_uh_threshold*np.arange(6,0,-1), colors='black',
linestyles='dashed', linewidths=0.4 )
fineprint0 += "UH<"+str(-min_uh_threshold) +" "+ min_uh.units + " "
if 0.0 in negUH2.levels:
print("neg uh has a zero contour. Hide it")
if debug:
pdb.set_trace()
for i in negUH2.collections: i.remove()
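# The 'crefuh' block above overlays updraft helicity on the reflectivity fill:
# translucent black shading plus solid contours where max UH exceeds the positive
# threshold from fieldinfo, and dashed contours where min UH drops below the
# negative threshold.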
# Read my own county shape file.
if not no_counties:
if debug:
print("About to draw counties")
reader = cartopy.io.shapereader.Reader('/glade/work/ahijevyc/share/shapeFiles/cb_2013_us_county_500k/cb_2013_us_county_500k.shp')
counties = list(reader.geometries())
# Create custom cartopy feature that can be added to the axes.
COUNTIES = cartopy.feature.ShapelyFeature(counties, cartopy.crs.PlateCarree())
ax.add_feature(COUNTIES, facecolor="none", edgecolor='black', alpha=0.25, linewidth=0.2)
if barb:
# Get barb netCDF variable name appropriate for requested variable (from fieldinfo module).
info = fieldinfo.nsc[barb]
if debug:
print("found nsc in fieldinfo.py. Using",info)
if args.barb == 'wind10m': u,v = getvar(wrfnc, 'uvmet10', units='kt')
if args.barb == 'shr06':
u = getvar(wrfnc, 'USHR6')*1.93
v = getvar(wrfnc, 'VSHR6')*1.93
u.attrs['units'] = 'kt'
v.attrs['units'] = 'kt'
# Density of barbs stays the same, no matter the domain size (padding)
# larger domain = greater stride
skip = int(round(np.max([(padding[0]+padding[1]), (padding[2]+padding[3])])/50))
if args.fill == 'crefuh': alpha=0.6
else: alpha=1.0
if debug: print("plotBarbs: starting barbs")
# barbs already oriented with map projection. In Basemap, we needed to use m.rotate_vector().
cs2 = ax.barbs(x[::skip*stride,::skip*stride], y[::skip*stride,::skip*stride],
to_np(u)[::skip*stride,::skip*stride], to_np(v)[::skip*stride,::skip*stride], color='black',
alpha=alpha, length=5, linewidth=0.25, sizes={'emptybarb':0.05} )
fineprint0 += "wind barb (" + u.units + ") "
if contour:
# Get netCDF variable name appropriate for requested variable from fieldinfo module.
info = fieldinfo.nsc[contour]
if debug:
print("found nsc in fieldinfo.py. Using",info)
cvar = getvar(wrfnc, info['fname'][0])
if 'units' in info.keys():
cvar.metpy.convert_units(info['units'])
levels = info['levels']
# could use levels from fieldinfo module, but default is often less cluttered.
alpha=0.4
if debug: print("starting "+contour+" contours")
cr = ax.contour(x[::stride,::stride], y[::stride,::stride],
cvar[::stride,::stride], levels=levels, colors='black', alpha=alpha,
linewidths=0.75)
clab = ax.clabel(cr, inline=False, fmt='%.0f', fontsize=6)
fineprint0 += "contour "+contour+" (" + cvar.units + ") "
for lon,lat,stepid,trackid,u,v,pngfile in zip(df.Centroid_Lon, df.Centroid_Lat,df.Step_ID,df.Track_ID,df.Storm_Motion_U,df.Storm_Motion_V,pngfiles):
fineprint = fineprint0 + "\nwrfout " + os.path.realpath(wrfout)
if not no_mask:
fineprint += "\npatches "+patches
fineprint += "\ntracks "+tracks
fineprint += "\ntrackid "+trackid
fineprint += "\ncreated "+str(datetime.datetime.now(tz=None)).split('.')[0]
if not no_fineprint: # show fineprint
fineprint_obj.set_text(fineprint)
x, y = WRF_proj.transform_point(lon, lat, cartopy.crs.PlateCarree()) # Transform lon/lat to x and y (in meters) in WRF projection.
ax.set_extent([x-padding[0]*1000., x+padding[1]*1000., y-padding[2]*1000., y+padding[3]*1000.], crs=WRF_proj)
track_id_int = int(trackid.split('_')[-1])
step_id_int = int(stepid.split('_')[-1])
# Contour object mask
if not no_mask:
# Find matching mask track id and step. For some reason, steps start with 1 in netCDF patches file
matches = (mtrack_ids == track_id_int) & (mtrack_steps == step_id_int+1)
        if not any(matches):
            pdb.set_trace()
        ip = np.where(matches)[0][0]
tolerance = 0.025 # TODO: figure out why centroid of csv object and nc patch differ at all
if np.abs(lon-mask_centroid_lons[ip]) > tolerance:
print(stepid,lon,mask_centroid_lons[ip])
if np.abs(lat-mask_centroid_lats[ip]) > tolerance:
print(stepid,lat,mask_centroid_lats[ip])
mask = masks[ip]
mlon = mlons[ip]
mlat = mlats[ip]
mcntr = ax.contour(mlon, mlat, mask, levels=[0,10], colors='black', alpha=0.6,
linewidths=2., linestyles="solid", zorder=2, transform=cartopy.crs.PlateCarree())
# Update axes labels (distance along axes).
update_scale_labels(scale_xy)
if arrow:
# Storm motion vector points from previous location to present location.
smv = ax.arrow(x-u, y-v, u, v, color=mcntr.colors, alpha=mcntr.get_alpha(), # Can't get head to show. Tried quiver, plot, head_width, head_length..., annotate...
linewidth=1, zorder=2, capstyle='round', transform=WRF_proj) # tried length_includes_head=True, but zero-size gives ValueError about shape Nx2 needed.
# Save image.
plt.savefig(pngfile, dpi=175)
print('created ' + os.path.realpath(pngfile))
if arrow: smv.remove()
# Remove object mask contour
if not no_mask:
for i in mcntr.collections: i.remove()
if debug: pdb.set_trace()
plt.close(fig)
print("to sort -2 -1 +0 +1 +2 numerically:")
print("ls d01*png | sort -g -k 1."+str(len(stepid)+2))
print("to trim whitespace:")
print("convert -crop 980x1012+390+173 in.png out.png")
|
[
"numpy.abs",
"argparse.ArgumentParser",
"pandas.read_csv",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.figure",
"os.path.isfile",
"numpy.arange",
"matplotlib.colors.ListedColormap",
"netCDF4.Dataset",
"wrf.get_cartopy",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.max",
"wrf.getvar",
"datetime.timedelta",
"wrf.latlon_coords",
"cartopy.io.shapereader.Reader",
"datetime.datetime.now",
"os.path.realpath",
"datetime.datetime.strptime",
"matplotlib.use",
"cartopy.feature.STATES.with_scale",
"sys.exit",
"matplotlib.pyplot.annotate",
"wrf.to_np",
"numpy.where",
"pdb.set_trace",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.savefig"
] |
[((511, 532), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (525, 532), False, 'import matplotlib\n'), ((679, 808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot WRF and SPC storm reports"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Plot WRF and SPC storm reports',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (702, 808), False, 'import argparse\n'), ((4536, 4595), 'pandas.read_csv', 'pd.read_csv', (['tracks'], {'parse_dates': "['Run_Date', 'Valid_Date']"}), "(tracks, parse_dates=['Run_Date', 'Valid_Date'])\n", (4547, 4595), True, 'import pandas as pd\n'), ((6570, 6605), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["info['cmap']"], {}), "(info['cmap'])\n", (6591, 6605), True, 'import matplotlib.colors as colors\n'), ((6911, 6931), 'netCDF4.Dataset', 'Dataset', (['wrfout', '"""r"""'], {}), "(wrfout, 'r')\n", (6918, 6931), False, 'from netCDF4 import Dataset\n'), ((7167, 7186), 'wrf.getvar', 'getvar', (['wrfnc', 'fill'], {}), '(wrfnc, fill)\n', (7173, 7186), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((7203, 7222), 'wrf.latlon_coords', 'latlon_coords', (['cvar'], {}), '(cvar)\n', (7216, 7222), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((7297, 7314), 'wrf.get_cartopy', 'get_cartopy', (['cvar'], {}), '(cvar)\n', (7308, 7314), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((7830, 7858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7840, 7858), True, 'import matplotlib.pyplot as plt\n'), ((7893, 7922), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'WRF_proj'}), '(projection=WRF_proj)\n', (7901, 7922), True, 'import matplotlib.pyplot as plt\n'), ((8162, 8276), 'matplotlib.pyplot.annotate', 'plt.annotate', ([], {'text': 'fineprint0', 'xy': '(0, 5)', 'xycoords': "('axes fraction', 'figure pixels')", 'va': '"""bottom"""', 'fontsize': '(4)'}), "(text=fineprint0, xy=(0, 5), xycoords=('axes fraction',\n 'figure pixels'), va='bottom', fontsize=4)\n", (8174, 8276), True, 'import matplotlib.pyplot as plt\n'), ((8663, 8748), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cfill'], {'ax': 'ax', 'format': '"""%.0f"""', 'shrink': '(0.52)', 'orientation': '"""horizontal"""'}), "(cfill, ax=ax, format='%.0f', shrink=0.52, orientation='horizontal'\n )\n", (8675, 8748), True, 'import matplotlib.pyplot as plt\n'), ((16697, 16711), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (16706, 16711), True, 'import matplotlib.pyplot as plt\n'), ((4825, 4836), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4833, 4836), False, 'import sys\n'), ((5457, 5468), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5465, 5468), False, 'import sys\n'), ((5838, 5849), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5846, 5849), False, 'import sys\n'), ((6002, 6023), 'netCDF4.Dataset', 'Dataset', (['patches', '"""r"""'], {}), "(patches, 'r')\n", (6009, 6023), False, 'from netCDF4 import Dataset\n'), ((6705, 6740), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'timeshift'}), '(hours=timeshift)\n', (6723, 6740), False, 'import datetime\n'), ((7057, 7068), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7065, 7068), False, 'import sys\n'), ((7621, 7646), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (7644, 7646), False, 'import cartopy\n'), ((7648, 7681), 'wrf.to_np', 'to_np', (['wrflon[::stride, 
::stride]'], {}), '(wrflon[::stride, ::stride])\n', (7653, 7681), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((7682, 7715), 'wrf.to_np', 'to_np', (['wrflat[::stride, ::stride]'], {}), '(wrflat[::stride, ::stride])\n', (7687, 7715), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((7938, 7978), 'cartopy.feature.STATES.with_scale', 'cartopy.feature.STATES.with_scale', (['"""10m"""'], {}), "('10m')\n", (7971, 7978), False, 'import cartopy\n'), ((8419, 8430), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8427, 8430), False, 'import sys\n'), ((8587, 8618), 'wrf.to_np', 'to_np', (['cvar[::stride, ::stride]'], {}), '(cvar[::stride, ::stride])\n', (8592, 8618), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((9498, 9529), 'wrf.getvar', 'getvar', (['wrfnc', "info['fname'][1]"], {}), "(wrfnc, info['fname'][1])\n", (9504, 9529), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((9542, 9573), 'wrf.getvar', 'getvar', (['wrfnc', "info['fname'][2]"], {}), "(wrfnc, info['fname'][2])\n", (9548, 9573), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((11755, 11885), 'cartopy.io.shapereader.Reader', 'cartopy.io.shapereader.Reader', (['"""/glade/work/ahijevyc/share/shapeFiles/cb_2013_us_county_500k/cb_2013_us_county_500k.shp"""'], {}), "(\n '/glade/work/ahijevyc/share/shapeFiles/cb_2013_us_county_500k/cb_2013_us_county_500k.shp'\n )\n", (11784, 11885), False, 'import cartopy\n'), ((13554, 13585), 'wrf.getvar', 'getvar', (['wrfnc', "info['fname'][0]"], {}), "(wrfnc, info['fname'][0])\n", (13560, 13585), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((16460, 16489), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pngfile'], {'dpi': '(175)'}), '(pngfile, dpi=175)\n', (16471, 16489), True, 'import matplotlib.pyplot as plt\n'), ((16681, 16696), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (16694, 16696), False, 'import pdb\n'), ((3694, 3721), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(7)'}), '(hours=7)\n', (3712, 3721), False, 'import datetime\n'), ((3737, 3765), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(36)'}), '(hours=36)\n', (3755, 3765), False, 'import datetime\n'), ((12040, 12065), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (12063, 12065), False, 'import cartopy\n'), ((12403, 12439), 'wrf.getvar', 'getvar', (['wrfnc', '"""uvmet10"""'], {'units': '"""kt"""'}), "(wrfnc, 'uvmet10', units='kt')\n", (12409, 12439), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((14339, 14363), 'os.path.realpath', 'os.path.realpath', (['wrfout'], {}), '(wrfout)\n', (14355, 14363), False, 'import os\n'), ((14712, 14737), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (14735, 14737), False, 'import cartopy\n'), ((2649, 2690), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['d', '"""%Y%m%d%H"""'], {}), "(d, '%Y%m%d%H')\n", (2675, 2690), False, 'import datetime\n'), ((2805, 2846), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['d', '"""%Y%m%d%H"""'], {}), "(d, '%Y%m%d%H')\n", (2831, 2846), False, 'import datetime\n'), ((5605, 5622), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (5619, 5622), False, 'import os\n'), ((10058, 10071), 'wrf.to_np', 'to_np', (['max_uh'], {}), '(max_uh)\n', (10063, 10071), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((10244, 10257), 'wrf.to_np', 'to_np', (['max_uh'], {}), 
'(max_uh)\n', (10249, 10257), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((11041, 11054), 'wrf.to_np', 'to_np', (['min_uh'], {}), '(min_uh)\n', (11046, 11054), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((11233, 11246), 'wrf.to_np', 'to_np', (['min_uh'], {}), '(min_uh)\n', (11238, 11246), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((12481, 12503), 'wrf.getvar', 'getvar', (['wrfnc', '"""USHR6"""'], {}), "(wrfnc, 'USHR6')\n", (12487, 12503), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((12521, 12543), 'wrf.getvar', 'getvar', (['wrfnc', '"""VSHR6"""'], {}), "(wrfnc, 'VSHR6')\n", (12527, 12543), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((13117, 13125), 'wrf.to_np', 'to_np', (['u'], {}), '(u)\n', (13122, 13125), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((13156, 13164), 'wrf.to_np', 'to_np', (['v'], {}), '(v)\n', (13161, 13164), False, 'from wrf import to_np, getvar, get_cartopy, latlon_coords\n'), ((15321, 15336), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15334, 15336), False, 'import pdb\n'), ((15447, 15483), 'numpy.abs', 'np.abs', (['(lon - mask_centroid_lons[ip])'], {}), '(lon - mask_centroid_lons[ip])\n', (15453, 15483), True, 'import numpy as np\n'), ((15559, 15595), 'numpy.abs', 'np.abs', (['(lat - mask_centroid_lats[ip])'], {}), '(lat - mask_centroid_lats[ip])\n', (15565, 15595), True, 'import numpy as np\n'), ((16513, 16538), 'os.path.realpath', 'os.path.realpath', (['pngfile'], {}), '(pngfile)\n', (16529, 16538), False, 'import os\n'), ((10660, 10675), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10673, 10675), False, 'import pdb\n'), ((11566, 11581), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (11579, 11581), False, 'import pdb\n'), ((12747, 12805), 'numpy.max', 'np.max', (['[padding[0] + padding[1], padding[2] + padding[3]]'], {}), '([padding[0] + padding[1], padding[2] + padding[3]])\n', (12753, 12805), True, 'import numpy as np\n'), ((15256, 15273), 'numpy.where', 'np.where', (['matches'], {}), '(matches)\n', (15264, 15273), True, 'import numpy as np\n'), ((15894, 15919), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (15917, 15919), False, 'import cartopy\n'), ((10283, 10298), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (10292, 10298), True, 'import numpy as np\n'), ((11272, 11291), 'numpy.arange', 'np.arange', (['(6)', '(0)', '(-1)'], {}), '(6, 0, -1)\n', (11281, 11291), True, 'import numpy as np\n'), ((14534, 14564), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'None'}), '(tz=None)\n', (14555, 14564), False, 'import datetime\n')]
|
import random
import matplotlib
import numpy
from matplotlib import pyplot as plt
from matplotlib.ticker import PercentFormatter
from Resources.data_loader import load_data
from Utils.CrossEntropy import CrossEntropySolver
from Algorithms.CrossEntropy.Solver.ACCADE_cross_entropy_solver import ACCADECrossEntropySolver
from Algorithms.CrossEntropy.Solver.DANE_cross_entropy_solver import DANECrossEntropySolver
from Algorithms.CrossEntropy.Solver.GIANT_cross_entropy_solver import GIANTCrossEntropySolver
from Algorithms.CrossEntropy.Solver.FedGD_cross_entropy_solver import FedGDCrossEntropySolver
from Algorithms.CrossEntropy.Solver.Fedsplit_cross_entropy_solver import FedSplitCrossEntropySolver
from keras.datasets import fashion_mnist
import sys
from constants import color_list, marker_list, GS_DCA, DCA_ONLY, GS_SDR, SDR_ONLY, PERFECT_AGGREGATION, \
first_order_list, second_order_list, GBMA, THRESHOLD, DC_FRAMEWORK
home_dir = '../'
sys.path.append(home_dir)
class CrossEntropyDemo(object):
def __init__(self, data_name, max_iter, repeat, gamma, sigma, p, m, distance_list, data_size_list):
self.data_name = data_name
self.max_iter = max_iter
self.repeat = repeat
self.gamma = gamma
self.sigma = sigma
self.p = p
self.m = m
self.distance_list = distance_list
self.data_size_list = data_size_list
self.n = None
self.d = None
self.x_train = None
self.x_test = None
self.y_train = None
self.y_test = None
self.w_opt = None
self.cond_num = None
self.num_class = None
self.shards = None
def fit(self, x_train, y_train, shards, x_test, y_test):
self.x_train = x_train
self.y_train = y_train
self.shards = shards
self.x_test = x_test
self.y_test = y_test
self.n, self.d = self.x_train.shape
print(self.x_train.shape)
print(self.y_train.shape)
self.num_class = numpy.max(self.y_train) + 1
file_name = home_dir + 'Resources/' + self.data_name + '_optimal.npz'
npz_file = numpy.load(file_name)
self.w_opt = npz_file['w_opt']
print(self.w_opt)
print(self.w_opt.shape)
def perform_training(self, tau_list, k_list, modes, is_search=True, newton_iter=100):
for r in range(self.repeat):
for i in range(len(k_list)):
for j in range(len(tau_list)):
print('repeat ' + str(r) + ' : k = ' + str(k_list[i]) + ' , tau = ' + str(tau_list[j]))
                    h_mat = (numpy.random.randn(self.max_iter, k_list[i], self.m)
                             + 1j * numpy.random.randn(self.max_iter, k_list[i], self.m)) / numpy.sqrt(2)
for device in range(self.m):
PL = (10 ** 2) * ((self.distance_list[device] / 1) ** (-3.76))
h_mat[:, :, device] = numpy.sqrt(PL) * h_mat[:, :, device]
solver = ACCADECrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test,
opt_mode=DCA_ONLY,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_ACCADE_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_GS-DCA.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = FedGDCrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=DC_FRAMEWORK,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_FedGD_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_DC_FRAMEWORK.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = FedSplitCrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=THRESHOLD,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_FedSplit_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_THRESHOLD.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = DANECrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_DANE_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_DCA only.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = GIANTCrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_GIANT_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_DCA only.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
del solver
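    # Each pass of the loops above trains ACCADE, FedGD, FedSplit, DANE and GIANT on the
    # same channel realisations h_mat and saves per-solver err/acc curves under
    # Outputs/cross_entropy_demo/, keyed by antenna count k, tau and repeat index.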
def plot_results_versus_iteration(self, data_name, k, tau, modes, solvers, repeat, max_iter, legends):
err_mat = numpy.zeros((len(modes) + 1, repeat, max_iter))
acc_mat = numpy.zeros((len(modes) + 1, repeat, max_iter))
# centralized
for r in range(repeat):
file_name = home_dir + 'Outputs/centralized_training_demo/centralized_training_demo_' + data_name + '_repeat_' + str(
r) + '.npz'
npz_file = numpy.load(file_name)
err_mat[0][r] = npz_file['err']
acc_mat[0][r] = npz_file['acc']
for j in range(len(solvers)):
for r in range(repeat):
file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_' + solvers[
j] + '_' + data_name + '_antenna_' + str(
k) + '_tau_' + str(tau) + '_repeat_' + str(r) + '_' + modes[j] + '.npz'
npz_file = numpy.load(file_name)
# print(npz_file['acc'])
# print(npz_file['err'])
err_mat[j+1][r] = npz_file['err']
acc_mat[j+1][r] = npz_file['acc']
fig = plt.figure(figsize=(9, 8))
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
line_list = []
for i in range(len(modes)+1):
line, = plt.semilogy(numpy.median(err_mat[i], axis=0), color=color_list[i], linestyle='-',
marker=marker_list[i],
markerfacecolor='none', ms=7, markeredgewidth=2.5, linewidth=2.5)
line_list.append(line)
plt.legend(line_list, legends, fontsize=20)
plt.xlabel('Communication Rounds', fontsize=20)
plt.ylabel('Training Loss', fontsize=20)
plt.xlim(0, max_iter - 1)
plt.ylim(0.25, 2.2)
plt.tight_layout()
plt.grid()
image_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_err_' + data_name + '_antenna_' + str(
k) + '_tau_' + str(tau) + '.pdf'
fig.savefig(image_name, format='pdf', dpi=1200)
plt.show()
fig = plt.figure(figsize=(9, 8))
line_list = []
for i in range(len(modes)+1):
line, = plt.plot(numpy.median(acc_mat[i], axis=0), color=color_list[i], linestyle='-',
marker=marker_list[i],
markerfacecolor='none', ms=7, markeredgewidth=2.5, linewidth=2.5, clip_on=False)
line_list.append(line)
plt.legend(line_list, legends, fontsize=20)
plt.xlabel('Communication Rounds', fontsize=20)
plt.ylabel('Test Accuracy', fontsize=20)
plt.xlim(0, max_iter - 1)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
plt.tight_layout()
plt.grid()
image_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_acc_' + data_name + '_antenna_' + str(
k) + '_tau_' + str(tau) + '.pdf'
fig.savefig(image_name, format='pdf', dpi=1200)
plt.show()
def normalization(x_train, x_test):
mean = numpy.mean(x_train)
std_ev = numpy.sqrt(numpy.var(x_train))
normalized_x_train = numpy.divide(numpy.subtract(x_train, mean), std_ev)
mean = numpy.mean(x_test)
std_ev = numpy.sqrt(numpy.var(x_test))
normalized_x_test = numpy.divide(numpy.subtract(x_test, mean), std_ev)
return normalized_x_train, normalized_x_test
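# Added note (not in the original script): normalization() standardizes each split
# to zero mean and unit variance, but it computes the statistics of x_test from
# x_test itself rather than reusing the training-set mean and variance.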
if __name__ == '__main__':
max_iter = 25
repeat = 5
gamma = 1e-8
sigma = 1
tau = numpy.sqrt(10)
k = 5
p = 1
m = 10
is_search = True
newton_iter = 50
datasets = ['fashion_mnist']
tau_list = [1e-9]
k_list = [5]
# modes = [GS_DCA, PERFECT_AGGREGATION, DC_FRAMEWORK, THRESHOLD, DCA_ONLY, DCA_ONLY]
# solvers = ['ACCADE', 'ACCADE', 'FedGD', 'FedSplit', 'GIANT', 'DANE']
# legends = ['Proposed Algorithm', 'Baseline 0', 'Baseline 1', 'Baseline 2', 'Baseline 3', 'Baseline 4']
modes = [GS_DCA, DC_FRAMEWORK, THRESHOLD, DCA_ONLY, DCA_ONLY]
solvers = ['ACCADE', 'FedGD', 'FedSplit', 'GIANT', 'DANE']
legends = ['Baseline 0', 'Proposed Algorithm', 'Baseline 1', 'Baseline 2', 'Baseline 3', 'Baseline 4']
for data_name in datasets:
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
train_n = x_train.shape[0]
test_n = x_test.shape[0]
print(x_train.shape)
print(y_test.shape)
x_train = x_train.reshape(train_n, 28 * 28)
idx = numpy.argsort(y_train)
# idx = numpy.random.permutation(train_n)
y_train = numpy.array(y_train).reshape(train_n, 1)
x_test = x_test.reshape(test_n, 28 * 28)
y_test = numpy.array(y_test).reshape(test_n, 1)
x_train, x_test = normalization(x_train, x_test)
# non-iid data distribution construction
# print(idx)
x_train = x_train[idx]
y_train = y_train[idx]
shard_size = train_n // (6 * m)
sub_shards = [range(i, i + shard_size) for i in range(0, 6 * shard_size * m, shard_size)]
shard_ls = random.sample(range(6 * m), k=6 * m)
# first_shards = [sub_shards[shard_ls[i]] for i in range(0, 2 * m, 2)]
# second_shards = [sub_shards[shard_ls[i + 1]] for i in range(0, 2 * m, 2)]
# shards = [list(sub_shards[shard_ls[i]]) + list(sub_shards[shard_ls[i+1]]) for i in range(0, 2 * m, 2)]
shards = [list(sub_shards[shard_ls[i]]) + list(sub_shards[shard_ls[i + 1]]) + list(
sub_shards[shard_ls[i + 2]]) + list(sub_shards[shard_ls[i + 3]]) + list(sub_shards[shard_ls[i + 4]]) + list(
sub_shards[shard_ls[i + 5]]) for i
in range(0, 6 * m, 6)]
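        # Added note: this builds a non-IID split -- the indices were sorted by label
        # (numpy.argsort above), cut into 6*m equal shards, and each of the m clients
        # receives 6 randomly assigned shards, so every client sees only a few classes.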
# print(shards[0])
# heterogeneity construction for data size and distance
distance_list = numpy.random.randint(100, 120, size=m)
# distance_list[0: int(m / 10)] = numpy.random.randint(5, 10, size=int(m / 10))
# distance_list[int(m / 10):] = numpy.random.randint(100, 120, size=9 * int(m / 10))
perm = numpy.random.permutation(m)
distance_list = distance_list[perm]
# print(distance_list)
data_size_list = numpy.zeros(m, dtype=int)
data_size_list[0:m] = 6 * shard_size
# data_size_list[0: int(m / 10)] = numpy.random.randint(int(0.08 * s), int(0.1 * s + 1), size=int(m / 10))
# data_size_list[int(m / 10):] = numpy.random.randint(int(1 * s), int(1.1 * s + 1), size=9 * int(m / 10))
perm = numpy.random.permutation(m)
data_size_list = data_size_list[perm]
demo = CrossEntropyDemo(data_name, max_iter, repeat, gamma, sigma, p, m, distance_list, data_size_list)
demo.fit(x_train, y_train, shards, x_test, y_test)
demo.perform_training(tau_list, k_list, modes, is_search=is_search, newton_iter=newton_iter)
for k in k_list:
for tau in tau_list:
demo.plot_results_versus_iteration(data_name, k, tau, modes, solvers, repeat, max_iter + 1, legends)
|
[
"numpy.load",
"Algorithms.CrossEntropy.Solver.GIANT_cross_entropy_solver.GIANTCrossEntropySolver",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.random.randint",
"matplotlib.pyplot.gca",
"numpy.sqrt",
"matplotlib.pyplot.tight_layout",
"Algorithms.CrossEntropy.Solver.FedGD_cross_entropy_solver.FedGDCrossEntropySolver",
"sys.path.append",
"numpy.random.randn",
"numpy.max",
"Algorithms.CrossEntropy.Solver.DANE_cross_entropy_solver.DANECrossEntropySolver",
"numpy.var",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.median",
"matplotlib.pyplot.legend",
"numpy.random.permutation",
"matplotlib.pyplot.ylabel",
"numpy.savez",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"numpy.subtract",
"numpy.zeros",
"Algorithms.CrossEntropy.Solver.ACCADE_cross_entropy_solver.ACCADECrossEntropySolver",
"numpy.array",
"Algorithms.CrossEntropy.Solver.Fedsplit_cross_entropy_solver.FedSplitCrossEntropySolver",
"matplotlib.pyplot.xlabel",
"matplotlib.ticker.PercentFormatter",
"keras.datasets.fashion_mnist.load_data"
] |
[((950, 975), 'sys.path.append', 'sys.path.append', (['home_dir'], {}), '(home_dir)\n', (965, 975), False, 'import sys\n'), ((10791, 10810), 'numpy.mean', 'numpy.mean', (['x_train'], {}), '(x_train)\n', (10801, 10810), False, 'import numpy\n'), ((10943, 10961), 'numpy.mean', 'numpy.mean', (['x_test'], {}), '(x_test)\n', (10953, 10961), False, 'import numpy\n'), ((11232, 11246), 'numpy.sqrt', 'numpy.sqrt', (['(10)'], {}), '(10)\n', (11242, 11246), False, 'import numpy\n'), ((2131, 2152), 'numpy.load', 'numpy.load', (['file_name'], {}), '(file_name)\n', (2141, 2152), False, 'import numpy\n'), ((8800, 8826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 8)'}), '(figsize=(9, 8))\n', (8810, 8826), True, 'from matplotlib import pyplot as plt\n'), ((9306, 9349), 'matplotlib.pyplot.legend', 'plt.legend', (['line_list', 'legends'], {'fontsize': '(20)'}), '(line_list, legends, fontsize=20)\n', (9316, 9349), True, 'from matplotlib import pyplot as plt\n'), ((9358, 9405), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Communication Rounds"""'], {'fontsize': '(20)'}), "('Communication Rounds', fontsize=20)\n", (9368, 9405), True, 'from matplotlib import pyplot as plt\n'), ((9414, 9454), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Training Loss"""'], {'fontsize': '(20)'}), "('Training Loss', fontsize=20)\n", (9424, 9454), True, 'from matplotlib import pyplot as plt\n'), ((9463, 9488), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(max_iter - 1)'], {}), '(0, max_iter - 1)\n', (9471, 9488), True, 'from matplotlib import pyplot as plt\n'), ((9497, 9516), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.25)', '(2.2)'], {}), '(0.25, 2.2)\n', (9505, 9516), True, 'from matplotlib import pyplot as plt\n'), ((9525, 9543), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9541, 9543), True, 'from matplotlib import pyplot as plt\n'), ((9552, 9562), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9560, 9562), True, 'from matplotlib import pyplot as plt\n'), ((9791, 9801), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9799, 9801), True, 'from matplotlib import pyplot as plt\n'), ((9817, 9843), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 8)'}), '(figsize=(9, 8))\n', (9827, 9843), True, 'from matplotlib import pyplot as plt\n'), ((10209, 10252), 'matplotlib.pyplot.legend', 'plt.legend', (['line_list', 'legends'], {'fontsize': '(20)'}), '(line_list, legends, fontsize=20)\n', (10219, 10252), True, 'from matplotlib import pyplot as plt\n'), ((10261, 10308), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Communication Rounds"""'], {'fontsize': '(20)'}), "('Communication Rounds', fontsize=20)\n", (10271, 10308), True, 'from matplotlib import pyplot as plt\n'), ((10317, 10357), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test Accuracy"""'], {'fontsize': '(20)'}), "('Test Accuracy', fontsize=20)\n", (10327, 10357), True, 'from matplotlib import pyplot as plt\n'), ((10366, 10391), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(max_iter - 1)'], {}), '(0, max_iter - 1)\n', (10374, 10391), True, 'from matplotlib import pyplot as plt\n'), ((10465, 10483), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10481, 10483), True, 'from matplotlib import pyplot as plt\n'), ((10492, 10502), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10500, 10502), True, 'from matplotlib import pyplot as plt\n'), ((10731, 10741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10739, 10741), True, 'from matplotlib import 
pyplot as plt\n'), ((10835, 10853), 'numpy.var', 'numpy.var', (['x_train'], {}), '(x_train)\n', (10844, 10853), False, 'import numpy\n'), ((10893, 10922), 'numpy.subtract', 'numpy.subtract', (['x_train', 'mean'], {}), '(x_train, mean)\n', (10907, 10922), False, 'import numpy\n'), ((10986, 11003), 'numpy.var', 'numpy.var', (['x_test'], {}), '(x_test)\n', (10995, 11003), False, 'import numpy\n'), ((11042, 11070), 'numpy.subtract', 'numpy.subtract', (['x_test', 'mean'], {}), '(x_test, mean)\n', (11056, 11070), False, 'import numpy\n'), ((11981, 12006), 'keras.datasets.fashion_mnist.load_data', 'fashion_mnist.load_data', ([], {}), '()\n', (12004, 12006), False, 'from keras.datasets import fashion_mnist\n'), ((12199, 12221), 'numpy.argsort', 'numpy.argsort', (['y_train'], {}), '(y_train)\n', (12212, 12221), False, 'import numpy\n'), ((13514, 13552), 'numpy.random.randint', 'numpy.random.randint', (['(100)', '(120)'], {'size': 'm'}), '(100, 120, size=m)\n', (13534, 13552), False, 'import numpy\n'), ((13749, 13776), 'numpy.random.permutation', 'numpy.random.permutation', (['m'], {}), '(m)\n', (13773, 13776), False, 'import numpy\n'), ((13877, 13902), 'numpy.zeros', 'numpy.zeros', (['m'], {'dtype': 'int'}), '(m, dtype=int)\n', (13888, 13902), False, 'import numpy\n'), ((14192, 14219), 'numpy.random.permutation', 'numpy.random.permutation', (['m'], {}), '(m)\n', (14216, 14219), False, 'import numpy\n'), ((2006, 2029), 'numpy.max', 'numpy.max', (['self.y_train'], {}), '(self.y_train)\n', (2015, 2029), False, 'import numpy\n'), ((8117, 8138), 'numpy.load', 'numpy.load', (['file_name'], {}), '(file_name)\n', (8127, 8138), False, 'import numpy\n'), ((10436, 10455), 'matplotlib.ticker.PercentFormatter', 'PercentFormatter', (['(1)'], {}), '(1)\n', (10452, 10455), False, 'from matplotlib.ticker import PercentFormatter\n'), ((8581, 8602), 'numpy.load', 'numpy.load', (['file_name'], {}), '(file_name)\n', (8591, 8602), False, 'import numpy\n'), ((9038, 9070), 'numpy.median', 'numpy.median', (['err_mat[i]'], {'axis': '(0)'}), '(err_mat[i], axis=0)\n', (9050, 9070), False, 'import numpy\n'), ((9934, 9966), 'numpy.median', 'numpy.median', (['acc_mat[i]'], {'axis': '(0)'}), '(acc_mat[i], axis=0)\n', (9946, 9966), False, 'import numpy\n'), ((12290, 12310), 'numpy.array', 'numpy.array', (['y_train'], {}), '(y_train)\n', (12301, 12310), False, 'import numpy\n'), ((12397, 12416), 'numpy.array', 'numpy.array', (['y_test'], {}), '(y_test)\n', (12408, 12416), False, 'import numpy\n'), ((3022, 3190), 'Algorithms.CrossEntropy.Solver.ACCADE_cross_entropy_solver.ACCADECrossEntropySolver', 'ACCADECrossEntropySolver', ([], {'m': 'self.m', 'h_mat': 'h_mat', 'tau': 'tau_list[j]', 'p': 'self.p', 'x_test': 'self.x_test', 'y_test': 'self.y_test', 'opt_mode': 'DCA_ONLY', 'num_class': 'self.num_class'}), '(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,\n x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY, num_class=\n self.num_class)\n', (3046, 3190), False, 'from Algorithms.CrossEntropy.Solver.ACCADE_cross_entropy_solver import ACCADECrossEntropySolver\n'), ((3886, 3956), 'numpy.savez', 'numpy.savez', (['out_file_name'], {'err': 'err', 'acc': 'acc', 'data_name': 'self.data_name'}), '(out_file_name, err=err, acc=acc, data_name=self.data_name)\n', (3897, 3956), False, 'import numpy\n'), ((3987, 4157), 'Algorithms.CrossEntropy.Solver.FedGD_cross_entropy_solver.FedGDCrossEntropySolver', 'FedGDCrossEntropySolver', ([], {'m': 'self.m', 'h_mat': 'h_mat', 'tau': 'tau_list[j]', 'p': 'self.p', 'x_test': 'self.x_test', 'y_test': 
'self.y_test', 'opt_mode': 'DC_FRAMEWORK', 'num_class': 'self.num_class'}), '(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,\n x_test=self.x_test, y_test=self.y_test, opt_mode=DC_FRAMEWORK,\n num_class=self.num_class)\n', (4010, 4157), False, 'from Algorithms.CrossEntropy.Solver.FedGD_cross_entropy_solver import FedGDCrossEntropySolver\n'), ((4803, 4873), 'numpy.savez', 'numpy.savez', (['out_file_name'], {'err': 'err', 'acc': 'acc', 'data_name': 'self.data_name'}), '(out_file_name, err=err, acc=acc, data_name=self.data_name)\n', (4814, 4873), False, 'import numpy\n'), ((4904, 5075), 'Algorithms.CrossEntropy.Solver.Fedsplit_cross_entropy_solver.FedSplitCrossEntropySolver', 'FedSplitCrossEntropySolver', ([], {'m': 'self.m', 'h_mat': 'h_mat', 'tau': 'tau_list[j]', 'p': 'self.p', 'x_test': 'self.x_test', 'y_test': 'self.y_test', 'opt_mode': 'THRESHOLD', 'num_class': 'self.num_class'}), '(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,\n x_test=self.x_test, y_test=self.y_test, opt_mode=THRESHOLD, num_class=\n self.num_class)\n', (4930, 5075), False, 'from Algorithms.CrossEntropy.Solver.Fedsplit_cross_entropy_solver import FedSplitCrossEntropySolver\n'), ((5726, 5796), 'numpy.savez', 'numpy.savez', (['out_file_name'], {'err': 'err', 'acc': 'acc', 'data_name': 'self.data_name'}), '(out_file_name, err=err, acc=acc, data_name=self.data_name)\n', (5737, 5796), False, 'import numpy\n'), ((5827, 5993), 'Algorithms.CrossEntropy.Solver.DANE_cross_entropy_solver.DANECrossEntropySolver', 'DANECrossEntropySolver', ([], {'m': 'self.m', 'h_mat': 'h_mat', 'tau': 'tau_list[j]', 'p': 'self.p', 'x_test': 'self.x_test', 'y_test': 'self.y_test', 'opt_mode': 'DCA_ONLY', 'num_class': 'self.num_class'}), '(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,\n x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY, num_class=\n self.num_class)\n', (5849, 5993), False, 'from Algorithms.CrossEntropy.Solver.DANE_cross_entropy_solver import DANECrossEntropySolver\n'), ((6631, 6701), 'numpy.savez', 'numpy.savez', (['out_file_name'], {'err': 'err', 'acc': 'acc', 'data_name': 'self.data_name'}), '(out_file_name, err=err, acc=acc, data_name=self.data_name)\n', (6642, 6701), False, 'import numpy\n'), ((6732, 6899), 'Algorithms.CrossEntropy.Solver.GIANT_cross_entropy_solver.GIANTCrossEntropySolver', 'GIANTCrossEntropySolver', ([], {'m': 'self.m', 'h_mat': 'h_mat', 'tau': 'tau_list[j]', 'p': 'self.p', 'x_test': 'self.x_test', 'y_test': 'self.y_test', 'opt_mode': 'DCA_ONLY', 'num_class': 'self.num_class'}), '(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,\n x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY, num_class=\n self.num_class)\n', (6755, 6899), False, 'from Algorithms.CrossEntropy.Solver.GIANT_cross_entropy_solver import GIANTCrossEntropySolver\n'), ((7540, 7610), 'numpy.savez', 'numpy.savez', (['out_file_name'], {'err': 'err', 'acc': 'acc', 'data_name': 'self.data_name'}), '(out_file_name, err=err, acc=acc, data_name=self.data_name)\n', (7551, 7610), False, 'import numpy\n'), ((10400, 10409), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10407, 10409), True, 'from matplotlib import pyplot as plt\n'), ((2603, 2655), 'numpy.random.randn', 'numpy.random.randn', (['self.max_iter', 'k_list[i]', 'self.m'], {}), '(self.max_iter, k_list[i], self.m)\n', (2621, 2655), False, 'import numpy\n'), ((2658, 2671), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (2668, 2671), False, 'import numpy\n'), ((2759, 2772), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (2769, 2772), False, 'import numpy\n'), 
((2955, 2969), 'numpy.sqrt', 'numpy.sqrt', (['PL'], {}), '(PL)\n', (2965, 2969), False, 'import numpy\n'), ((2704, 2756), 'numpy.random.randn', 'numpy.random.randn', (['self.max_iter', 'k_list[i]', 'self.m'], {}), '(self.max_iter, k_list[i], self.m)\n', (2722, 2756), False, 'import numpy\n')]
|
from FEM.Mesh.Geometry import Geometry
from FEM.Mesh.Delaunay import Delaunay
from FEM.PlaneStrain import PlaneStrain
from FEM.Utils.polygonal import roundCorner, giveCoordsCircle
import matplotlib.pyplot as plt
import numpy as np
E = 30*10**(5)
v = 0.25
b = 10
h = 20
he = h/4
ancho_en_h10_in = 18
ancho_en_h20_in = 10
p0 = 200
pp = 1000
ppx = pp*3/5
ppy = -pp*4/5
def darPolinomio(X, Y):
n = len(X)
A = np.zeros([n, n])
B = np.zeros([n, 1])
for i in range(n):
for j in range(n):
A[i, j] = X[i]**j
B[i, 0] = Y[i]
U = np.linalg.solve(A, B)
def f(x):
suma = 0
for i in range(n):
suma += U[i, 0]*x**i
return suma
return f
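# Illustrative check of darPolinomio (added; the sample points below are made up):
# it solves the Vandermonde system A @ U = B with A[i, j] = X[i]**j, so f is the
# unique interpolating polynomial through the given points, e.g.
#   p = darPolinomio(np.array([0, 1, 2]), np.array([1, 3, 7]))
#   p(1)  # -> 3.0, since here p(x) = 1 + x + x**2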
n = 20
parabola = darPolinomio(np.array([0, 10, 20]), np.array(
[0, b-ancho_en_h10_in/2, b-ancho_en_h20_in/2]))
c = [
[0, 0],
[2*b, 0]]
for i in range(1, n):
x = 2*b-parabola(h/n*i)
y = h/n*i
c += [[x, y]]
c += [[2*b-parabola(4*he), 4*he],
[parabola(4*he), 4*he]]
for i in reversed(range(1, n)):
x = parabola(h/n*i)
y = h/n*i
c += [[x, y]]
holes = []
radi = 2
cent = [b, h/2]
vert, seg = giveCoordsCircle(cent, radi, n=50)
hole = {'center': cent, 'segments': seg, 'vertices': vert}
holes += [hole]
params = Delaunay._strdelaunay(constrained=True, delaunay=True, a='0.1', o=2)
geometria = Delaunay(c, params, nvn=2, holes_dict=holes)
geometria.generateSegmentsFromCoords([0, 0], [2*b, 0])
geometria.generateSegmentsFromCoords(
[2*b-parabola(4*he), 4*he], [parabola(4*he), 4*he])
geometria.cbe = geometria.cbFromSegment(-2, 0, 1)
geometria.cbe += geometria.cbFromSegment(-2, 0, 2)
geometria.saveMesh('Mesh_tests/tunel')
geometria.show()
plt.show()
geometria.loadOnSegment(-1, fy=lambda s: -p0)
geometria.mask = None
O = PlaneStrain(geometria, E, v)
O.elementMatrices()
O.ensembling()
O.borderConditions()
O.solveES()
O.postProcess()
plt.show()
|
[
"matplotlib.pyplot.show",
"FEM.PlaneStrain.PlaneStrain",
"numpy.zeros",
"FEM.Mesh.Delaunay.Delaunay",
"FEM.Utils.polygonal.giveCoordsCircle",
"FEM.Mesh.Delaunay.Delaunay._strdelaunay",
"numpy.array",
"numpy.linalg.solve"
] |
[((1155, 1189), 'FEM.Utils.polygonal.giveCoordsCircle', 'giveCoordsCircle', (['cent', 'radi'], {'n': '(50)'}), '(cent, radi, n=50)\n', (1171, 1189), False, 'from FEM.Utils.polygonal import roundCorner, giveCoordsCircle\n'), ((1274, 1342), 'FEM.Mesh.Delaunay.Delaunay._strdelaunay', 'Delaunay._strdelaunay', ([], {'constrained': '(True)', 'delaunay': '(True)', 'a': '"""0.1"""', 'o': '(2)'}), "(constrained=True, delaunay=True, a='0.1', o=2)\n", (1295, 1342), False, 'from FEM.Mesh.Delaunay import Delaunay\n'), ((1355, 1399), 'FEM.Mesh.Delaunay.Delaunay', 'Delaunay', (['c', 'params'], {'nvn': '(2)', 'holes_dict': 'holes'}), '(c, params, nvn=2, holes_dict=holes)\n', (1363, 1399), False, 'from FEM.Mesh.Delaunay import Delaunay\n'), ((1706, 1716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1714, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1817), 'FEM.PlaneStrain.PlaneStrain', 'PlaneStrain', (['geometria', 'E', 'v'], {}), '(geometria, E, v)\n', (1800, 1817), False, 'from FEM.PlaneStrain import PlaneStrain\n'), ((1903, 1913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1911, 1913), True, 'import matplotlib.pyplot as plt\n'), ((417, 433), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (425, 433), True, 'import numpy as np\n'), ((442, 458), 'numpy.zeros', 'np.zeros', (['[n, 1]'], {}), '([n, 1])\n', (450, 458), True, 'import numpy as np\n'), ((571, 592), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (586, 592), True, 'import numpy as np\n'), ((752, 773), 'numpy.array', 'np.array', (['[0, 10, 20]'], {}), '([0, 10, 20])\n', (760, 773), True, 'import numpy as np\n'), ((775, 838), 'numpy.array', 'np.array', (['[0, b - ancho_en_h10_in / 2, b - ancho_en_h20_in / 2]'], {}), '([0, b - ancho_en_h10_in / 2, b - ancho_en_h20_in / 2])\n', (783, 838), True, 'import numpy as np\n')]
|
import cv2
import math
import numpy as np
def get_density_map_gaussian(im, points):
im_density = np.zeros_like(im, dtype=np.float64)
h, w = im_density.shape
if points is None:
return im_density
if points.shape[0] == 1:
x1 = max(0, min(w-1, round(points[0, 0])))
y1 = max(0, min(h-1, round(points[0, 1])))
im_density[y1, x1] = 255
return im_density
for j in range(points.shape[0]):
f_sz = 15
sigma = 4.0
H = np.multiply(cv2.getGaussianKernel(f_sz, sigma), (cv2.getGaussianKernel(f_sz, sigma)).T)
x = min(w-1, max(0, abs(int(math.floor(points[j, 0])))))
y = min(h-1, max(0, abs(int(math.floor(points[j, 1])))))
if x >= w or y >= h:
continue
x1 = x - f_sz//2 + 0
y1 = y - f_sz//2 + 0
x2 = x + f_sz//2 + 1
y2 = y + f_sz//2 + 1
dfx1, dfy1, dfx2, dfy2 = 0, 0, 0, 0
change_H = False
if x1 < 0:
dfx1 = abs(x1) + 0
x1 = 0
change_H = True
if y1 < 0:
dfy1 = abs(y1) + 0
y1 = 0
change_H = True
if x2 > w:
dfx2 = x2 - w
x2 = w
change_H = True
if y2 > h:
dfy2 = y2 - h
y2 = h
change_H = True
x1h, y1h, x2h, y2h = 1 + dfx1, 1 + dfy1, f_sz - dfx2, f_sz - dfy2
if change_H is True:
H = np.multiply(cv2.getGaussianKernel(y2h-y1h+1, sigma), (cv2.getGaussianKernel(x2h-x1h+1, sigma)).T)
im_density[y1:y2, x1:x2] += H
return im_density
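# Illustrative usage sketch (added; the array and points below are placeholders):
#   im = np.zeros((256, 256), dtype=np.uint8)
#   pts = np.array([[40.2, 80.7], [120.0, 60.5]])   # (x, y) head annotations
#   density = get_density_map_gaussian(im, pts)
# Each cv2.getGaussianKernel is normalized to sum to 1, so density.sum() stays close
# to the number of points whenever no kernel window is clipped at the image border.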
|
[
"numpy.zeros_like",
"math.floor",
"cv2.getGaussianKernel"
] |
[((104, 139), 'numpy.zeros_like', 'np.zeros_like', (['im'], {'dtype': 'np.float64'}), '(im, dtype=np.float64)\n', (117, 139), True, 'import numpy as np\n'), ((506, 540), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['f_sz', 'sigma'], {}), '(f_sz, sigma)\n', (527, 540), False, 'import cv2\n'), ((543, 577), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['f_sz', 'sigma'], {}), '(f_sz, sigma)\n', (564, 577), False, 'import cv2\n'), ((1456, 1499), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['(y2h - y1h + 1)', 'sigma'], {}), '(y2h - y1h + 1, sigma)\n', (1477, 1499), False, 'import cv2\n'), ((1498, 1541), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['(x2h - x1h + 1)', 'sigma'], {}), '(x2h - x1h + 1, sigma)\n', (1519, 1541), False, 'import cv2\n'), ((618, 642), 'math.floor', 'math.floor', (['points[j, 0]'], {}), '(points[j, 0])\n', (628, 642), False, 'import math\n'), ((683, 707), 'math.floor', 'math.floor', (['points[j, 1]'], {}), '(points[j, 1])\n', (693, 707), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
"""Simple networks of caches modeled as single caches."""
import random
import numpy as np
from icarus.util import inheritdoc
from icarus.tools import DiscreteDist
from icarus.registry import register_cache_policy, CACHE_POLICY
from .policies import Cache
__all__ = [
'PathCache',
'TreeCache',
'ArrayCache',
'ShardedCache',
]
"""
Note: these "systems" do not implement ANY delay, or any other difference in
behaviour, when fetching or adding data to or from specific nodes -- as the
module docstring says, they are networks of caches modeled as single caches.
"""
@register_cache_policy('PATH')
class PathCache(object):
"""Path of caches
This is not a single-node cache implementation but rather it implements
a path of caching nodes in which requests are fed to the first node of the
path and, in case of a miss, are propagated down to the remaining nodes
of the path. A miss occurs if none of the nodes on the path has the
requested content.
"""
def __init__(self, caches, **kwargs):
"""Constructor
Parameters
----------
caches : array-like
            An array of caching node instances on the path
"""
self._caches = caches
self._len = len(caches)
"""
TODO: Implement all the below methods, with the appropriate "cache"
(implement EDRs as "caches")
"""
def __len__(self):
return self._len
@property
def maxlen(self):
return self._len
def has(self, k):
for c in self._caches:
if c.has(k):
return True
else:
return False
def get(self, k):
for i in range(self._len):
if self._caches[i].get(k):
break
else:
return False
# Put contents on all caches traversed by the retrieved content
for j in range(i):
self._caches[j].put(k)
return True
def put(self, k):
"""Insert an item in the cache if not already inserted.
        If the element is already present in the cache, it will be pushed to the
top of the cache.
Parameters
----------
k : any hashable type
The item to be inserted
Returns
-------
evicted : any hashable type
The evicted object or *None* if no contents were evicted.
"""
for c in self._caches:
c.put(k)
def remove(self, k):
raise NotImplementedError('This method is not implemented')
def position(self, k):
raise NotImplementedError('This method is not implemented')
def dump(self, serialized=True):
dump = [c.dump() for c in self._caches]
return sum(dump, []) if serialized else dump
def clear(self):
for c in self._caches:
c.clear()
@register_cache_policy('TREE')
class TreeCache(object):
"""Path of caches
This is not a single-node cache implementation but rather it implements
a tree of caching nodes in which requests are fed to a random leaf node
and, in case of a miss, are propagated down to the remaining nodes
of the path. A miss occurs if none of the nodes on the path has the
requested content.
Notes
-----
This cache can only be operated in a read-through manner and not in write
through or read/write aside. In other words, before issuing a put, you
must issue a get for the same item. The reason for this limitation is
to ensure that matching get/put requests go through the same randomly
selected node.
"""
def __init__(self, leaf_caches, root_cache, **kwargs):
"""Constructor
Parameters
----------
        leaf_caches : array-like
            An array of leaf caching node instances
        root_cache : Cache
            The root caching node instance
"""
self._leaf_caches = leaf_caches
self._root_cache = root_cache
self._len = sum(len(c) for c in leaf_caches) + len(root_cache)
self._n_leaves = len(leaf_caches)
self._leaf = None
def __len__(self):
return self._len
@property
def maxlen(self):
return self._len
def has(self, k):
raise NotImplementedError('This method is not implemented')
def get(self, k):
self._leaf = random.choice(self._leaf_caches)
if self._leaf.get(k):
return True
else:
if self._root_cache.get(k):
self._leaf.put(k)
return True
else:
return False
def put(self, k):
"""Insert an item in the cache if not already inserted.
        If the element is already present in the cache, it will be pushed to the
top of the cache.
Parameters
----------
k : any hashable type
The item to be inserted
Returns
-------
evicted : any hashable type
The evicted object or *None* if no contents were evicted.
"""
if self._leaf is None:
raise ValueError("You are trying to insert an item not requested before. "
"Tree cache can be used in read-through mode only")
self._leaf.put(k)
self._root_cache.put(k)
def remove(self, k):
raise NotImplementedError('This method is not implemented')
def position(self, k):
raise NotImplementedError('This method is not implemented')
def dump(self, serialized=True):
dump = [c.dump() for c in self._leaf_caches]
dump.append(self._root_cache.dump())
return sum(dump, []) if serialized else dump
def clear(self):
        for c in self._leaf_caches:
            c.clear()
        self._root_cache.clear()
@register_cache_policy('ARRAY')
class ArrayCache(object):
"""Array of caches
This is not a single-node cache implementation but rather it implements
an array of caching nodes in which requests are fed to a random node of
a set.
Notes
-----
This cache can only be operated in a read-through manner and not in write
through or read/write aside. In other words, before issuing a put, you
must issue a get for the same item. The reason for this limitation is
to ensure that matching get/put requests go through the same randomly
selected node.
"""
def __init__(self, caches, weights=None, **kwargs):
"""Constructor
Parameters
----------
caches : array-like
            An array of caching node instances in the array
weights : array-like
Random weights according to which a cache of the array should be
selected to process a given request
"""
self._caches = caches
self._len = sum(len(c) for c in caches)
self._n_caches = len(caches)
self._selected_cache = None
if weights is not None:
if np.abs(np.sum(weights) - 1) > 0.0001:
raise ValueError("weights must sum up to 1")
if len(weights) != self._n_caches:
raise ValueError("weights must have as many elements as nr of caches")
randvar = DiscreteDist(weights)
self.select_cache = lambda : self._caches[randvar.rv() - 1]
else:
self.select_cache = lambda : random.choice(self._caches)
def __len__(self):
return self._len
@property
def maxlen(self):
return self._len
def has(self, k):
raise NotImplementedError('This method is not implemented')
def get(self, k):
self._selected_cache = self.select_cache()
return self._selected_cache.get(k)
def put(self, k):
"""Insert an item in the cache if not already inserted.
        If the element is already present in the cache, it will be pushed to the
top of the cache.
Parameters
----------
k : any hashable type
The item to be inserted
Returns
-------
evicted : any hashable type
The evicted object or *None* if no contents were evicted.
"""
if self._selected_cache is None:
raise ValueError("You are trying to insert an item not requested before. "
"Array cache can be used in read-through mode only")
self._selected_cache.put(k)
def remove(self, k):
raise NotImplementedError('This method is not implemented')
def position(self, k):
raise NotImplementedError('This method is not implemented')
def dump(self, serialized=True):
dump = [c.dump() for c in self._caches]
return sum(dump, []) if serialized else dump
def clear(self):
for c in self._caches:
c.clear()
@register_cache_policy('SHARD')
class ShardedCache(Cache):
"""Set of sharded caches.
Set of caches coordinately storing items. When a request reaches the
caches, the request is forwarded to the specific cache (shard) based on the
outcome of a hash function. So, an item can be stored only by a single
node of the system.
"""
def __init__(self, maxlen, policy='LRU', nodes=4, f_map=None,
policy_attr={}, **kwargs):
"""Constructor
Parameters
----------
maxlen : int
The maximum number of items the cache can store.
policy : str, optional
The eviction policy of each node (e.g., LRU, LFU, FIFO...).
Default is LRU.
nodes : int, optional
The number of nodes, default is 4.
f_map : callable, optional
A callable governing the mapping between items and caching nodes.
It receives as argument a value of an item :math:`k` and returns an
integer between :math:`0` and :math:`nodes - 1` identifying the
target node.
If not specified, the mapping is done by computing the hash of the
given item modulo the number of nodes.
policy_attr : dict, optional
A set of parameters for initializing the underlying caching policy.
Notes
-----
The maxlen parameter refers to the cumulative size of the caches in the
set. The size of each shard is derived dividing maxlen by the number
of nodes.
"""
maxlen = int(maxlen)
if maxlen <= 0:
raise ValueError('maxlen must be positive')
if not isinstance(nodes, int) or nodes <= 0 or nodes > maxlen:
raise ValueError('nodes must be an integer and 0 < nodes <= maxlen')
# If maxlen is not a multiple of nodes, then some nodes have one slot
# more than others
self._node_maxlen = [maxlen // nodes for _ in range(nodes)]
for i in range(maxlen % nodes):
self._node_maxlen[i] += 1
self._maxlen = maxlen
self._node = [CACHE_POLICY[policy](self._node_maxlen[i], **policy_attr)
for i in range(nodes)]
self.f_map = f_map if f_map is not None else lambda k: hash(k) % nodes
@inheritdoc(Cache)
def __len__(self):
return sum(len(s) for s in self._node)
@property
def maxlen(self):
return self._maxlen
@inheritdoc(Cache)
def has(self, k):
return self._node[self.f_map(k)].has(k)
@inheritdoc(Cache)
def get(self, k):
return self._node[self.f_map(k)].get(k)
@inheritdoc(Cache)
def put(self, k):
return self._node[self.f_map(k)].put(k)
@inheritdoc(Cache)
def dump(self, serialized=True):
dump = list(s.dump() for s in self._node)
return sum(dump, []) if serialized else dump
@inheritdoc(Cache)
def remove(self, k):
return self._node[self.f_map(k)].remove(k)
@inheritdoc(Cache)
def clear(self):
for s in self._node:
s.clear()
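# Illustrative usage sketch of ShardedCache (added; assumes 'LRU' is a policy
# registered in CACHE_POLICY, as it is for the standard icarus policies):
#   c = ShardedCache(10, policy='LRU', nodes=4)   # shard sizes become [3, 3, 2, 2]
#   c.put('a')
#   c.has('a')   # True -- 'a' is stored only on shard hash('a') % 4
#   len(c)       # 1, the sum of the occupancies of all shards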
|
[
"numpy.sum",
"icarus.registry.register_cache_policy",
"random.choice",
"icarus.tools.DiscreteDist",
"icarus.util.inheritdoc"
] |
[((622, 651), 'icarus.registry.register_cache_policy', 'register_cache_policy', (['"""PATH"""'], {}), "('PATH')\n", (643, 651), False, 'from icarus.registry import register_cache_policy, CACHE_POLICY\n'), ((2904, 2933), 'icarus.registry.register_cache_policy', 'register_cache_policy', (['"""TREE"""'], {}), "('TREE')\n", (2925, 2933), False, 'from icarus.registry import register_cache_policy, CACHE_POLICY\n'), ((5804, 5834), 'icarus.registry.register_cache_policy', 'register_cache_policy', (['"""ARRAY"""'], {}), "('ARRAY')\n", (5825, 5834), False, 'from icarus.registry import register_cache_policy, CACHE_POLICY\n'), ((8821, 8851), 'icarus.registry.register_cache_policy', 'register_cache_policy', (['"""SHARD"""'], {}), "('SHARD')\n", (8842, 8851), False, 'from icarus.registry import register_cache_policy, CACHE_POLICY\n'), ((11142, 11159), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11152, 11159), False, 'from icarus.util import inheritdoc\n'), ((11301, 11318), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11311, 11318), False, 'from icarus.util import inheritdoc\n'), ((11395, 11412), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11405, 11412), False, 'from icarus.util import inheritdoc\n'), ((11489, 11506), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11499, 11506), False, 'from icarus.util import inheritdoc\n'), ((11583, 11600), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11593, 11600), False, 'from icarus.util import inheritdoc\n'), ((11747, 11764), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11757, 11764), False, 'from icarus.util import inheritdoc\n'), ((11847, 11864), 'icarus.util.inheritdoc', 'inheritdoc', (['Cache'], {}), '(Cache)\n', (11857, 11864), False, 'from icarus.util import inheritdoc\n'), ((4392, 4424), 'random.choice', 'random.choice', (['self._leaf_caches'], {}), '(self._leaf_caches)\n', (4405, 4424), False, 'import random\n'), ((7224, 7245), 'icarus.tools.DiscreteDist', 'DiscreteDist', (['weights'], {}), '(weights)\n', (7236, 7245), False, 'from icarus.tools import DiscreteDist\n'), ((7373, 7400), 'random.choice', 'random.choice', (['self._caches'], {}), '(self._caches)\n', (7386, 7400), False, 'import random\n'), ((6976, 6991), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (6982, 6991), True, 'import numpy as np\n')]
|
# Generate a new search population Qt+1 via crowded tournament selection
import numpy as np
import random
import copy
class Tournament(object):
"""混雑度トーナメント選択
"""
def __init__(self, archive_set):
self._archive_set = copy.deepcopy(archive_set)
def tournament(self):
        # Generate a search population with as many individuals as the archive population
size = int(self._archive_set.shape[0])
        search_set = np.array([], dtype=np.float64)
for i in range(size):
rnd1 = random.randrange(size)
rnd2 = random.randrange(size)
            # Compare by rank first
if self._archive_set[rnd1, 2] < self._archive_set[rnd2, 2]:
search_set = np.append(search_set, self._archive_set[rnd1, :])
elif self._archive_set[rnd1, 2] > self._archive_set[rnd2, 2]:
search_set = np.append(search_set, self._archive_set[rnd2, :])
            # Then compare by crowding distance
elif self._archive_set[rnd1, 3] > self._archive_set[rnd2, 3]:
search_set = np.append(search_set, self._archive_set[rnd1, :])
else:
search_set = np.append(search_set, self._archive_set[rnd2, :])
search_set = search_set.reshape(size, -1)
return search_set
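# Added note: the branch above implements the standard NSGA-II crowded tournament --
# the candidate with the lower rank (column 2) wins, and when ranks are equal the
# candidate with the larger crowding distance (column 3) wins, preserving diversity.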
|
[
"numpy.append",
"copy.deepcopy",
"numpy.array",
"random.randrange"
] |
[((197, 223), 'copy.deepcopy', 'copy.deepcopy', (['archive_set'], {}), '(archive_set)\n', (210, 223), False, 'import copy\n'), ((353, 383), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (361, 383), True, 'import numpy as np\n'), ((435, 457), 'random.randrange', 'random.randrange', (['size'], {}), '(size)\n', (451, 457), False, 'import random\n'), ((477, 499), 'random.randrange', 'random.randrange', (['size'], {}), '(size)\n', (493, 499), False, 'import random\n'), ((625, 674), 'numpy.append', 'np.append', (['search_set', 'self._archive_set[rnd1, :]'], {}), '(search_set, self._archive_set[rnd1, :])\n', (634, 674), True, 'import numpy as np\n'), ((779, 828), 'numpy.append', 'np.append', (['search_set', 'self._archive_set[rnd2, :]'], {}), '(search_set, self._archive_set[rnd2, :])\n', (788, 828), True, 'import numpy as np\n'), ((958, 1007), 'numpy.append', 'np.append', (['search_set', 'self._archive_set[rnd1, :]'], {}), '(search_set, self._archive_set[rnd1, :])\n', (967, 1007), True, 'import numpy as np\n'), ((1056, 1105), 'numpy.append', 'np.append', (['search_set', 'self._archive_set[rnd2, :]'], {}), '(search_set, self._archive_set[rnd2, :])\n', (1065, 1105), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal
from powersimdata.tests.mock_grid import MockGrid
from powersimdata.tests.mock_scenario import MockScenario
from postreise.analyze.generation.emissions import (
generate_emissions_stats,
summarize_emissions_by_bus,
)
@pytest.fixture
def mock_plant():
# plant_id is the index
return {
"plant_id": [101, 102, 103, 104, 105],
"bus_id": [1001, 1002, 1003, 1004, 1005],
"type": ["solar", "wind", "ng", "coal", "dfo"],
"GenFuelCost": [0, 0, 3.3, 4.4, 5.5],
}
@pytest.fixture
def mock_gencost():
# plant_id is the index
return {
"plant_id": [101, 102, 103, 104, 105],
"type": [2] * 5,
"startup": [0] * 5,
"shutdown": [0] * 5,
"n": [3] * 5,
"c2": [1, 2, 3, 4, 5],
"c1": [10, 20, 30, 40, 50],
"c0": [100, 200, 300, 400, 500],
"interconnect": ["Western"] * 5,
}
@pytest.fixture
def mock_pg(mock_plant):
return pd.DataFrame(
{
plant_id: [(i + 1) * p for p in range(4)]
for i, plant_id in enumerate(mock_plant["plant_id"])
},
index=pd.date_range("2019-01-01", periods=4, freq="H"),
)
@pytest.fixture
def scenario(mock_plant, mock_gencost, mock_pg):
return MockScenario(
grid_attrs={"plant": mock_plant, "gencost_before": mock_gencost},
pg=mock_pg,
)
def _test_emissions_structure(emissions, mock_plant, pg):
plant = pd.DataFrame(mock_plant)
plant.set_index("plant_id", inplace=True)
# check data frame structure
err_msg = "generate_emissions_stats should return a data frame"
assert isinstance(emissions, pd.DataFrame), err_msg
for a, b in zip(pg.index.to_numpy(), emissions.index.to_numpy()):
assert a == b, "emissions and pg should have same index"
for a, b in zip(pg.columns.to_numpy(), emissions.columns.to_numpy()):
assert a == b, "emissions and pg should have same columns"
# sanity check values
emissions_from_wind = plant[plant.type == "wind"].index.values
err_msg = "Wind farm does not emit emissions"
assert emissions[emissions_from_wind[0]].sum() == 0, err_msg
emissions_from_solar = plant[plant.type == "solar"].index.values
err_msg = "Solar plant does not emit emissions"
assert emissions[emissions_from_solar[0]].sum() == 0, err_msg
negative_emissions_count = np.sum((emissions < 0).to_numpy().ravel())
assert negative_emissions_count == 0, "No plant should emit negative emissions"
class TestEmissionStatsArguments:
def test_pollutant_value(self, scenario):
with pytest.raises(ValueError) as excinfo:
generate_emissions_stats(scenario, pollutant="CO2")
assert "Unknown pollutant for generate_emissions_stats()" in str(excinfo.value)
def test_method_type(self, scenario):
with pytest.raises(TypeError) as excinfo:
generate_emissions_stats(scenario, method=1)
assert "method must be a str" in str(excinfo.value)
def test_method_value(self, scenario):
with pytest.raises(ValueError) as excinfo:
generate_emissions_stats(scenario, pollutant="nox", method="always-off")
assert "method for nox must be one of: {'simple'}" in str(excinfo.value)
class TestCarbonCalculation:
def test_carbon_calc_always_on(self, scenario, mock_plant):
carbon = generate_emissions_stats(scenario, method="always-on")
_test_emissions_structure(carbon, mock_plant, scenario.state.get_pg())
# check specific values
expected_values = np.array(
[
[0, 0, 4.82, 8.683333, 6.77],
[0, 0, 6.6998, 13.546000, 11.8475],
[0, 0, 9.4472, 21.1873333, 20.3100],
[0, 0, 13.0622, 31.6073333, 32.1575],
]
)
assert_array_almost_equal(
expected_values, carbon.to_numpy(), err_msg="Values do not match expected"
)
def test_carbon_calc_decommit(self, scenario, mock_plant):
carbon = generate_emissions_stats(scenario, method="decommit")
_test_emissions_structure(carbon, mock_plant, scenario.state.get_pg())
# check specific values
expected_values = np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 6.6998, 13.546000, 11.8475],
[0, 0, 9.4472, 21.1873333, 20.3100],
[0, 0, 13.0622, 31.6073333, 32.1575],
]
)
assert_array_almost_equal(
expected_values, carbon.to_numpy(), err_msg="Values do not match expected"
)
def test_carbon_calc_simple(self, scenario, mock_plant):
carbon = generate_emissions_stats(scenario, method="simple")
_test_emissions_structure(carbon, mock_plant, scenario.state.get_pg())
# check specific values
expected_values = np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1.407, 4.004, 4.2],
[0, 0, 2.814, 8.008, 8.4],
[0, 0, 4.221, 12.012, 12.6],
]
)
assert_array_almost_equal(
expected_values, carbon.to_numpy(), err_msg="Values do not match expected"
)
class TestNOxCalculation:
def test_calculate_nox_simple(self, scenario):
expected_values = np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0.000537, 0.002632, 0.007685],
[0, 0, 0.001074, 0.005264, 0.015370],
[0, 0, 0.001611, 0.007896, 0.023055],
]
)
nox = generate_emissions_stats(scenario, pollutant="nox", method="simple")
assert_array_almost_equal(
expected_values, nox.to_numpy(), err_msg="Values do not match expected"
)
def test_calculate_nox_disallowed_method(self, scenario):
with pytest.raises(ValueError):
generate_emissions_stats(scenario, pollutant="nox", method="decommit")
class TestSO2Calculation:
def test_calculate_so2_simple(self, scenario):
expected_values = np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 3.0000e-05, 3.8600e-03, 1.0945e-02],
[0, 0, 6.0000e-05, 7.7200e-03, 2.1890e-02],
[0, 0, 9.0000e-05, 1.1580e-02, 3.2835e-02],
]
)
nox = generate_emissions_stats(scenario, pollutant="so2", method="simple")
assert_array_almost_equal(
expected_values, nox.to_numpy(), err_msg="Values do not match expected"
)
def test_calculate_so2_disallowed_method(self, scenario):
with pytest.raises(ValueError):
generate_emissions_stats(scenario, pollutant="so2", method="always-on")
class TestEmissionsSummarization:
def test_emissions_is_non_negative(self, scenario):
carbon = generate_emissions_stats(scenario)
with pytest.raises(ValueError):
summarize_emissions_by_bus(
-1 * carbon, MockGrid(grid_attrs={"plant": mock_plant})
)
def test_emissions_summarization(self, mock_pg, mock_plant):
# setup
pg = pd.DataFrame(mock_pg).iloc[:3, :]
plant = pd.DataFrame(mock_plant)
plant.set_index("plant_id", inplace=True)
input_carbon_values = [
[0, 0, 6.6998, 13.546000, 11.8475],
[0, 0, 9.4472, 21.1873333, 20.3100],
[0, 0, 13.0622, 31.6073333, 32.1575],
]
input_carbon = pd.DataFrame(
input_carbon_values, index=pg.index, columns=pg.columns
)
expected_sum = {
"coal": {1004: 66.3406666},
"ng": {1003: 29.2092},
"dfo": {1005: 64.315},
}
# calculation
summation = summarize_emissions_by_bus(
input_carbon, MockGrid(grid_attrs={"plant": mock_plant})
)
# checks
err_msg = "summarize_emissions_by_bus didn't return a dict"
assert isinstance(summation, dict), err_msg
err_msg = "summarize_emissions_by_bus didn't return the right dict keys"
assert set(summation.keys()) == expected_sum.keys(), err_msg
for k in expected_sum.keys():
err_msg = "summation not correct for fuel " + k
assert expected_sum[k].keys() == summation[k].keys(), err_msg
for bus in expected_sum[k]:
err_msg = "summation not correct for bus " + str(bus)
assert expected_sum[k][bus] == pytest.approx(summation[k][bus]), err_msg
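# Added worked check of the expected totals above: the coal plant (104, bus 1004)
# emits 13.546 + 21.1873333 + 31.6073333 = 66.3406666 over the three retained hours,
# which is exactly the value asserted in expected_sum["coal"][1004].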
|
[
"pandas.DataFrame",
"powersimdata.tests.mock_grid.MockGrid",
"pandas.date_range",
"postreise.analyze.generation.emissions.generate_emissions_stats",
"pytest.raises",
"numpy.array",
"powersimdata.tests.mock_scenario.MockScenario",
"pytest.approx"
] |
[((1354, 1448), 'powersimdata.tests.mock_scenario.MockScenario', 'MockScenario', ([], {'grid_attrs': "{'plant': mock_plant, 'gencost_before': mock_gencost}", 'pg': 'mock_pg'}), "(grid_attrs={'plant': mock_plant, 'gencost_before':\n mock_gencost}, pg=mock_pg)\n", (1366, 1448), False, 'from powersimdata.tests.mock_scenario import MockScenario\n'), ((1540, 1564), 'pandas.DataFrame', 'pd.DataFrame', (['mock_plant'], {}), '(mock_plant)\n', (1552, 1564), True, 'import pandas as pd\n'), ((3468, 3522), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'method': '"""always-on"""'}), "(scenario, method='always-on')\n", (3492, 3522), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((3661, 3811), 'numpy.array', 'np.array', (['[[0, 0, 4.82, 8.683333, 6.77], [0, 0, 6.6998, 13.546, 11.8475], [0, 0, \n 9.4472, 21.1873333, 20.31], [0, 0, 13.0622, 31.6073333, 32.1575]]'], {}), '([[0, 0, 4.82, 8.683333, 6.77], [0, 0, 6.6998, 13.546, 11.8475], [0,\n 0, 9.4472, 21.1873333, 20.31], [0, 0, 13.0622, 31.6073333, 32.1575]])\n', (3669, 3811), True, 'import numpy as np\n'), ((4128, 4181), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'method': '"""decommit"""'}), "(scenario, method='decommit')\n", (4152, 4181), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((4320, 4458), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 6.6998, 13.546, 11.8475], [0, 0, 9.4472, \n 21.1873333, 20.31], [0, 0, 13.0622, 31.6073333, 32.1575]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 6.6998, 13.546, 11.8475], [0, 0, 9.4472, \n 21.1873333, 20.31], [0, 0, 13.0622, 31.6073333, 32.1575]])\n', (4328, 4458), True, 'import numpy as np\n'), ((4772, 4823), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'method': '"""simple"""'}), "(scenario, method='simple')\n", (4796, 4823), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((4962, 5077), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 1.407, 4.004, 4.2], [0, 0, 2.814, 8.008, 8.4], [0,\n 0, 4.221, 12.012, 12.6]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 1.407, 4.004, 4.2], [0, 0, 2.814, 8.008, \n 8.4], [0, 0, 4.221, 12.012, 12.6]])\n', (4970, 5077), True, 'import numpy as np\n'), ((5411, 5556), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0.000537, 0.002632, 0.007685], [0, 0, 0.001074, \n 0.005264, 0.01537], [0, 0, 0.001611, 0.007896, 0.023055]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0.000537, 0.002632, 0.007685], [0, 0, \n 0.001074, 0.005264, 0.01537], [0, 0, 0.001611, 0.007896, 0.023055]])\n', (5419, 5556), True, 'import numpy as np\n'), ((5668, 5736), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'pollutant': '"""nox"""', 'method': '"""simple"""'}), "(scenario, pollutant='nox', method='simple')\n", (5692, 5736), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((6157, 6290), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 3e-05, 0.00386, 0.010945], [0, 0, 6e-05, 0.00772, \n 0.02189], [0, 0, 9e-05, 0.01158, 0.032835]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 3e-05, 0.00386, 0.010945], [0, 0, 6e-05, \n 0.00772, 0.02189], [0, 0, 9e-05, 0.01158, 0.032835]])\n', (6165, 
6290), True, 'import numpy as np\n'), ((6432, 6500), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'pollutant': '"""so2"""', 'method': '"""simple"""'}), "(scenario, pollutant='so2', method='simple')\n", (6456, 6500), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((6926, 6960), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {}), '(scenario)\n', (6950, 6960), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((7272, 7296), 'pandas.DataFrame', 'pd.DataFrame', (['mock_plant'], {}), '(mock_plant)\n', (7284, 7296), True, 'import pandas as pd\n'), ((7559, 7628), 'pandas.DataFrame', 'pd.DataFrame', (['input_carbon_values'], {'index': 'pg.index', 'columns': 'pg.columns'}), '(input_carbon_values, index=pg.index, columns=pg.columns)\n', (7571, 7628), True, 'import pandas as pd\n'), ((1220, 1268), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""'], {'periods': '(4)', 'freq': '"""H"""'}), "('2019-01-01', periods=4, freq='H')\n", (1233, 1268), True, 'import pandas as pd\n'), ((2694, 2719), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2707, 2719), False, 'import pytest\n'), ((2744, 2795), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'pollutant': '"""CO2"""'}), "(scenario, pollutant='CO2')\n", (2768, 2795), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((2940, 2964), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2953, 2964), False, 'import pytest\n'), ((2989, 3033), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'method': '(1)'}), '(scenario, method=1)\n', (3013, 3033), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((3151, 3176), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3164, 3176), False, 'import pytest\n'), ((3201, 3273), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'pollutant': '"""nox"""', 'method': '"""always-off"""'}), "(scenario, pollutant='nox', method='always-off')\n", (3225, 3273), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((5942, 5967), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5955, 5967), False, 'import pytest\n'), ((5981, 6051), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'pollutant': '"""nox"""', 'method': '"""decommit"""'}), "(scenario, pollutant='nox', method='decommit')\n", (6005, 6051), False, 'from postreise.analyze.generation.emissions import generate_emissions_stats, summarize_emissions_by_bus\n'), ((6706, 6731), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6719, 6731), False, 'import pytest\n'), ((6745, 6816), 'postreise.analyze.generation.emissions.generate_emissions_stats', 'generate_emissions_stats', (['scenario'], {'pollutant': '"""so2"""', 'method': '"""always-on"""'}), "(scenario, pollutant='so2', method='always-on')\n", (6769, 6816), False, 'from postreise.analyze.generation.emissions 
import generate_emissions_stats, summarize_emissions_by_bus\n'), ((6974, 6999), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6987, 6999), False, 'import pytest\n'), ((7893, 7935), 'powersimdata.tests.mock_grid.MockGrid', 'MockGrid', ([], {'grid_attrs': "{'plant': mock_plant}"}), "(grid_attrs={'plant': mock_plant})\n", (7901, 7935), False, 'from powersimdata.tests.mock_grid import MockGrid\n'), ((7070, 7112), 'powersimdata.tests.mock_grid.MockGrid', 'MockGrid', ([], {'grid_attrs': "{'plant': mock_plant}"}), "(grid_attrs={'plant': mock_plant})\n", (7078, 7112), False, 'from powersimdata.tests.mock_grid import MockGrid\n'), ((7222, 7243), 'pandas.DataFrame', 'pd.DataFrame', (['mock_pg'], {}), '(mock_pg)\n', (7234, 7243), True, 'import pandas as pd\n'), ((8563, 8595), 'pytest.approx', 'pytest.approx', (['summation[k][bus]'], {}), '(summation[k][bus])\n', (8576, 8595), False, 'import pytest\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from mrsimulator.method.query import TransitionQuery
from mrsimulator.methods import FiveQ_VAS
from mrsimulator.methods import SevenQ_VAS
from mrsimulator.methods import ThreeQ_VAS
__author__ = "<NAME>"
__email__ = "<EMAIL>"
methods = [ThreeQ_VAS, FiveQ_VAS, SevenQ_VAS]
names = ["ThreeQ_VAS", "FiveQ_VAS", "SevenQ_VAS"]
def sample_test_output(n):
return {
"magnetic_flux_density": "9.4 T",
"rotor_angle": "0.9553166181245 rad",
"rotor_frequency": "1000000000000.0 Hz",
"spectral_dimensions": [
{
"count": 1024,
"spectral_width": "25000.0 Hz",
"events": [{"transition_query": [{"ch1": {"P": [n], "D": [0]}}]}],
},
{
"count": 1024,
"spectral_width": "25000.0 Hz",
"events": [{"transition_query": [{"ch1": {"P": [-1], "D": [0]}}]}],
},
],
}
def test_MQ_VAS_rotor_freq():
e = "`rotor_frequency=1e12 Hz` is fixed for 2D Methods and cannot be modified."
isotopes = ["87Rb", "27Al", "51V"]
for iso, method in zip(isotopes, methods):
with pytest.raises(ValueError, match=f".*{e}.*"):
method(channels=[iso], rotor_frequency=10, spectral_dimensions=[{}, {}])
def test_MQ_VAS_affine():
sites = ["87Rb", "27Al", "51V"]
spins = [1.5, 2.5, 3.5]
k_MQ_MAS = {
3: {1.5: 21 / 27, 2.5: 114 / 72, 3.5: 303 / 135, 4.5: 546 / 216},
5: {2.5: 150 / 72, 3.5: 165 / 135, 4.5: 570 / 216},
7: {3.5: 483 / 135, 4.5: 84 / 216},
9: {4.5: 1116 / 216},
}
for j, method in enumerate(methods):
for i, isotope in zip(spins[j:], sites[j:]):
meth = method(channels=[isotope])
k = k_MQ_MAS[3 + 2 * j][i]
assert meth.spectral_dimensions[0].events[0].fraction == 1
assert meth.spectral_dimensions[1].events[0].fraction == 1
assert np.allclose(meth.affine_matrix, [1 / (k + 1), k / (k + 1), 0, 1])
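# Added worked check of the affine relation above: for 3Q 87Rb (spin 3/2),
# k = 21/27 = 7/9, so 1/(k + 1) = 27/48 = 0.5625 and k/(k + 1) = 21/48 = 0.4375,
# matching the affine matrix asserted in test_3Q_VAS_general below.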
def test_3Q_VAS_general():
"""3Q-VAS method test"""
mth = ThreeQ_VAS(channels=["87Rb"], spectral_dimensions=[{}, {}])
assert mth.name == "ThreeQ_VAS"
assert mth.description == "Simulate a 3Q variable-angle spinning spectrum."
assert mth.spectral_dimensions[0].events[0].transition_query == [
TransitionQuery(ch1={"P": [-3], "D": [0]})
]
assert mth.spectral_dimensions[1].events[0].transition_query == [
TransitionQuery(ch1={"P": [-1], "D": [0]})
]
assert ThreeQ_VAS.parse_dict_with_units(mth.json()) == mth
assert np.allclose(mth.affine_matrix, [0.5625, 0.4375, 0.0, 1.0])
serialize = mth.json()
_ = serialize.pop("affine_matrix")
assert serialize == {
"channels": ["87Rb"],
"description": "Simulate a 3Q variable-angle spinning spectrum.",
"name": "ThreeQ_VAS",
**sample_test_output(-3),
}
def test_5Q_VAS_general():
"""5Q-VAS method test"""
mth = FiveQ_VAS(channels=["17O"], spectral_dimensions=[{}, {}])
assert mth.name == "FiveQ_VAS"
assert mth.description == "Simulate a 5Q variable-angle spinning spectrum."
assert mth.spectral_dimensions[0].events[0].transition_query == [
TransitionQuery(ch1={"P": [-5], "D": [0]})
]
assert mth.spectral_dimensions[1].events[0].transition_query == [
TransitionQuery(ch1={"P": [-1], "D": [0]})
]
assert FiveQ_VAS.parse_dict_with_units(mth.json()) == mth
assert np.allclose(
mth.affine_matrix, [0.3243243243243243, 0.6756756756756757, 0.0, 1.0]
)
serialize = mth.json()
_ = serialize.pop("affine_matrix")
assert serialize == {
"channels": ["17O"],
"description": "Simulate a 5Q variable-angle spinning spectrum.",
"name": "FiveQ_VAS",
**sample_test_output(-5),
}
def test_7Q_VAS_general():
"""7Q-VAS method test"""
mth = SevenQ_VAS(channels=["51V"], spectral_dimensions=[{}, {}])
assert mth.name == "SevenQ_VAS"
assert mth.description == "Simulate a 7Q variable-angle spinning spectrum."
assert mth.spectral_dimensions[0].events[0].transition_query == [
TransitionQuery(ch1={"P": [-7], "D": [0]})
]
assert mth.spectral_dimensions[1].events[0].transition_query == [
TransitionQuery(ch1={"P": [-1], "D": [0]})
]
assert SevenQ_VAS.parse_dict_with_units(mth.json()) == mth
assert np.allclose(mth.affine_matrix, [0.2184466, 0.7815534, 0.0, 1.0])
serialize = mth.json()
_ = serialize.pop("affine_matrix")
assert serialize == {
"channels": ["51V"],
"description": "Simulate a 7Q variable-angle spinning spectrum.",
"name": "SevenQ_VAS",
**sample_test_output(-7),
}
|
[
"mrsimulator.methods.ThreeQ_VAS",
"mrsimulator.method.query.TransitionQuery",
"numpy.allclose",
"pytest.raises",
"mrsimulator.methods.SevenQ_VAS",
"mrsimulator.methods.FiveQ_VAS"
] |
[((2134, 2193), 'mrsimulator.methods.ThreeQ_VAS', 'ThreeQ_VAS', ([], {'channels': "['87Rb']", 'spectral_dimensions': '[{}, {}]'}), "(channels=['87Rb'], spectral_dimensions=[{}, {}])\n", (2144, 2193), False, 'from mrsimulator.methods import ThreeQ_VAS\n'), ((2639, 2697), 'numpy.allclose', 'np.allclose', (['mth.affine_matrix', '[0.5625, 0.4375, 0.0, 1.0]'], {}), '(mth.affine_matrix, [0.5625, 0.4375, 0.0, 1.0])\n', (2650, 2697), True, 'import numpy as np\n'), ((3033, 3090), 'mrsimulator.methods.FiveQ_VAS', 'FiveQ_VAS', ([], {'channels': "['17O']", 'spectral_dimensions': '[{}, {}]'}), "(channels=['17O'], spectral_dimensions=[{}, {}])\n", (3042, 3090), False, 'from mrsimulator.methods import FiveQ_VAS\n'), ((3535, 3621), 'numpy.allclose', 'np.allclose', (['mth.affine_matrix', '[0.3243243243243243, 0.6756756756756757, 0.0, 1.0]'], {}), '(mth.affine_matrix, [0.3243243243243243, 0.6756756756756757, 0.0,\n 1.0])\n', (3546, 3621), True, 'import numpy as np\n'), ((3966, 4024), 'mrsimulator.methods.SevenQ_VAS', 'SevenQ_VAS', ([], {'channels': "['51V']", 'spectral_dimensions': '[{}, {}]'}), "(channels=['51V'], spectral_dimensions=[{}, {}])\n", (3976, 4024), False, 'from mrsimulator.methods import SevenQ_VAS\n'), ((4471, 4535), 'numpy.allclose', 'np.allclose', (['mth.affine_matrix', '[0.2184466, 0.7815534, 0.0, 1.0]'], {}), '(mth.affine_matrix, [0.2184466, 0.7815534, 0.0, 1.0])\n', (4482, 4535), True, 'import numpy as np\n'), ((1207, 1250), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f""".*{e}.*"""'}), "(ValueError, match=f'.*{e}.*')\n", (1220, 1250), False, 'import pytest\n'), ((2000, 2065), 'numpy.allclose', 'np.allclose', (['meth.affine_matrix', '[1 / (k + 1), k / (k + 1), 0, 1]'], {}), '(meth.affine_matrix, [1 / (k + 1), k / (k + 1), 0, 1])\n', (2011, 2065), True, 'import numpy as np\n'), ((2388, 2430), 'mrsimulator.method.query.TransitionQuery', 'TransitionQuery', ([], {'ch1': "{'P': [-3], 'D': [0]}"}), "(ch1={'P': [-3], 'D': [0]})\n", (2403, 2430), False, 'from mrsimulator.method.query import TransitionQuery\n'), ((2515, 2557), 'mrsimulator.method.query.TransitionQuery', 'TransitionQuery', ([], {'ch1': "{'P': [-1], 'D': [0]}"}), "(ch1={'P': [-1], 'D': [0]})\n", (2530, 2557), False, 'from mrsimulator.method.query import TransitionQuery\n'), ((3285, 3327), 'mrsimulator.method.query.TransitionQuery', 'TransitionQuery', ([], {'ch1': "{'P': [-5], 'D': [0]}"}), "(ch1={'P': [-5], 'D': [0]})\n", (3300, 3327), False, 'from mrsimulator.method.query import TransitionQuery\n'), ((3412, 3454), 'mrsimulator.method.query.TransitionQuery', 'TransitionQuery', ([], {'ch1': "{'P': [-1], 'D': [0]}"}), "(ch1={'P': [-1], 'D': [0]})\n", (3427, 3454), False, 'from mrsimulator.method.query import TransitionQuery\n'), ((4220, 4262), 'mrsimulator.method.query.TransitionQuery', 'TransitionQuery', ([], {'ch1': "{'P': [-7], 'D': [0]}"}), "(ch1={'P': [-7], 'D': [0]})\n", (4235, 4262), False, 'from mrsimulator.method.query import TransitionQuery\n'), ((4347, 4389), 'mrsimulator.method.query.TransitionQuery', 'TransitionQuery', ([], {'ch1': "{'P': [-1], 'D': [0]}"}), "(ch1={'P': [-1], 'D': [0]})\n", (4362, 4389), False, 'from mrsimulator.method.query import TransitionQuery\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# from IPython import get_ipython
import time, os, sys, shutil
# from utils.fitting_utils import *
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # <--- This is important for 3d plotting
import sys, os, pickle
# import cv2
# from colour import Color
import h5py
import glob
import itertools
# and pytorch
import torch
# In[2]:
import ipywidgets as widgets
from ipywidgets import HBox, VBox
from IPython.display import display
# %matplotlib inline
get_ipython().run_line_magic('matplotlib', 'widget')
# In[ ]:
# In[3]:
# def unpack_from_jagged(jagged_line):
# ''' THE REVERSER: HERE IT UNPACKS AGAIN SO THE DATA CAN BE SAVED
# AS A JAGGED H5PY DATASET
# FROM OTHER: Takes the NX3, N, Mx3, M, M shapes and packs to a single float16
# We ravel the position, ravel the keyp, stack everything and
# - importantly - we also save M, the number of keypoints'''
# n_keyp = int(jagged_line[-1])
# keyp_idx2 = jagged_line[-(1+n_keyp):-1].astype('int')
# pkeyp2 = jagged_line[-(1+2*n_keyp):-(1+n_keyp)]
# keyp2 = jagged_line[-(1+5*n_keyp):-(1+2*n_keyp)].reshape((n_keyp,3))
# block2 = jagged_line[:-(1+5*n_keyp)].reshape((-1,4))
# pos2,pos_weights2 = block2[:,:3], block2[:,3]
# # HACK to cut the floor
# floor_logic = pos2[:,2] > .012
# pos2 = pos2[floor_logic,:]
# pos_weights2 = pos_weights2[floor_logic]
# return pos2, pos_weights2, keyp2, pkeyp2, keyp_idx2
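# Typical single-frame usage of the unpacking helper imported below (shapes per
# the packing description above: pos is Nx3, pos_weights is N, keyp is Mx3,
# pkeyp is M, ikeyp is M):
# pos, pos_weights, keyp, pkeyp, ikeyp = unpack_from_jagged(jagged_lines[frame])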
from utils.analysis_tools import unpack_from_jagged
from utils.analysis_tools import particles_to_body_supports_cuda
class data_storage(object):
def __init__(self):
# TODO update all this properly
self.data_path = None
self.tracking_path = None
self.jagged_lines = None
self.has_implant = True
self.is_running = False
def load_jagged(self):
with h5py.File(self.data_path, mode='r') as hdf5_file:
print("Loading jagged lines from " + self.data_path + "...")
# print(hdf5_file.keys())
# print(len(hdf5_file['dataset']))
self.jagged_lines = hdf5_file['dataset'][...]
print("Loaded {} jagged lines.".format(len(self.jagged_lines)) )
def load_tracking(self):
with open(self.tracking_path, 'rb') as f:
tracked_behavior = pickle.load(f)
print(tracked_behavior.keys())
self.tracked_behavior = tracked_behavior
self.has_implant = tracked_behavior['has_implant']
self.start_frame = tracked_behavior['start_frame']
self.end_frame = tracked_behavior['end_frame']
# get the raw tracking data!
part = self.tracked_behavior['tracking_holder']
# unpack all the 3D coordinates!
part = torch.from_numpy(part).float().cuda()
part = torch.transpose(part,0,1)
if self.has_implant:
body_support_0 = particles_to_body_supports_cuda(part[:,:9],implant = True)
body_support_1 = particles_to_body_supports_cuda(part[:,9:],implant = False)
# and the spine length
s_0 = part[:,2].cpu().numpy()
s_1 = part[:,2+9].cpu().numpy()
else:
body_support_0 = particles_to_body_supports_cuda(part[:,:8],implant = False)
body_support_1 = particles_to_body_supports_cuda(part[:,8:],implant = False)
# and the spine length
s_0 = part[:,2].cpu().numpy()
s_1 = part[:,2+8].cpu().numpy()
# add the raw and smoothed coordinates as numpy arrays
self.body_support_0_raw = [i.cpu().numpy().squeeze() for i in body_support_0]
# self.body_support_0_smooth = body_support_0_smooth
self.s_0_raw = s_0
# self.s_0_smooth = s_0_smooth
self.body_support_1_raw = [i.cpu().numpy().squeeze() for i in body_support_1]
# self.body_support_1_smooth = body_support_1_smooth
self.s_1_raw = s_1
# self.s_1_smooth = s_1_smooth
def make_3d_axis(self):
# 3D plot of the
fig = plt.figure(figsize = (4.5,4.5))
ax = fig.add_subplot(111, projection='3d')
# add to self for use later
self.fig = fig
self.ax = ax
def add_raw_data(self,frame):
# unpack the raw data in a plottable format
pos, pos_weights, keyp, pkeyp, ikeyp = unpack_from_jagged(self.jagged_lines[frame])
X, Y, Z = pos[:,0],pos[:,1],pos[:,2]
# add to axis 3D plot of Sphere
self.h_pc = self.ax.scatter(X, Y, Z, zdir='z', s=2, c='k', alpha = .05,rasterized=False)
body_colors = ['dodgerblue','red','lime','orange']
body_indices = [0,1,2,3]
        # loop over the types of body, and make empty scatter plots
self.h_kp_list = []
for body in body_indices:
h_kp = self.ax.scatter([],[],[], zdir='z', s=25, c=body_colors[body],rasterized=False)
self.h_kp_list.append(h_kp)
        # THEN set the 3d values to what they should be
for body in body_indices:
self.h_kp_list[body]._offsets3d = (keyp[ikeyp==body,0], keyp[ikeyp==body,1], keyp[ikeyp==body,2])
# for axis adjustment
self.max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0
self.mid_x = (X.max()+X.min()) * 0.5
self.mid_y = (Y.max()+Y.min()) * 0.5
self.mid_z = (Z.max()+Z.min()) * 0.5
def update_raw_data(self,frame):
# get new raw data!
pos, pos_weights, keyp, pkeyp, ikeyp = unpack_from_jagged(self.jagged_lines[frame])
X, Y, Z = pos[:,0],pos[:,1],pos[:,2]
# update the pointcloud
self.h_pc._offsets3d = (X,Y,Z)
# and update the keypoints
for body in range(4):
self.h_kp_list[body]._offsets3d = (keyp[ikeyp==body,0], keyp[ikeyp==body,1], keyp[ikeyp==body,2])
def plot_skeleton(self,body_support,color = 'k',body_idx = 0,has_implant = False):
# unpack
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
#print("c_hip is {}".format(c_hip))
if has_implant:
p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip,c_impl]
p_line = [c_nose,c_nose,c_mid,c_impl,c_impl]
q_line = [c_mid,c_tip,c_ass,c_nose,c_tip]
else:
p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip]
p_line = [c_nose,c_nose,c_mid]
q_line = [c_mid,c_tip,c_ass]
# add the body points
for p in p_skel:
h_bp = self.ax.scatter(p[0],p[1],p[2],zdir='z', s=50, alpha = 1 , c=color,rasterized=False)
self.h_bp_list[body_idx].append(h_bp)
# and the lines between body parts
for p,q in zip(p_line,q_line):
h_skel = self.ax.plot([p[0],q[0]],[p[1],q[1]],[p[2],q[2]],c=color,lw = 4)
self.h_skel_list[body_idx].append(h_skel)
def add_skel_fit(self,frame,fit='raw',plot_ellipsoids = True):
# frame index
i_frame = frame-self.start_frame
if fit =='raw':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
s_0 = self.s_0_raw[i_frame]
s_1 = self.s_1_raw[i_frame]
elif fit =='smooth':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
s_0 = self.s_0_smooth[i_frame]
s_1 = self.s_1_smooth[i_frame]
else:
return
# and plot!
self.h_skel_list = [[],[]]
self.h_bp_list = [[],[]]
self.plot_skeleton(body_support_0,color = 'k',body_idx = 0,has_implant = self.has_implant)
self.plot_skeleton(body_support_1,color = 'peru',body_idx = 1,has_implant = False)
def update_skeleton(self,body_support,body_idx = 0, has_implant = False):
# unpack
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
if has_implant :
p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip,c_impl]
p_line = [c_nose,c_nose,c_mid,c_impl,c_impl]
q_line = [c_mid,c_tip,c_ass,c_nose,c_tip]
else:
p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip]
p_line = [c_nose,c_nose,c_mid]
q_line = [c_mid,c_tip,c_ass]
# update the body points
for j,p in enumerate(p_skel):
self.h_bp_list[body_idx][j]._offsets3d = ([p[0]],[p[1]],[p[2]])
# update the lines between body parts
for j,(p,q) in enumerate(zip(p_line,q_line)):
# # lines are an extra level deep for some stupid matplotlib reason
# self.h_skel_list[body_idx][j][0].set_xdata([p[0],q[0]])
# self.h_skel_list[body_idx][j][0].set_ydata([p[1],q[1]])
# self.h_skel_list[body_idx][j][0].set_3d_properties([p[2],q[2]])
            # new matplotlib has changed how this is done:
self.h_skel_list[body_idx][j][0].set_data_3d([p[0],q[0]],[p[1],q[1]],[p[2],q[2]])
def update_skel_fit(self,frame,fit='raw'):
# get the data out frame index
i_frame = frame-self.start_frame
# speed up this list nonsense
if fit =='raw':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
s_0 = self.s_0_raw[i_frame]
s_1 = self.s_1_raw[i_frame]
elif fit =='smooth':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
s_0 = self.s_0_smooth[i_frame]
s_1 = self.s_1_smooth[i_frame]
else:
return
self.update_skeleton(body_support_0,body_idx = 0, has_implant = self.has_implant)
self.update_skeleton(body_support_1,body_idx = 1, has_implant = False)
def add_ellip_fit(self,frame,fit='raw',plot_ellipsoids = True):
# frame index
i_frame = frame-self.start_frame
if fit =='raw':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
s_0 = self.s_0_raw[i_frame]
s_1 = self.s_1_raw[i_frame]
elif fit =='smooth':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
s_0 = self.s_0_smooth[i_frame]
s_1 = self.s_1_smooth[i_frame]
else:
return
self.h_hip_list = [[],[]]
self.plot_ellipsoids(body_support_0,s_0,color = 'k',body_idx = 0,has_implant=self.has_implant)
self.plot_ellipsoids(body_support_1,s_1,color = 'peru',body_idx = 1,has_implant=False)
def add_wireframe_to_axis(self,ax,R_body,c_hip, a_nose,b_nose,a_hip,b_hip,r_impl,style='hip',this_color='k',this_alpha=.4):
# FIRST PLOT THE ELLIPSE, which is the hip
# generate points on a sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
# get the mesh, by using the equation of an ellipsoid
if style == 'hip':
x=np.cos(u)*a_hip
y=np.sin(u)*np.sin(v)*b_hip
z=np.sin(u)*np.cos(v)*b_hip
this_color = 'grey'
if style == 'nose':
x=np.cos(u)*a_nose
y=np.sin(u)*np.sin(v)*b_nose
z=np.sin(u)*np.cos(v)*b_nose
if style == 'impl':
x=np.cos(u)*r_impl
y=np.sin(u)*np.sin(v)*r_impl
z=np.sin(u)*np.cos(v)*r_impl
# pack to matrix of positions
posi = np.vstack((x.ravel(),y.ravel(),z.ravel()))
        # apply the rotation and unpack
# posi_rotated = ((R_body @ (posi.T + c_hip).T ).T + t_body).T
        # REMEMBER: BODY SUPPORTS ARE [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose]
posi_rotated = np.einsum('ij,ja->ia',R_body,posi) + c_hip[:,np.newaxis]
x = posi_rotated[0,:]
y = posi_rotated[1,:]
z = posi_rotated[2,:]
# reshape for wireframe
x = np.reshape(x, (u.shape) )
y = np.reshape(y, (u.shape) )
z = np.reshape(z, (u.shape) )
h_hip = ax.plot_wireframe(x, y, z, color=this_color,alpha = this_alpha)
return h_hip
def plot_ellipsoids(self,body_support,s,color = 'k',body_idx = 0,has_implant=False):
# unpack
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
# this is not so elegant, hm hm
_, a_hip_min,a_hip_max,b_hip_min,b_hip_max,a_nose,b_nose,d_nose,x_impl,z_impl,r_impl= self.tracked_behavior['body_constants']
a_hip_delta = a_hip_max - a_hip_min
b_hip_delta = b_hip_max - b_hip_min
a_hip_0 = a_hip_min
b_hip_0 = b_hip_min
a_hip = a_hip_0 + a_hip_delta * s
b_hip = b_hip_0 + b_hip_delta * (1.-s)
d_hip = .75 * a_hip
if has_implant:
RRs,ccs,styles = [R_body,R_nose,R_nose],[c_hip,c_nose,c_impl],['hip','nose','impl']
else:
RRs,ccs,styles = [R_body,R_nose],[c_hip,c_nose],['hip','nose']
for RR,cc,style in zip(RRs,ccs,styles):
h_hip = self.add_wireframe_to_axis(self.ax,RR,
cc,
a_nose,
b_nose,
a_hip,
b_hip,
r_impl,
style=style,this_color=color)
self.h_hip_list[body_idx].append(h_hip)
def update_wireframe_lines(self,h_hip,X,Y,Z):
# h_hip is the handle to the lines3dcollection
        # much of the code is taken from the source of the matplotlib wireframe plotting
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
rstride = 1
cstride = 1
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
if rstride:
rii = list(range(0, rows, rstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1):
rii += [rows-1]
else:
rii = []
if cstride:
cii = list(range(0, cols, cstride))
# Add the last index only if needed
if cols > 0 and cii[-1] != (cols - 1):
cii += [cols-1]
else:
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = ([list(zip(xl, yl, zl))
for xl, yl, zl in zip(xlines, ylines, zlines)]
+ [list(zip(xl, yl, zl))
for xl, yl, zl in zip(txlines, tylines, tzlines)])
h_hip.set_segments(lines)
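    # Design note: instead of clearing and re-plotting the wireframe each frame,
    # the segment list of the existing Line3DCollection is rebuilt (mirroring what
    # matplotlib's plot_wireframe does internally) and pushed in via set_segments,
    # which keeps the artist handles stored in h_hip_list valid across updates.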
def calculate_wireframe_points(self,R_body,c_hip,a_nose,b_nose,a_hip,b_hip,r_impl,style='hip'):
# FIRST PLOT THE ELLIPSE, which is the hip
# generate points on a sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
# get the mesh, by using the equation of an ellipsoid
if style == 'hip':
x=np.cos(u)*a_hip
y=np.sin(u)*np.sin(v)*b_hip
z=np.sin(u)*np.cos(v)*b_hip
if style == 'nose':
x=np.cos(u)*a_nose
y=np.sin(u)*np.sin(v)*b_nose
z=np.sin(u)*np.cos(v)*b_nose
if style == 'impl':
x=np.cos(u)*r_impl
y=np.sin(u)*np.sin(v)*r_impl
z=np.sin(u)*np.cos(v)*r_impl
# pack to matrix of positions
posi = np.vstack((x.ravel(),y.ravel(),z.ravel()))
        # apply the rotation and unpack
# posi_rotated = ((R_body @ (posi.T + c_hip).T ).T + t_body).T
        # REMEMBER: BODY SUPPORTS ARE [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose]
posi_rotated = np.einsum('ij,ja->ia',R_body,posi) + c_hip[:,np.newaxis]
x = posi_rotated[0,:]
y = posi_rotated[1,:]
z = posi_rotated[2,:]
# reshape for wireframe
x = np.reshape(x, (u.shape) )
y = np.reshape(y, (u.shape) )
z = np.reshape(z, (u.shape) )
return x,y,z
def update_ellipsoids(self,body_support,s,body_idx = 0, has_implant = False):
# unpack
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
# this is not so elegant, hm hm
# this is STILL not so elegant, hm hm
_, a_hip_min,a_hip_max,b_hip_min,b_hip_max,a_nose,b_nose,d_nose,x_impl,z_impl,r_impl= self.tracked_behavior['body_constants']
a_hip_delta = a_hip_max - a_hip_min
b_hip_delta = b_hip_max - b_hip_min
a_hip_0 = a_hip_min
b_hip_0 = b_hip_min
a_hip = a_hip_0 + a_hip_delta * s
b_hip = b_hip_0 + b_hip_delta * (1.-s)
d_hip = .75 * a_hip
if has_implant:
RRs,ccs,styles = [R_body,R_nose,R_nose],[c_hip,c_nose,c_impl],['hip','nose','impl']
else:
RRs,ccs,styles = [R_body,R_nose],[c_hip,c_nose],['hip','nose']
for jj, (RR,cc,style) in enumerate(zip(RRs,ccs,styles)):
X,Y,Z = self.calculate_wireframe_points(RR,
cc,
a_nose,
b_nose,
a_hip,
b_hip,
r_impl,
style=style)
h_hip = self.h_hip_list[body_idx][jj]
self.update_wireframe_lines(h_hip,X,Y,Z)
def update_ellip_fit(self,frame,fit = 'raw'):
# get the data out frame index
i_frame = frame-self.start_frame
# speed up this list nonsense
if fit =='raw':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
s_0 = self.s_0_raw[i_frame]
s_1 = self.s_1_raw[i_frame]
elif fit =='smooth':
body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
s_0 = self.s_0_smooth[i_frame]
s_1 = self.s_1_smooth[i_frame]
else:
return
self.update_ellipsoids(body_support_0,s_0,body_idx = 0,has_implant = self.has_implant)
self.update_ellipsoids(body_support_1,s_1,body_idx = 1,has_implant = False)
def unpack_trace(self,body_support,trace_indices,body_idx = 0,what_type=['hip'],color='k'):
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
type_list = np.array(['hip','ass','mid','nose','tip','impl'])
c_list = [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl]
ii_c_list = np.arange(len(type_list))
# TODO make the decay work!
for ttt in what_type:
# this is also not so elegant
selecta = np.arange(len(type_list))[type_list == ttt]
dat = c_list[selecta[0]].squeeze()
X,Y,Z = dat[trace_indices,0],dat[trace_indices,1],dat[trace_indices,2]
h_trace = self.ax.plot(X,Y,Z,lw=2,c=color,alpha = .65)
self.h_trace_list[body_idx][ii_c_list[type_list == ttt][0]] = h_trace
def add_trace(self,frame,trace='raw',trace_length=90,trace_clip = None,decay_factor=.9, type_list = ['nose']):
# get the particle, convert to torch tensor, calculate body supports
i_frame = frame-self.start_frame
# make a holder for the lines
self.h_trace_list = [[None]*5,[None]*5]
if trace_clip is not None:
i_clip = trace_clip-self.start_frame
i_trace_start = np.max([i_clip, i_frame-trace_length])
else:
i_trace_start = np.max([0, i_frame-trace_length])
#print("i_trace_start is {} and i_frame is {}".format(i_trace_start,i_frame))
trace_indices = np.arange(i_trace_start,i_frame)
if trace == 'raw':
self.unpack_trace(self.body_support_0_raw,trace_indices, body_idx = 0,what_type=type_list,color='black')
self.unpack_trace(self.body_support_1_raw,trace_indices, body_idx = 1,what_type=type_list,color='peru')
if trace == 'smooth':
self.unpack_trace(self.body_support_0_smooth,trace_indices, body_idx = 0,what_type=type_list,color='black')
self.unpack_trace(self.body_support_1_smooth,trace_indices, body_idx = 1,what_type=type_list,color='peru')
def update_trace_3dlines(self,body_support,trace_indices,body_idx=0,what_type=['hip']):
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
type_list = np.array(['hip','ass','mid','nose','tip','impl'])
c_list = [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl]
ii_c_list = np.arange(len(type_list))
# TODO make the decay work!
for ttt in what_type:
# this is also not so elegant
selecta = np.arange(len(type_list))[type_list == ttt]
dat = c_list[selecta[0]].squeeze()
X,Y,Z = dat[trace_indices,0],dat[trace_indices,1],dat[trace_indices,2]
# self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_xdata(X)
# self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_ydata(Y)
# self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_3d_properties(Z)
# Ugh matplotlib changed the api, the new way makes much more sense though, so fine..
            self.h_trace_list[body_idx][ii_c_list[type_list == ttt][0]][0].set_data_3d(X,Y,Z)
    def update_trace_fit(self,frame,trace='raw',trace_length=90,trace_clip = None,decay_factor=.9, type_list = ['nose']):
# get the particle, convert to torch tensor, calculate body supports
i_frame = frame-self.start_frame
if trace_clip is not None:
i_clip = trace_clip-self.start_frame
i_trace_start = np.max([i_clip, i_frame-trace_length])
else:
i_trace_start = np.max([0, i_frame-trace_length])
# these are the indices to plot
trace_indices = np.arange(i_trace_start,i_frame)
if trace =='raw':
body_support_0 = self.body_support_0_raw
body_support_1 = self.body_support_1_raw
elif trace =='smooth':
body_support_0 = self.body_support_0_smooth
body_support_1 = self.body_support_1_smooth
else:
return
if len(trace_indices)== 0:
# just skip if there is no trace
return
self.update_trace_3dlines(body_support_0,trace_indices,body_idx=0,what_type = type_list)
self.update_trace_3dlines(body_support_1,trace_indices,body_idx=1,what_type = type_list)
def finish_3d_axis(self,view_style = 'ex', zoom = False, dump = False):
# finish the labeling, plot adjustments, dump and show
ax = self.ax
if self.max_range is not None:
ax.set_xlim(self.mid_x - self.max_range, self.mid_x + self.max_range)
ax.set_ylim(self.mid_y - self.max_range, self.mid_y + self.max_range)
ax.set_zlim(0, 2*self.max_range)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
if view_style == 'top':
az = -30
el = 90
if view_style == 'side':
az = -15
el = 9
if view_style == 'mix':
az = 150
el = 50
if view_style == 'ex':
az = -14
el = 46
if view_style == 'ex':
az = -46
el = 23
ax.view_init(elev=el, azim=az)
storage = data_storage()
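# Minimal scripted usage sketch (the paths are placeholders, substitute your own
# recording folder); the widget callbacks further down perform the same steps
# interactively:
# storage.data_path = '/path/to/recording/pre_processed_frames.hdf5'
# storage.tracking_path = '/path/to/recording/tracked_behavior_in_progress.pkl'
# storage.load_jagged()
# storage.load_tracking()
# storage.make_3d_axis()
# storage.add_raw_data(storage.start_frame)
# storage.add_skel_fit(storage.start_frame)
# storage.add_ellip_fit(storage.start_frame)
# storage.finish_3d_axis()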
# In[4]:
play = widgets.Play(
value=0,
min=0,
max=10000,
step=10,
interval=100,
description="Press play",
disabled=False
)
slider = widgets.IntSlider(value=0,
min=0,
max=10000)
def on_value_change(change):
frame = int(change['new'])
storage.update_raw_data( change['new'] )
storage.update_skel_fit( int(change['new']) )
storage.update_ellip_fit( int(change['new']) )
# storage.update_trace_fit( int(change['new']) )
# storage.update_trace_fit(frame)
storage.fig.canvas.draw()
slider.observe(on_value_change, 'value')
widgets.jslink((play, 'value'),(slider, 'value'))
# In[5]:
data_path_textbox = widgets.Text(
value='/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/pre_processed_frames.hdf5',
description='Path:'
)
tracking_path_textbox = widgets.Text(
value='/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/tracked_behavior_in_progress.pkl',
description='Path:'
)
load_button = widgets.Button(
description='Load data',
)
load_behavior_button = widgets.Button(
description='Load tracking',
)
# In[6]:
@load_button.on_click
def plot_on_click(b):
storage.data_path = data_path_textbox.value
storage.load_jagged()
# and make the plot
storage.add_raw_data( int(play.value) )
storage.finish_3d_axis()
storage.fig.canvas.draw()
# set the min and max time to the behavior!
play.min = 0
play.max = len(storage.jagged_lines)
slider.min = 0
slider.max = len(storage.jagged_lines)
@load_behavior_button.on_click
def plot_on_click2(b):
storage.tracking_path = tracking_path_textbox.value
storage.load_tracking()
storage.add_skel_fit( int(play.value) )
storage.add_ellip_fit( int(play.value) )
# storage.add_trace( int(play.value) )
play.min = storage.tracked_behavior['start_frame']
play.max = storage.tracked_behavior['end_frame']
slider.min = storage.tracked_behavior['start_frame']
slider.max = storage.tracked_behavior['end_frame']
# # set the min and max time to the tracked behavior!
# play.min = 0
# play.max = len(storage.jagged_lines)
storage.fig.canvas.draw()
# In[7]:
frame_textbox = widgets.BoundedIntText(
value=0,
min = 0,
max = 10000,
description='Frame #:'
)
jump_frame_button = widgets.Button(
description='Jump to frame',
)
# In[8]:
@jump_frame_button.on_click
def update_frame(b):
play.value = frame_textbox.value
# storage.update_raw_data( frame_textbox.value)
# storage.fig.canvas.draw()
# In[9]:
fps = 60
time_textbox = widgets.BoundedFloatText(
value=0,
min = 0,
max = 10000/60,
description='Time [s]:'
)
jump_time_button = widgets.Button(
description='Jump to time',
)
# In[10]:
@jump_time_button.on_click
def update_time(b):
play.value = int(time_textbox.value * fps)
# storage.update_raw_data( int(time_textbox.value * fps) )
# storage.fig.canvas.draw()
# In[ ]:
# In[11]:
# widgets.jslink((play, 'value'),(frame_textbox, 'value'))
# In[12]:
raw_ok =widgets.Valid(
value=True,
indent = True,
description='Raw data',
)
track_ok = widgets.Valid(
value=True,
description='Tracking'
)
# In[13]:
check_raw = widgets.Checkbox(
value=True,
description='Display raw data',
disabled=False,
indent=True
)
check_skel = widgets.Checkbox(
value=True,
description='Display skeleton',
disabled=False,
indent=False
)
check_ellip = widgets.Checkbox(
value=True,
description='Display ellipsoids',
disabled=False,
indent=True
)
check_trace = widgets.Checkbox(
value=False,
description='Display trace',
disabled=False,
indent=False
)
# In[14]:
sub10_button = widgets.Button(
description='<< 10',
)
sub5_button = widgets.Button(
description='< 5',
)
add10_button = widgets.Button(
description='10 >>',
)
add5_button = widgets.Button(
description='5 >',
)
@sub10_button.on_click
def update_frame(b):
play.value = play.value - 10
@sub5_button.on_click
def update_frame(b):
play.value = play.value - 5
@add5_button.on_click
def update_frame(b):
play.value = play.value + 5
@add10_button.on_click
def update_frame(b):
play.value = play.value + 10
# In[15]:
from ipywidgets import AppLayout, GridspecLayout
item_layout = widgets.Layout(margin='0 0 10px 10px')
dashboard = VBox([
HBox([data_path_textbox, load_button], layout = item_layout) ,
HBox([tracking_path_textbox, load_behavior_button], layout = item_layout) ,
HBox([track_ok, raw_ok], layout = item_layout) ,
HBox([play, slider], layout = item_layout) ,
HBox([sub10_button,sub5_button,add5_button,add10_button]) ,
HBox([frame_textbox,jump_frame_button], layout = item_layout) ,
HBox([time_textbox,jump_time_button] , layout = item_layout) ,
HBox([check_raw,check_skel]),
HBox([check_ellip,check_trace])
])
output = widgets.Output()
with output:
storage.make_3d_axis()
storage.fig.canvas.toolbar_position = 'bottom'
# In[ ]:
# In[16]:
from ipywidgets import AppLayout
from ipywidgets import HTML, Layout, Dropdown, Output, Textarea, VBox, Label, Text
from ipywidgets import Label, Layout, HBox
from IPython.display import display
# header = HTML("<h1><center><\"(__)~~.. MousePlayer <\"(__)~~....</center></h1>")
# header = HTML("<h1><center><\"(__)~~.. ʍօʊֆɛ քʟǟʏɛʀ <\"(__)~~....</center></h1>")
header = HTML("<h1><center>🐭 ʍօʊֆɛ քʟǟʏɛʀ 🐭</center></h1>")
# board = VBox( [header, HBox([output,dashboard]) ], layout=Layout(justify_content = 'center') )
board = AppLayout(header=None,
left_sidebar=None,
center=output,
right_sidebar=dashboard,
footer=None,
pane_widths=[0,2, 2])
app = VBox( [header, board ], layout=Layout(justify_content = 'center') )
# In[ ]:
# In[17]:
# In[ ]:
# In[ ]:
# In[18]:
# TODO toggles to show trace, ellipsoids, skeleton, raw data,
# Labels showing if data is loaded or tracking is loaded
# Tracking without the raw data (get the xy limits from the xy data)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"utils.analysis_tools.particles_to_body_supports_cuda",
"ipywidgets.Valid",
"ipywidgets.Text",
"numpy.einsum",
"ipywidgets.jslink",
"ipywidgets.Output",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"numpy.sin",
"ipywidgets.BoundedIntText",
"ipywidgets.Button",
"numpy.transpose",
"numpy.max",
"numpy.reshape",
"ipywidgets.Layout",
"numpy.broadcast_arrays",
"ipywidgets.HTML",
"h5py.File",
"ipywidgets.IntSlider",
"ipywidgets.HBox",
"utils.analysis_tools.unpack_from_jagged",
"numpy.cos",
"torch.from_numpy",
"ipywidgets.BoundedFloatText",
"ipywidgets.AppLayout",
"ipywidgets.Play",
"numpy.array",
"ipywidgets.Checkbox",
"torch.transpose"
] |
[((24684, 24793), 'ipywidgets.Play', 'widgets.Play', ([], {'value': '(0)', 'min': '(0)', 'max': '(10000)', 'step': '(10)', 'interval': '(100)', 'description': '"""Press play"""', 'disabled': '(False)'}), "(value=0, min=0, max=10000, step=10, interval=100, description=\n 'Press play', disabled=False)\n", (24696, 24793), True, 'import ipywidgets as widgets\n'), ((24828, 24872), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'value': '(0)', 'min': '(0)', 'max': '(10000)'}), '(value=0, min=0, max=10000)\n', (24845, 24872), True, 'import ipywidgets as widgets\n'), ((25256, 25306), 'ipywidgets.jslink', 'widgets.jslink', (["(play, 'value')", "(slider, 'value')"], {}), "((play, 'value'), (slider, 'value'))\n", (25270, 25306), True, 'import ipywidgets as widgets\n'), ((25339, 25478), 'ipywidgets.Text', 'widgets.Text', ([], {'value': '"""/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/pre_processed_frames.hdf5"""', 'description': '"""Path:"""'}), "(value=\n '/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/pre_processed_frames.hdf5'\n , description='Path:')\n", (25351, 25478), True, 'import ipywidgets as widgets\n'), ((25504, 25650), 'ipywidgets.Text', 'widgets.Text', ([], {'value': '"""/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/tracked_behavior_in_progress.pkl"""', 'description': '"""Path:"""'}), "(value=\n '/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/tracked_behavior_in_progress.pkl'\n , description='Path:')\n", (25516, 25650), True, 'import ipywidgets as widgets\n'), ((25667, 25706), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Load data"""'}), "(description='Load data')\n", (25681, 25706), True, 'import ipywidgets as widgets\n'), ((25738, 25781), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Load tracking"""'}), "(description='Load tracking')\n", (25752, 25781), True, 'import ipywidgets as widgets\n'), ((26891, 26964), 'ipywidgets.BoundedIntText', 'widgets.BoundedIntText', ([], {'value': '(0)', 'min': '(0)', 'max': '(10000)', 'description': '"""Frame #:"""'}), "(value=0, min=0, max=10000, description='Frame #:')\n", (26913, 26964), True, 'import ipywidgets as widgets\n'), ((27008, 27051), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Jump to frame"""'}), "(description='Jump to frame')\n", (27022, 27051), True, 'import ipywidgets as widgets\n'), ((27281, 27367), 'ipywidgets.BoundedFloatText', 'widgets.BoundedFloatText', ([], {'value': '(0)', 'min': '(0)', 'max': '(10000 / 60)', 'description': '"""Time [s]:"""'}), "(value=0, min=0, max=10000 / 60, description=\n 'Time [s]:')\n", (27305, 27367), True, 'import ipywidgets as widgets\n'), ((27403, 27445), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Jump to time"""'}), "(description='Jump to time')\n", (27417, 27445), True, 'import ipywidgets as widgets\n'), ((27765, 27827), 'ipywidgets.Valid', 'widgets.Valid', ([], {'value': '(True)', 'indent': '(True)', 'description': '"""Raw data"""'}), "(value=True, indent=True, description='Raw data')\n", (27778, 27827), True, 'import ipywidgets as widgets\n'), ((27857, 27906), 'ipywidgets.Valid', 'widgets.Valid', ([], {'value': '(True)', 'description': '"""Tracking"""'}), "(value=True, description='Tracking')\n", (27870, 27906), True, 'import ipywidgets as widgets\n'), ((27945, 28038), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Display raw data"""', 'disabled': '(False)', 'indent': '(True)'}), "(value=True, description='Display raw 
data', disabled=False,\n indent=True)\n", (27961, 28038), True, 'import ipywidgets as widgets\n'), ((28067, 28161), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Display skeleton"""', 'disabled': '(False)', 'indent': '(False)'}), "(value=True, description='Display skeleton', disabled=False,\n indent=False)\n", (28083, 28161), True, 'import ipywidgets as widgets\n'), ((28191, 28287), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Display ellipsoids"""', 'disabled': '(False)', 'indent': '(True)'}), "(value=True, description='Display ellipsoids', disabled=\n False, indent=True)\n", (28207, 28287), True, 'import ipywidgets as widgets\n'), ((28316, 28408), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Display trace"""', 'disabled': '(False)', 'indent': '(False)'}), "(value=False, description='Display trace', disabled=False,\n indent=False)\n", (28332, 28408), True, 'import ipywidgets as widgets\n'), ((28452, 28487), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""<< 10"""'}), "(description='<< 10')\n", (28466, 28487), True, 'import ipywidgets as widgets\n'), ((28510, 28543), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""< 5"""'}), "(description='< 5')\n", (28524, 28543), True, 'import ipywidgets as widgets\n'), ((28567, 28602), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""10 >>"""'}), "(description='10 >>')\n", (28581, 28602), True, 'import ipywidgets as widgets\n'), ((28625, 28658), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""5 >"""'}), "(description='5 >')\n", (28639, 28658), True, 'import ipywidgets as widgets\n'), ((29051, 29089), 'ipywidgets.Layout', 'widgets.Layout', ([], {'margin': '"""0 0 10px 10px"""'}), "(margin='0 0 10px 10px')\n", (29065, 29089), True, 'import ipywidgets as widgets\n'), ((29658, 29674), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (29672, 29674), True, 'import ipywidgets as widgets\n'), ((30169, 30219), 'ipywidgets.HTML', 'HTML', (['"""<h1><center>🐭 ʍօʊֆɛ քʟǟʏɛʀ 🐭</center></h1>"""'], {}), "('<h1><center>🐭 ʍօʊֆɛ քʟǟʏɛʀ 🐭</center></h1>')\n", (30173, 30219), False, 'from ipywidgets import HTML, Layout, Dropdown, Output, Textarea, VBox, Label, Text\n'), ((30328, 30450), 'ipywidgets.AppLayout', 'AppLayout', ([], {'header': 'None', 'left_sidebar': 'None', 'center': 'output', 'right_sidebar': 'dashboard', 'footer': 'None', 'pane_widths': '[0, 2, 2]'}), '(header=None, left_sidebar=None, center=output, right_sidebar=\n dashboard, footer=None, pane_widths=[0, 2, 2])\n', (30337, 30450), False, 'from ipywidgets import AppLayout\n'), ((2940, 2967), 'torch.transpose', 'torch.transpose', (['part', '(0)', '(1)'], {}), '(part, 0, 1)\n', (2955, 2967), False, 'import torch\n'), ((4168, 4198), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 4.5)'}), '(figsize=(4.5, 4.5))\n', (4178, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4465, 4509), 'utils.analysis_tools.unpack_from_jagged', 'unpack_from_jagged', (['self.jagged_lines[frame]'], {}), '(self.jagged_lines[frame])\n', (4483, 4509), False, 'from utils.analysis_tools import unpack_from_jagged\n'), ((5634, 5678), 'utils.analysis_tools.unpack_from_jagged', 'unpack_from_jagged', (['self.jagged_lines[frame]'], {}), '(self.jagged_lines[frame])\n', (5652, 5678), False, 'from utils.analysis_tools import unpack_from_jagged\n'), ((12371, 12393), 'numpy.reshape', 'np.reshape', (['x', 'u.shape'], {}), '(x, 
u.shape)\n', (12381, 12393), True, 'import numpy as np\n'), ((12409, 12431), 'numpy.reshape', 'np.reshape', (['y', 'u.shape'], {}), '(y, u.shape)\n', (12419, 12431), True, 'import numpy as np\n'), ((12447, 12469), 'numpy.reshape', 'np.reshape', (['z', 'u.shape'], {}), '(z, u.shape)\n', (12457, 12469), True, 'import numpy as np\n'), ((14214, 14242), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (14233, 14242), True, 'import numpy as np\n'), ((16790, 16812), 'numpy.reshape', 'np.reshape', (['x', 'u.shape'], {}), '(x, u.shape)\n', (16800, 16812), True, 'import numpy as np\n'), ((16828, 16850), 'numpy.reshape', 'np.reshape', (['y', 'u.shape'], {}), '(y, u.shape)\n', (16838, 16850), True, 'import numpy as np\n'), ((16866, 16888), 'numpy.reshape', 'np.reshape', (['z', 'u.shape'], {}), '(z, u.shape)\n', (16876, 16888), True, 'import numpy as np\n'), ((19555, 19609), 'numpy.array', 'np.array', (["['hip', 'ass', 'mid', 'nose', 'tip', 'impl']"], {}), "(['hip', 'ass', 'mid', 'nose', 'tip', 'impl'])\n", (19563, 19609), True, 'import numpy as np\n'), ((20822, 20855), 'numpy.arange', 'np.arange', (['i_trace_start', 'i_frame'], {}), '(i_trace_start, i_frame)\n', (20831, 20855), True, 'import numpy as np\n'), ((21580, 21634), 'numpy.array', 'np.array', (["['hip', 'ass', 'mid', 'nose', 'tip', 'impl']"], {}), "(['hip', 'ass', 'mid', 'nose', 'tip', 'impl'])\n", (21588, 21634), True, 'import numpy as np\n'), ((23062, 23095), 'numpy.arange', 'np.arange', (['i_trace_start', 'i_frame'], {}), '(i_trace_start, i_frame)\n', (23071, 23095), True, 'import numpy as np\n'), ((29114, 29172), 'ipywidgets.HBox', 'HBox', (['[data_path_textbox, load_button]'], {'layout': 'item_layout'}), '([data_path_textbox, load_button], layout=item_layout)\n', (29118, 29172), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29183, 29254), 'ipywidgets.HBox', 'HBox', (['[tracking_path_textbox, load_behavior_button]'], {'layout': 'item_layout'}), '([tracking_path_textbox, load_behavior_button], layout=item_layout)\n', (29187, 29254), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29264, 29308), 'ipywidgets.HBox', 'HBox', (['[track_ok, raw_ok]'], {'layout': 'item_layout'}), '([track_ok, raw_ok], layout=item_layout)\n', (29268, 29308), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29318, 29358), 'ipywidgets.HBox', 'HBox', (['[play, slider]'], {'layout': 'item_layout'}), '([play, slider], layout=item_layout)\n', (29322, 29358), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29368, 29428), 'ipywidgets.HBox', 'HBox', (['[sub10_button, sub5_button, add5_button, add10_button]'], {}), '([sub10_button, sub5_button, add5_button, add10_button])\n', (29372, 29428), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29433, 29493), 'ipywidgets.HBox', 'HBox', (['[frame_textbox, jump_frame_button]'], {'layout': 'item_layout'}), '([frame_textbox, jump_frame_button], layout=item_layout)\n', (29437, 29493), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29502, 29560), 'ipywidgets.HBox', 'HBox', (['[time_textbox, jump_time_button]'], {'layout': 'item_layout'}), '([time_textbox, jump_time_button], layout=item_layout)\n', (29506, 29560), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29570, 29599), 'ipywidgets.HBox', 'HBox', (['[check_raw, check_skel]'], {}), '([check_raw, check_skel])\n', (29574, 29599), False, 'from ipywidgets import Label, Layout, HBox\n'), ((29604, 29636), 'ipywidgets.HBox', 'HBox', (['[check_ellip, check_trace]'], {}), 
'([check_ellip, check_trace])\n', (29608, 29636), False, 'from ipywidgets import Label, Layout, HBox\n'), ((30540, 30572), 'ipywidgets.Layout', 'Layout', ([], {'justify_content': '"""center"""'}), "(justify_content='center')\n", (30546, 30572), False, 'from ipywidgets import Label, Layout, HBox\n'), ((2010, 2045), 'h5py.File', 'h5py.File', (['self.data_path'], {'mode': '"""r"""'}), "(self.data_path, mode='r')\n", (2019, 2045), False, 'import h5py\n'), ((2460, 2474), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2471, 2474), False, 'import sys, os, pickle\n'), ((3025, 3083), 'utils.analysis_tools.particles_to_body_supports_cuda', 'particles_to_body_supports_cuda', (['part[:, :9]'], {'implant': '(True)'}), '(part[:, :9], implant=True)\n', (3056, 3083), False, 'from utils.analysis_tools import particles_to_body_supports_cuda\n'), ((3113, 3172), 'utils.analysis_tools.particles_to_body_supports_cuda', 'particles_to_body_supports_cuda', (['part[:, 9:]'], {'implant': '(False)'}), '(part[:, 9:], implant=False)\n', (3144, 3172), False, 'from utils.analysis_tools import particles_to_body_supports_cuda\n'), ((3338, 3397), 'utils.analysis_tools.particles_to_body_supports_cuda', 'particles_to_body_supports_cuda', (['part[:, :8]'], {'implant': '(False)'}), '(part[:, :8], implant=False)\n', (3369, 3397), False, 'from utils.analysis_tools import particles_to_body_supports_cuda\n'), ((3427, 3486), 'utils.analysis_tools.particles_to_body_supports_cuda', 'particles_to_body_supports_cuda', (['part[:, 8:]'], {'implant': '(False)'}), '(part[:, 8:], implant=False)\n', (3458, 3486), False, 'from utils.analysis_tools import particles_to_body_supports_cuda\n'), ((12178, 12214), 'numpy.einsum', 'np.einsum', (['"""ij,ja->ia"""', 'R_body', 'posi'], {}), "('ij,ja->ia', R_body, posi)\n", (12187, 12214), True, 'import numpy as np\n'), ((14541, 14556), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (14553, 14556), True, 'import numpy as np\n'), ((14558, 14573), 'numpy.transpose', 'np.transpose', (['Y'], {}), '(Y)\n', (14570, 14573), True, 'import numpy as np\n'), ((14575, 14590), 'numpy.transpose', 'np.transpose', (['Z'], {}), '(Z)\n', (14587, 14590), True, 'import numpy as np\n'), ((16597, 16633), 'numpy.einsum', 'np.einsum', (['"""ij,ja->ia"""', 'R_body', 'posi'], {}), "('ij,ja->ia', R_body, posi)\n", (16606, 16633), True, 'import numpy as np\n'), ((20596, 20636), 'numpy.max', 'np.max', (['[i_clip, i_frame - trace_length]'], {}), '([i_clip, i_frame - trace_length])\n', (20602, 20636), True, 'import numpy as np\n'), ((20677, 20712), 'numpy.max', 'np.max', (['[0, i_frame - trace_length]'], {}), '([0, i_frame - trace_length])\n', (20683, 20712), True, 'import numpy as np\n'), ((22882, 22922), 'numpy.max', 'np.max', (['[i_clip, i_frame - trace_length]'], {}), '([i_clip, i_frame - trace_length])\n', (22888, 22922), True, 'import numpy as np\n'), ((22963, 22998), 'numpy.max', 'np.max', (['[0, i_frame - trace_length]'], {}), '([0, i_frame - trace_length])\n', (22969, 22998), True, 'import numpy as np\n'), ((11437, 11446), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (11443, 11446), True, 'import numpy as np\n'), ((11607, 11616), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (11613, 11616), True, 'import numpy as np\n'), ((11748, 11757), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (11754, 11757), True, 'import numpy as np\n'), ((15888, 15897), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (15894, 15897), True, 'import numpy as np\n'), ((16026, 16035), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (16032, 16035), 
True, 'import numpy as np\n'), ((16167, 16176), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (16173, 16176), True, 'import numpy as np\n'), ((11467, 11476), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (11473, 11476), True, 'import numpy as np\n'), ((11477, 11486), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (11483, 11486), True, 'import numpy as np\n'), ((11507, 11516), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (11513, 11516), True, 'import numpy as np\n'), ((11517, 11526), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (11523, 11526), True, 'import numpy as np\n'), ((11638, 11647), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (11644, 11647), True, 'import numpy as np\n'), ((11648, 11657), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (11654, 11657), True, 'import numpy as np\n'), ((11679, 11688), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (11685, 11688), True, 'import numpy as np\n'), ((11689, 11698), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (11695, 11698), True, 'import numpy as np\n'), ((11779, 11788), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (11785, 11788), True, 'import numpy as np\n'), ((11789, 11798), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (11795, 11798), True, 'import numpy as np\n'), ((11820, 11829), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (11826, 11829), True, 'import numpy as np\n'), ((11830, 11839), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (11836, 11839), True, 'import numpy as np\n'), ((15918, 15927), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (15924, 15927), True, 'import numpy as np\n'), ((15928, 15937), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (15934, 15937), True, 'import numpy as np\n'), ((15958, 15967), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (15964, 15967), True, 'import numpy as np\n'), ((15968, 15977), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (15974, 15977), True, 'import numpy as np\n'), ((16057, 16066), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (16063, 16066), True, 'import numpy as np\n'), ((16067, 16076), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (16073, 16076), True, 'import numpy as np\n'), ((16098, 16107), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (16104, 16107), True, 'import numpy as np\n'), ((16108, 16117), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (16114, 16117), True, 'import numpy as np\n'), ((16198, 16207), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (16204, 16207), True, 'import numpy as np\n'), ((16208, 16217), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (16214, 16217), True, 'import numpy as np\n'), ((16239, 16248), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (16245, 16248), True, 'import numpy as np\n'), ((16249, 16258), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (16255, 16258), True, 'import numpy as np\n'), ((2887, 2909), 'torch.from_numpy', 'torch.from_numpy', (['part'], {}), '(part)\n', (2903, 2909), False, 'import torch\n')]
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import re, os, sys
from dwave_qbsolv import QBSolv
from dwave.system.samplers import DWaveSampler, DWaveCliqueSampler
from dwave.system.composites import EmbeddingComposite, FixedEmbeddingComposite
import dimod
import hybrid
import minorminer
import networkx as nx
from numpy import linalg as la
from networkx.generators.atlas import *
import numpy as np
import networkx as nx
import random, copy
import math
from scipy.sparse import csr_matrix
import argparse
import logging
import datetime as dt
from qpu_sampler_time import QPUTimeSubproblemAutoEmbeddingSampler
#
# The Quantum Graph Community Detection Algorithm has been described
# in the following publications. Please cite in your publication.
#
# <NAME>, <NAME>, <NAME>,
# 2017, Graph Partitioning using Quantum Annealing on the
# D-Wave System, Proceedings of the 2nd International
# Workshop on Post Moore’s Era Supercomputing (PMES), 22-29.
#
# <NAME>, <NAME>, <NAME> 2020, Detecting
# Multiple Communities using Quantum Annealing on the D-Wave System,
# PLOS ONE 15(2): e0227538. https://doi.org/10.1371/journal.pone.0227538
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, 2021, Reduction of the Molecular Hamiltonian Matrix using
# Quantum Community Detection, Sci Rep 11, 4099 (2021).
# https://doi.org/10.1038/s41598-021-83561-x#
#
def build_mod(Adj, thresh, num_edges):
    #Builds the modularity matrix from the adjacency matrix.
    #Given an adjacency matrix, it returns the total edge weight and the modularity matrix.
Dim = Adj.shape[1]
print ("\n Dim = ", Dim)
print ("\n Computing modularity matrix ...")
Deg = np.zeros([Dim])
M = 0.0
# Calc Adj degrees
Deg = Adj.sum(1)
M = Deg.sum()
mtotal = M/2.0
Mod = np.zeros([Dim,Dim])
# Calc modularity matrix
Mod = Mod + Adj
Mod = Mod - (Deg * Deg.T)/M
np.set_printoptions(precision=3)
return mtotal, Mod
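# In matrix form the modularity built above is B = A - d d^T / (2m), where d is
# the degree vector and 2m = d.sum(); build_mod returns (m, B). A sketch of one
# plausible pipeline using the helpers defined in this file (argument names are
# illustrative, not taken from a specific driver script):
# mtotal, Mod = build_mod(Adj, thresh, num_edges)
# beta, gamma, GAMMA = set_penalty_constant(num_nodes, num_blocks, beta0, gamma0)
# Q = makeQubo(graph, Mod, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks, threshold)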
def get_block_number(big_indx, num_blocks, num_nodes):
#indx = math.ceil(big_indx/num_nodes) # node indx starts from 0
indx = math.floor(big_indx/num_nodes) # node indx starts from 0
#print("big_indx=", big_indx," Indx=", indx, " num_blocks=", num_blocks)
if indx > num_blocks-1:
raise ValueError("block indx cannot be larger than num_blocks-1")
return int(indx)
def get_indx_within_block(big_indx, num_nodes):
return big_indx%num_nodes
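# Worked example of the flat-index mapping used throughout: with num_nodes = 4,
# big_indx = 9 lies in block floor(9/4) = 2 and is node 9 % 4 = 1 within that
# block, i.e. it is the QUBO variable for (node 1, part 2).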
def get_entry_beta_B(i_indx, j_indx, beta, graph, modularity, num_nodes, num_blocks):
i_block_indx = get_block_number(i_indx, num_blocks, num_nodes)
j_block_indx = get_block_number(j_indx, num_blocks, num_nodes)
i_indx_within_block = get_indx_within_block(i_indx, num_nodes)
j_indx_within_block = get_indx_within_block(j_indx, num_nodes)
if i_block_indx == j_block_indx:
return beta*modularity[i_indx_within_block, j_indx_within_block]
else:
return 0
def get_entry_B_Gamma(i_indx, j_indx, modularity, beta,gamma, GAMMA, num_nodes, num_parts, num_blocks):
i_indx_within_block = get_indx_within_block(i_indx, num_nodes)
j_indx_within_block = get_indx_within_block(j_indx, num_nodes)
if i_indx_within_block == j_indx_within_block:
return gamma[i_indx_within_block]
else:
return 0
def get_entry_add_diag(i_indx,gamma, GAMMA, num_nodes, num_parts, num_blocks):
gamma_entry = GAMMA[i_indx]
return -2*gamma_entry
def get_i_j_entry(i_indx, j_indx, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks):
#print("i_indx=", i_indx," j_indx=", j_indx)
if i_indx == j_indx:
bB = get_entry_beta_B(i_indx, j_indx, beta, graph, modularity, num_nodes, num_blocks)
BG = get_entry_B_Gamma(i_indx, j_indx, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks)
diag = get_entry_add_diag(i_indx,gamma, GAMMA, num_nodes, num_parts, num_blocks)
return bB + BG + diag
else:
bB = get_entry_beta_B(i_indx, j_indx, beta, graph, modularity, num_nodes, num_blocks)
BG = get_entry_B_Gamma(i_indx, j_indx, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks)
return bB + BG
def threshold_mmatrix(graph, mmatrix, threshold):
msize = mmatrix.shape[0]
for i in range(0, msize):
mmatrix[i,i] = mmatrix[i,i] + graph.degree(i)
for i in range(0, msize):
for j in range(0, msize):
if i!=j and abs(mmatrix[i,j]) < threshold:
mmatrix[i,j] = 0.0
return mmatrix
def makeQubo(graph, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks, threshold):
# Create QUBO matrix
qsize = num_blocks*num_nodes
Q = np.zeros([qsize,qsize])
# Note: weights are set to the negative due to maximization
# Set node weights
for i in range(qsize):
entry = get_i_j_entry(i, i, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
Q[i,i] = -entry
# Set off-diagonal weights
for i in range(qsize):
for j in range(i, qsize):
if i != j:
entry = get_i_j_entry(i, j, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
if abs(entry) > threshold:
Q[i,j] = -entry
Q[j,i] = -entry
return Q
def write_qubo_file(graph, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks, threshold):
###qubo format
# p qubo target maxDiagonals nDiagonals nElements
#target = 0 implies unconstrained problem
nElements = 0 #to be counted
maxDiagonals = num_nodes*num_blocks # number of diagonal in topology
nDiagonals = num_nodes*num_blocks #number of diagonals the problem
qubo_file = open("body.qubo", "w")
# Write node header
qubo_string_diag = "".join(["\nc nodes first \n"])
qubo_file.write(qubo_string_diag)
# Write nodes
for i in range(num_blocks*num_nodes):
entry = get_i_j_entry(i, i, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
qubo_string_diag = "".join([str(i)+" "+str(i)+" "+str(entry)+"\n"])
qubo_file.write(qubo_string_diag)
# Write coupler header
qubo_string_couplers = "".join(["\nc couplers \n"])
qubo_file.write(qubo_string_couplers)
# Write couplers
for i in range(num_blocks*num_nodes):
for j in range(i, num_blocks*num_nodes):
if i != j:
entry = get_i_j_entry(i, j, modularity, beta, gamma, GAMMA, graph, num_nodes, num_parts, num_blocks)
if abs(entry) > threshold:
qubo_string_couplers = "".join([str(i)+" "+str(j)+" "+str(2*entry)+"\n"]) #x2 because of what qbsolv minimizes
qubo_file.write(qubo_string_couplers)
nElements += 1
qubo_file.close()
# Write header to separate file now that we know the nElements
# p qubo target maxDiagonals nDiagonals nElements
qubo_file = open("graph.qubo", "w")
qubo_string_initialize = "".join(["p qubo 0 " + str(maxDiagonals)+" "+str(nDiagonals)+" "+str(nElements)+"\n"])
qubo_file.write(qubo_string_initialize)
qubo_file.close()
# Put qubo file together - header and body
os.system("cat body.qubo >> graph.qubo")
os.system("rm body.qubo")
def get_qubo_solution():
myFile = open("dwave_output.out", 'r')
line_count = 0
for lines in myFile:
line_count += 1
if line_count == 2:
bit_string = lines
break
return bit_string.strip()
def violating_contraints(graph, x_indx, num_blocks, num_nodes, num_parts, result):
#each node in exactly one part
for node in range(num_nodes):
value = 0
for j in range(num_blocks):
value += x_indx[(node, j)]
if value >1:
print ("constraint violated: node %d in %d parts. Degree: %d" %(node, value, graph.degree(node)))
value = 0
    #balancing constraints
sum_v_i = 0
for node in range(num_nodes):
sum_x_ik = 0
for j in range(num_blocks):
sum_x_ik += x_indx[(node, j)]
node_i = (1 - sum_x_ik)
sum_v_i += node_i
print ("\nlast part size",sum_v_i , - num_nodes/float(num_parts))
num_clusters_found = 0
for j in range(num_blocks):
value = 0
for node in range(num_nodes):
value += x_indx[(node, j)]
print ("part %d has %d nodes" %(j, value))
if value > 0:
num_clusters_found += 1
result['num_clusters_found'] = num_clusters_found
#######################################################
######## penalty weight function #####################
####### ###################
def set_penalty_constant(num_nodes, num_blocks, beta0, gamma0):
beta = beta0
gamma = [gamma0 for i in range(num_nodes)]
GAMMA = [gamma[i] for j in range(num_blocks) for i in range(num_nodes) ]
return beta, gamma, GAMMA
#########
def calcModularityMetric(mtotal, modularity, part_number):
Dim = modularity.shape[1]
print ("\n Dim = ", Dim)
msum = 0.0
for ii in range(0, Dim):
for jj in range(0, Dim):
if part_number[ii] == part_number[jj]:
msum = msum + modularity[ii,jj]
mmetric = msum / (2.0 * mtotal)
return mmetric
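# The metric above is the usual modularity score
# Q = (1 / (2*mtotal)) * sum over same-community pairs (i, j) of modularity[i, j];
# a value near 0 means the partition is no better than chance, larger is better.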
def run_qbsolv():
rval = random.randint(1,1000)
estring = "qbsolv -r " + str(rval) + " -i graph.qubo -m -o dwave_output.out"
print('\n', estring)
os.system(estring)
def process_solution_qbsolv(graph, num_blocks, num_nodes, num_parts, result):
bit_string = get_qubo_solution()
print (bit_string)
print ("num non-zeros: ", sum([int(i) for i in bit_string]))
x_indx = {}
qubo_soln = [int(i) for i in bit_string]
for i in range(num_blocks*num_nodes):
i_block_indx = get_block_number(i, num_blocks, num_nodes)
i_indx_within_block = get_indx_within_block(i, num_nodes)
x_indx[(i_indx_within_block, i_block_indx)] = qubo_soln[i]
violating_contraints(graph, x_indx, num_blocks, num_nodes, num_parts, result)
part_number = {}
for key in x_indx:
node, part = key
if x_indx[key] == 1:
part_number[node] = part
return part_number
def process_solution(ss, graph, num_blocks, num_nodes, num_parts, result):
qsol = {}
for i in range(num_blocks*num_nodes):
qsol[i] = int(ss[0,i])
qtotal = 0
for i in range(num_blocks*num_nodes):
qtotal += qsol[i]
print('\nnum non-zeros = ', qtotal)
x_indx = {}
qubo_soln = qsol
for i in range(num_blocks*num_nodes):
i_block_indx = get_block_number(i, num_blocks, num_nodes)
i_indx_within_block = get_indx_within_block(i, num_nodes)
x_indx[(i_indx_within_block, i_block_indx)] = qubo_soln[i]
violating_contraints(graph, x_indx, num_blocks, num_nodes, num_parts, result)
part_number = {}
for key in x_indx:
node, part = key
if x_indx[key] == 1:
part_number[node] = part
return part_number
def getEmbedding(qsize):
#dsystem = DWaveCliqueSampler()
#embedding = dsystem.largest_clique()
#print('embedding found, len = ', len(embedding))
#print('embedding = ', embedding)
#exit(0)
ksize = qsize
qsystem = DWaveSampler()
ksub = nx.complete_graph(ksize).edges()
embedding = minorminer.find_embedding(ksub, qsystem.edgelist)
print('\nembedding done')
return embedding
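# The clique embedding found above is intended to be computed once and reused:
# runDwave() below wraps it as FixedEmbeddingComposite(DWaveSampler(), embedding),
# so repeated QUBO solves of the same size skip re-running minorminer every call.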
def runDwave(Q, num_nodes, k, embedding, qsize, run_label, result):
# Using D-Wave/qbsolv
    # Needed when the problem has more nodes/variables than can fit on the D-Wave
sampler = FixedEmbeddingComposite(DWaveSampler(), embedding)
#sampler = DWaveCliqueSampler()
rval = random.randint(1,10000)
t0 = dt.datetime.now()
solution = QBSolv().sample_qubo(Q, solver=sampler, seed=rval,
label=run_label)
wtime = dt.datetime.now() - t0
result['wall_clock_time'] = wtime
# Collect first energy and num_occ, num diff solutions, and total solutions
first = True
ndiff = 0
total_solns = 0
for sample, energy, num_occurrences in solution.data():
#print(sample, "Energy: ", energy, "Occurrences: ", num_occurrences)
if first == True:
result['energy'] = energy
result['num_occ'] = num_occurrences
first = False
ndiff += 1
total_solns += num_occurrences
result['num_diff_solns'] = ndiff
result['total_solns'] = total_solns
print('\n qbsolv response:')
print(solution)
ss = solution.samples()
#print("\n qbsolv samples=" + str(list(solution.samples())))
#print('\nss = ', ss)
print(flush=True)
return ss
def runDwaveHybrid(Q, num_nodes, k, sub_qsize, run_label, result):
bqm = dimod.BQM.from_qubo(Q)
rparams = {}
rparams['label'] = run_label
# QPU sampler with timing
QPUSubSamTime = QPUTimeSubproblemAutoEmbeddingSampler(num_reads=100, sampling_params=rparams)
# define the workflow
iteration = hybrid.Race(
hybrid.InterruptableTabuSampler(),
hybrid.EnergyImpactDecomposer(size=sub_qsize, rolling=True, rolling_history=0.15)
#| hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=100, sampling_params=rparams)
#| QTS.QPUTimeSubproblemAutoEmbeddingSampler(num_reads=100, sampling_params=rparams)
| QPUSubSamTime
| hybrid.SplatComposer()
) | hybrid.MergeSamples(aggregate=True)
workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)
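    # Workflow sketch: race an interruptible tabu sampler against a decompose -> QPU-sample
    # -> splat branch, merge the racers' samples, and loop until 3 consecutive iterations
    # bring no improvement (convergence=3).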
# Run the workflow
init_state = hybrid.State.from_problem(bqm)
t0 = dt.datetime.now()
solution = workflow.run(init_state).result()
wtime = dt.datetime.now() - t0
#hybrid.profiling.print_counters(workflow)
#print('\nQ timers = ', QPUSubSamTime.timers)
#print('\nQ counters = ', QPUSubSamTime.counters)
result['wall_clock_time'] = wtime
# Collect number of QPU accesses and QPU time used
result['num_qpu_accesses'] = QPUSubSamTime.num_accesses
result['total_qpu_time'] = QPUSubSamTime.total_qpu_time
# Collect from lowest energy result
result['energy'] = solution.samples.first.energy
result['num_occ'] = solution.samples.first.num_occurrences
# Collect number of different solutions w different energies
result['num_diff_solns'] = len(solution.samples)
total_solns = 0
for energy, num_occ in solution.samples.data(['energy', 'num_occurrences']):
total_solns += num_occ
result['total_solns'] = total_solns
# Show list of results in energy order
print(solution.samples)
# Collect the first solution
ss = np.zeros([1,num_nodes])
for i in range(num_nodes):
ss[0,i] = solution.samples.first.sample[i]
return ss
def cluster(Q, k, embedding, qsize, run_label, result):
# Start with Q
qsize = Q.shape[1]
print('\n Q size = ', qsize)
# Cluster into k parts using DWave
ss = runDwave(Q, qsize, k, embedding, qsize, run_label, result)
return ss
def clusterHybrid(Q, k, sub_qsize, run_label, result):
# Start with Q
qsize = Q.shape[1]
print('\n Q size = ', qsize)
# Cluster into k parts using Hybrid/DWave ocean
ss = runDwaveHybrid(Q, qsize, k, sub_qsize, run_label, result)
return ss
|
[
"numpy.set_printoptions",
"hybrid.MergeSamples",
"random.randint",
"hybrid.InterruptableTabuSampler",
"numpy.zeros",
"os.system",
"math.floor",
"qpu_sampler_time.QPUTimeSubproblemAutoEmbeddingSampler",
"hybrid.SplatComposer",
"dimod.BQM.from_qubo",
"minorminer.find_embedding",
"hybrid.LoopUntilNoImprovement",
"hybrid.State.from_problem",
"hybrid.EnergyImpactDecomposer",
"networkx.complete_graph",
"dwave.system.samplers.DWaveSampler",
"dwave_qbsolv.QBSolv",
"datetime.datetime.now"
] |
[((1645, 1660), 'numpy.zeros', 'np.zeros', (['[Dim]'], {}), '([Dim])\n', (1653, 1660), True, 'import numpy as np\n'), ((1755, 1775), 'numpy.zeros', 'np.zeros', (['[Dim, Dim]'], {}), '([Dim, Dim])\n', (1763, 1775), True, 'import numpy as np\n'), ((1855, 1887), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (1874, 1887), True, 'import numpy as np\n'), ((2046, 2078), 'math.floor', 'math.floor', (['(big_indx / num_nodes)'], {}), '(big_indx / num_nodes)\n', (2056, 2078), False, 'import math\n'), ((4557, 4581), 'numpy.zeros', 'np.zeros', (['[qsize, qsize]'], {}), '([qsize, qsize])\n', (4565, 4581), True, 'import numpy as np\n'), ((6934, 6974), 'os.system', 'os.system', (['"""cat body.qubo >> graph.qubo"""'], {}), "('cat body.qubo >> graph.qubo')\n", (6943, 6974), False, 'import re, os, sys\n'), ((6977, 7002), 'os.system', 'os.system', (['"""rm body.qubo"""'], {}), "('rm body.qubo')\n", (6986, 7002), False, 'import re, os, sys\n'), ((8944, 8967), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (8958, 8967), False, 'import random, copy\n'), ((9071, 9089), 'os.system', 'os.system', (['estring'], {}), '(estring)\n', (9080, 9089), False, 'import re, os, sys\n'), ((10781, 10795), 'dwave.system.samplers.DWaveSampler', 'DWaveSampler', ([], {}), '()\n', (10793, 10795), False, 'from dwave.system.samplers import DWaveSampler, DWaveCliqueSampler\n'), ((10852, 10901), 'minorminer.find_embedding', 'minorminer.find_embedding', (['ksub', 'qsystem.edgelist'], {}), '(ksub, qsystem.edgelist)\n', (10877, 10901), False, 'import minorminer\n'), ((11236, 11260), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (11250, 11260), False, 'import random, copy\n'), ((11267, 11284), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (11282, 11284), True, 'import datetime as dt\n'), ((12230, 12252), 'dimod.BQM.from_qubo', 'dimod.BQM.from_qubo', (['Q'], {}), '(Q)\n', (12249, 12252), False, 'import dimod\n'), ((12347, 12424), 'qpu_sampler_time.QPUTimeSubproblemAutoEmbeddingSampler', 'QPUTimeSubproblemAutoEmbeddingSampler', ([], {'num_reads': '(100)', 'sampling_params': 'rparams'}), '(num_reads=100, sampling_params=rparams)\n', (12384, 12424), False, 'from qpu_sampler_time import QPUTimeSubproblemAutoEmbeddingSampler\n'), ((12883, 12938), 'hybrid.LoopUntilNoImprovement', 'hybrid.LoopUntilNoImprovement', (['iteration'], {'convergence': '(3)'}), '(iteration, convergence=3)\n', (12912, 12938), False, 'import hybrid\n'), ((12976, 13006), 'hybrid.State.from_problem', 'hybrid.State.from_problem', (['bqm'], {}), '(bqm)\n', (13001, 13006), False, 'import hybrid\n'), ((13014, 13031), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (13029, 13031), True, 'import datetime as dt\n'), ((14000, 14024), 'numpy.zeros', 'np.zeros', (['[1, num_nodes]'], {}), '([1, num_nodes])\n', (14008, 14024), True, 'import numpy as np\n'), ((11164, 11178), 'dwave.system.samplers.DWaveSampler', 'DWaveSampler', ([], {}), '()\n', (11176, 11178), False, 'from dwave.system.samplers import DWaveSampler, DWaveCliqueSampler\n'), ((11403, 11420), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (11418, 11420), True, 'import datetime as dt\n'), ((12834, 12869), 'hybrid.MergeSamples', 'hybrid.MergeSamples', ([], {'aggregate': '(True)'}), '(aggregate=True)\n', (12853, 12869), False, 'import hybrid\n'), ((13089, 13106), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (13104, 13106), True, 'import datetime as dt\n'), 
((10805, 10829), 'networkx.complete_graph', 'nx.complete_graph', (['ksize'], {}), '(ksize)\n', (10822, 10829), True, 'import networkx as nx\n'), ((11298, 11306), 'dwave_qbsolv.QBSolv', 'QBSolv', ([], {}), '()\n', (11304, 11306), False, 'from dwave_qbsolv import QBSolv\n'), ((12481, 12514), 'hybrid.InterruptableTabuSampler', 'hybrid.InterruptableTabuSampler', ([], {}), '()\n', (12512, 12514), False, 'import hybrid\n'), ((12805, 12827), 'hybrid.SplatComposer', 'hybrid.SplatComposer', ([], {}), '()\n', (12825, 12827), False, 'import hybrid\n'), ((12520, 12606), 'hybrid.EnergyImpactDecomposer', 'hybrid.EnergyImpactDecomposer', ([], {'size': 'sub_qsize', 'rolling': '(True)', 'rolling_history': '(0.15)'}), '(size=sub_qsize, rolling=True, rolling_history\n =0.15)\n', (12549, 12606), False, 'import hybrid\n')]
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
class LabelSmoothSoftmaxCEV1(nn.Module):
'''
    This is the autograd version; you can also try LabelSmoothSoftmaxCEV2, which uses manually derived gradients.
'''
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV1, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, logits, label):
'''
args: logits: tensor of shape (N, C, H, W)
        args: label: tensor of shape (N, H, W)
'''
# overcome ignored label
logits = logits.float() # use fp32 to avoid nan
with torch.no_grad():
num_classes = logits.size(1)
label = label.clone().detach()
ignore = label == self.lb_ignore
n_valid = (ignore == 0).sum()
label[ignore] = 0
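            # Label smoothing: the target is lb_pos = 1 - lb_smooth at the true class and
            # lb_neg = lb_smooth / num_classes at every other class.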
lb_pos, lb_neg = 1. - self.lb_smooth, self.lb_smooth / num_classes
label = torch.empty_like(logits).fill_(
lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
logs = self.log_softmax(logits)
loss = -torch.sum(logs * label, dim=1)
loss[ignore] = 0
if self.reduction == 'mean':
loss = loss.sum() / n_valid
if self.reduction == 'sum':
loss = loss.sum()
return loss
class LSRCrossEntropyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, label, lb_smooth, reduction, lb_ignore):
# prepare label
num_classes = logits.size(1)
label = label.clone().detach()
ignore = label == lb_ignore
n_valid = (ignore == 0).sum()
label[ignore] = 0
lb_pos, lb_neg = 1. - lb_smooth, lb_smooth / num_classes
label = torch.empty_like(logits).fill_(
lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
ignore = ignore.nonzero()
_, M = ignore.size()
a, *b = ignore.chunk(M, dim=1)
mask = [a, torch.arange(label.size(1)), *b]
label[mask] = 0
coeff = (num_classes - 1) * lb_neg + lb_pos
ctx.coeff = coeff
ctx.mask = mask
ctx.logits = logits
ctx.label = label
ctx.reduction = reduction
ctx.n_valid = n_valid
loss = torch.log_softmax(logits, dim=1).neg_().mul_(label).sum(dim=1)
if reduction == 'mean':
loss = loss.sum().div_(n_valid)
if reduction == 'sum':
loss = loss.sum()
return loss
@staticmethod
def backward(ctx, grad_output):
coeff = ctx.coeff
mask = ctx.mask
logits = ctx.logits
label = ctx.label
reduction = ctx.reduction
n_valid = ctx.n_valid
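        # Gradient of the smoothed cross-entropy w.r.t. the logits:
        #   dL/dz = coeff * softmax(z) - smoothed_label,
        # where coeff is the sum of the smoothed target weights; ignored positions are zeroed below.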
scores = torch.softmax(logits, dim=1).mul_(coeff)
scores[mask] = 0
if reduction == 'none':
grad = scores.sub_(label).mul_(grad_output.unsqueeze(1))
elif reduction == 'sum':
grad = scores.sub_(label).mul_(grad_output)
elif reduction == 'mean':
grad = scores.sub_(label).mul_(grad_output.div_(n_valid))
return grad, None, None, None, None, None
class LabelSmoothSoftmaxCEV2(nn.Module):
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV2, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
def forward(self, logits, label):
return LSRCrossEntropyFunction.apply(
logits, label, self.lb_smooth, self.reduction, self.lb_ignore)
if __name__ == '__main__':
import torchvision
import torch
import numpy as np
import random
torch.manual_seed(15)
random.seed(15)
np.random.seed(15)
torch.backends.cudnn.deterministic = True
net1 = torchvision.models.resnet18(pretrained=True)
net2 = torchvision.models.resnet18(pretrained=True)
criteria1 = LabelSmoothSoftmaxCEV1(lb_smooth=0.1, ignore_index=255)
criteria2 = LabelSmoothSoftmaxCEV2(lb_smooth=0.1, ignore_index=255)
net1.cuda()
net2.cuda()
net1.train()
net2.train()
criteria1.cuda()
criteria2.cuda()
optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
bs = 128
for it in range(300000):
inten = torch.randn(bs, 3, 224, 244).cuda()
inten[0, 1, 0, 0] = 255
inten[0, 0, 1, 2] = 255
inten[0, 2, 5, 28] = 255
lbs = torch.randint(0, 1000, (bs, )).cuda()
logits = net1(inten)
loss1 = criteria1(logits, lbs)
optim1.zero_grad()
loss1.backward()
optim1.step()
# print(net1.fc.weight[:, :5])
logits = net2(inten)
loss2 = criteria2(logits, lbs)
optim2.zero_grad()
loss2.backward()
optim2.step()
# print(net2.fc.weight[:, :5])
with torch.no_grad():
if (it+1) % 50 == 0:
print('iter: {}, ================='.format(it+1))
# print(net1.fc.weight.numel())
print(torch.mean(torch.abs(net1.fc.weight - net2.fc.weight)).item())
print(torch.mean(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
# print(loss1.item())
# print(loss2.item())
print(loss1.item() - loss2.item())
|
[
"torchvision.models.resnet18",
"torch.log_softmax",
"torch.randint",
"numpy.random.seed",
"torch.nn.LogSoftmax",
"torch.manual_seed",
"torch.randn",
"torch.softmax",
"torch.abs",
"random.seed",
"torch.empty_like",
"torch.no_grad",
"torch.sum"
] |
[((3876, 3897), 'torch.manual_seed', 'torch.manual_seed', (['(15)'], {}), '(15)\n', (3893, 3897), False, 'import torch\n'), ((3902, 3917), 'random.seed', 'random.seed', (['(15)'], {}), '(15)\n', (3913, 3917), False, 'import random\n'), ((3922, 3940), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (3936, 3940), True, 'import numpy as np\n'), ((3998, 4042), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4025, 4042), False, 'import torchvision\n'), ((4054, 4098), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4081, 4098), False, 'import torchvision\n'), ((514, 534), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (527, 534), True, 'import torch.nn as nn\n'), ((797, 812), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (810, 812), False, 'import torch\n'), ((1276, 1306), 'torch.sum', 'torch.sum', (['(logs * label)'], {'dim': '(1)'}), '(logs * label, dim=1)\n', (1285, 1306), False, 'import torch\n'), ((5087, 5102), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5100, 5102), False, 'import torch\n'), ((2903, 2931), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2916, 2931), False, 'import torch\n'), ((4525, 4553), 'torch.randn', 'torch.randn', (['bs', '(3)', '(224)', '(244)'], {}), '(bs, 3, 224, 244)\n', (4536, 4553), False, 'import torch\n'), ((4672, 4701), 'torch.randint', 'torch.randint', (['(0)', '(1000)', '(bs,)'], {}), '(0, 1000, (bs,))\n', (4685, 4701), False, 'import torch\n'), ((1925, 1949), 'torch.empty_like', 'torch.empty_like', (['logits'], {}), '(logits)\n', (1941, 1949), False, 'import torch\n'), ((2442, 2474), 'torch.log_softmax', 'torch.log_softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2459, 2474), False, 'import torch\n'), ((5285, 5327), 'torch.abs', 'torch.abs', (['(net1.fc.weight - net2.fc.weight)'], {}), '(net1.fc.weight - net2.fc.weight)\n', (5294, 5327), False, 'import torch\n'), ((5370, 5418), 'torch.abs', 'torch.abs', (['(net1.conv1.weight - net2.conv1.weight)'], {}), '(net1.conv1.weight - net2.conv1.weight)\n', (5379, 5418), False, 'import torch\n'), ((1114, 1138), 'torch.empty_like', 'torch.empty_like', (['logits'], {}), '(logits)\n', (1130, 1138), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import numpy as np
import thermosteam as tmo
import flexsolve as flx
from warnings import warn
from thermosteam import functional as fn
from . import indexer
from . import equilibrium as eq
from . import units_of_measure as thermo_units
from collections.abc import Iterable
from .exceptions import DimensionError, InfeasibleRegion
from chemicals.elements import array_to_atoms, symbol_to_index
from . import utils
from .constants import g
__all__ = ('Stream', )
# %% Utilities
mol_units = indexer.ChemicalMolarFlowIndexer.units
mass_units = indexer.ChemicalMassFlowIndexer.units
vol_units = indexer.ChemicalVolumetricFlowIndexer.units
class StreamData:
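    """Lightweight snapshot of a stream's material flows, thermal condition, and phases
    (used by Stream.get_data and Stream.set_data)."""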
__slots__ = ('_imol', '_T', '_P', '_phases')
def __init__(self, imol, thermal_condition, phases):
self._imol = imol.copy()
self._T = thermal_condition._T
self._P = thermal_condition._P
self._phases = phases
# %%
@utils.units_of_measure(thermo_units.stream_units_of_measure)
@utils.thermo_user
@utils.registered(ticket_name='s')
class Stream:
"""
Create a Stream object that defines material flow rates
along with its thermodynamic state. Thermodynamic and transport
    properties of a stream are available as properties, while
    thermodynamic equilibrium computations (e.g., VLE, and bubble and
    dew points) are available as methods.
Parameters
----------
ID : str, optional
A unique identification. If ID is None, stream will not be registered.
If no ID is given, stream will be registered with a unique ID.
flow : Iterable[float], optional
All flow rates corresponding to chemical `IDs`.
phase : 'l', 'g', or 's'
Either gas (g), liquid (l), or solid (s). Defaults to 'l'.
T : float
Temperature [K]. Defaults to 298.15.
P : float
Pressure [Pa]. Defaults to 101325.
units : str, optional
Flow rate units of measure (only mass, molar, and
volumetric flow rates are valid). Defaults to 'kmol/hr'.
price : float, optional
Price per unit mass [USD/kg]. Defaults to 0.
total_flow : float, optional
Total flow rate.
thermo : :class:`~thermosteam.Thermo`, optional
Thermo object to initialize input and output streams. Defaults to
`biosteam.settings.get_thermo()`.
characterization_factors : dict, optional
Characterization factors for life cycle assessment.
**chemical_flows : float
ID - flow pairs.
Examples
--------
Before creating a stream, first set the chemicals:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
Create a stream, defining the thermodynamic condition and flow rates:
>>> s1 = tmo.Stream(ID='s1',
... Water=20, Ethanol=10, units='kg/hr',
... T=298.15, P=101325, phase='l')
>>> s1.show(flow='kg/hr') # Use the show method to select units of display
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
    >>> s1.show(composition=True, flow='kg/hr') # It's also possible to show by composition
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
composition: Water 0.667
Ethanol 0.333
------- 30 kg/hr
All flow rates are stored as an array in the `mol` attribute:
>>> s1.mol # Molar flow rates [kmol/hr]
array([1.11 , 0.217])
Mass and volumetric flow rates are available as property arrays:
>>> s1.mass
property_array([20.0, 10.0])
>>> s1.vol
property_array([0.02006, 0.012724])
These arrays work just like ordinary arrays, but the data is linked to the molar flows:
>>> # Mass flows are always up to date with molar flows
>>> s1.mol[0] = 1
>>> s1.mass[0]
18.015
>>> # Changing mass flows changes molar flows
>>> s1.mass[0] *= 2
>>> s1.mol[0]
2.0
>>> # Property arrays act just like normal arrays
>>> s1.mass + 2
array([38.031, 12. ])
The temperature, pressure and phase are attributes as well:
>>> (s1.T, s1.P, s1.phase)
(298.15, 101325.0, 'l')
    The most convenient way to get and set flow rates is through
the `get_flow` and `set_flow` methods:
>>> # Set flow
>>> s1.set_flow(1, 'gpm', 'Water')
>>> s1.get_flow('gpm', 'Water')
1.0
>>> # Set multiple flows
>>> s1.set_flow([10, 20], 'kg/hr', ('Ethanol', 'Water'))
>>> s1.get_flow('kg/hr', ('Ethanol', 'Water'))
array([10., 20.])
It is also possible to index using IDs through the
`imol`, `imass`, and `ivol` indexers:
>>> s1.imol.show()
ChemicalMolarFlowIndexer (kmol/hr):
(l) Water 1.11
Ethanol 0.2171
>>> s1.imol['Water']
1.1101687012358397
>>> s1.imol['Ethanol', 'Water']
array([0.217, 1.11 ])
Thermodynamic properties are available as stream properties:
>>> s1.H # Enthalpy (kJ/hr)
0.0
Note that the reference enthalpy is 0.0 at the reference
temperature of 298.15 K, and pressure of 101325 Pa.
    Retrieve the enthalpy 10 degC above the reference temperature.
>>> s1.T += 10
>>> s1.H
1083.467954...
Other thermodynamic properties are temperature and pressure dependent as well:
>>> s1.rho # Density [kg/m3]
908.648
    It may be more convenient to get properties in different units:
>>> s1.get_property('rho', 'g/cm3')
0.90864
It is also possible to set some of the properties in different units:
>>> s1.set_property('T', 40, 'degC')
>>> s1.T
313.15
Bubble point and dew point computations can be performed through stream methods:
>>> bp = s1.bubble_point_at_P() # Bubble point at constant pressure
>>> bp
BubblePointValues(T=357.09, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.49 0.51])
The bubble point results contain all results as attributes:
>>> bp.T # Temperature [K]
357.088...
>>> bp.y # Vapor composition
array([0.49, 0.51])
Vapor-liquid equilibrium can be performed by setting 2 degrees of freedom from the following list: `T` [Temperature; in K], `P` [Pressure; in Pa], `V` [Vapor fraction], `H` [Enthalpy; in kJ/hr].
Set vapor fraction and pressure of the stream:
>>> s1.vle(P=101325, V=0.5)
>>> s1.show()
MultiStream: s1
phases: ('g', 'l'), T: 364.8 K, P: 101325 Pa
flow (kmol/hr): (g) Water 0.472
Ethanol 0.192
(l) Water 0.638
Ethanol 0.0255
Note that the stream is a now a MultiStream object to manage multiple phases.
Each phase can be accessed separately too:
>>> s1['l'].show()
Stream:
phase: 'l', T: 364.8 K, P: 101325 Pa
flow (kmol/hr): Water 0.638
Ethanol 0.0255
>>> s1['g'].show()
Stream:
phase: 'g', T: 364.8 K, P: 101325 Pa
flow (kmol/hr): Water 0.472
Ethanol 0.192
We can convert a MultiStream object back to a Stream object by setting the phase:
>>> s1.phase = 'l'
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 364.8 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
"""
__slots__ = (
'_ID', '_imol', '_thermal_condition', '_thermo', '_streams',
'_bubble_point_cache', '_dew_point_cache',
'_vle_cache', '_lle_cache', '_sle_cache',
'_sink', '_source', '_price', '_islinked', '_property_cache_key',
'_property_cache', 'characterization_factors', '_user_equilibrium',
# '_velocity', '_height'
)
line = 'Stream'
#: [DisplayUnits] Units of measure for IPython display (class attribute)
display_units = thermo_units.DisplayUnits(T='K', P='Pa',
flow=('kmol/hr', 'kg/hr', 'm3/hr'),
composition=False,
N=7)
_units_of_measure = thermo_units.stream_units_of_measure
_flow_cache = {}
def __init__(self, ID= '', flow=(), phase='l', T=298.15, P=101325.,
units=None, price=0., total_flow=None, thermo=None,
characterization_factors=None,
# velocity=0., height=0.,
**chemical_flows):
#: dict[obj, float] Characterization factors for life cycle assessment in impact / kg.
        self.characterization_factors = {} if characterization_factors is None else characterization_factors
self._thermal_condition = tmo.ThermalCondition(T, P)
thermo = self._load_thermo(thermo)
chemicals = thermo.chemicals
self.price = price
# self.velocity = velocity
# self.height = height
if units:
name, factor = self._get_flow_name_and_factor(units)
if name == 'mass':
group_wt_compositions = chemicals._group_wt_compositions
for cID in tuple(chemical_flows):
if cID in group_wt_compositions:
compositions = group_wt_compositions[cID]
group_flow = chemical_flows.pop(cID)
chemical_group = chemicals[cID]
for i in range(len(chemical_group)):
chemical_flows[chemical_group[i]._ID] = group_flow * compositions[i]
elif name == 'vol':
group_wt_compositions = chemicals._group_wt_compositions
for cID in chemical_flows:
if cID in group_wt_compositions:
raise ValueError(f"cannot set volumetric flow by chemical group '{i}'")
self._init_indexer(flow, phase, chemicals, chemical_flows)
mol = self.mol
flow = getattr(self, name)
if total_flow is not None: mol *= total_flow / mol.sum()
material_data = mol / factor
flow[:] = material_data
else:
self._init_indexer(flow, phase, chemicals, chemical_flows)
if total_flow:
mol = self.mol
mol *= total_flow / mol.sum()
self._sink = self._source = None # For BioSTEAM
self.reset_cache()
self._register(ID)
self._islinked = False
self._user_equilibrium = None
def reset_flow(self, phase=None, units=None, total_flow=None, **chemical_flows):
"""
        Convenience method for resetting flow rate data.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=1)
>>> s1.reset_flow(Ethanol=1, phase='g', units='kg/hr', total_flow=2)
>>> s1.show('cwt')
Stream: s1
phase: 'g', T: 298.15 K, P: 101325 Pa
composition: Ethanol 1
------- 2 kg/hr
"""
imol = self._imol
imol.empty()
if phase: imol.phase = phase
if chemical_flows:
keys, values = zip(*chemical_flows.items())
if units is None:
self.imol[keys] = values
else:
self.set_flow(values, units, keys)
if total_flow:
if units is None:
self.F_mol = total_flow
else:
self.set_total_flow(total_flow, units)
def _reset_thermo(self, thermo):
if thermo is self._thermo: return
self._thermo = thermo
self._imol.reset_chemicals(thermo.chemicals)
self._islinked = False
self.reset_cache()
if hasattr(self, '_streams'):
for phase, stream in self._streams.items():
stream._imol = self._imol.get_phase(phase)
stream._thermo = thermo
def user_equilibrium(self, *args, **kwargs):
return self._user_equilibrium(self, *args, **kwargs)
def set_user_equilibrium(self, f):
self._user_equilibrium = f
@property
def has_user_equilibrium(self):
return self._user_equilibrium is not None
def get_CF(self, key, units=None):
"""
Returns the life-cycle characterization factor on a kg basis given the
impact indicator key.
Parameters
----------
key : str
Key of impact indicator.
units : str, optional
Units of impact indicator. Before using this argument, the default units
of the impact indicator should be defined with
thermosteam.settings.define_impact_indicator.
Units must also be dimensionally consistent with the default units.
"""
try:
value = self.characterization_factors[key]
except:
return 0.
if units is not None:
original_units = tmo.settings.get_impact_indicator_units(key)
value = original_units.convert(value, units)
return value
def set_CF(self, key, value, units=None):
"""
Set the life-cycle characterization factor on a kg basis given the
impact indicator key and the units of measure.
Parameters
----------
key : str
Key of impact indicator.
value : float
Characterization factor value.
units : str, optional
Units of impact indicator. Before using this argument, the default units
of the impact indicator should be defined with
thermosteam.settings.define_impact_indicator.
Units must also be dimensionally consistent with the default units.
"""
if units is not None:
original_units = tmo.settings.get_impact_indicator_units(key)
value = original_units.unconvert(value, units)
self.characterization_factors[key] = value
def get_impact(self, key):
"""Return hourly rate of the impact indicator given the key."""
cfs = self.characterization_factors
return cfs[key] * self.F_mass if key in cfs else 0.
def empty_negative_flows(self):
"""
        Set the flow rates of all components with negative values to 0.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=1, Ethanol=-1)
>>> s1.empty_negative_flows()
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 1
"""
data = self._imol._data
data[data < 0.] = 0.
def shares_flow_rate_with(self, other):
"""
Return whether other stream shares data with this one.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1')
>>> other = s1.flow_proxy()
>>> s1.shares_flow_rate_with(other)
True
>>> s1 = tmo.MultiStream('s1', phases=('l', 'g'))
>>> s1['g'].shares_flow_rate_with(s1)
True
>>> s2 = tmo.MultiStream('s2', phases=('l', 'g'))
>>> s1['g'].shares_flow_rate_with(s2)
False
>>> s1['g'].shares_flow_rate_with(s2['g'])
False
"""
imol = self._imol
other_imol = other._imol
if imol.__class__ is other_imol.__class__ and imol._data is other_imol._data:
shares_data = True
elif isinstance(other, tmo.MultiStream):
phase = self.phase
substreams = other._streams
if phase in substreams:
substream = substreams[phase]
shares_data = self.shares_flow_rate_with(substream)
else:
shares_data = False
else:
shares_data = False
return shares_data
def as_stream(self):
"""Does nothing."""
def get_data(self):
"""
Return a StreamData object containing data on material flow rates,
temperature, pressure, and phase(s).
See Also
--------
Stream.set_data
Examples
--------
Get and set data from stream at different conditions
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream('stream', Water=10)
>>> data = stream.get_data()
>>> stream.vle(V=0.5, P=101325)
>>> data_vle = stream.get_data()
>>> stream.set_data(data)
>>> stream.show()
Stream: stream
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 10
>>> stream.set_data(data_vle)
>>> stream.show()
MultiStream: stream
phases: ('g', 'l'), T: 373.12 K, P: 101325 Pa
flow (kmol/hr): (g) Water 5
(l) Water 5
Note that only StreamData objects are valid for this method:
>>> stream.set_data({'T': 298.15})
Traceback (most recent call last):
ValueError: stream_data must be a StreamData object; not dict
"""
return StreamData(self._imol, self._thermal_condition, self.phases)
def set_data(self, stream_data):
"""
Set material flow rates, temperature, pressure, and phase(s) through a
StreamData object
See Also
--------
Stream.get_data
"""
if isinstance(stream_data, StreamData):
self.phases = stream_data._phases
self._imol.copy_like(stream_data._imol)
self._thermal_condition.copy_like(stream_data)
else:
raise ValueError(f'stream_data must be a StreamData object; not {type(stream_data).__name__}')
@property
def price(self):
"""[float] Price of stream per unit mass [USD/kg]."""
return self._price
@price.setter
def price(self, price):
if np.isfinite(price):
self._price = float(price)
else:
raise AttributeError(f'price must be finite, not {price}')
# @property
# def velocity(self):
# """[float] Velocity of stream [m/s]."""
# return self._velocity
# @velocity.setter
# def velocity(self, velocity):
# if np.isfinite(velocity):
# self._velocity = float(velocity)
# else:
# raise AttributeError(f'velocity must be finite, not {velocity}')
# @property
# def height(self):
# """[float] Relative height of stream [m]."""
# return self._height
# @height.setter
# def height(self, height):
# if np.isfinite(height):
# self._height = float(height)
# else:
# raise AttributeError(f'height must be finite, not {height}')
# @property
# def potential_energy(self):
# """[float] Potential energy flow rate [kW]"""
# return (g * self.height * self.F_mass) / 3.6e6
# @property
# def kinetic_energy(self):
# """[float] Kinetic energy flow rate [kW]"""
# return 0.5 * self.F_mass / 3.6e6 * self._velocity * self._velocity
def isempty(self):
"""
Return whether or not stream is empty.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream()
>>> stream.isempty()
True
"""
return self._imol.isempty()
def sanity_check(self):
"""
Raise an InfeasibleRegion error if flow rates are infeasible.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1')
>>> s1.sanity_check()
>>> s1.mol[0] = -1.
>>> s1.sanity_check()
Traceback (most recent call last):
InfeasibleRegion: negative material flow rate is infeasible
"""
material = self._imol._data
if material[material < 0.].any(): raise InfeasibleRegion('negative material flow rate')
@property
def vapor_fraction(self):
"""Molar vapor fraction."""
return 1.0 if self.phase in 'gG' else 0.0
@property
def liquid_fraction(self):
"""Molar liquid fraction."""
return 1.0 if self.phase in 'lL' else 0.0
@property
def solid_fraction(self):
"""Molar solid fraction."""
return 1.0 if self.phase in 'sS' else 0.0
def isfeed(self):
"""Return whether stream has a sink but no source."""
return bool(self._sink and not self._source)
def isproduct(self):
"""Return whether stream has a source but no sink."""
return bool(self._source and not self._sink)
@property
def main_chemical(self):
"""[str] ID of chemical with the largest mol fraction in stream."""
return self.chemicals.tuple[self.mol.argmax()].ID
def disconnect_source(self):
"""Disconnect stream from source."""
source = self._source
if source:
outs = source.outs
index = outs.index(self)
outs[index] = None
def disconnect_sink(self):
"""Disconnect stream from sink."""
sink = self._sink
if sink:
ins = sink.ins
index = ins.index(self)
ins[index] = None
def disconnect(self):
"""Disconnect stream from unit operations."""
self.disconnect_source()
self.disconnect_sink()
def _init_indexer(self, flow, phase, chemicals, chemical_flows):
"""Initialize molar flow rates."""
if len(flow) == 0:
if chemical_flows:
imol = indexer.ChemicalMolarFlowIndexer(phase, chemicals=chemicals, **chemical_flows)
else:
imol = indexer.ChemicalMolarFlowIndexer.blank(phase, chemicals)
else:
assert not chemical_flows, ("may specify either 'flow' or "
"'chemical_flows', but not both")
if isinstance(flow, indexer.ChemicalMolarFlowIndexer):
imol = flow
imol.phase = phase
else:
imol = indexer.ChemicalMolarFlowIndexer.from_data(
np.asarray(flow, dtype=float), phase, chemicals)
self._imol = imol
def reset_cache(self):
"""Reset cache regarding equilibrium methods."""
self._bubble_point_cache = eq.BubblePointCache()
self._dew_point_cache = eq.DewPointCache()
self._property_cache_key = None, None, None
self._property_cache = {}
@classmethod
def _get_flow_name_and_factor(cls, units):
cache = cls._flow_cache
if units in cache:
name, factor = cache[units]
else:
dimensionality = thermo_units.get_dimensionality(units)
if dimensionality == mol_units.dimensionality:
name = 'mol'
factor = mol_units.conversion_factor(units)
elif dimensionality == mass_units.dimensionality:
name = 'mass'
factor = mass_units.conversion_factor(units)
elif dimensionality == vol_units.dimensionality:
name = 'vol'
factor = vol_units.conversion_factor(units)
else:
raise DimensionError("dimensions for flow units must be in molar, "
"mass or volumetric flow rates, not "
f"'{dimensionality}'")
cache[units] = name, factor
return name, factor
### Property getters ###
def get_atomic_flow(self, symbol):
"""
Return flow rate of atom in kmol / hr given the atomic symbol.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream(Water=1)
>>> stream.get_atomic_flow('H') # kmol/hr of H
2.0
>>> stream.get_atomic_flow('O') # kmol/hr of O
1.0
"""
return (self.chemicals.formula_array[symbol_to_index[symbol], :] * self.mol).sum()
def get_atomic_flows(self):
"""
Return dictionary of atomic flow rates in kmol / hr.
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream(Water=1)
>>> stream.get_atomic_flows()
{'H': 2.0, 'O': 1.0}
"""
return array_to_atoms(self.chemicals.formula_array @ self.mol)
def get_flow(self, units, key=...):
"""
        Return flow rates in the requested units.
Parameters
----------
units : str
Units of measure.
key : tuple[str] or str, optional
Chemical identifiers.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.get_flow('kg/hr', 'Water')
20.0
"""
name, factor = self._get_flow_name_and_factor(units)
indexer = getattr(self, 'i' + name)
return factor * indexer[key]
def set_flow(self, data, units, key=...):
"""
Set flow rates in given units.
Parameters
----------
data : 1d ndarray or float
Flow rate data.
units : str
Units of measure.
key : Iterable[str] or str, optional
Chemical identifiers.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream(ID='s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.set_flow(10, 'kg/hr', 'Water')
>>> s1.get_flow('kg/hr', 'Water')
10.0
"""
name, factor = self._get_flow_name_and_factor(units)
indexer = getattr(self, 'i' + name)
indexer[key] = np.asarray(data, dtype=float) / factor
def get_total_flow(self, units):
"""
Get total flow rate in given units.
Parameters
----------
units : str
Units of measure.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.get_total_flow('kg/hr')
30.0
"""
name, factor = self._get_flow_name_and_factor(units)
flow = getattr(self, 'F_' + name)
return factor * flow
def set_total_flow(self, value, units):
"""
Set total flow rate in given units keeping the composition constant.
Parameters
----------
value : float
New total flow rate.
units : str
Units of measure.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.set_total_flow(1.0,'kg/hr')
>>> s1.get_total_flow('kg/hr')
0.9999999999999999
"""
name, factor = self._get_flow_name_and_factor(units)
setattr(self, 'F_' + name, value / factor)
### Stream data ###
@property
def source(self):
"""[Unit] Outlet location."""
return self._source
@property
def sink(self):
"""[Unit] Inlet location."""
return self._sink
@property
def thermal_condition(self):
"""
[ThermalCondition] Contains the temperature and pressure conditions
of the stream.
"""
return self._thermal_condition
@property
def T(self):
"""[float] Temperature in Kelvin."""
return self._thermal_condition._T
@T.setter
def T(self, T):
self._thermal_condition._T = float(T)
@property
def P(self):
"""[float] Pressure in Pascal."""
return self._thermal_condition._P
@P.setter
def P(self, P):
self._thermal_condition._P = float(P)
@property
def phase(self):
"""Phase of stream."""
return self._imol._phase._phase
@phase.setter
def phase(self, phase):
self._imol._phase.phase = phase
@property
def mol(self):
"""[array] Molar flow rates in kmol/hr."""
return self._imol._data
@mol.setter
def mol(self, value):
mol = self.mol
if mol is not value: mol[:] = value
@property
def mass(self):
"""[property_array] Mass flow rates in kg/hr."""
return self.imass._data
@mass.setter
def mass(self, value):
mass = self.mass
if mass is not value: mass[:] = value
@property
def vol(self):
"""[property_array] Volumetric flow rates in m3/hr."""
return self.ivol._data
@vol.setter
def vol(self, value):
vol = self.vol
if vol is not value:
vol[:] = value
@property
def imol(self):
"""[Indexer] Flow rate indexer with data in kmol/hr."""
return self._imol
@property
def imass(self):
"""[Indexer] Flow rate indexer with data in kg/hr."""
return self._imol.by_mass()
@property
def ivol(self):
"""[Indexer] Flow rate indexer with data in m3/hr."""
return self._imol.by_volume(self._thermal_condition)
### Net flow properties ###
@property
def cost(self):
"""[float] Total cost of stream in USD/hr."""
return self.price * self.F_mass
@property
def F_mol(self):
"""[float] Total molar flow rate in kmol/hr."""
return self._imol._data.sum()
@F_mol.setter
def F_mol(self, value):
F_mol = self.F_mol
if not F_mol: raise AttributeError("undefined composition; cannot set flow rate")
self._imol._data[:] *= value/F_mol
@property
def F_mass(self):
"""[float] Total mass flow rate in kg/hr."""
return np.dot(self.chemicals.MW, self.mol)
@F_mass.setter
def F_mass(self, value):
F_mass = self.F_mass
if not F_mass: raise AttributeError("undefined composition; cannot set flow rate")
self.imol._data[:] *= value/F_mass
@property
def F_vol(self):
"""[float] Total volumetric flow rate in m3/hr."""
F_mol = self.F_mol
return 1000. * self.V * F_mol if F_mol else 0.
@F_vol.setter
def F_vol(self, value):
F_vol = self.F_vol
if not F_vol: raise AttributeError("undefined composition; cannot set flow rate")
self.imol._data[:] *= value / F_vol
@property
def H(self):
"""[float] Enthalpy flow rate in kJ/hr."""
H = self._get_property_cache('H', True)
if H is None:
self._property_cache['H'] = H = self.mixture.H(
self.phase, self.mol, *self._thermal_condition
)
return H
@H.setter
def H(self, H: float):
if not H and self.isempty(): return
try: self.T = self.mixture.solve_T(self.phase, self.mol, H,
*self._thermal_condition)
except Exception as error: # pragma: no cover
phase = self.phase.lower()
if phase == 'g':
# Maybe too little heat, liquid must be present
self.phase = 'l'
elif phase == 'l':
# Maybe too much heat, gas must be present
self.phase = 'g'
else:
raise error
self.T = self.mixture.solve_T(self.phase, self.mol, H,
*self._thermal_condition)
@property
def S(self):
"""[float] Absolute entropy flow rate in kJ/hr."""
S = self._get_property_cache('S', True)
if S is None:
self._property_cache['S'] = S = self.mixture.S(
self.phase, self.mol, *self._thermal_condition
)
return S
@property
def Hnet(self):
"""[float] Total enthalpy flow rate (including heats of formation) in kJ/hr."""
return self.H + self.Hf
@property
def Hf(self):
"""[float] Enthalpy of formation flow rate in kJ/hr."""
return (self.chemicals.Hf * self.mol).sum()
@property
def LHV(self):
"""[float] Lower heating value flow rate in kJ/hr."""
return (self.chemicals.LHV * self.mol).sum()
@property
def HHV(self):
"""[float] Higher heating value flow rate in kJ/hr."""
return (self.chemicals.HHV * self.mol).sum()
@property
def Hvap(self):
"""[float] Enthalpy of vaporization flow rate in kJ/hr."""
mol = self.mol
T = self._thermal_condition._T
Hvap = self._get_property_cache('Hvap', True)
if Hvap is None:
self._property_cache['Hvap'] = Hvap = sum([
i*j.Hvap(T) for i,j in zip(mol, self.chemicals)
if i and not j.locked_state
])
return Hvap
def _get_property_cache(self, name, flow=False):
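        # Cache of computed properties keyed on (phase, T, P) and molar composition; for
        # extensive ("flow") properties the cached value is rescaled by the ratio of the
        # current total flow to the cached total flow.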
property_cache = self._property_cache
thermal_condition = self._thermal_condition
imol = self._imol
data = imol._data
total = data.sum()
if total == 0.: return 0.
composition = data / total
literal = (imol._phase._phase, thermal_condition._T, thermal_condition._P)
last_literal, last_composition, last_total = self._property_cache_key
if literal == last_literal and (composition == last_composition).all():
prop = property_cache.get(name)
if not prop: return prop
if flow:
return prop * total / last_total
else:
return prop
else:
self._property_cache_key = (literal, composition, total)
property_cache.clear()
return None
@property
def C(self):
"""[float] Heat capacity flow rate in kJ/hr."""
C = self._get_property_cache('C', True)
if C is None:
self._property_cache['C'] = C = self.mixture.Cn(self.phase, self.mol, self.T)
return C
### Composition properties ###
@property
def z_mol(self):
"""[1d array] Molar composition."""
mol = self.mol
z = mol / mol.sum()
z.setflags(0)
return z
@property
def z_mass(self):
"""[1d array] Mass composition."""
mass = self.chemicals.MW * self.mol
F_mass = mass.sum()
if F_mass == 0:
z = mass
else:
z = mass / mass.sum()
z.setflags(0)
return z
@property
def z_vol(self):
"""[1d array] Volumetric composition."""
vol = 1. * self.vol
z = vol / vol.sum()
z.setflags(0)
return z
@property
def MW(self):
"""[float] Overall molecular weight."""
return self.mixture.MW(self.mol)
@property
def V(self):
"""[float] Molar volume [m^3/mol]."""
V = self._get_property_cache('V')
if V is None:
self._property_cache['V'] = V = self.mixture.V(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return V
@property
def kappa(self):
"""[float] Thermal conductivity [W/m/k]."""
kappa = self._get_property_cache('kappa')
if kappa is None:
self._property_cache['kappa'] = kappa = self.mixture.kappa(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return kappa
@property
def Cn(self):
"""[float] Molar heat capacity [J/mol/K]."""
Cn = self._get_property_cache('Cn')
if Cn is None:
self._property_cache['Cn'] = Cn = self.mixture.Cn(
*self._imol.get_phase_and_composition(),
self.T
)
return Cn
@property
def mu(self):
"""[float] Hydrolic viscosity [Pa*s]."""
mu = self._get_property_cache('mu')
if mu is None:
self._property_cache['mu'] = mu = self.mixture.mu(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return mu
@property
def sigma(self):
"""[float] Surface tension [N/m]."""
mol = self.mol
sigma = self._get_property_cache('sigma')
if sigma is None:
self._property_cache['sigma'] = sigma = self.mixture.sigma(
mol / mol.sum(), *self._thermal_condition
)
return sigma
@property
def epsilon(self):
"""[float] Relative permittivity [-]."""
mol = self.mol
epsilon = self._get_property_cache('epsilon')
if epsilon is None:
self._property_cache['epsilon'] = epsilon = self.mixture.epsilon(
mol / mol.sum(), *self._thermal_condition
)
return epsilon
@property
def Cp(self):
"""[float] Heat capacity [J/g/K]."""
return self.Cn / self.MW
@property
def alpha(self):
"""[float] Thermal diffusivity [m^2/s]."""
return fn.alpha(self.kappa,
self.rho,
self.Cp * 1000.)
@property
def rho(self):
"""[float] Density [kg/m^3]."""
return fn.V_to_rho(self.V, self.MW)
@property
def nu(self):
"""[float] Kinematic viscosity [m^2/s]."""
return fn.mu_to_nu(self.mu, self.rho)
@property
def Pr(self):
"""[float] Prandtl number [-]."""
return fn.Pr(self.Cp * 1000,
self.kappa,
self.mu)
### Stream methods ###
@property
def available_chemicals(self):
"""list[Chemical] All chemicals with nonzero flow."""
return [i for i, j in zip(self.chemicals, self.mol) if j]
def in_thermal_equilibrium(self, other):
"""
Return whether or not stream is in thermal equilibrium with
another stream.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> stream = Stream(Water=1, T=300)
>>> other = Stream(Water=1, T=300)
>>> stream.in_thermal_equilibrium(other)
True
"""
return self._thermal_condition.in_equilibrium(other._thermal_condition)
@classmethod
def sum(cls, streams, ID=None, thermo=None, energy_balance=True):
"""
Return a new Stream object that represents the sum of all given streams.
Examples
--------
Sum two streams:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s_sum = tmo.Stream.sum([s1, s1], 's_sum')
>>> s_sum.show(flow='kg/hr')
Stream: s_sum
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
Sum two streams with new property package:
>>> thermo = tmo.Thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s_sum = tmo.Stream.sum([s1, s1], 's_sum', thermo)
>>> s_sum.show(flow='kg/hr')
Stream: s_sum
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
"""
new = cls(ID, thermo=thermo)
if streams: new.copy_thermal_condition(streams[0])
new.mix_from(streams, energy_balance)
return new
def separate_out(self, other, energy_balance=True):
"""
Separate out given stream from this one.
Examples
--------
Separate out another stream with the same thermodynamic property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=30, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=5, units='kg/hr')
>>> s1.separate_out(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 5
It's also possible to separate out streams with different property packages
so long as all chemicals are defined in the mixed stream's property
package:
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1', Water=40, units='kg/hr')
>>> tmo.settings.set_thermo(['Ethanol'], cache=True)
>>> s2 = tmo.Stream('s2', Ethanol=20, units='kg/hr')
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s_mix = tmo.Stream.sum([s1, s2], 's_mix')
>>> s_mix.separate_out(s2)
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Removing empty streams is fine too:
>>> s1.empty(); s_mix.separate_out(s1)
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
"""
if other:
if self is other: self.empty()
if energy_balance: H_new = self.H - other.H
self._imol.separate_out(other._imol)
if energy_balance: self.H = H_new
def mix_from(self, others, energy_balance=True, vle=False):
"""
Mix all other streams into this one, ignoring its initial contents.
Examples
--------
Mix two streams with the same thermodynamic property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.copy('s2')
>>> s1.mix_from([s1, s2])
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
It's also possible to mix streams with different property packages
so long as all chemicals are defined in the mixed stream's property
package:
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1', Water=40, units='kg/hr')
>>> tmo.settings.set_thermo(['Ethanol'], cache=True)
>>> s2 = tmo.Stream('s2', Ethanol=20, units='kg/hr')
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s_mix = tmo.Stream('s_mix')
>>> s_mix.mix_from([s1, s2])
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
Mixing empty streams is fine too:
>>> s1.empty(); s2.empty(); s_mix.mix_from([s1, s2])
>>> s_mix.show()
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: 0
"""
others = [i for i in others if i]
N_others = len(others)
if N_others == 0:
self.empty()
elif N_others == 1:
self.copy_like(others[0])
elif vle:
phases = ''.join([i.phase for i in others])
self.phases = tuple(set(phases))
self._imol.mix_from([i._imol for i in others])
if energy_balance:
H = sum([i.H for i in others])
self.vle(H=self.H, P=self.P)
else:
self.vle(T=self.T, P=self.P)
else:
self.P = min([i.P for i in others])
if energy_balance: H = sum([i.H for i in others])
self._imol.mix_from([i._imol for i in others])
if energy_balance and not self.isempty():
try:
self.H = H
except:
phases = ''.join([i.phase for i in others])
self.phases = tuple(set(phases))
self._imol.mix_from([i._imol for i in others])
self.H = H
def split_to(self, s1, s2, split, energy_balance=True):
"""
Split molar flow rate from this stream to two others given
the split fraction or an array of split fractions.
Examples
--------
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol'], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s = tmo.Stream('s', Water=20, Ethanol=10, units='kg/hr')
>>> s1 = tmo.Stream('s1')
>>> s2 = tmo.Stream('s2')
>>> split = chemicals.kwarray(dict(Water=0.5, Ethanol=0.1))
>>> s.split_to(s1, s2, split)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 10
Ethanol 1
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 10
Ethanol 9
"""
mol = self.mol
chemicals = self.chemicals
values = mol * split
dummy = mol - values
if s1.chemicals is chemicals:
s1.mol[:] = values
else:
CASs, values = zip(*[(i, j) for i, j in zip(chemicals.CASs, values) if j])
s1.empty()
s1._imol[CASs] = values
values = dummy
if s2.chemicals is chemicals:
s2.mol[:] = values
else:
s2.empty()
CASs, values = zip(*[(i, j) for i, j in zip(chemicals.CASs, values) if j])
s2._imol[CASs] = values
if energy_balance:
tc1 = s1._thermal_condition
tc2 = s2._thermal_condition
tc = self._thermal_condition
tc1._T = tc2._T = tc._T
tc1._P = tc2._P = tc._P
s1.phase = s2.phase = self.phase
def link_with(self, other, flow=True, phase=True, TP=True):
"""
Link with another stream.
Parameters
----------
other : Stream
flow : bool, defaults to True
Whether to link the flow rate data.
phase : bool, defaults to True
Whether to link the phase.
TP : bool, defaults to True
Whether to link the temperature and pressure.
See Also
--------
:obj:`~Stream.flow_proxy`
:obj:`~Stream.proxy`
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.link_with(s1)
>>> s1.mol is s2.mol
True
>>> s2.thermal_condition is s1.thermal_condition
True
>>> s1.phase = 'g'
>>> s2.phase
'g'
"""
if not isinstance(other._imol, self._imol.__class__):
at_unit = f" at unit {self.source}" if self.source is other.sink else ""
raise RuntimeError(f"stream {self} cannot link with stream {other}" + at_unit
+ "; streams must have the same class to link")
if self._islinked and not (self.source is other.sink or self.sink is other.source):
raise RuntimeError(f"stream {self} cannot link with stream {other};"
f" {self} already linked")
if TP and flow and (phase or self._imol._data.ndim == 2):
self._imol._data_cache = other._imol._data_cache
else:
self._imol._data_cache.clear()
if TP:
self._thermal_condition = other._thermal_condition
if flow:
self._imol._data = other._imol._data
if phase and self._imol._data.ndim == 1:
self._imol._phase = other._imol._phase
self._islinked = other._islinked = True
def unlink(self):
"""
Unlink stream from other streams.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.link_with(s1)
>>> s1.unlink()
>>> s2.mol is s1.mol
False
MultiStream phases cannot be unlinked:
>>> s1 = tmo.MultiStream(None, phases=('l', 'g'))
>>> s1['g'].unlink()
Traceback (most recent call last):
RuntimeError: phase is locked; stream cannot be unlinked
"""
imol = self._imol
if hasattr(imol, '_phase') and isinstance(imol._phase, tmo._phase.LockedPhase):
raise RuntimeError('phase is locked; stream cannot be unlinked')
if self._islinked:
imol._data_cache.clear()
imol._data = imol._data.copy()
imol._phase = imol._phase.copy()
self._thermal_condition = self._thermal_condition.copy()
self.reset_cache()
self._islinked = False
def copy_like(self, other):
"""
Copy all conditions of another stream.
Examples
--------
Copy data from another stream with the same property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=2, units='kg/hr')
>>> s1.copy_like(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 2
Copy data from another stream with a different property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s2 = tmo.Stream('s2', Water=2, units='kg/hr')
>>> s1.copy_like(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 2
"""
if isinstance(other, tmo.MultiStream):
phase = other.phase
if len(phase) == 1:
imol = other._imol.to_chemical_indexer(phase)
else:
self.phases = other.phases
imol = other._imol
else:
imol = other._imol
self._imol.copy_like(imol)
self._thermal_condition.copy_like(other._thermal_condition)
def copy_thermal_condition(self, other):
"""
Copy thermal conditions (T and P) of another stream.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=2, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=1, units='kg/hr', T=300.00)
>>> s1.copy_thermal_condition(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 300 K, P: 101325 Pa
flow (kg/hr): Water 2
"""
self._thermal_condition.copy_like(other._thermal_condition)
def copy_flow(self, other, IDs=..., *, remove=False, exclude=False):
"""
Copy flow rates of another stream to self.
Parameters
----------
other : Stream
Flow rates will be copied from here.
IDs=... : Iterable[str], defaults to all chemicals.
Chemical IDs.
remove=False: bool, optional
If True, copied chemicals will be removed from `stream`.
exclude=False: bool, optional
If True, exclude designated chemicals when copying.
Examples
--------
Initialize streams:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
Copy all flows:
>>> s2.copy_flow(s1)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
Reset and copy just water flow:
>>> s2.empty()
>>> s2.copy_flow(s1, 'Water')
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Reset and copy all flows except water:
>>> s2.empty()
>>> s2.copy_flow(s1, 'Water', exclude=True)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Ethanol 10
Cut and paste flows:
>>> s2.copy_flow(s1, remove=True)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: 0
        It's also possible to copy flows from a MultiStream:
>>> s1.phases = ('g', 'l')
>>> s1.imol['g', 'Water'] = 10
>>> s2.copy_flow(s1, remove=True)
>>> s2.show()
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 10
>>> s1.show()
MultiStream: s1
phases: ('g', 'l'), T: 298.15 K, P: 101325 Pa
flow: 0
        Copy all flows except water and remove the copied flows from the source:
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.copy_flow(s1, 'Water', exclude=True, remove=True)
"""
other_mol = other.mol
other_chemicals = other.chemicals
chemicals = self.chemicals
if IDs == ...:
if exclude: return
if chemicals is other_chemicals:
self.mol[:] = other.mol
else:
self.empty()
CASs, values = zip(*[(i, j) for i, j in zip(other_chemicals.CASs, other_mol) if j])
self.imol[CASs] = values
if remove:
if isinstance(other, tmo.MultiStream):
other.imol.data[:] = 0.
else:
other_mol[:] = 0.
else:
if exclude:
if isinstance(IDs, str):
if IDs in other_chemicals:
bad_index = other_chemicals.index(IDs)
other_index = [i for i in range(other_chemicals.size) if i != bad_index]
else:
                        other_index = slice(None)
else:
IDs = [i for i in IDs if i in other_chemicals]
bad_index = set(other_chemicals.indices(IDs))
if bad_index:
other_index = [i for i in range(other_chemicals.size) if i not in bad_index]
else:
                        other_index = slice(None)
else:
other_index = other_chemicals.get_index(IDs)
if chemicals is other_chemicals:
self.mol[other_index] = other_mol[other_index]
else:
CASs = other_chemicals.CASs
other_index = [i for i in other_index if other_mol[i] or CASs[i] in chemicals]
self.imol[tuple([CASs[i] for i in other_index])] = other_mol[other_index]
if remove:
if isinstance(other, tmo.MultiStream):
other.imol.data[:, other_index] = 0
else:
other_mol[other_index] = 0
def copy(self, ID=None, thermo=None):
"""
Return a copy of the stream.
Examples
--------
Create a copy of a new stream:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1_copy = s1.copy('s1_copy')
>>> s1_copy.show(flow='kg/hr')
Stream: s1_copy
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
Warnings
--------
        Prices and LCA characterization factors are not copied.
"""
cls = self.__class__
new = cls.__new__(cls)
new._islinked = False
new._sink = new._source = None
new.characterization_factors = {}
new._thermo = thermo or self._thermo
new._imol = self._imol.copy()
if thermo and thermo.chemicals is not self.chemicals:
new._imol.reset_chemicals(thermo.chemicals)
new._thermal_condition = self._thermal_condition.copy()
new._user_equilibrium = self._user_equilibrium
new.reset_cache()
new.price = 0
new.ID = ID
return new
__copy__ = copy
def flow_proxy(self, ID=None):
"""
Return a new stream that shares flow rate data with this one.
See Also
--------
:obj:`~Stream.link_with`
:obj:`~Stream.proxy`
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.flow_proxy()
>>> s2.mol is s1.mol
True
"""
cls = self.__class__
new = cls.__new__(cls)
new.ID = new._sink = new._source = None
new.price = 0
new._thermo = self._thermo
new._imol = imol = self._imol._copy_without_data()
imol._data = self._imol._data
new._thermal_condition = self._thermal_condition.copy()
new.reset_cache()
new.characterization_factors = {}
self._islinked = new._islinked = True
new._user_equilibrium = self._user_equilibrium
return new
def proxy(self, ID=None):
"""
Return a new stream that shares all thermochemical data with this one.
See Also
--------
:obj:`~Stream.link_with`
:obj:`~Stream.flow_proxy`
Warning
-------
        Price and characterization factor data are not shared.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.proxy()
>>> s2.imol is s1.imol and s2.thermal_condition is s1.thermal_condition
True
"""
cls = self.__class__
new = cls.__new__(cls)
new.ID = None
new._sink = new._source = None
new.price = self.price
new._thermo = self._thermo
new._imol = self._imol
new._thermal_condition = self._thermal_condition
new._property_cache = self._property_cache
new._property_cache_key = self._property_cache_key
new._bubble_point_cache = self._bubble_point_cache
new._dew_point_cache = self._dew_point_cache
new._user_equilibrium = self._user_equilibrium
try: new._vle_cache = self._vle_cache
except AttributeError: pass
new.characterization_factors = {}
self._islinked = new._islinked = True
return new
def empty(self):
"""Empty stream flow rates.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.empty()
>>> s1.F_mol
0.0
"""
self._imol._data[:] = 0.
### Equilibrium ###
@property
def vle(self):
"""[VLE] An object that can perform vapor-liquid equilibrium on the stream."""
self.phases = ('g', 'l')
return self.vle
@property
def lle(self):
"""[LLE] An object that can perform liquid-liquid equilibrium on the stream."""
self.phases = ('L', 'l')
return self.lle
@property
def sle(self):
"""[SLE] An object that can perform solid-liquid equilibrium on the stream."""
self.phases = ('s', 'l')
return self.sle
@property
def vle_chemicals(self):
"""list[Chemical] Chemicals cabable of liquid-liquid equilibrium."""
chemicals = self.chemicals
chemicals_tuple = chemicals.tuple
indices = chemicals.get_vle_indices(self.mol != 0)
return [chemicals_tuple[i] for i in indices]
@property
def lle_chemicals(self):
"""list[Chemical] Chemicals cabable of vapor-liquid equilibrium."""
chemicals = self.chemicals
chemicals_tuple = chemicals.tuple
indices = chemicals.get_lle_indices(self.mol != 0)
return [chemicals_tuple[i] for i in indices]
def get_bubble_point(self, IDs=None):
"""
Return a BubblePoint object capable of computing bubble points.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.get_bubble_point()
BubblePoint([Water, Ethanol])
"""
chemicals = self.chemicals[IDs] if IDs else self.vle_chemicals
bp = self._bubble_point_cache(chemicals, self._thermo)
return bp
def get_dew_point(self, IDs=None):
"""
Return a DewPoint object capable of computing dew points.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.get_dew_point()
DewPoint([Water, Ethanol])
"""
chemicals = self.chemicals.retrieve(IDs) if IDs else self.vle_chemicals
dp = self._dew_point_cache(chemicals, self._thermo)
return dp
def bubble_point_at_T(self, T=None, IDs=None):
"""
Return a BubblePointResults object with all data on the bubble point at constant temperature.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.bubble_point_at_T()
BubblePointValues(T=350.00, P=76622, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.486 0.514])
"""
bp = self.get_bubble_point(IDs)
z = self.get_normalized_mol(bp.IDs)
return bp(z, T=T or self.T)
def bubble_point_at_P(self, P=None, IDs=None):
"""
Return a BubblePointResults object with all data on the bubble point at constant pressure.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.bubble_point_at_P()
BubblePointValues(T=357.09, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.49 0.51])
"""
bp = self.get_bubble_point(IDs)
z = self.get_normalized_mol(bp.IDs)
return bp(z, P=P or self.P)
def dew_point_at_T(self, T=None, IDs=None):
"""
Return a DewPointResults object with all data on the dew point
at constant temperature.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all
chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.dew_point_at_T()
DewPointValues(T=350.00, P=48991, IDs=('Water', 'Ethanol'), z=[0.836 0.164], x=[0.984 0.016])
"""
dp = self.get_dew_point(IDs)
z = self.get_normalized_mol(dp.IDs)
return dp(z, T=T or self.T)
def dew_point_at_P(self, P=None, IDs=None):
"""
Return a DewPointResults object with all data on the dew point
at constant pressure.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all
chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.dew_point_at_P()
DewPointValues(T=368.66, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], x=[0.984 0.016])
"""
dp = self.get_dew_point(IDs)
z = self.get_normalized_mol(dp.IDs)
return dp(z, P=P or self.P)
def get_normalized_mol(self, IDs):
"""
Return normalized molar fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kmol/hr')
>>> s1.get_normalized_mol(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.imol[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_normalized_mass(self, IDs):
"""
Return normalized mass fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kg/hr')
>>> s1.get_normalized_mass(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.imass[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_normalized_vol(self, IDs):
"""
        Return normalized volumetric fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_normalized_vol(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.ivol[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_molar_fraction(self, IDs):
"""
Return molar fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kmol/hr')
>>> s1.get_molar_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_mol = self.F_mol
return self.imol[IDs] / F_mol if F_mol else 0.
get_molar_composition = get_molar_fraction
def get_mass_fraction(self, IDs):
"""
Return mass fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kg/hr')
>>> s1.get_mass_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_mass = self.F_mass
return self.imass[IDs] / F_mass if F_mass else 0.
get_mass_composition = get_mass_fraction
def get_volumetric_fraction(self, IDs):
"""
Return volumetric fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_volumetric_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_vol = self.F_vol
return self.ivol[IDs] / F_vol if F_vol else 0.
get_volumetric_composition = get_volumetric_fraction
def get_concentration(self, IDs):
"""
Return concentration of given chemicals in kmol/m3.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_concentration(('Water', 'Ethanol'))
array([27.672, 4.265])
"""
F_vol = self.F_vol
return self.imol[IDs] / F_vol if F_vol else 0.
@property
def P_vapor(self):
"""Vapor pressure of liquid."""
chemicals = self.vle_chemicals
F_l = eq.LiquidFugacities(chemicals, self.thermo)
IDs = tuple([i.ID for i in chemicals])
x = self.get_molar_fraction(IDs)
if x.sum() < 1e-12: return 0
return F_l(x, self.T).sum()
def receive_vent(self, other, energy_balance=True):
"""
        Receive vapors from another stream by vapor-liquid equilibrium between
        a gas and a liquid stream, assuming only a small amount of chemicals
        in vapor-liquid equilibrium is present.
Examples
--------
The energy balance is performed by default:
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol', 'Methanol', tmo.Chemical('N2', phase='g')], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s1 = tmo.Stream('s1', N2=20, units='m3/hr', phase='g', T=330)
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=2, T=330)
>>> s1.receive_vent(s2)
>>> s1.show(flow='kmol/hr')
Stream: s1
phase: 'g', T: 323.13 K, P: 101325 Pa
flow (kmol/hr): Water 0.0798
Ethanol 0.0889
N2 0.739
Set energy balance to false to receive vent isothermally:
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol', 'Methanol', tmo.Chemical('N2', phase='g')], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s1 = tmo.Stream('s1', N2=20, units='m3/hr', phase='g', T=330)
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=2, T=330)
>>> s1.receive_vent(s2, energy_balance=False)
>>> s1.show(flow='kmol/hr')
Stream: s1
phase: 'g', T: 330 K, P: 101325 Pa
flow (kmol/hr): Water 0.111
Ethanol 0.123
N2 0.739
"""
assert self.phase == 'g', 'stream must be a gas to receive vent'
ms = tmo.Stream(None, T=self.T, P=self.P, thermo=self.thermo)
ms.mix_from([self, other], energy_balance=False)
if energy_balance: ms.H = H = self.H + other.H
ms.vle._setup()
chemicals = ms.vle_chemicals
F_l = eq.LiquidFugacities(chemicals, ms.thermo)
IDs = tuple([i.ID for i in chemicals])
x = other.get_molar_fraction(IDs)
T = ms.T
P = ms.P
vapor = ms['g']
liquid = ms['l']
F_mol_vapor = vapor.F_mol
mol_old = liquid.imol[IDs]
if energy_balance:
def equilibrium_approximation(T):
f_l = F_l(x, T)
y = f_l / P
mol_new = F_mol_vapor * y
vapor.imol[IDs] = mol_new
liquid.imol[IDs] = mol_old - mol_new
index = liquid.mol < 0.
vapor.mol[index] += liquid.mol[index]
liquid.mol[index] = 0
ms.H = H
return ms.T
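            # Solve for the temperature at which the approximation is
            # self-consistent; flx.wegstein iterates T -> f(T) to a fixed
            # point using Wegstein acceleration.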
flx.wegstein(equilibrium_approximation, T)
else:
f_l = F_l(x, T)
y = f_l / P
mol_new = F_mol_vapor * y
vapor.imol[IDs] = mol_new
liquid.imol[IDs] = mol_old - mol_new
index = liquid.mol < 0.
vapor.mol[index] += liquid.mol[index]
liquid.mol[index] = 0
self.copy_like(vapor)
other.copy_like(liquid)
self.T = other.T = ms.T
### Casting ###
@property
def islinked(self):
"""
[bool] Whether data regarding the thermal condition, material flow rates,
and phases are shared with other streams.
"""
return self._islinked
@property
def phases(self):
"""tuple[str] All phases present."""
return (self.phase,)
@phases.setter
def phases(self, phases):
if self.phases == phases: return
if self._islinked: self.unlink()
if len(phases) == 1:
self.phase = phases[0]
else:
self.__class__ = tmo.MultiStream
self._imol = self._imol.to_material_indexer(phases)
self._streams = {}
self._vle_cache = eq.VLECache(self._imol,
self._thermal_condition,
self._thermo,
self._bubble_point_cache,
self._dew_point_cache)
self._lle_cache = eq.LLECache(self._imol,
self._thermal_condition,
self._thermo)
self._sle_cache = eq.SLECache(self._imol,
self._thermal_condition,
self._thermo)
### Representation ###
def _basic_info(self):
return f"{type(self).__name__}: {self.ID or ''}\n"
def _info_phaseTP(self, phase, T_units, P_units):
T = thermo_units.convert(self.T, 'K', T_units)
P = thermo_units.convert(self.P, 'Pa', P_units)
s = '' if isinstance(phase, str) else 's'
return f" phase{s}: {repr(phase)}, T: {T:.5g} {T_units}, P: {P:.6g} {P_units}\n"
def _source_info(self):
source = self.source
return f"{source}-{source.outs.index(self)}" if source else self.ID
def _translate_layout(self, layout, flow, composition, N):
if layout:
for param in (flow, composition, N):
if param is not None: raise ValueError(f'cannot specify both `layout` and `{param}`')
if layout[0] == 'c':
composition = True
layout = layout[1:]
if layout.startswith('wt'):
flow = 'kg/hr'
layout = layout[2:]
elif layout.startswith('mol'):
flow = 'kmol/hr'
layout = layout[3:]
elif layout.startswith('vol'):
flow = 'm3/hr'
layout = layout[3:]
elif layout.isdigit():
flow = 'kmol/hr'
else:
raise ValueError(
"`layout` must have the form "
"{'c' or ''}{'wt', 'mol' or 'vol'}{# or ''};"
"for example: 'cwt100' corresponds to compostion=True, "
"flow='kg/hr', and N=100."
)
if layout.isdigit():
N = int(layout)
return flow, composition, N
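    # A minimal illustration of how `layout` strings are decomposed above
    # (derived from the error message in _translate_layout):
    #   'cwt100' -> composition=True, flow='kg/hr', N=100
    #   'mol'    -> flow='kmol/hr', composition and N left unchanged
    #   'cvol'   -> composition=True, flow='m3/hr', N left unchanged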
def _info(self, layout, T, P, flow, composition, N, IDs):
"""Return string with all specifications."""
flow, composition, N = self._translate_layout(layout, flow, composition, N)
from .indexer import nonzeros
basic_info = self._basic_info()
if not IDs:
IDs = self.chemicals.IDs
data = self.imol.data
else:
data = self.imol[IDs]
IDs, data = nonzeros(IDs, data)
IDs = tuple(IDs)
display_units = self.display_units
T_units = T or display_units.T
P_units = P or display_units.P
flow_units = flow or display_units.flow
N_max = display_units.N if N is None else N
basic_info += self._info_phaseTP(self.phase, T_units, P_units)
if N_max == 0:
return basic_info[:-1]
composition = display_units.composition if composition is None else composition
N_IDs = len(IDs)
if N_IDs == 0:
return basic_info + ' flow: 0'
# Start of third line (flow rates)
name, factor = self._get_flow_name_and_factor(flow_units)
indexer = getattr(self, 'i' + name)
# Remaining lines (all flow rates)
flow_array = factor * indexer[IDs]
if composition:
total_flow = flow_array.sum()
beginning = " composition: "
new_line = '\n' + 14 * ' '
flow_array = flow_array/total_flow
else:
beginning = f' flow ({flow_units}): '
new_line = '\n' + len(beginning) * ' '
flow_rates = ''
lengths = [len(i) for i in IDs]
maxlen = max(lengths) + 2
too_many_chemicals = N_IDs > N_max
N = N_max if too_many_chemicals else N_IDs
for i in range(N):
spaces = ' ' * (maxlen - lengths[i])
if i: flow_rates += new_line
flow_rates += IDs[i] + spaces + f'{flow_array[i]:.3g}'
if too_many_chemicals: flow_rates += new_line + '...'
if composition:
dashes = '-' * (maxlen - 2)
flow_rates += f"{new_line}{dashes} {total_flow:.3g} {flow_units}"
return (basic_info
+ beginning
+ flow_rates)
def show(self, layout=None, T=None, P=None, flow=None, composition=None, N=None, IDs=None):
"""
Print all specifications.
Parameters
----------
layout : str, optional
            Convenience parameter for passing `flow`, `composition`, and `N`.
            Must have the form {'c' or ''}{'wt', 'mol' or 'vol'}{# or ''}.
            For example: 'cwt100' corresponds to composition=True, flow='kg/hr',
and N=100.
T : str, optional
Temperature units.
P : str, optional
Pressure units.
flow : str, optional
Flow rate units.
composition : bool, optional
Whether to show composition.
N : int, optional
Number of compounds to display.
IDs : tuple[str], optional
            IDs of compounds to display. Defaults to all chemicals.
Notes
-----
Default values are stored in `Stream.display_units`.
"""
print(self._info(layout, T, P, flow, composition, N, IDs))
_ipython_display_ = show
def print(self, units=None):
"""
        Print in a format that you can use to recreate the stream.
Parameters
----------
units : str, optional
            Units of measure for material flow rates. Defaults to 'kmol/hr'.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream(ID='s1',
... Water=20, Ethanol=10, units='kg/hr',
... T=298.15, P=101325, phase='l')
>>> s1.print(units='kg/hr')
Stream(ID='s1', phase='l', T=298.15, P=101325, Water=20, Ethanol=10, units='kg/hr')
>>> s1.print() # Units default to kmol/hr
Stream(ID='s1', phase='l', T=298.15, P=101325, Water=1.11, Ethanol=0.2171, units='kmol/hr')
"""
if not units:
units = 'kmol/hr'
flow = self.mol
else:
flow = self.get_flow(units)
chemical_flows = utils.repr_IDs_data(self.chemicals.IDs, flow)
price = utils.repr_kwarg('price', self.price)
print(f"{type(self).__name__}(ID={repr(self.ID)}, phase={repr(self.phase)}, T={self.T:.2f}, "
f"P={self.P:.6g}{price}{chemical_flows}, units={repr(units)})")
|
[
"thermosteam.ThermalCondition",
"thermosteam.functional.V_to_rho",
"thermosteam.functional.mu_to_nu",
"numpy.asarray",
"numpy.isfinite",
"thermosteam.settings.get_impact_indicator_units",
"chemicals.elements.array_to_atoms",
"thermosteam.functional.Pr",
"thermosteam.Stream",
"numpy.dot",
"thermosteam.functional.alpha",
"flexsolve.wegstein"
] |
[((9089, 9115), 'thermosteam.ThermalCondition', 'tmo.ThermalCondition', (['T', 'P'], {}), '(T, P)\n', (9109, 9115), True, 'import thermosteam as tmo\n'), ((18628, 18646), 'numpy.isfinite', 'np.isfinite', (['price'], {}), '(price)\n', (18639, 18646), True, 'import numpy as np\n'), ((25333, 25388), 'chemicals.elements.array_to_atoms', 'array_to_atoms', (['(self.chemicals.formula_array @ self.mol)'], {}), '(self.chemicals.formula_array @ self.mol)\n', (25347, 25388), False, 'from chemicals.elements import array_to_atoms, symbol_to_index\n'), ((31046, 31081), 'numpy.dot', 'np.dot', (['self.chemicals.MW', 'self.mol'], {}), '(self.chemicals.MW, self.mol)\n', (31052, 31081), True, 'import numpy as np\n'), ((38354, 38402), 'thermosteam.functional.alpha', 'fn.alpha', (['self.kappa', 'self.rho', '(self.Cp * 1000.0)'], {}), '(self.kappa, self.rho, self.Cp * 1000.0)\n', (38362, 38402), True, 'from thermosteam import functional as fn\n'), ((38540, 38568), 'thermosteam.functional.V_to_rho', 'fn.V_to_rho', (['self.V', 'self.MW'], {}), '(self.V, self.MW)\n', (38551, 38568), True, 'from thermosteam import functional as fn\n'), ((38667, 38697), 'thermosteam.functional.mu_to_nu', 'fn.mu_to_nu', (['self.mu', 'self.rho'], {}), '(self.mu, self.rho)\n', (38678, 38697), True, 'from thermosteam import functional as fn\n'), ((38787, 38829), 'thermosteam.functional.Pr', 'fn.Pr', (['(self.Cp * 1000)', 'self.kappa', 'self.mu'], {}), '(self.Cp * 1000, self.kappa, self.mu)\n', (38792, 38829), True, 'from thermosteam import functional as fn\n'), ((75134, 75190), 'thermosteam.Stream', 'tmo.Stream', (['None'], {'T': 'self.T', 'P': 'self.P', 'thermo': 'self.thermo'}), '(None, T=self.T, P=self.P, thermo=self.thermo)\n', (75144, 75190), True, 'import thermosteam as tmo\n'), ((13425, 13469), 'thermosteam.settings.get_impact_indicator_units', 'tmo.settings.get_impact_indicator_units', (['key'], {}), '(key)\n', (13464, 13469), True, 'import thermosteam as tmo\n'), ((14303, 14347), 'thermosteam.settings.get_impact_indicator_units', 'tmo.settings.get_impact_indicator_units', (['key'], {}), '(key)\n', (14342, 14347), True, 'import thermosteam as tmo\n'), ((26880, 26909), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (26890, 26909), True, 'import numpy as np\n'), ((76130, 76172), 'flexsolve.wegstein', 'flx.wegstein', (['equilibrium_approximation', 'T'], {}), '(equilibrium_approximation, T)\n', (76142, 76172), True, 'import flexsolve as flx\n'), ((23042, 23071), 'numpy.asarray', 'np.asarray', (['flow'], {'dtype': 'float'}), '(flow, dtype=float)\n', (23052, 23071), True, 'import numpy as np\n')]
|
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_array
import numpy as np
from ..utils.tools import Solver
class MissForest(Solver):
def __init__(
self,
n_estimators=300,
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features='auto',
max_samples=None,
normalizer='min_max'):
"""
Parameters
----------
        n_estimators: integer, optional (default=300)
max_depth: integer or None, optional (default=None)
The maximum depth of the tree.
If None, then nodes are expanded until all leaves are pure
or until all leaves contain less than min_samples_split samples.
min_samples_split: int, float, optional (default=2)
The minimum number of samples required to split an internal node
min_samples_leaf: int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves
at least min_samples_leaf training samples in each of the left and right branches.
This may have the effect of smoothing the model, especially in regression.
max_features: int, float, string or None, optional (default=”auto”)
The number of features to consider when looking for the best split
if int, then consider max_features features at each split.
If float, then max_features is a fraction and int(max_features * n_features) features are considered at each split.
If “auto”, then max_features=n_features.
If “sqrt”, then max_features=sqrt(n_features).
If “log2”, then max_features=log2(n_features).
If None, then max_features=n_features.
max_samples: int or float, default=None
If bootstrap is True, the number of samples to draw from X to train each base estimator.
If None (default), then draw X.shape[0] samples.
If int, then draw max_samples samples.
If float, then draw max_samples * X.shape[0] samples. Thus, max_samples should be in the interval (0, 1)
"""
self.coltype_dict = None
self.mask_memo_dict = None
self.sorted_col = None
self.stop = False
self.rf_reg = RandomForestRegressor(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
min_samples_split=min_samples_split)
self.rf_cla = RandomForestClassifier(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
min_samples_split=min_samples_split)
self.imp_continuous_index = None
self.imp_categorical_index = None
self.normalizer = normalizer
Solver.__init__(self,
normalizer=normalizer)
def solve(self, X, missing_mask):
X = check_array(X, force_all_finite=False)
self.sorted_col = self.sort_col(missing_mask)
self.coltype_dict = self._judge_type(X)
self.imp_continuous_index, self.imp_categorical_index = \
self.get_type_index(missing_mask, self.coltype_dict)
differ_categorical = float('inf')
differ_continuous = float('inf')
init_fill = X
while self.stop is False:
differ_categorical_old = differ_categorical
differ_continuous_old = differ_continuous
x_old_imp = init_fill
x_new_imp = []
for col in self.sorted_col:
tmp = []
                if self.coltype_dict[col] == 'categorical':
model = self.rf_cla
else:
model = self.rf_reg
x_obs, y_obs, x_mis = self.split(init_fill, col, missing_mask)
model.fit(x_obs, y_obs)
y_mis = model.predict(x_mis)
for ele in y_mis:
tmp.append(ele)
x_new_imp.append(ele)
init_fill[:, col][missing_mask[:,col]] = tmp
x_new_imp = np.asarray(x_new_imp)
differ_continuous, differ_categorical = self._lose_func(x_new_imp, x_old_imp)
if differ_continuous >= differ_continuous_old and differ_categorical >= differ_categorical_old:
self.stop = True
return init_fill
def _lose_func(self, imp_new, imp_old):
"""
        Evaluation method; the mathematical concept is described at 'https://www.stu-zhouyc.com/iterForest/metrics'.
        :param imp_new: the newly imputed values for the originally missing entries
        :param imp_old: the previously imputed data for those entries;
            together with the brand-new imputed data it is used to evaluate convergence.
        :return: (continuous_differ, categorical_differ)
"""
continuous_imp_new = imp_new[self.imp_continuous_index]
continuous_imp_old = imp_old[self.imp_continuous_index]
categorical_imp_new = imp_new[self.imp_categorical_index]
categorical_imp_old = imp_old[self.imp_categorical_index]
try:
continuous_div = continuous_imp_new - continuous_imp_old
continuous_div = continuous_div.dot(continuous_div)
continuous_sum = continuous_imp_new.dot(continuous_imp_new)
categorical_count = np.sum(categorical_imp_new == categorical_imp_old)
categorical_var_len = len(categorical_imp_new)
        except Exception:
categorical_var_len = 0.01
categorical_count = 0
continuous_div = 0
continuous_sum = 0.001
        if categorical_var_len == 0:
categorical_differ = 0
else:
categorical_differ = categorical_count / categorical_var_len
        if continuous_sum == 0:
continuous_differ = 0
else:
continuous_differ = continuous_div / continuous_sum
return continuous_differ, categorical_differ
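# Illustrative usage sketch (names are hypothetical; the Solver base class is
# assumed to pre-fill X and to call solve with a boolean missing_mask marking
# the originally missing cells):
#     imputer = MissForest(n_estimators=100)
#     X_imputed = imputer.solve(X_prefilled, missing_mask)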
|
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.sum",
"sklearn.utils.check_array",
"numpy.asarray",
"sklearn.ensemble.RandomForestRegressor"
] |
[((2477, 2653), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'min_samples_leaf': 'min_samples_leaf', 'max_features': 'max_features', 'min_samples_split': 'min_samples_split'}), '(n_estimators=n_estimators, max_depth=max_depth,\n min_samples_leaf=min_samples_leaf, max_features=max_features,\n min_samples_split=min_samples_split)\n', (2498, 2653), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2844, 3021), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'min_samples_leaf': 'min_samples_leaf', 'max_features': 'max_features', 'min_samples_split': 'min_samples_split'}), '(n_estimators=n_estimators, max_depth=max_depth,\n min_samples_leaf=min_samples_leaf, max_features=max_features,\n min_samples_split=min_samples_split)\n', (2866, 3021), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3427, 3465), 'sklearn.utils.check_array', 'check_array', (['X'], {'force_all_finite': '(False)'}), '(X, force_all_finite=False)\n', (3438, 3465), False, 'from sklearn.utils import check_array\n'), ((4610, 4631), 'numpy.asarray', 'np.asarray', (['x_new_imp'], {}), '(x_new_imp)\n', (4620, 4631), True, 'import numpy as np\n'), ((5980, 6030), 'numpy.sum', 'np.sum', (['(categorical_imp_new == categorical_imp_old)'], {}), '(categorical_imp_new == categorical_imp_old)\n', (5986, 6030), True, 'import numpy as np\n')]
|
import numpy as np
import time
from nms.nums_py2 import py_cpu_nms # for cpu
# from nms.gpu_nms import gpu_nms # for gpu
np.random.seed( 1 ) # keep fixed
num_rois = 6000
minxy = np.random.randint(50,145,size=(num_rois ,2))
maxxy = np.random.randint(150,200,size=(num_rois ,2))
score = 0.8*np.random.random_sample((num_rois ,1))+0.2
boxes_new = np.concatenate((minxy,maxxy,score), axis=1).astype(np.float32)
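# Each row of boxes_new is [x1, y1, x2, y2, score] stored as float32.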
def nms_test_time(boxes_new):
thresh = [0.7,0.8,0.9]
T = 50
for i in range(len(thresh)):
since = time.time()
for t in range(T):
keep = py_cpu_nms(boxes_new, thresh=thresh[i]) # for cpu
# keep = gpu_nms(boxes_new, thresh=thresh[i]) # for gpu
print("thresh={:.1f}, time wastes:{:.4f}".format(thresh[i], (time.time()-since)/T))
return keep
if __name__ =="__main__":
nms_test_time(boxes_new)
|
[
"numpy.random.seed",
"numpy.random.random_sample",
"nms.nums_py2.py_cpu_nms",
"time.time",
"numpy.random.randint",
"numpy.concatenate"
] |
[((127, 144), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (141, 144), True, 'import numpy as np\n'), ((186, 232), 'numpy.random.randint', 'np.random.randint', (['(50)', '(145)'], {'size': '(num_rois, 2)'}), '(50, 145, size=(num_rois, 2))\n', (203, 232), True, 'import numpy as np\n'), ((239, 286), 'numpy.random.randint', 'np.random.randint', (['(150)', '(200)'], {'size': '(num_rois, 2)'}), '(150, 200, size=(num_rois, 2))\n', (256, 286), True, 'import numpy as np\n'), ((297, 335), 'numpy.random.random_sample', 'np.random.random_sample', (['(num_rois, 1)'], {}), '((num_rois, 1))\n', (320, 335), True, 'import numpy as np\n'), ((353, 398), 'numpy.concatenate', 'np.concatenate', (['(minxy, maxxy, score)'], {'axis': '(1)'}), '((minxy, maxxy, score), axis=1)\n', (367, 398), True, 'import numpy as np\n'), ((535, 546), 'time.time', 'time.time', ([], {}), '()\n', (544, 546), False, 'import time\n'), ((594, 633), 'nms.nums_py2.py_cpu_nms', 'py_cpu_nms', (['boxes_new'], {'thresh': 'thresh[i]'}), '(boxes_new, thresh=thresh[i])\n', (604, 633), False, 'from nms.nums_py2 import py_cpu_nms\n'), ((790, 801), 'time.time', 'time.time', ([], {}), '()\n', (799, 801), False, 'import time\n')]
|
#coding=utf-8
# Color palette
import cv2
import numpy as np
img = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('image')
def callback(x):
pass
# Arg 1: trackbar name; arg 2: window it is attached to; args 3 and 4: minimum and maximum values; arg 5: callback invoked when the value changes
cv2.createTrackbar('R', 'image', 0, 255, callback)
cv2.createTrackbar('G', 'image', 0, 255, callback)
cv2.createTrackbar('B', 'image', 0, 255, callback)
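# Read the current trackbar positions every frame and paint the whole image
# with the selected (B, G, R) color until 'q' is pressed.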
while (1):
cv2.imshow('image', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
r = cv2.getTrackbarPos('R', 'image')
g = cv2.getTrackbarPos('G', 'image')
b = cv2.getTrackbarPos('B', 'image')
img[:] = [b, g, r]
cv2.destroyAllWindows()
|
[
"cv2.createTrackbar",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.getTrackbarPos",
"cv2.imshow",
"cv2.namedWindow"
] |
[((56, 89), 'numpy.zeros', 'np.zeros', (['(300, 512, 3)', 'np.uint8'], {}), '((300, 512, 3), np.uint8)\n', (64, 89), True, 'import numpy as np\n'), ((90, 114), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (105, 114), False, 'import cv2\n'), ((188, 238), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""R"""', '"""image"""', '(0)', '(255)', 'callback'], {}), "('R', 'image', 0, 255, callback)\n", (206, 238), False, 'import cv2\n'), ((239, 289), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""G"""', '"""image"""', '(0)', '(255)', 'callback'], {}), "('G', 'image', 0, 255, callback)\n", (257, 289), False, 'import cv2\n'), ((290, 340), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""B"""', '"""image"""', '(0)', '(255)', 'callback'], {}), "('B', 'image', 0, 255, callback)\n", (308, 340), False, 'import cv2\n'), ((587, 610), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (608, 610), False, 'import cv2\n'), ((357, 381), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (367, 381), False, 'import cv2\n'), ((447, 479), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""R"""', '"""image"""'], {}), "('R', 'image')\n", (465, 479), False, 'import cv2\n'), ((488, 520), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""G"""', '"""image"""'], {}), "('G', 'image')\n", (506, 520), False, 'import cv2\n'), ((529, 561), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""B"""', '"""image"""'], {}), "('B', 'image')\n", (547, 561), False, 'import cv2\n'), ((389, 403), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (400, 403), False, 'import cv2\n')]
|
'''Code from python notebook by simoninithomas
available at https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/master/Q%20learning/Q%20Learning%20with%20FrozenLake.ipynb
'''
import numpy as np
import gym
import random
env = gym.make("FrozenLake-v0")
action_size = env.action_space.n
state_size = env.observation_space.n
qtable = np.zeros((state_size, action_size))
#print(qtable)
total_episodes = 10000 # Total episodes
learning_rate = 0.5 # Learning rate
max_steps = 50 # Max steps per episode
gamma = 0.95 # Discounting rate
# Exploration parameters
epsilon = 1.0 # Exploration rate
max_epsilon = 1.0 # Exploration probability at start
min_epsilon = 0.01 # Minimum exploration probability
decay_rate = 0.001 # Exponential decay rate for exploration prob
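# Epsilon-greedy schedule used below:
#   epsilon = min_epsilon + (max_epsilon - min_epsilon) * exp(-decay_rate * episode)
# e.g. after 1000 episodes: 0.01 + 0.99 * exp(-1) ~= 0.37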
# List of rewards
rewards = []
# 2 For life or until learning is stopped
for episode in range(total_episodes):
# Reset the environment
state = env.reset()
step = 0
done = False
total_rewards = 0
print("EPISODE",episode)
for step in range(max_steps):
# 3. Choose an action a in the current world state (s)
## First we randomize a number
exp_exp_tradeoff = random.uniform(0, 1)
## If this number > greater than epsilon --> exploitation (taking the biggest Q value for this state)
if exp_exp_tradeoff > epsilon:
action = np.argmax(qtable[state,:])
#print("Let's exploit.", action)
env.render()
# Else doing a random choice --> exploration
else:
action = env.action_space.sample()
#print("Let's explore.",action)
env.render()
# Take the action (a) and observe the outcome state(s') and reward (r)
new_state, reward, done, info = env.step(action)
print("NEW STATE:",new_state,"REWARD:",reward)
# Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)]
# qtable[new_state,:] : all the actions we can take from new state
qtable[state, action] = qtable[state, action] + learning_rate * (reward + gamma * np.max(qtable[new_state, :]) - qtable[state, action])
print("QTABLE AT",state,qtable[state])
total_rewards += reward
# Our new state is state
state = new_state
# If done (if we're dead) : finish episode
if done == True:
print("GAME OVER.\n\n")
break
episode += 1
# Reduce epsilon (because we need less and less exploration)
epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)
print(epsilon)
rewards.append(total_rewards)
print ("Score over time: " + str(sum(rewards)/total_episodes))
print(qtable)
env.reset()
for episode in range(0):
state = env.reset()
step = 0
done = False
print("****************************************************")
print("EPISODE ", episode)
for step in range(max_steps):
env.render()
# Take the action (index) that have the maximum expected future reward given that state
action = np.argmax(qtable[state,:])
new_state, reward, done, info = env.step(action)
if done:
break
state = new_state
env.close()
|
[
"gym.make",
"numpy.argmax",
"random.uniform",
"numpy.zeros",
"numpy.max",
"numpy.exp"
] |
[((252, 277), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {}), "('FrozenLake-v0')\n", (260, 277), False, 'import gym\n'), ((358, 393), 'numpy.zeros', 'np.zeros', (['(state_size, action_size)'], {}), '((state_size, action_size))\n', (366, 393), True, 'import numpy as np\n'), ((1299, 1319), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1313, 1319), False, 'import random\n'), ((3237, 3264), 'numpy.argmax', 'np.argmax', (['qtable[state, :]'], {}), '(qtable[state, :])\n', (3246, 3264), True, 'import numpy as np\n'), ((1499, 1526), 'numpy.argmax', 'np.argmax', (['qtable[state, :]'], {}), '(qtable[state, :])\n', (1508, 1526), True, 'import numpy as np\n'), ((2714, 2743), 'numpy.exp', 'np.exp', (['(-decay_rate * episode)'], {}), '(-decay_rate * episode)\n', (2720, 2743), True, 'import numpy as np\n'), ((2217, 2245), 'numpy.max', 'np.max', (['qtable[new_state, :]'], {}), '(qtable[new_state, :])\n', (2223, 2245), True, 'import numpy as np\n')]
|
import os.path
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from numpy import dot
from numpy.linalg import norm
class NotIntegerError(Exception):
pass
# Load a document, tokenize it into words, store them in word_list, and return word_list
def doc_tokenize(doc_name):
with open(doc_name, 'rt') as fp:
string = fp.read()
word_list = word_tokenize(string)
    # Remove stop words, which carry little meaning, from word_list to improve the accuracy of the similarity computation
word_list = [word for word in word_list if word not in stop_words]
    # Lowercase every word so that upper/lower case does not create distinct terms
word_list = [word.lower() if word.islower() == False else word for word in word_list]
return word_list
# Compute the term frequency of each word in the list and return it as a dict
def tf(list):
tf_dict = {word : list.count(word) if word in list else 0 for word in word_zip}
return tf_dict
# Multiply each word's tf by its idf to get tf-idf, sort alphabetically, and return a list whose elements have the form (word, tf-idf)
def tf_idf(list):
tf_dict = tf(list)
tf_idf_dict = {word : tf_dict[word] * idf_dict[word] for word in tf_dict.keys()}
return sorted(tf_idf_dict.items())
# Compute the cosine similarity between documents doc_1 and doc_2 and return the similarity value
def cos_similarity(doc_1_name, doc_2_name):
    # Compute the tf-idf values of doc_1 and doc_2
doc_1 = tf_idf(doc_tokenize(doc_1_name))
doc_2 = tf_idf(doc_tokenize(doc_2_name))
    # Assign the tf-idf values of the words in doc_1 to vector_1
vector_1 = [value[1] for value in doc_1]
    # Assign the tf-idf values of the words in doc_2 to vector_2
vector_2 = [value[1] for value in doc_2]
    # Compute the cosine of the angle between vector_1 and vector_2, multiply by 100 to express it as a percentage, and round to 2 decimal places
return round((dot(vector_1, vector_2) / (norm(vector_1) * norm(vector_2)))*100, 2)
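# The value above is the standard cosine similarity,
#   cos(theta) = (v1 . v2) / (||v1|| * ||v2||),
# expressed as a percentage.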
while True:
try:
        # Enter the number of documents
doc_count = float(input('Please enter the count of documents : '))
if doc_count % 1 != 0:
raise NotIntegerError()
doc_count = int(doc_count)
doc_name_list = []
i = 0
while i < doc_count:
            doc_name = input(f'Please enter the name of document [{i + 1}/{doc_count}] : ') + ".txt"
            # If the entered document name does not exist, ask again; if it exists, append it to doc_name_list
if os.path.isfile(doc_name):
doc_name_list.append(doc_name)
i += 1
else:
print('Please enter the name of an existing document.')
break
except ValueError:
        # Raised when the document count input is not a number
print('Please enter the number.')
except NotIntegerError:
        # Raised when the document count input is not an integer
print('Please enter the integer.')
stop_words = set(stopwords.words('english'))
# Collect every document in doc_zip to compute idf values
doc_zip = [doc_tokenize(name) for name in doc_name_list]
# Collect the unique words of all documents in word_zip to compute tf-idf values
word_zip = list(set([word for doc in doc_zip for word in doc]))
# Compute the inverse document frequency of each word and store it in a dict
idf_dict = {}
for word in word_zip:
word_count = 0
for doc in doc_zip:
if word in doc:
word_count += 1
idf_dict[word] = np.log((1 + doc_count) / (word_count))
# Compute the pairwise similarity between all documents and store it in similarity_dict
similarity_dict = {(doc_name_list[i], doc_name_list[j]) : cos_similarity(doc_name_list[i], doc_name_list[j]) for i in range(len(doc_name_list)-1) for j in range(i+1, doc_count)}
# Find and print the two documents with the highest similarity
key_max = max(similarity_dict.keys(), key=lambda x: similarity_dict[x])
value_max = max(similarity_dict.values())
print(f"The similarity between {key_max[0]} and {key_max[1]} is highest at {value_max}%")
|
[
"numpy.log",
"numpy.linalg.norm",
"nltk.corpus.stopwords.words",
"numpy.dot",
"nltk.tokenize.word_tokenize"
] |
[((369, 390), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['string'], {}), '(string)\n', (382, 390), False, 'from nltk.tokenize import word_tokenize\n'), ((2598, 2624), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2613, 2624), False, 'from nltk.corpus import stopwords\n'), ((3042, 3078), 'numpy.log', 'np.log', (['((1 + doc_count) / word_count)'], {}), '((1 + doc_count) / word_count)\n', (3048, 3078), True, 'import numpy as np\n'), ((1603, 1626), 'numpy.dot', 'dot', (['vector_1', 'vector_2'], {}), '(vector_1, vector_2)\n', (1606, 1626), False, 'from numpy import dot\n'), ((1630, 1644), 'numpy.linalg.norm', 'norm', (['vector_1'], {}), '(vector_1)\n', (1634, 1644), False, 'from numpy.linalg import norm\n'), ((1647, 1661), 'numpy.linalg.norm', 'norm', (['vector_2'], {}), '(vector_2)\n', (1651, 1661), False, 'from numpy.linalg import norm\n')]
|
"""
NCL_conwomap_2.py
=================
This script illustrates the following concepts:
- Drawing a simple filled contour plot
- Selecting a different color map
- Changing the size/shape of a contour plot
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/conwomap_2.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/conwomap_2_lg.png
"""
import cartopy.crs as ccrs
import geocat.datafiles as gdf
import matplotlib.pyplot as plt
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
from geocat.viz import cmaps as gvcmaps
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/cone.nc"))
u = ds.u.isel(time=4)
###############################################################################
# Plot:
# Generate figure (set its size (width, height) in inches)
plt.figure(figsize=(10, 6))
# Generate axes, using Cartopy
projection = ccrs.PlateCarree()
ax = plt.axes(projection=projection)
# Import an NCL colormap
newcmp = gvcmaps.gui_default
# Contourf-plot data (for filled contours)
p = u.plot.contourf(ax=ax,
vmin=-1,
vmax=10,
levels=12,
cmap=newcmp,
add_colorbar=False,
transform=projection,
extend='neither',
add_labels=False)
# Contour-plot data (for borderlines)
u.plot.contour(ax=ax,
vmin=-1,
vmax=10,
levels=12,
linewidths=0.5,
colors='black',
add_colorbar=False,
transform=projection,
extend='neither',
add_labels=False)
# Add horizontal colorbar
cbar = plt.colorbar(p, orientation='horizontal', shrink=0.5)
cbar.ax.tick_params(labelsize=16)
cbar.set_ticks(np.linspace(0, 9, 10))
# Use geocat.viz.util convenience function to set axes limits & tick values without calling several matplotlib functions
gvutil.set_axes_limits_and_ticks(ax,
xlim=(0, 49),
ylim=(0, 29),
xticks=np.linspace(0, 40, 5),
yticks=np.linspace(0, 25, 6))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax,
x_minor_per_major=5,
y_minor_per_major=5,
labelsize=16)
# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.
gvutil.set_titles_and_labels(ax,
lefttitle="Cone amplitude",
lefttitlefontsize=18,
righttitle="ndim",
righttitlefontsize=18,
xlabel="X",
ylabel="Y",
labelfontsize=18)
# Show the plot
plt.show()
|
[
"matplotlib.pyplot.show",
"geocat.viz.util.set_titles_and_labels",
"matplotlib.pyplot.axes",
"geocat.datafiles.get",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"geocat.viz.util.add_major_minor_ticks",
"numpy.linspace",
"cartopy.crs.PlateCarree"
] |
[((1170, 1197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1180, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1261), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1259, 1261), True, 'import cartopy.crs as ccrs\n'), ((1267, 1298), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'projection'}), '(projection=projection)\n', (1275, 1298), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2126), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {'orientation': '"""horizontal"""', 'shrink': '(0.5)'}), "(p, orientation='horizontal', shrink=0.5)\n", (2085, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2748), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax'], {'x_minor_per_major': '(5)', 'y_minor_per_major': '(5)', 'labelsize': '(16)'}), '(ax, x_minor_per_major=5, y_minor_per_major=5,\n labelsize=16)\n', (2684, 2748), True, 'from geocat.viz import util as gvutil\n'), ((2926, 3101), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax'], {'lefttitle': '"""Cone amplitude"""', 'lefttitlefontsize': '(18)', 'righttitle': '"""ndim"""', 'righttitlefontsize': '(18)', 'xlabel': '"""X"""', 'ylabel': '"""Y"""', 'labelfontsize': '(18)'}), "(ax, lefttitle='Cone amplitude',\n lefttitlefontsize=18, righttitle='ndim', righttitlefontsize=18, xlabel=\n 'X', ylabel='Y', labelfontsize=18)\n", (2954, 3101), True, 'from geocat.viz import util as gvutil\n'), ((3313, 3323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3321, 3323), True, 'import matplotlib.pyplot as plt\n'), ((966, 997), 'geocat.datafiles.get', 'gdf.get', (['"""netcdf_files/cone.nc"""'], {}), "('netcdf_files/cone.nc')\n", (973, 997), True, 'import geocat.datafiles as gdf\n'), ((2176, 2197), 'numpy.linspace', 'np.linspace', (['(0)', '(9)', '(10)'], {}), '(0, 9, 10)\n', (2187, 2197), True, 'import numpy as np\n'), ((2492, 2513), 'numpy.linspace', 'np.linspace', (['(0)', '(40)', '(5)'], {}), '(0, 40, 5)\n', (2503, 2513), True, 'import numpy as np\n'), ((2555, 2576), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(6)'], {}), '(0, 25, 6)\n', (2566, 2576), True, 'import numpy as np\n')]
|
import os, os.path as op
import logging
import numpy as np
import cv2
import progressbar
import ast
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pprint
import PIL
from lib.backend import backendDb
from lib.backend import backendMedia
from lib.utils import util
def add_parsers(subparsers):
evaluateDetectionParser(subparsers)
evaluateSegmentationIoUParser(subparsers)
evaluateBinarySegmentationParser(subparsers)
def _evaluateDetectionForClassPascal(c, c_gt, name, args):
def _voc_ap(rec, prec):
""" Compute VOC AP given precision and recall. """
# First append sentinel values at the end.
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# Compute the precision envelope.
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# To calculate area under PR curve, look for points
# where X axis (recall) changes value.
i = np.where(mrec[1:] != mrec[:-1])[0]
# Sum (\Delta recall) * prec.
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
c.execute('SELECT * FROM objects WHERE name=? ORDER BY score DESC',
(name, ))
entries_det = c.fetchall()
logging.info('Total %d detected objects for class "%s"', len(entries_det),
name)
# Go down dets and mark TPs and FPs.
tp = np.zeros(len(entries_det), dtype=float)
fp = np.zeros(len(entries_det), dtype=float)
# Detected of no interest.
ignored = np.zeros(len(entries_det), dtype=bool)
# 'already_detected' used to penalize multiple detections of same GT box.
already_detected = set()
# Go through each detection.
for idet, entry_det in enumerate(entries_det):
bbox_det = np.array(backendDb.objectField(entry_det, 'bbox'),
dtype=float)
imagefile = backendDb.objectField(entry_det, 'imagefile')
name = backendDb.objectField(entry_det, 'name')
# Get all GT boxes from the same imagefile [of the same class].
c_gt.execute('SELECT * FROM objects WHERE imagefile=? AND name=?',
(imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
bboxes_gt = np.array(
[backendDb.objectField(entry, 'bbox') for entry in entries_gt],
dtype=float)
# Separately manage no GT boxes.
if bboxes_gt.size == 0:
fp[idet] = 1.
continue
# Intersection between bbox_det and all bboxes_gt.
ixmin = np.maximum(bboxes_gt[:, 0], bbox_det[0])
iymin = np.maximum(bboxes_gt[:, 1], bbox_det[1])
ixmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2],
bbox_det[0] + bbox_det[2])
iymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3],
bbox_det[1] + bbox_det[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
intersection = iw * ih
# Union between bbox_det and all bboxes_gt.
union = (bbox_det[2] * bbox_det[3] +
bboxes_gt[:, 2] * bboxes_gt[:, 3] - intersection)
# IoU and get the best IoU.
IoUs = intersection / union
max_IoU = np.max(IoUs)
objectid_gt = objectids_gt[np.argmax(IoUs)]
logging.debug('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,
idet, objectid_gt)
# Find which objects count towards TP and FN (should be detected).
c_gt.execute(
'SELECT * FROM objects WHERE imagefile=? AND name=? AND %s' %
args.where_object_gt, (imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt_of_interest = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
# If 1) large enough IoU and
# 2) this GT box was not detected before.
if max_IoU > args.IoU_thresh and not objectid_gt in already_detected:
if objectid_gt in objectids_gt_of_interest:
tp[idet] = 1.
else:
ignored[idet] = True
already_detected.add(objectid_gt)
else:
fp[idet] = 1.
# Find the number of GT of interest.
c_gt.execute(
'SELECT COUNT(1) FROM objects WHERE %s AND name=?' %
args.where_object_gt, (name, ))
n_gt = c_gt.fetchone()[0]
logging.info('Total objects of interest: %d', n_gt)
# Remove dets, neither TP or FP.
tp = tp[np.bitwise_not(ignored)]
fp = fp[np.bitwise_not(ignored)]
logging.info('ignored: %d, tp: %d, fp: %d, gt: %d',
np.count_nonzero(ignored), np.count_nonzero(tp),
np.count_nonzero(fp), n_gt)
assert np.count_nonzero(tp) + np.count_nonzero(fp) + np.count_nonzero(
ignored) == len(entries_det)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(n_gt)
# Avoid divide by zero in case the first detection matches a difficult
# ground truth.
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
aps = _voc_ap(rec, prec)
print('Average precision for class "%s": %.4f' % (name, aps))
return aps
def _writeCurveValues(out_dir, X, Y, metrics_name, name, header):
if name is not None:
name = util.validateFileName(name)
stem = '%s-%s' % (metrics_name, name)
else:
stem = metrics_name
plt.savefig(op.join(out_dir, '%s.png' % stem))
plt.savefig(op.join(out_dir, '%s.eps' % stem))
with open(op.join(out_dir, '%s.txt' % stem), 'w') as f:
f.write('%s\n' % header)
for x, y in zip(X, Y):
f.write('%f %f\n' % (x, y))
def _beautifyPlot(ax):
ax.grid(which='major', linewidth='0.5')
ax.grid(which='minor', linewidth='0.2')
loc = ticker.MultipleLocator(0.2)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
loc = ticker.MultipleLocator(0.1)
ax.xaxis.set_minor_locator(loc)
ax.yaxis.set_minor_locator(loc)
ax.set_aspect('equal', adjustable='box')
def _evaluateDetectionForClassSklearn(c, c_gt, class_name, args, sklearn):
''' Helper function for evaluateDetection. '''
# Detected objects sorted by descending score (confidence).
if class_name is None:
c.execute('SELECT * FROM objects ORDER BY score DESC')
else:
c.execute('SELECT * FROM objects WHERE name=? ORDER BY score DESC',
(class_name, ))
entries_det = c.fetchall()
logging.info('Num of positive "%s": %d', class_name, len(entries_det))
# Create arrays 'y_score' with predicted scores, binary 'y_true' for GT,
# and a binary 'y_ignored' for detected objects that are neither TP nor FP.
y_score = np.zeros(len(entries_det), dtype=float)
y_true = np.zeros(len(entries_det), dtype=bool)
y_ignored = np.zeros(len(entries_det), dtype=bool)
# 'already_detected' used to penalize multiple detections of same GT box
already_detected = set()
# Go through each detection.
for idet, entry_det in enumerate(entries_det):
bbox_det = np.array(backendDb.objectField(entry_det, 'bbox'),
dtype=float)
imagefile = backendDb.objectField(entry_det, 'imagefile')
name = backendDb.objectField(entry_det, 'name')
score = backendDb.objectField(entry_det, 'score')
y_score[idet] = score
# Get all GT boxes from the same imagefile and of the same class.
c_gt.execute('SELECT * FROM objects WHERE imagefile=? AND name=?',
(imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
bboxes_gt = np.array(
[backendDb.objectField(entry, 'bbox') for entry in entries_gt],
dtype=float)
# Separately manage the case of no GT boxes in this image.
if bboxes_gt.size == 0:
y_score[idet] = False
continue
# Intersection between bbox_det and all bboxes_gt.
ixmin = np.maximum(bboxes_gt[:, 0], bbox_det[0])
iymin = np.maximum(bboxes_gt[:, 1], bbox_det[1])
ixmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2],
bbox_det[0] + bbox_det[2])
iymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3],
bbox_det[1] + bbox_det[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
intersection = iw * ih
# Union between bbox_det and all bboxes_gt.
union = (bbox_det[2] * bbox_det[3] +
bboxes_gt[:, 2] * bboxes_gt[:, 3] - intersection)
# Compute the best IoU between the bbox_det and all bboxes_gt.
IoUs = intersection / union
max_IoU = np.max(IoUs)
objectid_gt = objectids_gt[np.argmax(IoUs)]
logging.debug('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,
idet, objectid_gt)
# Get all GT objects that are of interest.
c_gt.execute(
'SELECT * FROM objects WHERE imagefile=? AND name=? AND %s' %
args.where_object_gt, (imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt_of_interest = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
# Compute TP and FP. An object is a TP if:
# 1) it has a large enough IoU with a GT object and
# 2) this GT object was not detected before.
if max_IoU > args.IoU_thresh and not objectid_gt in already_detected:
if objectid_gt not in objectids_gt_of_interest:
y_ignored[idet] = True
already_detected.add(objectid_gt)
y_true[idet] = True
else:
y_true[idet] = False
    # Ignored detections are neither TP nor FP; remove them from both arrays.
y_score = y_score[np.bitwise_not(y_ignored)]
y_true = y_true[np.bitwise_not(y_ignored)]
# Find the number of GT of interest.
if class_name is None:
c_gt.execute('SELECT COUNT(1) FROM objects WHERE %s' %
args.where_object_gt)
else:
c_gt.execute(
'SELECT COUNT(1) FROM objects WHERE %s AND name=?' %
args.where_object_gt, (class_name, ))
num_gt = c_gt.fetchone()[0]
logging.info('Number of ground truth "%s": %d', class_name, num_gt)
# Add FN to y_score and y_true.
num_fn = num_gt - np.count_nonzero(y_true)
logging.info('Number of false negative "%s": %d', class_name, num_fn)
y_score = np.pad(y_score, [0, num_fn], constant_values=0.)
y_true = np.pad(y_true, [0, num_fn], constant_values=True)
# We need the point for threshold=0 to have y=0. Not sure why it's not yet.
# TODO: figure out how to do it properly.
y_score = np.pad(y_score, [0, 1000000], constant_values=0.0001)
y_true = np.pad(y_true, [0, 1000000], constant_values=False)
if 'precision_recall_curve' in args.extra_metrics:
precision, recall, _ = sklearn.metrics.precision_recall_curve(
y_true=y_true, probas_pred=y_score)
if args.out_dir:
plt.clf()
plt.plot(recall, precision)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Recall')
plt.ylabel('Precision')
_beautifyPlot(plt.gca())
_writeCurveValues(args.out_dir, recall, precision,
'precision-recall', class_name,
'recall precision')
if 'roc_curve' in args.extra_metrics:
        fpr, tpr, _ = sklearn.metrics.roc_curve(y_true=y_true,
                                                 y_score=y_score)
        roc_auc = sklearn.metrics.auc(x=fpr, y=tpr)
        logging.info('Area under the ROC curve for "%s": %.4f', class_name,
                     roc_auc)
if args.out_dir:
plt.clf()
plt.plot(fpr, tpr)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('FPR')
plt.ylabel('TPR')
_beautifyPlot(plt.gca())
_writeCurveValues(args.out_dir, fpr, tpr, 'roc', class_name,
'fpr tpr')
# Compute all metrics for this class.
aps = sklearn.metrics.average_precision_score(y_true=y_true,
y_score=y_score)
if class_name is None:
print('Average precision: %.4f' % aps)
else:
print('Average precision for class "%s": %.4f' % (class_name, aps))
return aps
def evaluateDetectionParser(subparsers):
parser = subparsers.add_parser(
'evaluateDetection',
description='Evaluate detections given a ground truth database.')
parser.set_defaults(func=evaluateDetection)
parser.add_argument('--gt_db_file', required=True)
parser.add_argument('--IoU_thresh', type=float, default=0.5)
parser.add_argument('--where_object_gt', default='TRUE')
parser.add_argument(
'--out_dir',
help='If specified, plots and text files are written here.')
parser.add_argument(
'--extra_metrics',
nargs='+',
default=[],
choices=[
'precision_recall_curve',
'roc_curve',
],
help='Select metrics to be computed in addition to average precision. '
'This is implemented only for evaluation_backend="sklearn". '
'They are computed for every class. The names match those at '
'https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics'
)
parser.add_argument(
'--evaluation_backend',
choices=['sklearn', 'pascal-voc', 'sklearn-all-classes'],
default='sklearn',
help='Detection evaluation is different across papers and methods. '
'PASCAL VOC produces average-precision score a bit different '
'than the sklearn package. A good overview on metrics: '
'https://github.com/rafaelpadilla/Object-Detection-Metrics. '
'"sklearn-all-classes" reports only one accuracy.')
def evaluateDetection(c, args):
if 'sklearn' in args.evaluation_backend:
import sklearn.metrics
# Load the ground truth database.
if not op.exists(args.gt_db_file):
raise FileNotFoundError('File does not exist: %s' % args.gt_db_file)
conn_gt = backendDb.connect(args.gt_db_file, 'load_to_memory')
c_gt = conn_gt.cursor()
# Some info for logging.
c.execute('SELECT COUNT(1) FROM objects')
logging.info('The evaluated database has %d objects.', c.fetchone()[0])
c_gt.execute('SELECT COUNT(1) FROM objects WHERE %s' %
args.where_object_gt)
logging.info('The ground truth database has %d objects of interest.',
c_gt.fetchone()[0])
c_gt.execute('SELECT DISTINCT(name) FROM objects')
names = c_gt.fetchall()
if args.evaluation_backend == 'sklearn':
for name, in names:
_evaluateDetectionForClassSklearn(c, c_gt, name, args, sklearn)
elif args.evaluation_backend == 'pascal-voc':
for name, in names:
            if args.extra_metrics:
                logging.warning('extra_metrics not supported for pascal-voc.')
_evaluateDetectionForClassPascal(c, c_gt, name, args)
elif args.evaluation_backend == 'sklearn-all-classes':
# This method does not separate results by classes.
_evaluateDetectionForClassSklearn(c, c_gt, None, args, sklearn)
else:
assert False
conn_gt.close()
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k],
minlength=n**2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def calc_fw_iu(hist):
pred_per_class = hist.sum(0)
gt_per_class = hist.sum(1)
return np.nansum(
(gt_per_class * np.diag(hist)) /
(pred_per_class + gt_per_class - np.diag(hist))) / gt_per_class.sum()
def calc_pixel_accuracy(hist):
gt_per_class = hist.sum(1)
return np.diag(hist).sum() / gt_per_class.sum()
def calc_mean_accuracy(hist):
gt_per_class = hist.sum(1)
acc_per_class = np.diag(hist) / gt_per_class
return np.nanmean(acc_per_class)
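# --- Illustrative usage sketch (added example, not part of the original code).
# The helpers above implement the usual confusion-matrix pipeline for semantic
# segmentation: fast_hist() bins (gt, pred) label pairs into an n x n matrix,
# and the calc_* reductions turn it into per-class IoU, frequency-weighted IoU,
# pixel accuracy and mean accuracy. The tiny arrays below are hypothetical.
def _example_segmentation_metrics():  # pragma: no cover
    gt = np.array([0, 0, 1, 1])
    pred = np.array([0, 1, 1, 1])
    hist = fast_hist(gt, pred, 2)
    # hist == [[1, 1], [0, 2]]: one background pixel predicted as class 1.
    print('IoU per class:', per_class_iu(hist))          # [0.5, 0.667]
    print('Pixel accuracy:', calc_pixel_accuracy(hist))  # 0.75
    print('Mean accuracy:', calc_mean_accuracy(hist))    # 0.75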
def save_colorful_images(prediction, filename, palette, postfix='_color.png'):
im = PIL.Image.fromarray(palette[prediction.squeeze()])
im.save(filename[:-4] + postfix)
def label_mapping(input_, mapping):
output = np.copy(input_)
for ind in range(len(mapping)):
output[input_ == mapping[ind][0]] = mapping[ind][1]
return np.array(output, dtype=np.int64)
def plot_confusion_matrix(cm, classes, normalize=False, cmap=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if cmap is None:
cmap = plt.cm.Blues
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
logging.info("Normalized confusion matrix.")
else:
logging.info(
'Confusion matrix will be computed without normalization.')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('Ground truth')
plt.xlabel('Predicted label')
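# --- Illustrative usage sketch (added example, not part of the original code).
# plot_confusion_matrix() draws an already-computed confusion matrix; with
# normalize=True each row is divided by its ground-truth count. The 2x2 matrix
# and the class names below are hypothetical.
def _example_plot_confusion_matrix():  # pragma: no cover
    cm = np.array([[80., 20.], [10., 90.]])
    plot_confusion_matrix(cm, classes=['background', 'car'], normalize=True)
    # The current matplotlib figure can then be shown or saved by the caller.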
def _label2classMapping(gt_mapping_dict, pred_mapping_dict):
''' Parse user-defined label mapping dictionaries. '''
# "gt_mapping_dict" maps mask pixel-values to classes.
labelmap_gt = ast.literal_eval(gt_mapping_dict)
labelmap_pr = ast.literal_eval(
pred_mapping_dict) if pred_mapping_dict else labelmap_gt
# Create a list of classes.
class_names = list(labelmap_gt.values())
labelmap_gt_new = {}
# Here, we remap pixel-values to indices of class_names.
for key in labelmap_gt:
labelmap_gt_new[key] = class_names.index(labelmap_gt[key])
labelmap_gt = labelmap_gt_new
labelmap_pr_new = {}
for key in labelmap_pr:
        if labelmap_pr[key] not in class_names:
            raise ValueError(
                'Class "%s" is in "pred_mapping_dict" but not in "gt_mapping_dict"'
                % labelmap_pr[key])
labelmap_pr_new[key] = class_names.index(labelmap_pr[key])
labelmap_pr = labelmap_pr_new
return labelmap_gt, labelmap_pr, class_names
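# --- Illustrative usage sketch (added example, not part of the original code).
# _label2classMapping() turns the user-supplied dict strings into
# pixel-value -> class-index maps that share one class list. The dict string
# below mirrors the example from the --gt_mapping_dict help text.
def _example_label2class_mapping():  # pragma: no cover
    labelmap_gt, labelmap_pr, class_names = _label2classMapping(
        "{0: 'background', 255: 'car'}", None)
    # class_names == ['background', 'car']
    # labelmap_gt == labelmap_pr == {0: 0, 255: 1}
    print(class_names, labelmap_gt, labelmap_pr)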
def evaluateSegmentationIoUParser(subparsers):
parser = subparsers.add_parser(
'evaluateSegmentationIoU',
description='Evaluate mask segmentation w.r.t. a ground truth db.')
parser.set_defaults(func=evaluateSegmentationIoU)
parser.add_argument('--gt_db_file', required=True)
parser.add_argument('--where_image', default='TRUE')
parser.add_argument(
'--out_dir',
help='If specified, output files with be written to "out_dir".')
parser.add_argument(
'--out_prefix',
default='',
        help='A prefix to add to output filenames. '
'Use it to keep predictions from different epochs in one dir.')
parser.add_argument(
'--gt_mapping_dict',
required=True,
help=
'A map from ground truth maskfile to classes written as a json string. '
'E.g. "{0: \'background\', 255: \'car\'}"')
parser.add_argument(
'--pred_mapping_dict',
help='A map from predicted masks to classes written as a json string, '
'if different from "gt_mapping_dict"')
parser.add_argument(
'--class_to_record_iou',
help='If specified, IoU for a class is recorded into the "score" '
'field of the "images" table. '
'If not specified, mean IoU is recorded. '
'Should correspond to values of "gt_mapping_dict". E.g. "background".')
parser.add_argument(
'--out_summary_file',
help='Text file, where the summary is going to be appended as just one '
'line of format: out_prefix \\t IoU_class1 \\t IoU_class2 \\t etc.')
def evaluateSegmentationIoU(c, args):
import pandas as pd
import matplotlib.pyplot as plt
# Get corresponding maskfiles from predictions and ground truth.
logging.info('Opening ground truth dataset: %s', args.gt_db_file)
c.execute('ATTACH ? AS "attached"', (args.gt_db_file, ))
c.execute('SELECT pr.imagefile,pr.maskfile,gt.maskfile '
'FROM images pr INNER JOIN attached.images gt '
'WHERE pr.imagefile=gt.imagefile AND pr.maskfile IS NOT NULL '
'AND gt.maskfile IS NOT NULL '
'AND %s '
'ORDER BY pr.imagefile ASC' % args.where_image)
entries = c.fetchall()
logging.info(
'Total %d images in both the open and the ground truth databases.',
len(entries))
logging.debug(pprint.pformat(entries))
imreader = backendMedia.MediaReader(rootdir=args.rootdir)
labelmap_gt, labelmap_pr, class_names = _label2classMapping(
args.gt_mapping_dict, args.pred_mapping_dict)
if args.class_to_record_iou is not None and not args.class_to_record_iou in class_names:
raise ValueError(
'class_to_record_iou=%s is not among values of gt_mapping_dict=%s'
% (args.class_to_record_iou, args.gt_mapping_dict))
hist_all = np.zeros((len(class_names), len(class_names)))
for imagefile, maskfile_pr, maskfile_gt in progressbar.progressbar(
entries):
# Load masks and bring them to comparable form.
mask_gt = util.applyLabelMappingToMask(imreader.maskread(maskfile_gt),
labelmap_gt)
mask_pr = util.applyLabelMappingToMask(imreader.maskread(maskfile_pr),
labelmap_pr)
mask_pr = cv2.resize(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]),
interpolation=cv2.INTER_NEAREST)
# Evaluate one image pair.
careabout = ~np.isnan(mask_gt)
mask_gt = mask_gt[careabout][:].astype(int)
mask_pr = mask_pr[careabout][:].astype(int)
hist = fast_hist(mask_gt, mask_pr, len(class_names))
hist_all += hist
# Compute and record results by image.
iou_list = per_class_iu(hist)
if args.class_to_record_iou is None:
iou = iou_list.mean()
else:
iou = iou_list[class_names.index(args.class_to_record_iou)]
c.execute('UPDATE images SET score=? WHERE imagefile=?',
(iou, imagefile))
# Get label distribution.
pr_per_class = hist_all.sum(0)
gt_per_class = hist_all.sum(1)
iou_list = per_class_iu(hist_all)
fwIoU = calc_fw_iu(hist_all)
pixAcc = calc_pixel_accuracy(hist_all)
mAcc = calc_mean_accuracy(hist_all)
result_df = pd.DataFrame({
'class': class_names,
'IoU': iou_list,
"pr_distribution": pr_per_class,
"gt_distribution": gt_per_class,
})
result_df["IoU"] *= 100 # Changing to percent ratio.
result_df.set_index("class", inplace=True)
print("---- info per class -----")
print(result_df)
result_ser = pd.Series({
"pixAcc": pixAcc,
"mAcc": mAcc,
"fwIoU": fwIoU,
"mIoU": iou_list.mean()
})
result_ser = result_ser[["pixAcc", "mAcc", "fwIoU", "mIoU"]]
result_ser *= 100 # change to percent ratio
if args.out_dir is not None:
if not op.exists(args.out_dir):
os.makedirs(args.out_dir)
out_summary_path = op.join(args.out_dir, args.out_summary_file)
logging.info('Will add summary to: %s', out_summary_path)
with open(out_summary_path, 'a') as f:
f.write(args.out_prefix + '\t' +
'\t'.join(['%.2f' % x for x in result_df['IoU']]) + '\n')
# Save confusion matrix
fig = plt.figure()
        normalized_hist = (hist_all.astype("float") /
                           hist_all.sum(axis=1)[:, np.newaxis])
plot_confusion_matrix(normalized_hist, classes=class_names)
outfigfn = op.join(args.out_dir, "%sconf_mat.pdf" % args.out_prefix)
fig.savefig(outfigfn,
transparent=True,
bbox_inches='tight',
pad_inches=0,
dpi=300)
print("Confusion matrix was saved to %s" % outfigfn)
outdffn = op.join(args.out_dir,
"%seval_result_df.csv" % args.out_prefix)
result_df.to_csv(outdffn)
print('Info per class was saved at %s !' % outdffn)
outserfn = op.join(args.out_dir,
"%seval_result_ser.csv" % args.out_prefix)
result_ser.to_csv(outserfn)
print('Total result is saved at %s !' % outserfn)
def getPrecRecall(tp, fp, fn):
''' Accumulate into Precision-Recall curve. '''
ROC = np.zeros((256, 2), dtype=float)
for val in range(256):
if tp[val] == 0 and fp[val] == 0:
precision = -1.
else:
precision = tp[val] / float(tp[val] + fp[val])
if tp[val] == 0 and fn[val] == 0:
recall = -1.
else:
recall = tp[val] / float(tp[val] + fn[val])
ROC[val, 0] = recall
ROC[val, 1] = precision
ROC = ROC[np.bitwise_and(ROC[:, 0] != -1, ROC[:, 1] != -1), :]
ROC = np.vstack((ROC, np.array([0, ROC[-1, 1]])))
area = -np.trapz(x=ROC[:, 0], y=ROC[:, 1])
return ROC, area
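# --- Illustrative usage sketch (added example, not part of the original code).
# getPrecRecall() expects per-threshold TP/FP/FN counts, one bin for every
# grayscale threshold 0..255, and integrates the resulting precision-recall
# curve. The synthetic counts below are hypothetical: a predictor whose false
# positives die out faster than its true positives as the threshold grows.
def _example_prec_recall():  # pragma: no cover
    thresholds = np.arange(256)
    tp = 1000 - 2 * thresholds               # TP shrinks with the threshold.
    fp = np.maximum(500 - 4 * thresholds, 0)  # FP shrinks faster.
    fn = 1000 - tp                           # FN grows correspondingly.
    ROC, area = getPrecRecall(tp, fp, fn)
    print('Area under the precision-recall curve: %.3f' % area)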
def evaluateBinarySegmentationParser(subparsers):
parser = subparsers.add_parser(
'evaluateBinarySegmentation',
description=
'Evaluate mask segmentation ROC curve w.r.t. a ground truth db. '
'Ground truth values must be 0 for background, 255 for foreground, '
'and the rest for "dontcare".'
'Predicted mask must be grayscale in [0,255], '
'with brightness meaning probability of foreground.')
parser.set_defaults(func=evaluateBinarySegmentation)
parser.add_argument('--gt_db_file', required=True)
parser.add_argument('--where_image', default='TRUE')
parser.add_argument(
'--out_dir',
help='If specified, result files with be written to "out_dir".')
parser.add_argument(
'--out_prefix',
default='',
        help='A prefix to add to output filenames. '
'Use it to keep predictions from different epochs in one dir.')
parser.add_argument('--display_images_roc',
action='store_true',
help='Specify to display on screen')
def evaluateBinarySegmentation(c, args):
import pandas as pd
# Get corresponding maskfiles from predictions and ground truth.
c.execute('ATTACH ? AS "attached"', (args.gt_db_file, ))
c.execute('SELECT pr.imagefile,pr.maskfile,gt.maskfile '
'FROM images pr INNER JOIN attached.images gt '
'WHERE pr.imagefile=gt.imagefile '
'AND pr.maskfile IS NOT NULL '
'AND gt.maskfile IS NOT NULL '
'AND %s '
'ORDER BY pr.imagefile ASC' % args.where_image)
entries = c.fetchall()
logging.info(
'Total %d images in both the open and the ground truth databases.' %
len(entries))
logging.debug(pprint.pformat(entries))
imreader = backendMedia.MediaReader(rootdir=args.rootdir)
TPs = np.zeros((256, ), dtype=int)
FPs = np.zeros((256, ), dtype=int)
FNs = np.zeros((256, ), dtype=int)
if args.display_images_roc:
fig = plt.figure()
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
for imagefile, maskfile_pr, maskfile_gt in progressbar.progressbar(
entries):
# Load masks and bring them to comparable form.
mask_gt = imreader.maskread(maskfile_gt)
mask_pr = imreader.maskread(maskfile_pr)
        mask_pr = cv2.resize(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]),
                             interpolation=cv2.INTER_NEAREST)
        # Log some ground truth statistics.
gt_pos = np.count_nonzero(mask_gt == 255)
gt_neg = np.count_nonzero(mask_gt == 0)
gt_other = mask_gt.size - gt_pos - gt_neg
logging.debug('GT: positive: %d, negative: %d, others: %d.', gt_pos,
gt_neg, gt_other)
        # Use torch if it is available; the 256-threshold sweep is much faster on GPU.
try:
import torch
# Use only relevant pixels (not the 'dontcare' class.)
relevant = np.bitwise_or(mask_gt == 0, mask_gt == 255)
mask_gt = mask_gt[relevant].flatten()
mask_pr = mask_pr[relevant].flatten()
mask_gt = torch.Tensor(mask_gt)
mask_pr = torch.Tensor(mask_pr)
try:
mask_gt = mask_gt.cuda()
mask_pr = mask_pr.cuda()
except RuntimeError:
pass
TP = np.zeros((256, ), dtype=int)
FP = np.zeros((256, ), dtype=int)
FN = np.zeros((256, ), dtype=int)
for val in range(256):
tp = torch.nonzero(torch.mul(mask_pr > val,
mask_gt == 255)).size()[0]
fp = torch.nonzero(torch.mul(mask_pr > val,
mask_gt != 255)).size()[0]
fn = torch.nonzero(torch.mul(mask_pr <= val,
mask_gt == 255)).size()[0]
tn = torch.nonzero(torch.mul(mask_pr <= val,
mask_gt != 255)).size()[0]
TP[val] = tp
FP[val] = fp
FN[val] = fn
TPs[val] += tp
FPs[val] += fp
FNs[val] += fn
ROC, area = getPrecRecall(TP, FP, FN)
logging.info('%s\t%.2f' % (op.basename(imagefile), area * 100.))
except ImportError:
# TODO: write the same without torch, on CPU
raise NotImplementedError(
'Non-torch implementation is still to be implemented.')
if args.display_images_roc:
plt.plot(ROC[:, 0], ROC[:, 1], 'go-', linewidth=2, markersize=4)
plt.pause(0.05)
fig.show()
# Accumulate into Precision-Recall curve.
ROC, area = getPrecRecall(TPs, FPs, FNs)
print(
"Average across image area under the Precision-Recall curve, perc: %.2f"
% (area * 100.))
if args.out_dir is not None:
if not op.exists(args.out_dir):
os.makedirs(args.out_dir)
fig = plt.figure()
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.plot(ROC[:, 0], ROC[:, 1], 'bo-', linewidth=2, markersize=6)
out_plot_path = op.join(args.out_dir,
'%srecall-prec.png' % args.out_prefix)
fig.savefig(out_plot_path,
transparent=True,
bbox_inches='tight',
pad_inches=0,
dpi=300)
|
[
"pprint.pformat",
"numpy.sum",
"numpy.maximum",
"numpy.argmax",
"matplotlib.pyplot.clf",
"numpy.isnan",
"matplotlib.pyplot.figure",
"lib.backend.backendDb.connect",
"matplotlib.pyplot.gca",
"numpy.diag",
"numpy.bitwise_or",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.nanmean",
"numpy.pad",
"pandas.DataFrame",
"lib.backend.backendMedia.MediaReader",
"numpy.copy",
"logging.warning",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.cumsum",
"numpy.max",
"torch.Tensor",
"numpy.finfo",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.pause",
"cv2.resize",
"numpy.trapz",
"numpy.minimum",
"lib.backend.backendDb.objectField",
"matplotlib.pyplot.ylim",
"os.path.basename",
"numpy.bitwise_not",
"lib.utils.util.validateFileName",
"torch.mul",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.xlim",
"logging.debug",
"numpy.count_nonzero",
"matplotlib.pyplot.plot",
"os.makedirs",
"progressbar.progressbar",
"numpy.zeros",
"logging.info",
"numpy.where",
"numpy.array",
"numpy.bitwise_and",
"ast.literal_eval",
"matplotlib.pyplot.xlabel"
] |
[((4587, 4638), 'logging.info', 'logging.info', (['"""Total objects of interest: %d"""', 'n_gt'], {}), "('Total objects of interest: %d', n_gt)\n", (4599, 4638), False, 'import logging\n'), ((5041, 5054), 'numpy.cumsum', 'np.cumsum', (['fp'], {}), '(fp)\n', (5050, 5054), True, 'import numpy as np\n'), ((5064, 5077), 'numpy.cumsum', 'np.cumsum', (['tp'], {}), '(tp)\n', (5073, 5077), True, 'import numpy as np\n'), ((5981, 6008), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.2)'], {}), '(0.2)\n', (6003, 6008), True, 'import matplotlib.ticker as ticker\n'), ((6091, 6118), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (6113, 6118), True, 'import matplotlib.ticker as ticker\n'), ((10568, 10635), 'logging.info', 'logging.info', (['"""Number of ground truth "%s": %d"""', 'class_name', 'num_gt'], {}), '(\'Number of ground truth "%s": %d\', class_name, num_gt)\n', (10580, 10635), False, 'import logging\n'), ((10724, 10793), 'logging.info', 'logging.info', (['"""Number of false negative "%s": %d"""', 'class_name', 'num_fn'], {}), '(\'Number of false negative "%s": %d\', class_name, num_fn)\n', (10736, 10793), False, 'import logging\n'), ((10808, 10857), 'numpy.pad', 'np.pad', (['y_score', '[0, num_fn]'], {'constant_values': '(0.0)'}), '(y_score, [0, num_fn], constant_values=0.0)\n', (10814, 10857), True, 'import numpy as np\n'), ((10870, 10919), 'numpy.pad', 'np.pad', (['y_true', '[0, num_fn]'], {'constant_values': '(True)'}), '(y_true, [0, num_fn], constant_values=True)\n', (10876, 10919), True, 'import numpy as np\n'), ((11061, 11114), 'numpy.pad', 'np.pad', (['y_score', '[0, 1000000]'], {'constant_values': '(0.0001)'}), '(y_score, [0, 1000000], constant_values=0.0001)\n', (11067, 11114), True, 'import numpy as np\n'), ((11128, 11179), 'numpy.pad', 'np.pad', (['y_true', '[0, 1000000]'], {'constant_values': '(False)'}), '(y_true, [0, 1000000], constant_values=False)\n', (11134, 11179), True, 'import numpy as np\n'), ((14489, 14541), 'lib.backend.backendDb.connect', 'backendDb.connect', (['args.gt_db_file', '"""load_to_memory"""'], {}), "(args.gt_db_file, 'load_to_memory')\n", (14506, 14541), False, 'from lib.backend import backendDb\n'), ((16393, 16418), 'numpy.nanmean', 'np.nanmean', (['acc_per_class'], {}), '(acc_per_class)\n', (16403, 16418), True, 'import numpy as np\n'), ((16648, 16663), 'numpy.copy', 'np.copy', (['input_'], {}), '(input_)\n', (16655, 16663), True, 'import numpy as np\n'), ((16771, 16803), 'numpy.array', 'np.array', (['output'], {'dtype': 'np.int64'}), '(output, dtype=np.int64)\n', (16779, 16803), True, 'import numpy as np\n'), ((17303, 17353), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (17313, 17353), True, 'import matplotlib.pyplot as plt\n'), ((17358, 17372), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (17370, 17372), True, 'import matplotlib.pyplot as plt\n'), ((17418, 17462), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(90)'}), '(tick_marks, classes, rotation=90)\n', (17428, 17462), True, 'import matplotlib.pyplot as plt\n'), ((17467, 17498), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (17477, 17498), True, 'import matplotlib.pyplot as plt\n'), ((17504, 17522), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17520, 17522), True, 'import matplotlib.pyplot as 
plt\n'), ((17527, 17553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ground truth"""'], {}), "('Ground truth')\n", (17537, 17553), True, 'import matplotlib.pyplot as plt\n'), ((17558, 17587), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (17568, 17587), True, 'import matplotlib.pyplot as plt\n'), ((17788, 17821), 'ast.literal_eval', 'ast.literal_eval', (['gt_mapping_dict'], {}), '(gt_mapping_dict)\n', (17804, 17821), False, 'import ast\n'), ((20365, 20430), 'logging.info', 'logging.info', (['"""Opening ground truth dataset: %s"""', 'args.gt_db_file'], {}), "('Opening ground truth dataset: %s', args.gt_db_file)\n", (20377, 20430), False, 'import logging\n'), ((21025, 21071), 'lib.backend.backendMedia.MediaReader', 'backendMedia.MediaReader', ([], {'rootdir': 'args.rootdir'}), '(rootdir=args.rootdir)\n', (21049, 21071), False, 'from lib.backend import backendMedia\n'), ((21566, 21598), 'progressbar.progressbar', 'progressbar.progressbar', (['entries'], {}), '(entries)\n', (21589, 21598), False, 'import progressbar\n'), ((22976, 23099), 'pandas.DataFrame', 'pd.DataFrame', (["{'class': class_names, 'IoU': iou_list, 'pr_distribution': pr_per_class,\n 'gt_distribution': gt_per_class}"], {}), "({'class': class_names, 'IoU': iou_list, 'pr_distribution':\n pr_per_class, 'gt_distribution': gt_per_class})\n", (22988, 23099), True, 'import pandas as pd\n'), ((25029, 25060), 'numpy.zeros', 'np.zeros', (['(256, 2)'], {'dtype': 'float'}), '((256, 2), dtype=float)\n', (25037, 25060), True, 'import numpy as np\n'), ((27458, 27504), 'lib.backend.backendMedia.MediaReader', 'backendMedia.MediaReader', ([], {'rootdir': 'args.rootdir'}), '(rootdir=args.rootdir)\n', (27482, 27504), False, 'from lib.backend import backendMedia\n'), ((27516, 27543), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (27524, 27543), True, 'import numpy as np\n'), ((27555, 27582), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (27563, 27582), True, 'import numpy as np\n'), ((27594, 27621), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (27602, 27621), True, 'import numpy as np\n'), ((27838, 27870), 'progressbar.progressbar', 'progressbar.progressbar', (['entries'], {}), '(entries)\n', (27861, 27870), False, 'import progressbar\n'), ((672, 707), 'numpy.concatenate', 'np.concatenate', (['([0.0], rec, [1.0])'], {}), '(([0.0], rec, [1.0]))\n', (686, 707), True, 'import numpy as np\n'), ((721, 757), 'numpy.concatenate', 'np.concatenate', (['([0.0], prec, [0.0])'], {}), '(([0.0], prec, [0.0]))\n', (735, 757), True, 'import numpy as np\n'), ((1111, 1156), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (1117, 1156), True, 'import numpy as np\n'), ((1954, 1999), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""imagefile"""'], {}), "(entry_det, 'imagefile')\n", (1975, 1999), False, 'from lib.backend import backendDb\n'), ((2015, 2055), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""name"""'], {}), "(entry_det, 'name')\n", (2036, 2055), False, 'from lib.backend import backendDb\n'), ((2721, 2761), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 0]', 'bbox_det[0]'], {}), '(bboxes_gt[:, 0], bbox_det[0])\n', (2731, 2761), True, 'import numpy as np\n'), ((2778, 2818), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 1]', 'bbox_det[1]'], {}), 
'(bboxes_gt[:, 1], bbox_det[1])\n', (2788, 2818), True, 'import numpy as np\n'), ((2835, 2907), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 0] + bboxes_gt[:, 2])', '(bbox_det[0] + bbox_det[2])'], {}), '(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox_det[0] + bbox_det[2])\n', (2845, 2907), True, 'import numpy as np\n'), ((2951, 3023), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 1] + bboxes_gt[:, 3])', '(bbox_det[1] + bbox_det[3])'], {}), '(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox_det[1] + bbox_det[3])\n', (2961, 3023), True, 'import numpy as np\n'), ((3064, 3094), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin)', '(0.0)'], {}), '(ixmax - ixmin, 0.0)\n', (3074, 3094), True, 'import numpy as np\n'), ((3107, 3137), 'numpy.maximum', 'np.maximum', (['(iymax - iymin)', '(0.0)'], {}), '(iymax - iymin, 0.0)\n', (3117, 3137), True, 'import numpy as np\n'), ((3424, 3436), 'numpy.max', 'np.max', (['IoUs'], {}), '(IoUs)\n', (3430, 3436), True, 'import numpy as np\n'), ((3497, 3591), 'logging.debug', 'logging.debug', (['"""max_IoU=%.3f for idet %d with objectid_gt %d."""', 'max_IoU', 'idet', 'objectid_gt'], {}), "('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,\n idet, objectid_gt)\n", (3510, 3591), False, 'import logging\n'), ((4689, 4712), 'numpy.bitwise_not', 'np.bitwise_not', (['ignored'], {}), '(ignored)\n', (4703, 4712), True, 'import numpy as np\n'), ((4726, 4749), 'numpy.bitwise_not', 'np.bitwise_not', (['ignored'], {}), '(ignored)\n', (4740, 4749), True, 'import numpy as np\n'), ((4825, 4850), 'numpy.count_nonzero', 'np.count_nonzero', (['ignored'], {}), '(ignored)\n', (4841, 4850), True, 'import numpy as np\n'), ((4852, 4872), 'numpy.count_nonzero', 'np.count_nonzero', (['tp'], {}), '(tp)\n', (4868, 4872), True, 'import numpy as np\n'), ((4891, 4911), 'numpy.count_nonzero', 'np.count_nonzero', (['fp'], {}), '(fp)\n', (4907, 4911), True, 'import numpy as np\n'), ((5480, 5507), 'lib.utils.util.validateFileName', 'util.validateFileName', (['name'], {}), '(name)\n', (5501, 5507), False, 'from lib.utils import util\n'), ((5608, 5641), 'os.path.join', 'op.join', (['out_dir', "('%s.png' % stem)"], {}), "(out_dir, '%s.png' % stem)\n", (5615, 5641), True, 'import os, os.path as op\n'), ((5659, 5692), 'os.path.join', 'op.join', (['out_dir', "('%s.eps' % stem)"], {}), "(out_dir, '%s.eps' % stem)\n", (5666, 5692), True, 'import os, os.path as op\n'), ((7388, 7433), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""imagefile"""'], {}), "(entry_det, 'imagefile')\n", (7409, 7433), False, 'from lib.backend import backendDb\n'), ((7449, 7489), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""name"""'], {}), "(entry_det, 'name')\n", (7470, 7489), False, 'from lib.backend import backendDb\n'), ((7506, 7547), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""score"""'], {}), "(entry_det, 'score')\n", (7527, 7547), False, 'from lib.backend import backendDb\n'), ((8280, 8320), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 0]', 'bbox_det[0]'], {}), '(bboxes_gt[:, 0], bbox_det[0])\n', (8290, 8320), True, 'import numpy as np\n'), ((8337, 8377), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 1]', 'bbox_det[1]'], {}), '(bboxes_gt[:, 1], bbox_det[1])\n', (8347, 8377), True, 'import numpy as np\n'), ((8394, 8466), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 0] + bboxes_gt[:, 2])', '(bbox_det[0] + bbox_det[2])'], {}), '(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox_det[0] + bbox_det[2])\n', (8404, 8466), True, 'import 
numpy as np\n'), ((8510, 8582), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 1] + bboxes_gt[:, 3])', '(bbox_det[1] + bbox_det[3])'], {}), '(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox_det[1] + bbox_det[3])\n', (8520, 8582), True, 'import numpy as np\n'), ((8623, 8653), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin)', '(0.0)'], {}), '(ixmax - ixmin, 0.0)\n', (8633, 8653), True, 'import numpy as np\n'), ((8666, 8696), 'numpy.maximum', 'np.maximum', (['(iymax - iymin)', '(0.0)'], {}), '(iymax - iymin, 0.0)\n', (8676, 8696), True, 'import numpy as np\n'), ((9018, 9030), 'numpy.max', 'np.max', (['IoUs'], {}), '(IoUs)\n', (9024, 9030), True, 'import numpy as np\n'), ((9091, 9185), 'logging.debug', 'logging.debug', (['"""max_IoU=%.3f for idet %d with objectid_gt %d."""', 'max_IoU', 'idet', 'objectid_gt'], {}), "('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,\n idet, objectid_gt)\n", (9104, 9185), False, 'import logging\n'), ((10136, 10161), 'numpy.bitwise_not', 'np.bitwise_not', (['y_ignored'], {}), '(y_ignored)\n', (10150, 10161), True, 'import numpy as np\n'), ((10183, 10208), 'numpy.bitwise_not', 'np.bitwise_not', (['y_ignored'], {}), '(y_ignored)\n', (10197, 10208), True, 'import numpy as np\n'), ((10695, 10719), 'numpy.count_nonzero', 'np.count_nonzero', (['y_true'], {}), '(y_true)\n', (10711, 10719), True, 'import numpy as np\n'), ((14370, 14396), 'os.path.exists', 'op.exists', (['args.gt_db_file'], {}), '(args.gt_db_file)\n', (14379, 14396), True, 'import os, os.path as op\n'), ((15865, 15878), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (15872, 15878), True, 'import numpy as np\n'), ((16353, 16366), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16360, 16366), True, 'import numpy as np\n'), ((17149, 17193), 'logging.info', 'logging.info', (['"""Normalized confusion matrix."""'], {}), "('Normalized confusion matrix.')\n", (17161, 17193), False, 'import logging\n'), ((17212, 17284), 'logging.info', 'logging.info', (['"""Confusion matrix will be computed without normalization."""'], {}), "('Confusion matrix will be computed without normalization.')\n", (17224, 17284), False, 'import logging\n'), ((17840, 17875), 'ast.literal_eval', 'ast.literal_eval', (['pred_mapping_dict'], {}), '(pred_mapping_dict)\n', (17856, 17875), False, 'import ast\n'), ((20984, 21007), 'pprint.pformat', 'pprint.pformat', (['entries'], {}), '(entries)\n', (20998, 21007), False, 'import pprint\n'), ((21966, 22061), 'cv2.resize', 'cv2.resize', (['mask_pr', '(mask_gt.shape[1], mask_gt.shape[0])'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]), interpolation=cv2\n .INTER_NEAREST)\n', (21976, 22061), False, 'import cv2\n'), ((23696, 23740), 'os.path.join', 'op.join', (['args.out_dir', 'args.out_summary_file'], {}), '(args.out_dir, args.out_summary_file)\n', (23703, 23740), True, 'import os, os.path as op\n'), ((23749, 23806), 'logging.info', 'logging.info', (['"""Will add summary to: %s"""', 'out_summary_path'], {}), "('Will add summary to: %s', out_summary_path)\n", (23761, 23806), False, 'import logging\n'), ((24024, 24036), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24034, 24036), True, 'import matplotlib.pyplot as plt\n'), ((24235, 24292), 'os.path.join', 'op.join', (['args.out_dir', "('%sconf_mat.pdf' % args.out_prefix)"], {}), "(args.out_dir, '%sconf_mat.pdf' % args.out_prefix)\n", (24242, 24292), True, 'import os, os.path as op\n'), ((24545, 24608), 'os.path.join', 'op.join', (['args.out_dir', "('%seval_result_df.csv' % 
args.out_prefix)"], {}), "(args.out_dir, '%seval_result_df.csv' % args.out_prefix)\n", (24552, 24608), True, 'import os, os.path as op\n'), ((24748, 24812), 'os.path.join', 'op.join', (['args.out_dir', "('%seval_result_ser.csv' % args.out_prefix)"], {}), "(args.out_dir, '%seval_result_ser.csv' % args.out_prefix)\n", (24755, 24812), True, 'import os, os.path as op\n'), ((25562, 25596), 'numpy.trapz', 'np.trapz', ([], {'x': 'ROC[:, 0]', 'y': 'ROC[:, 1]'}), '(x=ROC[:, 0], y=ROC[:, 1])\n', (25570, 25596), True, 'import numpy as np\n'), ((27417, 27440), 'pprint.pformat', 'pprint.pformat', (['entries'], {}), '(entries)\n', (27431, 27440), False, 'import pprint\n'), ((27670, 27682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27680, 27682), True, 'import matplotlib.pyplot as plt\n'), ((27691, 27711), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (27701, 27711), True, 'import matplotlib.pyplot as plt\n'), ((27720, 27743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (27730, 27743), True, 'import matplotlib.pyplot as plt\n'), ((27752, 27766), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (27760, 27766), True, 'import matplotlib.pyplot as plt\n'), ((27775, 27789), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (27783, 27789), True, 'import matplotlib.pyplot as plt\n'), ((28058, 28134), 'cv2.resize', 'cv2.resize', (['mask_pr', '(mask_gt.shape[1], mask_gt.shape[0])', 'cv2.INTER_NEAREST'], {}), '(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]), cv2.INTER_NEAREST)\n', (28068, 28134), False, 'import cv2\n'), ((28208, 28240), 'numpy.count_nonzero', 'np.count_nonzero', (['(mask_gt == 255)'], {}), '(mask_gt == 255)\n', (28224, 28240), True, 'import numpy as np\n'), ((28258, 28288), 'numpy.count_nonzero', 'np.count_nonzero', (['(mask_gt == 0)'], {}), '(mask_gt == 0)\n', (28274, 28288), True, 'import numpy as np\n'), ((28347, 28437), 'logging.debug', 'logging.debug', (['"""GT: positive: %d, negative: %d, others: %d."""', 'gt_pos', 'gt_neg', 'gt_other'], {}), "('GT: positive: %d, negative: %d, others: %d.', gt_pos, gt_neg,\n gt_other)\n", (28360, 28437), False, 'import logging\n'), ((30707, 30719), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30717, 30719), True, 'import matplotlib.pyplot as plt\n'), ((30728, 30748), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (30738, 30748), True, 'import matplotlib.pyplot as plt\n'), ((30757, 30780), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (30767, 30780), True, 'import matplotlib.pyplot as plt\n'), ((30789, 30803), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (30797, 30803), True, 'import matplotlib.pyplot as plt\n'), ((30812, 30826), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (30820, 30826), True, 'import matplotlib.pyplot as plt\n'), ((30835, 30899), 'matplotlib.pyplot.plot', 'plt.plot', (['ROC[:, 0]', 'ROC[:, 1]', '"""bo-"""'], {'linewidth': '(2)', 'markersize': '(6)'}), "(ROC[:, 0], ROC[:, 1], 'bo-', linewidth=2, markersize=6)\n", (30843, 30899), True, 'import matplotlib.pyplot as plt\n'), ((30924, 30984), 'os.path.join', 'op.join', (['args.out_dir', "('%srecall-prec.png' % args.out_prefix)"], {}), "(args.out_dir, '%srecall-prec.png' % args.out_prefix)\n", (30931, 30984), True, 'import os, os.path as op\n'), ((871, 903), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 
'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (881, 903), True, 'import numpy as np\n'), ((1024, 1055), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (1032, 1055), True, 'import numpy as np\n'), ((1851, 1891), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""bbox"""'], {}), "(entry_det, 'bbox')\n", (1872, 1891), False, 'from lib.backend import backendDb\n'), ((2318, 2358), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (2339, 2358), False, 'from lib.backend import backendDb\n'), ((3472, 3487), 'numpy.argmax', 'np.argmax', (['IoUs'], {}), '(IoUs)\n', (3481, 3487), True, 'import numpy as np\n'), ((3921, 3961), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (3942, 3961), False, 'from lib.backend import backendDb\n'), ((4976, 5001), 'numpy.count_nonzero', 'np.count_nonzero', (['ignored'], {}), '(ignored)\n', (4992, 5001), True, 'import numpy as np\n'), ((5708, 5741), 'os.path.join', 'op.join', (['out_dir', "('%s.txt' % stem)"], {}), "(out_dir, '%s.txt' % stem)\n", (5715, 5741), True, 'import os, os.path as op\n'), ((7285, 7325), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""bbox"""'], {}), "(entry_det, 'bbox')\n", (7306, 7325), False, 'from lib.backend import backendDb\n'), ((7843, 7883), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (7864, 7883), False, 'from lib.backend import backendDb\n'), ((9066, 9081), 'numpy.argmax', 'np.argmax', (['IoUs'], {}), '(IoUs)\n', (9075, 9081), True, 'import numpy as np\n'), ((9491, 9531), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (9512, 9531), False, 'from lib.backend import backendDb\n'), ((11392, 11401), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11399, 11401), True, 'import matplotlib.pyplot as plt\n'), ((11414, 11441), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {}), '(recall, precision)\n', (11422, 11441), True, 'import matplotlib.pyplot as plt\n'), ((11454, 11470), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (11462, 11470), True, 'import matplotlib.pyplot as plt\n'), ((11483, 11499), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (11491, 11499), True, 'import matplotlib.pyplot as plt\n'), ((11512, 11532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (11522, 11532), True, 'import matplotlib.pyplot as plt\n'), ((11545, 11568), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (11555, 11568), True, 'import matplotlib.pyplot as plt\n'), ((12035, 12044), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12042, 12044), True, 'import matplotlib.pyplot as plt\n'), ((12057, 12075), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (12065, 12075), True, 'import matplotlib.pyplot as plt\n'), ((12088, 12104), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (12096, 12104), True, 'import matplotlib.pyplot as plt\n'), ((12117, 12133), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (12125, 12133), True, 'import matplotlib.pyplot as plt\n'), ((12146, 12163), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", 
(12156, 12163), True, 'import matplotlib.pyplot as plt\n'), ((12176, 12193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (12186, 12193), True, 'import matplotlib.pyplot as plt\n'), ((15910, 15923), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (15917, 15923), True, 'import numpy as np\n'), ((22143, 22160), 'numpy.isnan', 'np.isnan', (['mask_gt'], {}), '(mask_gt)\n', (22151, 22160), True, 'import numpy as np\n'), ((23605, 23628), 'os.path.exists', 'op.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (23614, 23628), True, 'import os, os.path as op\n'), ((23642, 23667), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (23653, 23667), False, 'import os, os.path as op\n'), ((25443, 25491), 'numpy.bitwise_and', 'np.bitwise_and', (['(ROC[:, 0] != -1)', '(ROC[:, 1] != -1)'], {}), '(ROC[:, 0] != -1, ROC[:, 1] != -1)\n', (25457, 25491), True, 'import numpy as np\n'), ((25522, 25547), 'numpy.array', 'np.array', (['[0, ROC[-1, 1]]'], {}), '([0, ROC[-1, 1]])\n', (25530, 25547), True, 'import numpy as np\n'), ((28614, 28657), 'numpy.bitwise_or', 'np.bitwise_or', (['(mask_gt == 0)', '(mask_gt == 255)'], {}), '(mask_gt == 0, mask_gt == 255)\n', (28627, 28657), True, 'import numpy as np\n'), ((28780, 28801), 'torch.Tensor', 'torch.Tensor', (['mask_gt'], {}), '(mask_gt)\n', (28792, 28801), False, 'import torch\n'), ((28824, 28845), 'torch.Tensor', 'torch.Tensor', (['mask_pr'], {}), '(mask_pr)\n', (28836, 28845), False, 'import torch\n'), ((29017, 29044), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (29025, 29044), True, 'import numpy as np\n'), ((29063, 29090), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (29071, 29090), True, 'import numpy as np\n'), ((29109, 29136), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (29117, 29136), True, 'import numpy as np\n'), ((30256, 30320), 'matplotlib.pyplot.plot', 'plt.plot', (['ROC[:, 0]', 'ROC[:, 1]', '"""go-"""'], {'linewidth': '(2)', 'markersize': '(4)'}), "(ROC[:, 0], ROC[:, 1], 'go-', linewidth=2, markersize=4)\n", (30264, 30320), True, 'import matplotlib.pyplot as plt\n'), ((30333, 30348), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (30342, 30348), True, 'import matplotlib.pyplot as plt\n'), ((30630, 30653), 'os.path.exists', 'op.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (30639, 30653), True, 'import os, os.path as op\n'), ((30667, 30692), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (30678, 30692), False, 'import os, os.path as op\n'), ((2436, 2472), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""bbox"""'], {}), "(entry, 'bbox')\n", (2457, 2472), False, 'from lib.backend import backendDb\n'), ((4930, 4950), 'numpy.count_nonzero', 'np.count_nonzero', (['tp'], {}), '(tp)\n', (4946, 4950), True, 'import numpy as np\n'), ((4953, 4973), 'numpy.count_nonzero', 'np.count_nonzero', (['fp'], {}), '(fp)\n', (4969, 4973), True, 'import numpy as np\n'), ((5236, 5256), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (5244, 5256), True, 'import numpy as np\n'), ((7961, 7997), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""bbox"""'], {}), "(entry, 'bbox')\n", (7982, 7997), False, 'from lib.backend import backendDb\n'), ((11595, 11604), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11602, 11604), True, 'import matplotlib.pyplot as plt\n'), 
((12220, 12229), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12227, 12229), True, 'import matplotlib.pyplot as plt\n'), ((16229, 16242), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16236, 16242), True, 'import numpy as np\n'), ((15299, 15361), 'logging.warning', 'logging.warning', (['"""extra_metrics not supported for pascal-voc."""'], {}), "('extra_metrics not supported for pascal-voc.')\n", (15314, 15361), False, 'import logging\n'), ((16059, 16072), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16066, 16072), True, 'import numpy as np\n'), ((16117, 16130), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16124, 16130), True, 'import numpy as np\n'), ((29972, 29994), 'os.path.basename', 'op.basename', (['imagefile'], {}), '(imagefile)\n', (29983, 29994), True, 'import os, os.path as op\n'), ((29208, 29248), 'torch.mul', 'torch.mul', (['(mask_pr > val)', '(mask_gt == 255)'], {}), '(mask_pr > val, mask_gt == 255)\n', (29217, 29248), False, 'import torch\n'), ((29340, 29380), 'torch.mul', 'torch.mul', (['(mask_pr > val)', '(mask_gt != 255)'], {}), '(mask_pr > val, mask_gt != 255)\n', (29349, 29380), False, 'import torch\n'), ((29472, 29513), 'torch.mul', 'torch.mul', (['(mask_pr <= val)', '(mask_gt == 255)'], {}), '(mask_pr <= val, mask_gt == 255)\n', (29481, 29513), False, 'import torch\n'), ((29605, 29646), 'torch.mul', 'torch.mul', (['(mask_pr <= val)', '(mask_gt != 255)'], {}), '(mask_pr <= val, mask_gt != 255)\n', (29614, 29646), False, 'import torch\n')]
|
#!/usr/bin/env python
# Tests for `xclim` package, command line interface
from __future__ import annotations
import numpy as np
import pytest
import xarray as xr
from click.testing import CliRunner
import xclim
from xclim.cli import cli
from xclim.testing import open_dataset
try:
from dask.distributed import Client
except ImportError:
Client = None
K2C = 273.15
@pytest.mark.parametrize(
"indicators,indnames",
[
([xclim.atmos.tg_mean], ["tg_mean"]),
(
# Note: This test is dependent on indicator name length and terminal dimensions.
[xclim.atmos.tn_mean, xclim.atmos.ice_days],
["tn_mean", "ice_days"],
),
],
)
def test_info(indicators, indnames):
runner = CliRunner()
results = runner.invoke(cli, ["info"] + indnames)
for ind in indicators:
assert ind.title in results.output
assert ind.identifier in results.output
def test_indices():
runner = CliRunner()
results = runner.invoke(cli, ["indices"])
for name, ind in xclim.core.indicator.registry.items():
assert name.lower() in results.output
@pytest.mark.parametrize(
"indicator,indname",
[
(xclim.atmos.heating_degree_days, "heating_degree_days"),
(xclim.land.base_flow_index, "base_flow_index"),
],
)
def test_indicator_help(indicator, indname):
runner = CliRunner()
results = runner.invoke(cli, [indname, "--help"])
for name in indicator.parameters.keys():
if name not in ["ds", "indexer"]:
assert name in results.output
@pytest.mark.parametrize(
"indicator,expected,varnames",
[
("tg_mean", 272.15, ["tas"]),
("dtrvar", 0.0, ["tasmin", "tasmax"]),
("heating_degree_days", 6588.0, ["tas"]),
("solidprcptot", 31622400.0, ["tas", "pr"]),
],
)
def test_normal_computation(
tasmin_series, tasmax_series, pr_series, tmp_path, indicator, expected, varnames
):
tasmin = tasmin_series(np.ones(366) + 270.15, start="1/1/2000")
tasmax = tasmax_series(np.ones(366) + 272.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
ds = xr.Dataset(
data_vars={
"tasmin": tasmin,
"tasmax": tasmax,
"tas": xclim.atmos.tg(tasmin, tasmax),
"pr": pr,
}
)
input_file = tmp_path / "in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
args = ["-i", str(input_file), "-o", str(output_file), "-v", indicator]
runner = CliRunner()
results = runner.invoke(cli, args)
for varname in varnames:
assert f"Parsed {varname} = {varname}" in results.output
assert "Processing :" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
outvar = list(out.data_vars.values())[0]
np.testing.assert_allclose(outvar[0], expected)
def test_multi_input(tas_series, pr_series, tmp_path):
tas = tas_series(np.ones(366) + 273.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
tas_file = tmp_path / "multi_tas_in.nc"
pr_file = tmp_path / "multi_pr_in.nc"
output_file = tmp_path / "out.nc"
tas.to_dataset().to_netcdf(tas_file)
pr.to_dataset().to_netcdf(pr_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(tmp_path / "multi_*_in.nc"),
"-o",
str(output_file),
"-v",
"solidprcptot",
],
)
assert "Processing : solidprcptot" in results.output
out = xr.open_dataset(output_file)
assert out.solidprcptot.sum() == 0
def test_multi_output(tmp_path):
ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc")
input_file = tmp_path / "ws_in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"wind_speed_from_vector",
],
)
assert "Processing : wind_speed_from_vector" in results.output
def test_renaming_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.name = "tas"
tas.to_netcdf(input_file)
with xclim.set_options(cf_compliance="warn"):
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tn_mean",
"--tasmin",
"tas",
],
)
assert "Processing : tn_mean" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tn_mean[0] == 1.0
def test_indicator_chain(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tg_mean",
"growing_degree_days",
],
)
assert "Processing : tg_mean" in results.output
assert "Processing : growing_degree_days" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tg_mean[0] == 1.0
assert out.growing_degree_days[0] == 0
def test_missing_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "tn_mean"]
)
assert results.exit_code == 2
assert "'tasmin' was not found in the input dataset." in results.output
@pytest.mark.parametrize(
"options,output",
[
(["--dask-nthreads", "2"], "Error: '--dask-maxmem' must be given"),
(["--chunks", "time:90"], "100% Complete"),
(["--chunks", "time:90,lat:5"], "100% Completed"),
(["--version"], xclim.__version__),
],
)
def test_global_options(tas_series, tmp_path, options, output):
if "dask" in options[0]:
pytest.importorskip("dask.distributed")
tas = tas_series(np.ones(366), start="1/1/2000")
tas = xr.concat([tas] * 10, dim="lat")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
["-i", str(input_file), "-o", str(output_file)] + options + ["tg_mean"],
)
assert output in results.output
def test_suspicious_precipitation_flags(pr_series, tmp_path):
bad_pr = pr_series(np.zeros(365), start="1971-01-01")
# Add some strangeness
bad_pr[8] = -1e-6 # negative values
bad_pr[120] = 301 / 3600 / 24 # 301mm/day
bad_pr[121:141] = 1.1574074074074072e-05 # 1mm/day
bad_pr[200:300] = 5.787037037037036e-05 # 5mm/day
input_file = tmp_path / "bad_pr.nc"
output_file = tmp_path / "out.nc"
bad_pr.to_netcdf(input_file)
runner = CliRunner()
runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "dataflags", "pr"]
)
with xr.open_dataset(output_file) as ds:
for var in ds.data_vars:
assert var
@pytest.mark.slow
def test_dataflags_output(tmp_path, tas_series, tasmax_series, tasmin_series):
ds = xr.Dataset()
for series, val in zip([tas_series, tasmax_series, tasmin_series], [0, 10, -10]):
vals = val + K2C + np.sin(np.pi * np.arange(366 * 3) / 366)
arr = series(vals, start="1971-01-01")
ds = xr.merge([ds, arr])
input_file = tmp_path / "ws_in.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"dataflags",
"-r",
],
)
assert "Dataset passes quality control checks!" in results.output
def test_bad_usage(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
# No command
results = runner.invoke(cli, ["-i", str(input_file)])
assert "Missing command" in results.output
# Indicator not found:
results = runner.invoke(cli, ["info", "mean_ether_velocity"])
assert "Indicator 'mean_ether_velocity' not found in xclim" in results.output
# No input file given
results = runner.invoke(cli, ["-o", str(output_file), "base_flow_index"])
assert "No input file name given" in results.output
# No output file given
results = runner.invoke(cli, ["-i", str(input_file), "tg_mean"])
assert "No output file name given" in results.output
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"--dask-nthreads",
"2",
"tg_mean",
],
)
if Client is None: # dask.distributed not installed
assert "distributed scheduler is not installed" in results.output
else:
assert "'--dask-maxmem' must be given" in results.output
@pytest.mark.requires_docs
@pytest.mark.parametrize("method, pattern", [("-r", "`GH/"), ("-m", "[GH/")])
def test_release_notes(method, pattern):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", method],
)
assert ":pull:`" not in results.output
assert ":issue:`" not in results.output
assert ":user:`" not in results.output
assert pattern in results.output
@pytest.mark.parametrize(
"method, error",
[
(
["-m", "-r"],
"Cannot return both Markdown and ReStructuredText in same release_notes call.",
),
(list(), "Must specify Markdown (-m) or ReStructuredText (-r)."),
],
)
def test_release_notes_failure(method, error):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", *method],
)
assert error in results.output
def test_show_version_info(capsys):
runner = CliRunner()
results = runner.invoke(cli, ["show_version_info"])
assert "INSTALLED VERSIONS" in results.output
assert "python" in results.output
assert "boltons: installed" in results.output
|
[
"pytest.importorskip",
"xclim.atmos.tg",
"xclim.core.indicator.registry.items",
"xarray.open_dataset",
"numpy.zeros",
"numpy.ones",
"xarray.concat",
"xarray.Dataset",
"xarray.merge",
"numpy.arange",
"pytest.mark.parametrize",
"xclim.set_options",
"numpy.testing.assert_allclose",
"click.testing.CliRunner",
"xclim.testing.open_dataset"
] |
[((380, 547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicators,indnames"""', "[([xclim.atmos.tg_mean], ['tg_mean']), ([xclim.atmos.tn_mean, xclim.atmos.\n ice_days], ['tn_mean', 'ice_days'])]"], {}), "('indicators,indnames', [([xclim.atmos.tg_mean], [\n 'tg_mean']), ([xclim.atmos.tn_mean, xclim.atmos.ice_days], ['tn_mean',\n 'ice_days'])])\n", (403, 547), False, 'import pytest\n'), ((1139, 1302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicator,indname"""', "[(xclim.atmos.heating_degree_days, 'heating_degree_days'), (xclim.land.\n base_flow_index, 'base_flow_index')]"], {}), "('indicator,indname', [(xclim.atmos.\n heating_degree_days, 'heating_degree_days'), (xclim.land.\n base_flow_index, 'base_flow_index')])\n", (1162, 1302), False, 'import pytest\n'), ((1584, 1804), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicator,expected,varnames"""', "[('tg_mean', 272.15, ['tas']), ('dtrvar', 0.0, ['tasmin', 'tasmax']), (\n 'heating_degree_days', 6588.0, ['tas']), ('solidprcptot', 31622400.0, [\n 'tas', 'pr'])]"], {}), "('indicator,expected,varnames', [('tg_mean', 272.15,\n ['tas']), ('dtrvar', 0.0, ['tasmin', 'tasmax']), ('heating_degree_days',\n 6588.0, ['tas']), ('solidprcptot', 31622400.0, ['tas', 'pr'])])\n", (1607, 1804), False, 'import pytest\n'), ((6190, 6445), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options,output"""', '[([\'--dask-nthreads\', \'2\'], "Error: \'--dask-maxmem\' must be given"), ([\n \'--chunks\', \'time:90\'], \'100% Complete\'), ([\'--chunks\', \'time:90,lat:5\'\n ], \'100% Completed\'), ([\'--version\'], xclim.__version__)]'], {}), '(\'options,output\', [([\'--dask-nthreads\', \'2\'],\n "Error: \'--dask-maxmem\' must be given"), ([\'--chunks\', \'time:90\'],\n \'100% Complete\'), ([\'--chunks\', \'time:90,lat:5\'], \'100% Completed\'), ([\n \'--version\'], xclim.__version__)])\n', (6213, 6445), False, 'import pytest\n'), ((9703, 9779), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method, pattern"""', "[('-r', '`GH/'), ('-m', '[GH/')]"], {}), "('method, pattern', [('-r', '`GH/'), ('-m', '[GH/')])\n", (9726, 9779), False, 'import pytest\n'), ((751, 762), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (760, 762), False, 'from click.testing import CliRunner\n'), ((971, 982), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (980, 982), False, 'from click.testing import CliRunner\n'), ((1051, 1088), 'xclim.core.indicator.registry.items', 'xclim.core.indicator.registry.items', ([], {}), '()\n', (1086, 1088), False, 'import xclim\n'), ((1385, 1396), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1394, 1396), False, 'from click.testing import CliRunner\n'), ((2535, 2546), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2544, 2546), False, 'from click.testing import CliRunner\n'), ((2781, 2809), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (2796, 2809), True, 'import xarray as xr\n'), ((2859, 2906), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['outvar[0]', 'expected'], {}), '(outvar[0], expected)\n', (2885, 2906), True, 'import numpy as np\n'), ((3296, 3307), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3305, 3307), False, 'from click.testing import CliRunner\n'), ((3602, 3630), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (3617, 3630), True, 'import xarray as xr\n'), ((3714, 3771), 'xclim.testing.open_dataset', 'open_dataset', 
(['"""ERA5/daily_surface_cancities_1990-1993.nc"""'], {}), "('ERA5/daily_surface_cancities_1990-1993.nc')\n", (3726, 3771), False, 'from xclim.testing import open_dataset\n'), ((3892, 3903), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3901, 3903), False, 'from click.testing import CliRunner\n'), ((4918, 4946), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (4933, 4946), True, 'import xarray as xr\n'), ((5203, 5214), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5212, 5214), False, 'from click.testing import CliRunner\n'), ((5629, 5657), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (5644, 5657), True, 'import xarray as xr\n'), ((5958, 5969), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5967, 5969), False, 'from click.testing import CliRunner\n'), ((6687, 6719), 'xarray.concat', 'xr.concat', (['([tas] * 10)'], {'dim': '"""lat"""'}), "([tas] * 10, dim='lat')\n", (6696, 6719), True, 'import xarray as xr\n'), ((6840, 6851), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6849, 6851), False, 'from click.testing import CliRunner\n'), ((7492, 7503), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7501, 7503), False, 'from click.testing import CliRunner\n'), ((7818, 7830), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (7828, 7830), True, 'import xarray as xr\n'), ((8147, 8158), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8156, 8158), False, 'from click.testing import CliRunner\n'), ((8605, 8616), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8614, 8616), False, 'from click.testing import CliRunner\n'), ((9834, 9845), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (9843, 9845), False, 'from click.testing import CliRunner\n'), ((10433, 10444), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (10442, 10444), False, 'from click.testing import CliRunner\n'), ((10615, 10626), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (10624, 10626), False, 'from click.testing import CliRunner\n'), ((2119, 2131), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (2126, 2131), True, 'import numpy as np\n'), ((3045, 3057), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (3052, 3057), True, 'import numpy as np\n'), ((4264, 4276), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (4271, 4276), True, 'import numpy as np\n'), ((4431, 4470), 'xclim.set_options', 'xclim.set_options', ([], {'cf_compliance': '"""warn"""'}), "(cf_compliance='warn')\n", (4448, 4470), False, 'import xclim\n'), ((4489, 4500), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4498, 4500), False, 'from click.testing import CliRunner\n'), ((5051, 5063), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (5058, 5063), True, 'import numpy as np\n'), ((5806, 5818), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (5813, 5818), True, 'import numpy as np\n'), ((6584, 6623), 'pytest.importorskip', 'pytest.importorskip', (['"""dask.distributed"""'], {}), "('dask.distributed')\n", (6603, 6623), False, 'import pytest\n'), ((6645, 6657), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (6652, 6657), True, 'import numpy as np\n'), ((7105, 7118), 'numpy.zeros', 'np.zeros', (['(365)'], {}), '(365)\n', (7113, 7118), True, 'import numpy as np\n'), ((7618, 7646), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (7633, 7646), True, 'import xarray as xr\n'), ((8045, 8064), 'xarray.merge', 'xr.merge', 
(['[ds, arr]'], {}), '([ds, arr])\n', (8053, 8064), True, 'import xarray as xr\n'), ((8453, 8465), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (8460, 8465), True, 'import numpy as np\n'), ((1991, 2003), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (1998, 2003), True, 'import numpy as np\n'), ((2059, 2071), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (2066, 2071), True, 'import numpy as np\n'), ((2985, 2997), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (2992, 2997), True, 'import numpy as np\n'), ((2271, 2301), 'xclim.atmos.tg', 'xclim.atmos.tg', (['tasmin', 'tasmax'], {}), '(tasmin, tasmax)\n', (2285, 2301), False, 'import xclim\n'), ((7959, 7977), 'numpy.arange', 'np.arange', (['(366 * 3)'], {}), '(366 * 3)\n', (7968, 7977), True, 'import numpy as np\n')]
|
from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from scipy.sparse import issparse
import numdifftools as nd
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing as mp
import itertools, functools
from ..tools.utils import timeit
def is_outside_domain(x, domain):
x = x[None, :] if x.ndim == 1 else x
return np.any(np.logical_or(x < domain[0], x > domain[1]), axis=1)
def grad(f, x):
"""Gradient of scalar-valued function f evaluated at x"""
return nd.Gradient(f)(x)
def laplacian(f, x):
"""Laplacian of scalar field f evaluated at x"""
hes = nd.Hessdiag(f)(x)
return sum(hes)
# ---------------------------------------------------------------------------------------------------
# vector field function
@timeit
def vector_field_function(x, vf_dict, dim=None, kernel='full', **kernel_kwargs):
"""vector field function constructed by sparseVFC.
Reference: Regularized vector field learning with sparse approximation for mismatch removal, Ma, Jiayi, et al., Pattern Recognition
"""
# x=np.array(x).reshape((1, -1))
if "div_cur_free_kernels" in vf_dict.keys():
has_div_cur_free_kernels = True
else:
has_div_cur_free_kernels = False
#x = np.array(x)
if x.ndim == 1:
x = x[None, :]
if has_div_cur_free_kernels:
if kernel == 'full':
kernel_ind = 0
elif kernel == 'df_kernel':
kernel_ind = 1
elif kernel == 'cf_kernel':
kernel_ind = 2
else:
raise ValueError(f"the kernel can only be one of {'full', 'df_kernel', 'cf_kernel'}!")
K = con_K_div_cur_free(x, vf_dict["X_ctrl"], vf_dict["sigma"], vf_dict["eta"], **kernel_kwargs)[kernel_ind]
else:
Xc = vf_dict["X_ctrl"]
K = con_K(x, Xc, vf_dict["beta"], **kernel_kwargs)
K = K.dot(vf_dict["C"])
if dim is not None and not has_div_cur_free_kernels:
if np.isscalar(dim):
K = K[:, :dim]
elif dim is not None:
K = K[:, dim]
return K
@timeit
def con_K(x, y, beta, method='cdist', return_d=False):
"""con_K constructs the kernel K, where K(i, j) = k(x, y) = exp(-beta * ||x - y||^2).
Arguments
---------
x: :class:`~numpy.ndarray`
Original training data points.
y: :class:`~numpy.ndarray`
Control points used to build kernel basis functions.
beta: float
Parameter of the Gaussian kernel, k(x, y) = exp(-beta * ||x - y||^2).
return_d: bool
If True, the intermediate 3D difference matrix x - y is also returned for computing the analytical Jacobian.
Returns
-------
K: :class:`~numpy.ndarray`
the kernel to represent the vector field function.
"""
if method == 'cdist' and not return_d:
K = cdist(x, y, 'sqeuclidean')
if len(K) == 1:
K = K.flatten()
else:
n = x.shape[0]
m = y.shape[0]
# https://stackoverflow.com/questions/1721802/what-is-the-equivalent-of-matlabs-repmat-in-numpy
# https://stackoverflow.com/questions/12787475/matlabs-permute-in-python
D = np.matlib.tile(x[:, :, None], [1, 1, m]) - np.transpose(
np.matlib.tile(y[:, :, None], [1, 1, n]), [2, 1, 0])
K = np.squeeze(np.sum(D ** 2, 1))
K = -beta * K
K = np.exp(K)
if return_d:
return K, D
else:
return K
@timeit
def con_K_div_cur_free(x, y, sigma=0.8, eta=0.5):
"""Construct a convex combination of the divergence-free kernel T_df and curl-free kernel T_cf with a bandwidth sigma
and a combination coefficient gamma.
Arguments
---------
x: :class:`~numpy.ndarray`
Original training data points.
y: :class:`~numpy.ndarray`
Control points used to build kernel basis functions
sigma: float (default: `0.8`)
Bandwidth parameter.
eta: float (default: `0.5`)
Combination coefficient for the divergence-free or the curl-free kernels.
Returns
-------
A tuple of G (the combined kernel function), divergence-free kernel and curl-free kernel.
See also: :func:`sparseVFC`.
"""
m, d = x.shape
n, d = y.shape
sigma2 = sigma ** 2
G_tmp = np.matlib.tile(x[:, :, None], [1, 1, n]) - np.transpose(
np.matlib.tile(y[:, :, None], [1, 1, m]), [2, 1, 0]
)
G_tmp = np.squeeze(np.sum(G_tmp ** 2, 1))
G_tmp3 = -G_tmp / sigma2
G_tmp = -G_tmp / (2 * sigma2)
G_tmp = np.exp(G_tmp) / sigma2
G_tmp = np.kron(G_tmp, np.ones((d, d)))
x_tmp = np.matlib.tile(x, [n, 1])
y_tmp = np.matlib.tile(y, [1, m]).T
y_tmp = y_tmp.reshape((d, m * n), order='F').T
xminusy = x_tmp - y_tmp
G_tmp2 = np.zeros((d * m, d * n))
tmp4_ = np.zeros((d, d))
for i in tqdm(range(d), desc="Iterating each dimension in con_K_div_cur_free:"):
for j in np.arange(i, d):
tmp1 = xminusy[:, i].reshape((m, n), order='F')
tmp2 = xminusy[:, j].reshape((m, n), order='F')
tmp3 = tmp1 * tmp2
tmp4 = tmp4_.copy()
tmp4[i, j] = 1
tmp4[j, i] = 1
G_tmp2 = G_tmp2 + np.kron(tmp3, tmp4)
G_tmp2 = G_tmp2 / sigma2
G_tmp3 = np.kron((G_tmp3 + d - 1), np.eye(d))
G_tmp4 = np.kron(np.ones((m, n)), np.eye(d)) - G_tmp2
df_kernel, cf_kernel = (1 - eta) * G_tmp * (G_tmp2 + G_tmp3), eta * G_tmp * G_tmp4
G = df_kernel + cf_kernel
return G, df_kernel, cf_kernel
def vecfld_from_adata(adata, basis='', vf_key='VecFld'):
if basis is not None and len(basis) > 0:
vf_key = '%s_%s' % (vf_key, basis)
if vf_key not in adata.uns.keys():
raise ValueError(
f'Vector field function {vf_key} is not included in the adata object! '
f"Try firstly running dyn.tl.VectorField(adata, basis='{basis}')")
vf_dict = adata.uns[vf_key]['VecFld']
func = lambda x: vector_field_function(x, vf_dict)
return vf_dict, func
def vector_transformation(V, Q):
"""Transform vectors from PCA space to the original space using the formula:
:math:`\hat{v} = v Q^T`,
where `Q, v, \hat{v}` are the PCA loading matrix, low dimensional vector and the
transformed high dimensional vector.
Parameters
----------
V: :class:`~numpy.ndarray`
The n x k array of vectors to be transformed, where n is the number of vectors,
k the dimension.
Q: :class:`~numpy.ndarray`
PCA loading matrix with dimension d x k, where d is the dimension of the original space,
and k the number of leading PCs.
Returns
-------
ret: :class:`~numpy.ndarray`
The array of transformed vectors.
"""
return V @ Q.T
def vector_field_function_transformation(vf_func, Q):
"""Transform vector field function from PCA space to the original space.
The formula used for transformation:
:math:`\hat{f} = f Q^T`,
where `Q, f, \hat{f}` are the PCA loading matrix, low dimensional vector field function and the
transformed high dimensional vector field function.
Parameters
----------
vf_func: callable
The vector field function.
Q: :class:`~numpy.ndarray`
PCA loading matrix with dimension d x k, where d is the dimension of the original space,
and k the number of leading PCs.
Returns
-------
ret: callable
The transformed vector field function.
"""
return lambda x: vf_func.func(x) @ Q.T
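def _demo_vector_transformation():
    # Minimal usage sketch for the two transformation helpers above; the sizes
    # and the matrix Q below are illustrative assumptions, not values from the
    # original code. With d = 5 genes and k = 2 PCs, an n x k block of low
    # dimensional vectors maps to an n x d block via V @ Q.T.
    Q = np.random.rand(5, 2)  # hypothetical PCA loading matrix (d x k)
    V = np.random.rand(10, 2)  # 10 low dimensional vectors (n x k)
    V_hi = vector_transformation(V, Q)
    assert V_hi.shape == (10, 5)
    return V_hi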
# ---------------------------------------------------------------------------------------------------
# jacobian
def Jacobian_rkhs_gaussian(x, vf_dict, vectorize=False):
"""analytical Jacobian for RKHS vector field functions with Gaussian kernel.
Arguments
---------
x: :class:`~numpy.ndarray`
Coordinates where the Jacobian is evaluated.
vf_dict: dict
A dictionary containing RKHS vector field control points, Gaussian bandwidth,
and RKHS coefficients.
Essential keys: 'X_ctrl', 'beta', 'C'
Returns
-------
J: :class:`~numpy.ndarray`
Jacobian matrices stored as d-by-d-by-n numpy arrays evaluated at x.
d is the number of dimensions and n the number of coordinates in x.
"""
if x.ndim == 1:
K, D = con_K(x[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
J = (vf_dict['C'].T * K) @ D[0].T
elif not vectorize:
n, d = x.shape
J = np.zeros((d, d, n))
for i, xi in enumerate(x):
K, D = con_K(xi[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
J[:, :, i] = (vf_dict['C'].T * K) @ D[0].T
else:
K, D = con_K(x, vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
if K.ndim == 1: K = K[None, :]
J = np.einsum('nm, mi, njm -> ijn', K, vf_dict['C'], D)
return -2 * vf_dict['beta'] * J
def Jacobian_rkhs_gaussian_parallel(x, vf_dict, cores=None):
n = len(x)
if cores is None: cores = mp.cpu_count()
n_j_per_core = int(np.ceil(n / cores))
xx = []
for i in range(0, n, n_j_per_core):
xx.append(x[i:i+n_j_per_core])
#with mp.Pool(cores) as p:
# ret = p.starmap(Jacobian_rkhs_gaussian, zip(xx, itertools.repeat(vf_dict)))
with ThreadPool(cores) as p:
ret = p.starmap(Jacobian_rkhs_gaussian, zip(xx, itertools.repeat(vf_dict)))
ret = [np.transpose(r, axes=(2, 0, 1)) for r in ret]
ret = np.transpose(np.vstack(ret), axes=(1, 2, 0))
return ret
def Jacobian_numerical(f, input_vector_convention='row'):
'''
Get the numerical Jacobian of the vector field function.
If the input_vector_convention is 'row', it means that fjac takes row vectors
as input, otherwise the input should be an array of column vectors. Note that
the returned Jacobian behaves exactly the same if the input is a 1d array.
The column vector convention is slightly faster than the row vector convention.
So the matrix of row vector convention is converted into column vector convention
under the hood.
No matter the input vector convention, the returned Jacobian is of the following
format:
df_1/dx_1 df_1/dx_2 df_1/dx_3 ...
df_2/dx_1 df_2/dx_2 df_2/dx_3 ...
df_3/dx_1 df_3/dx_2 df_3/dx_3 ...
... ... ... ...
'''
fjac = nd.Jacobian(lambda x: f(x.T).T)
if input_vector_convention == 'row' or input_vector_convention == 0:
def f_aux(x):
x = x.T
return fjac(x)
return f_aux
else:
return fjac
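def _demo_jacobian_numerical():
    # Minimal sketch of the numerical Jacobian helper above; the linear field
    # and the evaluation point are illustrative assumptions. For f(x) = x @ A.T
    # the Jacobian is A itself, so the numerical estimate should recover A.
    A = np.array([[0.0, 1.0], [-1.0, 0.0]])
    fjac = Jacobian_numerical(lambda x: x @ A.T, input_vector_convention='row')
    J = fjac(np.array([0.5, 0.5]))
    assert np.allclose(J, A)
    return J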
@timeit
def elementwise_jacobian_transformation(Js, qi, qj):
"""Inverse transform low dimensional k x k Jacobian matrix (:math:`\partial F_i / \partial x_j`) back to the
d-dimensional gene expression space. The formula used to inverse transform Jacobian matrix calculated from
low dimension (PCs) is:
:math:`Jac = Q J Q^T`,
where `Q, J, Jac` are the PCA loading matrix, low dimensional Jacobian matrix and the inverse transformed high
dimensional Jacobian matrix. This function takes only one row from Q to form qi or qj.
Parameters
----------
Js: :class:`~numpy.ndarray`
k x k x n matrices of n k-by-k Jacobians.
qi: :class:`~numpy.ndarray`
The i-th row of the PC loading matrix Q with dimension d x k, corresponding to the regulator gene i.
qj: :class:`~numpy.ndarray`
The j-th row of the PC loading matrix Q with dimension d x k, corresponding to the effector gene j.
Returns
-------
ret: :class:`~numpy.ndarray`
The calculated vector of Jacobian matrix (:math:`\partial F_i / \partial x_j`) for each cell.
"""
Js = np.atleast_3d(Js)
n = Js.shape[2]
ret = np.zeros(n)
for i in tqdm(range(n), "calculating Jacobian for each cell"):
ret[i] = qi @ Js[:, :, i] @ qj
return ret
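def _demo_elementwise_jacobian_transformation():
    # Minimal sketch of the element-wise transform above; the loading matrix and
    # the Jacobians are random illustrative values, and the sketch assumes the
    # timeit decorator passes the wrapped function's return value through.
    # Picking rows qi = Q[0] and qj = Q[1] should reproduce entry (0, 1) of the
    # full product Q @ J @ Q.T for every cell.
    Q = np.random.rand(4, 2)  # hypothetical d x k loading matrix
    Js = np.random.rand(2, 2, 3)  # k x k Jacobians for 3 cells
    ret = elementwise_jacobian_transformation(Js, Q[0], Q[1])
    full = np.stack([Q @ Js[:, :, i] @ Q.T for i in range(3)], axis=2)
    assert np.allclose(ret, full[0, 1, :])
    return ret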
@timeit
def subset_jacobian_transformation(Js, Qi, Qj, cores=1):
"""Transform Jacobian matrix (:math:`\partial F_i / \partial x_j`) from PCA space to the original space.
The formula used for transformation:
:math:`\hat{J} = Q J Q^T`,
where `Q, J, \hat{J}` are the PCA loading matrix, low dimensional Jacobian matrix and the inverse transformed high
dimensional Jacobian matrix. This function takes multiple rows from Q to form Qi or Qj.
Parameters
----------
Js: :class:`~numpy.ndarray`
k x k x n matrices of n k-by-k Jacobians computed in the PCA space, which will be inverse transformed back to the high dimensional gene expression space.
Qi: :class:`~numpy.ndarray`
Sampled genes' PCA loading matrix with dimension n' x n_PCs, from which local dimension Jacobian matrix (k x k)
will be inverse transformed back to high dimension.
Qj: :class:`~numpy.ndarray`
Sampled genes' (sample genes can be the same as those in Qi or different) PCs loading matrix with dimension
n' x n_PCs, from which local dimension Jacobian matrix (k x k) will be inverse transformed back to high dimension.
cores: int (default: 1):
Number of cores to calculate Jacobian. If cores is set to be > 1, multiprocessing will be used to
parallel the Jacobian calculation.
Returns
-------
ret: :class:`~numpy.ndarray`
The calculated Jacobian matrix (n_gene x n_gene x n_obs) for each cell.
"""
Js = np.atleast_3d(Js)
Qi = np.atleast_2d(Qi)
Qj = np.atleast_2d(Qj)
d1, d2, n = Qi.shape[0], Qj.shape[0], Js.shape[2]
ret = np.zeros((d1, d2, n))
if cores == 1:
ret = transform_jacobian(Js, Qi, Qj, pbar=True)
else:
if cores is None: cores = mp.cpu_count()
n_j_per_core = int(np.ceil(n / cores))
JJ = []
for i in range(0, n, n_j_per_core):
JJ.append(Js[:, :, i:i+n_j_per_core])
with ThreadPool(cores) as p:
ret = p.starmap(transform_jacobian, zip(JJ,
itertools.repeat(Qi), itertools.repeat(Qj)))
ret = [np.transpose(r, axes=(2, 0, 1)) for r in ret]
ret = np.transpose(np.vstack(ret), axes=(1, 2, 0))
return ret
def transform_jacobian(Js, Qi, Qj, pbar=False):
d1, d2, n = Qi.shape[0], Qj.shape[0], Js.shape[2]
ret = np.zeros((d1, d2, n), dtype=np.float32)
if pbar:
iterj = tqdm(range(n), desc='Transforming subset Jacobian')
else:
iterj = range(n)
for i in iterj:
J = Js[:, :, i]
ret[:, :, i] = Qi @ J @ Qj.T
return ret
def average_jacobian_by_group(Js, group_labels):
"""
Returns a dictionary of averaged jacobians with group names as the keys.
No vectorized indexing was used due to its high memory cost.
"""
d1, d2, _ = Js.shape
groups = np.unique(group_labels)
J_mean = {}
N = {}
for i, g in enumerate(group_labels):
if g in J_mean.keys():
J_mean[g] += Js[:, :, i]
N[g] += 1
else:
J_mean[g] = Js[:, :, i]
N[g] = 1
for g in groups:
J_mean[g] /= N[g]
return J_mean
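def _demo_average_jacobian_by_group():
    # Minimal sketch of the group-averaging helper above; the Jacobians and the
    # group labels are illustrative values. Averaging three 2 x 2 Jacobians over
    # the labels ['a', 'a', 'b'] should give 1.5 * I for 'a' and 4 * I for 'b'.
    Js = np.stack([np.eye(2), 2 * np.eye(2), 4 * np.eye(2)], axis=2)  # d x d x n
    J_mean = average_jacobian_by_group(Js, np.array(['a', 'a', 'b']))
    assert np.allclose(J_mean['a'], 1.5 * np.eye(2))
    assert np.allclose(J_mean['b'], 4 * np.eye(2))
    return J_mean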
# ---------------------------------------------------------------------------------------------------
# dynamical properties
def _divergence(f, x):
"""Divergence of the reconstructed vector field function f evaluated at x"""
jac = nd.Jacobian(f)(x)
return np.trace(jac)
@timeit
def compute_divergence(f_jac, X, vectorize_size=1):
"""Calculate divergence for many samples by taking the trace of a Jacobian matrix.
vectorize_size is used to control the number of samples computed in each vectorized batch.
If vectorize_size = 1, there's no vectorization whatsoever.
If vectorize_size = None, all samples are vectorized.
"""
n = len(X)
if vectorize_size is None: vectorize_size = n
div = np.zeros(n)
for i in tqdm(range(0, n, vectorize_size), desc="Calculating divergence"):
J = f_jac(X[i:i+vectorize_size])
div[i:i+vectorize_size] = np.trace(J)
return div
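def _demo_compute_divergence():
    # Minimal sketch of the divergence helper above; the Jacobian callable is an
    # illustrative assumption and the sketch assumes the timeit decorator passes
    # the return value through. For a linear field v = A x the Jacobian is A
    # everywhere, so the divergence should equal trace(A) at every sample.
    A = np.array([[1.0, 2.0], [0.0, 3.0]])
    f_jac = lambda x: np.stack([A] * len(x), axis=2)  # d x d x n, as expected above
    div = compute_divergence(f_jac, np.random.rand(5, 2))
    assert np.allclose(div, np.trace(A))
    return div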
def acceleration_(v, J):
if v.ndim == 1: v = v[:, None]
return J.dot(v)
def curvature_1(a, v):
"""https://link.springer.com/article/10.1007/s12650-018-0474-6"""
if v.ndim == 1: v = v[:, None]
kappa = np.linalg.norm(np.outer(v, a)) / np.linalg.norm(v)**3
return kappa
def curvature_2(a, v):
"""https://dl.acm.org/doi/10.5555/319351.319441"""
# if v.ndim == 1: v = v[:, None]
kappa = (np.multiply(a, np.dot(v, v)) - np.multiply(v, np.dot(v, a))) / np.linalg.norm(v)**4
return kappa
def torsion_(v, J, a):
"""only works in 3D"""
if v.ndim == 1: v = v[:, None]
tau = np.outer(v, a).dot(J.dot(a)) / np.linalg.norm(np.outer(v, a))**2
return tau
@timeit
def compute_acceleration(vf, f_jac, X, return_all=False):
"""Calculate acceleration for many samples via
.. math::
a = J \cdot v.
"""
n = len(X)
acce = np.zeros((n, X.shape[1]))
v_ = vf(X)
J_ = f_jac(X)
for i in tqdm(range(n), desc=f"Calculating acceleration"):
v = v_[i]
J = J_[:, :, i]
acce[i] = acceleration_(v, J).flatten()
if return_all:
return v_, J_, acce
else:
return acce
@timeit
def compute_curvature(vf, f_jac, X, formula=2):
"""Calculate curvature for many samples via
Formula 1:
.. math::
\kappa = \frac{||\mathbf{v} \times \mathbf{a}||}{||\mathbf{V}||^3}
Formula 2:
.. math::
\kappa = \frac{||\mathbf{Jv} (\mathbf{v} \cdot \mathbf{v}) - ||\mathbf{v} (\mathbf{v} \cdot \mathbf{Jv})}{||\mathbf{V}||^4}
"""
n = len(X)
curv = np.zeros(n)
v, _, a = compute_acceleration(vf, f_jac, X, return_all=True)
cur_mat = np.zeros((n, X.shape[1])) if formula == 2 else None
for i in tqdm(range(n), desc="Calculating curvature"):
if formula == 1:
curv[i] = curvature_1(a[i], v[i])
elif formula == 2:
cur_mat[i] = curvature_2(a[i], v[i])
curv[i] = np.linalg.norm(cur_mat[i])
return (curv, cur_mat)
@timeit
def compute_torsion(vf, f_jac, X):
"""Calculate torsion for many samples via
.. math::
\tau = \frac{(\mathbf{v} \times \mathbf{a}) \cdot (\mathbf{J} \cdot \mathbf{a})}{||\mathbf{V} \times \mathbf{a}||^2}
"""
if X.shape[1] != 3:
raise Exception('torsion is only defined in 3 dimensions.')
n = len(X)
tor = np.zeros((n, X.shape[1], X.shape[1]))
v, J, a = compute_acceleration(vf, f_jac, X, return_all=True)
for i in tqdm(range(n), desc="Calculating torsion"):
tor[i] = torsion_(v[i], J[:, :, i], a[i])
return tor
def _curl(f, x, method='analytical', VecFld=None, jac=None):
"""Curl of the reconstructed vector field f evaluated at x in 3D"""
if jac is None:
if method == 'analytical' and VecFld is not None:
jac = Jacobian_rkhs_gaussian(x, VecFld)
else:
jac = nd.Jacobian(f)(x)
return np.array([jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]])
def curl2d(f, x, method='analytical', VecFld=None, jac=None):
"""Curl of the reconstructed vector field f evaluated at x in 2D"""
if jac is None:
if method == 'analytical' and VecFld is not None:
jac = Jacobian_rkhs_gaussian(x, VecFld)
else:
jac = nd.Jacobian(f)(x)
curl = jac[1, 0] - jac[0, 1]
return curl
@timeit
def compute_curl(f_jac, X):
"""Calculate curl for many samples for 2/3 D systems.
"""
if X.shape[1] > 3:
raise Exception('curl is only defined in 2 or 3 dimensions.')
n = len(X)
if X.shape[1] == 2:
curl = np.zeros(n)
f = curl2d
else:
curl = np.zeros((n, 3))  # _curl returns a 3-component vector per sample
f = _curl
for i in tqdm(range(n), desc=f"Calculating {X.shape[1]}-D curl"):
J = f_jac(X[i])
curl[i] = f(None, None, method='analytical', VecFld=None, jac=J)
return curl
# ---------------------------------------------------------------------------------------------------
# ranking related utilities
def get_metric_gene_in_rank(mat, genes, neg=False):
metric_in_rank = mat.mean(0).A1 if issparse(mat) else mat.mean(0)
rank = metric_in_rank.argsort() if neg else metric_in_rank.argsort()[::-1]
metric_in_rank, genes_in_rank = metric_in_rank[rank], genes[rank]
return metric_in_rank, genes_in_rank
def get_metric_gene_in_rank_by_group(mat, genes, groups, grp, neg=False):
mask = groups == grp
if type(mask) == pd.Series: mask = mask.values
gene_wise_metrics, group_wise_metrics = mat[mask, :].mean(0).A1 if issparse(mat) else mat[mask, :].mean(0), \
mat[mask, :].mean(0).A1 if issparse(mat) else mat[mask, :].mean(0)
rank = gene_wise_metrics.argsort() if neg else gene_wise_metrics.argsort()[::-1]
gene_wise_metrics, genes_in_rank = gene_wise_metrics[rank], genes[rank]
return gene_wise_metrics, group_wise_metrics, genes_in_rank
def get_sorted_metric_genes_df(df, genes, neg=False):
sorted_metric = pd.DataFrame({key: (sorted(values, reverse=False) if neg else sorted(values, reverse=True))
for key, values in df.transpose().iterrows()})
sorted_genes = pd.DataFrame({key: (genes[values.argsort()] if neg else genes[values.argsort()[::-1]])
for key, values in df.transpose().iterrows()})
return sorted_metric, sorted_genes
def rank_vector_calculus_metrics(mat, genes, group, groups, uniq_group):
if issparse(mat):
mask = mat.data > 0
pos_mat, neg_mat = mat.copy(), mat.copy()
pos_mat.data[~ mask], neg_mat.data[mask] = 0, 0
pos_mat.eliminate_zeros()
neg_mat.eliminate_zeros()
else:
mask = mat > 0
pos_mat, neg_mat = mat.copy(), mat.copy()
pos_mat[~ mask], neg_mat[mask] = 0, 0
if group is None:
metric_in_rank, genes_in_rank = get_metric_gene_in_rank(abs(mat), genes)
pos_metric_in_rank, pos_genes_in_rank = get_metric_gene_in_rank(pos_mat, genes)
neg_metric_in_rank, neg_genes_in_rank = get_metric_gene_in_rank(neg_mat, genes, neg=True)
return metric_in_rank, genes_in_rank, pos_metric_in_rank, pos_genes_in_rank, neg_metric_in_rank, neg_genes_in_rank
else:
gene_wise_metrics, gene_wise_genes, gene_wise_pos_metrics, gene_wise_pos_genes, gene_wise_neg_metrics, gene_wise_neg_genes = {}, {}, {}, {}, {}, {}
group_wise_metrics, group_wise_genes, group_wise_pos_metrics, group_wise_pos_genes, group_wise_neg_metrics, group_wise_neg_genes = {}, {}, {}, {}, {}, {}
for i, grp in tqdm(enumerate(uniq_group), desc='ranking genes across groups'):
gene_wise_metrics[grp], group_wise_metrics[grp], gene_wise_genes[grp] = None, None, None
gene_wise_metrics[grp], group_wise_metrics[grp], gene_wise_genes[grp] = \
get_metric_gene_in_rank_by_group(abs(mat), genes, groups, grp)
gene_wise_pos_metrics[grp], group_wise_pos_metrics[grp], gene_wise_pos_genes[grp] = None, None, None
gene_wise_pos_metrics[grp], group_wise_pos_metrics[grp], gene_wise_pos_genes[grp] = \
get_metric_gene_in_rank_by_group(pos_mat, genes, groups, grp)
gene_wise_neg_metrics[grp], group_wise_neg_metrics[grp], gene_wise_neg_genes[grp] = None, None, None
gene_wise_neg_metrics[grp], group_wise_neg_metrics[grp], gene_wise_neg_genes[grp] = \
get_metric_gene_in_rank_by_group(neg_mat, genes, groups, grp, neg=True)
metric_in_group_rank_by_gene, genes_in_group_rank_by_gene = \
get_sorted_metric_genes_df(pd.DataFrame(group_wise_metrics), genes)
pos_metric_gene_rank_by_group, pos_genes_group_rank_by_gene = \
get_sorted_metric_genes_df(pd.DataFrame(group_wise_pos_metrics), genes)
neg_metric_in_group_rank_by_gene, neg_genes_in_group_rank_by_gene = \
get_sorted_metric_genes_df(pd.DataFrame(group_wise_neg_metrics), genes, neg=True)
metric_in_gene_rank_by_group, genes_in_gene_rank_by_group = \
pd.DataFrame(gene_wise_metrics), pd.DataFrame(gene_wise_genes)
pos_metric_in_gene_rank_by_group, pos_genes_in_gene_rank_by_group = \
pd.DataFrame(gene_wise_pos_metrics), pd.DataFrame(gene_wise_pos_genes)
neg_metric_in_gene_rank_by_group, neg_genes_in_gene_rank_by_group = \
pd.DataFrame(gene_wise_neg_metrics), pd.DataFrame(gene_wise_neg_genes)
return (metric_in_gene_rank_by_group, genes_in_gene_rank_by_group, pos_metric_in_gene_rank_by_group,
pos_genes_in_gene_rank_by_group, neg_metric_in_gene_rank_by_group, neg_genes_in_gene_rank_by_group,
metric_in_group_rank_by_gene, genes_in_group_rank_by_gene, pos_metric_gene_rank_by_group,
pos_genes_group_rank_by_gene, neg_metric_in_group_rank_by_gene, neg_genes_in_group_rank_by_gene,)
|
[
"numdifftools.Hessdiag",
"numpy.trace",
"numpy.sum",
"scipy.sparse.issparse",
"numpy.einsum",
"numpy.ones",
"numdifftools.Gradient",
"numpy.arange",
"numpy.exp",
"numpy.matlib.tile",
"numpy.linalg.norm",
"numpy.unique",
"multiprocessing.cpu_count",
"numpy.atleast_2d",
"pandas.DataFrame",
"multiprocessing.dummy.Pool",
"numpy.transpose",
"numpy.kron",
"scipy.spatial.distance.cdist",
"numpy.ceil",
"numpy.dot",
"numdifftools.Jacobian",
"numpy.vstack",
"itertools.repeat",
"numpy.atleast_3d",
"numpy.outer",
"numpy.isscalar",
"numpy.zeros",
"numpy.array",
"numpy.logical_or",
"numpy.eye"
] |
[((3391, 3400), 'numpy.exp', 'np.exp', (['K'], {}), '(K)\n', (3397, 3400), True, 'import numpy as np\n'), ((4647, 4672), 'numpy.matlib.tile', 'np.matlib.tile', (['x', '[n, 1]'], {}), '(x, [n, 1])\n', (4661, 4672), True, 'import numpy as np\n'), ((4805, 4829), 'numpy.zeros', 'np.zeros', (['(d * m, d * n)'], {}), '((d * m, d * n))\n', (4813, 4829), True, 'import numpy as np\n'), ((4843, 4859), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (4851, 4859), True, 'import numpy as np\n'), ((12110, 12127), 'numpy.atleast_3d', 'np.atleast_3d', (['Js'], {}), '(Js)\n', (12123, 12127), True, 'import numpy as np\n'), ((12158, 12169), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (12166, 12169), True, 'import numpy as np\n'), ((14054, 14071), 'numpy.atleast_3d', 'np.atleast_3d', (['Js'], {}), '(Js)\n', (14067, 14071), True, 'import numpy as np\n'), ((14081, 14098), 'numpy.atleast_2d', 'np.atleast_2d', (['Qi'], {}), '(Qi)\n', (14094, 14098), True, 'import numpy as np\n'), ((14108, 14125), 'numpy.atleast_2d', 'np.atleast_2d', (['Qj'], {}), '(Qj)\n', (14121, 14125), True, 'import numpy as np\n'), ((14191, 14212), 'numpy.zeros', 'np.zeros', (['(d1, d2, n)'], {}), '((d1, d2, n))\n', (14199, 14212), True, 'import numpy as np\n'), ((14918, 14957), 'numpy.zeros', 'np.zeros', (['(d1, d2, n)'], {'dtype': 'np.float32'}), '((d1, d2, n), dtype=np.float32)\n', (14926, 14957), True, 'import numpy as np\n'), ((15425, 15448), 'numpy.unique', 'np.unique', (['group_labels'], {}), '(group_labels)\n', (15434, 15448), True, 'import numpy as np\n'), ((16018, 16031), 'numpy.trace', 'np.trace', (['jac'], {}), '(jac)\n', (16026, 16031), True, 'import numpy as np\n'), ((16492, 16503), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (16500, 16503), True, 'import numpy as np\n'), ((17579, 17604), 'numpy.zeros', 'np.zeros', (['(n, X.shape[1])'], {}), '((n, X.shape[1]))\n', (17587, 17604), True, 'import numpy as np\n'), ((18271, 18282), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (18279, 18282), True, 'import numpy as np\n'), ((19053, 19090), 'numpy.zeros', 'np.zeros', (['(n, X.shape[1], X.shape[1])'], {}), '((n, X.shape[1], X.shape[1]))\n', (19061, 19090), True, 'import numpy as np\n'), ((19608, 19687), 'numpy.array', 'np.array', (['[jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]]'], {}), '([jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]])\n', (19616, 19687), True, 'import numpy as np\n'), ((22186, 22199), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (22194, 22199), False, 'from scipy.sparse import issparse\n'), ((400, 443), 'numpy.logical_or', 'np.logical_or', (['(x < domain[0])', '(x > domain[1])'], {}), '(x < domain[0], x > domain[1])\n', (413, 443), True, 'import numpy as np\n'), ((544, 558), 'numdifftools.Gradient', 'nd.Gradient', (['f'], {}), '(f)\n', (555, 558), True, 'import numdifftools as nd\n'), ((648, 662), 'numdifftools.Hessdiag', 'nd.Hessdiag', (['f'], {}), '(f)\n', (659, 662), True, 'import numdifftools as nd\n'), ((1989, 2005), 'numpy.isscalar', 'np.isscalar', (['dim'], {}), '(dim)\n', (2000, 2005), True, 'import numpy as np\n'), ((2868, 2894), 'scipy.spatial.distance.cdist', 'cdist', (['x', 'y', '"""sqeuclidean"""'], {}), "(x, y, 'sqeuclidean')\n", (2873, 2894), False, 'from scipy.spatial.distance import cdist\n'), ((4323, 4363), 'numpy.matlib.tile', 'np.matlib.tile', (['x[:, :, None]', '[1, 1, n]'], {}), '(x[:, :, None], [1, 1, n])\n', (4337, 4363), True, 'import numpy as np\n'), ((4469, 4490), 'numpy.sum', 'np.sum', (['(G_tmp ** 2)', 
'(1)'], {}), '(G_tmp ** 2, 1)\n', (4475, 4490), True, 'import numpy as np\n'), ((4567, 4580), 'numpy.exp', 'np.exp', (['G_tmp'], {}), '(G_tmp)\n', (4573, 4580), True, 'import numpy as np\n'), ((4617, 4632), 'numpy.ones', 'np.ones', (['(d, d)'], {}), '((d, d))\n', (4624, 4632), True, 'import numpy as np\n'), ((4685, 4710), 'numpy.matlib.tile', 'np.matlib.tile', (['y', '[1, m]'], {}), '(y, [1, m])\n', (4699, 4710), True, 'import numpy as np\n'), ((4962, 4977), 'numpy.arange', 'np.arange', (['i', 'd'], {}), '(i, d)\n', (4971, 4977), True, 'import numpy as np\n'), ((5335, 5344), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5341, 5344), True, 'import numpy as np\n'), ((9235, 9249), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (9247, 9249), True, 'import multiprocessing as mp\n'), ((9273, 9291), 'numpy.ceil', 'np.ceil', (['(n / cores)'], {}), '(n / cores)\n', (9280, 9291), True, 'import numpy as np\n'), ((9509, 9526), 'multiprocessing.dummy.Pool', 'ThreadPool', (['cores'], {}), '(cores)\n', (9519, 9526), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((9628, 9659), 'numpy.transpose', 'np.transpose', (['r'], {'axes': '(2, 0, 1)'}), '(r, axes=(2, 0, 1))\n', (9640, 9659), True, 'import numpy as np\n'), ((9697, 9711), 'numpy.vstack', 'np.vstack', (['ret'], {}), '(ret)\n', (9706, 9711), True, 'import numpy as np\n'), ((15989, 16003), 'numdifftools.Jacobian', 'nd.Jacobian', (['f'], {}), '(f)\n', (16000, 16003), True, 'import numdifftools as nd\n'), ((16658, 16669), 'numpy.trace', 'np.trace', (['J'], {}), '(J)\n', (16666, 16669), True, 'import numpy as np\n'), ((18363, 18388), 'numpy.zeros', 'np.zeros', (['(n, X.shape[1])'], {}), '((n, X.shape[1]))\n', (18371, 18388), True, 'import numpy as np\n'), ((20304, 20315), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (20312, 20315), True, 'import numpy as np\n'), ((20360, 20379), 'numpy.zeros', 'np.zeros', (['(n, 2, 2)'], {}), '((n, 2, 2))\n', (20368, 20379), True, 'import numpy as np\n'), ((20804, 20817), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (20812, 20817), False, 'from scipy.sparse import issparse\n'), ((3201, 3241), 'numpy.matlib.tile', 'np.matlib.tile', (['x[:, :, None]', '[1, 1, m]'], {}), '(x[:, :, None], [1, 1, m])\n', (3215, 3241), True, 'import numpy as np\n'), ((3346, 3363), 'numpy.sum', 'np.sum', (['(D ** 2)', '(1)'], {}), '(D ** 2, 1)\n', (3352, 3363), True, 'import numpy as np\n'), ((4388, 4428), 'numpy.matlib.tile', 'np.matlib.tile', (['y[:, :, None]', '[1, 1, m]'], {}), '(y[:, :, None], [1, 1, m])\n', (4402, 4428), True, 'import numpy as np\n'), ((5367, 5382), 'numpy.ones', 'np.ones', (['(m, n)'], {}), '((m, n))\n', (5374, 5382), True, 'import numpy as np\n'), ((5384, 5393), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5390, 5393), True, 'import numpy as np\n'), ((8703, 8722), 'numpy.zeros', 'np.zeros', (['(d, d, n)'], {}), '((d, d, n))\n', (8711, 8722), True, 'import numpy as np\n'), ((9038, 9089), 'numpy.einsum', 'np.einsum', (['"""nm, mi, njm -> ijn"""', 'K', "vf_dict['C']", 'D'], {}), "('nm, mi, njm -> ijn', K, vf_dict['C'], D)\n", (9047, 9089), True, 'import numpy as np\n'), ((14333, 14347), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (14345, 14347), True, 'import multiprocessing as mp\n'), ((14375, 14393), 'numpy.ceil', 'np.ceil', (['(n / cores)'], {}), '(n / cores)\n', (14382, 14393), True, 'import numpy as np\n'), ((14518, 14535), 'multiprocessing.dummy.Pool', 'ThreadPool', (['cores'], {}), '(cores)\n', (14528, 14535), True, 'from multiprocessing.dummy 
import Pool as ThreadPool\n'), ((14683, 14714), 'numpy.transpose', 'np.transpose', (['r'], {'axes': '(2, 0, 1)'}), '(r, axes=(2, 0, 1))\n', (14695, 14714), True, 'import numpy as np\n'), ((14756, 14770), 'numpy.vstack', 'np.vstack', (['ret'], {}), '(ret)\n', (14765, 14770), True, 'import numpy as np\n'), ((16924, 16938), 'numpy.outer', 'np.outer', (['v', 'a'], {}), '(v, a)\n', (16932, 16938), True, 'import numpy as np\n'), ((16942, 16959), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (16956, 16959), True, 'import numpy as np\n'), ((17174, 17191), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (17188, 17191), True, 'import numpy as np\n'), ((21250, 21263), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (21258, 21263), False, 'from scipy.sparse import issparse\n'), ((21364, 21377), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (21372, 21377), False, 'from scipy.sparse import issparse\n'), ((24329, 24361), 'pandas.DataFrame', 'pd.DataFrame', (['group_wise_metrics'], {}), '(group_wise_metrics)\n', (24341, 24361), True, 'import pandas as pd\n'), ((24481, 24517), 'pandas.DataFrame', 'pd.DataFrame', (['group_wise_pos_metrics'], {}), '(group_wise_pos_metrics)\n', (24493, 24517), True, 'import pandas as pd\n'), ((24643, 24679), 'pandas.DataFrame', 'pd.DataFrame', (['group_wise_neg_metrics'], {}), '(group_wise_neg_metrics)\n', (24655, 24679), True, 'import pandas as pd\n'), ((24781, 24812), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_metrics'], {}), '(gene_wise_metrics)\n', (24793, 24812), True, 'import pandas as pd\n'), ((24814, 24843), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_genes'], {}), '(gene_wise_genes)\n', (24826, 24843), True, 'import pandas as pd\n'), ((24934, 24969), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_pos_metrics'], {}), '(gene_wise_pos_metrics)\n', (24946, 24969), True, 'import pandas as pd\n'), ((24971, 25004), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_pos_genes'], {}), '(gene_wise_pos_genes)\n', (24983, 25004), True, 'import pandas as pd\n'), ((25095, 25130), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_neg_metrics'], {}), '(gene_wise_neg_metrics)\n', (25107, 25130), True, 'import pandas as pd\n'), ((25132, 25165), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_neg_genes'], {}), '(gene_wise_neg_genes)\n', (25144, 25165), True, 'import pandas as pd\n'), ((3270, 3310), 'numpy.matlib.tile', 'np.matlib.tile', (['y[:, :, None]', '[1, 1, n]'], {}), '(y[:, :, None], [1, 1, n])\n', (3284, 3310), True, 'import numpy as np\n'), ((5246, 5265), 'numpy.kron', 'np.kron', (['tmp3', 'tmp4'], {}), '(tmp3, tmp4)\n', (5253, 5265), True, 'import numpy as np\n'), ((9589, 9614), 'itertools.repeat', 'itertools.repeat', (['vf_dict'], {}), '(vf_dict)\n', (9605, 9614), False, 'import itertools, functools\n'), ((17126, 17138), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (17132, 17138), True, 'import numpy as np\n'), ((17157, 17169), 'numpy.dot', 'np.dot', (['v', 'a'], {}), '(v, a)\n', (17163, 17169), True, 'import numpy as np\n'), ((17310, 17324), 'numpy.outer', 'np.outer', (['v', 'a'], {}), '(v, a)\n', (17318, 17324), True, 'import numpy as np\n'), ((17356, 17370), 'numpy.outer', 'np.outer', (['v', 'a'], {}), '(v, a)\n', (17364, 17370), True, 'import numpy as np\n'), ((18644, 18670), 'numpy.linalg.norm', 'np.linalg.norm', (['cur_mat[i]'], {}), '(cur_mat[i])\n', (18658, 18670), True, 'import numpy as np\n'), ((19578, 19592), 'numdifftools.Jacobian', 'nd.Jacobian', (['f'], {}), '(f)\n', (19589, 19592), 
True, 'import numdifftools as nd\n'), ((19986, 20000), 'numdifftools.Jacobian', 'nd.Jacobian', (['f'], {}), '(f)\n', (19997, 20000), True, 'import numdifftools as nd\n'), ((14623, 14643), 'itertools.repeat', 'itertools.repeat', (['Qi'], {}), '(Qi)\n', (14639, 14643), False, 'import itertools, functools\n'), ((14645, 14665), 'itertools.repeat', 'itertools.repeat', (['Qj'], {}), '(Qj)\n', (14661, 14665), False, 'import itertools, functools\n')]
|
"""
PyCLES
Desc: This is an implementation of the Common Language Effect Size (CLES) in Python
Author: <NAME>
Date: 04/05/20
"""
import numpy as np
from scipy.stats import norm
def nonparametric_cles(a, b, half_credit=True) -> float:
"""Nonparametric solver for the common language effect size. This solves
for the probability that a random draw from `a` will be greater than a random
draw from `b` using a brute force approach.
If half_credit=True then equal values between vectors will be granted half points.
e.g.
nonparametric_cles([0, 1], [0, 0], True) >> 0.75
nonparametric_cles([0, 1], [0, 0], False) >> 0.5
nonparametric_cles([1, 1], [0, 0]) >> 1.0
nonparametric_cles([0, 0], [1, 1]) >> 0.0
"""
m = np.subtract.outer(a, b)
m = np.sign(m)
if half_credit:
m = np.where(m == 0, 0.5, m)
m = np.where(m == -1, 0, m)
return np.mean(m)
def parametric_cles(a, b):
"""Parametric solver for the common language effect size. This function
assumes that your data is normally distributed. It returns the probability
that a random draw from `a` will be greater than a random draw from `b` using
the normal cumulative distribution function."""
ma, mb = np.mean(a), np.mean(b)
sd = np.sqrt(np.std(a) ** 2 + np.std(b) ** 2)  # std of the difference between a draw from `a` and a draw from `b`
return norm.cdf(x=0, loc=mb-ma, scale=sd)
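def _demo_cles():
    # Minimal usage sketch; the sample sizes and distribution parameters are
    # illustrative assumptions. For two unit-variance normal samples whose means
    # differ by 2, both estimators should agree closely (roughly 0.9), i.e. a
    # draw from `a` beats a draw from `b` most of the time.
    rng = np.random.default_rng(0)
    a = rng.normal(loc=2.0, scale=1.0, size=1000)
    b = rng.normal(loc=0.0, scale=1.0, size=1000)
    return nonparametric_cles(a, b), parametric_cles(a, b)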
|
[
"numpy.subtract.outer",
"scipy.stats.norm.cdf",
"numpy.where",
"numpy.mean",
"numpy.sign",
"numpy.sqrt"
] |
[((789, 812), 'numpy.subtract.outer', 'np.subtract.outer', (['a', 'b'], {}), '(a, b)\n', (806, 812), True, 'import numpy as np\n'), ((821, 831), 'numpy.sign', 'np.sign', (['m'], {}), '(m)\n', (828, 831), True, 'import numpy as np\n'), ((902, 925), 'numpy.where', 'np.where', (['(m == -1)', '(0)', 'm'], {}), '(m == -1, 0, m)\n', (910, 925), True, 'import numpy as np\n'), ((942, 952), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (949, 952), True, 'import numpy as np\n'), ((1318, 1344), 'numpy.sqrt', 'np.sqrt', (['(ma ** 2 + mb ** 2)'], {}), '(ma ** 2 + mb ** 2)\n', (1325, 1344), True, 'import numpy as np\n'), ((1353, 1389), 'scipy.stats.norm.cdf', 'norm.cdf', ([], {'x': '(0)', 'loc': '(mb - ma)', 'scale': 'sd'}), '(x=0, loc=mb - ma, scale=sd)\n', (1361, 1389), False, 'from scipy.stats import norm\n'), ((869, 893), 'numpy.where', 'np.where', (['(m == 0)', '(0.5)', 'm'], {}), '(m == 0, 0.5, m)\n', (877, 893), True, 'import numpy as np\n'), ((1286, 1296), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (1293, 1296), True, 'import numpy as np\n'), ((1298, 1308), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (1305, 1308), True, 'import numpy as np\n')]
|
# Author: Yubo "Paul" Yang
# Email: <EMAIL>
# Kyrt is a versatile fabric exclusive to the planet Florina of Sark.
# The fluorescent and mutable kyrt is ideal for artsy decorations.
# OK, this is a library of reasonable defaults for matplotlib figures.
# May this library restore elegance to your plots.
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
# ======================== library of defaults =========================
# expose some default colors for convenience
from matplotlib.cm import get_cmap
cmap = get_cmap('viridis')
colors = cmap.colors # 256 default colors
dark8 = [ # Colors from www.ColorBrewer.org by Cynthia A. Brewer, Geography, Pennsylvania State University.
'#1b9e77',
'#d95f02',
'#7570b3',
'#e7298a',
'#66a61e',
'#e6ab02',
'#a6761d',
'#666666'
]
errorbar_style = {
'cyq': {
'linestyle': 'none', # markers only, no connecting line
'markersize': 3.5, # readable
'markeredgecolor': 'black', # accentuate
'markeredgewidth': 0.3,
'capsize': 4,
'elinewidth': 0.5
}
}
# ======================== level 0: basic color =========================
def get_cmap(name='viridis'):
""" return color map by name
Args:
name (str, optional): name of color map, default 'viridis'
Return:
matplotlib.colors.ListedColormap: requested colormap
"""
from matplotlib import cm
cmap = cm.get_cmap(name)
return cmap
def get_norm(vmin, vmax):
""" return norm function for scalar in range (vmin, vmax)
Args:
vmin (float): value minimum
vmax (float): value maximum
Return:
matplotlib.colors.Normalize: color normalization function
"""
norm = plt.Normalize(vmin, vmax)
return norm
def scalar_colormap(vmin, vmax, name='viridis'):
""" return a function that maps a number to a color
Args:
vmin (float): minimum scalar value
vmax (float): maximum scalar value
name (str, optional): color map name, default is 'viridis'
Return:
function: float -> (float,)*4 RGBA color space
"""
cmap = get_cmap(name)
norm = get_norm(vmin, vmax)
def v2c(v): # function mapping value to color
return cmap(norm(v))
return v2c
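def _demo_scalar_colormap():
    # Minimal usage sketch for the helper above; the value range is an
    # illustrative assumption, and it relies on a matplotlib version where
    # cm.get_cmap (used by get_cmap above) is still available.
    v2c = scalar_colormap(0.0, 10.0)
    return [v2c(v) for v in (0.0, 5.0, 10.0)]  # three RGBA tuples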
def scalar_colorbar(vmin, vmax, name='viridis', **kwargs):
""" return a colorbar for scalar_color_map()
Args:
vmin (float): minimum scalar value
vmax (float): maximum scalar value
name (str, optional): color map name, default is 'viridis'
Return:
matplotlib.colorbar.Colorbar: colorbar
"""
cmap = get_cmap(name)
norm = get_norm(vmin, vmax)
# issue 3644
sm = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
sm.set_array([])
cbar = plt.colorbar(sm, **kwargs)
return cbar
# ======================== level 0: basic ax edits =========================
def figaxad(labelsize=12):
""" construct a absolute/difference (ad) figure
top 3/4 of the plot will be comparison at an absolute scale
bottom 1/4 of the plot will be comparison at a relative scale
Args:
labelsize (int, optional): tick label size
Return:
(fig, axa, axd): figure and axes for absolute and difference plots
"""
from matplotlib.gridspec import GridSpec
gs = GridSpec(4, 4)
fig = plt.figure()
axa = fig.add_subplot(gs[0:3, :])
axd = fig.add_subplot(gs[3, :], sharex=axa)
plt.setp(axa.get_xticklabels(), visible=False)
axa.tick_params(axis='y', labelsize=labelsize)
axd.tick_params(labelsize=labelsize)
fig.subplots_adjust(hspace=0)
return fig, axa, axd
def set_xy_format(ax, xfmt='%3.2f', yfmt='%3.2f'):
""" change x,y tick formats e.g. number of digits
Args:
ax (plt.Axes): matplotlib axes
xfmt (str, optional): xtick format string, default is '%3.2f'
yfmt (str, optional): ytick format string, default is '%3.2f'
"""
ax.get_xaxis().set_major_formatter(FormatStrFormatter(xfmt))
ax.get_yaxis().set_major_formatter(FormatStrFormatter(yfmt))
def set_tick_font(ax, xsize=14, ysize=14,
xweight='bold', yweight='bold', **kwargs):
""" change x,y tick fonts
Args:
ax (plt.Axes): matplotlib axes
xsize (int,optional): xtick fontsize, default is 14
ysize (int,optional): ytick fontsize, default is 14
xweight (str,optional): xtick fontweight, default is 'bold'
yweight (str,optional): ytick fontweight, default is 'bold'
kwargs (dict): other tick-related properties
"""
plt.setp(ax.get_xticklabels(), fontsize=xsize,
fontweight=xweight, **kwargs)
plt.setp(ax.get_yticklabels(), fontsize=ysize,
fontweight=yweight, **kwargs)
def set_label_font(ax, xsize=14, ysize=14,
xweight='bold', yweight='bold', **kwargs):
""" change x,y label fonts
Args:
ax (plt.Axes): matplotlib axes
xsize (int,optional): xlabel fontsize, default is 14
ysize (int,optional): ylabel fontsize, default is 14
xweight (str,optional): xlabel fontweight, default is 'bold'
yweight (str,optional): ylabel fontweight, default is 'bold'
kwargs (dict): other label-related properties
"""
plt.setp(ax.xaxis.label, fontsize=xsize,
fontweight=xweight, **kwargs)
plt.setp(ax.yaxis.label, fontsize=ysize,
fontweight=yweight, **kwargs)
def xtop(ax):
""" move xaxis label and ticks to the top
Args:
ax (plt.Axes): matplotlib axes
"""
xaxis = ax.get_xaxis()
xaxis.tick_top()
xaxis.set_label_position('top')
def yright(ax):
""" move yaxis label and ticks to the right
Args:
ax (plt.Axes): matplotlib axes
"""
yaxis = ax.get_yaxis()
yaxis.tick_right()
yaxis.set_label_position('right')
# ======================= level 1: advanced ax edits ========================
def cox(ax, x, xtlabels):
"""Add co-xticklabels at top of the plot, e.g., with a different unit
Args:
ax (plt.Axes): matplotlib axes
x (list): xtick locations
xtlabels (list): xtick labels
"""
ax1 = ax.twiny()
ax1.set_xlim(ax.get_xlim())
ax.set_xticks(x)
ax1.set_xticks(x)
ax1.set_xticklabels(xtlabels)
xtop(ax1)
return ax1
def coy(ax, y, ytlabels):
"""Add co-yticklabels on the right of the plot, e.g., with a different unit
Args:
ax (plt.Axes): matplotlib axes
y (list): ytick locations
ytlabels (list): ytick labels
"""
ax1 = ax.twinx()
ax1.set_ylim(ax.get_ylim())
ax.set_yticks(y)
ax1.set_yticks(y)
ax1.set_yticklabels(ytlabels)
yright(ax1)
return ax1
def align_ylim(ax1, ax2):
ylim1 = ax1.get_ylim()
ylim2 = ax2.get_ylim()
ymin = min(ylim1[0], ylim2[0])
ymax = max(ylim1[1], ylim2[1])
ylim = (ymin, ymax)
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
# ====================== level 0: basic legend edits =======================
def set_legend_marker_size(leg, ms=10):
handl = leg.legendHandles
msl = [ms]*len(handl) # override marker sizes here
for hand, ms in zip(handl, msl):
hand._legmarker.set_markersize(ms)
def create_legend(ax, styles, labels, **kwargs):
""" create custom legend
learned from "Composing Custom Legends"
Args:
ax (plt.Axes): matplotlib axes
Return:
plt.legend.Legend: legend artist
"""
from matplotlib.lines import Line2D
custom_lines = [Line2D([], [], **style) for style in styles]
leg = ax.legend(custom_lines, labels, **kwargs)
return leg
# ====================== level 0: global edits =======================
def set_style(style='ticks', context='talk', **kwargs):
import seaborn as sns
if (context=='talk') and ('font_scale' not in kwargs):
kwargs['font_scale'] = 0.7
sns.set_style(style)
sns.set_context(context, **kwargs)
# ====================== level 0: basic Line2D edits =======================
def get_style(line):
""" get plot styles from Line2D object
mostly copied from "Line2D.update_from"
Args:
line (Line2D): source of style
Return:
dict: line styles readily usable for another plot
"""
styles = {
'linestyle': line.get_linestyle(),
'linewidth': line.get_linewidth(),
'color': line.get_color(),
'markersize': line.get_markersize(),
'linestyle': line.get_linestyle(),
'marker': line.get_marker()
}
return styles
# ====================== level 0: basic Line2D =======================
def errorshade(ax, x, ym, ye, **kwargs):
line = ax.plot(x, ym, **kwargs)
alpha = 0.4
myc = line[0].get_color()
eline = ax.fill_between(x, ym-ye, ym+ye, color=myc, alpha=alpha)
return line, eline
# ===================== level 1: fit line ======================
def show_fit(ax, line, model, sel=None, nx=64, popt=None,
xmin=None, xmax=None, circle=True, circle_style=None,
cross=False, cross_style=None, **kwargs):
""" fit a segment of (x, y) data and show fit
get x, y data from line; use sel to make selection
Args:
ax (Axes): matplotlib axes
line (Line2D): line with data
model (callable): model function
sel (np.array, optional): boolean selector array
nx (int, optional): grid size, default 64
xmin (float, optional): grid min
xmax (float, optional): grid max
circle (bool, optional): circle selected points, default True
cross (bool, optional): cross out deselected points, default False
Return:
(np.array, np.array, list): (popt, perr, lines)
"""
import numpy as np
from scipy.optimize import curve_fit
# get and select data to fit
myx = line.get_xdata()
myy = line.get_ydata()
# show selected data
if sel is None:
sel = np.ones(len(myx), dtype=bool)
myx1 = myx[sel]
myy1 = myy[sel]
myx11 = myx[~sel]
myy11 = myy[~sel]
if xmin is None:
xmin = myx1.min()
if xmax is None:
xmax = myx1.max()
lines = []
if circle:
styles = get_style(line)
styles['linestyle'] = ''
styles['marker'] = 'o'
styles['fillstyle'] = 'none'
if circle_style is not None:
styles.update(circle_style)
line1 = ax.plot(myx[sel], myy[sel], **styles)
lines.append(line1[0])
if cross:
styles = get_style(line)
styles['linestyle'] = ''
styles['marker'] = 'x'
if cross_style is not None:
styles.update(cross_style)
line11 = ax.plot(myx11, myy11, **styles)
lines.append(line11[0])
if popt is None: # perform fit
popt, pcov = curve_fit(model, myx1, myy1)
perr = np.sqrt(np.diag(pcov))
else:
perr = None
# show fit
finex = np.linspace(xmin, xmax, nx)
line2 = ax.plot(finex, model(finex, *popt),
c=line.get_color(), **kwargs)
lines.append(line2[0])
return popt, perr, lines
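def _demo_show_fit():
    # Minimal usage sketch for show_fit; the quadratic model and the noisy data
    # are illustrative assumptions. The fitted parameters should come back close
    # to the generating values (a ~ 3, b ~ 1).
    import numpy as np
    model = lambda x, a, b: a * x ** 2 + b
    x = np.linspace(0.0, 2.0, 16)
    y = 3.0 * x ** 2 + 1.0 + 0.1 * np.random.randn(16)
    fig, ax = plt.subplots()
    line = ax.plot(x, y, 'o')[0]
    popt, perr, lines = show_fit(ax, line, model)
    return popt, perr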
def smooth_bspline(myx, myy, nxmult=10, **spl_kws):
import numpy as np
from scipy.interpolate import splrep, splev
nx = len(myx)*nxmult
idx = np.argsort(myx)
tck = splrep(myx[idx], myy[idx], **spl_kws)
finex = np.linspace(myx.min(), myx.max(), nx)
finey = splev(finex, tck)
return finex, finey
def show_spline(ax, line, spl_kws=dict(), sel=None, **kwargs):
""" show a smooth spline through given line x y
Args:
ax (plt.Axes): matplotlib axes
line (Line2D): matplotlib line object holding the x, y data
spl_kws (dict, optional): keyword arguments passed to smooth_bspline (nxmult and splrep options), default is empty
sel (np.array, optional): boolean selector for the points to interpolate, default is all points
Return:
Line2D: interpolating line
"""
import numpy as np
myx = line.get_xdata()
myy = line.get_ydata()
if sel is None:
sel = np.ones(len(myx), dtype=bool)
myx = myx[sel]
myy = myy[sel]
finex, finey = smooth_bspline(myx, myy, **spl_kws)
color = line.get_color()
line1 = ax.plot(finex, finey, c=color, **kwargs)
return line1
def krig(finex, x0, y0, length_scale, noise_level):
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, RBF
from sklearn.gaussian_process.kernels import WhiteKernel
kernel = DotProduct() + RBF(length_scale=length_scale)
kernel += WhiteKernel(noise_level=noise_level)
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(x0[:, None], y0)
ym, ye = gpr.predict(finex[:, None], return_std=True)
return ym, ye
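def _demo_krig():
    # Minimal usage sketch for krig; the data, length_scale and noise_level are
    # illustrative assumptions (these two hyperparameters usually need tuning),
    # and scikit-learn must be installed. ym is the posterior mean on the fine
    # grid and ye its pointwise standard deviation.
    import numpy as np
    x0 = np.linspace(0.0, 1.0, 8)
    y0 = 2.0 * x0 + 0.05 * np.random.randn(8)
    finex = np.linspace(0.0, 1.0, 50)
    ym, ye = krig(finex, x0, y0, length_scale=0.5, noise_level=0.01)
    return ym, ye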
def gpr_errorshade(ax, x, ym, ye,
length_scale, noise_level, fb_kwargs=None,
**kwargs):
"""WARNING: length_scale and noise_level are VERY DIFFICULT to tune """
# make errorbar plot and extract color
if ('ls' not in kwargs) and ('linestyle' not in kwargs):
kwargs['ls'] = ''
line = ax.errorbar(x, ym, ye, **kwargs)
myc = line[0].get_color()
# smoothly fit data
import numpy as np
dx = abs(x[1]-x[0])
xmin = x.min(); xmax = x.max()
finex = np.arange(xmin, xmax, dx/10.)
ylm, yle = krig(finex, x, ym-ye,
length_scale=length_scale, noise_level=noise_level)
yhm, yhe = krig(finex, x, ym+ye,
length_scale=length_scale, noise_level=noise_level)
# plot fit
if fb_kwargs is None:
fb_kwargs = {'color': myc, 'alpha': 0.4}
eline = ax.fill_between(finex, ylm-yle, yhm+yhe, **fb_kwargs)
return line[0], eline
# ===================== level 2: insets ======================
def inset_zoom(fig, ax_box, xlim, ylim, draw_func, xy_label=False):
""" show an inset that zooms into a given part of the figure
Args:
fig (plt.Figure): figure
ax_box (tuple): inset location and size (x0, y0, dx, dy) in figure ratio
xlim (tuple): (xmin, xmax)
ylim (tuple): (ymin, ymax)
draw_func (callable): draw_func(ax) should recreate the figure
xy_label (bool, optional): label inset axes, default is False
Return:
plt.Axes: inset axes
Example:
>>> ax1 = inset_zoom(fig, [0.15, 0.15, 0.3, 0.3], [0.1, 0.5], [-0.02, 0.01],
>>> lambda ax: ax.plot(x, y))
>>> ax.indicate_inset_zoom(axins)
"""
ax1 = fig.add_axes(ax_box)
ax1.set_xlim(*xlim)
ax1.set_ylim(*ylim)
draw_func(ax1)
if not xy_label:
ax1.set_xticks([])
ax1.set_yticks([])
return ax1
# ======================== composition =========================
def pretty_up(ax):
set_tick_font(ax)
set_label_font(ax)
|
[
"matplotlib.cm.get_cmap",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.diag",
"matplotlib.pyplot.Normalize",
"matplotlib.lines.Line2D",
"sklearn.gaussian_process.kernels.DotProduct",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"scipy.interpolate.splrep",
"seaborn.set_context",
"seaborn.set_style",
"sklearn.gaussian_process.kernels.RBF",
"scipy.optimize.curve_fit",
"sklearn.gaussian_process.kernels.WhiteKernel",
"scipy.interpolate.splev",
"matplotlib.gridspec.GridSpec",
"sklearn.gaussian_process.gpr.GaussianProcessRegressor"
] |
[((573, 592), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (581, 592), False, 'from matplotlib.cm import get_cmap\n'), ((1409, 1426), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['name'], {}), '(name)\n', (1420, 1426), False, 'from matplotlib import cm\n'), ((1688, 1713), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (1701, 1713), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2072), 'matplotlib.cm.get_cmap', 'get_cmap', (['name'], {}), '(name)\n', (2066, 2072), False, 'from matplotlib.cm import get_cmap\n'), ((2516, 2530), 'matplotlib.cm.get_cmap', 'get_cmap', (['name'], {}), '(name)\n', (2524, 2530), False, 'from matplotlib.cm import get_cmap\n'), ((2583, 2626), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (2604, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2681), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {}), '(sm, **kwargs)\n', (2667, 2681), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3186), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (3180, 3186), False, 'from matplotlib.gridspec import GridSpec\n'), ((3195, 3207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3205, 3207), True, 'import matplotlib.pyplot as plt\n'), ((4963, 5033), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.xaxis.label'], {'fontsize': 'xsize', 'fontweight': 'xweight'}), '(ax.xaxis.label, fontsize=xsize, fontweight=xweight, **kwargs)\n', (4971, 5033), True, 'import matplotlib.pyplot as plt\n'), ((5040, 5110), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.yaxis.label'], {'fontsize': 'ysize', 'fontweight': 'yweight'}), '(ax.yaxis.label, fontsize=ysize, fontweight=yweight, **kwargs)\n', (5048, 5110), True, 'import matplotlib.pyplot as plt\n'), ((7406, 7426), 'seaborn.set_style', 'sns.set_style', (['style'], {}), '(style)\n', (7419, 7426), True, 'import seaborn as sns\n'), ((7429, 7463), 'seaborn.set_context', 'sns.set_context', (['context'], {}), '(context, **kwargs)\n', (7444, 7463), True, 'import seaborn as sns\n'), ((10171, 10198), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (10182, 10198), True, 'import numpy as np\n'), ((10482, 10497), 'numpy.argsort', 'np.argsort', (['myx'], {}), '(myx)\n', (10492, 10497), True, 'import numpy as np\n'), ((10506, 10543), 'scipy.interpolate.splrep', 'splrep', (['myx[idx]', 'myy[idx]'], {}), '(myx[idx], myy[idx], **spl_kws)\n', (10512, 10543), False, 'from scipy.interpolate import splrep, splev\n'), ((10602, 10619), 'scipy.interpolate.splev', 'splev', (['finex', 'tck'], {}), '(finex, tck)\n', (10607, 10619), False, 'from scipy.interpolate import splrep, splev\n'), ((11650, 11686), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': 'noise_level'}), '(noise_level=noise_level)\n', (11661, 11686), False, 'from sklearn.gaussian_process.kernels import WhiteKernel\n'), ((11695, 11734), 'sklearn.gaussian_process.gpr.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel'}), '(kernel=kernel)\n', (11719, 11734), False, 'from sklearn.gaussian_process.gpr import GaussianProcessRegressor\n'), ((12301, 12333), 'numpy.arange', 'np.arange', (['xmin', 'xmax', '(dx / 10.0)'], {}), '(xmin, xmax, dx / 10.0)\n', (12310, 12333), True, 'import numpy as np\n'), ((3789, 3813), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['xfmt'], {}), '(xfmt)\n', 
(3807, 3813), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((3852, 3876), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['yfmt'], {}), '(yfmt)\n', (3870, 3876), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((7056, 7079), 'matplotlib.lines.Line2D', 'Line2D', (['[]', '[]'], {}), '([], [], **style)\n', (7062, 7079), False, 'from matplotlib.lines import Line2D\n'), ((10061, 10089), 'scipy.optimize.curve_fit', 'curve_fit', (['model', 'myx1', 'myy1'], {}), '(model, myx1, myy1)\n', (10070, 10089), False, 'from scipy.optimize import curve_fit\n'), ((11592, 11604), 'sklearn.gaussian_process.kernels.DotProduct', 'DotProduct', ([], {}), '()\n', (11602, 11604), False, 'from sklearn.gaussian_process.kernels import DotProduct, RBF\n'), ((11607, 11637), 'sklearn.gaussian_process.kernels.RBF', 'RBF', ([], {'length_scale': 'length_scale'}), '(length_scale=length_scale)\n', (11610, 11637), False, 'from sklearn.gaussian_process.kernels import DotProduct, RBF\n'), ((10109, 10122), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (10116, 10122), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.
# Copyright (c) 2019 YunYang1994 <<EMAIL>>
# License: MIT (https://opensource.org/licenses/MIT)
# This file has been modified by Graphcore Ltd.
import argparse
import json
import math
import os
import shutil
import time
import numpy as np
import core.utils as utils
import cv2
import log
import tensorflow as tf
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from ipu_utils import stages_constructor
from log import logger
from tensorflow.python import ipu
from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops
class YoloTest(object):
def __init__(self, opts):
self.input_size = opts["test"]["input_size"]
self.classes = utils.read_class_names(opts["yolo"]["classes"])
self.num_classes = len(self.classes)
self.score_threshold = opts["test"]["score_threshold"]
self.iou_threshold = opts["test"]["iou_threshold"]
self.moving_avg_decay = opts["yolo"]["moving_avg_decay"]
self.annotation_path = opts["test"]["annot_path"]
self.weight_file = opts["test"]["weight_file"]
self.write_image = opts["test"]["write_image"]
self.write_image_path = opts["test"]["write_image_path"]
self.show_label = opts["test"]["show_label"]
self.batch_size = opts["test"]["batch_size"]
self.precision = tf.float16 if opts["yolo"]["precision"] == "fp16" else tf.float32
self.use_moving_avg = opts["yolo"]["use_moving_avg"]
self.repeat_count = opts["test"]["repeat_count"]
self.use_infeed_queue = opts["test"]["use_infeed_queue"]
self.predicted_file_path = opts["test"]["predicted_file_path"]
self.ground_truth_file_path = opts["test"]["ground_truth_file_path"]
self.meta_dict = {}
self.testset = Dataset("test", opts)
# Configure arguments for targeting the IPU
config = ipu.config.IPUConfig()
config.auto_select_ipus = 1
config.configure_ipu_system()
model = YOLOV3(False, opts)
# construct model
# we will put whole network on one ipu
layers = []
# build layer functions for backbone and upsample
layers.extend(model.build_backbone())
        # the last layer of darknet53 is a classification layer, so it has 52 conv layers
assert len(layers) == 52
layers.extend(model.build_upsample())
        # there are 25 more conv layers if we count upsample as a conv layer
assert len(layers) == 52+25
        # the decoding layer and loss layer are always put on the last IPU
layers.append(model.decode_boxes)
# reuse stages_constructor so we don't need to pass params by hand
network_func = stages_constructor(
[layers],
["input_data", "nums"],
["pred_sbbox", "pred_mbbox", "pred_lbbox", "nums"])[0]
input_shape = (self.batch_size, self.input_size, self.input_size, 3)
self.lines, self.image_dict = self.load_data()
if self.use_infeed_queue:
# The dataset for feeding the graphs
def data_gen():
return self.data_generator()
with tf.device("cpu"):
ds = tf.data.Dataset.from_generator(data_gen,
output_types=(tf.float16, tf.int32),
output_shapes=(input_shape, (self.batch_size,))
)
ds = ds.repeat()
ds = ds.prefetch(self.repeat_count*10)
# The host side queues
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def model_func(input_data, nums):
pred_sbbox, pred_mbbox, pred_lbbox, nums = network_func(input_data, nums)
outfeed = outfeed_queue.enqueue(
{"pred_sbbox": pred_sbbox, "pred_mbbox": pred_mbbox, "pred_lbbox": pred_lbbox, "nums": nums})
return outfeed
def my_net():
r = loops.repeat(self.repeat_count,
model_func, [], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
self.run_loop = ipu.ipu_compiler.compile(
my_net, inputs=[])
# The outfeed dequeue has to happen after the outfeed enqueue
self.dequeue_outfeed = outfeed_queue.dequeue()
self.sess = tf.Session(config=tf.ConfigProto())
self.sess.run(infeed_queue.initializer)
else:
            # if using a feed dict, the code is simpler
            # at the cost of throughput
with tf.device("cpu"):
with tf.name_scope("input"):
# three channel images
self.input_data = tf.placeholder(
shape=input_shape, dtype=self.precision, name="input_data")
self.nums = tf.placeholder(
shape=(self.batch_size), dtype=tf.int32, name="nums")
with ipu.scopes.ipu_scope("/device:IPU:0"):
self.output = ipu.ipu_compiler.compile(
network_func, [self.input_data, self.nums])
self.sess = tf.Session(
config=tf.ConfigProto())
if self.use_moving_avg:
with tf.name_scope("ema"):
ema_obj = tf.train.ExponentialMovingAverage(
self.moving_avg_decay)
self.saver = tf.train.Saver(ema_obj.variables_to_restore())
else:
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weight_file)
def load_data(self):
with open(self.annotation_path, "r") as annotation_file:
# load_all images
lines = []
for line in annotation_file:
lines.append(line)
image_dict = self.testset.load_images(dump=False)
return lines, image_dict
def data_generator(self):
"""Generate input image and write groundtruth info
"""
if os.path.exists(self.write_image_path):
shutil.rmtree(self.write_image_path)
os.mkdir(self.write_image_path)
self.ground_truth_file = open(self.ground_truth_file_path, "w")
image_datas = []
nums = []
for num, line in enumerate(self.lines):
annotation = line.strip().split()
image_path = annotation[0]
image_name = image_path.split("/")[-1]
image = self.image_dict[line.strip()]
bbox_data_gt = np.array(
[list(map(int, box.split(","))) for box in annotation[1:]])
if len(bbox_data_gt) == 0:
bboxes_gt = []
classes_gt = []
else:
bboxes_gt, classes_gt = bbox_data_gt[:,
:4], bbox_data_gt[:, 4]
num_bbox_gt = len(bboxes_gt)
# output ground-truth
self.ground_truth_file.write(str(num)+":\n")
for i in range(num_bbox_gt):
class_name = self.classes[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox_mess = ",".join(
[class_name, xmin, ymin, xmax, ymax]) + "\n"
self.ground_truth_file.write(bbox_mess)
image_copy = np.copy(image)
org_h, org_w, _ = image.shape
image_data = utils.resize_image(
image_copy, [self.input_size, self.input_size])
            # we don't want to pass metadata through the pipeline
            # so we keep it in a dictionary instead
self.meta_dict[num] = [org_h, org_w, image_name, line]
image_datas.append(image_data)
nums.append(num)
if len(nums) < self.batch_size:
if num < len(self.lines) - 1:
continue
else:
# if there's not enough data to fill the last batch
# we repeat the last image to yield a full sized batch
for _ in range(len(image_datas), self.batch_size):
image_datas.append(image_datas[-1])
nums.append(nums[-1])
image_datas = np.array(image_datas).astype(np.float16)
yield (image_datas, nums)
if num < len(self.lines) - 1:
image_datas = []
nums = []
while True:
            # if using infeed_queue, it will need more batches
            # to pad the data and meet the required repeat_count
            # so we will use the last batch for padding
yield (image_datas, nums)
def parse_result(self, pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums):
"""Parse and write predicted result
"""
for i in range(len(nums)):
            # if a nums value is repeated, nums[i] is padding added to match
            # the required batch size, so we can stop the iteration
if i > 0 and nums[i] <= nums[i-1]:
break
num = nums[i]
pred_sbbox = pred_sbbox_list[i]
pred_mbbox = pred_mbbox_list[i]
pred_lbbox = pred_lbbox_list[i]
org_h, org_w, image_name, line = self.meta_dict[num]
image_path = line.strip().split()[0]
image = self.image_dict[line.strip()]
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
np.reshape(
pred_mbbox, (-1, 5 + self.num_classes)),
np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
# convert boxes from input_image coordinate to original image coordinate
bboxes = utils.postprocess_boxes(
pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
bboxes_pr = utils.nms(bboxes, self.iou_threshold)
if self.write_image:
image = utils.draw_bbox(
image, bboxes_pr, self.classes, show_label=self.show_label)
cv2.imwrite(self.write_image_path+image_name, image)
self.predict_result_file.write(str(num)+":\n")
for bbox in bboxes_pr:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind]
score = "%.4f" % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ",".join(
[class_name, score, xmin, ymin, xmax, ymax]) + "\n"
self.predict_result_file.write(bbox_mess)
def evaluate(self):
self.predict_result_file = open(self.predicted_file_path, "w")
if self.use_infeed_queue:
# using infeed queue to improve throughput
            # we can use an additional thread to run dequeue_outfeed to decrease latency and further improve throughput
total_samples = len(self.lines)
interaction_samples = self.batch_size*self.repeat_count
total_interactions = total_samples/interaction_samples
total_interactions = math.ceil(total_interactions)
for interaction_index in range(total_interactions):
run_start = time.time()
self.sess.run(self.run_loop)
result = self.sess.run(
self.dequeue_outfeed)
run_duration = time.time()-run_start
pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = result[
"pred_sbbox"], result["pred_mbbox"], result["pred_lbbox"], result["nums"]
for i in range(len(nums)):
# len(nums) == repeat_count
# there's repeat count number of batches for each run
if i > 0 and nums[i][0] <= nums[i-1][0]:
                        # ignore repeated data
                        # these exist only to meet the data size required when using ipu.loops.repeat
break
self.parse_result(pred_sbbox_list[i], pred_mbbox_list[i], pred_lbbox_list[i], nums[i])
logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}, repeat count: {}".format(
(interaction_index+1)*interaction_samples, len(self.lines),
run_duration,
interaction_samples/run_duration,
self.batch_size,
self.repeat_count))
else:
            # if not using infeed_queue, the session returns after every batch
data_gen = self.data_generator()
interaction_samples = self.batch_size
total_interactions = math.ceil(len(self.lines)/interaction_samples)
for interaction_index in range(total_interactions):
image_datas, nums = next(data_gen)
run_start = time.time()
pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = self.sess.run(
self.output,
feed_dict={
self.input_data: image_datas,
self.nums: nums
}
)
run_duration = time.time()-run_start
self.parse_result(pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums)
logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}".format(
(interaction_index+1)*interaction_samples,
len(self.lines),
run_duration,
interaction_samples/run_duration,
self.batch_size))
self.ground_truth_file.close()
self.predict_result_file.close()
self.sess.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="evaluation in TensorFlow", add_help=False)
parser.add_argument("--config", type=str, default="config/config_800.json",
help="json config file for yolov3.")
parser.add_argument("--test_path", type=str, default="./data/dataset/voc_test.txt",
help="data path for test")
arguments = parser.parse_args()
with open(arguments.config) as f:
opts = json.load(f)
opts['test']['annot_path'] = arguments.test_path
YoloTest(opts).evaluate()
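    # Hedged example invocation (the script name is a placeholder; the paths are just
    # the argparse defaults shown above):
    #   python3 <this_script>.py --config config/config_800.json \
    #       --test_path ./data/dataset/voc_test.txt
    # Predictions and ground truth are written to the files named in the json config
    # ("predicted_file_path" / "ground_truth_file_path").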
|
[
"os.mkdir",
"argparse.ArgumentParser",
"tensorflow.python.ipu.config.IPUConfig",
"core.utils.nms",
"tensorflow.ConfigProto",
"shutil.rmtree",
"core.utils.read_class_names",
"core.utils.postprocess_boxes",
"tensorflow.train.ExponentialMovingAverage",
"numpy.copy",
"cv2.imwrite",
"os.path.exists",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.placeholder",
"numpy.reshape",
"tensorflow.name_scope",
"core.utils.resize_image",
"tensorflow.train.Saver",
"math.ceil",
"ipu_utils.stages_constructor",
"tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue",
"tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue",
"json.load",
"tensorflow.python.ipu.scopes.ipu_scope",
"core.yolov3.YOLOV3",
"tensorflow.device",
"tensorflow.python.ipu.loops.repeat",
"time.time",
"core.dataset.Dataset",
"tensorflow.data.Dataset.from_generator",
"numpy.array",
"core.utils.draw_bbox"
] |
[((14187, 14266), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""evaluation in TensorFlow"""', 'add_help': '(False)'}), "(description='evaluation in TensorFlow', add_help=False)\n", (14210, 14266), False, 'import argparse\n'), ((781, 828), 'core.utils.read_class_names', 'utils.read_class_names', (["opts['yolo']['classes']"], {}), "(opts['yolo']['classes'])\n", (803, 828), True, 'import core.utils as utils\n'), ((1873, 1894), 'core.dataset.Dataset', 'Dataset', (['"""test"""', 'opts'], {}), "('test', opts)\n", (1880, 1894), False, 'from core.dataset import Dataset\n'), ((1965, 1987), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (1985, 1987), False, 'from tensorflow.python import ipu\n'), ((2079, 2098), 'core.yolov3.YOLOV3', 'YOLOV3', (['(False)', 'opts'], {}), '(False, opts)\n', (2085, 2098), False, 'from core.yolov3 import YOLOV3\n'), ((6215, 6252), 'os.path.exists', 'os.path.exists', (['self.write_image_path'], {}), '(self.write_image_path)\n', (6229, 6252), False, 'import os\n'), ((6311, 6342), 'os.mkdir', 'os.mkdir', (['self.write_image_path'], {}), '(self.write_image_path)\n', (6319, 6342), False, 'import os\n'), ((14646, 14658), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14655, 14658), False, 'import json\n'), ((2774, 2882), 'ipu_utils.stages_constructor', 'stages_constructor', (['[layers]', "['input_data', 'nums']", "['pred_sbbox', 'pred_mbbox', 'pred_lbbox', 'nums']"], {}), "([layers], ['input_data', 'nums'], ['pred_sbbox',\n 'pred_mbbox', 'pred_lbbox', 'nums'])\n", (2792, 2882), False, 'from ipu_utils import stages_constructor\n'), ((3689, 3724), 'tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue', 'ipu_infeed_queue.IPUInfeedQueue', (['ds'], {}), '(ds)\n', (3720, 3724), False, 'from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops\n'), ((3753, 3788), 'tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue', 'ipu_outfeed_queue.IPUOutfeedQueue', ([], {}), '()\n', (3786, 3788), False, 'from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops\n'), ((5718, 5734), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5732, 5734), True, 'import tensorflow as tf\n'), ((6266, 6302), 'shutil.rmtree', 'shutil.rmtree', (['self.write_image_path'], {}), '(self.write_image_path)\n', (6279, 6302), False, 'import shutil\n'), ((7546, 7560), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (7553, 7560), True, 'import numpy as np\n'), ((7629, 7695), 'core.utils.resize_image', 'utils.resize_image', (['image_copy', '[self.input_size, self.input_size]'], {}), '(image_copy, [self.input_size, self.input_size])\n', (7647, 7695), True, 'import core.utils as utils\n'), ((10047, 10141), 'core.utils.postprocess_boxes', 'utils.postprocess_boxes', (['pred_bbox', '(org_h, org_w)', 'self.input_size', 'self.score_threshold'], {}), '(pred_bbox, (org_h, org_w), self.input_size, self.\n score_threshold)\n', (10070, 10141), True, 'import core.utils as utils\n'), ((10178, 10215), 'core.utils.nms', 'utils.nms', (['bboxes', 'self.iou_threshold'], {}), '(bboxes, self.iou_threshold)\n', (10187, 10215), True, 'import core.utils as utils\n'), ((11506, 11535), 'math.ceil', 'math.ceil', (['total_interactions'], {}), '(total_interactions)\n', (11515, 11535), False, 'import math\n'), ((3224, 3240), 'tensorflow.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (3233, 3240), True, 'import tensorflow as tf\n'), ((3263, 3394), 'tensorflow.data.Dataset.from_generator', 
'tf.data.Dataset.from_generator', (['data_gen'], {'output_types': '(tf.float16, tf.int32)', 'output_shapes': '(input_shape, (self.batch_size,))'}), '(data_gen, output_types=(tf.float16, tf.int32\n ), output_shapes=(input_shape, (self.batch_size,)))\n', (3293, 3394), True, 'import tensorflow as tf\n'), ((4167, 4228), 'tensorflow.python.ipu.loops.repeat', 'loops.repeat', (['self.repeat_count', 'model_func', '[]', 'infeed_queue'], {}), '(self.repeat_count, model_func, [], infeed_queue)\n', (4179, 4228), False, 'from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops\n'), ((4305, 4342), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (4325, 4342), False, 'from tensorflow.python import ipu\n'), ((4376, 4419), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['my_net'], {'inputs': '[]'}), '(my_net, inputs=[])\n', (4400, 4419), False, 'from tensorflow.python import ipu\n'), ((4807, 4823), 'tensorflow.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (4816, 4823), True, 'import tensorflow as tf\n'), ((5195, 5232), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5215, 5232), False, 'from tensorflow.python import ipu\n'), ((5264, 5332), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['network_func', '[self.input_data, self.nums]'], {}), '(network_func, [self.input_data, self.nums])\n', (5288, 5332), False, 'from tensorflow.python import ipu\n'), ((5481, 5501), 'tensorflow.name_scope', 'tf.name_scope', (['"""ema"""'], {}), "('ema')\n", (5494, 5501), True, 'import tensorflow as tf\n'), ((5529, 5585), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['self.moving_avg_decay'], {}), '(self.moving_avg_decay)\n', (5562, 5585), True, 'import tensorflow as tf\n'), ((10274, 10349), 'core.utils.draw_bbox', 'utils.draw_bbox', (['image', 'bboxes_pr', 'self.classes'], {'show_label': 'self.show_label'}), '(image, bboxes_pr, self.classes, show_label=self.show_label)\n', (10289, 10349), True, 'import core.utils as utils\n'), ((10387, 10441), 'cv2.imwrite', 'cv2.imwrite', (['(self.write_image_path + image_name)', 'image'], {}), '(self.write_image_path + image_name, image)\n', (10398, 10441), False, 'import cv2\n'), ((10558, 10592), 'numpy.array', 'np.array', (['bbox[:4]'], {'dtype': 'np.int32'}), '(bbox[:4], dtype=np.int32)\n', (10566, 10592), True, 'import numpy as np\n'), ((11628, 11639), 'time.time', 'time.time', ([], {}), '()\n', (11637, 11639), False, 'import time\n'), ((13270, 13281), 'time.time', 'time.time', ([], {}), '()\n', (13279, 13281), False, 'import time\n'), ((4616, 4632), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4630, 4632), True, 'import tensorflow as tf\n'), ((4846, 4868), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (4859, 4868), True, 'import tensorflow as tf\n'), ((4951, 5025), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'input_shape', 'dtype': 'self.precision', 'name': '"""input_data"""'}), "(shape=input_shape, dtype=self.precision, name='input_data')\n", (4965, 5025), True, 'import tensorflow as tf\n'), ((5083, 5149), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'self.batch_size', 'dtype': 'tf.int32', 'name': '"""nums"""'}), "(shape=self.batch_size, dtype=tf.int32, name='nums')\n", (5097, 5149), True, 'import tensorflow as tf\n'), ((5414, 5430), 
'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5428, 5430), True, 'import tensorflow as tf\n'), ((8454, 8475), 'numpy.array', 'np.array', (['image_datas'], {}), '(image_datas)\n', (8462, 8475), True, 'import numpy as np\n'), ((9651, 9701), 'numpy.reshape', 'np.reshape', (['pred_sbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_sbbox, (-1, 5 + self.num_classes))\n', (9661, 9701), True, 'import numpy as np\n'), ((9743, 9793), 'numpy.reshape', 'np.reshape', (['pred_mbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_mbbox, (-1, 5 + self.num_classes))\n', (9753, 9793), True, 'import numpy as np\n'), ((9880, 9930), 'numpy.reshape', 'np.reshape', (['pred_lbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_lbbox, (-1, 5 + self.num_classes))\n', (9890, 9930), True, 'import numpy as np\n'), ((11798, 11809), 'time.time', 'time.time', ([], {}), '()\n', (11807, 11809), False, 'import time\n'), ((13601, 13612), 'time.time', 'time.time', ([], {}), '()\n', (13610, 13612), False, 'import time\n')]
|
"""
MIT License
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pathlib import Path
from typing import Dict
from autohyper import optimize, LowRankMetrics, HyperParameters
from torchvision import datasets, transforms
from torch.optim import Adam
from gutils import init_logger
import torchvision.models as models
import numpy as np
import torch
def main():
# indicate which hyper-parameters to optimize
dataset = torch.utils.data.DataLoader(
datasets.CIFAR10('.', download=True, transform=transforms.ToTensor()),
batch_size=128)
def epoch_trainer(hyper_parameters: Dict[str, float],
epochs) -> LowRankMetrics:
# update model/optimizer parameters based on values in @argument:
# hyper_parameters
print('Run epochs:', hyper_parameters)
model = models.resnet18()
model.train()
model = model.cuda()
metrics = LowRankMetrics(list(model.parameters()))
optimizer = Adam(model.parameters(),
lr=hyper_parameters['lr'],
weight_decay=hyper_parameters['weight_decay'],)
criterion = torch.nn.CrossEntropyLoss().cuda()
accs = list()
for epoch in epochs:
for inputs, targets in dataset:
inputs = inputs.cuda()
targets = targets.cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
accs.append(accuracy(outputs, targets)[0].item())
# run epoch training...
# at every epoch, evaluate low_rank metrics
print(f"Epoch {epoch} | Loss {np.mean(accs)}")
metrics.evaluate()
return metrics
hyper_parameters = HyperParameters(lr=True, weight_decay=True)
final_hp = optimize(epoch_trainer=epoch_trainer,
hyper_parameters=hyper_parameters)
final_hyper_parameters_dict = final_hp.final()
    # do your final training with the optimized hyper-parameters
epoch_trainer(final_hyper_parameters_dict, epochs=range(250))
def accuracy(outputs, targets, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = targets.size(0)
_, pred = outputs.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(targets.contiguous().view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous(
).view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
logger = init_logger(Path('logs'))
main()
|
[
"autohyper.HyperParameters",
"torchvision.models.resnet18",
"autohyper.optimize",
"torch.nn.CrossEntropyLoss",
"pathlib.Path",
"numpy.mean",
"torch.no_grad",
"torchvision.transforms.ToTensor"
] |
[((2854, 2897), 'autohyper.HyperParameters', 'HyperParameters', ([], {'lr': '(True)', 'weight_decay': '(True)'}), '(lr=True, weight_decay=True)\n', (2869, 2897), False, 'from autohyper import optimize, LowRankMetrics, HyperParameters\n'), ((2913, 2985), 'autohyper.optimize', 'optimize', ([], {'epoch_trainer': 'epoch_trainer', 'hyper_parameters': 'hyper_parameters'}), '(epoch_trainer=epoch_trainer, hyper_parameters=hyper_parameters)\n', (2921, 2985), False, 'from autohyper import optimize, LowRankMetrics, HyperParameters\n'), ((1837, 1854), 'torchvision.models.resnet18', 'models.resnet18', ([], {}), '()\n', (1852, 1854), True, 'import torchvision.models as models\n'), ((3243, 3258), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3256, 3258), False, 'import torch\n'), ((3748, 3760), 'pathlib.Path', 'Path', (['"""logs"""'], {}), "('logs')\n", (3752, 3760), False, 'from pathlib import Path\n'), ((1513, 1534), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1532, 1534), False, 'from torchvision import datasets, transforms\n'), ((2155, 2182), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2180, 2182), False, 'import torch\n'), ((2759, 2772), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (2766, 2772), True, 'import numpy as np\n')]
|
from pathlib import Path
import numpy as np
from tifffile import imread
from tracker.export import ExportResults
from tracker.extract_data import get_img_files
from tracker.extract_data import get_indices_pandas
from tracker.tracking import TrackingConfig, MultiCellTracker
def run_tracker(img_path, segm_path, res_path, delta_t=3, default_roi_size=2):
img_path = Path(img_path)
segm_path = Path(segm_path)
res_path = Path(res_path)
img_files = get_img_files(img_path)
segm_files = get_img_files(segm_path, 'mask')
# set roi size
# assume img shape z,x,y
dummy = np.squeeze(imread(segm_files[max(segm_files.keys())]))
img_shape = dummy.shape
masks = get_indices_pandas(imread(segm_files[max(segm_files.keys())]))
m_shape = np.stack(masks.apply(lambda x: np.max(np.array(x), axis=-1) - np.min(np.array(x), axis=-1) +1))
if len(img_shape) == 2:
if len(masks) > 10:
m_size = np.median(np.stack(m_shape)).astype(int)
roi_size = tuple([m_size*default_roi_size, m_size*default_roi_size])
else:
roi_size = tuple((np.array(dummy.shape) // 10).astype(int))
else:
roi_size = tuple((np.median(np.stack(m_shape), axis=0) * default_roi_size).astype(int))
config = TrackingConfig(img_files, segm_files, roi_size, delta_t=delta_t, cut_off_distance=None)
tracker = MultiCellTracker(config)
tracks = tracker()
exporter = ExportResults()
exporter(tracks, res_path, tracker.img_shape, time_steps=sorted(img_files.keys()))
if __name__ == '__main__':
from argparse import ArgumentParser
PARSER = ArgumentParser(description='Tracking KIT-Sch-GE.')
PARSER.add_argument('--image_path', type=str, help='path to the folder containing the raw images.')
PARSER.add_argument('--segmentation_path', type=str, help='path to the folder containing the segmentation images.')
PARSER.add_argument('--results_path', type=str, help='path where to store the tracking results. '
'If the results path is the same as the segmentation'
'_path the segmentation images will be overwritten.')
PARSER.add_argument('--delta_t', type=int, default=3)
PARSER.add_argument('--default_roi_size', type=int, default=2)
ARGS = PARSER.parse_args()
run_tracker(ARGS.image_path, ARGS.segmentation_path, ARGS.results_path, ARGS.delta_t, ARGS.default_roi_size)
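    # Hedged example invocation (script name and paths are hypothetical placeholders):
    #   python <this_script>.py --image_path <raw_img_dir> \
    #       --segmentation_path <mask_dir> --results_path <output_dir>
    # As noted in the help text above, if results_path equals segmentation_path the
    # segmentation images will be overwritten.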
|
[
"numpy.stack",
"argparse.ArgumentParser",
"tracker.tracking.MultiCellTracker",
"tracker.extract_data.get_img_files",
"tracker.tracking.TrackingConfig",
"pathlib.Path",
"numpy.array",
"tracker.export.ExportResults"
] |
[((372, 386), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (376, 386), False, 'from pathlib import Path\n'), ((403, 418), 'pathlib.Path', 'Path', (['segm_path'], {}), '(segm_path)\n', (407, 418), False, 'from pathlib import Path\n'), ((434, 448), 'pathlib.Path', 'Path', (['res_path'], {}), '(res_path)\n', (438, 448), False, 'from pathlib import Path\n'), ((465, 488), 'tracker.extract_data.get_img_files', 'get_img_files', (['img_path'], {}), '(img_path)\n', (478, 488), False, 'from tracker.extract_data import get_img_files\n'), ((506, 538), 'tracker.extract_data.get_img_files', 'get_img_files', (['segm_path', '"""mask"""'], {}), "(segm_path, 'mask')\n", (519, 538), False, 'from tracker.extract_data import get_img_files\n'), ((1275, 1366), 'tracker.tracking.TrackingConfig', 'TrackingConfig', (['img_files', 'segm_files', 'roi_size'], {'delta_t': 'delta_t', 'cut_off_distance': 'None'}), '(img_files, segm_files, roi_size, delta_t=delta_t,\n cut_off_distance=None)\n', (1289, 1366), False, 'from tracker.tracking import TrackingConfig, MultiCellTracker\n'), ((1377, 1401), 'tracker.tracking.MultiCellTracker', 'MultiCellTracker', (['config'], {}), '(config)\n', (1393, 1401), False, 'from tracker.tracking import TrackingConfig, MultiCellTracker\n'), ((1441, 1456), 'tracker.export.ExportResults', 'ExportResults', ([], {}), '()\n', (1454, 1456), False, 'from tracker.export import ExportResults\n'), ((1627, 1677), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Tracking KIT-Sch-GE."""'}), "(description='Tracking KIT-Sch-GE.')\n", (1641, 1677), False, 'from argparse import ArgumentParser\n'), ((956, 973), 'numpy.stack', 'np.stack', (['m_shape'], {}), '(m_shape)\n', (964, 973), True, 'import numpy as np\n'), ((810, 821), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (818, 821), True, 'import numpy as np\n'), ((841, 852), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (849, 852), True, 'import numpy as np\n'), ((1113, 1134), 'numpy.array', 'np.array', (['dummy.shape'], {}), '(dummy.shape)\n', (1121, 1134), True, 'import numpy as np\n'), ((1201, 1218), 'numpy.stack', 'np.stack', (['m_shape'], {}), '(m_shape)\n', (1209, 1218), True, 'import numpy as np\n')]
|
import numpy as np
class BBoxFilter(object):
def __init__(self, min_area, max_area, min_ratio):
self.min_area = min_area
self.max_area = max_area
self.min_ratio = min_ratio
def __call__(self, bbox):
assert len(bbox) == 4
area = bbox[2] * bbox[3]
if area < self.min_area or area > self.max_area:
return False
if min(bbox[2], bbox[3]) / max(bbox[2], bbox[3]) < self.min_ratio:
return False
return True
def truncate_bbox(bbox, h, w):
cmin = np.clip(bbox[0], 0, w - 1)
cmax = np.clip(bbox[0] + bbox[2], 0, w - 1)
rmin = np.clip(bbox[1], 0, h - 1)
rmax = np.clip(bbox[1] + bbox[3], 0, h - 1)
# return int(cmin), int(rmin), int(cmax - cmin), int(rmax - rmin)
return cmin, rmin, cmax - cmin, rmax - rmin
def round_bbox(bbox):
bbox = np.floor(bbox).astype(np.int32)
return tuple(bbox)
def compute_bbox(bimg):
rows = np.any(bimg, axis = 1)
cols = np.any(bimg, axis = 0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return cmin, rmin, cmax - cmin, rmax - rmin
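# Hedged sketch of compute_bbox on a toy mask (values worked out by hand, not part
# of the original module): a 4x5 boolean mask with True in rows 1-2 and columns 1-3
# gives (cmin, rmin, w, h) == (1, 1, 2, 1).  Note that w and h are index
# differences (max - min), not inclusive pixel counts.
#
#   mask = np.zeros((4, 5), dtype=bool)
#   mask[1:3, 1:4] = True
#   cmin, rmin, w, h = compute_bbox(mask)   # -> 1, 1, 2, 1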
def compute_iou(bbox1, bbox2):
if bbox1 is None or bbox2 is None:
return None
cmin = max(bbox1[0], bbox2[0])
rmin = max(bbox1[1], bbox2[1])
cmax = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
rmax = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
if (cmin < cmax) and (rmin < rmax):
intersect = float(cmax - cmin) * (rmax - rmin)
return intersect / (bbox1[2] * bbox1[3] + bbox2[2] * bbox2[3] - intersect)
else:
return 0.
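# Hedged worked example for compute_iou, with boxes in (col, row, w, h) form as
# used throughout this file (numbers checked by hand, not from the original code):
# bbox1 = (0, 0, 2, 2) and bbox2 = (1, 1, 2, 2) overlap in a 1x1 region, so
# IoU = 1 / (4 + 4 - 1) = 1/7 ~= 0.143.
#
#   compute_iou((0, 0, 2, 2), (1, 1, 2, 2))   # -> 0.142857...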
def find_max_iou(bbox, bboxes):
bbox = np.asarray(bbox)
bboxes = np.asarray(bboxes)
if bboxes.shape[0] == 0:
return -1, 0.
minp = np.maximum([bbox[:2]], bboxes[:, :2])
maxp = np.minimum([bbox[:2] + bbox[2:]], bboxes[:, :2] + bboxes[:, 2:])
delta = maxp - minp
intersect_inds = np.where(np.all(delta > 0, axis = 1))[0]
intersect = np.prod(delta[intersect_inds, :], axis = 1, dtype = np.float32)
ious = intersect / (bbox[2] * bbox[3] + \
np.prod(bboxes[intersect_inds, 2:], axis = 1) - intersect)
if ious.shape[0] == 0:
return -1, 0.
else:
max_ind = np.argmax(ious)
return intersect_inds[max_ind], ious[max_ind]
def ciou(bboxes1, bboxes2):
"""
Compute IoUs between two sets of bounding boxes
Input: np.array((n, 4), np.float32), np.array((m, 4), np.float32)
Output: np.array((n, m), np.float32)
"""
cmin = np.maximum.outer(bboxes1[:, 0], bboxes2[:, 0])
cmax = np.minimum.outer(bboxes1[:, 0] + bboxes1[:, 2],
bboxes2[:, 0] + bboxes2[:, 2])
w = cmax - cmin
del cmax, cmin
w.clip(min = 0, out = w)
rmin = np.maximum.outer(bboxes1[:, 1], bboxes2[:, 1])
rmax = np.minimum.outer(bboxes1[:, 1] + bboxes1[:, 3],
bboxes2[:, 1] + bboxes2[:, 3])
h = rmax - rmin
del rmax, rmin
h.clip(min = 0, out = h)
iou = w
np.multiply(w, h, out = iou)
del w, h
a1 = np.prod(bboxes1[:, 2:], axis = 1)
a2 = np.prod(bboxes2[:, 2:], axis = 1)
np.divide(iou, np.add.outer(a1, a2) - iou, out = iou)
return iou
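# Hedged shape/value sketch for ciou (toy float32 inputs, not from the original code):
# with one query box against two reference boxes the result is a (1, 2) matrix.
#
#   b1 = np.array([[0, 0, 2, 2]], dtype=np.float32)
#   b2 = np.array([[1, 1, 2, 2], [10, 10, 2, 2]], dtype=np.float32)
#   ciou(b1, b2)   # -> [[0.1428..., 0.0]]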
# @jit('float32[:, :](float32[:, :], float32[:, :])')
# def ciou_v2(bboxes1, bboxes2):
# """
# Compute IoUs between two sets of bounding boxes
# Input: np.array((n, 4), np.float32), np.array((m, 4), np.float32)
# Output: np.array((n, m), np.float32)
# """
# n = bboxes1.shape[0]
# m = bboxes2.shape[0]
# iou = np.zeros((n, m), dtype = np.float32)
# for i in range(n):
# for j in range(m):
# minp = np.maximum(bboxes1[i, :2], bboxes2[j, :2])
# maxp = np.minimum(bboxes1[i, :2] + bboxes1[i, 2:],
# bboxes2[j, :2] + bboxes2[j, 2:])
# delta = maxp - minp
# if delta[0] > 0 and delta[1] > 0:
# intersect = np.prod(delta)
# iou[i, j] = intersect / (np.prod(bboxes1[i, 2:]) + \
# np.prod(bboxes2[j, 2:]) - intersect)
# return iou
def _intersect(bboxes1, bboxes2):
"""
bboxes: t x n x 4
"""
assert bboxes1.shape[0] == bboxes2.shape[0]
t = bboxes1.shape[0]
inters = np.zeros((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
_min = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
_max = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
w = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
h = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype = np.float32)
for i in range(t):
np.maximum.outer(bboxes1[i, :, 0], bboxes2[i, :, 0], out = _min)
np.minimum.outer(bboxes1[i, :, 0] + bboxes1[i, :, 2],
bboxes2[i, :, 0] + bboxes2[i, :, 2], out = _max)
np.subtract(_max, _min, out = w)
w.clip(min = 0, out = w)
np.maximum.outer(bboxes1[i, :, 1], bboxes2[i, :, 1], out = _min)
np.minimum.outer(bboxes1[i, :, 1] + bboxes1[i, :, 3],
bboxes2[i, :, 1] + bboxes2[i, :, 3], out = _max)
np.subtract(_max, _min, out = h)
h.clip(min = 0, out = h)
np.multiply(w, h, out = w)
inters += w
return inters
def _union(bboxes1, bboxes2):
if id(bboxes1) == id(bboxes2):
w = bboxes1[:, :, 2]
h = bboxes1[:, :, 3]
area = np.sum(w * h, axis = 0)
unions = np.add.outer(area, area)
else:
w = bboxes1[:, :, 2]
h = bboxes1[:, :, 3]
area1 = np.sum(w * h, axis = 0)
w = bboxes2[:, :, 2]
h = bboxes2[:, :, 3]
area2 = np.sum(w * h, axis = 0)
unions = np.add.outer(area1, area2)
return unions
def viou(bboxes1, bboxes2):
# bboxes: t x n x 4
iou = _intersect(bboxes1, bboxes2)
union = _union(bboxes1, bboxes2)
np.subtract(union, iou, out = union)
np.divide(iou, union, out = iou)
return iou
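# Hedged sketch for viou (toy spatio-temporal input, not from the original code):
# tubes are arrays of per-frame boxes with shape (t, n, 4); two identical
# single-box tubes over t frames give an IoU of 1.
#
#   tube = np.zeros((2, 1, 4), dtype=np.float32)
#   tube[:, :, 2:] = 2.0          # a 2x2 box at (0, 0) in both frames
#   viou(tube, tube)              # -> [[1.0]]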
|
[
"numpy.maximum",
"numpy.sum",
"numpy.argmax",
"numpy.empty",
"numpy.floor",
"numpy.clip",
"numpy.add.outer",
"numpy.prod",
"numpy.multiply",
"numpy.maximum.outer",
"numpy.divide",
"numpy.minimum",
"numpy.asarray",
"numpy.all",
"numpy.subtract",
"numpy.zeros",
"numpy.any",
"numpy.where",
"numpy.minimum.outer"
] |
[((497, 523), 'numpy.clip', 'np.clip', (['bbox[0]', '(0)', '(w - 1)'], {}), '(bbox[0], 0, w - 1)\n', (504, 523), True, 'import numpy as np\n'), ((533, 569), 'numpy.clip', 'np.clip', (['(bbox[0] + bbox[2])', '(0)', '(w - 1)'], {}), '(bbox[0] + bbox[2], 0, w - 1)\n', (540, 569), True, 'import numpy as np\n'), ((579, 605), 'numpy.clip', 'np.clip', (['bbox[1]', '(0)', '(h - 1)'], {}), '(bbox[1], 0, h - 1)\n', (586, 605), True, 'import numpy as np\n'), ((615, 651), 'numpy.clip', 'np.clip', (['(bbox[1] + bbox[3])', '(0)', '(h - 1)'], {}), '(bbox[1] + bbox[3], 0, h - 1)\n', (622, 651), True, 'import numpy as np\n'), ((887, 907), 'numpy.any', 'np.any', (['bimg'], {'axis': '(1)'}), '(bimg, axis=1)\n', (893, 907), True, 'import numpy as np\n'), ((919, 939), 'numpy.any', 'np.any', (['bimg'], {'axis': '(0)'}), '(bimg, axis=0)\n', (925, 939), True, 'import numpy as np\n'), ((1567, 1583), 'numpy.asarray', 'np.asarray', (['bbox'], {}), '(bbox)\n', (1577, 1583), True, 'import numpy as np\n'), ((1595, 1613), 'numpy.asarray', 'np.asarray', (['bboxes'], {}), '(bboxes)\n', (1605, 1613), True, 'import numpy as np\n'), ((1668, 1705), 'numpy.maximum', 'np.maximum', (['[bbox[:2]]', 'bboxes[:, :2]'], {}), '([bbox[:2]], bboxes[:, :2])\n', (1678, 1705), True, 'import numpy as np\n'), ((1715, 1779), 'numpy.minimum', 'np.minimum', (['[bbox[:2] + bbox[2:]]', '(bboxes[:, :2] + bboxes[:, 2:])'], {}), '([bbox[:2] + bbox[2:]], bboxes[:, :2] + bboxes[:, 2:])\n', (1725, 1779), True, 'import numpy as np\n'), ((1876, 1935), 'numpy.prod', 'np.prod', (['delta[intersect_inds, :]'], {'axis': '(1)', 'dtype': 'np.float32'}), '(delta[intersect_inds, :], axis=1, dtype=np.float32)\n', (1883, 1935), True, 'import numpy as np\n'), ((2388, 2434), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[:, 0]', 'bboxes2[:, 0]'], {}), '(bboxes1[:, 0], bboxes2[:, 0])\n', (2404, 2434), True, 'import numpy as np\n'), ((2444, 2522), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[:, 0] + bboxes1[:, 2])', '(bboxes2[:, 0] + bboxes2[:, 2])'], {}), '(bboxes1[:, 0] + bboxes1[:, 2], bboxes2[:, 0] + bboxes2[:, 2])\n', (2460, 2522), True, 'import numpy as np\n'), ((2621, 2667), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[:, 1]', 'bboxes2[:, 1]'], {}), '(bboxes1[:, 1], bboxes2[:, 1])\n', (2637, 2667), True, 'import numpy as np\n'), ((2677, 2755), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[:, 1] + bboxes1[:, 3])', '(bboxes2[:, 1] + bboxes2[:, 3])'], {}), '(bboxes1[:, 1] + bboxes1[:, 3], bboxes2[:, 1] + bboxes2[:, 3])\n', (2693, 2755), True, 'import numpy as np\n'), ((2857, 2883), 'numpy.multiply', 'np.multiply', (['w', 'h'], {'out': 'iou'}), '(w, h, out=iou)\n', (2868, 2883), True, 'import numpy as np\n'), ((2905, 2936), 'numpy.prod', 'np.prod', (['bboxes1[:, 2:]'], {'axis': '(1)'}), '(bboxes1[:, 2:], axis=1)\n', (2912, 2936), True, 'import numpy as np\n'), ((2946, 2977), 'numpy.prod', 'np.prod', (['bboxes2[:, 2:]'], {'axis': '(1)'}), '(bboxes2[:, 2:], axis=1)\n', (2953, 2977), True, 'import numpy as np\n'), ((4010, 4074), 'numpy.zeros', 'np.zeros', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4018, 4074), True, 'import numpy as np\n'), ((4086, 4150), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4094, 4150), True, 'import numpy as np\n'), ((4162, 4226), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 
'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4170, 4226), True, 'import numpy as np\n'), ((4235, 4299), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4243, 4299), True, 'import numpy as np\n'), ((4308, 4372), 'numpy.empty', 'np.empty', (['(bboxes1.shape[1], bboxes2.shape[1])'], {'dtype': 'np.float32'}), '((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)\n', (4316, 4372), True, 'import numpy as np\n'), ((5509, 5543), 'numpy.subtract', 'np.subtract', (['union', 'iou'], {'out': 'union'}), '(union, iou, out=union)\n', (5520, 5543), True, 'import numpy as np\n'), ((5548, 5578), 'numpy.divide', 'np.divide', (['iou', 'union'], {'out': 'iou'}), '(iou, union, out=iou)\n', (5557, 5578), True, 'import numpy as np\n'), ((2114, 2129), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (2123, 2129), True, 'import numpy as np\n'), ((4400, 4462), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[i, :, 0]', 'bboxes2[i, :, 0]'], {'out': '_min'}), '(bboxes1[i, :, 0], bboxes2[i, :, 0], out=_min)\n', (4416, 4462), True, 'import numpy as np\n'), ((4469, 4573), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[i, :, 0] + bboxes1[i, :, 2])', '(bboxes2[i, :, 0] + bboxes2[i, :, 2])'], {'out': '_max'}), '(bboxes1[i, :, 0] + bboxes1[i, :, 2], bboxes2[i, :, 0] +\n bboxes2[i, :, 2], out=_max)\n', (4485, 4573), True, 'import numpy as np\n'), ((4585, 4615), 'numpy.subtract', 'np.subtract', (['_max', '_min'], {'out': 'w'}), '(_max, _min, out=w)\n', (4596, 4615), True, 'import numpy as np\n'), ((4651, 4713), 'numpy.maximum.outer', 'np.maximum.outer', (['bboxes1[i, :, 1]', 'bboxes2[i, :, 1]'], {'out': '_min'}), '(bboxes1[i, :, 1], bboxes2[i, :, 1], out=_min)\n', (4667, 4713), True, 'import numpy as np\n'), ((4720, 4824), 'numpy.minimum.outer', 'np.minimum.outer', (['(bboxes1[i, :, 1] + bboxes1[i, :, 3])', '(bboxes2[i, :, 1] + bboxes2[i, :, 3])'], {'out': '_max'}), '(bboxes1[i, :, 1] + bboxes1[i, :, 3], bboxes2[i, :, 1] +\n bboxes2[i, :, 3], out=_max)\n', (4736, 4824), True, 'import numpy as np\n'), ((4836, 4866), 'numpy.subtract', 'np.subtract', (['_max', '_min'], {'out': 'h'}), '(_max, _min, out=h)\n', (4847, 4866), True, 'import numpy as np\n'), ((4902, 4926), 'numpy.multiply', 'np.multiply', (['w', 'h'], {'out': 'w'}), '(w, h, out=w)\n', (4913, 4926), True, 'import numpy as np\n'), ((5086, 5107), 'numpy.sum', 'np.sum', (['(w * h)'], {'axis': '(0)'}), '(w * h, axis=0)\n', (5092, 5107), True, 'import numpy as np\n'), ((5123, 5147), 'numpy.add.outer', 'np.add.outer', (['area', 'area'], {}), '(area, area)\n', (5135, 5147), True, 'import numpy as np\n'), ((5218, 5239), 'numpy.sum', 'np.sum', (['(w * h)'], {'axis': '(0)'}), '(w * h, axis=0)\n', (5224, 5239), True, 'import numpy as np\n'), ((5304, 5325), 'numpy.sum', 'np.sum', (['(w * h)'], {'axis': '(0)'}), '(w * h, axis=0)\n', (5310, 5325), True, 'import numpy as np\n'), ((5341, 5367), 'numpy.add.outer', 'np.add.outer', (['area1', 'area2'], {}), '(area1, area2)\n', (5353, 5367), True, 'import numpy as np\n'), ((799, 813), 'numpy.floor', 'np.floor', (['bbox'], {}), '(bbox)\n', (807, 813), True, 'import numpy as np\n'), ((957, 971), 'numpy.where', 'np.where', (['rows'], {}), '(rows)\n', (965, 971), True, 'import numpy as np\n'), ((999, 1013), 'numpy.where', 'np.where', (['cols'], {}), '(cols)\n', (1007, 1013), True, 'import numpy as np\n'), ((1830, 1855), 'numpy.all', 'np.all', (['(delta > 0)'], {'axis': 
'(1)'}), '(delta > 0, axis=1)\n', (1836, 1855), True, 'import numpy as np\n'), ((2997, 3017), 'numpy.add.outer', 'np.add.outer', (['a1', 'a2'], {}), '(a1, a2)\n', (3009, 3017), True, 'import numpy as np\n'), ((1990, 2033), 'numpy.prod', 'np.prod', (['bboxes[intersect_inds, 2:]'], {'axis': '(1)'}), '(bboxes[intersect_inds, 2:], axis=1)\n', (1997, 2033), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import sys
from nibabel import load as nib_load
import nibabel as nib
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from scipy import signal
import os
from numpy import genfromtxt
from sklearn.decomposition import PCA
def load_gifti_func(path_to_file):
"""
#Wrapper function to load functional data from
#a gifti file using nibabel. Returns data in shape
#<num_verts x num_timepoints>
"""
gifti_img = nib_load(path_to_file)
gifti_list = [x.data for x in gifti_img.darrays]
gifti_data = np.vstack(gifti_list).transpose()
return gifti_data
def load_cifti_func(path_to_file):
cifti_img = nib_load(path_to_file)
return np.asarray(cifti_img.dataobj).transpose()
def calc_fishers_icc(tp1, tp2):
"""
#Calculate intraclass correlation coefficient
#from the equation on wikipedia describing
#fisher's formulation. tp1 and tp2 should
# be of shape (n,1) or (n,) where n is the
#number of samples
"""
xhat = np.mean(np.vstack((tp1, tp2)))
sq_dif1 = np.power((tp1 - xhat),2)
sq_dif2 = np.power((tp2 - xhat),2)
s2 = np.mean(np.vstack((sq_dif1, sq_dif2)))
r = 1/(tp1.shape[0]*s2)*np.sum(np.multiply(tp1 - xhat, tp2 - xhat))
return r
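# Hedged sanity check for calc_fishers_icc (toy data, not from the original module):
# the implemented formula is r = (1 / (n * s^2)) * sum((x1 - xbar) * (x2 - xbar)),
# with xbar and s^2 pooled over both measurements, so identical inputs give r == 1.
#
#   tp = np.array([1.0, 2.0, 3.0])
#   calc_fishers_icc(tp, tp)   # -> 1.0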
def pre_post_carpet_plot(noisy_time_series, cleaned_time_series):
"""
#This function is for calculating a carpet plot figure, that
#will allow for comparison of the BOLD time series before and
#after denoising takes place. The two input matrices should have
#shape <num_parcels, num_timepoints>, and will ideally be from a
#parcellated time series and not whole hemisphere data (lots of points).
#The script will demean and then normalize all regions' time signals,
#and then will display them side by side on grey-scale plots
"""
#Copy the data
noisy_data = np.copy(noisy_time_series)
clean_data = np.copy(cleaned_time_series)
#Calculate means and standard deviations for all parcels
noisy_means = np.mean(noisy_data, axis = 1)
noisy_stds = np.std(noisy_data, axis = 1)
clean_means = np.mean(clean_data, axis = 1)
clean_stds = np.std(clean_data, axis = 1)
#Empty matrices for demeaned and normalized data
dn_noisy_data = np.zeros(noisy_data.shape)
dn_clean_data = np.zeros(clean_data.shape)
#Use the means and stds to mean and normalize all parcels' time signals
for i in range(0, clean_data.shape[0]):
dn_noisy_data[i,:] = (noisy_data[i,:] - noisy_means[i])/noisy_stds[i]
dn_clean_data[i,:] = (clean_data[i,:] - clean_means[i])/clean_stds[i]
#Create a subplot
plot_obj = plt.subplot(1,2,1)
#Plot the noisy data
img_plot = plt.imshow(dn_noisy_data, aspect = 'auto', cmap = 'binary')
plt.title('Noisy BOLD Data')
plt.xlabel('Timepoint #')
    plt.ylabel('Region # (Arbitrary)')
plt.colorbar()
#Plot the clean data
plt.subplot(1,2,2)
img_plot2 = plt.imshow(dn_clean_data, aspect = 'auto', cmap = 'binary')
plt.title('Clean BOLD Data')
plt.xlabel('Timepoint #')
plt.colorbar()
fig = plt.gcf()
fig.set_size_inches(15, 5)
return plot_obj
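# Hedged usage sketch (variable names are placeholders): both inputs should be the
# same parcellated run, shaped <num_parcels, num_timepoints>, before and after
# denoising.
#
#   pre_post_carpet_plot(noisy_parcel_ts, clean_parcel_ts)
#   plt.show()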
def parcellate_func_combine_hemis(lh_func, rh_func, lh_parcel_path, rh_parcel_path):
"""
#Function that takes functional data in the form <num_verts, num_timepoints> for
#both the left and right hemisphere, and averages the functional time series across
#all vertices defined in a given parcel, for every parcel, with the parcels identified
    #by an annotation file specified at ?h_parcel_path. The function then returns a combined
#matrix of size <num_parcels, num_timepoints> and <num_labels> for the time series and
    #parcel label names, respectively. The lh parcels will precede the rh parcels in order.
#NOTE: THIS ASSUMES THE FIRST PARCEL WILL BE MEDIAL WALL, AND DISREGARDS ANY VERTICES WITHIN
#THAT PARCEL. IF THIS IS NOT THE CASE FOR YOUR PARCELLATION, DO NOT USE THIS FUNCTION.
"""
#Output will be tuple of format [labels, ctab, names]
lh_parcels = nib.freesurfer.io.read_annot(lh_parcel_path)
rh_parcels = nib.freesurfer.io.read_annot(rh_parcel_path)
#Make array to store parcellated data with shape <num_parcels, num_timepoints>
lh_parcellated_data = np.zeros((len(lh_parcels[2]) - 1, lh_func.shape[1]))
rh_parcellated_data = np.zeros((len(rh_parcels[2]) - 1, rh_func.shape[1]))
#Start with left hemisphere
for i in range(1,len(lh_parcels[2])):
#Find the voxels for the current parcel
vois = np.where(lh_parcels[0] == i)
#Take the mean of all voxels of interest
lh_parcellated_data[i-1, :] = np.mean(lh_func[vois[0],:], axis = 0)
#Move to right hemisphere
for i in range(1,len(rh_parcels[2])):
vois = np.where(rh_parcels[0] == i)
rh_parcellated_data[i-1, :] = np.mean(rh_func[vois[0],:], axis = 0)
#Then concatenate parcel labels and parcel timeseries between the left and right hemisphere
#and drop the medial wall from label list
parcellated_data = np.vstack((lh_parcellated_data, rh_parcellated_data))
parcel_labels = lh_parcels[2][1:] + rh_parcels[2][1:]
#Try to convert the parcel labels from bytes to normal string
for i in range(0, len(parcel_labels)):
parcel_labels[i] = parcel_labels[i].decode("utf-8")
return parcellated_data, parcel_labels
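# Hedged usage sketch (all paths are hypothetical placeholders): load left/right
# hemisphere functional giftis with load_gifti_func above, then average within the
# parcels of matching FreeSurfer .annot files.
#
#   lh_func = load_gifti_func('lh.func.gii')     # <num_verts, num_timepoints>
#   rh_func = load_gifti_func('rh.func.gii')
#   ts, labels = parcellate_func_combine_hemis(lh_func, rh_func,
#                                              'lh.parcels.annot', 'rh.parcels.annot')
#   # ts has shape <num_lh_parcels + num_rh_parcels, num_timepoints>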
def net_mat_summary_stats(matrix_data, include_diagonals, parcel_labels):
"""
#Function that takes a network matrix of size <num_parcels x num_parcels>
#and calculates summary statistics for each grouping of parcels within a
#given network combination (i.e. within DMN would be one grouping, between
#DMN and Control would be another grouping). If you would like to include
#the diagonals of the matrix set include_diagonals to true, otherwise,
#as is the case in conventional functional connectivity matrices, exclude
#the diagonal since it will most commonly be 1 or Inf.
#This function only works on data formatted in the Schaeffer/Yeo 7 network
#configuration.
#Parcel labels should be a list of strings that has the names of the different
#parcels in the parcellation. This is how the function knows what parcels
#belong to what networks.
"""
#The names of the different networks
network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
#Array to store network IDs (0-6, corresponding to order of network names)
network_ids = np.zeros((len(parcel_labels),1))
#Find which network each parcel belongs to
for i in range(0,len(parcel_labels)):
for j in range(0,len(network_names)):
if network_names[j] in parcel_labels[i]:
network_ids[i] = j
#Calculate the average stat for each network combination
network_stats = np.zeros((7,7))
for i in range(0,7):
for j in range(0,7):
temp_stat = 0
temp_stat_count = 0
rel_inds_i = np.where(network_ids == i)[0]
rel_inds_j = np.where(network_ids == j)[0]
for inds_i in rel_inds_i:
for inds_j in rel_inds_j:
if inds_i == inds_j:
if include_diagonals == True:
temp_stat += matrix_data[inds_i, inds_j]
temp_stat_count += 1
else:
temp_stat += matrix_data[inds_i, inds_j]
temp_stat_count += 1
network_stats[i,j] = temp_stat/temp_stat_count
return network_stats
def net_summary_stats(parcel_data, parcel_labels):
"""
#Function that takes a statistic defined at a parcel level, and
#resamples that statistic to the network level. This function is a copy of
#net_mat_summary_stats only now defined to work on 1D instead of 2D data.
#This function only works on data formatted in the Schaeffer/Yeo 7 network
#configuration.
#Parcel labels should be a list of strings that has the names of the different
#parcels in the parcellation. This is how the function knows what parcels
#belong to what networks.
"""
#The names of the different networks
network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
#Array to store network IDs (0-6, corresponding to order of network names)
network_ids = np.zeros((len(parcel_labels),1))
#Find which network each parcel belongs to
for i in range(0,len(parcel_labels)):
for j in range(0,len(network_names)):
if network_names[j] in parcel_labels[i]:
network_ids[i] = j
#Calculate the average stat for each network combination
network_stats = np.zeros((7))
for i in range(0,7):
temp_stat = 0
temp_stat_count = 0
rel_inds_i = np.where(network_ids == i)[0]
for inds_i in rel_inds_i:
temp_stat += parcel_data[inds_i]
temp_stat_count += 1
network_stats[i] = temp_stat/temp_stat_count
return network_stats
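# Hedged usage sketch (inputs are placeholders): collapse any parcel-level statistic
# (e.g. the normalized std from calc_norm_std below) into the 7 Yeo networks; the
# parcel labels must embed the network names ('Vis', 'SomMot', ...) as in the
# Schaeffer/Yeo parcellation used throughout this file.
#
#   net_vals = net_summary_stats(parcel_stat, parcel_labels)   # shape (7,)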
def plot_network_timeseries(parcel_data, parcel_labels):
#The names of the different networks
network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
network_colors = [[121/255,3/255,136/255,1],[67/255,129/255,182/255,1],[0/255,150/255,0/255,1], \
[198/255,41/255,254/255,1],[219/255,249/255,160/255,1], \
[232/255,149/255,0/255,1], [207/255,60/255,74/255,1]]
#Array to store network IDs (0-6, corresponding to order of network names)
network_ids = np.zeros((len(parcel_labels),1))
#Find which network each parcel belongs to
for i in range(0,len(parcel_labels)):
for j in range(0,len(network_names)):
if network_names[j] in parcel_labels[i]:
network_ids[i] = j
fig, ax = plt.subplots(7,1)
for i in range(0,7):
in_network = np.where(network_ids == i)[0]
plt.sca(ax[i])
for j in range(0, in_network.shape[0]):
plt.plot(parcel_data[in_network[j]], color=network_colors[i])
plt.ylabel('Signal Intensity')
plt.title('Time-Course For All ' + network_names[i] + ' Parcels')
if i != 6:
plt.xticks([])
plt.xlabel('Volume # (excluding high-motion volumes)')
fig.set_size_inches(15, 20)
return fig
def calc_norm_std(parcel_data, confound_path):
"""
#This script is used to calculate the normalized standard
#deviation of a cleaned fmri time signal. This is a metric
#representative of variability/amplitude in the BOLD signal.
#This is a particularly good option if you are working with
#scrubbed data such that the FFT for ALFF can no longer be
#properly calculated.
#parcel_data has size <num_regions, num_timepoints>. Confound
#path is the path to the confound file for the run of interest.
#The global signal will be taken from the confound file to calculate
#the median BOLD signal in the brain before pre-processing. This will then
#be used to normalize the standard deviation of the BOLD signal such that
#the output measure will be std(BOLD_Time_Series)/median_global_signal_intensity.
"""
#Create a dataframe for nuisance variables in confounds
confound_df = pd.read_csv(confound_path, sep='\t')
global_signal = confound_df.global_signal.values
median_intensity = np.median(global_signal)
parcel_std = np.zeros((parcel_data.shape[0]))
for i in range(0, parcel_data.shape[0]):
parcel_std[i] = np.std(parcel_data[i,:])/median_intensity
return parcel_std
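# Hedged usage sketch (path is a hypothetical placeholder): the confound file is
# assumed to be a tab-separated table with a 'global_signal' column, as read above.
#
#   parcel_std = calc_norm_std(parcellated_data, 'sub-01_confounds.tsv')
#   # one normalized std value per parcel/region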
def network_bar_chart(network_vals, ylabel):
#The names of the different networks
network_names = ['Vis', 'SomMot', 'DorsAttn', 'SalVentAttn', 'Limbic', 'Cont', 'Default']
network_colors = [[121/255,3/255,136/255,1],[67/255,129/255,182/255,1],[0/255,150/255,0/255,1], \
[198/255,41/255,254/255,1],[219/255,249/255,160/255,1], \
[232/255,149/255,0/255,1], [207/255,60/255,74/255,1]]
x = [1, 2, 3, 4, 5, 6, 7]
fig = plt.bar(x, network_vals, color = network_colors, tick_label = network_names)
plt.ylabel(ylabel)
plt.xticks(rotation=45)
return fig
def fs_anat_to_array(path_to_fs_subject, folder_for_output_files):
"""
    #This function collects the aseg.stats file,
#lh.aparc.stats file, and rh.aparc.stats files from a freesurfer subject
#found at the path path_to_fs_subject, and grabs the volumes for all
#subcortical structures, along with volumes, thicknesses, and surface
#areas for all cortical structures, and saves them as .npy files under
#folder_for_output_files. Also saves a text file with the names of the
#regions (one for subcortical, and one for lh/rh)
"""
aseg_path = os.path.join(path_to_fs_subject, 'stats', 'aseg.stats')
lh_path = os.path.join(path_to_fs_subject, 'stats', 'lh.aparc.stats')
rh_path = os.path.join(path_to_fs_subject, 'stats', 'rh.aparc.stats')
f = open(aseg_path, "r")
lines = f.readlines()
f.close()
header = '# ColHeaders Index SegId NVoxels Volume_mm3 StructName normMean normStdDev normMin normMax normRange'
subcort_names = ['Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent', 'Left-Cerebellum-White-Matter',
'Left-Cerebellum-Cortex', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen',
'Left-Pallidum', '3rd-Ventricle', '4th-Ventricle', 'Brain-Stem', 'Left-Hippocampus',
'Left-Amygdala', 'CSF' ,'Left-Accumbens-area', 'Left-VentralDC', 'Left-vessel',
'Left-choroid-plexus', 'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent',
'Right-Cerebellum-White-Matter','Right-Cerebellum-Cortex', 'Right-Thalamus-Proper',
'Right-Caudate', 'Right-Putamen', 'Right-Pallidum', 'Right-Hippocampus',
'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Right-vessel',
'Right-choroid-plexus', '5th-Ventricle', 'WM-hypointensities', 'Left-WM-hypointensities',
'Right-WM-hypointensities', 'non-WM-hypointensities', 'Left-non-WM-hypointensities',
'Right-non-WM-hypointensities', 'Optic-Chiasm', 'CC_Posterior', 'CC_Mid_Posterior',
'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior']
aseg_vol = []
header_found = 0
for i in range(0,len(lines)):
if header_found == 1:
split_line = lines[i].split()
if split_line[4] != subcort_names[i-header_found_ind]:
raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
subcort_names[i-header_found_ind] + ' but found ' + split_line[4])
aseg_vol.append(float(split_line[3]))
if header in lines[i]:
header_found = 1
header_found_ind = i + 1 #actually add one for formatting....
#This indicates that (1) the column headings should
#be correct, and that (2) this is where to start
#looking for anatomical stats
lh_f = open(lh_path, "r")
lh_lines = lh_f.readlines()
lh_f.close()
header = '# ColHeaders StructName NumVert SurfArea GrayVol ThickAvg ThickStd MeanCurv GausCurv FoldInd CurvInd'
cort_names = ['bankssts', 'caudalanteriorcingulate', 'caudalmiddlefrontal', 'cuneus', 'entorhinal',
'fusiform', 'inferiorparietal', 'inferiortemporal', 'isthmuscingulate', 'lateraloccipital',
'lateralorbitofrontal', 'lingual', 'medialorbitofrontal', 'middletemporal', 'parahippocampal',
'paracentral', 'parsopercularis', 'parsorbitalis', 'parstriangularis', 'pericalcarine',
'postcentral', 'posteriorcingulate', 'precentral', 'precuneus', 'rostralanteriorcingulate',
'rostralmiddlefrontal', 'superiorfrontal', 'superiorparietal', 'superiortemporal', 'supramarginal',
'frontalpole', 'temporalpole', 'transversetemporal', 'insula']
lh_surface_area = []
lh_volume = []
lh_thickness = []
header_found = 0
for i in range(0,len(lh_lines)):
if header_found == 1:
split_line = lh_lines[i].split()
if split_line[0] != cort_names[i-header_found_ind]:
raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
                                cort_names[i-header_found_ind] + ' but found ' + split_line[0])
            #grab the surface area, volume, and thickness values for this region
lh_surface_area.append(float(split_line[2]))
lh_volume.append(float(split_line[3]))
lh_thickness.append(float(split_line[4]))
if header in lh_lines[i]:
header_found = 1
header_found_ind = i + 1 #actually add one for formatting....
#This indicates that (1) the column headings should
#be correct, and that (2) this is where to start
#looking for anatomical stats
rh_f = open(rh_path, "r")
rh_lines = rh_f.readlines()
rh_f.close()
rh_surface_area = []
rh_volume = []
rh_thickness = []
header_found = 0
for i in range(0,len(rh_lines)):
if header_found == 1:
split_line = rh_lines[i].split()
if split_line[0] != cort_names[i-header_found_ind]:
raise NameError('Error: anatomy names do not line up with expectation. Expected ' +
                                cort_names[i-header_found_ind] + ' but found ' + split_line[0])
            #grab the surface area, volume, and thickness values for this region
rh_surface_area.append(float(split_line[2]))
rh_volume.append(float(split_line[3]))
rh_thickness.append(float(split_line[4]))
if header in rh_lines[i]:
header_found = 1
header_found_ind = i + 1 #actually add one for formatting....
#This indicates that (1) the column headings should
#be correct, and that (2) this is where to start
#looking for anatomical stats
if os.path.exists(folder_for_output_files) == False:
os.mkdir(folder_for_output_files)
#Save the metrics as numpy files
np.save(os.path.join(folder_for_output_files, 'aseg_vols.npy'), np.asarray(aseg_vol))
np.save(os.path.join(folder_for_output_files, 'lh_aseg_surface_areas.npy'), np.asarray(lh_surface_area))
np.save(os.path.join(folder_for_output_files, 'lh_aseg_volumes.npy'), np.asarray(lh_volume))
np.save(os.path.join(folder_for_output_files, 'lh_aseg_thicknesses.npy'), np.asarray(lh_thickness))
np.save(os.path.join(folder_for_output_files, 'rh_aseg_surface_areas.npy'), np.asarray(rh_surface_area))
np.save(os.path.join(folder_for_output_files, 'rh_aseg_volumes.npy'), np.asarray(rh_volume))
np.save(os.path.join(folder_for_output_files, 'rh_aseg_thicknesses.npy'), np.asarray(rh_thickness))
#Calculate some bilateral metrics
left_vent = 0
right_vent = 18
total_lateral_vent = aseg_vol[left_vent] + aseg_vol[right_vent]
left_hipp = 11
right_hipp = 26
total_hipp_vol = aseg_vol[left_hipp] + aseg_vol[right_hipp]
left_thal = 4
right_thal = 22
total_thal_vol = aseg_vol[left_thal] + aseg_vol[right_thal]
left_amyg = 12
right_amyg = 27
total_amyg_vol = aseg_vol[left_amyg] + aseg_vol[right_amyg]
#Also calculate global thickness
numerator = np.sum(np.multiply(lh_surface_area,lh_thickness)) + np.sum(np.multiply(rh_surface_area,rh_thickness))
denominator = np.sum(lh_surface_area) + np.sum(rh_surface_area)
whole_brain_ave_thick = numerator/denominator
discovery_metric_array = [total_hipp_vol, total_amyg_vol, total_thal_vol,
total_lateral_vent, whole_brain_ave_thick]
np.save(os.path.join(folder_for_output_files, 'discovery_anat_metrics.npy'), np.asarray(discovery_metric_array))
discovery_anat_ids = ['bilateral_hipp_volume', 'bilateral_amyg_vol', 'bilateral_thal_vol',
'bilateral_lateral_vent_vol', 'whole_brain_ave_thick']
#Then save a file with the region names
with open(os.path.join(folder_for_output_files, 'subcortical_region_names.txt'), 'w') as f:
for item in subcort_names:
f.write("%s\n" % item)
with open(os.path.join(folder_for_output_files, 'cortical_region_names.txt'), 'w') as f:
for item in cort_names:
f.write("%s\n" % item)
with open(os.path.join(folder_for_output_files, 'discovery_region_names.txt'), 'w') as f:
for item in discovery_anat_ids:
f.write("%s\n" % item)
return
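#Hedged usage sketch for fs_anat_to_array (added example). The subject and
#output paths are hypothetical; any FreeSurfer subject directory containing
#stats/aseg.stats, stats/lh.aparc.stats, and stats/rh.aparc.stats should work.
def example_fs_anat_to_array():
    fs_subject = '/data/freesurfer/sub-01' #hypothetical path
    out_dir = '/data/derivatives/sub-01_anat_arrays' #hypothetical path
    fs_anat_to_array(fs_subject, out_dir)
    metrics = np.load(os.path.join(out_dir, 'discovery_anat_metrics.npy'))
    #order follows discovery_anat_ids defined above:
    #[bilateral_hipp_volume, bilateral_amyg_vol, bilateral_thal_vol,
    # bilateral_lateral_vent_vol, whole_brain_ave_thick]
    print(metrics)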
def calculate_XT_X_Neg1_XT(X):
"""
#Calculate term that can be multiplied with
#Y to calculate the beta weights for least
#squares regression. X should be of shape
#(n x d) where n is the number of observations
#and d is the number of dimensions/predictors
#uses inverse transform
"""
XT = X.transpose()
XT_X_Neg1 = np.linalg.pinv(np.matmul(XT,X))
return np.matmul(XT_X_Neg1, XT)
def partial_clean_fast(Y, XT_X_Neg1_XT, bad_regressors):
"""
#Function to help in the denoising of time signal Y with shape
#(n,1) or (n,) where n is the number of timepoints.
#XT_X_Neg1_XT is ((X^T)*X)^-1*(X^T), where ^T represents transpose
#and ^-1 represents matrix inversions. X contains bad regressors including
#noise ICs, a constant component, and a linear trend (etc.), and good regressors
#containing non-motion related ICs. The Beta weights for the linear model
#will be solved by multiplying XT_X_Neg1_XT with Y, and then the beta weights
#determined for the bad regressors will be subtracted off from Y and the residuals
#from this operation will be returned. For this reason, it is important to
#put all bad regressors in front when doing matrix multiplication
"""
B = np.matmul(XT_X_Neg1_XT, Y)
Y_noise = np.matmul(bad_regressors, B[:bad_regressors.shape[1]])
return (Y - Y_noise)
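#Hedged, self-contained sketch of the nuisance-regression scheme described in
#the two functions above (all data below are synthetic): the design matrix X
#stacks the bad (noise) regressors first, followed by the good regressors, and
#partial_clean_fast removes only the fitted contribution of the bad regressors.
def example_partial_clean():
    rng = np.random.default_rng(0)
    n_tp = 200 #number of timepoints
    bad_regressors = np.column_stack([np.ones(n_tp),              #constant
                                     np.linspace(-1, 1, n_tp),    #linear trend
                                     rng.standard_normal(n_tp)])  #one noise IC
    good_regressors = rng.standard_normal((n_tp, 2))              #two signal ICs
    X = np.hstack([bad_regressors, good_regressors])              #bad regressors in front
    XT_X_Neg1_XT = calculate_XT_X_Neg1_XT(X)
    Y = np.matmul(X, np.array([2.0, 1.0, 0.5, 3.0, -1.5])) + 0.1*rng.standard_normal(n_tp)
    cleaned_Y = partial_clean_fast(Y, XT_X_Neg1_XT, bad_regressors)
    return cleaned_Y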
from scipy.signal import butter, filtfilt
def construct_filter(btype, cutoff, TR, order):
"""
#btype should be 'lowpass', 'highpass', or 'bandpass' and
#cutoff should be list (in Hz) with length 1 for low and high and
#2 for band. Order is the order of the filter
#which will be doubled since filtfilt will be used
#to remove phase distortion from the filter. Recommended
#order is 6. Will return filter coefficients b and a for
#the desired butterworth filter.
#Constructs filter coefficients. Use apply_filter to use
#the coefficients to filter a signal.
#Should have butter imported from scipy.signal
"""
nyq = 0.5 * (1/TR)
if btype == 'lowpass':
if len(cutoff) != 1:
            raise NameError('Error: lowpass type filter should have one cutoff value')
low = cutoff[0]/nyq
b, a = butter(order, low, btype='lowpass')
elif btype == 'highpass':
if len(cutoff) != 1:
            raise NameError('Error: highpass type filter should have one cutoff value')
high = cutoff[0]/nyq
b, a = butter(order, high, btype='highpass')
elif btype == 'bandpass':
if len(cutoff) != 2:
raise NameError('Error: bandpass type filter should have two cutoff values')
low = min(cutoff)/nyq
high = max(cutoff)/nyq
b, a = butter(order, [low, high], btype='bandpass')
else:
        raise NameError('Error: filter type should be lowpass, highpass, or bandpass')
return b, a
########################################################################################
########################################################################################
########################################################################################
def apply_filter(b, a, signal):
"""
#Wrapper function to apply the filter coefficients from
#construct_filter to a signal.
#should have filtfilt imported from scipy.signal
"""
filtered_signal = filtfilt(b, a, signal)
return filtered_signal
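#Hedged example combining construct_filter and apply_filter (added example).
#The TR and band-pass cutoffs below are common resting-state choices, not
#values taken from this pipeline; the test signal is synthetic.
def example_bandpass_filter():
    TR = 0.8 #seconds per volume (assumed)
    b, a = construct_filter('bandpass', [0.008, 0.09], TR, 6)
    t = np.arange(300)*TR
    raw_signal = np.sin(2*np.pi*0.05*t) + np.sin(2*np.pi*0.4*t) #in-band + out-of-band component
    filtered_signal = apply_filter(b, a, raw_signal)
    return filtered_signal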
########################################################################################
########################################################################################
########################################################################################
def output_stats_figures_pa_ap_compare(cleaned_ap, cleaned_pa):
cleaned_ap_netmat = np.corrcoef(cleaned_ap)
cleaned_pa_netmat = np.corrcoef(cleaned_pa)
plt.figure()
plt.imshow(cleaned_ap_netmat)
plt.colorbar()
plt.title('AP Conn Matrix')
plt.figure()
plt.imshow(cleaned_pa_netmat)
plt.colorbar()
plt.title('PA Conn Matrix')
plt.figure()
corr_dif = cleaned_ap_netmat - cleaned_pa_netmat
plt.imshow(np.abs(corr_dif), vmin=0, vmax=0.1)
plt.title('abs(AP - PA)')
plt.colorbar()
plt.figure()
plt.hist(np.abs(np.reshape(corr_dif, corr_dif.shape[0]**2)), bins = 20)
plt.title('abs(AP - PA) mean = ' + str(np.mean(np.abs(corr_dif))))
ap_arr = cleaned_ap_netmat[np.triu_indices(cleaned_ap_netmat.shape[0], k = 1)]
pa_arr = cleaned_pa_netmat[np.triu_indices(cleaned_pa_netmat.shape[0], k = 1)]
plt.figure()
plt.scatter(ap_arr, pa_arr)
plt.title('AP-PA corr: ' + str(np.corrcoef(ap_arr, pa_arr)[0,1]))
def find_mean_fd(path_to_func):
    #For a functional path (must be pointing to fsaverage output), this
    #function loads the accompanying *desc-confounds_regressors.tsv file,
    #extracts the framewise_displacement column, and returns the mean
    #framewise displacement across the run (the first timepoint, which has
    #no defined displacement, is excluded from the average)
confound_path = path_to_func[:-31] + 'desc-confounds_regressors.tsv'
confound_df = pd.read_csv(confound_path, sep='\t')
partial_confounds = []
temp = confound_df.loc[ : , 'framewise_displacement' ]
fd_arr = np.copy(temp.values)
return np.mean(fd_arr[1:])
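#Hedged usage sketch for find_mean_fd (added example): path_to_func should be
#an fsaverage BOLD path following the naming convention assumed above, so that
#the confounds TSV can be located next to it.
def example_find_mean_fd(path_to_func):
    mean_fd = find_mean_fd(path_to_func)
    print('Mean framewise displacement (mm): ' + str(mean_fd))
    return mean_fd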
def convert_to_upper_arr(np_square_matrix):
"""
#Function that takes a square matrix,
#and outputs its upper triangle without
#the diagonal as an array
"""
inds = np.triu_indices(np_square_matrix.shape[0], k = 1)
return np_square_matrix[inds]
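#Hedged mini-example for convert_to_upper_arr (added example): for a 3x3
#connectivity matrix the upper triangle (excluding the diagonal) has 3 edges.
def example_convert_to_upper_arr():
    mat = np.array([[1.0, 0.2, 0.3],
                    [0.2, 1.0, 0.4],
                    [0.3, 0.4, 1.0]])
    print(convert_to_upper_arr(mat)) #expected output: [0.2 0.3 0.4]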
def demedian_parcellate_func_combine_hemis(lh_func, rh_func, lh_parcel_path, rh_parcel_path):
"""
#Function that takes functional data in the form <num_verts, num_timepoints> for
#both the left and right hemisphere, and averages the functional time series across
#all vertices defined in a given parcel, for every parcel, with the parcels identified
#by a annotation file specified at ?h_parcel_path. The function then returns a combined
#matrix of size <num_parcels, num_timepoints> and <num_labels> for the time series and
    #parcel label names, respectively. The lh parcels will precede the rh parcels in order.
#Prior to taking the average of all vertices, all vertices time signals are divided by their
#median signal intensity. The mean of all these medians within a given parcel is then
#exported with this function as the third argument
#NOTE: THIS ASSUMES THE FIRST PARCEL WILL BE MEDIAL WALL, AND DISREGARDS ANY VERTICES WITHIN
#THAT PARCEL. IF THIS IS NOT THE CASE FOR YOUR PARCELLATION, DO NOT USE THIS FUNCTION.
"""
#Output will be tuple of format [labels, ctab, names]
lh_parcels = nib.freesurfer.io.read_annot(lh_parcel_path)
rh_parcels = nib.freesurfer.io.read_annot(rh_parcel_path)
#Make array to store parcellated data with shape <num_parcels, num_timepoints>
lh_parcellated_data = np.zeros((len(lh_parcels[2]) - 1, lh_func.shape[1]))
rh_parcellated_data = np.zeros((len(rh_parcels[2]) - 1, rh_func.shape[1]))
lh_parcel_medians = np.zeros(len(lh_parcels[2]) - 1)
rh_parcel_medians = np.zeros(len(rh_parcels[2]) - 1)
lh_vertex_medians = np.nanmedian(lh_func, axis=1)
rh_vertex_medians = np.nanmedian(rh_func, axis=1)
lh_vertex_medians[np.where(lh_vertex_medians < 0.001)] = np.nan
rh_vertex_medians[np.where(rh_vertex_medians < 0.001)] = np.nan
lh_adjusted_func = lh_func/lh_vertex_medians[:,None]
rh_adjusted_func = rh_func/rh_vertex_medians[:,None]
#Start with left hemisphere
for i in range(1,len(lh_parcels[2])):
#Find the voxels for the current parcel
vois = np.where(lh_parcels[0] == i)
#Take the mean of all voxels of interest
lh_parcellated_data[i-1, :] = np.nanmean(lh_adjusted_func[vois[0],:], axis = 0)
lh_parcel_medians[i-1] = np.nanmean(lh_vertex_medians[vois[0]])
#Move to right hemisphere
for i in range(1,len(rh_parcels[2])):
vois = np.where(rh_parcels[0] == i)
rh_parcellated_data[i-1, :] = np.nanmean(rh_adjusted_func[vois[0],:], axis = 0)
rh_parcel_medians[i-1] = np.nanmean(rh_vertex_medians[vois[0]])
#Then concatenate parcel labels and parcel timeseries between the left and right hemisphere
#and drop the medial wall from label list
parcellated_data = np.vstack((lh_parcellated_data, rh_parcellated_data))
parcel_labels = lh_parcels[2][1:] + rh_parcels[2][1:]
parcel_medians = np.hstack((lh_parcel_medians, rh_parcel_medians))
#Try to convert the parcel labels from bytes to normal string
for i in range(0, len(parcel_labels)):
parcel_labels[i] = parcel_labels[i].decode("utf-8")
return parcellated_data, parcel_labels, parcel_medians
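#Hedged usage sketch for demedian_parcellate_func_combine_hemis (added example).
#The annot paths below are hypothetical; lh_func/rh_func are expected as
#<num_verts, num_timepoints> arrays, and the parcellation's first label is
#assumed to be the medial wall (see the note in the docstring above).
def example_parcellate(lh_func, rh_func):
    lh_annot = '/data/parcellations/lh.Schaefer2018_400Parcels_7Networks.annot' #hypothetical
    rh_annot = '/data/parcellations/rh.Schaefer2018_400Parcels_7Networks.annot' #hypothetical
    data, labels, medians = demedian_parcellate_func_combine_hemis(lh_func, rh_func, lh_annot, rh_annot)
    print(data.shape, len(labels), medians.shape)
    return data, labels, medians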
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.sum",
"numpy.abs",
"numpy.nanmedian",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"numpy.mean",
"os.path.join",
"numpy.nanmean",
"numpy.multiply",
"numpy.copy",
"numpy.std",
"numpy.power",
"matplotlib.pyplot.imshow",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.reshape",
"nibabel.freesurfer.io.read_annot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xticks",
"scipy.signal.butter",
"numpy.median",
"numpy.corrcoef",
"numpy.asarray",
"numpy.triu_indices",
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"numpy.vstack",
"matplotlib.pyplot.subplot",
"nibabel.load",
"scipy.signal.filtfilt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.where",
"numpy.matmul",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.xlabel"
] |
[((525, 547), 'nibabel.load', 'nib_load', (['path_to_file'], {}), '(path_to_file)\n', (533, 547), True, 'from nibabel import load as nib_load\n'), ((764, 786), 'nibabel.load', 'nib_load', (['path_to_file'], {}), '(path_to_file)\n', (772, 786), True, 'from nibabel import load as nib_load\n'), ((1165, 1188), 'numpy.power', 'np.power', (['(tp1 - xhat)', '(2)'], {}), '(tp1 - xhat, 2)\n', (1173, 1188), True, 'import numpy as np\n'), ((1204, 1227), 'numpy.power', 'np.power', (['(tp2 - xhat)', '(2)'], {}), '(tp2 - xhat, 2)\n', (1212, 1227), True, 'import numpy as np\n'), ((1989, 2015), 'numpy.copy', 'np.copy', (['noisy_time_series'], {}), '(noisy_time_series)\n', (1996, 2015), True, 'import numpy as np\n'), ((2033, 2061), 'numpy.copy', 'np.copy', (['cleaned_time_series'], {}), '(cleaned_time_series)\n', (2040, 2061), True, 'import numpy as np\n'), ((2142, 2169), 'numpy.mean', 'np.mean', (['noisy_data'], {'axis': '(1)'}), '(noisy_data, axis=1)\n', (2149, 2169), True, 'import numpy as np\n'), ((2189, 2215), 'numpy.std', 'np.std', (['noisy_data'], {'axis': '(1)'}), '(noisy_data, axis=1)\n', (2195, 2215), True, 'import numpy as np\n'), ((2236, 2263), 'numpy.mean', 'np.mean', (['clean_data'], {'axis': '(1)'}), '(clean_data, axis=1)\n', (2243, 2263), True, 'import numpy as np\n'), ((2283, 2309), 'numpy.std', 'np.std', (['clean_data'], {'axis': '(1)'}), '(clean_data, axis=1)\n', (2289, 2309), True, 'import numpy as np\n'), ((2390, 2416), 'numpy.zeros', 'np.zeros', (['noisy_data.shape'], {}), '(noisy_data.shape)\n', (2398, 2416), True, 'import numpy as np\n'), ((2437, 2463), 'numpy.zeros', 'np.zeros', (['clean_data.shape'], {}), '(clean_data.shape)\n', (2445, 2463), True, 'import numpy as np\n'), ((2780, 2800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2791, 2800), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dn_noisy_data'], {'aspect': '"""auto"""', 'cmap': '"""binary"""'}), "(dn_noisy_data, aspect='auto', cmap='binary')\n", (2868, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2950), 'matplotlib.pyplot.title', 'plt.title', (['"""Noisy BOLD Data"""'], {}), "('Noisy BOLD Data')\n", (2931, 2950), True, 'import matplotlib.pyplot as plt\n'), ((2955, 2980), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timepoint #"""'], {}), "('Timepoint #')\n", (2965, 2980), True, 'import matplotlib.pyplot as plt\n'), ((2985, 3020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Region # (Arbritrary)"""'], {}), "('Region # (Arbritrary)')\n", (2995, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3025, 3039), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3037, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3099, 3119), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3110, 3119), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3189), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dn_clean_data'], {'aspect': '"""auto"""', 'cmap': '"""binary"""'}), "(dn_clean_data, aspect='auto', cmap='binary')\n", (3144, 3189), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3226), 'matplotlib.pyplot.title', 'plt.title', (['"""Clean BOLD Data"""'], {}), "('Clean BOLD Data')\n", (3207, 3226), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timepoint #"""'], {}), "('Timepoint #')\n", (3241, 3256), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3275), 'matplotlib.pyplot.colorbar', 
'plt.colorbar', ([], {}), '()\n', (3273, 3275), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3295), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3293, 3295), True, 'import matplotlib.pyplot as plt\n'), ((4385, 4429), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['lh_parcel_path'], {}), '(lh_parcel_path)\n', (4413, 4429), True, 'import nibabel as nib\n'), ((4447, 4491), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['rh_parcel_path'], {}), '(rh_parcel_path)\n', (4475, 4491), True, 'import nibabel as nib\n'), ((5417, 5470), 'numpy.vstack', 'np.vstack', (['(lh_parcellated_data, rh_parcellated_data)'], {}), '((lh_parcellated_data, rh_parcellated_data))\n', (5426, 5470), True, 'import numpy as np\n'), ((7325, 7341), 'numpy.zeros', 'np.zeros', (['(7, 7)'], {}), '((7, 7))\n', (7333, 7341), True, 'import numpy as np\n'), ((9335, 9346), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (9343, 9346), True, 'import numpy as np\n'), ((10523, 10541), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(7)', '(1)'], {}), '(7, 1)\n', (10535, 10541), True, 'import matplotlib.pyplot as plt\n'), ((10983, 11037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Volume # (excluding high-motion volumes)"""'], {}), "('Volume # (excluding high-motion volumes)')\n", (10993, 11037), True, 'import matplotlib.pyplot as plt\n'), ((12045, 12081), 'pandas.read_csv', 'pd.read_csv', (['confound_path'], {'sep': '"""\t"""'}), "(confound_path, sep='\\t')\n", (12056, 12081), True, 'import pandas as pd\n'), ((12160, 12184), 'numpy.median', 'np.median', (['global_signal'], {}), '(global_signal)\n', (12169, 12184), True, 'import numpy as np\n'), ((12207, 12237), 'numpy.zeros', 'np.zeros', (['parcel_data.shape[0]'], {}), '(parcel_data.shape[0])\n', (12215, 12237), True, 'import numpy as np\n'), ((12881, 12953), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'network_vals'], {'color': 'network_colors', 'tick_label': 'network_names'}), '(x, network_vals, color=network_colors, tick_label=network_names)\n', (12888, 12953), True, 'import matplotlib.pyplot as plt\n'), ((12962, 12980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (12972, 12980), True, 'import matplotlib.pyplot as plt\n'), ((12985, 13008), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (12995, 13008), True, 'import matplotlib.pyplot as plt\n'), ((13637, 13692), 'os.path.join', 'os.path.join', (['path_to_fs_subject', '"""stats"""', '"""aseg.stats"""'], {}), "(path_to_fs_subject, 'stats', 'aseg.stats')\n", (13649, 13692), False, 'import os\n'), ((13707, 13766), 'os.path.join', 'os.path.join', (['path_to_fs_subject', '"""stats"""', '"""lh.aparc.stats"""'], {}), "(path_to_fs_subject, 'stats', 'lh.aparc.stats')\n", (13719, 13766), False, 'import os\n'), ((13781, 13840), 'os.path.join', 'os.path.join', (['path_to_fs_subject', '"""stats"""', '"""rh.aparc.stats"""'], {}), "(path_to_fs_subject, 'stats', 'rh.aparc.stats')\n", (13793, 13840), False, 'import os\n'), ((22062, 22086), 'numpy.matmul', 'np.matmul', (['XT_X_Neg1', 'XT'], {}), '(XT_X_Neg1, XT)\n', (22071, 22086), True, 'import numpy as np\n'), ((22930, 22956), 'numpy.matmul', 'np.matmul', (['XT_X_Neg1_XT', 'Y'], {}), '(XT_X_Neg1_XT, Y)\n', (22939, 22956), True, 'import numpy as np\n'), ((22971, 23025), 'numpy.matmul', 'np.matmul', (['bad_regressors', 'B[:bad_regressors.shape[1]]'], {}), '(bad_regressors, B[:bad_regressors.shape[1]])\n', (22980, 23025), True, 'import numpy as np\n'), ((25142, 
25164), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'signal'], {}), '(b, a, signal)\n', (25150, 25164), False, 'from scipy.signal import butter, filtfilt\n'), ((25554, 25577), 'numpy.corrcoef', 'np.corrcoef', (['cleaned_ap'], {}), '(cleaned_ap)\n', (25565, 25577), True, 'import numpy as np\n'), ((25602, 25625), 'numpy.corrcoef', 'np.corrcoef', (['cleaned_pa'], {}), '(cleaned_pa)\n', (25613, 25625), True, 'import numpy as np\n'), ((25631, 25643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25641, 25643), True, 'import matplotlib.pyplot as plt\n'), ((25648, 25677), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cleaned_ap_netmat'], {}), '(cleaned_ap_netmat)\n', (25658, 25677), True, 'import matplotlib.pyplot as plt\n'), ((25682, 25696), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25694, 25696), True, 'import matplotlib.pyplot as plt\n'), ((25701, 25728), 'matplotlib.pyplot.title', 'plt.title', (['"""AP Conn Matrix"""'], {}), "('AP Conn Matrix')\n", (25710, 25728), True, 'import matplotlib.pyplot as plt\n'), ((25733, 25745), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25743, 25745), True, 'import matplotlib.pyplot as plt\n'), ((25772, 25801), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cleaned_pa_netmat'], {}), '(cleaned_pa_netmat)\n', (25782, 25801), True, 'import matplotlib.pyplot as plt\n'), ((25806, 25820), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25818, 25820), True, 'import matplotlib.pyplot as plt\n'), ((25825, 25852), 'matplotlib.pyplot.title', 'plt.title', (['"""PA Conn Matrix"""'], {}), "('PA Conn Matrix')\n", (25834, 25852), True, 'import matplotlib.pyplot as plt\n'), ((25857, 25869), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25867, 25869), True, 'import matplotlib.pyplot as plt\n'), ((25979, 26004), 'matplotlib.pyplot.title', 'plt.title', (['"""abs(AP - PA)"""'], {}), "('abs(AP - PA)')\n", (25988, 26004), True, 'import matplotlib.pyplot as plt\n'), ((26009, 26023), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (26021, 26023), True, 'import matplotlib.pyplot as plt\n'), ((26028, 26040), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26038, 26040), True, 'import matplotlib.pyplot as plt\n'), ((26360, 26372), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26370, 26372), True, 'import matplotlib.pyplot as plt\n'), ((26377, 26404), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ap_arr', 'pa_arr'], {}), '(ap_arr, pa_arr)\n', (26388, 26404), True, 'import matplotlib.pyplot as plt\n'), ((27223, 27259), 'pandas.read_csv', 'pd.read_csv', (['confound_path'], {'sep': '"""\t"""'}), "(confound_path, sep='\\t')\n", (27234, 27259), True, 'import pandas as pd\n'), ((27359, 27379), 'numpy.copy', 'np.copy', (['temp.values'], {}), '(temp.values)\n', (27366, 27379), True, 'import numpy as np\n'), ((27396, 27415), 'numpy.mean', 'np.mean', (['fd_arr[1:]'], {}), '(fd_arr[1:])\n', (27403, 27415), True, 'import numpy as np\n'), ((27611, 27658), 'numpy.triu_indices', 'np.triu_indices', (['np_square_matrix.shape[0]'], {'k': '(1)'}), '(np_square_matrix.shape[0], k=1)\n', (27626, 27658), True, 'import numpy as np\n'), ((28866, 28910), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['lh_parcel_path'], {}), '(lh_parcel_path)\n', (28894, 28910), True, 'import nibabel as nib\n'), ((28928, 28972), 'nibabel.freesurfer.io.read_annot', 'nib.freesurfer.io.read_annot', (['rh_parcel_path'], {}), '(rh_parcel_path)\n', (28956, 28972), True, 'import nibabel 
as nib\n'), ((29355, 29384), 'numpy.nanmedian', 'np.nanmedian', (['lh_func'], {'axis': '(1)'}), '(lh_func, axis=1)\n', (29367, 29384), True, 'import numpy as np\n'), ((29409, 29438), 'numpy.nanmedian', 'np.nanmedian', (['rh_func'], {'axis': '(1)'}), '(rh_func, axis=1)\n', (29421, 29438), True, 'import numpy as np\n'), ((30516, 30569), 'numpy.vstack', 'np.vstack', (['(lh_parcellated_data, rh_parcellated_data)'], {}), '((lh_parcellated_data, rh_parcellated_data))\n', (30525, 30569), True, 'import numpy as np\n'), ((30649, 30698), 'numpy.hstack', 'np.hstack', (['(lh_parcel_medians, rh_parcel_medians)'], {}), '((lh_parcel_medians, rh_parcel_medians))\n', (30658, 30698), True, 'import numpy as np\n'), ((1128, 1149), 'numpy.vstack', 'np.vstack', (['(tp1, tp2)'], {}), '((tp1, tp2))\n', (1137, 1149), True, 'import numpy as np\n'), ((1246, 1275), 'numpy.vstack', 'np.vstack', (['(sq_dif1, sq_dif2)'], {}), '((sq_dif1, sq_dif2))\n', (1255, 1275), True, 'import numpy as np\n'), ((4902, 4930), 'numpy.where', 'np.where', (['(lh_parcels[0] == i)'], {}), '(lh_parcels[0] == i)\n', (4910, 4930), True, 'import numpy as np\n'), ((5019, 5055), 'numpy.mean', 'np.mean', (['lh_func[vois[0], :]'], {'axis': '(0)'}), '(lh_func[vois[0], :], axis=0)\n', (5026, 5055), True, 'import numpy as np\n'), ((5146, 5174), 'numpy.where', 'np.where', (['(rh_parcels[0] == i)'], {}), '(rh_parcels[0] == i)\n', (5154, 5174), True, 'import numpy as np\n'), ((5213, 5249), 'numpy.mean', 'np.mean', (['rh_func[vois[0], :]'], {'axis': '(0)'}), '(rh_func[vois[0], :], axis=0)\n', (5220, 5249), True, 'import numpy as np\n'), ((10626, 10640), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[i]'], {}), '(ax[i])\n', (10633, 10640), True, 'import matplotlib.pyplot as plt\n'), ((10809, 10839), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal Intensity"""'], {}), "('Signal Intensity')\n", (10819, 10839), True, 'import matplotlib.pyplot as plt\n'), ((10848, 10913), 'matplotlib.pyplot.title', 'plt.title', (["('Time-Course For All ' + network_names[i] + ' Parcels')"], {}), "('Time-Course For All ' + network_names[i] + ' Parcels')\n", (10857, 10913), True, 'import matplotlib.pyplot as plt\n'), ((19027, 19066), 'os.path.exists', 'os.path.exists', (['folder_for_output_files'], {}), '(folder_for_output_files)\n', (19041, 19066), False, 'import os\n'), ((19085, 19118), 'os.mkdir', 'os.mkdir', (['folder_for_output_files'], {}), '(folder_for_output_files)\n', (19093, 19118), False, 'import os\n'), ((19173, 19227), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""aseg_vols.npy"""'], {}), "(folder_for_output_files, 'aseg_vols.npy')\n", (19185, 19227), False, 'import os\n'), ((19229, 19249), 'numpy.asarray', 'np.asarray', (['aseg_vol'], {}), '(aseg_vol)\n', (19239, 19249), True, 'import numpy as np\n'), ((19263, 19329), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""lh_aseg_surface_areas.npy"""'], {}), "(folder_for_output_files, 'lh_aseg_surface_areas.npy')\n", (19275, 19329), False, 'import os\n'), ((19331, 19358), 'numpy.asarray', 'np.asarray', (['lh_surface_area'], {}), '(lh_surface_area)\n', (19341, 19358), True, 'import numpy as np\n'), ((19372, 19432), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""lh_aseg_volumes.npy"""'], {}), "(folder_for_output_files, 'lh_aseg_volumes.npy')\n", (19384, 19432), False, 'import os\n'), ((19434, 19455), 'numpy.asarray', 'np.asarray', (['lh_volume'], {}), '(lh_volume)\n', (19444, 19455), True, 'import numpy as np\n'), ((19469, 19533), 'os.path.join', 'os.path.join', 
(['folder_for_output_files', '"""lh_aseg_thicknesses.npy"""'], {}), "(folder_for_output_files, 'lh_aseg_thicknesses.npy')\n", (19481, 19533), False, 'import os\n'), ((19535, 19559), 'numpy.asarray', 'np.asarray', (['lh_thickness'], {}), '(lh_thickness)\n', (19545, 19559), True, 'import numpy as np\n'), ((19573, 19639), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""rh_aseg_surface_areas.npy"""'], {}), "(folder_for_output_files, 'rh_aseg_surface_areas.npy')\n", (19585, 19639), False, 'import os\n'), ((19641, 19668), 'numpy.asarray', 'np.asarray', (['rh_surface_area'], {}), '(rh_surface_area)\n', (19651, 19668), True, 'import numpy as np\n'), ((19682, 19742), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""rh_aseg_volumes.npy"""'], {}), "(folder_for_output_files, 'rh_aseg_volumes.npy')\n", (19694, 19742), False, 'import os\n'), ((19744, 19765), 'numpy.asarray', 'np.asarray', (['rh_volume'], {}), '(rh_volume)\n', (19754, 19765), True, 'import numpy as np\n'), ((19779, 19843), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""rh_aseg_thicknesses.npy"""'], {}), "(folder_for_output_files, 'rh_aseg_thicknesses.npy')\n", (19791, 19843), False, 'import os\n'), ((19845, 19869), 'numpy.asarray', 'np.asarray', (['rh_thickness'], {}), '(rh_thickness)\n', (19855, 19869), True, 'import numpy as np\n'), ((20509, 20532), 'numpy.sum', 'np.sum', (['lh_surface_area'], {}), '(lh_surface_area)\n', (20515, 20532), True, 'import numpy as np\n'), ((20535, 20558), 'numpy.sum', 'np.sum', (['rh_surface_area'], {}), '(rh_surface_area)\n', (20541, 20558), True, 'import numpy as np\n'), ((20781, 20848), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""discovery_anat_metrics.npy"""'], {}), "(folder_for_output_files, 'discovery_anat_metrics.npy')\n", (20793, 20848), False, 'import os\n'), ((20850, 20884), 'numpy.asarray', 'np.asarray', (['discovery_metric_array'], {}), '(discovery_metric_array)\n', (20860, 20884), True, 'import numpy as np\n'), ((22034, 22050), 'numpy.matmul', 'np.matmul', (['XT', 'X'], {}), '(XT, X)\n', (22043, 22050), True, 'import numpy as np\n'), ((23957, 23992), 'scipy.signal.butter', 'butter', (['order', 'low'], {'btype': '"""lowpass"""'}), "(order, low, btype='lowpass')\n", (23963, 23992), False, 'from scipy.signal import butter, filtfilt\n'), ((25939, 25955), 'numpy.abs', 'np.abs', (['corr_dif'], {}), '(corr_dif)\n', (25945, 25955), True, 'import numpy as np\n'), ((26221, 26269), 'numpy.triu_indices', 'np.triu_indices', (['cleaned_ap_netmat.shape[0]'], {'k': '(1)'}), '(cleaned_ap_netmat.shape[0], k=1)\n', (26236, 26269), True, 'import numpy as np\n'), ((26304, 26352), 'numpy.triu_indices', 'np.triu_indices', (['cleaned_pa_netmat.shape[0]'], {'k': '(1)'}), '(cleaned_pa_netmat.shape[0], k=1)\n', (26319, 26352), True, 'import numpy as np\n'), ((29462, 29497), 'numpy.where', 'np.where', (['(lh_vertex_medians < 0.001)'], {}), '(lh_vertex_medians < 0.001)\n', (29470, 29497), True, 'import numpy as np\n'), ((29530, 29565), 'numpy.where', 'np.where', (['(rh_vertex_medians < 0.001)'], {}), '(rh_vertex_medians < 0.001)\n', (29538, 29565), True, 'import numpy as np\n'), ((29833, 29861), 'numpy.where', 'np.where', (['(lh_parcels[0] == i)'], {}), '(lh_parcels[0] == i)\n', (29841, 29861), True, 'import numpy as np\n'), ((29950, 29998), 'numpy.nanmean', 'np.nanmean', (['lh_adjusted_func[vois[0], :]'], {'axis': '(0)'}), '(lh_adjusted_func[vois[0], :], axis=0)\n', (29960, 29998), True, 'import numpy as np\n'), ((30033, 30071), 'numpy.nanmean', 
'np.nanmean', (['lh_vertex_medians[vois[0]]'], {}), '(lh_vertex_medians[vois[0]])\n', (30043, 30071), True, 'import numpy as np\n'), ((30161, 30189), 'numpy.where', 'np.where', (['(rh_parcels[0] == i)'], {}), '(rh_parcels[0] == i)\n', (30169, 30189), True, 'import numpy as np\n'), ((30228, 30276), 'numpy.nanmean', 'np.nanmean', (['rh_adjusted_func[vois[0], :]'], {'axis': '(0)'}), '(rh_adjusted_func[vois[0], :], axis=0)\n', (30238, 30276), True, 'import numpy as np\n'), ((30311, 30349), 'numpy.nanmean', 'np.nanmean', (['rh_vertex_medians[vois[0]]'], {}), '(rh_vertex_medians[vois[0]])\n', (30321, 30349), True, 'import numpy as np\n'), ((618, 639), 'numpy.vstack', 'np.vstack', (['gifti_list'], {}), '(gifti_list)\n', (627, 639), True, 'import numpy as np\n'), ((798, 827), 'numpy.asarray', 'np.asarray', (['cifti_img.dataobj'], {}), '(cifti_img.dataobj)\n', (808, 827), True, 'import numpy as np\n'), ((1312, 1347), 'numpy.multiply', 'np.multiply', (['(tp1 - xhat)', '(tp2 - xhat)'], {}), '(tp1 - xhat, tp2 - xhat)\n', (1323, 1347), True, 'import numpy as np\n'), ((9445, 9471), 'numpy.where', 'np.where', (['(network_ids == i)'], {}), '(network_ids == i)\n', (9453, 9471), True, 'import numpy as np\n'), ((10588, 10614), 'numpy.where', 'np.where', (['(network_ids == i)'], {}), '(network_ids == i)\n', (10596, 10614), True, 'import numpy as np\n'), ((10723, 10784), 'matplotlib.pyplot.plot', 'plt.plot', (['parcel_data[in_network[j]]'], {'color': 'network_colors[i]'}), '(parcel_data[in_network[j]], color=network_colors[i])\n', (10731, 10784), True, 'import matplotlib.pyplot as plt\n'), ((10954, 10968), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (10964, 10968), True, 'import matplotlib.pyplot as plt\n'), ((12318, 12343), 'numpy.std', 'np.std', (['parcel_data[i, :]'], {}), '(parcel_data[i, :])\n', (12324, 12343), True, 'import numpy as np\n'), ((20396, 20438), 'numpy.multiply', 'np.multiply', (['lh_surface_area', 'lh_thickness'], {}), '(lh_surface_area, lh_thickness)\n', (20407, 20438), True, 'import numpy as np\n'), ((20448, 20490), 'numpy.multiply', 'np.multiply', (['rh_surface_area', 'rh_thickness'], {}), '(rh_surface_area, rh_thickness)\n', (20459, 20490), True, 'import numpy as np\n'), ((21125, 21194), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""subcortical_region_names.txt"""'], {}), "(folder_for_output_files, 'subcortical_region_names.txt')\n", (21137, 21194), False, 'import os\n'), ((21296, 21362), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""cortical_region_names.txt"""'], {}), "(folder_for_output_files, 'cortical_region_names.txt')\n", (21308, 21362), False, 'import os\n'), ((21469, 21536), 'os.path.join', 'os.path.join', (['folder_for_output_files', '"""discovery_region_names.txt"""'], {}), "(folder_for_output_files, 'discovery_region_names.txt')\n", (21481, 21536), False, 'import os\n'), ((24194, 24231), 'scipy.signal.butter', 'butter', (['order', 'high'], {'btype': '"""highpass"""'}), "(order, high, btype='highpass')\n", (24200, 24231), False, 'from scipy.signal import butter, filtfilt\n'), ((26062, 26106), 'numpy.reshape', 'np.reshape', (['corr_dif', '(corr_dif.shape[0] ** 2)'], {}), '(corr_dif, corr_dif.shape[0] ** 2)\n', (26072, 26106), True, 'import numpy as np\n'), ((7478, 7504), 'numpy.where', 'np.where', (['(network_ids == i)'], {}), '(network_ids == i)\n', (7486, 7504), True, 'import numpy as np\n'), ((7533, 7559), 'numpy.where', 'np.where', (['(network_ids == j)'], {}), '(network_ids == j)\n', (7541, 7559), True, 'import 
numpy as np\n'), ((24465, 24509), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""bandpass"""'}), "(order, [low, high], btype='bandpass')\n", (24471, 24509), False, 'from scipy.signal import butter, filtfilt\n'), ((26169, 26185), 'numpy.abs', 'np.abs', (['corr_dif'], {}), '(corr_dif)\n', (26175, 26185), True, 'import numpy as np\n'), ((26440, 26467), 'numpy.corrcoef', 'np.corrcoef', (['ap_arr', 'pa_arr'], {}), '(ap_arr, pa_arr)\n', (26451, 26467), True, 'import numpy as np\n')]
|
import open3d as o3d
import os
import glob
import numpy as np
import json
class Open3DReconstructionDataset:
def __init__(self, root_dir):
self.root_dir = root_dir
self.len_frame = len(list(glob.glob(os.path.join(root_dir, "color/*.jpg"))))
def get_rgb_paths(self):
open3d_rgb_paths = []
for i in range(0, self.len_frame):
open3d_rgb_paths.append(os.path.join(self.root_dir, "color", '{:06}.jpg'.format(i)))
return open3d_rgb_paths
def get_depth_paths(self):
open3d_depth_paths = []
for i in range(0, self.len_frame):
open3d_depth_paths.append(os.path.join(self.root_dir, "depth", '{:06}.png'.format(i)))
return open3d_depth_paths
def get_trajectory(self):
lines = open(os.path.join(self.root_dir, "scene/trajectory.log"), 'r').readlines()
mats = []
for i in range(0, self.len_frame * 5, 5):
rows = [
[float(t) for t in lines[i + 1].split(" ")],
[float(t) for t in lines[i + 2].split(" ")],
[float(t) for t in lines[i + 3].split(" ")],
[float(t) for t in lines[i + 4].split(" ")]
]
mats.append(np.array(rows))
return mats
def get_intrinsic(self, type = "raw"):
if type == "raw":
return json.load(open(os.path.join(self.root_dir, "camera_intrinsic.json")))
elif type == "open3d":
intrinsics = json.load(open(os.path.join(self.root_dir, "camera_intrinsic.json")))
return o3d.camera.PinholeCameraIntrinsic(
intrinsics["width"],
intrinsics["height"],
intrinsics["intrinsic_matrix"][0],
intrinsics["intrinsic_matrix"][4],
intrinsics["intrinsic_matrix"][6],
intrinsics["intrinsic_matrix"][7],
)
elif type == "matrix":
intrinsics = json.load(open(os.path.join(self.root_dir, "camera_intrinsic.json")))
intrinsic_matrix = np.zeros((3, 3), dtype=np.float64)
fx = intrinsics["intrinsic_matrix"][0]
fy = intrinsics["intrinsic_matrix"][4]
cx = intrinsics["intrinsic_matrix"][6]
cy = intrinsics["intrinsic_matrix"][7]
intrinsic_matrix[0, 0] = fx
intrinsic_matrix[0, 2] = cx
intrinsic_matrix[1, 1] = fy
intrinsic_matrix[1, 2] = cy
intrinsic_matrix[2, 2] = 1
return intrinsic_matrix
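# Hedged usage sketch (added example); the root directory below is hypothetical
# but must follow the layout this class reads: color/*.jpg, depth/*.png,
# scene/trajectory.log and camera_intrinsic.json.
if __name__ == "__main__":
    dataset = Open3DReconstructionDataset("/data/open3d_scene")  # hypothetical path
    print("frames:", len(dataset.get_rgb_paths()))
    print("first camera pose:\n", dataset.get_trajectory()[0])
    print(dataset.get_intrinsic(type="open3d"))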
|
[
"numpy.zeros",
"numpy.array",
"os.path.join",
"open3d.camera.PinholeCameraIntrinsic"
] |
[((1236, 1250), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (1244, 1250), True, 'import numpy as np\n'), ((1576, 1800), 'open3d.camera.PinholeCameraIntrinsic', 'o3d.camera.PinholeCameraIntrinsic', (["intrinsics['width']", "intrinsics['height']", "intrinsics['intrinsic_matrix'][0]", "intrinsics['intrinsic_matrix'][4]", "intrinsics['intrinsic_matrix'][6]", "intrinsics['intrinsic_matrix'][7]"], {}), "(intrinsics['width'], intrinsics['height'],\n intrinsics['intrinsic_matrix'][0], intrinsics['intrinsic_matrix'][4],\n intrinsics['intrinsic_matrix'][6], intrinsics['intrinsic_matrix'][7])\n", (1609, 1800), True, 'import open3d as o3d\n'), ((222, 259), 'os.path.join', 'os.path.join', (['root_dir', '"""color/*.jpg"""'], {}), "(root_dir, 'color/*.jpg')\n", (234, 259), False, 'import os\n'), ((796, 847), 'os.path.join', 'os.path.join', (['self.root_dir', '"""scene/trajectory.log"""'], {}), "(self.root_dir, 'scene/trajectory.log')\n", (808, 847), False, 'import os\n'), ((1376, 1428), 'os.path.join', 'os.path.join', (['self.root_dir', '"""camera_intrinsic.json"""'], {}), "(self.root_dir, 'camera_intrinsic.json')\n", (1388, 1428), False, 'import os\n'), ((2061, 2095), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float64'}), '((3, 3), dtype=np.float64)\n', (2069, 2095), True, 'import numpy as np\n'), ((1502, 1554), 'os.path.join', 'os.path.join', (['self.root_dir', '"""camera_intrinsic.json"""'], {}), "(self.root_dir, 'camera_intrinsic.json')\n", (1514, 1554), False, 'import os\n'), ((1975, 2027), 'os.path.join', 'os.path.join', (['self.root_dir', '"""camera_intrinsic.json"""'], {}), "(self.root_dir, 'camera_intrinsic.json')\n", (1987, 2027), False, 'import os\n')]
|
import numpy as np
x = [0, 1, 2, 3, 4]
y = [5, 6, 7, 8, 9]
z = []
for i, j in zip(x, y):
z.append(i + j)
print(z)
z = np.add(x, y)
print(z)
def my_add(a, b):
return a + b
my_add = np.frompyfunc(my_add, 2, 1)
z = my_add(x, y)
print(z)
print(type(np.add))
print(type(np.concatenate))
print(type(my_add))
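# Note (added, hedged): unlike built-in ufuncs such as np.add, a ufunc created
# with np.frompyfunc returns an object-dtype array, so cast it for numeric work.
z_obj = my_add(x, y)
print(z_obj.dtype)              # object
print(z_obj.astype(int).dtype)  # a plain integer dtype after casting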
|
[
"numpy.add",
"numpy.frompyfunc"
] |
[((123, 135), 'numpy.add', 'np.add', (['x', 'y'], {}), '(x, y)\n', (129, 135), True, 'import numpy as np\n'), ((190, 217), 'numpy.frompyfunc', 'np.frompyfunc', (['my_add', '(2)', '(1)'], {}), '(my_add, 2, 1)\n', (203, 217), True, 'import numpy as np\n')]
|
"""Asynchronized (distributed) cnn training."""
import os # noqa isort:skip
os.environ['OMP_NUM_THREADS'] = '1' # noqa isort:skip
import argparse
import logging
import pprint
import time
from dataclasses import asdict, dataclass
from functools import partial
from pathlib import Path
import numpy as np
from dqn.actor_manager import ActorManagerClient, run_actor_manager_server
from dqn.actor_runner import ActorRunner
from dqn.async_train import AsyncTrainerConfig, async_train
from dqn.cnn.config import CNNConfigBase
from dqn.cnn.datum import Batch
from dqn.cnn.evaluator import run_evaluator_server
from dqn.cnn.learner import Learner
from dqn.cnn.replay_buffer import ReplayBufferServer
from dqn.cnn.run_actor import run_actor
from dqn.evaluator import EvaluatorClient, EvaluatorServerRunner
from dqn.param_distributor import (ParamDistributorClient,
run_param_distributor_server)
from dqn.policy import PolicyParam
from dqn.subprocess_manager import SubprocessManager
from dqn.utils import init_log_dir, init_random_seed
@dataclass
class Config(CNNConfigBase):
"""Configuration of CNN asynchronized training."""
trainer: AsyncTrainerConfig = AsyncTrainerConfig()
def init_actor_runner(config: Config) -> ActorRunner:
"""Initialize actor runner.
Args:
config: Configuration of training.
"""
policy_param = PolicyParam(epsilon=np.ones(config.actor.vector_env_size),
gamma=np.ones(config.actor.vector_env_size) * config.gamma)
actor_runner = ActorRunner(n_processes=config.n_actor_process,
run_actor_func=partial(run_actor, init_policy_param=policy_param, config=config))
return actor_runner
def main_run_actor(config: Config, logger: logging.Logger = logging.getLogger(__name__)) -> None:
"""Run actor forever.
Args:
config: Training configuration.
logger: Logger object.
"""
actor_runner = init_actor_runner(config)
logger.info("Actor runner initialized.")
try:
actor_runner.start()
logger.info("Actor runner start.")
while True:
assert actor_runner.workers_alive, f"Actor runner's worker died."
time.sleep(1)
finally:
logger.info(f"Finalize actor runner")
actor_runner.finalize()
def main(log_dir: Path, enable_actor: bool, config: Config,
logger: logging.Logger = logging.getLogger(__name__)) -> None:
"""Initialize and kick all the components of asynchronized training.
Args:
log_dir: Directory to put log data.
config: Training configuration.
logger: Logger object.
"""
# show configuration
logger.info(pprint.pformat(asdict(config)))
# init config
if not enable_actor:
logger.warning('enable_actor is false. You should run actor in other process')
config.n_actor_process = 0 # disable actor
# NOTE: All child processes should be forked before init gRPC channel (https://github.com/grpc/grpc/issues/13873)
subprocess_manager = SubprocessManager()
# init actor manager
subprocess_manager.append_worker(
partial(run_actor_manager_server,
url=config.actor_manager_url,
gamma=config.gamma,
config=config.trainer.actor_manager))
# init param distributor
subprocess_manager.append_worker(partial(run_param_distributor_server, url=config.param_distributor_url))
# init evaluator
evaluator_runner = EvaluatorServerRunner(run_evaluator_server_func=partial(run_evaluator_server, config=config))
# may init actor
actor_runner = init_actor_runner(config)
# init replay buffer
replay_buffer_server = ReplayBufferServer(config=config)
# init learner
learner = Learner(config=config)
try:
def check_subprocess_func():
"""Helper function to check child processes."""
            assert subprocess_manager.workers_alive, 'Subprocess manager worker has died'
            assert evaluator_runner.workers_alive, 'Evaluator runner worker has died'
            assert actor_runner.workers_alive, 'Actor runner worker has died'
check_subprocess_func()
# init gRPC clients
evaluator_runner.start()
actor_runner.start()
evaluator_client = EvaluatorClient(url=config.evaluator_url)
param_distributor_client = ParamDistributorClient(url=config.param_distributor_url)
actor_manager_client = ActorManagerClient(url=config.actor_manager_url)
# run train
async_train(log_dir=log_dir,
check_subprocess_func=check_subprocess_func,
actor_manager_client=actor_manager_client,
evaluator_client=evaluator_client,
param_distributor_client=param_distributor_client,
replay_buffer_server=replay_buffer_server,
learner=learner,
batch_from_sample=Batch.from_buffer_sample,
config=config.trainer)
finally:
replay_buffer_server.finalize()
subprocess_manager.finalize()
evaluator_runner.finalize()
actor_runner.finalize()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Asynchronized CNN-DQN training.")
parser.add_argument('log_dir', type=Path, help="Directory to put log and snapshots")
parser.add_argument('--log_level',
type=str,
choices=('debug', 'info', 'error', 'critical'),
default='info',
help="Logging level")
parser.add_argument('--disable_actor', action='store_true', help="Disable actor module or not.")
parser.add_argument('--run_only_actor', action='store_true', help="Running only actor module or not.")
parser.add_argument('--config', type=Path, help="Path of DQN configuration YAML file.")
parser.add_argument('--seed', type=int, default=1, help="Random seed value.")
args = parser.parse_args()
# init configuration
config = Config.load_from_yaml(args.config) if args.config else Config()
# init log_dir
log_handlers = [logging.StreamHandler()]
if not args.run_only_actor:
args.log_dir.mkdir(exist_ok=False, parents=False)
init_log_dir(args.log_dir, config)
log_handlers.append(logging.FileHandler(args.log_dir / 'main.log'))
# init logger
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='[%(asctime)s %(name)s %(levelname)s] %(message)s',
datefmt='%Y/%m/%d %I:%M:%S',
handlers=log_handlers)
# init random seed
init_random_seed(args.seed)
# start training or exploration
if args.run_only_actor:
assert not args.disable_actor, 'run_actor should be specified without disable_actor.'
main_run_actor(config)
else:
main(args.log_dir, not args.disable_actor, config)
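# Hedged launch examples (added). The module's filename is not shown in this
# snippet, so "train_cnn_async.py" below is a placeholder; the flags match the
# argparse definitions above.
#   python train_cnn_async.py ./logs/run01 --config dqn_config.yaml --seed 1
#   python train_cnn_async.py ./logs/run01 --run_only_actor --config dqn_config.yaml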
|
[
"functools.partial",
"dqn.async_train.AsyncTrainerConfig",
"dqn.cnn.replay_buffer.ReplayBufferServer",
"argparse.ArgumentParser",
"dqn.evaluator.EvaluatorClient",
"dqn.actor_manager.ActorManagerClient",
"dqn.async_train.async_train",
"dqn.cnn.learner.Learner",
"dqn.param_distributor.ParamDistributorClient",
"logging.StreamHandler",
"numpy.ones",
"logging.FileHandler",
"time.sleep",
"dqn.utils.init_log_dir",
"dqn.subprocess_manager.SubprocessManager",
"dataclasses.asdict",
"dqn.utils.init_random_seed",
"logging.getLogger"
] |
[((1198, 1218), 'dqn.async_train.AsyncTrainerConfig', 'AsyncTrainerConfig', ([], {}), '()\n', (1216, 1218), False, 'from dqn.async_train import AsyncTrainerConfig, async_train\n'), ((1804, 1831), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1821, 1831), False, 'import logging\n'), ((2441, 2468), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2458, 2468), False, 'import logging\n'), ((3086, 3105), 'dqn.subprocess_manager.SubprocessManager', 'SubprocessManager', ([], {}), '()\n', (3103, 3105), False, 'from dqn.subprocess_manager import SubprocessManager\n'), ((3747, 3780), 'dqn.cnn.replay_buffer.ReplayBufferServer', 'ReplayBufferServer', ([], {'config': 'config'}), '(config=config)\n', (3765, 3780), False, 'from dqn.cnn.replay_buffer import ReplayBufferServer\n'), ((3815, 3837), 'dqn.cnn.learner.Learner', 'Learner', ([], {'config': 'config'}), '(config=config)\n', (3822, 3837), False, 'from dqn.cnn.learner import Learner\n'), ((5301, 5371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Asynchronized CNN-DQN training."""'}), "(description='Asynchronized CNN-DQN training.')\n", (5324, 5371), False, 'import argparse\n'), ((6784, 6811), 'dqn.utils.init_random_seed', 'init_random_seed', (['args.seed'], {}), '(args.seed)\n', (6800, 6811), False, 'from dqn.utils import init_log_dir, init_random_seed\n'), ((3178, 3303), 'functools.partial', 'partial', (['run_actor_manager_server'], {'url': 'config.actor_manager_url', 'gamma': 'config.gamma', 'config': 'config.trainer.actor_manager'}), '(run_actor_manager_server, url=config.actor_manager_url, gamma=\n config.gamma, config=config.trainer.actor_manager)\n', (3185, 3303), False, 'from functools import partial\n'), ((3415, 3486), 'functools.partial', 'partial', (['run_param_distributor_server'], {'url': 'config.param_distributor_url'}), '(run_param_distributor_server, url=config.param_distributor_url)\n', (3422, 3486), False, 'from functools import partial\n'), ((4367, 4408), 'dqn.evaluator.EvaluatorClient', 'EvaluatorClient', ([], {'url': 'config.evaluator_url'}), '(url=config.evaluator_url)\n', (4382, 4408), False, 'from dqn.evaluator import EvaluatorClient, EvaluatorServerRunner\n'), ((4444, 4500), 'dqn.param_distributor.ParamDistributorClient', 'ParamDistributorClient', ([], {'url': 'config.param_distributor_url'}), '(url=config.param_distributor_url)\n', (4466, 4500), False, 'from dqn.param_distributor import ParamDistributorClient, run_param_distributor_server\n'), ((4532, 4580), 'dqn.actor_manager.ActorManagerClient', 'ActorManagerClient', ([], {'url': 'config.actor_manager_url'}), '(url=config.actor_manager_url)\n', (4550, 4580), False, 'from dqn.actor_manager import ActorManagerClient, run_actor_manager_server\n'), ((4610, 4956), 'dqn.async_train.async_train', 'async_train', ([], {'log_dir': 'log_dir', 'check_subprocess_func': 'check_subprocess_func', 'actor_manager_client': 'actor_manager_client', 'evaluator_client': 'evaluator_client', 'param_distributor_client': 'param_distributor_client', 'replay_buffer_server': 'replay_buffer_server', 'learner': 'learner', 'batch_from_sample': 'Batch.from_buffer_sample', 'config': 'config.trainer'}), '(log_dir=log_dir, check_subprocess_func=check_subprocess_func,\n actor_manager_client=actor_manager_client, evaluator_client=\n evaluator_client, param_distributor_client=param_distributor_client,\n replay_buffer_server=replay_buffer_server, learner=learner,\n batch_from_sample=Batch.from_buffer_sample, 
config=config.trainer)\n', (4621, 4956), False, 'from dqn.async_train import AsyncTrainerConfig, async_train\n'), ((6248, 6271), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (6269, 6271), False, 'import logging\n'), ((6371, 6405), 'dqn.utils.init_log_dir', 'init_log_dir', (['args.log_dir', 'config'], {}), '(args.log_dir, config)\n', (6383, 6405), False, 'from dqn.utils import init_log_dir, init_random_seed\n'), ((1408, 1445), 'numpy.ones', 'np.ones', (['config.actor.vector_env_size'], {}), '(config.actor.vector_env_size)\n', (1415, 1445), True, 'import numpy as np\n'), ((1651, 1716), 'functools.partial', 'partial', (['run_actor'], {'init_policy_param': 'policy_param', 'config': 'config'}), '(run_actor, init_policy_param=policy_param, config=config)\n', (1658, 1716), False, 'from functools import partial\n'), ((2240, 2253), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2250, 2253), False, 'import time\n'), ((2742, 2756), 'dataclasses.asdict', 'asdict', (['config'], {}), '(config)\n', (2748, 2756), False, 'from dataclasses import asdict, dataclass\n'), ((3581, 3625), 'functools.partial', 'partial', (['run_evaluator_server'], {'config': 'config'}), '(run_evaluator_server, config=config)\n', (3588, 3625), False, 'from functools import partial\n'), ((6434, 6480), 'logging.FileHandler', 'logging.FileHandler', (["(args.log_dir / 'main.log')"], {}), "(args.log_dir / 'main.log')\n", (6453, 6480), False, 'import logging\n'), ((1484, 1521), 'numpy.ones', 'np.ones', (['config.actor.vector_env_size'], {}), '(config.actor.vector_env_size)\n', (1491, 1521), True, 'import numpy as np\n')]
|
# SceneManager module: tracks the status of the pod as it moves through the hyperspace maze
import sys
import numpy as np
import math
import random
import time
from enum import Enum
from chapters.wall.hyperspace_helper.Segment import Segment
from chapters.wall.hyperspace_helper.AssetLibrary import AssetLibrary
from chapters.wall.hyperspace_helper.RingAssembly import RingAssembly
from chapters.wall.hyperspace_helper.Curve import Curve
from chapters.wall.hyperspace_helper.Maze import Maze
class SCENE_STATE(Enum):
INTRO=0 #pod exiting space ship
OUTRO=1 #pod entering white success
DEATH=2 #pod entering black death
PLAY=3 #normal operation (birth and hot controls)
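#Hedged illustration (added example, not part of the original class): the pod
#offset clamp used later in SceneManager.__updatePodPosition keeps the pod
#inside a disc of radius MAX_POD_DISPLACEMENT by rescaling the offset vector
#whenever its length exceeds that radius.
def example_clamp_pod_offset(pod_pos,max_displacement=3.5):
	scale=np.linalg.norm(pod_pos)
	if(scale>max_displacement):
		pod_pos=pod_pos*max_displacement/scale
	return pod_pos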
#precon: first segment is id=0 (where to look in file) and is straight
# (ensure camera is lined up with pod after intro sequence)
#strongly advised: the last segment before death and outro should be straight to allow for extrapolation
#track the status of the pod through the maze
class SceneManager:
MAX_POD_DISPLACEMENT=3.5 #2.8 #maximum distance the pod can be from the center of the playfield
# ~4x of Segment.BRANCH_DISPLACEMENT_DISTANCE
POD_TRANSLATION_PER_SECOND=10.0 #rate of pod movement per second
POD_ROTATION_DEGREES_PER_SECOND=70.0 #rate of rotation animatic when pod is translating
POD_MAX_ROTATION=[6.0,12.0] #x-translation, y-translation, degrees
INTRO_SECONDS=1 #number of seconds to wait on start for cut scene to play
OUTRO_SECONDS=1
DEATH_SECONDS=1
CAMERA_LAG_DISTANCE=12 #pi3d distance unit between camera and pod
def __init__(self):
		self.np=np #store a reference to numpy on the instance; the module-level name raised an UnboundLocalError in some methods, so self.np is used there instead
def clean(self,pi3d,display_3d,camera_3d):
#variables
self.pi3d=pi3d
		#note: referencing the module-level np name raised an UnboundLocalError here, so the reference stored in __init__ (self.np) is used below
self.pod_offset=self.np.array([0.0,0.0]) #x,y offset
self.pod_offset_rate=self.np.array([0.0,0.0]) #Z,X rotation angles for translation animatic (rotate right to translate right)
self.scene={'state':SCENE_STATE.INTRO,'start_seconds':0,'end_seconds':self.INTRO_SECONDS,'ratio':0.0}
self.life=0
self.level_start_time_seconds=0
self.segment_list=[]
self.pod_segment=None
self.camera_segment=None
self.last_key=-1 #delete from final program - used for smoothing pi3d keyboard inputs
#playfield
self.display = display_3d #self.pi3d.Display.create(background=(0.0, 0.0, 0.0, 0.0))
self.camera = camera_3d #self.pi3d.Camera()
self.light = self.pi3d.Light(lightpos=(10,-10,-7),lightcol=(0.75,0.75,0.45), lightamb=(0.1,0.1,0.42),is_point=False)
#self.keys = self.pi3d.Keyboard() #TODO: remove later...
#objects
self.asset_library=AssetLibrary(self.pi3d)
self.pod=self.asset_library.pod_frame.shallow_clone() #note: all children remain intact
self.maze=Maze()
#debug testing
self.maze.clean()
print(self.maze.getSegmentsBetweenNodes(100,91))
print(self.maze.getSegmentsBetweenNodes(91,100))
print(self.maze.getSegmentsBetweenNodes(91,91))
#print(maze.linear_definition)
#print(maze.branch_definition)
#print(maze.segment_definition)
#print(maze.debris_definition)
segments=self.maze.getSegmentIdAfter(2,3)
print("SceneManager.clean: Next segment: ",segments)
segments=segments[0]
temp2=self.maze.getPopulatedSegment(segments["segment_id"],segments["is_forward"],segments["is_branch"],self.asset_library,self.np.array([0,0,0]),self.np.eye(3),0)
print("SceneManager.clean: populated: ",temp2)
temp3=self.maze.getFirstPopulatedSegment(self.asset_library,0)
print("SceneManager.clean: first segment: ",temp3)
def __getRingCount(self):
count=0
for segment in self.segment_list:
count+=len(segment.ring_assembly_list)
return count
#update list of parameterized arcs
def __updateSegmentQueue(self,level_elapsed_time_seconds):
#if any segments in list are u<0 for camera (already completely used), then dispose of segment
#if any segment has no succesor and the [end time - current time] < queue_time_depth
# then get and append successor
#initialization
if(len(self.segment_list)==0):
segment_joint=self.getSegmentAfter(None)
first_segment=segment_joint['next_segment'][0]
self.segment_list.append(first_segment)
self.pod_segment=first_segment
self.camera_segment=first_segment
#append segments to end when the end is near
segment_index=0
while(segment_index<len(self.segment_list)): #keep adding segments to end when needed
segment=self.segment_list[segment_index]
end_time=segment.durationSeconds()+segment.start_time_seconds
cut_off_time=level_elapsed_time_seconds+RingAssembly.PRE_RENDER_SECONDS
#if(level_elapsed_time_seconds<7):
# print('query: '+str(end_time)+"<"+str(cut_off_time))
# print('size: '+str(len(self.segment_list)))
if(end_time<cut_off_time and segment.hasTraceabilityTo(self.pod_segment)):
if(segment.is_branch):
if(segment.successor[1] is None):
segment_joint=self.getSegmentAfter(segment)
for itr in range(2):
seg_id=itr+1
self.segment_list.append(segment_joint['next_segment'][seg_id])
segment.successor[seg_id]=segment_joint['next_segment'][seg_id]
segment_joint['next_segment'][seg_id].predecessor=segment
else:
if(segment.successor[0] is None):
segment_joint=self.getSegmentAfter(segment)
self.segment_list.append(segment_joint['next_segment'][0])
segment.successor[0]=segment_joint['next_segment'][0]
segment_joint['next_segment'][0].predecessor=segment
segment_index+=1
#remove old segments
camera_time=self.__getCameraTime(level_elapsed_time_seconds)
for segment_index in reversed(range(len(self.segment_list))): #traverse backward to allow for deletion
segment=self.segment_list[segment_index]
ratio=segment.getRatio(camera_time)
if(ratio>1):
if(not segment==self.camera_segment):
segment=self.segment_list.pop(segment_index) #delete stale segments
segment.dispose()
    #update graphical rotation of rings, debris, etc
def __updateSegments(self,level_elapsed_time_seconds):
for segment in self.segment_list:
segment.update(level_elapsed_time_seconds,self.light)
#assumes input for 'k' as 4-element bool np.array
# in the following order: [NORTH,WEST,SOUTH,EAST], where True is an active user input command
def __updatePodPosition(self,k,delta_time):
#position
pod_target=np.array([0,0])
is_x=False
is_y=False
IS_AIRPLANE_CONTROLS=True #True is up joystick means down motion
#if(k==ord('a')):
#pod_target[0]=-1
#is_x=True
#if(k==ord('d')):
#pod_target[0]=1
#is_x=True
#if(k==ord('s')):
#pod_target[1]=1
#is_y=True
#if(k==ord('w')):
#pod_target[1]=-1
#is_y=True
if(k[1]):
pod_target[0]=-1
is_x=True
if(k[3]):
pod_target[0]=1
is_x=True
if(k[2]):
if(IS_AIRPLANE_CONTROLS):
pod_target[1]=-1
else:
pod_target[1]=1
is_y=True
if(k[0]):
if(IS_AIRPLANE_CONTROLS):
pod_target[1]=1
else:
pod_target[1]=-1
is_y=True
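        #scale combined X/Y input by ~0.707 (1/sqrt(2)) so diagonal motion moves the pod
        #at the same speed as motion along a single axis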
delta_pod=pod_target*self.POD_TRANSLATION_PER_SECOND*delta_time*(0.707 if (is_x and is_y) else 1.0)
pod_pos=self.pod_offset+delta_pod
scale=np.linalg.norm(pod_pos)
if(scale>self.MAX_POD_DISPLACEMENT):
pod_pos=pod_pos*self.MAX_POD_DISPLACEMENT/scale
self.pod_offset=pod_pos
#rotation animatic
x_rate=self.pod_offset_rate[0] #x-translation, Z-rotation
delta_x=self.POD_ROTATION_DEGREES_PER_SECOND*delta_time
#if(k==ord('d')):#right
#delta_x=-delta_x
#elif(k==ord('a')):#left
#pass
if(k[3]):#right
delta_x=-delta_x
elif(k[1]):#left
pass
else:#neither, return to center
if(x_rate<0): delta_x=min(-x_rate,delta_x)
elif(x_rate>0): delta_x=max(-x_rate,-delta_x)
else: delta_x=0
self.pod_offset_rate[0]+=delta_x
y_rate=self.pod_offset_rate[1] #y-translation, Y-rotation
delta_y=self.POD_ROTATION_DEGREES_PER_SECOND*delta_time
#if(k==ord('s')):#up
#delta_y=-delta_y
#elif(k==ord('w')):#down
#pass
if(k[0]):#up
if(IS_AIRPLANE_CONTROLS):
pass
else:
delta_y=-delta_y
elif(k[2]):#down
if(IS_AIRPLANE_CONTROLS):
delta_y=-delta_y
else:
pass
else:#neither, return to center
if(y_rate<0): delta_y=min(-y_rate,delta_y)
elif(y_rate>0): delta_y=max(-y_rate,-delta_y)
else: delta_y=0
self.pod_offset_rate[1]+=delta_y
for itr in range(2): #bound rotation
self.pod_offset_rate[itr]=max(self.pod_offset_rate[itr],-self.POD_MAX_ROTATION[itr])
self.pod_offset_rate[itr]=min(self.pod_offset_rate[itr],self.POD_MAX_ROTATION[itr])
def __updateProps(self,level_elapsed_time_seconds):
prop_orientation=self.getPropOrientation(level_elapsed_time_seconds)
#light
light_pos=prop_orientation['light']['position']
self.light.position((light_pos[0],light_pos[1],light_pos[2]))
#pod
pod_pos=prop_orientation['pod']['position']
pod_rot=prop_orientation['pod']['rotation_euler']
self.pod.children[0].rotateToX(self.pod_offset_rate[1])
self.pod.children[0].rotateToZ(self.pod_offset_rate[0])
self.pod.position(pod_pos[0],pod_pos[1],pod_pos[2])
self.pod.rotateToX(pod_rot[0])
self.pod.rotateToY(pod_rot[1])
self.pod.rotateToZ(pod_rot[2])
self.pod.set_light(self.light)
#TO DO make recursive set_light method for pod
self.pod.children[0].set_light(self.light)
self.pod.children[0].children[0].set_light(self.light)
self.pod.children[0].children[0].children[0].set_light(self.light)
#camera
camera_pos=prop_orientation['camera']['position']
camera_rot=prop_orientation['camera']['rotation_euler']
self.camera.reset()
self.camera.position(camera_pos)
# print("SceneManager.__updateProps: camera_pos:",camera_pos)
self.camera.rotate(camera_rot[0],camera_rot[1],camera_rot[2])
def __drawSegments(self):
for segment in self.segment_list:
segment.draw()
def __updatePodSegment(self,level_elapsed_time_seconds):
while(self.pod_segment.getRatio(level_elapsed_time_seconds)>1):
self.pod_segment=self.pod_segment.getSuccessor()
if(self.pod_segment.is_branch): #when entering a branch, decide which path to take
is_left=self.pod_offset[0]<0
self.pod_segment.decideBranch(level_elapsed_time_seconds,is_left)
#print('is_left: ',self.pod_segment.isLeft())
self.pod_orientation=self.pod_segment.getOrientationAtTime(level_elapsed_time_seconds)
def __updateCameraSegment(self,level_elapsed_time_seconds):
camera_time=self.__getCameraTime(level_elapsed_time_seconds)
while(self.camera_segment.getRatio(camera_time)>1):
self.camera_segment=self.camera_segment.getSuccessor()
self.camera_orientation=self.camera_segment.getOrientationAtTime(camera_time)
def __getCameraTime(self,level_elapsed_time_seconds):
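        #convert the fixed camera lag distance into a time offset using the constant
        #forward speed (ring spacing * rings per second); the camera samples the path
        #that far behind the pod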
camera_lag_time=self.CAMERA_LAG_DISTANCE/(Segment.DISTANCE_BETWEEN_RINGS*Segment.RINGS_PER_SECOND)
camera_time=level_elapsed_time_seconds-camera_lag_time
return camera_time
def getSegmentAfter(self,prev_segment):
if(True): #create per config file
return self.getSegmentAfter_config(prev_segment)
else: #create randomly
return self.getSegmentAfter_random(prev_segment)
    #note: it is assumed the calling method will populate the returned segment's predecessor
def getSegmentAfter_config(self,prev_segment):
print("SceneManager.getSegmentAfter_config: prev_segment: ",prev_segment)
if(prev_segment is None):
next_segment=self.maze.getFirstPopulatedSegment(self.asset_library,0)#if no segment provided, return the first one
#precon: time is measured in seconds from the start of the current life
out_segment=[next_segment,None,None]
else:
end_point=prev_segment.getEndPoints()
prev_id=prev_segment.segment_id
prev2_id=-100 if prev_segment.predecessor is None else prev_segment.predecessor.segment_id #precon: the id of the segment before the first segment needs to be -100
next_segment_ids=self.maze.getSegmentIdAfter(prev2_id,prev_id)
print("SceneManager.getSegmentAfter_config: next_segment_ids: ",next_segment_ids)
was_branch=len(next_segment_ids)>1
out_segment=[None] if was_branch else [] #goal is to make either [None,Segment,Segment] for a branch, or [Segment,None,None] for straight
for itr in range(2 if was_branch else 1):#precon: only two paths come out of any one branch node
next_segment_def=next_segment_ids[itr]
next_segment=self.maze.getPopulatedSegment(next_segment_def["segment_id"],
next_segment_def["is_forward"],next_segment_def["is_branch"],
self.asset_library,end_point[itr]["position"],
end_point[itr]["rotation_matrix"],end_point[itr]["timestamp_seconds"])
out_segment.append(next_segment)
if(not was_branch):
out_segment.append(None)
out_segment.append(None)
return {'prev_segment':prev_segment,'next_segment':out_segment}
#TODO: is currently a placeholder for Maze...
#given a segment ID, return the parameters needed for the next segment
#input:
#Segment
#output:
#{'previous_segment':Segment,'next_segment':[Segment,Segment,Segment]}
# where previous_segment is the input
# and one of the following is True: 'next_segment'[0] is None OR 'next_segment'[1:2] is None
def getSegmentAfter_random(self,segment):
if(segment is None):
#return first segment
#TODO load from file
previous_segment=None
ring_count=7
segment=Segment(self.asset_library,False,np.array([0,0,0]),np.identity(3),0,
120,60,ring_count)
for ring_id in range(ring_count):
u=ring_id/ring_count
segment.addRingAssembly(self.asset_library,u,
ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
next_segment=[segment,None,None]
else:
#this_segment_id=segment.segment_id
previous_segment=segment
ring_count=[2+random.randint(0,3),2+random.randint(0,3)]
curvature=[random.randint(0,30),random.randint(0,30)]
orientation=[random.randint(0,360),random.randint(0,360)]
was_branch=segment.is_branch #input segmenet was a branch
was_branch2=segment.predecessor is None or segment.predecessor.is_branch
#print('was_branch: ',was_branch)
is_branch=[random.randint(0,100)<20,random.randint(0,100)<20] #next segment is a branch
if(was_branch or was_branch2):
is_branch=[False,False]
#is_branch=[False,False]
end_point=segment.getEndPoints()
if(was_branch):
next_segment=[None]
for itr in range(2):
this_segment=Segment(self.asset_library,is_branch[itr],end_point[itr]['position'],
end_point[itr]['rotation_matrix'],end_point[itr]['timestamp_seconds'],
curvature[itr],orientation[itr],ring_count[itr])
next_segment.append(this_segment)
if(not is_branch[itr]):
for ring_id in range(ring_count[itr]):
u=ring_id/ring_count[itr]
this_segment.addRingAssembly(self.asset_library,u,
ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
else:
next_segment=[]
this_segment=Segment(self.asset_library,is_branch[0],end_point[0]['position'],
end_point[0]['rotation_matrix'],end_point[0]['timestamp_seconds'],
curvature[0],orientation[0],ring_count[0])
next_segment.append(this_segment)
next_segment.append(None)
next_segment.append(None)
if(not is_branch[0]):
for ring_id in range(ring_count[0]):
u=ring_id/ring_count[0]
this_segment.addRingAssembly(self.asset_library,u,
ring_rotation_rate=RingAssembly.RING_ROTATION_DEGREES_PER_SECOND,
debris_rotation_rate=RingAssembly.DEBRIS_ROTATION_DEGREES_PER_SECOND)
#return next segment
return {'prev_segment':previous_segment,'next_segment':next_segment}
#return the start node, end node, progress and current segment_id
#return a pointer to the segment where the pod is currently located
#return: {"node_from":X,"node_to":Y,"ratio":Z} #ratio between nodes
def getPodStatus(self):
pass
#return the segment where the camera is currently located
def getCameraStatus(self):
pass
#dict with keys:
# pod
# camera
# light
# sub-keys:
# position
# rotation_matrix
# rotation_euler
#note: rotations have not been implemented for light
def getPropOrientation(self,level_elapsed_time_seconds):
#pod
pod_orientation=self.pod_segment.getOrientationAtTime(level_elapsed_time_seconds)
pod_position=pod_orientation["position"]
x_axis=pod_orientation["rotation_matrix"][0,:]
y_axis=pod_orientation["rotation_matrix"][1,:]
pod_position+=x_axis*self.pod_offset[0]
pod_position+=y_axis*self.pod_offset[1]
pod_orientation["position"]=pod_position
#camera
camera_orientation=self.camera_segment.getOrientationAtTime(self.__getCameraTime(level_elapsed_time_seconds))
x_axis=camera_orientation["rotation_matrix"][0,:]
y_axis=camera_orientation["rotation_matrix"][1,:]
position_camera=camera_orientation["position"]
camera_movement_scale=0.5
position_camera+=x_axis*self.pod_offset[0]*camera_movement_scale
position_camera+=y_axis*self.pod_offset[1]*camera_movement_scale
camera_orientation["position"]=position_camera
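        #per the axis labels passed below, orient the camera so its 'z' axis points from
        #the camera toward the pod while its 'y' axis follows the segment's y axis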
camera_orientation_to_target=Curve.euler_angles_from_vectors(pod_position-position_camera,'z',y_axis,'y')
camera_orientation["rotation_euler"]=camera_orientation_to_target["rotation_euler"]
camera_orientation["rotation_matrix"]=camera_orientation_to_target["rotation_matrix"]
#light
light_vect=np.array([10,-10,7])
light_vect = np.dot(camera_orientation["rotation_matrix"], light_vect) * [1.0, 1.0, -1.0] #https://github.com/tipam/pi3d/issues/220
light_orientation={'position':light_vect}
#laser...
return {'pod':pod_orientation,'camera':camera_orientation,'light':light_orientation}
#assumes inputs for navigation_joystick,camera_joystick,laser_joystick as 4-element bool np.arrays
# in the following order: [NORTH,WEST,SOUTH,EAST], where True is an active user input command
def update(self,this_frame_number,this_frame_elapsed_seconds,previous_frame_elapsed_seconds,packets,
navigation_joystick,camera_joystick,laser_joystick,is_fire_laser):
scene_state=self.scene['state']
level_elapsed_time_seconds=this_frame_elapsed_seconds-self.level_start_time_seconds
scene_start=self.scene['start_seconds'] #seconds
scene_end=self.scene['end_seconds']
delta_time=this_frame_elapsed_seconds-previous_frame_elapsed_seconds #time between frames
        #advance from previous state to current state
if(scene_end>=0 and level_elapsed_time_seconds>=scene_end):
if(scene_state==SCENE_STATE.INTRO or scene_state==SCENE_STATE.DEATH):
self.__setSceneState(SCENE_STATE.PLAY,this_frame_elapsed_seconds)
#make decisions based on current state
if(scene_end<=scene_start):
ratio=0.0
else:
ratio=(level_elapsed_time_seconds-scene_start)/(scene_end-scene_start)
self.scene['ratio']=ratio
if(scene_state==SCENE_STATE.INTRO):
pass #update pod, space ship, hyperspace effects
elif(scene_state==SCENE_STATE.OUTRO): #when transitioning TO outro, fade out music
if(ratio>=1):
self.is_done=True #stop music in exitChapter()
pass #update sphere of white
elif(scene_state==SCENE_STATE.DEATH):
pass #update sphere of black
else: #CUT_SCENE.PLAY
#if(this_frame_number%30==0):
# print('ring count: '+str(self.__getRingCount()))
self.__updateSegmentQueue(level_elapsed_time_seconds)
self.__updatePodSegment(level_elapsed_time_seconds)
self.__updateCameraSegment(level_elapsed_time_seconds)
#user input
#buttons=[]
#k=0
#while k>=0:
#k = sm.keys.read()
#buttons.append(k)
#k=max(buttons)
#temp=k
#is_smooth_motion_enabled=True
#if(is_smooth_motion_enabled):
#k=max(k,self.last_key)
#self.last_key=temp
k=-1 #temp disconnect from player controls
self.__updatePodPosition(navigation_joystick,delta_time)
self.__updateProps(level_elapsed_time_seconds)
self.__updateSegments(level_elapsed_time_seconds)
#if k==27:
# self.is_done=True
            #TODO: collisions
#update pod, camera, light, rings, branches, laser, asteroids...
def draw(self):
scene_state=self.scene['state']
ratio=self.scene['ratio']
if(scene_state==SCENE_STATE.INTRO):
self.pod.draw()
elif(scene_state==SCENE_STATE.OUTRO):
pass
elif(scene_state==SCENE_STATE.DEATH):
pass
else:
self.__drawSegments()#standard play scene
self.pod.draw()
#supported state transitions:
#intro to play
#play to death
#play to outro
#death to play
def __setSceneState(self,to_scene_state,this_frame_elapsed_seconds):
from_scene_state=self.scene['state']
level_elapsed_seconds=this_frame_elapsed_seconds-self.level_start_time_seconds
play_scene={'state':SCENE_STATE.PLAY,'start_seconds':level_elapsed_seconds,'end_seconds':-1,'ratio':0.0}
out_scene=None
if(to_scene_state==SCENE_STATE.PLAY):
if(from_scene_state==SCENE_STATE.INTRO): #intro -> play
out_scene=play_scene
#fade in/start music
elif(from_scene_state==SCENE_STATE.DEATH): #death -> play
out_scene=play_scene
self.segment_list=[] #clear segment list
self.life+=1
self.level_start_time_seconds=this_frame_elapsed_seconds
self.pod_segment=None
self.camera_segment=None
elif(to_scene_state==SCENE_STATE.DEATH): #play -> death
if(from_scene_state==SCENE_STATE.PLAY):
out_scene={'state':SCENE_STATE.DEATH,'start_seconds':level_elapsed_seconds,'end_seconds':level_elapsed_seconds+self.DEATH_SECONDS,'ratio':0.0}
elif(to_scene_state==SCENE_STATE.OUTRO):
if(from_scene_state==SCENE_STATE.PLAY): #play -> outro
out_scene={'state':SCENE_STATE.OUTRO,'start_seconds':level_elapsed_seconds,'end_seconds':level_elapsed_seconds+self.OUTRO_SECONDS,'ratio':0.0}
#fade out music
if(not out_scene is None):
self.scene=out_scene
return
raise NotImplementedError('SceneManager.__setSceneState(): Unable to transition from scene state: '+str(from_scene_state)+', to scene state: '+str(to_scene_state))
|
[
"chapters.wall.hyperspace_helper.Segment.Segment",
"random.randint",
"chapters.wall.hyperspace_helper.AssetLibrary.AssetLibrary",
"numpy.identity",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"chapters.wall.hyperspace_helper.Maze.Maze",
"chapters.wall.hyperspace_helper.Curve.Curve.euler_angles_from_vectors"
] |
[((2598, 2621), 'chapters.wall.hyperspace_helper.AssetLibrary.AssetLibrary', 'AssetLibrary', (['self.pi3d'], {}), '(self.pi3d)\n', (2610, 2621), False, 'from chapters.wall.hyperspace_helper.AssetLibrary import AssetLibrary\n'), ((2724, 2730), 'chapters.wall.hyperspace_helper.Maze.Maze', 'Maze', ([], {}), '()\n', (2728, 2730), False, 'from chapters.wall.hyperspace_helper.Maze import Maze\n'), ((6312, 6328), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6320, 6328), True, 'import numpy as np\n'), ((7082, 7105), 'numpy.linalg.norm', 'np.linalg.norm', (['pod_pos'], {}), '(pod_pos)\n', (7096, 7105), True, 'import numpy as np\n'), ((17035, 17120), 'chapters.wall.hyperspace_helper.Curve.Curve.euler_angles_from_vectors', 'Curve.euler_angles_from_vectors', (['(pod_position - position_camera)', '"""z"""', 'y_axis', '"""y"""'], {}), "(pod_position - position_camera, 'z', y_axis,\n 'y')\n", (17066, 17120), False, 'from chapters.wall.hyperspace_helper.Curve import Curve\n'), ((17311, 17333), 'numpy.array', 'np.array', (['[10, -10, 7]'], {}), '([10, -10, 7])\n', (17319, 17333), True, 'import numpy as np\n'), ((17347, 17404), 'numpy.dot', 'np.dot', (["camera_orientation['rotation_matrix']", 'light_vect'], {}), "(camera_orientation['rotation_matrix'], light_vect)\n", (17353, 17404), True, 'import numpy as np\n'), ((13226, 13245), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (13234, 13245), True, 'import numpy as np\n'), ((13244, 13258), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (13255, 13258), True, 'import numpy as np\n'), ((13728, 13749), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (13742, 13749), False, 'import random\n'), ((13749, 13770), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (13763, 13770), False, 'import random\n'), ((13787, 13809), 'random.randint', 'random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (13801, 13809), False, 'import random\n'), ((13809, 13831), 'random.randint', 'random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (13823, 13831), False, 'import random\n'), ((14913, 15101), 'chapters.wall.hyperspace_helper.Segment.Segment', 'Segment', (['self.asset_library', 'is_branch[0]', "end_point[0]['position']", "end_point[0]['rotation_matrix']", "end_point[0]['timestamp_seconds']", 'curvature[0]', 'orientation[0]', 'ring_count[0]'], {}), "(self.asset_library, is_branch[0], end_point[0]['position'],\n end_point[0]['rotation_matrix'], end_point[0]['timestamp_seconds'],\n curvature[0], orientation[0], ring_count[0])\n", (14920, 15101), False, 'from chapters.wall.hyperspace_helper.Segment import Segment\n'), ((13671, 13691), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (13685, 13691), False, 'import random\n'), ((13693, 13713), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (13707, 13713), False, 'import random\n'), ((14020, 14042), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (14034, 14042), False, 'import random\n'), ((14045, 14067), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (14059, 14067), False, 'import random\n'), ((14309, 14511), 'chapters.wall.hyperspace_helper.Segment.Segment', 'Segment', (['self.asset_library', 'is_branch[itr]', "end_point[itr]['position']", "end_point[itr]['rotation_matrix']", "end_point[itr]['timestamp_seconds']", 'curvature[itr]', 'orientation[itr]', 'ring_count[itr]'], {}), "(self.asset_library, is_branch[itr], end_point[itr]['position'],\n 
end_point[itr]['rotation_matrix'], end_point[itr]['timestamp_seconds'],\n curvature[itr], orientation[itr], ring_count[itr])\n", (14316, 14511), False, 'from chapters.wall.hyperspace_helper.Segment import Segment\n')]
|
import itertools
import numpy as np
import string
__all__ = ['BigramGenerator', 'SkipgramGenerator',
'id2bigram', 'vocabulary_size', 'all_bigrams']
letters = sorted(set((string.ascii_letters + string.digits + " ").lower()))
class WhitelistTable(object):
# there will be stories
def __init__(self, letters):
self._d = {ord(l): ord(l) for l in letters}
def __getitem__(self, k):
return self._d.get(k)
trans_table = WhitelistTable(letters)
all_bigrams = {x[0] + x[1]: i for i, x in
enumerate(itertools.product(letters, letters))}
inversed_bigrams = {i: x for x, i in all_bigrams.items()}
vocabulary_size = len(all_bigrams)
def id2bigram(i):
return inversed_bigrams[i]
def text_to_bigram_sequence(text):
text = text.translate(trans_table)
if len(text) % 2 != 0:
text += " "
sequence = [all_bigrams[text[i:i + 2]] for i in range(0, len(text), 2)]
return np.array(sequence, dtype=np.int16)
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(
shape=(self._batch_size), dtype=np.int16)
for b in range(self._batch_size):
batch[b] = self._text[self._cursor[b]]
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def to_skipgrams(batches):
""" This converts given number of batches to skipgrams
returns skipgram_batches, skipgram_labels
"""
assert len(batches) % 2 != 0
skip_window = len(batches) // 2
return ([batches[skip_window]] * (len(batches) - 1),
[b for i, b in enumerate(batches) if i != skip_window])
class BigramGenerator(object):
"""Generates batches of bigrams for given text"""
def __init__(self, text, batch_size, num_unrollings=0):
self._bigrams = text_to_bigram_sequence(text)
self._generator = BatchGenerator(
self._bigrams, batch_size, num_unrollings)
def next(self):
return self._generator.next()
class SkipgramGenerator(object):
"""Generates batches/labels of skipgrams for given text"""
def __init__(self, text, batch_size, num_skips):
self._bigrams = text_to_bigram_sequence(text)
self._generator = BatchGenerator(
self._bigrams, batch_size, num_skips * 2)
def next(self):
return to_skipgrams(self._generator.next())
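
# Minimal usage sketch (the sample text and sizes below are illustrative assumptions,
# not part of the original module):
if __name__ == "__main__":
    sample_text = "the quick brown fox jumps over the lazy dog"
    bigram_gen = BigramGenerator(sample_text, batch_size=2, num_unrollings=4)
    # next() returns num_unrollings + 1 arrays of bigram ids, each of shape (batch_size,)
    print(bigram_gen.next())
    skipgram_gen = SkipgramGenerator(sample_text, batch_size=2, num_skips=1)
    # next() returns (center_batches, context_batches) built from 2*num_skips + 1 unrolled batches
    print(skipgram_gen.next())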
|
[
"numpy.zeros",
"numpy.array",
"itertools.product"
] |
[((944, 978), 'numpy.array', 'np.array', (['sequence'], {'dtype': 'np.int16'}), '(sequence, dtype=np.int16)\n', (952, 978), True, 'import numpy as np\n'), ((1512, 1560), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self._batch_size', 'dtype': 'np.int16'}), '(shape=self._batch_size, dtype=np.int16)\n', (1520, 1560), True, 'import numpy as np\n'), ((551, 586), 'itertools.product', 'itertools.product', (['letters', 'letters'], {}), '(letters, letters)\n', (568, 586), False, 'import itertools\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
"""
This file implements a psychrometric chart for air at 1 atm
"""
from CoolProp.HumidAirProp import HAPropsSI
from .Plots import InlineLabel
import matplotlib, numpy, textwrap
import_template = (
"""
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots
if __name__=='__main__':
import numpy, matplotlib
from CoolProp.HumidAirProp import HAPropsSI
from CoolProp.Plots.Plots import InlineLabel
p = 101325
Tdb = numpy.linspace(-10,60,100)+273.15
# Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
"""
)
closure_template = (
"""
matplotlib.pyplot.show()
"""
)
Tdb = numpy.linspace(-10, 60, 100) + 273.15
p = 101325
def indented_segment(s):
return '\n'.join([' ' + line for line in textwrap.dedent(s).split('\n')])
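# Each chart-layer class below both draws itself onto a matplotlib axis (plot) and, via
# __str__, emits equivalent standalone source code; the __main__ block at the bottom
# concatenates those strings into PsychScript.py.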
class PlotFormatting(object):
def plot(self, ax):
ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
ax.set_ylim(0, 0.03)
ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
def __str__(self):
return indented_segment("""
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
""")
class SaturationLine(object):
def plot(self, ax):
w = [HAPropsSI('W', 'T', T, 'P', p, 'R', 1.0) for T in Tdb]
ax.plot(Tdb - 273.15, w, lw=2)
def __str__(self):
return indented_segment("""
# Saturation line
w = [HAPropsSI('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
"""
)
class HumidityLabels(object):
def __init__(self, RH_values, h):
self.RH_values = RH_values
self.h = h
def plot(self, ax):
xv = Tdb # [K]
for RH in self.RH_values:
yv = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
y = HAPropsSI('W', 'P', p, 'H', self.h, 'R', RH)
T_K, w, rot = InlineLabel(xv, yv, y=y, axis=ax)
string = r'$\phi$=' + '{s:0.0f}'.format(s=RH * 100) + '%'
# Make a temporary label to get its bounding box
bbox_opts = dict(boxstyle='square,pad=0.0', fc='white', ec='None', alpha=0.5)
ax.text(T_K - 273.15, w, string, rotation=rot, ha='center', va='center', bbox=bbox_opts)
def __str__(self):
return indented_segment("""
xv = Tdb #[K]
for RH in {RHValues:s}:
yv = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAPropsSI('W','P',p,'H',{h:f},'R',RH)
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+{s:s}+'%'
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
""".format(h=self.h, RHValues=str(self.RH_values), s="'{s:0.0f}'.format(s=RH*100)")
)
class HumidityLines(object):
def __init__(self, RH_values):
self.RH_values = RH_values
def plot(self, ax):
for RH in self.RH_values:
w = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
ax.plot(Tdb - 273.15, w, 'r', lw=1)
def __str__(self):
return indented_segment("""
# Humidity lines
RHValues = {RHValues:s}
for RH in RHValues:
w = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
""".format(RHValues=str(self.RH_values))
)
class EnthalpyLines(object):
def __init__(self, H_values):
self.H_values = H_values
def plot(self, ax):
for H in self.H_values:
# Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAPropsSI('T', 'H', H, 'P', p, 'R', 1.0) - 273.15
T0 = HAPropsSI('T', 'H', H, 'P', p, 'R', 0.0) - 273.15
w1 = HAPropsSI('W', 'H', H, 'P', p, 'R', 1.0)
w0 = HAPropsSI('W', 'H', H, 'P', p, 'R', 0.0)
ax.plot(numpy.r_[T1, T0], numpy.r_[w1, w0], 'r', lw=1)
def __str__(self):
return indented_segment("""
# Humidity lines
for H in {HValues:s}:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAPropsSI('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAPropsSI('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAPropsSI('W','H',H,'P',p,'R',1.0)
w0 = HAPropsSI('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
""".format(HValues=str(self.H_values))
)
if __name__ == '__main__':
and_plot = False
if and_plot:
fig = matplotlib.pyplot.figure(figsize=(10, 8))
ax = fig.add_axes((0.1, 0.1, 0.85, 0.85))
ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
ax.set_ylim(0, 0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
SL = SaturationLine()
if and_plot: SL.plot(ax)
RHL = HumidityLines([0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
if and_plot: RHL.plot(ax)
RHLabels = HumidityLabels([0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], h=65000)
if and_plot: RHLabels.plot(ax)
HL = EnthalpyLines(range(-20000, 100000, 10000))
if and_plot: HL.plot(ax)
PF = PlotFormatting()
if and_plot: PF.plot(ax)
if and_plot: matplotlib.pyplot.show()
with open('PsychScript.py', 'w') as fp:
for chunk in [import_template, SL, RHL, HL, PF, RHLabels, closure_template]:
            fp.write(str(chunk))
    exec(open('PsychScript.py').read())
|
[
"textwrap.dedent",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"CoolProp.HumidAirProp.HAPropsSI",
"numpy.linspace"
] |
[((799, 827), 'numpy.linspace', 'numpy.linspace', (['(-10)', '(60)', '(100)'], {}), '(-10, 60, 100)\n', (813, 827), False, 'import matplotlib, numpy, textwrap\n'), ((5180, 5221), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (5204, 5221), False, 'import matplotlib, numpy, textwrap\n'), ((5950, 5974), 'matplotlib.pyplot.show', 'matplotlib.pyplot.show', ([], {}), '()\n', (5972, 5974), False, 'import matplotlib, numpy, textwrap\n'), ((1538, 1578), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""T"""', 'T', '"""P"""', 'p', '"""R"""', '(1.0)'], {}), "('W', 'T', T, 'P', p, 'R', 1.0)\n", (1547, 1578), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((2167, 2211), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""P"""', 'p', '"""H"""', 'self.h', '"""R"""', 'RH'], {}), "('W', 'P', p, 'H', self.h, 'R', RH)\n", (2176, 2211), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4326, 4366), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(1.0)'], {}), "('W', 'H', H, 'P', p, 'R', 1.0)\n", (4335, 4366), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4384, 4424), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(0.0)'], {}), "('W', 'H', H, 'P', p, 'R', 0.0)\n", (4393, 4424), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((2097, 2136), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""T"""', 'T', '"""P"""', 'p', '"""R"""', 'RH'], {}), "('W', 'T', T, 'P', p, 'R', RH)\n", (2106, 2136), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((3475, 3514), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""W"""', '"""T"""', 'T', '"""P"""', 'p', '"""R"""', 'RH'], {}), "('W', 'T', T, 'P', p, 'R', RH)\n", (3484, 3514), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4192, 4232), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""T"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(1.0)'], {}), "('T', 'H', H, 'P', p, 'R', 1.0)\n", (4201, 4232), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((4259, 4299), 'CoolProp.HumidAirProp.HAPropsSI', 'HAPropsSI', (['"""T"""', '"""H"""', 'H', '"""P"""', 'p', '"""R"""', '(0.0)'], {}), "('T', 'H', H, 'P', p, 'R', 0.0)\n", (4268, 4299), False, 'from CoolProp.HumidAirProp import HAPropsSI\n'), ((923, 941), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (938, 941), False, 'import matplotlib, numpy, textwrap\n')]
|
# This is an answer to: https://codegolf.stackexchange.com/questions/189277/bridge-the-gaps
import sys
import os
from PIL import Image
import numpy as np
import scipy.ndimage
def obtain_groups(image, threshold, structuring_el):
"""
Obtain isles of unconnected pixels via a threshold on the R channel
"""
image_logical = (image[:, :, 1] < threshold).astype(np.int)
return scipy.ndimage.measurements.label(image_logical, structure=structuring_el)
def swap_colors(image, original_color, new_color):
"""
Swap all the pixels of a specific color by another color
"""
r1, g1, b1 = original_color # RGB value to be replaced
r2, g2, b2 = new_color # New RGB value
red, green, blue = image[:, :, 0], image[:, :, 1], image[:, :, 2]
mask = (red == r1) & (green == g1) & (blue == b1)
image[:, :, :3][mask] = [r2, g2, b2]
return image
def main(image_path=None):
"""
For each processed image, we begin by changing the color
of all the white pixels in an image to red. By doing this,
it is guaranteed that all the elements (any isle of black
pixels) are connected.
Then, we iterate over all the pixels in the image starting
from the top left corner and moving right and down. For every
red pixel we find we change its color to white. If after this
change of color there is still only one element (an element
being now any isle of black and red pixels), we leave the pixel
white and move on to the next pixel. However, if after the
color change from red to white the number of elements is bigger
than one, we leave the pixel red and move on to the next pixel.
    The connections obtained using only this method show a regular
    pattern, and in some cases there are unnecessary red pixels.
    These extra red pixels can easily be removed by iterating over the
    image again and performing the same operations as explained above,
    but from the bottom right corner to the top left corner. This
    second pass is much faster, since the number of red pixels that
    have to be checked is much smaller.
"""
images = os.listdir("images")
f = open("results.txt", "w")
if image_path is not None:
images = [image_path]
for image_name in images:
im = Image.open("images/"+image_name).convert("RGBA")
image = np.array(im)
image = swap_colors(image, (255, 255, 255), (255, 0, 0))
# create structuring element to determine unconnected groups of pixels in image
s = scipy.ndimage.morphology.generate_binary_structure(2, 2)
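        # generate_binary_structure(2, 2) yields a full 3x3 neighbourhood, so diagonally
        # adjacent dark pixels count as connected when labelling groups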
for i in np.ndindex(image.shape[:2]):
# skip black pixels
if sum(image[i[0], i[1]]) == 255:
continue
image[i[0], i[1]] = [255, 255, 255, 255]
# label the different groups, considering diagonal connections as valid
groups, num_groups = obtain_groups(image, 255, s)
if num_groups != 1:
image[i[0], i[1]] = [255, 0, 0, 255]
# Show percentage
print((i[1] + i[0]*im.size[0])/(im.size[0]*im.size[1]))
# Number of red pixels
red_p = 0
for i in np.ndindex(image.shape[:2]):
j = (im.size[1] - i[0] - 1, im.size[0] - i[1] - 1)
# skip black and white pixels
if sum(image[j[0], j[1]]) == 255 or sum(image[j[0], j[1]]) == 255*4:
continue
image[j[0], j[1]] = [255, 255, 255, 255]
# label the different groups, considering diagonal connections as valid
groups, num_groups = obtain_groups(image, 255, s)
if num_groups != 1:
image[j[0], j[1]] = [255, 0, 0, 255]
# Show percentage
print((j[1] + j[0]*im.size[0])/(im.size[0]*im.size[1]))
red_p += (sum(image[j[0], j[1]]) == 255*2)
print(red_p)
f.write("r_"+image_name+": "+str(red_p)+"\n")
im = Image.fromarray(image)
# im.show()
im.save("r_"+image_name)
f.close()
if __name__ == "__main__":
if len(sys.argv) == 2:
main(sys.argv[1])
else:
main()
|
[
"numpy.ndindex",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"os.listdir"
] |
[((2124, 2144), 'os.listdir', 'os.listdir', (['"""images"""'], {}), "('images')\n", (2134, 2144), False, 'import os\n'), ((2349, 2361), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2357, 2361), True, 'import numpy as np\n'), ((2604, 2631), 'numpy.ndindex', 'np.ndindex', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (2614, 2631), True, 'import numpy as np\n'), ((3185, 3212), 'numpy.ndindex', 'np.ndindex', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (3195, 3212), True, 'import numpy as np\n'), ((3952, 3974), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3967, 3974), False, 'from PIL import Image\n'), ((2284, 2318), 'PIL.Image.open', 'Image.open', (["('images/' + image_name)"], {}), "('images/' + image_name)\n", (2294, 2318), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python
import rospy
import sys
import time
import numpy as np
from realtimepseudoAstar import plan
from globaltorobotcoords import transform
from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo, BallIsHolding
#Initialize desired x depending on obstacle number
ROBOT_NAME = 'rival' + str(sys.argv[1])
possible_x = [-600, -200, 200, 600]
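# each rival robot (sys.argv[1] expected to be 1-4) patrols a fixed x lane from this list,
# bouncing between y=-400 and y=+400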
target_1 = np.array([possible_x[int(sys.argv[1]) - 1], -400])
target_2 = np.array([possible_x[int(sys.argv[1]) - 1], 400])
current_target = target_1
# For plotting
# import math
# import matplotlib.pyplot as plt
# Initialize publisher and rate
pub = rospy.Publisher('/' + str(ROBOT_NAME)+'/nubotcontrol/actioncmd', ActionCmd, queue_size=1)
rospy.init_node(str(ROBOT_NAME) + '_brain', anonymous=False)
hertz = 10
rate = rospy.Rate(hertz)
def callback(data):
#Receive all robot info
r = data.robotinfo[int(sys.argv[1]) - 1]
robot_pos = np.array([r.pos.x, r.pos.y])
theta = r.heading.theta
#Alternate between +y and -y target positions
global current_target
if np.linalg.norm(robot_pos - current_target) < 50 and np.all(current_target == target_1):
current_target = target_2
elif np.linalg.norm(robot_pos - current_target) < 50 and np.all(current_target == target_2):
current_target = target_1
target = current_target
#Convert target from global coordinate frame to robot coordinate frame for use by hwcontroller
target = transform(target[0], target[1], robot_pos[0], robot_pos[1], theta)
#Generate ActionCmd() and publish to hwcontroller
action = ActionCmd()
action.target.x = target[0]
action.target.y = target[1]
action.maxvel = 150
action.handle_enable = 0
action.target_ori = 0
pub.publish(action)
rate.sleep()
def listener():
rospy.Subscriber("/" + str(ROBOT_NAME) + "/omnivision/OmniVisionInfo", OminiVisionInfo, callback, queue_size=1)
rospy.spin()
if __name__ == '__main__':
try:
listener()
except rospy.ROSInterruptException:
pass
|
[
"globaltorobotcoords.transform",
"nubot_common.msg.ActionCmd",
"rospy.Rate",
"numpy.array",
"numpy.linalg.norm",
"rospy.spin",
"numpy.all"
] |
[((822, 839), 'rospy.Rate', 'rospy.Rate', (['hertz'], {}), '(hertz)\n', (832, 839), False, 'import rospy\n'), ((955, 983), 'numpy.array', 'np.array', (['[r.pos.x, r.pos.y]'], {}), '([r.pos.x, r.pos.y])\n', (963, 983), True, 'import numpy as np\n'), ((1494, 1560), 'globaltorobotcoords.transform', 'transform', (['target[0]', 'target[1]', 'robot_pos[0]', 'robot_pos[1]', 'theta'], {}), '(target[0], target[1], robot_pos[0], robot_pos[1], theta)\n', (1503, 1560), False, 'from globaltorobotcoords import transform\n'), ((1629, 1640), 'nubot_common.msg.ActionCmd', 'ActionCmd', ([], {}), '()\n', (1638, 1640), False, 'from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo, BallIsHolding\n'), ((1963, 1975), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1973, 1975), False, 'import rospy\n'), ((1150, 1184), 'numpy.all', 'np.all', (['(current_target == target_1)'], {}), '(current_target == target_1)\n', (1156, 1184), True, 'import numpy as np\n'), ((1098, 1140), 'numpy.linalg.norm', 'np.linalg.norm', (['(robot_pos - current_target)'], {}), '(robot_pos - current_target)\n', (1112, 1140), True, 'import numpy as np\n'), ((1281, 1315), 'numpy.all', 'np.all', (['(current_target == target_2)'], {}), '(current_target == target_2)\n', (1287, 1315), True, 'import numpy as np\n'), ((1229, 1271), 'numpy.linalg.norm', 'np.linalg.norm', (['(robot_pos - current_target)'], {}), '(robot_pos - current_target)\n', (1243, 1271), True, 'import numpy as np\n')]
|
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
import tensorflow as tf
class SummaryCallback(Callback):
def __init__(self, trainer, validation=False):
        super(SummaryCallback, self).__init__()
self.trainer = trainer
self.summarysteps = trainer.config['summarysteps']
self.validation = validation
self.image = tf.Variable(0., validate_shape=False)
self.mask = tf.Variable(0., validate_shape=False)
self.predicted = tf.Variable(0., validate_shape=False)
model = self.trainer.model.model
self.fetches = [tf.assign(self.image, model.inputs[0], validate_shape=False),
tf.assign(self.mask, model.targets[0], validate_shape=False),
tf.assign(self.predicted, model.outputs[0], validate_shape=False)]
model._function_kwargs = {'fetches': self.fetches}
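        # registering these assign ops as extra fetches makes Keras run them with every batch,
        # so the callback can later K.eval() copies of the most recent inputs, targets and
        # predictions when writing image summaries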
def on_train_begin(self, logs={}):
self.losses = []
model = self.trainer.model.model
self.fetches = [tf.assign(self.image, model.inputs[0], validate_shape=False),
tf.assign(self.mask, model.targets[0], validate_shape=False),
tf.assign(self.predicted, model.outputs[0], validate_shape=False)]
model._function_kwargs = {'fetches': self.fetches}
def on_train_end(self, logs={}):
model = self.trainer.model.model
model._function_kwargs = {'fetches': []}
def on_batch_end(self, batch, logs={}):
loss = logs.get('loss')
self.losses.append(loss)
if self.validation is False:
self.trainer.global_step += 1
self.trainer.loss += loss
if batch % self.summarysteps == 0:
if self.trainer.summarywriter:
self.trainer.summarywriter.add_scalar(
self.trainer.name+'loss', loss, global_step=self.trainer.global_step)
image = K.eval(self.image)
if not type(image) is np.float32:
image = image[0]
image = np.rollaxis(image, axis=2, start=0)
mask = K.eval(self.mask)[0]
mask = np.rollaxis(mask, axis=2, start=0)[1]
predicted = K.eval(self.predicted)[0]
predicted = np.rollaxis(predicted, axis=2, start=0)[1]
self.trainer.summarywriter.add_image(
self.trainer.name+'image',image/255.0, global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'mask', mask.astype(np.float32), global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'predicted', predicted/(predicted.max()+0.0001), global_step=self.trainer.global_step)
else:
if self.trainer.summarywriter:
self.trainer.summarywriter.add_scalar(
self.trainer.name+'val_loss', loss, global_step=self.trainer.global_step)
image = K.eval(self.image)
if not type(image) is np.float32:
image = image[0]
image = np.rollaxis(image, axis=2, start=0)
mask = K.eval(self.mask)[0]
mask = np.rollaxis(mask, axis=2, start=0)[1]
predicted = K.eval(self.predicted)[0]
predicted = np.rollaxis(predicted, axis=2, start=0)[1]
self.trainer.summarywriter.add_image(
self.trainer.name+'val_image',image/255.0, global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'val_mask', mask, global_step=self.trainer.global_step)
self.trainer.summarywriter.add_image(
self.trainer.name+'val_predicted', predicted/(predicted.max()+0.0001), global_step=self.trainer.global_step)
|
[
"tensorflow.assign",
"tensorflow.Variable",
"keras.backend.eval",
"numpy.rollaxis"
] |
[((382, 420), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'validate_shape': '(False)'}), '(0.0, validate_shape=False)\n', (393, 420), True, 'import tensorflow as tf\n'), ((440, 478), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'validate_shape': '(False)'}), '(0.0, validate_shape=False)\n', (451, 478), True, 'import tensorflow as tf\n'), ((503, 541), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'validate_shape': '(False)'}), '(0.0, validate_shape=False)\n', (514, 541), True, 'import tensorflow as tf\n'), ((606, 666), 'tensorflow.assign', 'tf.assign', (['self.image', 'model.inputs[0]'], {'validate_shape': '(False)'}), '(self.image, model.inputs[0], validate_shape=False)\n', (615, 666), True, 'import tensorflow as tf\n'), ((679, 739), 'tensorflow.assign', 'tf.assign', (['self.mask', 'model.targets[0]'], {'validate_shape': '(False)'}), '(self.mask, model.targets[0], validate_shape=False)\n', (688, 739), True, 'import tensorflow as tf\n'), ((752, 817), 'tensorflow.assign', 'tf.assign', (['self.predicted', 'model.outputs[0]'], {'validate_shape': '(False)'}), '(self.predicted, model.outputs[0], validate_shape=False)\n', (761, 817), True, 'import tensorflow as tf\n'), ((1008, 1068), 'tensorflow.assign', 'tf.assign', (['self.image', 'model.inputs[0]'], {'validate_shape': '(False)'}), '(self.image, model.inputs[0], validate_shape=False)\n', (1017, 1068), True, 'import tensorflow as tf\n'), ((1081, 1141), 'tensorflow.assign', 'tf.assign', (['self.mask', 'model.targets[0]'], {'validate_shape': '(False)'}), '(self.mask, model.targets[0], validate_shape=False)\n', (1090, 1141), True, 'import tensorflow as tf\n'), ((1154, 1219), 'tensorflow.assign', 'tf.assign', (['self.predicted', 'model.outputs[0]'], {'validate_shape': '(False)'}), '(self.predicted, model.outputs[0], validate_shape=False)\n', (1163, 1219), True, 'import tensorflow as tf\n'), ((3137, 3155), 'keras.backend.eval', 'K.eval', (['self.image'], {}), '(self.image)\n', (3143, 3155), True, 'from keras import backend as K\n'), ((1910, 1928), 'keras.backend.eval', 'K.eval', (['self.image'], {}), '(self.image)\n', (1916, 1928), True, 'from keras import backend as K\n'), ((3271, 3306), 'numpy.rollaxis', 'np.rollaxis', (['image'], {'axis': '(2)', 'start': '(0)'}), '(image, axis=2, start=0)\n', (3282, 3306), True, 'import numpy as np\n'), ((2056, 2091), 'numpy.rollaxis', 'np.rollaxis', (['image'], {'axis': '(2)', 'start': '(0)'}), '(image, axis=2, start=0)\n', (2067, 2091), True, 'import numpy as np\n'), ((3334, 3351), 'keras.backend.eval', 'K.eval', (['self.mask'], {}), '(self.mask)\n', (3340, 3351), True, 'from keras import backend as K\n'), ((3382, 3416), 'numpy.rollaxis', 'np.rollaxis', (['mask'], {'axis': '(2)', 'start': '(0)'}), '(mask, axis=2, start=0)\n', (3393, 3416), True, 'import numpy as np\n'), ((3452, 3474), 'keras.backend.eval', 'K.eval', (['self.predicted'], {}), '(self.predicted)\n', (3458, 3474), True, 'from keras import backend as K\n'), ((3510, 3549), 'numpy.rollaxis', 'np.rollaxis', (['predicted'], {'axis': '(2)', 'start': '(0)'}), '(predicted, axis=2, start=0)\n', (3521, 3549), True, 'import numpy as np\n'), ((2123, 2140), 'keras.backend.eval', 'K.eval', (['self.mask'], {}), '(self.mask)\n', (2129, 2140), True, 'from keras import backend as K\n'), ((2175, 2209), 'numpy.rollaxis', 'np.rollaxis', (['mask'], {'axis': '(2)', 'start': '(0)'}), '(mask, axis=2, start=0)\n', (2186, 2209), True, 'import numpy as np\n'), ((2249, 2271), 'keras.backend.eval', 'K.eval', (['self.predicted'], {}), '(self.predicted)\n', 
(2255, 2271), True, 'from keras import backend as K\n'), ((2311, 2350), 'numpy.rollaxis', 'np.rollaxis', (['predicted'], {'axis': '(2)', 'start': '(0)'}), '(predicted, axis=2, start=0)\n', (2322, 2350), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
# ---------------------------------------------------------------------
filter = np.array(
[
[0, -1, 0],
[-1, 5, -1],
        [0, -1, 0]
]
)
def sharpen(img):
sharpen_img = cv2.filter2D(img, -1, filter)
return sharpen_img
# ---------------------------------------------------------------
dim = (720, 385)
cap = cv2.VideoCapture('../video_file/Hackathon_high_home_1_Trim.mp4')
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, dim)
pts1 = np.float32([[502,57], [218,57], [690,320], [30,320]])
pts2 = np.float32([[0,0], [dim[0], 0],[0, dim[1]], [dim[0], dim[1]]])
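# pts1 are four reference points picked in the source frame; warpPerspective maps them onto
# the corners of the full output frame (pts2), giving a rectified top-down style view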
matrix = cv2.getPerspectiveTransform(pts1, pts2)
frame1 = cv2.warpPerspective(frame1, matrix, dim)
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, dim)
matrix = cv2.getPerspectiveTransform(pts1, pts2)
frame2 = cv2.warpPerspective(frame2, matrix, dim)
frame1 = sharpen(frame1)
frame2 = sharpen(frame2)
while True:
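    # motion detection: difference consecutive warped frames, smooth, threshold and
    # dilate so that moving regions become solid blobs for contour detection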
diff = cv2.absdiff(frame1, frame2)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.bilateralFilter(gray, 10, 510, 50)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
print(x,y)
if cv2.contourArea(contour) > 100 and cv2.contourArea(contour) < 450:
cv2.rectangle(frame1, (x,y), (x+w, y+h), (255, 255, 0), 1)
# elif cv2.contourArea(contour) < 30:
# cv2.rectangle(frame1, (x,y), (x+w, y+h), (0, 255, 0), 2)
# else:
# cv2.rectangle(frame1, (x,y), (x+w, y+h), (255, 255, 0), 2)
cv2.imshow('video', frame1)
frame1 = frame2
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, dim)
matrix = cv2.getPerspectiveTransform(pts1, pts2)
frame2 = cv2.warpPerspective(frame2, matrix, dim)
frame2 = sharpen(frame2)
if cv2.waitKey(27) & 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# import cv2
# import sys
# (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# if __name__ == '__main__' :
# # Set up tracker.
# # Instead of MIL, you can also use
# tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
# tracker_type = tracker_types[-1]
# if int(minor_ver) < 3:
# tracker = cv2.Tracker_create(tracker_type)
# else:
# if tracker_type == 'BOOSTING':
# tracker = cv2.TrackerBoosting_create()
# if tracker_type == 'MIL':
# tracker = cv2.TrackerMIL_create()
# if tracker_type == 'KCF':
# tracker = cv2.TrackerKCF_create()
# if tracker_type == 'TLD':
# tracker = cv2.TrackerTLD_create()
# if tracker_type == 'MEDIANFLOW':
# tracker = cv2.TrackerMedianFlow_create()
# if tracker_type == 'GOTURN':
# tracker = cv2.TrackerGOTURN_create()
# if tracker_type == 'MOSSE':
# tracker = cv2.TrackerMOSSE_create()
# if tracker_type == "CSRT":
# tracker = cv2.TrackerCSRT_create()
# # Read video
# video = cv2.VideoCapture("../video_file/Hackathon_high_home_1_Trim.mp4")
# # Exit if video not opened.
# if not video.isOpened():
# print("Could not open video")
# sys.exit()
# # Read first frame.
# ok, frame = video.read()
# if not ok:
# print('Cannot read video file')
# sys.exit()
# # Define an initial bounding box
# bbox = (287, 23, 86, 320)
# # Uncomment the line below to select a different bounding box
# bbox = cv2.selectROI(frame)
# # Initialize tracker with first frame and bounding box
# ok = tracker.init(frame, bbox)
# while True:
# # Read a new frame
# ok, frame = video.read()
# if not ok:
# break
# # Start timer
# timer = cv2.getTickCount()
# # Update tracker
# ok, bbox = tracker.update(frame)
# # Calculate Frames per second (FPS)
# fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
# # Draw bounding box
# if ok:
# # Tracking success
# p1 = (int(bbox[0]), int(bbox[1]))
# p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
# cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
# else :
# # Tracking failure
# cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
# # Display tracker type on frame
# cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
# # Display FPS on frame
# cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);
# # Display result
# cv2.imshow("Tracking", frame)
# # Exit if ESC pressed
# k = cv2.waitKey(1) & 0xff
# if k == 27 : break
|
[
"cv2.getPerspectiveTransform",
"cv2.bilateralFilter",
"cv2.rectangle",
"cv2.absdiff",
"cv2.imshow",
"cv2.warpPerspective",
"cv2.contourArea",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"numpy.float32",
"cv2.threshold",
"cv2.VideoCapture",
"numpy.array",
"cv2.findContours"
] |
[((250, 297), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 5, -1], [0, -1, 0]]'], {}), '([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n', (258, 297), True, 'import numpy as np\n'), ((514, 578), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../video_file/Hackathon_high_home_1_Trim.mp4"""'], {}), "('../video_file/Hackathon_high_home_1_Trim.mp4')\n", (530, 578), False, 'import cv2\n'), ((614, 637), 'cv2.resize', 'cv2.resize', (['frame1', 'dim'], {}), '(frame1, dim)\n', (624, 637), False, 'import cv2\n'), ((645, 702), 'numpy.float32', 'np.float32', (['[[502, 57], [218, 57], [690, 320], [30, 320]]'], {}), '([[502, 57], [218, 57], [690, 320], [30, 320]])\n', (655, 702), True, 'import numpy as np\n'), ((706, 770), 'numpy.float32', 'np.float32', (['[[0, 0], [dim[0], 0], [0, dim[1]], [dim[0], dim[1]]]'], {}), '([[0, 0], [dim[0], 0], [0, dim[1]], [dim[0], dim[1]]])\n', (716, 770), True, 'import numpy as np\n'), ((778, 817), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (805, 817), False, 'import cv2\n'), ((827, 867), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame1', 'matrix', 'dim'], {}), '(frame1, matrix, dim)\n', (846, 867), False, 'import cv2\n'), ((903, 926), 'cv2.resize', 'cv2.resize', (['frame2', 'dim'], {}), '(frame2, dim)\n', (913, 926), False, 'import cv2\n'), ((936, 975), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (963, 975), False, 'import cv2\n'), ((985, 1025), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame2', 'matrix', 'dim'], {}), '(frame2, matrix, dim)\n', (1004, 1025), False, 'import cv2\n'), ((2214, 2237), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2235, 2237), False, 'import cv2\n'), ((370, 399), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'filter'], {}), '(img, -1, filter)\n', (382, 399), False, 'import cv2\n'), ((1102, 1129), 'cv2.absdiff', 'cv2.absdiff', (['frame1', 'frame2'], {}), '(frame1, frame2)\n', (1113, 1129), False, 'import cv2\n'), ((1141, 1179), 'cv2.cvtColor', 'cv2.cvtColor', (['diff', 'cv2.COLOR_BGR2GRAY'], {}), '(diff, cv2.COLOR_BGR2GRAY)\n', (1153, 1179), False, 'import cv2\n'), ((1191, 1229), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['gray', '(10)', '(510)', '(50)'], {}), '(gray, 10, 510, 50)\n', (1210, 1229), False, 'import cv2\n'), ((1247, 1294), 'cv2.threshold', 'cv2.threshold', (['blur', '(20)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blur, 20, 255, cv2.THRESH_BINARY)\n', (1260, 1294), False, 'import cv2\n'), ((1309, 1347), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(3)'}), '(thresh, None, iterations=3)\n', (1319, 1347), False, 'import cv2\n'), ((1367, 1432), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1383, 1432), False, 'import cv2\n'), ((1893, 1920), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame1'], {}), "('video', frame1)\n", (1903, 1920), False, 'import cv2\n'), ((1983, 2006), 'cv2.resize', 'cv2.resize', (['frame2', 'dim'], {}), '(frame2, dim)\n', (1993, 2006), False, 'import cv2\n'), ((2021, 2060), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (2048, 2060), False, 'import cv2\n'), ((2074, 2114), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame2', 'matrix', 'dim'], {}), '(frame2, matrix, dim)\n', (2093, 2114), False, 'import cv2\n'), ((1487, 1512), 'cv2.boundingRect', 'cv2.boundingRect', 
(['contour'], {}), '(contour)\n', (1503, 1512), False, 'import cv2\n'), ((1622, 1685), 'cv2.rectangle', 'cv2.rectangle', (['frame1', '(x, y)', '(x + w, y + h)', '(255, 255, 0)', '(1)'], {}), '(frame1, (x, y), (x + w, y + h), (255, 255, 0), 1)\n', (1635, 1685), False, 'import cv2\n'), ((2151, 2166), 'cv2.waitKey', 'cv2.waitKey', (['(27)'], {}), '(27)\n', (2162, 2166), False, 'import cv2\n'), ((1543, 1567), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1558, 1567), False, 'import cv2\n'), ((1578, 1602), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1593, 1602), False, 'import cv2\n')]
|
import tensorflow as tf
import numpy as np
import pandas as pd
import re
import nltk
import string
import random
random.seed(0)
np.random.seed(0)
tf.random.set_seed(42)
tf.random.set_seed(42)
from nltk.tokenize import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
df = pd.read_csv("imdb.csv")
def preprocess(x):
x = x.lower()
x = x.encode("ascii","ignore").decode()
x = re.sub("https*\S+"," ",x)
x = re.sub("@\S+"," ",x)
x = re.sub("#\S+"," ",x)
x = re.sub("\'\w+","",x)
x = re.sub("[%s]" % re.escape(string.punctuation)," ", x)
x = re.sub("\w*\d+\w*","",x)
x = re.sub("\s{2,}"," ",x)
return x
temp = []
data_to_list = df["review"].values.tolist()
for i in range(len(data_to_list)):
temp.append(preprocess(data_to_list[i]))
def tokenize(y):
for x in y:
yield(word_tokenize(str(x)))
data_words = list(tokenize(temp))
def detokenize(txt):
return TreebankWordDetokenizer().detokenize(txt)
final_data = []
for i in range(len(data_words)):
final_data.append(detokenize(data_words[i]))
print(final_data[:5])
final_data = np.array(final_data)
import pickle
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
max_words = 20000
max_len = 200
tokenizer = Tokenizer(num_words = max_words)
tokenizer.fit_on_texts(final_data)
sequences = tokenizer.texts_to_sequences(final_data)
tweets = pad_sequences(sequences,maxlen=max_len)
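# every review becomes a fixed-length vector of max_len=200 word indices
# (shorter sequences are padded, longer ones truncated by pad_sequences)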
with open("tokenizer.pickle","wb") as handle:
pickle.dump(tokenizer,handle,protocol=pickle.HIGHEST_PROTOCOL)
print(tweets)
labels = np.array(df["sentiment"])
l = []
for i in range(len(labels)):
if labels[i]=="negative":
l.append(0)
elif labels[i]=="positive":
l.append(1)
l = np.array(l)
labels = tf.keras.utils.to_categorical(l,2,dtype="int32")
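# labels become one-hot vectors of length 2: index 0 = negative, index 1 = positive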
del l
x_train,x_test,y_train,y_test = train_test_split(tweets,labels,random_state=42)
x_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size=0.25,random_state=42)
inputs = tf.keras.Input(shape=(None,),dtype="int32")
x = layers.Embedding(max_words,128)(inputs)
x = layers.GRU(64,return_sequences=True)(x)
x = layers.GRU(64)(x)
outputs = layers.Dense(2,activation="sigmoid")(x)
model = tf.keras.Model(inputs,outputs)
model.summary()
model.compile(optimizer="adam",loss="binary_crossentropy",metrics=["accuracy"])
checkpoint = ModelCheckpoint("model_gru.hdf5",monitor="val_accuracy",verbose=1,save_best_only=True,save_weights_only=False)
model.fit(x_train,y_train,batch_size=32,epochs=5,validation_data=(x_val,y_val),callbacks=[checkpoint])
best = tf.keras.models.load_model("model_gru.hdf5")
loss,acc = best.evaluate(x_test,y_test,verbose=2)
predictions = best.predict(x_test)
print("Test acc: {:.2f} %".format(100*acc))
print("Test loss: {:.2f} %".format(100*loss))
|
[
"tensorflow.random.set_seed",
"pickle.dump",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.callbacks.ModelCheckpoint",
"nltk.tokenize.treebank.TreebankWordDetokenizer",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.Input",
"re.escape",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"random.seed",
"tensorflow.keras.layers.Embedding",
"re.sub",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.Model",
"numpy.array"
] |
[((114, 128), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (125, 128), False, 'import random\n'), ((129, 146), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (143, 146), True, 'import numpy as np\n'), ((147, 169), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (165, 169), True, 'import tensorflow as tf\n'), ((170, 192), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (188, 192), True, 'import tensorflow as tf\n'), ((299, 322), 'pandas.read_csv', 'pd.read_csv', (['"""imdb.csv"""'], {}), "('imdb.csv')\n", (310, 322), True, 'import pandas as pd\n'), ((1072, 1092), 'numpy.array', 'np.array', (['final_data'], {}), '(final_data)\n', (1080, 1092), True, 'import numpy as np\n'), ((1464, 1494), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'max_words'}), '(num_words=max_words)\n', (1473, 1494), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((1594, 1634), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_len'}), '(sequences, maxlen=max_len)\n', (1607, 1634), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1768, 1793), 'numpy.array', 'np.array', (["df['sentiment']"], {}), "(df['sentiment'])\n", (1776, 1793), True, 'import numpy as np\n'), ((1918, 1929), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (1926, 1929), True, 'import numpy as np\n'), ((1939, 1989), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['l', '(2)'], {'dtype': '"""int32"""'}), "(l, 2, dtype='int32')\n", (1968, 1989), True, 'import tensorflow as tf\n'), ((2027, 2076), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tweets', 'labels'], {'random_state': '(42)'}), '(tweets, labels, random_state=42)\n', (2043, 2076), False, 'from sklearn.model_selection import train_test_split\n'), ((2105, 2172), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train', 'y_train'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(x_train, y_train, test_size=0.25, random_state=42)\n', (2121, 2172), False, 'from sklearn.model_selection import train_test_split\n'), ((2180, 2224), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None,)', 'dtype': '"""int32"""'}), "(shape=(None,), dtype='int32')\n", (2194, 2224), True, 'import tensorflow as tf\n'), ((2392, 2423), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2406, 2423), True, 'import tensorflow as tf\n'), ((2533, 2651), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model_gru.hdf5"""'], {'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "('model_gru.hdf5', monitor='val_accuracy', verbose=1,\n save_best_only=True, save_weights_only=False)\n", (2548, 2651), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2754, 2798), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model_gru.hdf5"""'], {}), "('model_gru.hdf5')\n", (2780, 2798), True, 'import tensorflow as tf\n'), ((404, 432), 're.sub', 're.sub', (['"""https*\\\\S+"""', '""" """', 'x'], {}), "('https*\\\\S+', ' ', x)\n", (410, 432), False, 'import re\n'), ((435, 458), 're.sub', 're.sub', (['"""@\\\\S+"""', '""" """', 'x'], {}), "('@\\\\S+', ' ', x)\n", (441, 458), False, 'import re\n'), ((461, 484), 're.sub', 're.sub', (['"""#\\\\S+"""', '""" """', 'x'], {}), 
"('#\\\\S+', ' ', x)\n", (467, 484), False, 'import re\n'), ((487, 509), 're.sub', 're.sub', (['"""\'\\\\w+"""', '""""""', 'x'], {}), '("\'\\\\w+", \'\', x)\n', (493, 509), False, 'import re\n'), ((572, 601), 're.sub', 're.sub', (['"""\\\\w*\\\\d+\\\\w*"""', '""""""', 'x'], {}), "('\\\\w*\\\\d+\\\\w*', '', x)\n", (578, 601), False, 'import re\n'), ((602, 627), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'x'], {}), "('\\\\s{2,}', ' ', x)\n", (608, 627), False, 'import re\n'), ((1681, 1745), 'pickle.dump', 'pickle.dump', (['tokenizer', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (1692, 1745), False, 'import pickle\n'), ((2228, 2260), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', (['max_words', '(128)'], {}), '(max_words, 128)\n', (2244, 2260), False, 'from tensorflow.keras import layers\n'), ((2272, 2309), 'tensorflow.keras.layers.GRU', 'layers.GRU', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (2282, 2309), False, 'from tensorflow.keras import layers\n'), ((2316, 2330), 'tensorflow.keras.layers.GRU', 'layers.GRU', (['(64)'], {}), '(64)\n', (2326, 2330), False, 'from tensorflow.keras import layers\n'), ((2344, 2381), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2)'], {'activation': '"""sigmoid"""'}), "(2, activation='sigmoid')\n", (2356, 2381), False, 'from tensorflow.keras import layers\n'), ((529, 558), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (538, 558), False, 'import re\n'), ((898, 923), 'nltk.tokenize.treebank.TreebankWordDetokenizer', 'TreebankWordDetokenizer', ([], {}), '()\n', (921, 923), False, 'from nltk.tokenize.treebank import TreebankWordDetokenizer\n')]
|
# ---
# jupyter:
# jupytext_format_version: '1.2'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.4
# ---
# +
import sys
sys.path.insert(0, './../')
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import torch
from torchvision import datasets, transforms
import numpy as np
from matplotlib import pyplot as plt
import foolbox
from foolbox import attacks as fa
# own modules
from abs_models import utils as u
from abs_models import models as mz
from abs_models import attack_utils as au
# -
model = mz.get_VAE(n_iter=10) # ABS, do n_iter=50 for original model
# model = mz.get_VAE(binary=True)       # ABS with scaling and binarization
# model = mz.get_binary_CNN() # Binary CNN
# model = mz.get_CNN() # Vanilla CNN
# model = mz.get_NearestNeighbor() # Nearest Neighbor, "nearest L2 dist to each class"=logits
# model = mz.get_madry() # Robust network from Madry et al. in tf
# code is agnostic of pytorch/ tensorflow model --> foolbox model
if model.code_base == 'tensorflow':
fmodel = foolbox.models.TensorFlowModel(model.x_input, model.pre_softmax, (0., 1.),
channel_axis=3)
elif model.code_base == 'pytorch':
model.eval()
fmodel = foolbox.models.PyTorchModel(model, # return logits in shape (bs, n_classes)
bounds=(0., 1.), num_classes=10,
device=u.dev())
else:
print('not implemented')
# test model
b, l = u.get_batch(bs=10000) # returns random batch as np.array
pred_label = np.argmax(fmodel.batch_predictions(b), axis=1)
print('score', float(np.sum(pred_label == l)) / b.shape[0])
# # Decision based attacks
# Note that this is only demo code. All experiments were optimized to our compute architecture.
b, l = u.get_batch(bs=1) # returns random batch
# +
import time
start = time.time()
att = fa.DeepFoolL2Attack(fmodel)
metric = foolbox.distances.MSE
criterion = foolbox.criteria.Misclassification()
plt.imshow(b[0, 0], cmap='gray')
plt.title('orig')
plt.axis('off')
plt.show()
# Estimate gradients from scores
if not model.has_grad:
GE = foolbox.gradient_estimators.CoordinateWiseGradientEstimator(0.1)
fmodel = foolbox.models.ModelWithEstimatedGradients(fmodel, GE)
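    # (added note) CoordinateWiseGradientEstimator approximates gradients by
    # finite differences along each input coordinate, so purely score-based
    # models (has_grad == False) can still be attacked with gradient-based
    # methods such as DeepFool.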
# generate adversarial example
a = foolbox.adversarial.Adversarial(fmodel, criterion, b[0], l[0], distance=metric)
att(a)
print('runtime', time.time() - start, 'seconds')
print('pred', np.argmax(fmodel.predictions(a.image)))
if a.image is not None: # attack was successful
plt.imshow(a.image[0], cmap='gray')
plt.title('adv')
plt.axis('off')
plt.show()
# -
# # get Trash Adversarials
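# (added note) Distal ("trash") adversarials: starting from random noise, the
# input is optimised with estimated gradients until the model assigns high
# confidence (>= 0.9 below) to a fixed class, probing overconfidence far away
# from the data.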
from foolbox.gradient_estimators import CoordinateWiseGradientEstimator as CWGE
a = np.random.random((1, 28, 28)).astype(np.float32)
a_helper = torch.tensor(torch.from_numpy(a.copy()), requires_grad=True)
fixed_class = 1
GE = CWGE(1.)
opti = torch.optim.SGD([a_helper], lr=1, momentum=0.95)
# +
confidence_level = model.confidence_level # abs 0.0000031, CNN 1439000, madry 60, 1-NN 0.000000000004
logits_scale = model.logit_scale # ABS 430, madry 1, CNN 1, 1-NN 5
a_orig = a
plt.imshow(u.t2n(a[0]), cmap='gray')
plt.show()
for i in range(10000):
logits = fmodel.predictions(a)
probs = u.t2n(u.confidence_softmax(logits_scale*torch.from_numpy(logits[None, :]), dim=1,
const=confidence_level))[0]
pred_class = np.argmax(u.t2n(logits).squeeze())
if probs[fixed_class]>= 0.9:
break
grads = GE(fmodel.batch_predictions, a, fixed_class, (0,1))
a = au.update_distal_adv(a, a_helper, grads, opti)
if i % 1000 == 0:
print(f'probs {probs[pred_class]:.3f} class', pred_class)
fig, ax = plt.subplots(1,3, squeeze=False, figsize=(10, 4))
ax[0, 0].imshow(u.t2n(a[0]), cmap='gray')
ax[0, 1].imshow(u.t2n(grads[0]), cmap='gray')
ax[0, 2].imshow(np.sign(grads[0]), cmap='gray')
plt.show()
plt.imshow(u.t2n(a[0]), cmap='gray')
plt.show()
# -
# # Latent Descent Attack
# +
# only for abs
att = au.LineSearchAttack(model) # BinaryLineSearchAttack
b, l = u.get_batch(bs=200)
advs = att(b, l, n_coarse_steps=50+1, n_ft_steps=2)
for adv in advs:
adv['img'] = adv['img'].cpu().numpy()
for i, (a_i, b_i) in enumerate(zip(advs, b)):
l2 = np.sqrt(a_i['distance'] * 784) # convert from MSE
fig, ax = plt.subplots(1, 2, squeeze=False)
ax[0, 0].set_title(str(a_i['original_label']))
ax[0, 0].imshow(u.t2n(b_i[0]), cmap='gray')
ax[0, 1].set_title(str(a_i['adversarial_label']))
ax[0, 1].imshow(u.t2n(a_i['img'][0]), cmap='gray')
plt.show()
if i ==10:
break
print('mean L2', np.mean([np.sqrt(a_i['distance'] * 784) for a_i in advs]))
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"abs_models.attack_utils.LineSearchAttack",
"numpy.sqrt",
"foolbox.criteria.Misclassification",
"foolbox.models.TensorFlowModel",
"matplotlib.pyplot.imshow",
"abs_models.utils.get_batch",
"foolbox.adversarial.Adversarial",
"abs_models.models.get_VAE",
"abs_models.utils.t2n",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"foolbox.gradient_estimators.CoordinateWiseGradientEstimator",
"foolbox.attacks.DeepFoolL2Attack",
"torch.from_numpy",
"foolbox.models.ModelWithEstimatedGradients",
"abs_models.utils.dev",
"abs_models.attack_utils.update_distal_adv",
"sys.path.insert",
"matplotlib.pyplot.axis",
"time.time",
"numpy.random.random",
"numpy.sign",
"torch.optim.SGD"
] |
[((405, 432), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./../"""'], {}), "(0, './../')\n", (420, 432), False, 'import sys\n'), ((800, 821), 'abs_models.models.get_VAE', 'mz.get_VAE', ([], {'n_iter': '(10)'}), '(n_iter=10)\n', (810, 821), True, 'from abs_models import models as mz\n'), ((1840, 1861), 'abs_models.utils.get_batch', 'u.get_batch', ([], {'bs': '(10000)'}), '(bs=10000)\n', (1851, 1861), True, 'from abs_models import utils as u\n'), ((2151, 2168), 'abs_models.utils.get_batch', 'u.get_batch', ([], {'bs': '(1)'}), '(bs=1)\n', (2162, 2168), True, 'from abs_models import utils as u\n'), ((2218, 2229), 'time.time', 'time.time', ([], {}), '()\n', (2227, 2229), False, 'import time\n'), ((2236, 2263), 'foolbox.attacks.DeepFoolL2Attack', 'fa.DeepFoolL2Attack', (['fmodel'], {}), '(fmodel)\n', (2255, 2263), True, 'from foolbox import attacks as fa\n'), ((2307, 2343), 'foolbox.criteria.Misclassification', 'foolbox.criteria.Misclassification', ([], {}), '()\n', (2341, 2343), False, 'import foolbox\n'), ((2345, 2377), 'matplotlib.pyplot.imshow', 'plt.imshow', (['b[0, 0]'], {'cmap': '"""gray"""'}), "(b[0, 0], cmap='gray')\n", (2355, 2377), True, 'from matplotlib import pyplot as plt\n'), ((2378, 2395), 'matplotlib.pyplot.title', 'plt.title', (['"""orig"""'], {}), "('orig')\n", (2387, 2395), True, 'from matplotlib import pyplot as plt\n'), ((2396, 2411), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2404, 2411), True, 'from matplotlib import pyplot as plt\n'), ((2412, 2422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2420, 2422), True, 'from matplotlib import pyplot as plt\n'), ((2650, 2729), 'foolbox.adversarial.Adversarial', 'foolbox.adversarial.Adversarial', (['fmodel', 'criterion', 'b[0]', 'l[0]'], {'distance': 'metric'}), '(fmodel, criterion, b[0], l[0], distance=metric)\n', (2681, 2729), False, 'import foolbox\n'), ((3250, 3259), 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', 'CWGE', (['(1.0)'], {}), '(1.0)\n', (3254, 3259), True, 'from foolbox.gradient_estimators import CoordinateWiseGradientEstimator as CWGE\n'), ((3267, 3315), 'torch.optim.SGD', 'torch.optim.SGD', (['[a_helper]'], {'lr': '(1)', 'momentum': '(0.95)'}), '([a_helper], lr=1, momentum=0.95)\n', (3282, 3315), False, 'import torch\n'), ((3562, 3572), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3570, 3572), True, 'from matplotlib import pyplot as plt\n'), ((4372, 4382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4380, 4382), True, 'from matplotlib import pyplot as plt\n'), ((4440, 4466), 'abs_models.attack_utils.LineSearchAttack', 'au.LineSearchAttack', (['model'], {}), '(model)\n', (4459, 4466), True, 'from abs_models import attack_utils as au\n'), ((4501, 4520), 'abs_models.utils.get_batch', 'u.get_batch', ([], {'bs': '(200)'}), '(bs=200)\n', (4512, 4520), True, 'from abs_models import utils as u\n'), ((1374, 1470), 'foolbox.models.TensorFlowModel', 'foolbox.models.TensorFlowModel', (['model.x_input', 'model.pre_softmax', '(0.0, 1.0)'], {'channel_axis': '(3)'}), '(model.x_input, model.pre_softmax, (0.0, 1.0),\n channel_axis=3)\n', (1404, 1470), False, 'import foolbox\n'), ((2490, 2554), 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', 'foolbox.gradient_estimators.CoordinateWiseGradientEstimator', (['(0.1)'], {}), '(0.1)\n', (2549, 2554), False, 'import foolbox\n'), ((2568, 2622), 'foolbox.models.ModelWithEstimatedGradients', 'foolbox.models.ModelWithEstimatedGradients', (['fmodel', 'GE'], {}), '(fmodel, GE)\n', (2610, 
2622), False, 'import foolbox\n'), ((2898, 2933), 'matplotlib.pyplot.imshow', 'plt.imshow', (['a.image[0]'], {'cmap': '"""gray"""'}), "(a.image[0], cmap='gray')\n", (2908, 2933), True, 'from matplotlib import pyplot as plt\n'), ((2938, 2954), 'matplotlib.pyplot.title', 'plt.title', (['"""adv"""'], {}), "('adv')\n", (2947, 2954), True, 'from matplotlib import pyplot as plt\n'), ((2959, 2974), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2967, 2974), True, 'from matplotlib import pyplot as plt\n'), ((2979, 2989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2987, 2989), True, 'from matplotlib import pyplot as plt\n'), ((3536, 3547), 'abs_models.utils.t2n', 'u.t2n', (['a[0]'], {}), '(a[0])\n', (3541, 3547), True, 'from abs_models import utils as u\n'), ((3953, 3999), 'abs_models.attack_utils.update_distal_adv', 'au.update_distal_adv', (['a', 'a_helper', 'grads', 'opti'], {}), '(a, a_helper, grads, opti)\n', (3973, 3999), True, 'from abs_models import attack_utils as au\n'), ((4346, 4357), 'abs_models.utils.t2n', 'u.t2n', (['a[0]'], {}), '(a[0])\n', (4351, 4357), True, 'from abs_models import utils as u\n'), ((4690, 4720), 'numpy.sqrt', 'np.sqrt', (["(a_i['distance'] * 784)"], {}), "(a_i['distance'] * 784)\n", (4697, 4720), True, 'import numpy as np\n'), ((4756, 4789), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'squeeze': '(False)'}), '(1, 2, squeeze=False)\n', (4768, 4789), True, 'from matplotlib import pyplot as plt\n'), ((5002, 5012), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5010, 5012), True, 'from matplotlib import pyplot as plt\n'), ((2758, 2769), 'time.time', 'time.time', ([], {}), '()\n', (2767, 2769), False, 'import time\n'), ((3108, 3137), 'numpy.random.random', 'np.random.random', (['(1, 28, 28)'], {}), '((1, 28, 28))\n', (3124, 3137), True, 'import numpy as np\n'), ((4106, 4156), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'squeeze': '(False)', 'figsize': '(10, 4)'}), '(1, 3, squeeze=False, figsize=(10, 4))\n', (4118, 4156), True, 'from matplotlib import pyplot as plt\n'), ((4324, 4334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4332, 4334), True, 'from matplotlib import pyplot as plt\n'), ((4861, 4874), 'abs_models.utils.t2n', 'u.t2n', (['b_i[0]'], {}), '(b_i[0])\n', (4866, 4874), True, 'from abs_models import utils as u\n'), ((4963, 4983), 'abs_models.utils.t2n', 'u.t2n', (["a_i['img'][0]"], {}), "(a_i['img'][0])\n", (4968, 4983), True, 'from abs_models import utils as u\n'), ((1979, 2002), 'numpy.sum', 'np.sum', (['(pred_label == l)'], {}), '(pred_label == l)\n', (1985, 2002), True, 'import numpy as np\n'), ((4180, 4191), 'abs_models.utils.t2n', 'u.t2n', (['a[0]'], {}), '(a[0])\n', (4185, 4191), True, 'from abs_models import utils as u\n'), ((4230, 4245), 'abs_models.utils.t2n', 'u.t2n', (['grads[0]'], {}), '(grads[0])\n', (4235, 4245), True, 'from abs_models import utils as u\n'), ((4284, 4301), 'numpy.sign', 'np.sign', (['grads[0]'], {}), '(grads[0])\n', (4291, 4301), True, 'import numpy as np\n'), ((5068, 5098), 'numpy.sqrt', 'np.sqrt', (["(a_i['distance'] * 784)"], {}), "(a_i['distance'] * 784)\n", (5075, 5098), True, 'import numpy as np\n'), ((1774, 1781), 'abs_models.utils.dev', 'u.dev', ([], {}), '()\n', (1779, 1781), True, 'from abs_models import utils as u\n'), ((3800, 3813), 'abs_models.utils.t2n', 'u.t2n', (['logits'], {}), '(logits)\n', (3805, 3813), True, 'from abs_models import utils as u\n'), ((3684, 3717), 'torch.from_numpy', 'torch.from_numpy', 
(['logits[None, :]'], {}), '(logits[None, :])\n', (3700, 3717), False, 'import torch\n')]
|
"""
# T: maturity
# n: # option periods
# N: # futures periods
# S: initial stock price
# r: continuously-compounded interest rate
# c: dividend yield
# sigma: annualized volatility
# K: strike price
# cp: +1/-1 with regards to call/put
"""
from __future__ import division
from math import exp, sqrt
import numpy as np
import math
T = 0.25
n = 15 # option periods
N = 15 # futures periods
S = 100 #initial stock price
r = 0.02 #continuously-compounded interest rate
c = 0.01 #dividend yield
sigma = 0.3 #annualized volatility
K = 110 #strike price
cp = -1 #with regards to call/put
def Parameter(T,n,sigma,r,c):
"""Parameter calculation"""
dt = T/n
u = exp(sigma * sqrt(dt))
d = 1/u
q1 = (exp((r-c)*dt)-d)/(u-d)
q2 = 1-q1
R = exp(r*dt)
return (u, d, q1, q2, R)
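# (added note) These are the standard Cox-Ross-Rubinstein risk-neutral
# parameters: u = exp(sigma*sqrt(dt)), d = 1/u, q1 = (exp((r-c)*dt) - d)/(u - d),
# R = exp(r*dt). With the module-level constants above (T=0.25, n=15, sigma=0.3,
# r=0.02, c=0.01) this gives approximately u = 1.04, d = 0.96 and q1 = 0.49.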
# =============================================================================
def GenerateTree(T,n,S,sigma,r,c):
"""generate stock tree"""
u, d, q1, q2, R = Parameter(T,n,sigma,r,c)
stockTree = np.zeros((n+1, n+1))
# compute the stock tree
stockTree[0,0] = S
for i in range(1,n+1):
stockTree[0,i] = stockTree[0, i-1]*u
for j in range(1,n+1):
stockTree[j,i] = stockTree[j-1, i-1]*d
return stockTree
# =============================================================================
def StockOptionAM(T,n,S,r,c,sigma,K,cp):
"""first return: American Stock Option Pricing"""
"""second return: when is the earliest time to exercise"""
"""Though it's never optimal to early exercise AM call"""
"""It matters for AM put"""
u, d, q1, q2, R = Parameter(T,n,sigma,r,c)
stockTree = GenerateTree(T,n,S,sigma,r,c)
optionTree = np.zeros((n+1,n+1))
# compute the option tree
for j in range(n+1):
optionTree[j, n] = max(0, cp * (stockTree[j, n]-K))
flag = 0
    exercise_periods = []  # periods at which immediate exercise is optimal
for i in range(n-1,-1,-1):
for j in range(i+1):
optionTree[j, i] = max((q1 * optionTree[j, i+1] + q2 * optionTree[j+1, i+1])/R,
cp * (stockTree[j, i] - K))
if (optionTree[j, i] - cp * (stockTree[j, i] - K)) < 1e-10:
flag += 1
                exercise_periods.append(i)
when = n
    if flag: when = exercise_periods[-1]
print(optionTree, when)
return (optionTree[0,0], when)
z = StockOptionAM(T,n,S,r,c,sigma,K,cp)
option_maturity = 10
class bs_bin_tree:
def __init__(self,T,s0,r,sigma,c,K,n):
self.T = T
self.r = r
self.c = c
self.sigma = sigma
self.K = K
self.s0 = s0
self.n = n
self.u = math.exp(self.sigma*np.sqrt(self.T/self.n))
self.q = (math.exp((self.r-self.c)*T/self.n)-(1/self.u))/(self.u-(1/self.u))
self.R = math.exp(self.r*self.T/self.n)
self.__print_param__()
def __print_param__(self):
print('Time',self.T)
print('Starting Price',self.s0)
print('r',self.r)
print('volatility',self.sigma)
print('dividend yield',self.c)
print('strike',self.K)
print('# period',self.n)
def generate_price(self):
arr=[[self.s0]]
for i in range(self.n):
arr_to_add=[]
for j in range(len(arr[i])):
arr_to_add.append(arr[i][j]/self.u)
if j == (len(arr[i])-1):
arr_to_add.append(arr[i][j]*self.u)
arr.append(arr_to_add)
return arr
def neutral_pricing(self,p1,p2):
price = ((1-self.q)*p1 + (self.q)*p2)/self.R
return price
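    # (added note) One-step risk-neutral valuation used by the trees below:
    # p1 is the option value in the down state, p2 in the up state, and their
    # q-weighted expectation is discounted by one period's growth factor self.R.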
def eu_put(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(self.K-arr_rev[i][j],0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
#a = max(arr_rev[i][j]-strike,0)
#a = max(a,price)
a = price
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def eu_call(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
#a = max(arr_rev[i][j]-strike,0)
#a = max(a,price)
a = price
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def us_call(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(arr_rev[i][j]-self.K,0)
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def us_call_price(self):
return self.us_call()[0][0]
def us_put(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(self.K-arr_rev[i][j],0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(self.K - arr_rev[i][j],0)
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def us_put_price(self):
return self.us_put()[0][0]
def us_put_early_ex(self):
early_ex = False
early_ex_earning = 0
early_ex_time = self.n
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(self.K-arr_rev[i][j],0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(self.K-arr_rev[i][j],0)
if a1 > price:
if early_ex_time == self.n - i:
early_ex_earning = max(early_ex_earning,a1)
else:
early_ex_earning = a1
early_ex =True
early_ex_time = self.n - i
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return {early_ex_time:early_ex_earning} if early_ex == True else False
def us_put_call_parity(self):
LHS = self.us_put_price() + self.s0 * math.exp(-self.c * self.T)
RHS = self.us_call_price() + self.K * math.exp(-self.r * self.T)
print('Put Side',LHS)
print('Call Side',RHS)
return LHS==RHS
def generate_future_price(self):
arr = self.generate_price()
arr_rev = arr[::-1]
res=[]
for i in range(len(arr_rev)):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
res_to_add.append(arr_rev[i][j])
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])*self.R
res_to_add.append(price)
res.append(res_to_add)
return res[::-1]
def option_on_future(self,option_maturity):
arr = self.generate_future_price()[0:option_maturity+1]
arr_rev = arr[::-1]
res=[]
for i in range(option_maturity+1):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(arr_rev[i][j]-self.K,0)
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return res[::-1]
def option_price_on_future(self,option_maturity):
return self.option_on_future(option_maturity)[0][0]
def option_on_future_early_ex(self,option_maturity):
arr = self.generate_future_price()[0:option_maturity+1]
arr_rev = arr[::-1]
res=[]
early_ex = False
early_ex_earning = 0
early_ex_time = self.n
for i in range(option_maturity+1):
res_to_add = []
for j in range(len(arr_rev[i])):
if i == 0:
a = max(arr_rev[i][j]-self.K,0)
res_to_add.append(a)
else:
price = self.neutral_pricing(res[i-1][j], res[i-1][j+1])
a1 = max(arr_rev[i][j]-self.K,0)
if a1 > price:
if early_ex_time == option_maturity - i:
early_ex_earning = max(early_ex_earning,a1)
else:
early_ex_earning = a1
early_ex =True
early_ex_time = len(arr_rev) - i -1
a = max(a1,price)
res_to_add.append(a)
res.append(res_to_add)
return {early_ex_time:early_ex_earning} if early_ex == True else False
def nCr(self,n,r):
f = math.factorial
return f(n) / f(r) / f(n-r)
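    # (added note) A chooser option lets the holder decide at option_expire
    # whether the contract becomes the European call or the European put; its
    # value is the binomial risk-neutral expectation of max(call, put) over the
    # nodes at that date, discounted back by R**option_expire.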
def chooser_option_price(self,option_expire):
call = self.eu_call()[option_expire]
put = self.eu_put()[option_expire]
res=[]
for i in range(len(call)):
res.append(max(call[i],put[i]))
result=0
for j in range(0,len(res)):
result += self.nCr(option_expire,j)* (self.q**(j)) * (1-self.q)**(option_expire-j) * res[j]
return (result/self.R**(option_expire))
tree = bs_bin_tree(T, 100, r, sigma, c, K, n)
print(tree.us_call())
print(tree.us_call_price())
print(tree.us_put())
print(tree.us_put_price())
print(tree.us_put_early_ex())
print(tree.us_put_call_parity())
print(tree.option_on_future(option_maturity))
print(tree.option_price_on_future(option_maturity))
print(tree.option_on_future_early_ex(option_maturity))
print(tree.chooser_option_price(10))
|
[
"math.exp",
"numpy.zeros",
"math.sqrt",
"numpy.sqrt"
] |
[((771, 782), 'math.exp', 'exp', (['(r * dt)'], {}), '(r * dt)\n', (774, 782), False, 'from math import exp, sqrt\n'), ((1034, 1058), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (1042, 1058), True, 'import numpy as np\n'), ((1755, 1779), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (1763, 1779), True, 'import numpy as np\n'), ((2850, 2884), 'math.exp', 'math.exp', (['(self.r * self.T / self.n)'], {}), '(self.r * self.T / self.n)\n', (2858, 2884), False, 'import math\n'), ((689, 697), 'math.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (693, 697), False, 'from math import exp, sqrt\n'), ((726, 743), 'math.exp', 'exp', (['((r - c) * dt)'], {}), '((r - c) * dt)\n', (729, 743), False, 'from math import exp, sqrt\n'), ((2724, 2748), 'numpy.sqrt', 'np.sqrt', (['(self.T / self.n)'], {}), '(self.T / self.n)\n', (2731, 2748), True, 'import numpy as np\n'), ((2766, 2806), 'math.exp', 'math.exp', (['((self.r - self.c) * T / self.n)'], {}), '((self.r - self.c) * T / self.n)\n', (2774, 2806), False, 'import math\n'), ((7626, 7652), 'math.exp', 'math.exp', (['(-self.c * self.T)'], {}), '(-self.c * self.T)\n', (7634, 7652), False, 'import math\n'), ((7699, 7725), 'math.exp', 'math.exp', (['(-self.r * self.T)'], {}), '(-self.r * self.T)\n', (7707, 7725), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
"""
PID Control Class
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from collections import deque
import math
import numpy as np
import carla
class Controller:
"""
PID Controller implementation.
Parameters
----------
args : dict
The configuration dictionary parsed from yaml file.
Attributes
----------
_lon_ebuffer : deque
A deque buffer that stores longitudinal control errors.
_lat_ebuffer : deque
A deque buffer that stores latitudinal control errors.
current_transform : carla.transform
Current ego vehicle transformation in CARLA world.
current_speed : float
Current ego vehicle speed.
past_steering : float
        Steering angle from the previous control step.
"""
def __init__(self, args):
# longitudinal related
self.max_brake = args['max_brake']
self.max_throttle = args['max_throttle']
self._lon_k_p = args['lon']['k_p']
self._lon_k_d = args['lon']['k_d']
self._lon_k_i = args['lon']['k_i']
self._lon_ebuffer = deque(maxlen=10)
# lateral related
self.max_steering = args['max_steering']
self._lat_k_p = args['lat']['k_p']
self._lat_k_d = args['lat']['k_d']
self._lat_k_i = args['lat']['k_i']
self._lat_ebuffer = deque(maxlen=10)
# simulation time-step
self.dt = args['dt']
# current speed and localization retrieved from sensing layer
self.current_transform = None
self.current_speed = 0.
# past steering
self.past_steering = 0.
self.dynamic = args['dynamic']
def dynamic_pid(self):
"""
Compute kp, kd, ki based on current speed.
"""
pass
def update_info(self, ego_pos, ego_spd):
"""
Update ego position and speed to controller.
Parameters
----------
ego_pos : carla.location
Position of the ego vehicle.
ego_spd : float
Speed of the ego vehicle
Returns
-------
"""
self.current_transform = ego_pos
self.current_speed = ego_spd
if self.dynamic:
self.dynamic_pid()
def lon_run_step(self, target_speed):
"""
Parameters
----------
target_speed : float
Target speed of the ego vehicle.
Returns
-------
acceleration : float
Desired acceleration value for the current step
to achieve target speed.
"""
error = target_speed - self.current_speed
        self._lon_ebuffer.append(error)
        if len(self._lon_ebuffer) >= 2:
            _de = (self._lon_ebuffer[-1] - self._lon_ebuffer[-2]) / self.dt
            _ie = sum(self._lon_ebuffer) * self.dt
else:
_de = 0.0
_ie = 0.0
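        # (added note) Standard discrete PID on the speed error: proportional
        # on the current error, derivative on its per-step change, integral on
        # the accumulated error within the bounded deque window.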
        return np.clip((self._lon_k_p * error) +
                       (self._lon_k_d * _de) +
                       (self._lon_k_i * _ie),
-1.0, 1.0)
"""
Generate the throttle command based on current speed and target speed
Args:
-target_location (carla.loaction): Target location.
Returns:
-current_steering (float): Desired steering angle value
for the current step to achieve target location.
"""
def lat_run_step(self, target_location):
"""
        Generate the steering command based on the current pose and the target location.
Parameters
----------
target_location : carla.location
Target location.
Returns
-------
current_steering : float
Desired steering angle value for the current step to
achieve target location.
"""
v_begin = self.current_transform.location
v_end = v_begin + carla.Location(
x=math.cos(
math.radians(
self.current_transform.rotation.yaw)), y=math.sin(
math.radians(
self.current_transform.rotation.yaw)))
v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
w_vec = np.array([target_location.x -
v_begin.x, target_location.y -
v_begin.y, 0.0])
_dot = math.acos(np.clip(np.dot(
w_vec, v_vec) / (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)),
-1.0, 1.0))
_cross = np.cross(v_vec, w_vec)
if _cross[2] < 0:
_dot *= -1.0
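        # (added note) _dot is the unsigned angle between the heading vector
        # and the vector towards the target; the sign of the cross product's z
        # component decides whether to steer left or right.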
        self._lat_ebuffer.append(_dot)
        if len(self._lat_ebuffer) >= 2:
            _de = (self._lat_ebuffer[-1] - self._lat_ebuffer[-2]) / self.dt
            _ie = sum(self._lat_ebuffer) * self.dt
else:
_de = 0.0
_ie = 0.0
return np.clip((self._lat_k_p * _dot) + (self._lat_k_d *
_de) + (self._lat_k_i * _ie), -1.0, 1.0)
def run_step(self, target_speed, waypoint):
"""
Execute one step of control invoking both lateral and longitudinal
PID controllers to reach a target waypoint at a given target_speed.
Parameters
----------
target_speed : float
Target speed of the ego vehicle.
        waypoint : carla.location
Target location.
Returns
-------
control : carla.VehicleControl
Desired vehicle control command for the current step.
"""
# control class for carla vehicle
control = carla.VehicleControl()
# emergency stop
if target_speed == 0 or waypoint is None:
control.steer = 0.0
control.throttle = 0.0
control.brake = 1.0
control.hand_brake = False
return control
acceleration = self.lon_run_step(target_speed)
current_steering = self.lat_run_step(waypoint)
if acceleration >= 0.0:
control.throttle = min(acceleration, self.max_throttle)
control.brake = 0.0
else:
control.throttle = 0.0
control.brake = min(abs(acceleration), self.max_brake)
# Steering regulation: changes cannot happen abruptly, can't steer too
# much.
if current_steering > self.past_steering + 0.2:
current_steering = self.past_steering + 0.2
elif current_steering < self.past_steering - 0.2:
current_steering = self.past_steering - 0.2
if current_steering >= 0:
steering = min(self.max_steering, current_steering)
else:
steering = max(-self.max_steering, current_steering)
control.steer = steering
control.hand_brake = False
control.manual_gear_shift = False
self.past_steering = steering
return control
|
[
"math.radians",
"numpy.cross",
"numpy.clip",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"carla.VehicleControl",
"collections.deque"
] |
[((1110, 1126), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1115, 1126), False, 'from collections import deque\n'), ((1362, 1378), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1367, 1378), False, 'from collections import deque\n'), ((2932, 3022), 'numpy.clip', 'np.clip', (['(self._lat_k_p * error + self._lat_k_d * _de + self._lat_k_i * _ie)', '(-1.0)', '(1.0)'], {}), '(self._lat_k_p * error + self._lat_k_d * _de + self._lat_k_i * _ie, \n -1.0, 1.0)\n', (2939, 3022), True, 'import numpy as np\n'), ((4128, 4185), 'numpy.array', 'np.array', (['[v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0]'], {}), '([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])\n', (4136, 4185), True, 'import numpy as np\n'), ((4202, 4279), 'numpy.array', 'np.array', (['[target_location.x - v_begin.x, target_location.y - v_begin.y, 0.0]'], {}), '([target_location.x - v_begin.x, target_location.y - v_begin.y, 0.0])\n', (4210, 4279), True, 'import numpy as np\n'), ((4512, 4534), 'numpy.cross', 'np.cross', (['v_vec', 'w_vec'], {}), '(v_vec, w_vec)\n', (4520, 4534), True, 'import numpy as np\n'), ((4868, 4957), 'numpy.clip', 'np.clip', (['(self._lat_k_p * _dot + self._lat_k_d * _de + self._lat_k_i * _ie)', '(-1.0)', '(1.0)'], {}), '(self._lat_k_p * _dot + self._lat_k_d * _de + self._lat_k_i * _ie, -\n 1.0, 1.0)\n', (4875, 4957), True, 'import numpy as np\n'), ((5582, 5604), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (5602, 5604), False, 'import carla\n'), ((4365, 4385), 'numpy.dot', 'np.dot', (['w_vec', 'v_vec'], {}), '(w_vec, v_vec)\n', (4371, 4385), True, 'import numpy as np\n'), ((3938, 3987), 'math.radians', 'math.radians', (['self.current_transform.rotation.yaw'], {}), '(self.current_transform.rotation.yaw)\n', (3950, 3987), False, 'import math\n'), ((4039, 4088), 'math.radians', 'math.radians', (['self.current_transform.rotation.yaw'], {}), '(self.current_transform.rotation.yaw)\n', (4051, 4088), False, 'import math\n'), ((4402, 4423), 'numpy.linalg.norm', 'np.linalg.norm', (['w_vec'], {}), '(w_vec)\n', (4416, 4423), True, 'import numpy as np\n'), ((4426, 4447), 'numpy.linalg.norm', 'np.linalg.norm', (['v_vec'], {}), '(v_vec)\n', (4440, 4447), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Copyright 2019-2022 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate batches of alerts coming from ZTF or ELaSTICC.
"""
import argparse
import os
import sys
import glob
import time
import asyncio
import gzip
import numpy as np
from fink_alert_simulator import alertProducer
from fink_alert_simulator import avroUtils
from fink_alert_simulator.parser import getargs
def main():
parser = argparse.ArgumentParser(description=__doc__)
args = getargs(parser)
# Configure producer connection to Kafka broker
conf = {'bootstrap.servers': args.servers}
streamproducer = alertProducer.AlertProducer(
args.topic, schema_files=None, **conf)
# Scan for avro files
root = args.datasimpath
# Grab data stored on disk
files = glob.glob(os.path.join(root, "*.avro*"))
# Number of observations, and total number of alerts to send.
nobs = args.nobservations
poolsize = args.nalerts_per_obs * nobs
if nobs == -1:
# Take all alerts available
nobs = int(len(files) / float(args.nalerts_per_obs)) + 1
poolsize = args.nalerts_per_obs * nobs
msg = """
All {} alerts to be sent (nobservations=-1), corresponding
to {} observations ({} alerts each).
""".format(len(files), nobs, args.nalerts_per_obs)
print(msg)
elif len(files) < poolsize:
# Send only available alerts
nobs = int(len(files) / float(args.nalerts_per_obs)) + 1
msg = """
You ask for more data than you have!
Number of alerts on disk ({}): {}
Number of alerts required (nalerts_per_obs * nobservations): {}
Hence, we reduced the number of observations to {}.
""".format(root, len(files), poolsize, nobs)
print(msg)
print('Total alert available ({}): {}'.format(root, len(files)))
print('Total alert to be sent: {}'.format(poolsize))
# Break the alert list into observations
files = np.array_split(files[:poolsize], nobs)[:nobs]
# Starting time
t0 = time.time()
print("t0: {}".format(t0))
def send_visit(list_of_files):
""" Send all alerts of an observation for publication in Kafka
Parameters
----------
list_of_files: list of str
List with filenames containing the alert (avro file). Alerts
can be gzipped, but the extension should be
explicit (`avro` or `avro.gz`).
"""
print('Observation start: t0 + : {:.2f} seconds'.format(
time.time() - t0))
# Load alert contents
startstop = []
for index, fn in enumerate(list_of_files):
if fn.endswith('avro'):
copen = lambda x: open(x, mode='rb')
elif fn.endswith('avro.gz'):
copen = lambda x: gzip.open(x, mode='rb')
else:
msg = """
Alert filename should end with `avro` or `avro.gz`.
Currently trying to read: {}
""".format(fn)
raise NotImplementedError(msg)
with copen(fn) as file_data:
# Read the data
data = avroUtils.readschemadata(file_data)
# Read the Schema
schema = data.schema
# assuming one record per data
record = next(data)
if index == 0 or index == len(list_of_files) - 1:
if args.to_display != 'None':
fields = args.to_display.split(',')
to_display = record[fields[0]]
for field_ in fields[1:]:
to_display = to_display[field_]
startstop.append(to_display)
streamproducer.send(record, alert_schema=schema, encode=True)
if args.to_display != 'None':
print('{} alerts sent ({} to {})'.format(len(
list_of_files),
startstop[0],
startstop[1]))
# Trigger the producer
streamproducer.flush()
loop = asyncio.get_event_loop()
asyncio.ensure_future(
alertProducer.schedule_delays(
loop,
send_visit,
files,
interval=args.tinterval_kafka))
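    # (added, hedged note) schedule_delays appears to enqueue one send_visit
    # call per observation on the event loop, spaced by tinterval_kafka
    # seconds, so the simulator mimics the cadence of successive telescope
    # visits.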
loop.run_forever()
loop.close()
if __name__ == "__main__":
main()
|
[
"asyncio.get_event_loop",
"argparse.ArgumentParser",
"gzip.open",
"fink_alert_simulator.alertProducer.schedule_delays",
"time.time",
"fink_alert_simulator.parser.getargs",
"fink_alert_simulator.avroUtils.readschemadata",
"fink_alert_simulator.alertProducer.AlertProducer",
"numpy.array_split",
"os.path.join"
] |
[((962, 1006), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (985, 1006), False, 'import argparse\n'), ((1018, 1033), 'fink_alert_simulator.parser.getargs', 'getargs', (['parser'], {}), '(parser)\n', (1025, 1033), False, 'from fink_alert_simulator.parser import getargs\n'), ((1155, 1221), 'fink_alert_simulator.alertProducer.AlertProducer', 'alertProducer.AlertProducer', (['args.topic'], {'schema_files': 'None'}), '(args.topic, schema_files=None, **conf)\n', (1182, 1221), False, 'from fink_alert_simulator import alertProducer\n'), ((2592, 2603), 'time.time', 'time.time', ([], {}), '()\n', (2601, 2603), False, 'import time\n'), ((4650, 4674), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4672, 4674), False, 'import asyncio\n'), ((1340, 1369), 'os.path.join', 'os.path.join', (['root', '"""*.avro*"""'], {}), "(root, '*.avro*')\n", (1352, 1369), False, 'import os\n'), ((2516, 2554), 'numpy.array_split', 'np.array_split', (['files[:poolsize]', 'nobs'], {}), '(files[:poolsize], nobs)\n', (2530, 2554), True, 'import numpy as np\n'), ((4710, 4800), 'fink_alert_simulator.alertProducer.schedule_delays', 'alertProducer.schedule_delays', (['loop', 'send_visit', 'files'], {'interval': 'args.tinterval_kafka'}), '(loop, send_visit, files, interval=args.\n tinterval_kafka)\n', (4739, 4800), False, 'from fink_alert_simulator import alertProducer\n'), ((3721, 3756), 'fink_alert_simulator.avroUtils.readschemadata', 'avroUtils.readschemadata', (['file_data'], {}), '(file_data)\n', (3745, 3756), False, 'from fink_alert_simulator import avroUtils\n'), ((3078, 3089), 'time.time', 'time.time', ([], {}), '()\n', (3087, 3089), False, 'import time\n'), ((3365, 3388), 'gzip.open', 'gzip.open', (['x'], {'mode': '"""rb"""'}), "(x, mode='rb')\n", (3374, 3388), False, 'import gzip\n')]
|