import pickle
from unittest import mock
from nose2.tools.params import params
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleGaussianMLPModel
class TestGaussianMLPPolicyWithModel(TfGraphTestCase):
@params(
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
)
def test_get_action(self, obs_dim, action_dim):
env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('garage.tf.policies.'
'gaussian_mlp_policy_with_model.GaussianMLPModel'),
new=SimpleGaussianMLPModel):
policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
action, prob = policy.get_action(obs)
expected_action = np.full(action_dim, 0.75)
expected_mean = np.full(action_dim, 0.5)
expected_log_std = np.full(action_dim, 0.5)
assert env.action_space.contains(action)
assert np.array_equal(action, expected_action)
assert np.array_equal(prob['mean'], expected_mean)
assert np.array_equal(prob['log_std'], expected_log_std)
actions, probs = policy.get_actions([obs, obs, obs])
for action, mean, log_std in zip(actions, probs['mean'],
probs['log_std']):
assert env.action_space.contains(action)
assert np.array_equal(action, expected_action)
            assert np.array_equal(mean, expected_mean)
            assert np.array_equal(log_std, expected_log_std)
@params(
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
)
def test_dist_info_sym(self, obs_dim, action_dim):
env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('garage.tf.policies.'
'gaussian_mlp_policy_with_model.GaussianMLPModel'),
new=SimpleGaussianMLPModel):
policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
obs_dim = env.spec.observation_space.flat_dim
obs_ph = tf.placeholder(tf.float32, shape=(None, obs_dim))
dist1_sym = policy.dist_info_sym(obs_ph, name='p1_sym')
expected_mean = np.full(action_dim, 0.5)
expected_log_std = np.full(action_dim, 0.5)
prob = self.sess.run(dist1_sym, feed_dict={obs_ph: [obs.flatten()]})
assert np.array_equal(prob['mean'], expected_mean)
assert np.array_equal(prob['log_std'], expected_log_std)
@params(
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
)
def test_is_pickleable(self, obs_dim, action_dim):
env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('garage.tf.policies.'
'gaussian_mlp_policy_with_model.GaussianMLPModel'),
new=SimpleGaussianMLPModel):
policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
obs_dim = env.spec.observation_space.flat_dim
action1, prob1 = policy.get_action(obs)
p = pickle.dumps(policy)
with tf.Session(graph=tf.Graph()):
policy_pickled = pickle.loads(p)
action2, prob2 = policy_pickled.get_action(obs)
assert env.action_space.contains(action1)
assert np.array_equal(action1, action2)
assert np.array_equal(prob1['mean'], prob2['mean'])
assert np.array_equal(prob1['log_std'], prob2['log_std'])
|
[
"tensorflow.Graph",
"pickle.dumps",
"tensorflow.placeholder",
"numpy.array_equal",
"nose2.tools.params.params",
"tests.fixtures.envs.dummy.DummyBoxEnv",
"pickle.loads",
"numpy.full",
"garage.tf.policies.GaussianMLPPolicyWithModel",
"unittest.mock.patch"
] |
[((426, 532), 'nose2.tools.params.params', 'params', (['((1,), (1,))', '((1,), (2,))', '((2,), (2,))', '((1, 1), (1, 1))', '((1, 1), (2, 2))', '((2, 2), (2, 2))'], {}), '(((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1),\n (2, 2)), ((2, 2), (2, 2)))\n', (432, 532), False, 'from nose2.tools.params import params\n'), ((1882, 1988), 'nose2.tools.params.params', 'params', (['((1,), (1,))', '((1,), (2,))', '((2,), (2,))', '((1, 1), (1, 1))', '((1, 1), (2, 2))', '((2, 2), (2, 2))'], {}), '(((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1),\n (2, 2)), ((2, 2), (2, 2)))\n', (1888, 1988), False, 'from nose2.tools.params import params\n'), ((2972, 3078), 'nose2.tools.params.params', 'params', (['((1,), (1,))', '((1,), (2,))', '((2,), (2,))', '((1, 1), (1, 1))', '((1, 1), (2, 2))', '((2, 2), (2, 2))'], {}), '(((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1),\n (2, 2)), ((2, 2), (2, 2)))\n', (2978, 3078), False, 'from nose2.tools.params import params\n'), ((1089, 1114), 'numpy.full', 'np.full', (['action_dim', '(0.75)'], {}), '(action_dim, 0.75)\n', (1096, 1114), True, 'import numpy as np\n'), ((1139, 1163), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (1146, 1163), True, 'import numpy as np\n'), ((1191, 1215), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (1198, 1215), True, 'import numpy as np\n'), ((1281, 1320), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (1295, 1320), True, 'import numpy as np\n'), ((1336, 1379), 'numpy.array_equal', 'np.array_equal', (["prob['mean']", 'expected_mean'], {}), "(prob['mean'], expected_mean)\n", (1350, 1379), True, 'import numpy as np\n'), ((1395, 1444), 'numpy.array_equal', 'np.array_equal', (["prob['log_std']", 'expected_log_std'], {}), "(prob['log_std'], expected_log_std)\n", (1409, 1444), True, 'import numpy as np\n'), ((2546, 2595), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, obs_dim)'}), '(tf.float32, shape=(None, obs_dim))\n', (2560, 2595), True, 'import tensorflow as tf\n'), ((2686, 2710), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (2693, 2710), True, 'import numpy as np\n'), ((2738, 2762), 'numpy.full', 'np.full', (['action_dim', '(0.5)'], {}), '(action_dim, 0.5)\n', (2745, 2762), True, 'import numpy as np\n'), ((2857, 2900), 'numpy.array_equal', 'np.array_equal', (["prob['mean']", 'expected_mean'], {}), "(prob['mean'], expected_mean)\n", (2871, 2900), True, 'import numpy as np\n'), ((2916, 2965), 'numpy.array_equal', 'np.array_equal', (["prob['log_std']", 'expected_log_std'], {}), "(prob['log_std'], expected_log_std)\n", (2930, 2965), True, 'import numpy as np\n'), ((3680, 3700), 'pickle.dumps', 'pickle.dumps', (['policy'], {}), '(policy)\n', (3692, 3700), False, 'import pickle\n'), ((3915, 3947), 'numpy.array_equal', 'np.array_equal', (['action1', 'action2'], {}), '(action1, action2)\n', (3929, 3947), True, 'import numpy as np\n'), ((3963, 4007), 'numpy.array_equal', 'np.array_equal', (["prob1['mean']", "prob2['mean']"], {}), "(prob1['mean'], prob2['mean'])\n", (3977, 4007), True, 'import numpy as np\n'), ((4023, 4073), 'numpy.array_equal', 'np.array_equal', (["prob1['log_std']", "prob2['log_std']"], {}), "(prob1['log_std'], prob2['log_std'])\n", (4037, 4073), True, 'import numpy as np\n'), ((662, 713), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 
'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (673, 713), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((728, 841), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel"""'], {'new': 'SimpleGaussianMLPModel'}), "('garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel'\n , new=SimpleGaussianMLPModel)\n", (738, 841), False, 'from unittest import mock\n'), ((913, 958), 'garage.tf.policies.GaussianMLPPolicyWithModel', 'GaussianMLPPolicyWithModel', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (939, 958), False, 'from garage.tf.policies import GaussianMLPPolicyWithModel\n'), ((1704, 1743), 'numpy.array_equal', 'np.array_equal', (['action', 'expected_action'], {}), '(action, expected_action)\n', (1718, 1743), True, 'import numpy as np\n'), ((1763, 1806), 'numpy.array_equal', 'np.array_equal', (["prob['mean']", 'expected_mean'], {}), "(prob['mean'], expected_mean)\n", (1777, 1806), True, 'import numpy as np\n'), ((1826, 1875), 'numpy.array_equal', 'np.array_equal', (["prob['log_std']", 'expected_log_std'], {}), "(prob['log_std'], expected_log_std)\n", (1840, 1875), True, 'import numpy as np\n'), ((2121, 2172), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (2132, 2172), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((2187, 2300), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel"""'], {'new': 'SimpleGaussianMLPModel'}), "('garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel'\n , new=SimpleGaussianMLPModel)\n", (2197, 2300), False, 'from unittest import mock\n'), ((2372, 2417), 'garage.tf.policies.GaussianMLPPolicyWithModel', 'GaussianMLPPolicyWithModel', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (2398, 2417), False, 'from garage.tf.policies import GaussianMLPPolicyWithModel\n'), ((3211, 3262), 'tests.fixtures.envs.dummy.DummyBoxEnv', 'DummyBoxEnv', ([], {'obs_dim': 'obs_dim', 'action_dim': 'action_dim'}), '(obs_dim=obs_dim, action_dim=action_dim)\n', (3222, 3262), False, 'from tests.fixtures.envs.dummy import DummyBoxEnv\n'), ((3277, 3390), 'unittest.mock.patch', 'mock.patch', (['"""garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel"""'], {'new': 'SimpleGaussianMLPModel'}), "('garage.tf.policies.gaussian_mlp_policy_with_model.GaussianMLPModel'\n , new=SimpleGaussianMLPModel)\n", (3287, 3390), False, 'from unittest import mock\n'), ((3462, 3507), 'garage.tf.policies.GaussianMLPPolicyWithModel', 'GaussianMLPPolicyWithModel', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (3488, 3507), False, 'from garage.tf.policies import GaussianMLPPolicyWithModel\n'), ((3773, 3788), 'pickle.loads', 'pickle.loads', (['p'], {}), '(p)\n', (3785, 3788), False, 'import pickle\n'), ((3731, 3741), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3739, 3741), True, 'import tensorflow as tf\n')]
|
from cinebot_mini import SERVERS
import requests
import numpy as np
import json
def base_url():
blender_dict = SERVERS["blender"]
url = "http://{}:{}".format(
blender_dict["host"], blender_dict["port"])
return url
def handshake():
url = base_url() + "/api/ping"
for i in range(5):
try:
r = requests.get(url, timeout=1.0)
r_data = r.json()
assert(r_data["url"] == "/api/ping")
return True
except Exception as e:
continue
return False
def create_object(name, type="CAMERA"):
url = base_url() + "/api/create"
data = {
"type": type,
"name": name
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
obj_dict = r_data['result']
if "name" in obj_dict:
return obj_dict["name"]
else:
print("Creating {} failed!", obj_name)
def create_objects(type="CAMERA", num=4, base_name="screen_camera_"):
url = base_url() + "/api/create"
obj_names = []
for i in range(num):
obj_name = base_name + str(i)
data = {
"type": type,
"name": obj_name
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
obj_dict = r_data['result']
if "name" in obj_dict:
obj_names.append(obj_dict["name"])
else:
print("Creating {} failed!", obj_name)
return obj_names
def set_transform_euler(obj_name, loc, rot, degree=True):
url = base_url() + "/api/object/" + obj_name + "/property"
rot_data = list(rot)
if degree:
rot_data = (np.array(rot) / 180.0 * np.pi).tolist()
data = {
"properties": {
"location": list(loc),
"rotation_euler": list(rot_data)
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_transform_matrix(obj_name, matrix):
url = base_url() + "/api/object/" + obj_name + "/property"
data = {
"properties": {
"matrix_world": matrix.tolist()
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_property(obj_name, key, val, prop_type="properties"):
url = base_url() + "/api/object/" + obj_name + "/property"
data = {
prop_type: {
key: val
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def get_property(obj_name):
url = base_url() + "/api/object/" + obj_name + "/property"
r = requests.get(url)
r_data = r.json()
return r_data["result"]
def test_object_exist(obj_name):
url = base_url() + "/api/object/" + obj_name + "/property"
data = dict()
r = requests.get(url, data=json.dumps(data))
return r.status_code != 404
def set_animation_euler(obj_name, locs, rots, degree=True):
url = base_url() + "/api/object/" + obj_name + "/animation"
rot_data = rots
if degree:
rot_data = rots / 180.0 * np.pi
transforms = []
for t in range(len(locs)):
tf_data = dict()
tf_data["frame_number"] = t
tf_data["location"] = locs[t].tolist()
tf_data["rotation_euler"] = rot_data[t].tolist()
transforms.append(tf_data)
data = {
"transforms": transforms
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_animation_matrix(obj_name, matrices):
url = base_url() + "/api/object/" + obj_name + "/animation"
transforms = []
for t in range(len(matrices)):
tf_data = dict()
tf_data["frame_number"] = t
tf_data["matrix_world"] = matrices[t].tolist()
transforms.append(tf_data)
data = {
"transforms": transforms
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def get_animation_dict(obj_name):
url = base_url() + "/api/object/" + obj_name + "/animation"
r = requests.get(url)
r_data = r.json()
animation = r_data["result"]
result = dict()
for frame in animation:
t = frame["frame_number"]
arr = np.array(frame["matrix_world"])
result[t] = arr
return result
def get_animation(obj_name):
url = base_url() + "/api/object/" + obj_name + "/animation"
r = requests.get(url)
r_data = r.json()
animation = r_data["result"]
result = []
for frame in animation:
arr = np.array(frame["matrix_world"])
result.append(arr)
return result
def delete_animation(obj_name):
url = base_url() + "/api/object/" + obj_name + "/animation"
r = requests.delete(url)
r_data = r.json()
return r_data["result"]
def delete_object(obj_name):
url = base_url() + "/api/object/" + obj_name
r = requests.delete(url)
r_data = r.json()
return r_data["result"]
def render_animation(file_name, frame_start, frame_end):
url = base_url() + "/api/render/animation"
data = {
"output_file_path": file_name,
"frame_start": frame_start,
"frame_end": frame_end
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_render_resolution(pixel_dim):
url = base_url() + "/api/render/property"
x, y = pixel_dim
data = {
"properties": {
"resolution_x": x,
"resolution_y": y
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"] == "SUCCESS"
def set_camera_properties(cam_name, focal_length_m, sensor_dims_m):
url = base_url() + "/api/object/" + cam_name + "/property"
lens = focal_length_m * 1000
w, h = np.array(sensor_dims_m) * 1000
data = {
"data_properties": {
"lens": lens,
"sensor_width": w,
"sensor_height": h
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"] == "SUCCESS"
def set_active_camera(cam_name):
url = base_url() + "/api/render/active_camera"
data = {
"name": cam_name
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"] == "SUCCESS"
|
[
"numpy.array",
"json.dumps",
"requests.get",
"requests.delete"
] |
[((2923, 2940), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2935, 2940), False, 'import requests\n'), ((4368, 4385), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4380, 4385), False, 'import requests\n'), ((4714, 4731), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4726, 4731), False, 'import requests\n'), ((5028, 5048), 'requests.delete', 'requests.delete', (['url'], {}), '(url)\n', (5043, 5048), False, 'import requests\n'), ((5187, 5207), 'requests.delete', 'requests.delete', (['url'], {}), '(url)\n', (5202, 5207), False, 'import requests\n'), ((4537, 4568), 'numpy.array', 'np.array', (["frame['matrix_world']"], {}), "(frame['matrix_world'])\n", (4545, 4568), True, 'import numpy as np\n'), ((4845, 4876), 'numpy.array', 'np.array', (["frame['matrix_world']"], {}), "(frame['matrix_world'])\n", (4853, 4876), True, 'import numpy as np\n'), ((6098, 6121), 'numpy.array', 'np.array', (['sensor_dims_m'], {}), '(sensor_dims_m)\n', (6106, 6121), True, 'import numpy as np\n'), ((342, 372), 'requests.get', 'requests.get', (['url'], {'timeout': '(1.0)'}), '(url, timeout=1.0)\n', (354, 372), False, 'import requests\n'), ((717, 733), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (727, 733), False, 'import json\n'), ((1847, 1863), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1857, 1863), False, 'import json\n'), ((2152, 2168), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2162, 2168), False, 'import json\n'), ((2457, 2473), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2467, 2473), False, 'import json\n'), ((2754, 2770), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2764, 2770), False, 'import json\n'), ((3138, 3154), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3148, 3154), False, 'import json\n'), ((3723, 3739), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3733, 3739), False, 'import json\n'), ((4192, 4208), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4202, 4208), False, 'import json\n'), ((5520, 5536), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (5530, 5536), False, 'import json\n'), ((5840, 5856), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (5850, 5856), False, 'import json\n'), ((6306, 6322), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (6316, 6322), False, 'import json\n'), ((6548, 6564), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (6558, 6564), False, 'import json\n'), ((1214, 1230), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1224, 1230), False, 'import json\n'), ((1643, 1656), 'numpy.array', 'np.array', (['rot'], {}), '(rot)\n', (1651, 1656), True, 'import numpy as np\n')]
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import scipy.io.wavfile as wavfile
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def slice_signal(signal, window_size, stride=0.5):
""" Return windows of the given signal by sweeping in stride fractions
of window
"""
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(window_size, n_samples + offset,
offset)):
if end_i - beg_i < window_size:
break
slice_ = signal[beg_i:end_i]
if slice_.shape[0] == window_size:
slices.append(slice_)
return np.array(slices, dtype=np.int32)
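# Illustrative example (added; sizes are made up): with window_size=2**14 (16384)
# and stride=0.5 the hop is 8192 samples, so a 40960-sample signal yields four
# full windows starting at 0, 8192, 16384 and 24576, and the trailing partial
# window is dropped:
#   slice_signal(np.zeros(40960, dtype=np.int16), 2 ** 14).shape  # -> (4, 16384)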
def read_and_slice(filename, wav_canvas_size, stride=0.5):
fm, wav_data = wavfile.read(filename)
if fm != 16000:
raise ValueError('Sampling rate is expected to be 16kHz!')
signals = slice_signal(wav_data, wav_canvas_size, stride)
return signals
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size, baseline_dir=None):
""" Read and slice the wav and noisy files and write to TFRecords.
out_file: TFRecordWriter.
"""
ppath, wav_fullname = os.path.split(wav_filename)
noisy_filename = os.path.join(noisy_path, wav_fullname)
wav_signals = read_and_slice(wav_filename, wav_canvas_size)
noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
if not baseline_dir is None:
baseline_filename = os.path.join(baseline_dir, wav_fullname)
baseline_signals = read_and_slice(baseline_filename, wav_canvas_size)
assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
if baseline_dir is None:
for (wav, noisy) in zip(wav_signals, noisy_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw)}))
out_file.write(example.SerializeToString())
else:
for (wav, noisy, base) in zip(wav_signals, noisy_signals, baseline_signals):
wav_raw = wav.tostring()
noisy_raw = noisy.tostring()
baseline_raw = base.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'wav_raw': _bytes_feature(wav_raw),
'noisy_raw': _bytes_feature(noisy_raw),
'baseline_raw': _bytes_feature(baseline_raw)
}))
out_file.write(example.SerializeToString())
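# Reading-side sketch (added for illustration; uses the TF1-style parsing API and
# mirrors the features written above). 'serialized_example' is a hypothetical
# record read back from the TFRecord file:
#   features = tf.parse_single_example(serialized_example, features={
#       'wav_raw': tf.FixedLenFeature([], tf.string),
#       'noisy_raw': tf.FixedLenFeature([], tf.string)})
#   wav = tf.decode_raw(features['wav_raw'], tf.int32)
#   noisy = tf.decode_raw(features['noisy_raw'], tf.int32)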
def main(opts):
if not os.path.exists(opts.save_path):
# make save path if it does not exist
os.makedirs(opts.save_path)
# set up the output filepath
out_filepath = os.path.join(opts.save_path, opts.out_file)
if os.path.splitext(out_filepath)[1] != '.tfrecords':
# if wrong extension or no extension appended, put .tfrecords
out_filepath += '.tfrecords'
else:
out_filename, ext = os.path.splitext(out_filepath)
out_filepath = out_filename + ext
# check if out_file exists and if force flag is set
if os.path.exists(out_filepath) and not opts.force_gen:
raise ValueError('ERROR: {} already exists. Set force flag (--force-gen) to '
'overwrite. Skipping this speaker.'.format(out_filepath))
elif os.path.exists(out_filepath) and opts.force_gen:
print('Will overwrite previously existing tfrecords')
os.unlink(out_filepath)
with open(opts.cfg) as cfh:
# read the configuration description
cfg_desc = toml.loads(cfh.read())
beg_enc_t = timeit.default_timer()
out_file = tf.python_io.TFRecordWriter(out_filepath)
# process the acoustic and textual data now
        for dset_i, (dset, dset_desc) in enumerate(cfg_desc.items()):
print('-' * 50)
wav_dir = dset_desc['clean']
wav_files = [os.path.join(wav_dir, wav) for wav in
os.listdir(wav_dir) if wav.endswith('.wav')]
noisy_dir = dset_desc['noisy']
baseline_dir = None
if 'baseline' in dset_desc.keys():
baseline_dir = dset_desc['baseline']
nfiles = len(wav_files)
for m, wav_file in enumerate(wav_files):
print('Processing wav file {}/{} {}{}'.format(m + 1,
nfiles,
wav_file,
' ' * 10),
end='\r')
sys.stdout.flush()
encoder_proc(wav_file, noisy_dir, out_file, 2 ** 14, baseline_dir)
out_file.close()
end_enc_t = timeit.default_timer() - beg_enc_t
print('')
print('*' * 50)
print('Total processing and writing time: {} s'.format(end_enc_t))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert the set of txt and '
'wavs to TFRecords')
parser.add_argument('--cfg', type=str, default='cfg/e2e_maker.cfg',
help='File containing the description of datasets '
'to extract the info to make the TFRecords.')
parser.add_argument('--save_path', type=str, default='data/',
help='Path to save the dataset')
parser.add_argument('--out_file', type=str, default='segan.tfrecords',
help='Output filename')
parser.add_argument('--force-gen', dest='force_gen', action='store_true',
help='Flag to force overwriting existing dataset.')
parser.set_defaults(force_gen=False)
opts = parser.parse_args()
main(opts)
|
[
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"timeit.default_timer",
"os.path.join",
"os.path.splitext",
"tensorflow.train.Int64List",
"os.path.split",
"tensorflow.train.BytesList",
"numpy.array",
"scipy.io.wavfile.read",
"os.unlink",
"tensorflow.python_io.TFRecordWriter",
"sys.stdout.flush"
] |
[((1126, 1158), 'numpy.array', 'np.array', (['slices'], {'dtype': 'np.int32'}), '(slices, dtype=np.int32)\n', (1134, 1158), True, 'import numpy as np\n'), ((1238, 1260), 'scipy.io.wavfile.read', 'wavfile.read', (['filename'], {}), '(filename)\n', (1250, 1260), True, 'import scipy.io.wavfile as wavfile\n'), ((1660, 1687), 'os.path.split', 'os.path.split', (['wav_filename'], {}), '(wav_filename)\n', (1673, 1687), False, 'import os\n'), ((1709, 1747), 'os.path.join', 'os.path.join', (['noisy_path', 'wav_fullname'], {}), '(noisy_path, wav_fullname)\n', (1721, 1747), False, 'import os\n'), ((3275, 3318), 'os.path.join', 'os.path.join', (['opts.save_path', 'opts.out_file'], {}), '(opts.save_path, opts.out_file)\n', (3287, 3318), False, 'import os\n'), ((5522, 5610), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert the set of txt and wavs to TFRecords"""'}), "(description=\n 'Convert the set of txt and wavs to TFRecords')\n", (5545, 5610), False, 'import argparse\n'), ((1941, 1981), 'os.path.join', 'os.path.join', (['baseline_dir', 'wav_fullname'], {}), '(baseline_dir, wav_fullname)\n', (1953, 1981), False, 'import os\n'), ((3109, 3139), 'os.path.exists', 'os.path.exists', (['opts.save_path'], {}), '(opts.save_path)\n', (3123, 3139), False, 'import os\n'), ((3195, 3222), 'os.makedirs', 'os.makedirs', (['opts.save_path'], {}), '(opts.save_path)\n', (3206, 3222), False, 'import os\n'), ((3522, 3552), 'os.path.splitext', 'os.path.splitext', (['out_filepath'], {}), '(out_filepath)\n', (3538, 3552), False, 'import os\n'), ((3658, 3686), 'os.path.exists', 'os.path.exists', (['out_filepath'], {}), '(out_filepath)\n', (3672, 3686), False, 'import os\n'), ((4171, 4193), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4191, 4193), False, 'import timeit\n'), ((4213, 4254), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['out_filepath'], {}), '(out_filepath)\n', (4240, 4254), True, 'import tensorflow as tf\n'), ((359, 392), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (377, 392), True, 'import tensorflow as tf\n'), ((459, 492), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (477, 492), True, 'import tensorflow as tf\n'), ((3326, 3356), 'os.path.splitext', 'os.path.splitext', (['out_filepath'], {}), '(out_filepath)\n', (3342, 3356), False, 'import os\n'), ((3889, 3917), 'os.path.exists', 'os.path.exists', (['out_filepath'], {}), '(out_filepath)\n', (3903, 3917), False, 'import os\n'), ((4008, 4031), 'os.unlink', 'os.unlink', (['out_filepath'], {}), '(out_filepath)\n', (4017, 4031), False, 'import os\n'), ((5329, 5351), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5349, 5351), False, 'import timeit\n'), ((4475, 4501), 'os.path.join', 'os.path.join', (['wav_dir', 'wav'], {}), '(wav_dir, wav)\n', (4487, 4501), False, 'import os\n'), ((5182, 5200), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5198, 5200), False, 'import sys\n'), ((4540, 4559), 'os.listdir', 'os.listdir', (['wav_dir'], {}), '(wav_dir)\n', (4550, 4559), False, 'import os\n')]
|
import argparse
import math
import matplotlib.pyplot as plt
import os
import numpy as np
import shutil
import pandas as pd
import seaborn as sns
sns.set()
sns.set_context("talk")
NUM_BINS = 100
path = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'
video_mappings = {}
video_mappings['300'] = '320x180x30_vmaf_score'
video_mappings['750'] = '640x360x30_vmaf_score'
video_mappings['1200'] = '768x432x30_vmaf_score'
video_mappings['1850'] = '1024x576x30_vmaf_score'
video_mappings['2850'] = '1280x720x30_vmaf_score'
video_mappings['4300'] = '1280x720x60_vmaf_score'
metric_list = ["reward_vmaf", "reward_br", "rebuf", "br_avg", "vmaf_avg", "switching_vmaf", "switching_br"]
#MINERVA
rebuf_penalty = 25
switching_penalty = 2.5
segment_lenght = 4.0
def load_csv():
video_info = pd.read_csv(path)
return video_info
pensieve_video_csv = load_csv()
def get_qoe(abr, trace):
logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
logfile = os.path.join(logdir, abr + "_rewards_0.log")
reward_vmaf = 0
reward_bitrate = 0
total_rebuffering = 0.0
vmaf_avg = 0.0
vmaf_switching_avg = 0.0
bitrate_avg = 0.0
bitrate_switching_avg = 0.0
with open(logfile, "r") as fin:
reward_lines = fin.readlines()
if (len(reward_lines) != args.video_chunks):
if len(reward_lines) < args.video_chunks:
to_clean.append(logfile)
print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
print("Skip, please")
return None, None, None, None, None, None, None
for i, r_line in enumerate(reward_lines):
data = r_line.split()
if i == 0:
br = int(data[1])
br_previous = br
vmaf_previous = pensieve_video_csv.loc[i, video_mappings[str(br)]]
else: # skip first
br = int(data[1])
bitrate_avg += br
bitrate_switching_avg += abs(br - br_previous)
reward_bitrate += float(data[-1])
total_rebuffering += float(data[3])
vmaf_current = pensieve_video_csv.loc[i, video_mappings[str(br)]]
vmaf_avg += vmaf_current
vmaf_switching_avg += abs(vmaf_current - vmaf_previous)
reward_vmaf += (float(vmaf_current) -
rebuf_penalty*(float(data[3])) -
switching_penalty*(abs(vmaf_current - vmaf_previous)))
vmaf_previous = vmaf_current
br_previous = br
return reward_vmaf,\
reward_bitrate,\
total_rebuffering,\
bitrate_switching_avg/(segment_lenght*args.video_chunks),\
vmaf_switching_avg/(segment_lenght*args.video_chunks),\
vmaf_avg/(segment_lenght*args.video_chunks),\
bitrate_avg/args.video_chunks
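# Worked example (added; numbers are illustrative) of the per-chunk VMAF reward
# accumulated above: with rebuf_penalty=25 and switching_penalty=2.5, a chunk
# with VMAF 80, 0.5 s of rebuffering and a VMAF change of 10 from the previous
# chunk contributes 80 - 25 * 0.5 - 2.5 * 10 = 42.5 to reward_vmaf.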
#
#def get_qoe(abr, trace):
# logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
# logfile = os.path.join(logdir, abr + "_rewards_0.log")
#
# reward = 0
#
#
# with open(logfile, "r") as fin:
# reward_lines = fin.readlines()
#
# if (len(reward_lines) != args.video_chunks):
# if len(reward_lines) < args.video_chunks:
# to_clean.append(logfile)
# print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
# print("Skip, please")
# return None
#
# for i, r_line in enumerate(reward_lines):
# if i > 0: # skip first
# reward += float(r_line.split()[-1])
#
# return reward
def get_qoes(abrs_list, traces_list):
global_results = {}
for abr in abrs_list:
        global_results[abr] = {}
global_results[abr]['reward_vmaf'] = []
global_results[abr]['reward_br'] = []
global_results[abr]['rebuf'] = []
global_results[abr]['switching_br'] = []
global_results[abr]['switching_vmaf'] = []
global_results[abr]['vmaf_avg'] = []
global_results[abr]['br_avg'] = []
for trace in traces_list:
reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)
if reward_vmaf is not None:
global_results[abr]['reward_vmaf'].append(reward_vmaf)
global_results[abr]['reward_br'].append(reward_br)
global_results[abr]['rebuf'].append(rebuf)
global_results[abr]['switching_br'].append(switching_br)
global_results[abr]['switching_vmaf'].append(switching_vmaf)
global_results[abr]['vmaf_avg'].append(vmaf_avg)
global_results[abr]['br_avg'].append(br_avg)
return global_results
def get_qoes_partial(abrs_list, traces_list):
total_experiments_expected = len(args.abrs) * len(args.traces)
experiments_executed_so_far = 0
partial_results = {}
for abr in abrs_list:
partial_results[abr] = {}
partial_results[abr]['reward_vmaf'] = []
partial_results[abr]['reward_br'] = []
partial_results[abr]['rebuf'] = []
partial_results[abr]['switching_br'] = []
partial_results[abr]['switching_vmaf'] = []
partial_results[abr]['vmaf_avg'] = []
partial_results[abr]['br_avg'] = []
for trace in traces_list:
logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
if os.path.exists(logdir):
reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)
if reward_vmaf is not None:
partial_results[abr]['reward_vmaf'].append(reward_vmaf)
partial_results[abr]['reward_br'].append(reward_br)
partial_results[abr]['rebuf'].append(rebuf)
partial_results[abr]['switching_br'].append(switching_br)
partial_results[abr]['switching_vmaf'].append(switching_vmaf)
partial_results[abr]['vmaf_avg'].append(vmaf_avg)
partial_results[abr]['br_avg'].append(br_avg)
experiments_executed_so_far += 1
        if all(len(v) == 0 for v in partial_results[abr].values()):
del partial_results[abr]
print("Experiment executed: {}/{}".format(experiments_executed_so_far, total_experiments_expected))
return partial_results
def plot_cdf(results, reward_key):
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
def average_of_the_best():
avg_best = -1000000000000
abr_best = ''
for scheme in results.keys():
avg_tmp = np.mean(results[scheme][reward_key])
if avg_best < avg_tmp:
avg_best = avg_tmp
abr_best = scheme
print("Best provider in average is {} with {}".format(abr_best, avg_best))
return abs(avg_best)
schemes = []
norm = average_of_the_best()
markers = ['.', ',', 'o', 'v', '^', '>', '<', 's', 'x', 'D', 'd', '*', '_', '']
for i, scheme in enumerate(results.keys()):
values = [float(i)/norm for i in results[scheme][reward_key]]
values, base = np.histogram(values, bins=len(values))
cumulative = np.cumsum(values)
cumulative = [float(i) / len(values) * 100 for i in cumulative]
marker_index = i % len(markers)
ax.plot(base[:-1], cumulative, linewidth=3, marker=markers[marker_index], markevery=2, markersize=15)
schemes.append(scheme)
ax.legend(schemes, loc=2)
ax.set_xlim(-1.0, 1.8)
plt.ylabel('CDF')
plt.xlabel('total reward')
fig.savefig(os.path.join(args.store_dir, 'cdf_{}.png'.format(reward_key)))
def plot_bar(results, metric):
results_metric_avg = {}
for scheme in results.keys():
results_metric_avg[scheme] = np.mean(results[scheme][metric])
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
y_pos = np.arange(len(results_metric_avg.keys()))
ax.bar(y_pos, results_metric_avg.values())
ax.set_xticks(y_pos)
ax.set_xticklabels(results_metric_avg.keys())
fig.savefig(os.path.join(args.store_dir, 'bar_{}.png'.format(metric)))
def clean():
timestamps = []
for c in to_clean:
timestamp_creation = os.path.getmtime(c)
timestamps.append(timestamp_creation)
print("File {} was created at {}".format(c, timestamp_creation))
timestamps.sort()
if not args.include_last and len(timestamps) >= 1:
print("Skipping file created at {}: might be still running".format(timestamps[-1]))
del timestamps[-1]
removing = []
for t in timestamps:
for c in to_clean:
if os.path.getmtime(c) == t:
print("Removing {}".format(os.path.dirname(os.path.dirname(c))))
removing.append(os.path.dirname(os.path.dirname(c)))
for r in removing:
shutil.rmtree(r)
def main():
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('result_dir', help='result directory', type=str)
parser.add_argument('store_dir', help='result directory', type=str)
parser.add_argument('video_chunks', help='result directory', type=int)
parser.add_argument("--abrs", nargs="+", help='ABR list')
parser.add_argument("--traces", nargs="+", help='Traces list')
parser.add_argument('--partial', action="store_true", help="get the partial results")
    parser.add_argument('--allow_cleaning', action="store_true", help="if enabled, cleans the experiments that failed, apart from the most recent one (which might still be running)")
parser.add_argument('--include_last', action="store_true", help="if enabled, also the last is getting cleaned")
# args need to be global for simplicity
global args
args = parser.parse_args()
global to_clean
to_clean = []
if not os.path.exists(args.store_dir):
os.makedirs(args.store_dir)
if args.partial:
res = get_qoes_partial(args.abrs, args.traces)
else:
res = get_qoes(args.abrs, args.traces)
for metric in metric_list:
if "reward" in metric:
plot_cdf(res, metric)
plot_bar(res, metric)
if args.allow_cleaning:
print("Executing cleaning")
clean()
if __name__ == "__main__":
main()
|
[
"numpy.mean",
"seaborn.set",
"os.path.exists",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.xlabel",
"seaborn.set_context",
"os.path.join",
"os.path.getmtime",
"os.path.dirname",
"matplotlib.pyplot.figure",
"shutil.rmtree",
"numpy.cumsum"
] |
[((146, 155), 'seaborn.set', 'sns.set', ([], {}), '()\n', (153, 155), True, 'import seaborn as sns\n'), ((156, 179), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (171, 179), True, 'import seaborn as sns\n'), ((798, 815), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (809, 815), True, 'import pandas as pd\n'), ((910, 968), 'os.path.join', 'os.path.join', (['args.result_dir', "(abr + '-' + trace)", '"""result"""'], {}), "(args.result_dir, abr + '-' + trace, 'result')\n", (922, 968), False, 'import os\n'), ((983, 1027), 'os.path.join', 'os.path.join', (['logdir', "(abr + '_rewards_0.log')"], {}), "(logdir, abr + '_rewards_0.log')\n", (995, 1027), False, 'import os\n'), ((6675, 6707), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0, 10.0)'}), '(figsize=(16.0, 10.0))\n', (6685, 6707), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (7835, 7842), True, 'import matplotlib.pyplot as plt\n'), ((7847, 7873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""total reward"""'], {}), "('total reward')\n", (7857, 7873), True, 'import matplotlib.pyplot as plt\n'), ((8135, 8167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0, 10.0)'}), '(figsize=(16.0, 10.0))\n', (8145, 8167), True, 'import matplotlib.pyplot as plt\n'), ((9251, 9276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9274, 9276), False, 'import argparse\n'), ((7492, 7509), 'numpy.cumsum', 'np.cumsum', (['values'], {}), '(values)\n', (7501, 7509), True, 'import numpy as np\n'), ((8091, 8123), 'numpy.mean', 'np.mean', (['results[scheme][metric]'], {}), '(results[scheme][metric])\n', (8098, 8123), True, 'import numpy as np\n'), ((8538, 8557), 'os.path.getmtime', 'os.path.getmtime', (['c'], {}), '(c)\n', (8554, 8557), False, 'import os\n'), ((9177, 9193), 'shutil.rmtree', 'shutil.rmtree', (['r'], {}), '(r)\n', (9190, 9193), False, 'import shutil\n'), ((10156, 10186), 'os.path.exists', 'os.path.exists', (['args.store_dir'], {}), '(args.store_dir)\n', (10170, 10186), False, 'import os\n'), ((10196, 10223), 'os.makedirs', 'os.makedirs', (['args.store_dir'], {}), '(args.store_dir)\n', (10207, 10223), False, 'import os\n'), ((5595, 5653), 'os.path.join', 'os.path.join', (['args.result_dir', "(abr + '-' + trace)", '"""result"""'], {}), "(args.result_dir, abr + '-' + trace, 'result')\n", (5607, 5653), False, 'import os\n'), ((5669, 5691), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (5683, 5691), False, 'import os\n'), ((6890, 6926), 'numpy.mean', 'np.mean', (['results[scheme][reward_key]'], {}), '(results[scheme][reward_key])\n', (6897, 6926), True, 'import numpy as np\n'), ((8970, 8989), 'os.path.getmtime', 'os.path.getmtime', (['c'], {}), '(c)\n', (8986, 8989), False, 'import os\n'), ((9125, 9143), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (9140, 9143), False, 'import os\n'), ((9055, 9073), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (9070, 9073), False, 'import os\n')]
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import collections
import numpy as np
import scipy.sparse as smat
def cs_matrix(arg1, mat_type, shape=None, dtype=None, copy=False, check_contents=False):
"""Custom compressed sparse matrix constructor that allows indices and indptr to be stored in different types.
Args:
arg1 (tuple): (data, indices, indptr) to construct compressed sparse matrix
mat_type (type): the matrix type to construct, one of [scipy.sparse.csr_matrix | scipy.sparse.csc_matrix]
shape (tuple, optional): shape of the matrix, default None to infer from arg1
dtype (type, optional): type of values in the matrix, default None to infer from data
copy (bool, optional): whether to copy the input arrays, defaults to False
check_contents (bool, optional): whether to check array contents to determine dtype, defaults to False
Returns:
compressed sparse matrix in mat_type
"""
(data, indices, indptr) = arg1
indices_dtype = smat.sputils.get_index_dtype(indices, check_contents=check_contents)
indptr_dtype = smat.sputils.get_index_dtype(indptr, check_contents=check_contents)
ret = mat_type(shape, dtype=dtype)
# Read matrix dimensions given, if any
if shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(ret.indptr) - 1
minor_dim = ret.indices.max() + 1
except Exception:
raise ValueError("unable to infer matrix dimensions")
else:
shape = ret._swap((major_dim, minor_dim))
ret.indices = np.array(indices, copy=copy, dtype=indices_dtype)
ret.indptr = np.array(indptr, copy=copy, dtype=indptr_dtype)
ret.data = np.array(data, copy=copy, dtype=dtype)
return ret
def csr_matrix(arg1, shape=None, dtype=None, copy=False):
"""Custom csr_matrix constructor that allows indices and indptr to be stored in different types.
Args:
arg1 (tuple): (data, indices, indptr) to construct csr_matrix
shape (tuple, optional): shape of the matrix, default None to infer from arg1
dtype (type, optional): type of values in the matrix, default None to infer from data
copy (bool, optional): whether to copy the input arrays, defaults to False
Returns:
csr_matrix
"""
return cs_matrix(arg1, smat.csr_matrix, shape=shape, dtype=dtype, copy=copy)
def csc_matrix(arg1, shape=None, dtype=None, copy=False):
"""Custom csc_matrix constructor that allows indices and indptr to be stored in different types.
Args:
arg1 (tuple): (data, indices, indptr) to construct csc_matrix
shape (tuple, optional): shape of the matrix, default None to infer from arg1
dtype (type, optional): type of values in the matrix, default None to infer from data
copy (bool, optional): whether to copy the input arrays, defaults to False
Returns:
csc_matrix
"""
return cs_matrix(arg1, smat.csc_matrix, shape=shape, dtype=dtype, copy=copy)
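# Small usage sketch (added for illustration; values are made up): build a 2 x 3
# CSR matrix from raw (data, indices, indptr) arrays; unlike the plain scipy
# constructor, indices and indptr keep index dtypes inferred independently from
# their own contents.
#   data = np.array([1.0, 2.0, 3.0])
#   indices = np.array([0, 2, 1])
#   indptr = np.array([0, 2, 3])
#   X = csr_matrix((data, indices, indptr), shape=(2, 3))
#   # X.toarray() -> [[1., 0., 2.], [0., 3., 0.]]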
def save_matrix(tgt, mat):
"""Save dense or sparse matrix to file.
Args:
tgt (str): path to save the matrix
mat (numpy.ndarray or scipy.sparse.spmatrix): target matrix to save
"""
assert isinstance(tgt, str), "tgt for save_matrix must be a str, but got {}".format(type(tgt))
with open(tgt, "wb") as tgt_file:
if isinstance(mat, np.ndarray):
np.save(tgt_file, mat, allow_pickle=False)
elif isinstance(mat, smat.spmatrix):
smat.save_npz(tgt_file, mat, compressed=False)
else:
raise NotImplementedError("Save not implemented for matrix type {}".format(type(mat)))
def load_matrix(src, dtype=None):
"""Load dense or sparse matrix from file.
Args:
src (str): path to load the matrix.
dtype (numpy.dtype, optional): if given, convert matrix dtype. otherwise use default type.
Returns:
mat (numpy.ndarray or scipy.sparse.spmatrix): loaded matrix
Notes:
If underlying matrix is {"csc", "csr", "bsr"}, indices will be sorted.
"""
if not isinstance(src, str):
raise ValueError("src for load_matrix must be a str")
mat = np.load(src)
# decide whether it's dense or sparse
if isinstance(mat, np.ndarray):
pass
elif isinstance(mat, np.lib.npyio.NpzFile):
# Ref code: https://github.com/scipy/scipy/blob/v1.4.1/scipy/sparse/_matrix_io.py#L19-L80
matrix_format = mat["format"].item()
if not isinstance(matrix_format, str):
# files saved with SciPy < 1.0.0 may contain unicode or bytes.
matrix_format = matrix_format.decode("ascii")
try:
cls = getattr(smat, "{}_matrix".format(matrix_format))
except AttributeError:
raise ValueError("Unknown matrix format {}".format(matrix_format))
if matrix_format in ("csc", "csr", "bsr"):
mat = cls((mat["data"], mat["indices"], mat["indptr"]), shape=mat["shape"])
# This is in-place operation
mat.sort_indices()
elif matrix_format == "dia":
mat = cls((mat["data"], mat["offsets"]), shape=mat["shape"])
elif matrix_format == "coo":
mat = cls((mat["data"], (mat["row"], mat["col"])), shape=mat["shape"])
else:
raise NotImplementedError(
"Load is not implemented for sparse matrix of format {}.".format(matrix_format)
)
else:
raise TypeError("load_feature_matrix encountered unknown input format {}".format(type(mat)))
if dtype is None:
return mat
else:
return mat.astype(dtype)
def transpose(mat):
"""Transpose a dense/sparse matrix.
Args:
X (np.ndarray, spmatrix): input matrix to be transposed.
Returns:
transposed X
"""
if not isinstance(mat, smat.spmatrix):
raise ValueError("mat must be a smat.spmatrix type")
if isinstance(mat, smat.csr_matrix):
return csc_matrix((mat.data, mat.indices, mat.indptr), shape=(mat.shape[1], mat.shape[0]))
elif isinstance(mat, smat.csc_matrix):
return csr_matrix((mat.data, mat.indices, mat.indptr), shape=(mat.shape[1], mat.shape[0]))
else:
return mat.T
def sorted_csr_from_coo(shape, row_idx, col_idx, val, only_topk=None):
"""Return a row-sorted CSR matrix from a COO sparse matrix.
    Nonzero elements in each row of the returned CSR matrix are sorted in descending order of value. If only_topk is given, only the topk largest elements will be kept.
Args:
shape (tuple): the shape of the input COO matrix
row_idx (ndarray): row indices of the input COO matrix
col_idx (ndarray): col indices of the input COO matrix
val (ndarray): values of the input COO matrix
only_topk (int, optional): keep only topk elements per row. Default None to ignore
Returns:
csr_matrix
"""
csr = smat.csr_matrix((val, (row_idx, col_idx)), shape=shape)
csr.sort_indices()
for i in range(shape[0]):
rng = slice(csr.indptr[i], csr.indptr[i + 1])
sorted_idx = np.argsort(-csr.data[rng], kind="mergesort")
csr.indices[rng] = csr.indices[rng][sorted_idx]
csr.data[rng] = csr.data[rng][sorted_idx]
if only_topk is not None:
assert isinstance(only_topk, int), f"Wrong type: type(only_topk) = {type(only_topk)}"
only_topk = max(min(1, only_topk), only_topk)
nnz_of_insts = csr.indptr[1:] - csr.indptr[:-1]
row_idx = np.repeat(np.arange(shape[0], dtype=csr.indices.dtype), nnz_of_insts)
selected_idx = (np.arange(len(csr.data)) - csr.indptr[row_idx]) < only_topk
row_idx = row_idx[selected_idx]
col_idx = csr.indices[selected_idx]
val = csr.data[selected_idx]
indptr = np.cumsum(np.bincount(row_idx + 1, minlength=(shape[0] + 1)))
csr = csr_matrix((val, col_idx, indptr), shape=shape, dtype=val.dtype)
return csr
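# Illustrative example (added; values are made up): for a 2 x 3 COO input with
#   row_idx = [0, 0, 1], col_idx = [0, 2, 1], val = [1.0, 5.0, 3.0]
# the returned CSR stores row 0 as (col 2, 5.0) then (col 0, 1.0), since entries
# within a row are ordered by descending value; with only_topk=1 each row keeps
# only its largest entry, i.e. (col 2, 5.0) and (col 1, 3.0).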
def sorted_csc_from_coo(shape, row_idx, col_idx, val, only_topk=None):
"""Return a column-sorted CSC matrix from a COO sparse matrix.
    Nonzero elements in each col of the returned CSC matrix are sorted in descending order of value. If only_topk is given, only the topk largest elements will be kept.
Args:
shape (tuple): the shape of the input COO matrix
row_idx (ndarray): row indices of the input COO matrix
col_idx (ndarray): col indices of the input COO matrix
val (ndarray): values of the input COO matrix
only_topk (int, optional): keep only topk elements per col. Default None to ignore
Returns:
csc_matrix
"""
csr = sorted_csr_from_coo(shape[::-1], col_idx, row_idx, val, only_topk=None)
return transpose(csr)
def binarized(X, inplace=False):
"""Binarize a dense/sparse matrix. All nonzero elements become 1.
Args:
X (np.ndarray, spmatrix): input matrix to binarize
inplace (bool, optional): if True do the binarization in-place, else return a copy. Default False
Returns:
binarized X
"""
if not isinstance(X, (np.ndarray, smat.spmatrix)):
raise NotImplementedError(
"this function only support X being np.ndarray or scipy.sparse.spmatrix."
)
if not inplace:
X = X.copy()
if isinstance(X, smat.spmatrix):
X.data[:] = 1
else:
X[:] = 1
return X
def sorted_csr(csr, only_topk=None):
"""Return a copy of input CSR matrix where nonzero elements in each row is sorted in an descending order based on the value.
If `only_topk` is given, only top-k largest elements will be kept.
Args:
csr (csr_matrix): input csr_matrix to sort
only_topk (int, optional): keep only topk elements per row. Default None to ignore
Returns:
csr_matrix
"""
if not isinstance(csr, smat.csr_matrix):
raise ValueError("the input matrix must be a csr_matrix.")
row_idx = np.repeat(np.arange(csr.shape[0], dtype=np.uint32), csr.indptr[1:] - csr.indptr[:-1])
return sorted_csr_from_coo(csr.shape, row_idx, csr.indices, csr.data, only_topk)
def sorted_csc(csc, only_topk=None):
"""Return a copy of input CSC matrix where nonzero elements in each column is sorted in an descending order based on the value.
If `only_topk` is given, only top-k largest elements will be kept.
Args:
csc (csc_matrix): input csc_matrix to sort
only_topk (int, optional): keep only topk elements per col. Default None to ignore
Returns:
csc_matrix
"""
if not isinstance(csc, smat.csc_matrix):
raise ValueError("the input matrix must be a csc_matrix.")
return transpose(sorted_csr(transpose(csc)))
def dense_to_csr(dense, topk=None, batch=None):
"""Memory efficient method to construct a csr_matrix from a dense matrix.
Args:
dense (ndarray): 2-D dense matrix to convert.
topk (int or None, optional): keep topk non-zeros with largest abs value for each row.
Default None to keep everything.
batch (int or None, optional): the batch size for construction.
Default None to use min(dense.shape[0], 10 ** 5).
Returns:
csr_matrix that has topk nnz each row with the same shape as dense.
"""
BATCH_LIMIT = 10 ** 5
if topk is None:
keep_topk = dense.shape[1]
else:
keep_topk = min(dense.shape[1], max(1, int(topk)))
# if batch is given, use input batch size even if input batch > BATCH_LIMIT
if batch is None:
chunk_size = min(dense.shape[0], BATCH_LIMIT)
else:
chunk_size = min(dense.shape[0], max(1, int(batch)))
max_nnz = keep_topk * dense.shape[0]
indptr_dtype = np.int32 if max_nnz < np.iinfo(np.int32).max else np.int64
indices_dtype = np.int32 if dense.shape[1] < np.iinfo(np.int32).max else np.int64
data = np.empty((keep_topk * dense.shape[0],), dtype=dense.dtype)
indices = np.empty((keep_topk * dense.shape[0],), dtype=indices_dtype)
for i in range(0, dense.shape[0], chunk_size):
cur_chunk = dense[i : i + chunk_size, :]
chunk_len = cur_chunk.shape[0]
if keep_topk < dense.shape[1]:
col_indices = np.argpartition(abs(cur_chunk), keep_topk, axis=1)[:, -keep_topk:]
else:
col_indices = np.repeat(np.arange(keep_topk)[np.newaxis, :], chunk_len, axis=0)
row_indices = np.repeat(np.arange(chunk_len)[:, np.newaxis], keep_topk, axis=1)
chunk_data = cur_chunk[row_indices, col_indices]
data[i * keep_topk : i * keep_topk + chunk_data.size] = chunk_data.flatten()
indices[i * keep_topk : i * keep_topk + col_indices.size] = col_indices.flatten()
indptr = np.arange(0, dense.shape[0] * keep_topk + 1, keep_topk, dtype=indptr_dtype)
# Bypass scipy constructor to allow different indices and indptr types
return csr_matrix((data, indices, indptr), shape=dense.shape)
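# Small usage sketch (added; values are made up): keep only the largest-|value|
# entry of each row of a dense matrix.
#   D = np.array([[0.1, -3.0, 2.0], [4.0, 0.0, -1.0]])
#   S = dense_to_csr(D, topk=1)
#   # S.toarray() -> [[0., -3., 0.], [4., 0., 0.]]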
def vstack_csr(matrices, dtype=None):
"""Memory efficient method to stack csr_matrices vertically.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (M1 x N), (M2 x N), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csr_matrix with shape (M1 + M2 + ..., N)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csr_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
nr_cols = matrices[0].shape[1]
if any(mat.shape[1] != nr_cols for mat in matrices):
raise ValueError("Second dim not match")
total_nnz = sum([int(mat.nnz) for mat in matrices])
total_rows = sum([int(mat.shape[0]) for mat in matrices])
# infer result dtypes from inputs
int32max = np.iinfo(np.int32).max
if dtype is None:
dtype = smat.sputils.upcast(*[mat.dtype for mat in matrices])
indices_dtype = np.int64 if nr_cols > int32max else np.int32
indptr_dtype = np.int64 if total_nnz > int32max else np.int32
indptr = np.empty(total_rows + 1, dtype=indptr_dtype)
indices = np.empty(total_nnz, dtype=indices_dtype)
data = np.empty(total_nnz, dtype=dtype)
indptr[0], cur_nnz, cur_row = 0, 0, 0
for mat in matrices:
indices[cur_nnz : cur_nnz + mat.nnz] = mat.indices
data[cur_nnz : cur_nnz + mat.nnz] = mat.data
# can not merge the following two lines because
# mat.indptr[1:] + cur_nnz may overflow!
indptr[cur_row + 1 : cur_row + mat.shape[0] + 1] = mat.indptr[1:]
indptr[cur_row + 1 : cur_row + mat.shape[0] + 1] += cur_nnz
cur_nnz += mat.nnz
cur_row += mat.shape[0]
return csr_matrix((data, indices, indptr), shape=(total_rows, nr_cols))
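# Small usage sketch (added; values are made up): stacking a 1 x 2 and a 2 x 2
# csr_matrix vertically yields a 3 x 2 csr_matrix while preserving indices order.
#   A = smat.csr_matrix(np.array([[1.0, 0.0]]))
#   B = smat.csr_matrix(np.array([[0.0, 2.0], [3.0, 4.0]]))
#   C = vstack_csr([A, B])
#   # C.shape -> (3, 2); C.toarray() -> [[1., 0.], [0., 2.], [3., 4.]]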
def hstack_csr(matrices, dtype=None):
"""Memory efficient method to stack csr_matrices horizontally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (M x N1), (M x N2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csr_matrix with shape (M, N1 + N2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csr_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
nr_rows = matrices[0].shape[0]
if any(mat.shape[0] != nr_rows for mat in matrices):
raise ValueError("First dim not match")
total_nnz = sum([int(mat.nnz) for mat in matrices])
total_cols = sum([int(mat.shape[1]) for mat in matrices])
# infer result dtypes from inputs
int32max = np.iinfo(np.int32).max
if dtype is None:
dtype = smat.sputils.upcast(*[mat.dtype for mat in matrices])
indices_dtype = np.int64 if nr_rows > int32max else np.int32
indptr_dtype = np.int64 if total_nnz > int32max else np.int32
indptr = np.empty(nr_rows + 1, dtype=indptr_dtype)
indices = np.empty(total_nnz, dtype=indices_dtype)
data = np.empty(total_nnz, dtype=dtype)
indptr[0], cur_ptr = 0, 0
for i in range(nr_rows): # for every row
start_col = 0
for mat in matrices:
cur_nnz = mat.indptr[i + 1] - mat.indptr[i]
indices[cur_ptr : cur_ptr + cur_nnz] = (
mat.indices[mat.indptr[i] : mat.indptr[i + 1]] + start_col
)
data[cur_ptr : cur_ptr + cur_nnz] = mat.data[mat.indptr[i] : mat.indptr[i + 1]]
cur_ptr += cur_nnz
start_col += mat.shape[1]
indptr[i + 1] = cur_ptr
return csr_matrix((data, indices, indptr), shape=(nr_rows, total_cols))
def block_diag_csr(matrices, dtype=None):
"""Memory efficient method to stack csr_matrices block diagonally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (NR1 x NC1), (NR2 x NC2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csr_matrix with shape (NR1 + NR2 + ..., NC1 + NC2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csr_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
total_nnz = sum([int(mat.nnz) for mat in matrices])
total_rows = sum([int(mat.shape[0]) for mat in matrices])
total_cols = sum([int(mat.shape[1]) for mat in matrices])
# infer result dtypes from inputs
int32max = np.iinfo(np.int32).max
if dtype is None:
dtype = smat.sputils.upcast(*[mat.dtype for mat in matrices])
indices_dtype = np.int64 if total_rows > int32max else np.int32
indptr_dtype = np.int64 if total_nnz > int32max else np.int32
indptr = np.empty(total_rows + 1, dtype=indptr_dtype)
indices = np.empty(total_nnz, dtype=indices_dtype)
data = np.empty(total_nnz, dtype=dtype)
cur_row, cur_col, cur_nnz = 0, 0, 0
indptr[0] = 0
for mat in matrices:
data[cur_nnz : cur_nnz + mat.nnz] = mat.data
indices[cur_nnz : cur_nnz + mat.nnz] = mat.indices + cur_col
indptr[1 + cur_row : 1 + cur_row + mat.shape[0]] = mat.indptr[1:] + indptr[cur_row]
cur_col += mat.shape[1]
cur_row += mat.shape[0]
cur_nnz += mat.nnz
return csr_matrix((data, indices, indptr), shape=(total_rows, total_cols))
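# Small usage sketch (added; values are made up): block-diagonal stacking of a
# 1 x 1 and a 1 x 2 csr_matrix gives a 2 x 3 csr_matrix.
#   A = smat.csr_matrix(np.array([[1.0]]))
#   B = smat.csr_matrix(np.array([[2.0, 3.0]]))
#   C = block_diag_csr([A, B])
#   # C.toarray() -> [[1., 0., 0.], [0., 2., 3.]]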
def vstack_csc(matrices, dtype=None):
"""Memory efficient method to stack csc_matrices vertically.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (M1 x N), (M2 x N), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csc_matrix with shape (M1 + M2 + ..., N)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csc_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csc_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
return transpose(hstack_csr([transpose(mat) for mat in matrices], dtype=dtype))
def hstack_csc(matrices, dtype=None):
"""Memory efficient method to stack csc_matrices horizontally.
The returned matrix will retain the indices order.
Args:
matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (M x N1), (M x N2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
csc_matrix with shape (M, N1 + N2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csc_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csc_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
return transpose(vstack_csr([transpose(mat) for mat in matrices], dtype=dtype))
def block_diag_csc(matrices, dtype=None):
"""Memory efficient method to stack csc_matrices block diagonally.
The returned matrix will retain the indices order.
Args:
        matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (NR1 x NC1), (NR2 x NC2), ...
dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices
Returns:
        csc_matrix with shape (NR1 + NR2 + ..., NC1 + NC2 + ...)
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
if any(not isinstance(X, smat.csc_matrix) for X in matrices):
raise ValueError("all matrix in matrices need to be csc_matrix!")
if len(matrices) <= 1:
return matrices[0] if len(matrices) == 1 else None
return transpose(block_diag_csr([transpose(mat) for mat in matrices], dtype=dtype))
def get_csc_col_nonzero(matrix):
"""Given a matrix, returns the nonzero row ids of each col
The returned ndarray will retain the indices order.
Args:
matrix: the matrix to operate on, with shape (N x M)
Returns:
        list of ndarray [a_1, a_2, a_3, ...], where a_i is an array indicating the nonzero row ids of col i
"""
if not isinstance(matrix, smat.csc_matrix):
raise ValueError("matrix need to be csc_matrix!")
return [matrix.indices[matrix.indptr[i] : matrix.indptr[i + 1]] for i in range(matrix.shape[1])]
def get_csr_row_nonzero(matrix):
"""Given a matrix, returns the nonzero col ids of each row
The returned ndarray will retain the indices order.
Args:
matrix: the matrix to operate on, with shape (N x M)
Returns:
        list of ndarray [a_1, a_2, a_3, ...], where a_i is an array indicating the nonzero col ids of row i
"""
if not isinstance(matrix, smat.csr_matrix):
raise ValueError("matrix need to be csr_matrix!")
return [matrix.indices[matrix.indptr[i] : matrix.indptr[i + 1]] for i in range(matrix.shape[0])]
def get_row_submatrices(matrices, row_indices):
"""Get the sub-matrices of given matrices by selecting the rows given in row_indices
Args:
matrices (list of csr_matrix or ndarray): the matrices [mat_1, mat_2, ...] to operate on, with shape (M x N1), (M x N2), ...
row_indices (list or ndarray): the row indices to select
Returns:
list of csr_matrix or ndarray
"""
if not isinstance(matrices, (list, tuple)):
raise ValueError("matrices should be either list or tuple")
n_mat = len(matrices)
if n_mat == 0:
raise ValueError("At least one matrix required as input")
if any(not isinstance(X, (smat.csr_matrix, np.ndarray)) for X in matrices):
raise ValueError("all matrix in matrices need to be csr_matrix or ndarray!")
nr_rows = matrices[0].shape[0]
if any(mat.shape[0] != nr_rows for mat in matrices):
raise ValueError("First dim not match")
if any(idx >= nr_rows or idx < 0 for idx in row_indices):
raise ValueError("row indices should be positive and do not exceed matrix first dimension")
results = []
for mat in matrices:
mat1 = mat[row_indices, :]
if isinstance(mat, smat.csr_matrix):
mat1.sort_indices()
results += [mat1]
return results
def dense_to_coo(dense):
"""Convert a dense matrix to COO format.
Args:
dense (ndarray): input dense matrix
Returns:
coo_matrix
"""
rows = np.arange(dense.shape[0], dtype=np.uint32)
cols = np.arange(dense.shape[1], dtype=np.uint32)
row_idx = np.repeat(rows, np.ones_like(rows) * len(cols)).astype(np.uint32)
col_idx = np.ones((len(rows), 1), dtype=np.uint32).dot(cols.reshape(1, -1)).ravel()
return smat.coo_matrix((dense.ravel(), (row_idx, col_idx)), shape=dense.shape)
def get_relevance_csr(csr, mm=None, dtype=np.float64):
"""Return the csr matrix containing relevance scores based on given prediction csr matrix.
Relevance score is defined as: max_rank - local_rank + 1
Args:
csr (csr_matrix): input CSR matrix, row indices are sorted in descending order
mm (int, optional): max rank, will be inferred from csr if not given
dtype (type, optional): datatype for the returned relevance matrix. Default float64.
Returns:
csr_matrix of relevance scores
"""
if mm is None:
mm = (csr.indptr[1:] - csr.indptr[:-1]).max()
nnz = len(csr.data)
nnz_of_rows = csr.indptr[1:] - csr.indptr[:-1]
row_idx = np.repeat(np.arange(csr.shape[0]), nnz_of_rows)
rel = np.array(
mm - (np.arange(nnz) - csr.indptr[row_idx]), dtype=dtype
    ) # adding 1 avoids zero entries
return smat.csr_matrix((rel, csr.indices, csr.indptr), csr.shape)
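# Small illustration of the relevance transform above; rows of the input are
# assumed to be already sorted in descending score order within each row (see
# the docstring above). This helper is illustrative only, not part of the API.
def _relevance_csr_example():
    pred = smat.csr_matrix(np.array([[0.9, 0.5, 0.0],
                                      [0.0, 0.8, 0.3]]))
    rel = get_relevance_csr(pred)
    # Every row has 2 nonzeros, so mm == 2: the top-ranked entry of each row
    # gets relevance 2 and the next one gets relevance 1.
    return rel.toarray()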
def get_sparsified_coo(coo, selected_rows, selected_columns):
"""
Zero out everything not in selected rows and columns.
Args:
coo (coo_matrix): input coo matrix
        selected_rows (list of int or np.array(int)): list of rows that are kept (not zeroed out)
        selected_columns (list of int or np.array(int)): list of columns that are kept (not zeroed out)
Returns:
coo matrix with unwanted rows and columns zeroed out.
"""
valid_rows = np.zeros(coo.shape[0], dtype=bool)
valid_cols = np.zeros(coo.shape[1], dtype=bool)
valid_rows[selected_rows] = True
valid_cols[selected_columns] = True
valid_idx = valid_rows[coo.row] & valid_cols[coo.col]
coo = smat.coo_matrix(
(coo.data[valid_idx], (coo.row[valid_idx], coo.col[valid_idx])), shape=coo.shape
)
return coo
def csr_rowwise_mul(A, v):
"""Row-wise multiplication between sparse csr matrix A and dense array v.
Where each row of A is multiplied by the corresponding element in v.
The number of rows of A is same as the length of v.
Args:
A (csr_matrix): The matrix to be multiplied.
v (ndarray): The multiplying vector.
Returns:
Z (csr_matrix): The product of row-wise multiplication of A and v.
"""
if not isinstance(A, smat.csr_matrix):
raise ValueError(f"A must be scipy.sparse.csr_matrix")
if not isinstance(v, np.ndarray):
raise ValueError(f"v must be a numpy ndarray")
if v.ndim != 1:
raise ValueError(f"v should be an 1-d array")
if v.shape[0] != A.shape[0]:
raise ValueError(f"The dimension of v should be the same as the number of rows of A")
Z = A.copy()
for i in range(v.shape[0]):
Z.data[Z.indptr[i] : Z.indptr[i + 1]] *= v[i]
return Z
def csc_colwise_mul(A, v):
"""Column-wise multiplication between sparse csc matrix A and dense array v, where each column of A is multiplied by the corresponding element in v (The number of columns of A is same as the length of v).
Args:
A (csc_matrix): The matrix to be multiplied.
v (ndarray): The multiplying vector.
Returns:
Z (csc_matrix): The product of column-wise multiplication of A and v.
"""
if not isinstance(A, smat.csc_matrix):
raise ValueError(f"A must be scipy.sparse.csc_matrix")
if not isinstance(v, np.ndarray):
raise ValueError(f"v must be a numpy ndarray")
if v.ndim != 1:
raise ValueError(f"v should be an 1-d array")
if v.shape[0] != A.shape[1]:
raise ValueError(f"The dimension of v should be the same as the number of columns of A")
Z = A.copy()
for i in range(v.shape[0]):
Z.data[Z.indptr[i] : Z.indptr[i + 1]] *= v[i]
return Z
def get_cocluster_spectral_embeddings(A, dim=24):
"""Obtain the co-cluster spectral embeddings for the given bipartite graph described in [1]
* [1] `<NAME>, 2001. Co-clustering documents and words using
    bipartite spectral graph partitioning`
Args:
A (csr_matrix or csc_matrix): bipartite graph matrix
dim (int, optional): the dimension of the returned embeddings. Default 24
Returns:
(row_embedding, col_embedding): a tuple of embeddings for rows and columns respectively
row_embedding: numpy.ndarray of shape (A.shape[0], dim).
col_embedding: numpy.ndarray of shape (A.shape[1], dim).
"""
assert A.min() >= 0.0, "A must be nonnegative"
from sklearn.utils.extmath import randomized_svd
# Obtain An, the normalized adjacency bipartite matrix described in Eq (10) of [1]
# A_n = D_1^{-1/2} A D_2^{-1/2}
# row_diag = diagonal of D_1^{-1/2}
# col_diag = diagonal of D_2^{-1/2}
row_diag = np.asarray(np.sqrt(A.sum(axis=1))).squeeze()
col_diag = np.asarray(np.sqrt(A.sum(axis=0))).squeeze()
row_diag[row_diag == 0] = 1.0
col_diag[col_diag == 0] = 1.0
row_diag = 1.0 / row_diag
col_diag = 1.0 / col_diag
if smat.issparse(A):
n_rows, n_cols = A.shape
r = smat.dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = smat.dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
An = r * A * c
else:
An = row_diag[:, np.newaxis] * A * col_diag
# run SVD on An
nr_discards = 1 # discarding the first component
U, Sigma, VT = randomized_svd(An, dim + nr_discards, random_state=0)
# Normalized the singular vectors based on Eq (24) of [1]
row_embedding = np.ascontiguousarray(row_diag[:, np.newaxis] * U[:, nr_discards:])
col_embedding = np.ascontiguousarray(col_diag[:, np.newaxis] * VT[nr_discards:].T)
return row_embedding, col_embedding
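# A minimal sketch of calling the co-cluster spectral embedding above on a toy
# bipartite (e.g. document-by-word) matrix. The helper is illustrative only and
# assumes the module-level `smat`/`np` imports.
def _cocluster_embedding_example():
    rng = np.random.RandomState(0)
    A = smat.csr_matrix(rng.binomial(1, 0.3, size=(20, 15)).astype(np.float64))
    row_emb, col_emb = get_cocluster_spectral_embeddings(A, dim=4)
    # row_emb has shape (20, 4) and col_emb has shape (15, 4); rows/columns
    # with similar co-occurrence patterns end up close in the embedding space.
    return row_emb.shape, col_emb.shape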
class CsrEnsembler(object):
"""A class implementing several ensemblers for a list sorted CSR predictions"""
@staticmethod
def check_validlity(*args):
"""Check whether input CSR matrices are valid
Args:
args (iterable over csr_matrix): input CSR matrices
"""
for x in args:
assert isinstance(x, smat.csr_matrix), type(x)
assert all(x.shape == args[0].shape for x in args)
@staticmethod
def average(*args):
"""Ensemble predictions by averaging prediction values
Args:
args (iterable over csr_matrix): input CSR matrices
Returns:
ret (csr_matrix): ensembled prediction CSR matrix
"""
CsrEnsembler.check_validlity(*args)
ret = sum(args)
ret = sorted_csr(ret)
ret.data /= len(args)
return ret
@staticmethod
def rank_average(*args):
"""Ensemble predictions by averaging prediction ranks
Args:
args (iterable over csr_matrix): input CSR matrices
Returns:
ret (csr_matrix): ensembled prediction CSR matrix
"""
CsrEnsembler.check_validlity(*args)
mm = max((x.indptr[1:] - x.indptr[:-1]).max() for x in args)
ret = sum(get_relevance_csr(csr, mm) for csr in args)
ret = sorted_csr(ret)
ret.data /= len(args)
return ret
@staticmethod
def round_robin(*args):
"""Ensemble predictions by round robin
Args:
args (iterable over csr_matrix): input CSR matrices
Returns:
ret (csr_matrix): ensembled prediction CSR matrix
"""
CsrEnsembler.check_validlity(*args)
base = 1.0 / (len(args) + 1.0)
mm = max((x.indptr[1:] - x.indptr[:-1]).max() for x in args)
ret = get_relevance_csr(args[0], mm)
ret.data[:] += len(args) * base
for i, x in enumerate(args[1:], 1):
tmp = get_relevance_csr(x, mm)
tmp.data[:] += (len(args) - i) * base
ret = ret.maximum(tmp)
ret = sorted_csr(ret)
ret.data /= len(args)
return ret
@staticmethod
def print_ens(Ytrue, pred_set, param_set, topk=10):
"""Print matrices before and after ensemble
Args:
Ytrue (csr_matrix): ground truth label matrix
pred_set (iterable over csr_matrix): prediction matrices to ensemble
param_set (iterable): parameters or model names associated with pred_set
"""
for param, pred in zip(param_set, pred_set):
print("param: {}".format(param))
print(Metrics.generate(Ytrue, pred, topk=topk))
for ens in [CsrEnsembler.average, CsrEnsembler.rank_average, CsrEnsembler.round_robin]:
print("ens: {}".format(ens.__name__))
print(Metrics.generate(Ytrue, ens(*pred_set), topk=topk))
class Metrics(collections.namedtuple("Metrics", ["prec", "recall"])):
"""The metrics (precision, recall) for multi-label classification problems."""
__slots__ = ()
def __str__(self):
"""Format printing"""
def fmt(key):
return " ".join("{:4.2f}".format(100 * v) for v in getattr(self, key)[:])
return "\n".join("{:7}= {}".format(key, fmt(key)) for key in self._fields)
@classmethod
def default(cls):
"""Default dummy metric"""
return cls(prec=[], recall=[])
@classmethod
def generate(cls, tY, pY, topk=10):
"""Compute the metrics with given prediction and ground truth.
Args:
tY (csr_matrix): ground truth label matrix
pY (csr_matrix): predicted logits
topk (int, optional): only generate topk prediction. Default 10
Returns:
Metrics
"""
assert isinstance(tY, smat.csr_matrix), type(tY)
assert isinstance(pY, smat.csr_matrix), type(pY)
assert tY.shape == pY.shape, "tY.shape = {}, pY.shape = {}".format(tY.shape, pY.shape)
pY = sorted_csr(pY)
total_matched = np.zeros(topk, dtype=np.uint64)
recall = np.zeros(topk, dtype=np.float64)
for i in range(tY.shape[0]):
truth = tY.indices[tY.indptr[i] : tY.indptr[i + 1]]
matched = np.isin(pY.indices[pY.indptr[i] : pY.indptr[i + 1]][:topk], truth)
cum_matched = np.cumsum(matched, dtype=np.uint64)
total_matched[: len(cum_matched)] += cum_matched
recall[: len(cum_matched)] += cum_matched / max(len(truth), 1)
if len(cum_matched) != 0:
total_matched[len(cum_matched) :] += cum_matched[-1]
recall[len(cum_matched) :] += cum_matched[-1] / max(len(truth), 1)
prec = total_matched / tY.shape[0] / np.arange(1, topk + 1)
recall = recall / tY.shape[0]
return cls(prec=prec, recall=recall)
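# A small end-to-end illustration of Metrics.generate. The helper is
# illustrative only and assumes the module-level `smat`/`np` imports and the
# sorted_csr helper used above.
def _metrics_example():
    tY = smat.csr_matrix(np.array([[1.0, 0.0, 1.0],
                                    [0.0, 1.0, 0.0]]))
    pY = smat.csr_matrix(np.array([[0.9, 0.2, 0.7],
                                    [0.1, 0.8, 0.0]]))
    m = Metrics.generate(tY, pY, topk=2)
    # m.prec[0] is precision@1 and m.recall[1] is recall@2, averaged over rows.
    return m.prec, m.recall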
|
[
"numpy.iinfo",
"numpy.isin",
"numpy.ascontiguousarray",
"numpy.argsort",
"numpy.array",
"scipy.sparse.sputils.get_index_dtype",
"numpy.save",
"numpy.arange",
"scipy.sparse.sputils.upcast",
"numpy.empty",
"scipy.sparse.coo_matrix",
"scipy.sparse.csr_matrix",
"sklearn.utils.extmath.randomized_svd",
"collections.namedtuple",
"scipy.sparse.issparse",
"scipy.sparse.save_npz",
"numpy.bincount",
"numpy.ones_like",
"scipy.sparse.dia_matrix",
"numpy.zeros",
"numpy.cumsum",
"numpy.load"
] |
[((33975, 34028), 'collections.namedtuple', 'collections.namedtuple', (['"""Metrics"""', "['prec', 'recall']"], {}), "('Metrics', ['prec', 'recall'])\n", (33997, 34028), False, 'import collections\n'), ((1542, 1610), 'scipy.sparse.sputils.get_index_dtype', 'smat.sputils.get_index_dtype', (['indices'], {'check_contents': 'check_contents'}), '(indices, check_contents=check_contents)\n', (1570, 1610), True, 'import scipy.sparse as smat\n'), ((1630, 1697), 'scipy.sparse.sputils.get_index_dtype', 'smat.sputils.get_index_dtype', (['indptr'], {'check_contents': 'check_contents'}), '(indptr, check_contents=check_contents)\n', (1658, 1697), True, 'import scipy.sparse as smat\n'), ((2142, 2191), 'numpy.array', 'np.array', (['indices'], {'copy': 'copy', 'dtype': 'indices_dtype'}), '(indices, copy=copy, dtype=indices_dtype)\n', (2150, 2191), True, 'import numpy as np\n'), ((2209, 2256), 'numpy.array', 'np.array', (['indptr'], {'copy': 'copy', 'dtype': 'indptr_dtype'}), '(indptr, copy=copy, dtype=indptr_dtype)\n', (2217, 2256), True, 'import numpy as np\n'), ((2272, 2310), 'numpy.array', 'np.array', (['data'], {'copy': 'copy', 'dtype': 'dtype'}), '(data, copy=copy, dtype=dtype)\n', (2280, 2310), True, 'import numpy as np\n'), ((4764, 4776), 'numpy.load', 'np.load', (['src'], {}), '(src)\n', (4771, 4776), True, 'import numpy as np\n'), ((7535, 7590), 'scipy.sparse.csr_matrix', 'smat.csr_matrix', (['(val, (row_idx, col_idx))'], {'shape': 'shape'}), '((val, (row_idx, col_idx)), shape=shape)\n', (7550, 7590), True, 'import scipy.sparse as smat\n'), ((12526, 12584), 'numpy.empty', 'np.empty', (['(keep_topk * dense.shape[0],)'], {'dtype': 'dense.dtype'}), '((keep_topk * dense.shape[0],), dtype=dense.dtype)\n', (12534, 12584), True, 'import numpy as np\n'), ((12599, 12659), 'numpy.empty', 'np.empty', (['(keep_topk * dense.shape[0],)'], {'dtype': 'indices_dtype'}), '((keep_topk * dense.shape[0],), dtype=indices_dtype)\n', (12607, 12659), True, 'import numpy as np\n'), ((13371, 13446), 'numpy.arange', 'np.arange', (['(0)', '(dense.shape[0] * keep_topk + 1)', 'keep_topk'], {'dtype': 'indptr_dtype'}), '(0, dense.shape[0] * keep_topk + 1, keep_topk, dtype=indptr_dtype)\n', (13380, 13446), True, 'import numpy as np\n'), ((14967, 15011), 'numpy.empty', 'np.empty', (['(total_rows + 1)'], {'dtype': 'indptr_dtype'}), '(total_rows + 1, dtype=indptr_dtype)\n', (14975, 15011), True, 'import numpy as np\n'), ((15026, 15066), 'numpy.empty', 'np.empty', (['total_nnz'], {'dtype': 'indices_dtype'}), '(total_nnz, dtype=indices_dtype)\n', (15034, 15066), True, 'import numpy as np\n'), ((15078, 15110), 'numpy.empty', 'np.empty', (['total_nnz'], {'dtype': 'dtype'}), '(total_nnz, dtype=dtype)\n', (15086, 15110), True, 'import numpy as np\n'), ((17053, 17094), 'numpy.empty', 'np.empty', (['(nr_rows + 1)'], {'dtype': 'indptr_dtype'}), '(nr_rows + 1, dtype=indptr_dtype)\n', (17061, 17094), True, 'import numpy as np\n'), ((17109, 17149), 'numpy.empty', 'np.empty', (['total_nnz'], {'dtype': 'indices_dtype'}), '(total_nnz, dtype=indices_dtype)\n', (17117, 17149), True, 'import numpy as np\n'), ((17161, 17193), 'numpy.empty', 'np.empty', (['total_nnz'], {'dtype': 'dtype'}), '(total_nnz, dtype=dtype)\n', (17169, 17193), True, 'import numpy as np\n'), ((19123, 19167), 'numpy.empty', 'np.empty', (['(total_rows + 1)'], {'dtype': 'indptr_dtype'}), '(total_rows + 1, dtype=indptr_dtype)\n', (19131, 19167), True, 'import numpy as np\n'), ((19182, 19222), 'numpy.empty', 'np.empty', (['total_nnz'], {'dtype': 'indices_dtype'}), '(total_nnz, 
dtype=indices_dtype)\n', (19190, 19222), True, 'import numpy as np\n'), ((19234, 19266), 'numpy.empty', 'np.empty', (['total_nnz'], {'dtype': 'dtype'}), '(total_nnz, dtype=dtype)\n', (19242, 19266), True, 'import numpy as np\n'), ((25049, 25091), 'numpy.arange', 'np.arange', (['dense.shape[0]'], {'dtype': 'np.uint32'}), '(dense.shape[0], dtype=np.uint32)\n', (25058, 25091), True, 'import numpy as np\n'), ((25103, 25145), 'numpy.arange', 'np.arange', (['dense.shape[1]'], {'dtype': 'np.uint32'}), '(dense.shape[1], dtype=np.uint32)\n', (25112, 25145), True, 'import numpy as np\n'), ((26289, 26347), 'scipy.sparse.csr_matrix', 'smat.csr_matrix', (['(rel, csr.indices, csr.indptr)', 'csr.shape'], {}), '((rel, csr.indices, csr.indptr), csr.shape)\n', (26304, 26347), True, 'import scipy.sparse as smat\n'), ((26815, 26849), 'numpy.zeros', 'np.zeros', (['coo.shape[0]'], {'dtype': 'bool'}), '(coo.shape[0], dtype=bool)\n', (26823, 26849), True, 'import numpy as np\n'), ((26867, 26901), 'numpy.zeros', 'np.zeros', (['coo.shape[1]'], {'dtype': 'bool'}), '(coo.shape[1], dtype=bool)\n', (26875, 26901), True, 'import numpy as np\n'), ((27047, 27149), 'scipy.sparse.coo_matrix', 'smat.coo_matrix', (['(coo.data[valid_idx], (coo.row[valid_idx], coo.col[valid_idx]))'], {'shape': 'coo.shape'}), '((coo.data[valid_idx], (coo.row[valid_idx], coo.col[\n valid_idx])), shape=coo.shape)\n', (27062, 27149), True, 'import scipy.sparse as smat\n'), ((30343, 30359), 'scipy.sparse.issparse', 'smat.issparse', (['A'], {}), '(A)\n', (30356, 30359), True, 'import scipy.sparse as smat\n'), ((30711, 30764), 'sklearn.utils.extmath.randomized_svd', 'randomized_svd', (['An', '(dim + nr_discards)'], {'random_state': '(0)'}), '(An, dim + nr_discards, random_state=0)\n', (30725, 30764), False, 'from sklearn.utils.extmath import randomized_svd\n'), ((30848, 30914), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['(row_diag[:, np.newaxis] * U[:, nr_discards:])'], {}), '(row_diag[:, np.newaxis] * U[:, nr_discards:])\n', (30868, 30914), True, 'import numpy as np\n'), ((30935, 31001), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['(col_diag[:, np.newaxis] * VT[nr_discards:].T)'], {}), '(col_diag[:, np.newaxis] * VT[nr_discards:].T)\n', (30955, 31001), True, 'import numpy as np\n'), ((7719, 7763), 'numpy.argsort', 'np.argsort', (['(-csr.data[rng])'], {'kind': '"""mergesort"""'}), "(-csr.data[rng], kind='mergesort')\n", (7729, 7763), True, 'import numpy as np\n'), ((10600, 10640), 'numpy.arange', 'np.arange', (['csr.shape[0]'], {'dtype': 'np.uint32'}), '(csr.shape[0], dtype=np.uint32)\n', (10609, 10640), True, 'import numpy as np\n'), ((14707, 14725), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (14715, 14725), True, 'import numpy as np\n'), ((14768, 14821), 'scipy.sparse.sputils.upcast', 'smat.sputils.upcast', (['*[mat.dtype for mat in matrices]'], {}), '(*[mat.dtype for mat in matrices])\n', (14787, 14821), True, 'import scipy.sparse as smat\n'), ((16793, 16811), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (16801, 16811), True, 'import numpy as np\n'), ((16854, 16907), 'scipy.sparse.sputils.upcast', 'smat.sputils.upcast', (['*[mat.dtype for mat in matrices]'], {}), '(*[mat.dtype for mat in matrices])\n', (16873, 16907), True, 'import scipy.sparse as smat\n'), ((18860, 18878), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (18868, 18878), True, 'import numpy as np\n'), ((18921, 18974), 'scipy.sparse.sputils.upcast', 'smat.sputils.upcast', (['*[mat.dtype for mat in matrices]'], 
{}), '(*[mat.dtype for mat in matrices])\n', (18940, 18974), True, 'import scipy.sparse as smat\n'), ((26112, 26135), 'numpy.arange', 'np.arange', (['csr.shape[0]'], {}), '(csr.shape[0])\n', (26121, 26135), True, 'import numpy as np\n'), ((30406, 30462), 'scipy.sparse.dia_matrix', 'smat.dia_matrix', (['(row_diag, [0])'], {'shape': '(n_rows, n_rows)'}), '((row_diag, [0]), shape=(n_rows, n_rows))\n', (30421, 30462), True, 'import scipy.sparse as smat\n'), ((30475, 30531), 'scipy.sparse.dia_matrix', 'smat.dia_matrix', (['(col_diag, [0])'], {'shape': '(n_cols, n_cols)'}), '((col_diag, [0]), shape=(n_cols, n_cols))\n', (30490, 30531), True, 'import scipy.sparse as smat\n'), ((35127, 35158), 'numpy.zeros', 'np.zeros', (['topk'], {'dtype': 'np.uint64'}), '(topk, dtype=np.uint64)\n', (35135, 35158), True, 'import numpy as np\n'), ((35176, 35208), 'numpy.zeros', 'np.zeros', (['topk'], {'dtype': 'np.float64'}), '(topk, dtype=np.float64)\n', (35184, 35208), True, 'import numpy as np\n'), ((3981, 4023), 'numpy.save', 'np.save', (['tgt_file', 'mat'], {'allow_pickle': '(False)'}), '(tgt_file, mat, allow_pickle=False)\n', (3988, 4023), True, 'import numpy as np\n'), ((8132, 8176), 'numpy.arange', 'np.arange', (['shape[0]'], {'dtype': 'csr.indices.dtype'}), '(shape[0], dtype=csr.indices.dtype)\n', (8141, 8176), True, 'import numpy as np\n'), ((8424, 8472), 'numpy.bincount', 'np.bincount', (['(row_idx + 1)'], {'minlength': '(shape[0] + 1)'}), '(row_idx + 1, minlength=shape[0] + 1)\n', (8435, 8472), True, 'import numpy as np\n'), ((35332, 35396), 'numpy.isin', 'np.isin', (['pY.indices[pY.indptr[i]:pY.indptr[i + 1]][:topk]', 'truth'], {}), '(pY.indices[pY.indptr[i]:pY.indptr[i + 1]][:topk], truth)\n', (35339, 35396), True, 'import numpy as np\n'), ((35425, 35460), 'numpy.cumsum', 'np.cumsum', (['matched'], {'dtype': 'np.uint64'}), '(matched, dtype=np.uint64)\n', (35434, 35460), True, 'import numpy as np\n'), ((35832, 35854), 'numpy.arange', 'np.arange', (['(1)', '(topk + 1)'], {}), '(1, topk + 1)\n', (35841, 35854), True, 'import numpy as np\n'), ((4081, 4127), 'scipy.sparse.save_npz', 'smat.save_npz', (['tgt_file', 'mat'], {'compressed': '(False)'}), '(tgt_file, mat, compressed=False)\n', (4094, 4127), True, 'import scipy.sparse as smat\n'), ((12391, 12409), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (12399, 12409), True, 'import numpy as np\n'), ((12477, 12495), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (12485, 12495), True, 'import numpy as np\n'), ((13069, 13089), 'numpy.arange', 'np.arange', (['chunk_len'], {}), '(chunk_len)\n', (13078, 13089), True, 'import numpy as np\n'), ((26184, 26198), 'numpy.arange', 'np.arange', (['nnz'], {}), '(nnz)\n', (26193, 26198), True, 'import numpy as np\n'), ((12981, 13001), 'numpy.arange', 'np.arange', (['keep_topk'], {}), '(keep_topk)\n', (12990, 13001), True, 'import numpy as np\n'), ((25176, 25194), 'numpy.ones_like', 'np.ones_like', (['rows'], {}), '(rows)\n', (25188, 25194), True, 'import numpy as np\n')]
|
import numpy as np
import csv
import cv2
from keras.models import Sequential
from keras.layers import Dense, Flatten
def load_data():
lines = []
with open('Data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
images = []
measurements = []
for line in lines:
source_path = line[0]
filename = source_path.split('/')[-1]
current_path = 'Data/IMG/'+filename
image = cv2.imread(current_path)
images.append(image)
measurement = float(line[3])
measurements.append(measurement)
X_train = np.array(images)
y_train = np.array(measurements)
return X_train, y_train
def train(X_train, y_train):
model = Sequential()
model.add(Flatten(input_shape=(160, 320, 3)))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=10)
model.save('model.h5')
if __name__ == "__main__":
X_train, y_train = load_data()
train(X_train, y_train)
|
[
"keras.layers.Flatten",
"keras.models.Sequential",
"numpy.array",
"csv.reader",
"keras.layers.Dense",
"cv2.imread"
] |
[((645, 661), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (653, 661), True, 'import numpy as np\n'), ((676, 698), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (684, 698), True, 'import numpy as np\n'), ((769, 781), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (779, 781), False, 'from keras.models import Sequential\n'), ((218, 237), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (228, 237), False, 'import csv\n'), ((499, 523), 'cv2.imread', 'cv2.imread', (['current_path'], {}), '(current_path)\n', (509, 523), False, 'import cv2\n'), ((796, 830), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(160, 320, 3)'}), '(input_shape=(160, 320, 3))\n', (803, 830), False, 'from keras.layers import Dense, Flatten\n'), ((846, 854), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (851, 854), False, 'from keras.layers import Dense, Flatten\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to help set up and run experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
from absl import logging
import numpy as np
import scipy.special
from six.moves import range
from six.moves import zip
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
gfile = tf.io.gfile
class _SimpleJsonEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
def json_dumps(x):
return json.dumps(x, indent=2, cls=_SimpleJsonEncoder)
def record_config(config, path):
out = json_dumps(config)
logging.info('Recording config to %s\n %s', path, out)
gfile.makedirs(os.path.dirname(path))
with gfile.GFile(path, 'w') as fh:
fh.write(out)
def load_config(path):
logging.info('Loading config from %s', path)
with gfile.GFile(path) as fh:
return json.loads(fh.read())
def save_model(model, output_dir):
"""Save Keras model weights and architecture as HDF5 file."""
save_path = '%s/model.hdf5' % output_dir
logging.info('Saving model to %s', save_path)
model.save(save_path, include_optimizer=False)
return save_path
def load_model(path):
logging.info('Loading model from %s', path)
return tf.keras.models.load_model(path)
def metrics_from_stats(stats):
"""Compute metrics to report to hyperparameter tuner."""
labels, probs = stats['labels'], stats['probs']
# Reshape binary predictions to 2-class.
if len(probs.shape) == 1:
probs = np.stack([1-probs, probs], axis=-1)
assert len(probs.shape) == 2
predictions = np.argmax(probs, axis=-1)
accuracy = np.equal(labels, predictions)
label_probs = probs[np.arange(len(labels)), labels]
log_probs = np.maximum(-1e10, np.log(label_probs))
brier_scores = np.square(probs).sum(-1) - 2 * label_probs
return {'accuracy': accuracy.mean(0),
'brier_score': brier_scores.mean(0),
'log_prob': log_probs.mean(0)}
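# Minimal sketch of metrics_from_stats on a tiny binary problem; illustrative
# only, with made-up numbers.
def _metrics_from_stats_example():
  """Computes accuracy, Brier score and log prob for three toy predictions."""
  stats = {'labels': np.array([0, 1, 1]),
           'probs': np.array([0.2, 0.9, 0.6])}  # P(class 1) per example
  metrics = metrics_from_stats(stats)
  # Here all three predictions are correct, so metrics['accuracy'] == 1.0;
  # 'brier_score' and 'log_prob' are likewise means over the three examples.
  return metrics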
def make_predictions(
model, batched_dataset, predictions_per_example=1, writers=None,
predictions_are_logits=True, record_image_samples=True, max_batches=1e6):
"""Build a dictionary of predictions for examples from a dataset.
Args:
model: Trained Keras model.
batched_dataset: tf.data.Dataset that yields batches of image, label pairs.
predictions_per_example: Number of predictions to generate per example.
writers: `dict` with keys 'small' and 'full', containing
array_utils.StatsWriter instances for full prediction results and small
prediction results (omitting logits).
predictions_are_logits: Indicates whether model outputs are logits or
probabilities.
record_image_samples: `bool` Record one batch of input examples.
max_batches: `int`, maximum number of batches.
Returns:
Dictionary containing:
labels: Labels copied from the dataset (shape=[N]).
logits_samples: Samples of model predict outputs for each example
(shape=[N, M, K]).
probs: Probabilities after averaging over samples (shape=[N, K]).
image_samples: One batch of input images (for sanity checking).
"""
if predictions_are_logits:
samples_key = 'logits_samples'
avg_probs_fn = lambda x: scipy.special.softmax(x, axis=-1).mean(-2)
else:
samples_key = 'probs_samples'
avg_probs_fn = lambda x: x.mean(-2)
labels, outputs = [], []
predict_fn = model.predict if hasattr(model, 'predict') else model
for i, (inputs_i, labels_i) in enumerate(tfds.as_numpy(batched_dataset)):
logging.info('iteration: %d', i)
outputs_i = np.stack(
[predict_fn(inputs_i) for _ in range(predictions_per_example)], axis=1)
if writers is None:
labels.extend(labels_i)
outputs.append(outputs_i)
else:
avg_probs_i = avg_probs_fn(outputs_i)
prediction_batch = dict(labels=labels_i, probs=avg_probs_i)
if i == 0 and record_image_samples:
prediction_batch['image_samples'] = inputs_i
writers['small'].write_batch(prediction_batch)
prediction_batch[samples_key] = outputs_i
writers['full'].write_batch(prediction_batch)
# Don't predict whole ImageNet training set
if i > max_batches:
break
if writers is None:
image_samples = inputs_i # pylint: disable=undefined-loop-variable
labels = np.stack(labels, axis=0)
outputs = np.concatenate(outputs, axis=0)
stats = {'labels': labels, 'image_samples': image_samples,
samples_key: outputs, 'probs': avg_probs_fn(outputs)}
if record_image_samples:
stats['image_samples'] = image_samples
return stats
def download_dataset(dataset, batch_size_for_dl=1024):
logging.info('Starting dataset download...')
tup = list(zip(*tfds.as_numpy(dataset.batch(batch_size_for_dl))))
logging.info('dataset download complete.')
return tuple(np.concatenate(x, axis=0) for x in tup)
def get_distribution_strategy(distribution_strategy='default',
num_gpus=0,
num_workers=1,
all_reduce_alg=None,
num_packs=1):
"""Return a DistributionStrategy for running the model.
Args:
distribution_strategy: a string specifying which distribution strategy to
use. Accepted values are 'off', 'default', 'one_device', 'mirrored',
'parameter_server', 'multi_worker_mirrored', case insensitive. 'off' means
not to use Distribution Strategy; 'default' means to choose from
`MirroredStrategy`, `MultiWorkerMirroredStrategy`, or `OneDeviceStrategy`
according to the number of GPUs and number of workers.
num_gpus: Number of GPUs to run this model.
num_workers: Number of workers to run this model.
all_reduce_alg: Optional. Specifies which algorithm to use when performing
all-reduce. For `MirroredStrategy`, valid values are 'nccl' and
'hierarchical_copy'. For `MultiWorkerMirroredStrategy`, valid values are
'ring' and 'nccl'. If None, DistributionStrategy will choose based on
device topology.
num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`
or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`.
Returns:
    tf.distribute.DistributionStrategy object.
Raises:
ValueError: if `distribution_strategy` is 'off' or 'one_device' and
`num_gpus` is larger than 1; or `num_gpus` is negative.
"""
if num_gpus < 0:
raise ValueError('`num_gpus` can not be negative.')
distribution_strategy = distribution_strategy.lower()
if distribution_strategy == 'off':
if num_gpus > 1:
raise ValueError(
'When {} GPUs and {} workers are specified, distribution_strategy '
'flag cannot be set to "off".'.format(num_gpus, num_workers))
return None
if distribution_strategy == 'multi_worker_mirrored':
return tf.distribute.experimental.MultiWorkerMirroredStrategy(
communication=_collective_communication(all_reduce_alg))
if (distribution_strategy == 'one_device' or
(distribution_strategy == 'default' and num_gpus <= 1)):
if num_gpus == 0:
return tf.distribute.OneDeviceStrategy('device:CPU:0')
else:
if num_gpus > 1:
raise ValueError('`OneDeviceStrategy` can not be used for more than '
'one device.')
return tf.distribute.OneDeviceStrategy('device:GPU:0')
if distribution_strategy in ('mirrored', 'default'):
if num_gpus == 0:
assert distribution_strategy == 'mirrored'
devices = ['device:CPU:0']
else:
devices = ['device:GPU:%d' % i for i in range(num_gpus)]
return tf.distribute.MirroredStrategy(
devices=devices,
cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs))
if distribution_strategy == 'parameter_server':
return tf.compat.v1.distribute.experimental.ParameterServerStrategy()
raise ValueError(
'Unrecognized Distribution Strategy: %r' % distribution_strategy)
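# Minimal usage sketch for get_distribution_strategy; illustrative only and
# assumes two visible GPUs for the 'mirrored' case.
def _distribution_strategy_example():
  """Builds a MirroredStrategy over 2 GPUs with NCCL all-reduce."""
  strategy = get_distribution_strategy(
      'mirrored', num_gpus=2, all_reduce_alg='nccl', num_packs=1)
  with strategy.scope():
    # Model construction/compilation would go here so that variables are
    # mirrored across the two devices.
    pass
  return strategy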
def _collective_communication(all_reduce_alg):
"""Return a CollectiveCommunication based on all_reduce_alg.
Args:
all_reduce_alg: a string specifying which collective communication to pick,
or None.
Returns:
tf.distribute.experimental.CollectiveCommunication object
Raises:
ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl']
"""
collective_communication_options = {
None: tf.distribute.experimental.CollectiveCommunication.AUTO,
'ring': tf.distribute.experimental.CollectiveCommunication.RING,
'nccl': tf.distribute.experimental.CollectiveCommunication.NCCL
}
if all_reduce_alg not in collective_communication_options:
raise ValueError(
'When used with `multi_worker_mirrored`, valid values for '
'all_reduce_alg are ["ring", "nccl"]. Supplied value: {}'.format(
all_reduce_alg))
return collective_communication_options[all_reduce_alg]
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
"""Return a CrossDeviceOps based on all_reduce_alg and num_packs.
Args:
all_reduce_alg: a string specifying which cross device op to pick, or None.
num_packs: an integer specifying number of packs for the cross device op.
Returns:
tf.distribute.CrossDeviceOps object or None.
Raises:
ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy'].
"""
if all_reduce_alg is None:
return None
mirrored_all_reduce_options = {
'nccl': tf.distribute.NcclAllReduce,
'hierarchical_copy': tf.distribute.HierarchicalCopyAllReduce
}
if all_reduce_alg not in mirrored_all_reduce_options:
raise ValueError(
'When used with `mirrored`, valid values for all_reduce_alg are '
'["nccl", "hierarchical_copy"]. Supplied value: {}'.format(
all_reduce_alg))
cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
return cross_device_ops_class(num_packs=num_packs)
|
[
"tensorflow.compat.v2.distribute.OneDeviceStrategy",
"tensorflow.compat.v2.compat.v1.distribute.experimental.ParameterServerStrategy",
"six.moves.range",
"json.dumps",
"numpy.log",
"numpy.argmax",
"numpy.equal",
"absl.logging.info",
"numpy.square",
"numpy.stack",
"tensorflow.compat.v2.keras.models.load_model",
"numpy.concatenate",
"tensorflow_datasets.as_numpy"
] |
[((1132, 1179), 'json.dumps', 'json.dumps', (['x'], {'indent': '(2)', 'cls': '_SimpleJsonEncoder'}), '(x, indent=2, cls=_SimpleJsonEncoder)\n', (1142, 1179), False, 'import json\n'), ((1244, 1301), 'absl.logging.info', 'logging.info', (['"""Recording config to %s\n %s"""', 'path', 'out'], {}), '("""Recording config to %s\n %s""", path, out)\n', (1256, 1301), False, 'from absl import logging\n'), ((1421, 1465), 'absl.logging.info', 'logging.info', (['"""Loading config from %s"""', 'path'], {}), "('Loading config from %s', path)\n", (1433, 1465), False, 'from absl import logging\n'), ((1677, 1722), 'absl.logging.info', 'logging.info', (['"""Saving model to %s"""', 'save_path'], {}), "('Saving model to %s', save_path)\n", (1689, 1722), False, 'from absl import logging\n'), ((1817, 1860), 'absl.logging.info', 'logging.info', (['"""Loading model from %s"""', 'path'], {}), "('Loading model from %s', path)\n", (1829, 1860), False, 'from absl import logging\n'), ((1870, 1902), 'tensorflow.compat.v2.keras.models.load_model', 'tf.keras.models.load_model', (['path'], {}), '(path)\n', (1896, 1902), True, 'import tensorflow.compat.v2 as tf\n'), ((2212, 2237), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(-1)'}), '(probs, axis=-1)\n', (2221, 2237), True, 'import numpy as np\n'), ((2251, 2280), 'numpy.equal', 'np.equal', (['labels', 'predictions'], {}), '(labels, predictions)\n', (2259, 2280), True, 'import numpy as np\n'), ((5292, 5336), 'absl.logging.info', 'logging.info', (['"""Starting dataset download..."""'], {}), "('Starting dataset download...')\n", (5304, 5336), False, 'from absl import logging\n'), ((5407, 5449), 'absl.logging.info', 'logging.info', (['"""dataset download complete."""'], {}), "('dataset download complete.')\n", (5419, 5449), False, 'from absl import logging\n'), ((2128, 2165), 'numpy.stack', 'np.stack', (['[1 - probs, probs]'], {'axis': '(-1)'}), '([1 - probs, probs], axis=-1)\n', (2136, 2165), True, 'import numpy as np\n'), ((2368, 2387), 'numpy.log', 'np.log', (['label_probs'], {}), '(label_probs)\n', (2374, 2387), True, 'import numpy as np\n'), ((4115, 4145), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['batched_dataset'], {}), '(batched_dataset)\n', (4128, 4145), True, 'import tensorflow_datasets as tfds\n'), ((4152, 4184), 'absl.logging.info', 'logging.info', (['"""iteration: %d"""', 'i'], {}), "('iteration: %d', i)\n", (4164, 4184), False, 'from absl import logging\n'), ((4940, 4964), 'numpy.stack', 'np.stack', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (4948, 4964), True, 'import numpy as np\n'), ((4979, 5010), 'numpy.concatenate', 'np.concatenate', (['outputs'], {'axis': '(0)'}), '(outputs, axis=0)\n', (4993, 5010), True, 'import numpy as np\n'), ((8476, 8538), 'tensorflow.compat.v2.compat.v1.distribute.experimental.ParameterServerStrategy', 'tf.compat.v1.distribute.experimental.ParameterServerStrategy', ([], {}), '()\n', (8536, 8538), True, 'import tensorflow.compat.v2 as tf\n'), ((5465, 5490), 'numpy.concatenate', 'np.concatenate', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5479, 5490), True, 'import numpy as np\n'), ((7773, 7820), 'tensorflow.compat.v2.distribute.OneDeviceStrategy', 'tf.distribute.OneDeviceStrategy', (['"""device:CPU:0"""'], {}), "('device:CPU:0')\n", (7804, 7820), True, 'import tensorflow.compat.v2 as tf\n'), ((7985, 8032), 'tensorflow.compat.v2.distribute.OneDeviceStrategy', 'tf.distribute.OneDeviceStrategy', (['"""device:GPU:0"""'], {}), "('device:GPU:0')\n", (8016, 8032), True, 'import tensorflow.compat.v2 as tf\n'), 
((2406, 2422), 'numpy.square', 'np.square', (['probs'], {}), '(probs)\n', (2415, 2422), True, 'import numpy as np\n'), ((4250, 4280), 'six.moves.range', 'range', (['predictions_per_example'], {}), '(predictions_per_example)\n', (4255, 4280), False, 'from six.moves import range\n'), ((8249, 8264), 'six.moves.range', 'range', (['num_gpus'], {}), '(num_gpus)\n', (8254, 8264), False, 'from six.moves import range\n')]
|
from __future__ import division, print_function, absolute_import
from .core import SeqletCoordinates
from modisco import util
import numpy as np
from collections import defaultdict, Counter, OrderedDict
import itertools
import sys
import time
from .value_provider import (
AbstractValTransformer, AbsPercentileValTransformer,
SignedPercentileValTransformer, PrecisionValTransformer)
import scipy
from sklearn.isotonic import IsotonicRegression
SUBSAMPLE_CAP = 1000000
#The only parts of TransformAndThresholdResults that are used in
# TfModiscoWorkflow are the transformed_pos/neg_thresholds and the
# val_transformer (used in metaclustering with multiple tasks)
#TransformAndThresholdResults is also used to replicate the same procedure
# used for identifying coordinates as when TfMoDisco was first run;
# the information needed in that case would be specific to the type of
# CoordProducer used
class AbstractTransformAndThresholdResults(object):
def __init__(self, transformed_neg_threshold, transformed_pos_threshold,
val_transformer):
self.transformed_neg_threshold = transformed_neg_threshold
self.transformed_pos_threshold = transformed_pos_threshold
self.val_transformer = val_transformer
@classmethod
def from_hdf5(cls, grp):
if "class" not in grp.attrs:
the_class = FWACTransformAndThresholdResults
else:
the_class = eval(grp.attrs["class"])
if (the_class.__name__ != cls.__name__):
return the_class.from_hdf5(grp)
class BasicTransformAndThresholdResults(AbstractTransformAndThresholdResults):
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["transformed_neg_threshold"] = self.transformed_neg_threshold
grp.attrs["transformed_pos_threshold"] = self.transformed_pos_threshold
self.val_transformer.save_hdf5(grp.create_group("val_transformer"))
@classmethod
def load_basic_attrs_from_hdf5(cls, grp):
transformed_neg_threshold = grp.attrs['transformed_neg_threshold']
transformed_pos_threshold = grp.attrs['transformed_pos_threshold']
val_transformer = AbstractValTransformer.from_hdf5(
grp["val_transformer"])
return (transformed_neg_threshold, transformed_pos_threshold,
val_transformer)
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
(transformed_neg_threshold,
transformed_pos_threshold,
val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
return cls(transformed_neg_threshold=transformed_neg_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
#FWAC = FixedWindowAroundChunks; this TransformAndThresholdResults object
# is specific to the type of info needed in that case.
class FWACTransformAndThresholdResults(
BasicTransformAndThresholdResults):
def __init__(self, neg_threshold,
transformed_neg_threshold,
pos_threshold,
transformed_pos_threshold,
val_transformer):
#both 'transformed_neg_threshold' and 'transformed_pos_threshold'
# should be positive, i.e. they should be relative to the
# transformed distribution used to set the threshold, e.g. a
# cdf value
self.neg_threshold = neg_threshold
self.pos_threshold = pos_threshold
super(FWACTransformAndThresholdResults, self).__init__(
transformed_neg_threshold=transformed_neg_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
def save_hdf5(self, grp):
super(FWACTransformAndThresholdResults, self).save_hdf5(grp)
grp.attrs["neg_threshold"] = self.neg_threshold
grp.attrs["pos_threshold"] = self.pos_threshold
@classmethod
def from_hdf5(cls, grp):
(transformed_neg_threshold, transformed_pos_threshold,
val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
neg_threshold = grp.attrs['neg_threshold']
pos_threshold = grp.attrs['pos_threshold']
return cls(neg_threshold=neg_threshold,
transformed_neg_threshold=transformed_neg_threshold,
pos_threshold=pos_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
class AbstractCoordProducer(object):
def __call__(self):
raise NotImplementedError()
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
return the_class.from_hdf5(grp)
class SeqletCoordsFWAP(SeqletCoordinates):
"""
Coordinates for the FixedWindowAroundChunks CoordProducer
"""
def __init__(self, example_idx, start, end, score, other_info={}):
self.score = score
self.other_info = other_info
super(SeqletCoordsFWAP, self).__init__(
example_idx=example_idx,
start=start, end=end,
is_revcomp=False)
class CoordProducerResults(object):
def __init__(self, coords, tnt_results):
self.coords = coords
self.tnt_results = tnt_results
@classmethod
def from_hdf5(cls, grp):
coord_strings = util.load_string_list(dset_name="coords",
grp=grp)
coords = [SeqletCoordinates.from_string(x) for x in coord_strings]
tnt_results = AbstractTransformAndThresholdResults.from_hdf5(
grp["tnt_results"])
return CoordProducerResults(coords=coords,
tnt_results=tnt_results)
def save_hdf5(self, grp):
util.save_string_list(
string_list=[str(x) for x in self.coords],
dset_name="coords",
grp=grp)
self.tnt_results.save_hdf5(
grp=grp.create_group("tnt_results"))
def get_simple_window_sum_function(window_size):
def window_sum_function(arrs):
to_return = []
for arr in arrs:
cumsum = np.cumsum(arr)
cumsum = np.array([0]+list(cumsum))
to_return.append(cumsum[window_size:]-cumsum[:-window_size])
return to_return
return window_sum_function
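# Quick illustration of the cumulative-sum trick above; the helper is
# illustrative only. The window sums of [1, 2, 3, 4] with window_size=2
# are [3, 5, 7].
def _window_sum_example():
    window_sum_function = get_simple_window_sum_function(window_size=2)
    return window_sum_function([np.array([1, 2, 3, 4])])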
class GenerateNullDist(object):
def __call__(self, score_track):
raise NotImplementedError()
class TakeSign(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.sign(x) for x in score_track]
return null_tracks
class TakeAbs(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.abs(x) for x in score_track]
return null_tracks
class LaplaceNullDist(GenerateNullDist):
def __init__(self, num_to_samp, verbose=True,
percentiles_to_use=[5*(x+1) for x in range(19)],
random_seed=1234):
self.num_to_samp = num_to_samp
self.verbose = verbose
self.percentiles_to_use = np.array(percentiles_to_use)
self.random_seed = random_seed
self.rng = np.random.RandomState()
@classmethod
def from_hdf5(cls, grp):
num_to_samp = grp.attrs["num_to_samp"]
verbose = grp.attrs["verbose"]
percentiles_to_use = np.array(grp["percentiles_to_use"][:])
        return cls(num_to_samp=num_to_samp, verbose=verbose,
                   percentiles_to_use=percentiles_to_use)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["num_to_samp"] = self.num_to_samp
grp.attrs["verbose"] = self.verbose
grp.create_dataset('percentiles_to_use',
data=self.percentiles_to_use)
def __call__(self, score_track, window_size, original_summed_score_track):
#original_summed_score_track is supplied to avoid recomputing it
if (original_summed_score_track is None):
window_sum_function = get_simple_window_sum_function(window_size)
original_summed_score_track = window_sum_function(arrs=score_track)
values = np.concatenate(original_summed_score_track, axis=0)
# first estimate mu, using two level histogram to get to 1e-6
hist1, bin_edges1 = np.histogram(values, bins=1000)
peak1 = np.argmax(hist1)
l_edge = bin_edges1[peak1]
r_edge = bin_edges1[peak1+1]
top_values = values[ (l_edge < values) & (values < r_edge) ]
hist2, bin_edges2 = np.histogram(top_values, bins=1000)
peak2 = np.argmax(hist2)
l_edge = bin_edges2[peak2]
r_edge = bin_edges2[peak2+1]
mu = (l_edge + r_edge) / 2
if (self.verbose):
print("peak(mu)=", mu)
pos_values = [x for x in values if x >= mu]
neg_values = [x for x in values if x <= mu]
#for an exponential distribution:
# cdf = 1 - exp(-lambda*x)
# exp(-lambda*x) = 1-cdf
# -lambda*x = log(1-cdf)
# lambda = -log(1-cdf)/x
# x = -log(1-cdf)/lambda
#Take the most aggressive lambda over all percentiles
pos_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.percentile(a=pos_values, q=self.percentiles_to_use)-mu))
neg_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.abs(np.percentile(a=neg_values,
q=100-self.percentiles_to_use)-mu)))
self.rng.seed(self.random_seed)
prob_pos = float(len(pos_values))/(len(pos_values)+len(neg_values))
sampled_vals = []
for i in range(self.num_to_samp):
sign = 1 if (self.rng.uniform() < prob_pos) else -1
if (sign == 1):
sampled_cdf = self.rng.uniform()
val = -np.log(1-sampled_cdf)/pos_laplace_lambda + mu
else:
sampled_cdf = self.rng.uniform()
val = mu + np.log(1-sampled_cdf)/neg_laplace_lambda
sampled_vals.append(val)
return np.array(sampled_vals)
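# Small numeric sanity check for the percentile-based rate estimate used in
# LaplaceNullDist above: for an exponential with rate lam, the q-quantile is
# -log(1-q)/lam, so -log(1-q)/quantile recovers lam (mu=0 here for simplicity).
# Illustrative only, not part of the original module.
def _laplace_lambda_example():
    rng = np.random.RandomState(0)
    lam = 2.0
    samples = rng.exponential(scale=1.0/lam, size=100000)
    q = np.array([25.0, 50.0, 75.0])
    estimates = -np.log(1 - q/100.0)/np.percentile(samples, q)
    return np.max(estimates)  # close to lam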
class FlipSignNullDist(GenerateNullDist):
def __init__(self, num_seq_to_samp, shuffle_pos=False,
seed=1234, num_breaks=100,
lower_null_percentile=20,
upper_null_percentile=80):
self.num_seq_to_samp = num_seq_to_samp
self.shuffle_pos = shuffle_pos
self.seed = seed
self.rng = np.random.RandomState()
self.num_breaks = num_breaks
self.lower_null_percentile = lower_null_percentile
self.upper_null_percentile = upper_null_percentile
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track, windowsize, original_summed_score_track):
#summed_score_track is supplied to avoid recomputing it
window_sum_function = get_simple_window_sum_function(windowsize)
        if (original_summed_score_track is None):
original_summed_score_track = window_sum_function(arrs=score_track)
all_orig_summed_scores = np.concatenate(
original_summed_score_track, axis=0)
pos_threshold = np.percentile(a=all_orig_summed_scores,
q=self.upper_null_percentile)
neg_threshold = np.percentile(a=all_orig_summed_scores,
q=self.lower_null_percentile)
#retain only the portions of the tracks that are under the
# thresholds
retained_track_portions = []
num_pos_vals = 0
num_neg_vals = 0
for (single_score_track, single_summed_score_track)\
in zip(score_track, original_summed_score_track):
window_passing_track = [
(1.0 if (x > neg_threshold and x < pos_threshold) else 0)
for x in single_summed_score_track]
padded_window_passing_track = [0.0]*int(windowsize-1)
padded_window_passing_track.extend(window_passing_track)
padded_window_passing_track.extend([0.0]*int(windowsize-1))
pos_in_passing_window = window_sum_function(
[padded_window_passing_track])[0]
assert len(single_score_track)==len(pos_in_passing_window)
single_retained_track = []
for (val, pos_passing) in zip(single_score_track,
pos_in_passing_window):
if (pos_passing > 0):
single_retained_track.append(val)
num_pos_vals += (1 if val > 0 else 0)
num_neg_vals += (1 if val < 0 else 0)
retained_track_portions.append(single_retained_track)
print("Fraction of positions retained:",
sum(len(x) for x in retained_track_portions)/
sum(len(x) for x in score_track))
prob_pos = num_pos_vals/float(num_pos_vals + num_neg_vals)
self.rng.seed(self.seed)
null_tracks = []
for i in range(self.num_seq_to_samp):
random_track = retained_track_portions[
int(self.rng.randint(0,len(retained_track_portions)))]
track_with_sign_flips = np.array([
abs(x)*(1 if self.rng.uniform() < prob_pos else -1)
for x in random_track])
if (self.shuffle_pos):
self.rng.shuffle(track_with_sign_flips)
null_tracks.append(track_with_sign_flips)
return np.concatenate(window_sum_function(null_tracks), axis=0)
def get_null_vals(null_track, score_track, window_size,
original_summed_score_track):
if (hasattr(null_track, '__call__')):
null_vals = null_track(
score_track=score_track,
window_size=window_size,
original_summed_score_track=original_summed_score_track)
else:
window_sum_function = get_simple_window_sum_function(window_size)
null_summed_score_track = window_sum_function(arrs=null_track)
null_vals = list(np.concatenate(null_summed_score_track, axis=0))
return null_vals
def subsample_if_large(arr):
if (len(arr) > SUBSAMPLE_CAP):
print("Subsampling!")
sys.stdout.flush()
arr = np.random.RandomState(1234).choice(a=arr, size=SUBSAMPLE_CAP,
replace=False)
return arr
def irval_to_probpos(irval, frac_neg):
#n(x):= pdf of null dist (negatives)
#p(x):= pdf of positive distribution
#f_p:= fraction of positives
#f_n:= fraction of negatives = 1-f_p
#o(x):= pdf of observed distribution = n(x)f_n + p(x)f_p
#The isotonic regression produces a(x) = o(x)/[o(x) + n(x)]
# o(x)/[o(x) + n(x)] = [n(x)f_n + o(x)f_p]/[n(x)(1+f_n) + p(x)]
# a(x)[n(x)(1+f_n) + p(x)f_p] = n(x)f_n + p(x)f_p
# a(x)n(x)(1+f_n) - n(x)f_n = p(x)f_p - a(x)p(x)f_p
# n(x)[a(x)(1+f_n) - f_n] = p(x)f_p[1 - a(x)]
# [a(x)/f_n + (a(x)-1)]/[1-a(x)] = (p(x)f_p)/(n(x)f_n) = r(x)
#p_pos = 1 / (1 + 1/r(x))
# = [a(x)/f_n + (a(x)-1)]/[a(x)/f_n + (a(x)-1) + (1-a(x))]
# = [a(x)/f_n + a(x)-1]/[a(x)/f_n]
# = [a(x) + f_n(a(x)-1)]/a(x)
# = 1 + f_n(a(x)-1)/a(x)
# = 1 + f_n(1 - 1/a(x))
#If solving for p_pos=0, we have -1/(1 - 1/a(x)) = f_n
#As f_n --> 100%, p_pos --> 2 - 1/a(x); this assumes max(a(x)) = 0.5
return np.minimum(np.maximum(1 + frac_neg*(
1 - (1/np.maximum(irval,1e-7))), 0.0), 1.0)
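# Numeric sanity checks for the formula above; illustrative only. When the
# isotonic output a(x) is 0.5 (the point is equally likely under the observed
# and null distributions) and frac_neg=1.0, p_pos = 1 + (1 - 1/0.5) = 0;
# as a(x) approaches 1, p_pos approaches 1.
def _irval_to_probpos_example():
    return (irval_to_probpos(np.array([0.5]), frac_neg=1.0),
            irval_to_probpos(np.array([0.99]), frac_neg=1.0))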
class SavableIsotonicRegression(object):
def __init__(self, origvals, nullvals, increasing, min_frac_neg=0.95):
self.origvals = origvals
self.nullvals = nullvals
self.increasing = increasing
self.min_frac_neg = min_frac_neg
self.ir = IsotonicRegression(out_of_bounds='clip',
increasing=increasing).fit(
X=np.concatenate([self.origvals, self.nullvals], axis=0),
y=([1.0 for x in self.origvals] + [0.0 for x in self.nullvals]),
sample_weight=([1.0 for x in self.origvals]
+[float(len(self.origvals))/len(self.nullvals)
for x in self.nullvals]))
#Infer frac_pos based on the minimum value of the ir probs
#See derivation in irval_to_probpos function
min_prec_x = self.ir.X_min_ if self.increasing else self.ir.X_max_
min_precision = self.ir.transform([min_prec_x])[0]
implied_frac_neg = -1/(1-(1/max(min_precision,1e-7)))
print("For increasing =",increasing,", the minimum IR precision was",
min_precision,"occurring at",min_prec_x,
"implying a frac_neg",
"of",implied_frac_neg)
if (implied_frac_neg > 1.0 or implied_frac_neg < self.min_frac_neg):
implied_frac_neg = max(min(1.0,implied_frac_neg),
self.min_frac_neg)
print("To be conservative, adjusted frac neg is",implied_frac_neg)
self.implied_frac_neg = implied_frac_neg
def transform(self, vals):
return irval_to_probpos(self.ir.transform(vals),
frac_neg=self.implied_frac_neg)
def save_hdf5(self, grp):
grp.attrs['increasing'] = self.increasing
grp.attrs['min_frac_neg'] = self.min_frac_neg
grp.create_dataset('origvals', data=self.origvals)
grp.create_dataset('nullvals', data=self.nullvals)
@classmethod
def from_hdf5(cls, grp):
increasing = grp.attrs['increasing']
min_frac_neg = grp.attrs['min_frac_neg']
origvals = np.array(grp['origvals'])
nullvals = np.array(grp['nullvals'])
return cls(origvals=origvals, nullvals=nullvals,
increasing=increasing, min_frac_neg=min_frac_neg)
def get_isotonic_regression_classifier(orig_vals, null_vals):
orig_vals = subsample_if_large(orig_vals)
null_vals = subsample_if_large(null_vals)
pos_orig_vals = (
np.array(sorted([x for x in orig_vals if x >= 0])))
neg_orig_vals = (
np.array(sorted([x for x in orig_vals if x < 0],
key=lambda x: abs(x))))
pos_null_vals = [x for x in null_vals if x >= 0]
neg_null_vals = [x for x in null_vals if x < 0]
pos_ir = SavableIsotonicRegression(origvals=pos_orig_vals,
nullvals=pos_null_vals, increasing=True)
if (len(neg_orig_vals) > 0):
neg_ir = SavableIsotonicRegression(origvals=neg_orig_vals,
nullvals=neg_null_vals, increasing=False)
else:
neg_ir = None
return pos_ir, neg_ir, orig_vals, null_vals
#sliding in this case would be a list of values
class VariableWindowAroundChunks(AbstractCoordProducer):
count = 0
def __init__(self, sliding, flank, suppress, target_fdr,
min_passing_windows_frac, max_passing_windows_frac,
separate_pos_neg_thresholds,
max_seqlets_total,
progress_update=5000,
verbose=True, plot_save_dir="figures"):
self.sliding = sliding
self.flank = flank
self.suppress = suppress
self.target_fdr = target_fdr
assert max_passing_windows_frac >= min_passing_windows_frac
self.min_passing_windows_frac = min_passing_windows_frac
self.max_passing_windows_frac = max_passing_windows_frac
self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
self.max_seqlets_total = None
self.progress_update = progress_update
self.verbose = verbose
self.plot_save_dir = plot_save_dir
@classmethod
def from_hdf5(cls, grp):
sliding = np.array(grp["sliding"]).astype("int")
flank = grp.attrs["flank"]
suppress = grp.attrs["suppress"]
target_fdr = grp.attrs["target_fdr"]
min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
if ("max_seqlets_total" in grp.attrs):
max_seqlets_total = grp.attrs["max_seqlets_total"]
else:
max_seqlets_total = None
progress_update = grp.attrs["progress_update"]
verbose = grp.attrs["verbose"]
return cls(sliding=sliding, flank=flank, suppress=suppress,
target_fdr=target_fdr,
min_passing_windows_frac=min_passing_windows_frac,
max_passing_windows_frac=max_passing_windows_frac,
separate_pos_neg_thresholds=separate_pos_neg_thresholds,
max_seqlets_total=max_seqlets_total,
progress_update=progress_update, verbose=verbose)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.create_dataset("sliding", data=np.array(self.sliding))
grp.attrs["flank"] = self.flank
grp.attrs["suppress"] = self.suppress
grp.attrs["target_fdr"] = self.target_fdr
grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
grp.attrs["separate_pos_neg_thresholds"] =\
self.separate_pos_neg_thresholds
if (self.max_seqlets_total is not None):
grp.attrs["max_seqlets_total"] = self.max_seqlets_total
grp.attrs["progress_update"] = self.progress_update
grp.attrs["verbose"] = self.verbose
def fit_pos_and_neg_irs(self, score_track, null_track):
pos_irs = []
neg_irs = []
for sliding_window_size in self.sliding:
window_sum_function = get_simple_window_sum_function(
sliding_window_size)
print("Fitting - on window size",sliding_window_size)
if (hasattr(null_track, '__call__')):
null_vals = null_track(
score_track=score_track,
window_size=sliding_window_size,
original_summed_score_track=None)
else:
null_summed_score_track = window_sum_function(arrs=null_track)
null_vals = np.concatenate(null_summed_score_track,
axis=0)
print("Computing window sums")
sys.stdout.flush()
window_sums_rows = window_sum_function(arrs=score_track)
print("Done computing window sums")
sys.stdout.flush()
orig_vals = np.concatenate(window_sums_rows, axis=0)
pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
get_isotonic_regression_classifier(
orig_vals=np.concatenate(window_sums_rows, axis=0),
null_vals=null_vals)
make_nulldist_figure(orig_vals=subsampled_orig_vals,
null_vals=subsampled_null_vals,
pos_ir=pos_ir, neg_ir=neg_ir,
pos_threshold=None,
neg_threshold=None)
util.show_or_savefig(plot_save_dir=self.plot_save_dir,
filename="scoredist_window"
+str(sliding_window_size)+"_"
+str(VariableWindowAroundChunks.count)+".png")
pos_irs.append(pos_ir)
neg_irs.append(neg_ir)
return pos_irs, neg_irs
def __call__(self, score_track, null_track, tnt_results=None):
if (tnt_results is None):
pos_irs, neg_irs = self.fit_pos_and_neg_irs(
score_track=score_track,
null_track=null_track)
precision_transformer = PrecisionValTransformer(
sliding_window_sizes=self.sliding,
pos_irs=pos_irs,
neg_irs=neg_irs)
(precisiontransformed_score_track,
precisiontransformed_bestwindowsizeidxs) =\
precision_transformer.transform_score_track(
score_track=score_track)
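            #(From the transformer's outputs: per-position scores transformed
            # into signed precision values, along with the index of the
            # window size that produced each position's score.)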
subsampled_prec_vals = subsample_if_large(
np.concatenate(precisiontransformed_score_track, axis=0))
from matplotlib import pyplot as plt
plt.plot(sorted(subsampled_prec_vals),
(np.arange(len(subsampled_prec_vals))/
len(subsampled_prec_vals)))
plt.xlabel("Tranformed IR precision value")
plt.ylabel("CDF")
util.show_or_savefig(plot_save_dir=self.plot_save_dir,
filename="final_prec_vals_cdf_dist"
+str(VariableWindowAroundChunks.count)+".png")
            #Pick a threshold according to the precisiontransformed score track
pos_threshold = (1-self.target_fdr)
neg_threshold = -(1-self.target_fdr)
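            #Scores are now (signed) precision estimates, so the target FDR
            # maps directly onto a precision cutoff of 1 - target_fdr on each
            # side.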
pos_threshold, neg_threshold =\
refine_thresholds_based_on_frac_passing(
vals=subsampled_prec_vals,
pos_threshold=pos_threshold,
neg_threshold=neg_threshold,
min_passing_windows_frac=self.min_passing_windows_frac,
max_passing_windows_frac=self.max_passing_windows_frac,
separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
verbose=self.verbose)
tnt_results = BasicTransformAndThresholdResults(
transformed_neg_threshold=neg_threshold,
transformed_pos_threshold=pos_threshold,
val_transformer=precision_transformer)
else:
precision_transformer = tnt_results.val_transformer
(precisiontransformed_score_track,
precisiontransformed_bestwindowsizeidxs) =\
precision_transformer.transform_score_track(
score_track=score_track)
#Need to remove padding because identify_coords is assumed to
# operate on a scoretrack that has already been processed with
# a sliding window of window_size (and assumes that partial windows
# were not included)
left_padding_to_remove = int((max(self.sliding)-1)/2)
right_padding_to_remove = (max(self.sliding)-1)-left_padding_to_remove
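        #e.g. (illustrative): with max(self.sliding) == 21 this trims 10
        # positions on the left and 10 on the right, matching what a
        # no-partial-windows sliding sum of width 21 would have produced.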
coords = identify_coords(
score_track=[x[left_padding_to_remove:-right_padding_to_remove]
for x in precisiontransformed_score_track],
pos_threshold=tnt_results.transformed_pos_threshold,
neg_threshold=tnt_results.transformed_neg_threshold,
window_size=max(self.sliding),
flank=self.flank,
suppress=self.suppress,
max_seqlets_total=self.max_seqlets_total,
verbose=self.verbose,
other_info_tracks={'best_window_idx':
[x[left_padding_to_remove:-right_padding_to_remove] for x in
precisiontransformed_bestwindowsizeidxs]})
VariableWindowAroundChunks.count += 1
return CoordProducerResults(
coords=coords,
tnt_results=tnt_results)
#identify_coords is expecting something that has already been processed
# with sliding windows of size window_size
def identify_coords(score_track, pos_threshold, neg_threshold,
window_size, flank, suppress,
max_seqlets_total, verbose, other_info_tracks={}):
for other_info_track in other_info_tracks.values():
assert all([x.shape==y.shape for x,y
in zip(other_info_track,score_track)])
#cp_score_track = 'copy' of the score track, which can be modified as
# coordinates are identified
cp_score_track = [np.array(x) for x in score_track]
#if a position is less than the threshold, set it to -np.inf
#Note that the threshold comparisons need to be >= and not just > for
# cases where there are lots of ties at the high end (e.g. with an IR
    # transformation that gives a lot of values that have a precision of 1.0)
cp_score_track = [
np.array([np.abs(y) if (y >= pos_threshold
or y <= neg_threshold)
else -np.inf for y in x])
for x in cp_score_track]
coords = []
for example_idx,single_score_track in enumerate(cp_score_track):
#set the stuff near the flanks to -np.inf so that we
# don't pick it up during argmax
single_score_track[0:flank] = -np.inf
single_score_track[len(single_score_track)-(flank):
len(single_score_track)] = -np.inf
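        #Greedy peak-picking: repeatedly take the highest-scoring remaining
        # position, emit a seqlet spanning [argmax-flank, argmax+window_size+flank),
        # then suppress everything within +/- suppress of the peak so
        # overlapping windows are not picked again.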
while True:
argmax = np.argmax(single_score_track,axis=0)
max_val = single_score_track[argmax]
#bail if exhausted everything that passed the threshold
#and was not suppressed
if (max_val == -np.inf):
break
#need to be able to expand without going off the edge
if ((argmax >= flank) and
(argmax < (len(single_score_track)-flank))):
coord = SeqletCoordsFWAP(
example_idx=example_idx,
start=argmax-flank,
end=argmax+window_size+flank,
score=score_track[example_idx][argmax],
other_info = dict([
(track_name, track[example_idx][argmax])
for (track_name, track) in other_info_tracks.items()]))
assert (coord.score >= pos_threshold
or coord.score <= neg_threshold)
coords.append(coord)
else:
                assert False,\
                    ("This shouldn't happen because I set stuff near the "
                     "border to -np.inf early on")
#suppress the chunks within +- suppress
left_supp_idx = int(max(np.floor(argmax+0.5-suppress),0))
right_supp_idx = int(min(np.ceil(argmax+0.5+suppress),
len(single_score_track)))
single_score_track[left_supp_idx:right_supp_idx] = -np.inf
if (verbose):
print("Got "+str(len(coords))+" coords")
sys.stdout.flush()
if ((max_seqlets_total is not None) and
len(coords) > max_seqlets_total):
if (verbose):
print("Limiting to top "+str(max_seqlets_total))
sys.stdout.flush()
coords = sorted(coords, key=lambda x: -np.abs(x.score))\
[:max_seqlets_total]
return coords
def refine_thresholds_based_on_frac_passing(
vals, pos_threshold, neg_threshold,
min_passing_windows_frac, max_passing_windows_frac,
separate_pos_neg_thresholds, verbose):
frac_passing_windows =(
sum(vals >= pos_threshold)
+ sum(vals <= neg_threshold))/float(len(vals))
if (verbose):
print("Thresholds from null dist were",
neg_threshold," and ",pos_threshold,
"with frac passing", frac_passing_windows)
pos_vals = [x for x in vals if x >= 0]
neg_vals = [x for x in vals if x < 0]
    #deal with edge case of len == 0
pos_vals = [0] if len(pos_vals)==0 else pos_vals
neg_vals = [0] if len(neg_vals)==0 else neg_vals
    #adjust the thresholds if they fall outside the min/max
# windows frac
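    #(e.g., illustrative: with min_passing_windows_frac=0.03 and a shared
    # pos/neg threshold, pos_threshold is relaxed to the 97th percentile of
    # abs(vals) and neg_threshold mirrored to its negation.)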
if (frac_passing_windows < min_passing_windows_frac):
if (verbose):
print("Passing windows frac was",
frac_passing_windows,", which is below ",
min_passing_windows_frac,"; adjusting")
if (separate_pos_neg_thresholds):
pos_threshold = np.percentile(
a=pos_vals,
q=100*(1-min_passing_windows_frac))
neg_threshold = np.percentile(
a=neg_vals,
q=100*(min_passing_windows_frac))
else:
pos_threshold = np.percentile(
a=np.abs(vals),
q=100*(1-min_passing_windows_frac))
neg_threshold = -pos_threshold
if (frac_passing_windows > max_passing_windows_frac):
if (verbose):
print("Passing windows frac was",
frac_passing_windows,", which is above ",
max_passing_windows_frac,"; adjusting")
if (separate_pos_neg_thresholds):
pos_threshold = np.percentile(
a=pos_vals,
q=100*(1-max_passing_windows_frac))
neg_threshold = np.percentile(
a=neg_vals,
q=100*(max_passing_windows_frac))
else:
pos_threshold = np.percentile(
a=np.abs(vals),
q=100*(1-max_passing_windows_frac))
neg_threshold = -pos_threshold
if (verbose):
print("New thresholds are",pos_threshold,"and",neg_threshold)
return pos_threshold, neg_threshold
def make_nulldist_figure(orig_vals, null_vals, pos_ir, neg_ir,
pos_threshold, neg_threshold):
from matplotlib import pyplot as plt
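    #Left axis: histograms of foreground vs. null window scores; right (twin)
    # axis: the estimated precision curve, with vertical lines at the chosen
    # thresholds when they are provided.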
fig,ax1 = plt.subplots()
orig_vals = np.array(sorted(orig_vals))
ax1.hist(orig_vals, bins=100, density=True, alpha=0.5)
ax1.hist(null_vals, bins=100, density=True, alpha=0.5)
ax1.set_ylabel("Probability density\n(blue=foreground, orange=null)")
ax1.set_xlabel("Total importance in window")
precisions = pos_ir.transform(orig_vals)
if (neg_ir is not None):
precisions = np.maximum(precisions, neg_ir.transform(orig_vals))
ax2 = ax1.twinx()
ax2.plot(orig_vals, precisions)
if (pos_threshold is not None):
ax2.plot([pos_threshold, pos_threshold], [0.0, 1.0], color="red")
if (neg_threshold is not None):
ax2.plot([neg_threshold, neg_threshold], [0.0, 1.0], color="red")
ax2.set_ylabel("Estimated foreground precision")
ax2.set_ylim(0.0, 1.02)
class FixedWindowAroundChunks(AbstractCoordProducer):
count = 0
def __init__(self, sliding,
flank,
suppress, #flanks to suppress
target_fdr,
min_passing_windows_frac,
max_passing_windows_frac,
separate_pos_neg_thresholds=False,
max_seqlets_total=None,
progress_update=5000,
verbose=True,
plot_save_dir="figures"):
self.sliding = sliding
self.flank = flank
self.suppress = suppress
self.target_fdr = target_fdr
assert max_passing_windows_frac >= min_passing_windows_frac
self.min_passing_windows_frac = min_passing_windows_frac
self.max_passing_windows_frac = max_passing_windows_frac
self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
self.max_seqlets_total = None
self.progress_update = progress_update
self.verbose = verbose
self.plot_save_dir = plot_save_dir
@classmethod
def from_hdf5(cls, grp):
sliding = grp.attrs["sliding"]
flank = grp.attrs["flank"]
suppress = grp.attrs["suppress"]
target_fdr = grp.attrs["target_fdr"]
min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
if ("max_seqlets_total" in grp.attrs):
max_seqlets_total = grp.attrs["max_seqlets_total"]
else:
max_seqlets_total = None
progress_update = grp.attrs["progress_update"]
verbose = grp.attrs["verbose"]
return cls(sliding=sliding, flank=flank, suppress=suppress,
target_fdr=target_fdr,
min_passing_windows_frac=min_passing_windows_frac,
max_passing_windows_frac=max_passing_windows_frac,
separate_pos_neg_thresholds=separate_pos_neg_thresholds,
max_seqlets_total=max_seqlets_total,
progress_update=progress_update, verbose=verbose)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["sliding"] = self.sliding
grp.attrs["flank"] = self.flank
grp.attrs["suppress"] = self.suppress
grp.attrs["target_fdr"] = self.target_fdr
grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
grp.attrs["separate_pos_neg_thresholds"] =\
self.separate_pos_neg_thresholds
if (self.max_seqlets_total is not None):
grp.attrs["max_seqlets_total"] = self.max_seqlets_total
grp.attrs["progress_update"] = self.progress_update
grp.attrs["verbose"] = self.verbose
def __call__(self, score_track, null_track, tnt_results=None):
        # score_track can be a list of 1-d arrays, one per example
assert all([len(x.shape)==1 for x in score_track])
window_sum_function = get_simple_window_sum_function(self.sliding)
if (self.verbose):
print("Computing windowed sums on original")
sys.stdout.flush()
original_summed_score_track = window_sum_function(arrs=score_track)
#Determine the window thresholds
if (tnt_results is None):
if (self.verbose):
print("Generating null dist")
sys.stdout.flush()
null_vals = get_null_vals(
null_track=null_track,
score_track=score_track,
window_size=self.sliding,
original_summed_score_track=original_summed_score_track)
if (self.verbose):
print("Computing threshold")
sys.stdout.flush()
orig_vals = list(
np.concatenate(original_summed_score_track, axis=0))
#Note that orig_vals may have been subsampled at this point
pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
get_isotonic_regression_classifier(
orig_vals=orig_vals,
null_vals=null_vals)
subsampled_pos_orig_vals = (
np.array(sorted([x for x in subsampled_orig_vals if x >= 0])))
subsampled_neg_orig_vals = (
np.array(sorted([x for x in subsampled_orig_vals if x < 0],
key=lambda x: abs(x))))
subsampled_pos_val_precisions =\
pos_ir.transform(subsampled_pos_orig_vals)
if (len(subsampled_neg_orig_vals) > 0):
subsampled_neg_val_precisions =\
neg_ir.transform(subsampled_neg_orig_vals)
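            #Threshold = smallest window score whose estimated precision
            # reaches 1 - target_fdr; if nothing qualifies, fall back to the
            # largest observed score so essentially nothing passes.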
pos_threshold = ([x[1] for x in
zip(subsampled_pos_val_precisions,
subsampled_pos_orig_vals) if x[0]
>= (1-self.target_fdr)]+[subsampled_pos_orig_vals[-1]])[0]
if (len(subsampled_neg_orig_vals) > 0):
neg_threshold = ([x[1] for x in
zip(subsampled_neg_val_precisions,
subsampled_neg_orig_vals) if x[0]
>= (1-self.target_fdr)]+[subsampled_neg_orig_vals[-1]])[0]
else:
neg_threshold = -np.inf
pos_threshold, neg_threshold =\
refine_thresholds_based_on_frac_passing(
vals=subsampled_orig_vals,
pos_threshold=pos_threshold,
neg_threshold=neg_threshold,
min_passing_windows_frac=self.min_passing_windows_frac,
max_passing_windows_frac=self.max_passing_windows_frac,
separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
verbose=self.verbose)
if (self.separate_pos_neg_thresholds):
val_transformer = SignedPercentileValTransformer(
distribution=orig_vals)
else:
val_transformer = AbsPercentileValTransformer(
distribution=orig_vals)
if (self.verbose):
print("Final raw thresholds are",
neg_threshold," and ",pos_threshold)
print("Final transformed thresholds are",
val_transformer(neg_threshold)," and ",
val_transformer(pos_threshold))
make_nulldist_figure(orig_vals=subsampled_orig_vals,
null_vals=subsampled_null_vals,
pos_ir=pos_ir, neg_ir=neg_ir,
pos_threshold=pos_threshold,
neg_threshold=neg_threshold)
util.show_or_savefig(plot_save_dir=self.plot_save_dir,
filename="scoredist_"
+str(FixedWindowAroundChunks.count)+".png")
FixedWindowAroundChunks.count += 1
tnt_results = FWACTransformAndThresholdResults(
neg_threshold=neg_threshold,
transformed_neg_threshold=val_transformer(neg_threshold),
pos_threshold=pos_threshold,
transformed_pos_threshold=val_transformer(pos_threshold),
val_transformer=val_transformer)
coords = identify_coords(
score_track=original_summed_score_track,
pos_threshold=tnt_results.pos_threshold,
neg_threshold=tnt_results.neg_threshold,
window_size=self.sliding,
flank=self.flank,
suppress=self.suppress,
max_seqlets_total=self.max_seqlets_total,
verbose=self.verbose)
return CoordProducerResults(
coords=coords,
tnt_results=tnt_results)
|
[
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"numpy.percentile",
"numpy.random.RandomState",
"numpy.histogram",
"matplotlib.pyplot.xlabel",
"numpy.concatenate",
"numpy.maximum",
"sys.stdout.flush",
"modisco.util.load_string_list",
"numpy.abs",
"numpy.ceil",
"numpy.floor",
"numpy.argmax",
"numpy.sign",
"sklearn.isotonic.IsotonicRegression",
"numpy.cumsum",
"matplotlib.pyplot.subplots"
] |
[((33997, 34011), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (34009, 34011), True, 'from matplotlib import pyplot as plt\n'), ((5463, 5513), 'modisco.util.load_string_list', 'util.load_string_list', ([], {'dset_name': '"""coords"""', 'grp': 'grp'}), "(dset_name='coords', grp=grp)\n", (5484, 5513), False, 'from modisco import util\n'), ((7506, 7534), 'numpy.array', 'np.array', (['percentiles_to_use'], {}), '(percentiles_to_use)\n', (7514, 7534), True, 'import numpy as np\n'), ((7593, 7616), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (7614, 7616), True, 'import numpy as np\n'), ((7779, 7817), 'numpy.array', 'np.array', (["grp['percentiles_to_use'][:]"], {}), "(grp['percentiles_to_use'][:])\n", (7787, 7817), True, 'import numpy as np\n'), ((8544, 8595), 'numpy.concatenate', 'np.concatenate', (['original_summed_score_track'], {'axis': '(0)'}), '(original_summed_score_track, axis=0)\n', (8558, 8595), True, 'import numpy as np\n'), ((8702, 8733), 'numpy.histogram', 'np.histogram', (['values'], {'bins': '(1000)'}), '(values, bins=1000)\n', (8714, 8733), True, 'import numpy as np\n'), ((8750, 8766), 'numpy.argmax', 'np.argmax', (['hist1'], {}), '(hist1)\n', (8759, 8766), True, 'import numpy as np\n'), ((8936, 8971), 'numpy.histogram', 'np.histogram', (['top_values'], {'bins': '(1000)'}), '(top_values, bins=1000)\n', (8948, 8971), True, 'import numpy as np\n'), ((8988, 9004), 'numpy.argmax', 'np.argmax', (['hist2'], {}), '(hist2)\n', (8997, 9004), True, 'import numpy as np\n'), ((10514, 10536), 'numpy.array', 'np.array', (['sampled_vals'], {}), '(sampled_vals)\n', (10522, 10536), True, 'import numpy as np\n'), ((10928, 10951), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (10949, 10951), True, 'import numpy as np\n'), ((11642, 11693), 'numpy.concatenate', 'np.concatenate', (['original_summed_score_track'], {'axis': '(0)'}), '(original_summed_score_track, axis=0)\n', (11656, 11693), True, 'import numpy as np\n'), ((11731, 11800), 'numpy.percentile', 'np.percentile', ([], {'a': 'all_orig_summed_scores', 'q': 'self.upper_null_percentile'}), '(a=all_orig_summed_scores, q=self.upper_null_percentile)\n', (11744, 11800), True, 'import numpy as np\n'), ((11863, 11932), 'numpy.percentile', 'np.percentile', ([], {'a': 'all_orig_summed_scores', 'q': 'self.lower_null_percentile'}), '(a=all_orig_summed_scores, q=self.lower_null_percentile)\n', (11876, 11932), True, 'import numpy as np\n'), ((14793, 14811), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14809, 14811), False, 'import sys\n'), ((18212, 18237), 'numpy.array', 'np.array', (["grp['origvals']"], {}), "(grp['origvals'])\n", (18220, 18237), True, 'import numpy as np\n'), ((18257, 18282), 'numpy.array', 'np.array', (["grp['nullvals']"], {}), "(grp['nullvals'])\n", (18265, 18282), True, 'import numpy as np\n'), ((28639, 28650), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (28647, 28650), True, 'import numpy as np\n'), ((31109, 31127), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (31125, 31127), False, 'import sys\n'), ((6285, 6299), 'numpy.cumsum', 'np.cumsum', (['arr'], {}), '(arr)\n', (6294, 6299), True, 'import numpy as np\n'), ((6831, 6841), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (6838, 6841), True, 'import numpy as np\n'), ((7135, 7144), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (7141, 7144), True, 'import numpy as np\n'), ((14619, 14666), 'numpy.concatenate', 'np.concatenate', (['null_summed_score_track'], {'axis': '(0)'}), 
'(null_summed_score_track, axis=0)\n', (14633, 14666), True, 'import numpy as np\n'), ((23049, 23067), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (23065, 23067), False, 'import sys\n'), ((23197, 23215), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (23213, 23215), False, 'import sys\n'), ((23241, 23281), 'numpy.concatenate', 'np.concatenate', (['window_sums_rows'], {'axis': '(0)'}), '(window_sums_rows, axis=0)\n', (23255, 23281), True, 'import numpy as np\n'), ((25291, 25334), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tranformed IR precision value"""'], {}), "('Tranformed IR precision value')\n", (25301, 25334), True, 'from matplotlib import pyplot as plt\n'), ((25347, 25364), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (25357, 25364), True, 'from matplotlib import pyplot as plt\n'), ((29563, 29600), 'numpy.argmax', 'np.argmax', (['single_score_track'], {'axis': '(0)'}), '(single_score_track, axis=0)\n', (29572, 29600), True, 'import numpy as np\n'), ((31310, 31328), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (31326, 31328), False, 'import sys\n'), ((32567, 32632), 'numpy.percentile', 'np.percentile', ([], {'a': 'pos_vals', 'q': '(100 * (1 - min_passing_windows_frac))'}), '(a=pos_vals, q=100 * (1 - min_passing_windows_frac))\n', (32580, 32632), True, 'import numpy as np\n'), ((32690, 32749), 'numpy.percentile', 'np.percentile', ([], {'a': 'neg_vals', 'q': '(100 * min_passing_windows_frac)'}), '(a=neg_vals, q=100 * min_passing_windows_frac)\n', (32703, 32749), True, 'import numpy as np\n'), ((33283, 33348), 'numpy.percentile', 'np.percentile', ([], {'a': 'pos_vals', 'q': '(100 * (1 - max_passing_windows_frac))'}), '(a=pos_vals, q=100 * (1 - max_passing_windows_frac))\n', (33296, 33348), True, 'import numpy as np\n'), ((33406, 33465), 'numpy.percentile', 'np.percentile', ([], {'a': 'neg_vals', 'q': '(100 * max_passing_windows_frac)'}), '(a=neg_vals, q=100 * max_passing_windows_frac)\n', (33419, 33465), True, 'import numpy as np\n'), ((38174, 38192), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38190, 38192), False, 'import sys\n'), ((14826, 14853), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (14847, 14853), True, 'import numpy as np\n'), ((16353, 16416), 'sklearn.isotonic.IsotonicRegression', 'IsotonicRegression', ([], {'out_of_bounds': '"""clip"""', 'increasing': 'increasing'}), "(out_of_bounds='clip', increasing=increasing)\n", (16371, 16416), False, 'from sklearn.isotonic import IsotonicRegression\n'), ((16473, 16527), 'numpy.concatenate', 'np.concatenate', (['[self.origvals, self.nullvals]'], {'axis': '(0)'}), '([self.origvals, self.nullvals], axis=0)\n', (16487, 16527), True, 'import numpy as np\n'), ((20328, 20352), 'numpy.array', 'np.array', (["grp['sliding']"], {}), "(grp['sliding'])\n", (20336, 20352), True, 'import numpy as np\n'), ((21550, 21572), 'numpy.array', 'np.array', (['self.sliding'], {}), '(self.sliding)\n', (21558, 21572), True, 'import numpy as np\n'), ((22898, 22945), 'numpy.concatenate', 'np.concatenate', (['null_summed_score_track'], {'axis': '(0)'}), '(null_summed_score_track, axis=0)\n', (22912, 22945), True, 'import numpy as np\n'), ((25008, 25064), 'numpy.concatenate', 'np.concatenate', (['precisiontransformed_score_track'], {'axis': '(0)'}), '(precisiontransformed_score_track, axis=0)\n', (25022, 25064), True, 'import numpy as np\n'), ((38440, 38458), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38456, 38458), False, 
'import sys\n'), ((38799, 38817), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38815, 38817), False, 'import sys\n'), ((38864, 38915), 'numpy.concatenate', 'np.concatenate', (['original_summed_score_track'], {'axis': '(0)'}), '(original_summed_score_track, axis=0)\n', (38878, 38915), True, 'import numpy as np\n'), ((9601, 9644), 'numpy.log', 'np.log', (['(1 - self.percentiles_to_use / 100.0)'], {}), '(1 - self.percentiles_to_use / 100.0)\n', (9607, 9644), True, 'import numpy as np\n'), ((9657, 9711), 'numpy.percentile', 'np.percentile', ([], {'a': 'pos_values', 'q': 'self.percentiles_to_use'}), '(a=pos_values, q=self.percentiles_to_use)\n', (9670, 9711), True, 'import numpy as np\n'), ((9767, 9810), 'numpy.log', 'np.log', (['(1 - self.percentiles_to_use / 100.0)'], {}), '(1 - self.percentiles_to_use / 100.0)\n', (9773, 9810), True, 'import numpy as np\n'), ((23439, 23479), 'numpy.concatenate', 'np.concatenate', (['window_sums_rows'], {'axis': '(0)'}), '(window_sums_rows, axis=0)\n', (23453, 23479), True, 'import numpy as np\n'), ((29004, 29013), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (29010, 29013), True, 'import numpy as np\n'), ((30801, 30834), 'numpy.floor', 'np.floor', (['(argmax + 0.5 - suppress)'], {}), '(argmax + 0.5 - suppress)\n', (30809, 30834), True, 'import numpy as np\n'), ((30872, 30904), 'numpy.ceil', 'np.ceil', (['(argmax + 0.5 + suppress)'], {}), '(argmax + 0.5 + suppress)\n', (30879, 30904), True, 'import numpy as np\n'), ((32858, 32870), 'numpy.abs', 'np.abs', (['vals'], {}), '(vals)\n', (32864, 32870), True, 'import numpy as np\n'), ((33574, 33586), 'numpy.abs', 'np.abs', (['vals'], {}), '(vals)\n', (33580, 33586), True, 'import numpy as np\n'), ((9830, 9890), 'numpy.percentile', 'np.percentile', ([], {'a': 'neg_values', 'q': '(100 - self.percentiles_to_use)'}), '(a=neg_values, q=100 - self.percentiles_to_use)\n', (9843, 9890), True, 'import numpy as np\n'), ((10421, 10444), 'numpy.log', 'np.log', (['(1 - sampled_cdf)'], {}), '(1 - sampled_cdf)\n', (10427, 10444), True, 'import numpy as np\n'), ((10279, 10302), 'numpy.log', 'np.log', (['(1 - sampled_cdf)'], {}), '(1 - sampled_cdf)\n', (10285, 10302), True, 'import numpy as np\n'), ((16034, 16058), 'numpy.maximum', 'np.maximum', (['irval', '(1e-07)'], {}), '(irval, 1e-07)\n', (16044, 16058), True, 'import numpy as np\n'), ((31376, 31391), 'numpy.abs', 'np.abs', (['x.score'], {}), '(x.score)\n', (31382, 31391), True, 'import numpy as np\n')]
|
import unittest
from sys import argv
import numpy as np
import torch
from objective.ridge import Ridge, Ridge_ClosedForm, Ridge_Gradient
from .utils import Container, assert_all_close, assert_all_close_dict
def _init_ridge(cls):
np.random.seed(1234)
torch.manual_seed(1234)
n_features = 3
n_samples = 5
mu = 0.02
cls.hparams = Container(n_features=n_features,
n_samples=n_samples,
mu=mu)
cls.w = torch.randn(n_features, 1, requires_grad=True)
cls.x = torch.randn(n_samples, n_features)
cls.y = torch.randn(n_samples)
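# Both test classes reuse the _init_ridge fixture above: a tiny ridge problem
# (5 samples, 3 features, mu=0.02) with fixed seeds, so the hard-coded
# reference tensors in the tests below are reproducible.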
class TestObj_Ridge_ClosedForm(unittest.TestCase):
def setUp(self):
_init_ridge(self)
self.obj = Ridge_ClosedForm(self.hparams)
def test_error(self):
error_test = self.obj.task_error(self.w, self.x, self.y)
error_ref = torch.tensor(1.3251)
assert_all_close(error_test, error_ref, "task_error returned value")
def test_oracle(self):
oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
oracle_info_ref = {
'sol': torch.tensor([[-0.2297], [-0.7944], [-0.5806]]),
'obj': torch.tensor(1.3370)}
assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle_info")
class TestObj_Ridge_Gradient(unittest.TestCase):
def setUp(self):
_init_ridge(self)
self.obj = Ridge_Gradient(self.hparams)
def test_error(self):
error_test = self.obj.task_error(self.w, self.x, self.y)
error_ref = torch.tensor(1.3251)
assert_all_close(error_test, error_ref, "task_error returned value")
def test_oracle(self):
oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
oracle_info_ref = {
'dw': torch.tensor([[0.7323], [1.4816], [-0.3771]]),
'obj': torch.tensor(1.3370)}
assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle_info")
if __name__ == '__main__':
unittest.main(argv=argv)
|
[
"torch.manual_seed",
"objective.ridge.Ridge_Gradient",
"torch.tensor",
"numpy.random.seed",
"objective.ridge.Ridge_ClosedForm",
"unittest.main",
"torch.randn"
] |
[((237, 257), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (251, 257), True, 'import numpy as np\n'), ((262, 285), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (279, 285), False, 'import torch\n'), ((486, 532), 'torch.randn', 'torch.randn', (['n_features', '(1)'], {'requires_grad': '(True)'}), '(n_features, 1, requires_grad=True)\n', (497, 532), False, 'import torch\n'), ((545, 579), 'torch.randn', 'torch.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (556, 579), False, 'import torch\n'), ((592, 614), 'torch.randn', 'torch.randn', (['n_samples'], {}), '(n_samples)\n', (603, 614), False, 'import torch\n'), ((1985, 2009), 'unittest.main', 'unittest.main', ([], {'argv': 'argv'}), '(argv=argv)\n', (1998, 2009), False, 'import unittest\n'), ((734, 764), 'objective.ridge.Ridge_ClosedForm', 'Ridge_ClosedForm', (['self.hparams'], {}), '(self.hparams)\n', (750, 764), False, 'from objective.ridge import Ridge, Ridge_ClosedForm, Ridge_Gradient\n'), ((877, 897), 'torch.tensor', 'torch.tensor', (['(1.3251)'], {}), '(1.3251)\n', (889, 897), False, 'import torch\n'), ((1404, 1432), 'objective.ridge.Ridge_Gradient', 'Ridge_Gradient', (['self.hparams'], {}), '(self.hparams)\n', (1418, 1432), False, 'from objective.ridge import Ridge, Ridge_ClosedForm, Ridge_Gradient\n'), ((1545, 1565), 'torch.tensor', 'torch.tensor', (['(1.3251)'], {}), '(1.3251)\n', (1557, 1565), False, 'import torch\n'), ((1117, 1164), 'torch.tensor', 'torch.tensor', (['[[-0.2297], [-0.7944], [-0.5806]]'], {}), '([[-0.2297], [-0.7944], [-0.5806]])\n', (1129, 1164), False, 'import torch\n'), ((1185, 1204), 'torch.tensor', 'torch.tensor', (['(1.337)'], {}), '(1.337)\n', (1197, 1204), False, 'import torch\n'), ((1784, 1829), 'torch.tensor', 'torch.tensor', (['[[0.7323], [1.4816], [-0.3771]]'], {}), '([[0.7323], [1.4816], [-0.3771]])\n', (1796, 1829), False, 'import torch\n'), ((1850, 1869), 'torch.tensor', 'torch.tensor', (['(1.337)'], {}), '(1.337)\n', (1862, 1869), False, 'import torch\n')]
|
"""
Train shadow net script
"""
import argparse
import functools
import itertools
import os
import os.path as ops
import sys
import time
import numpy as np
import tensorflow as tf
import pprint
import shadownet
import six
from six.moves import xrange # pylint: disable=redefined-builtin
sys.path.append('/data/')
from crnn_model import crnn_model
from local_utils import data_utils, log_utils, tensorboard_vis_summary
from global_configuration import config
from uaitrain.arch.tensorflow import uflag
from typing import List
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import device_setter
tf.app.flags.DEFINE_string('dataset_dir','/data/data/tfrecords','data path')
tf.app.flags.DEFINE_string('weights_path',None,'weight path')
FLAGS = tf.app.flags.FLAGS
logger = log_utils.init_logger()
def local_device_setter(num_devices=1,
ps_device_type='cpu',
worker_device='/cpu:0',
ps_ops=None,
ps_strategy=None):
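    #Mimics a replica device setter: ops in ps_ops (variable creation) are
    # pinned to parameter-server devices chosen by ps_strategy (round-robin
    # over num_devices by default); everything else goes to worker_device.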
    if ps_ops is None:
ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
if ps_strategy is None:
ps_strategy = device_setter._RoundRobinStrategy(num_devices)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
def _local_device_chooser(op):
current_device = pydev.DeviceSpec.from_string(op.device or "")
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if node_def.op in ps_ops:
ps_device_spec = pydev.DeviceSpec.from_string(
'/{}:{}'.format(ps_device_type, ps_strategy(op)))
ps_device_spec.merge_from(current_device)
return ps_device_spec.to_string()
else:
worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
worker_device_spec.merge_from(current_device)
return worker_device_spec.to_string()
return _local_device_chooser
def get_words_from_chars(characters_list: List[str], sequence_lengths: List[int], name='chars_conversion'):
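    #Joins per-character string tensors back into words: with more than one
    # sequence, cumulative sequence lengths give (start, end) join
    # coordinates; with a single sequence, one reduce_join suffices.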
with tf.name_scope(name=name):
def join_charcaters_fn(coords):
return tf.reduce_join(characters_list[coords[0]:coords[1]])
def coords_several_sequences():
end_coords = tf.cumsum(sequence_lengths)
start_coords = tf.concat([[0], end_coords[:-1]], axis=0)
coords = tf.stack([start_coords, end_coords], axis=1)
coords = tf.cast(coords, dtype=tf.int32)
return tf.map_fn(join_charcaters_fn, coords, dtype=tf.string)
def coords_single_sequence():
return tf.reduce_join(characters_list, keep_dims=True)
words = tf.cond(tf.shape(sequence_lengths)[0] > 1,
true_fn=lambda: coords_several_sequences(),
false_fn=lambda: coords_single_sequence())
return words
def get_shadownet_fn(num_gpus, variable_strategy, num_workers):
"""Returns a function that will build shadownet model."""
def _shadownet_fun(features, labels, mode, params):
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
tower_tensor_dict = []
tower_seq_len = []
num_devices = num_gpus
device_type = 'gpu'
tower_batch_size = int(params.batch_size / num_devices)
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
device_setter = local_device_setter(worker_device=worker_device)
with tf.variable_scope('shadownet', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds, tensor_dict, seq_len = _tower_fn(
is_training, tower_features[i], tower_labels[i], tower_batch_size, params.l_size)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
tower_tensor_dict.append(tensor_dict)
tower_seq_len.append(seq_len)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
# Now compute global loss and gradients.
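        #Average each variable's gradients across towers on the variable's
        # own device, so a single apply_gradients call updates the shared
        # weights with the tower-mean gradient.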
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
global_step = tf.train.get_global_step()
starter_learning_rate = params.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
params.decay_steps, params.decay_rate,
staircase=True)
loss = tf.reduce_mean(tower_losses, name='loss')
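            #Decode with CTC beam search and log the edit distance between
            # decoded predictions and labels (plus decoded/label sequence
            # lengths) as training-time quality signals.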
decoded, log_prob = tf.nn.ctc_beam_search_decoder(tower_preds[0],
tower_seq_len[0]*np.ones(tower_batch_size),
merge_repeated=False)
sequence_dist = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), tower_labels[0]))
sequence_lengths_pred = tf.bincount(tf.cast(decoded[0].indices[:, 0], tf.int32),
minlength=tf.shape(tower_labels[0])[1])
label_lengths_pred = tf.bincount(tf.cast(labels[0].indices[:, 0], tf.int32),
minlength=tf.shape(tower_labels[0])[1])
tensors_to_log = {'global_step': global_step, 'learning_rate': learning_rate, 'loss': loss}
dist_to_log = {'global_step': global_step,
'learning_rate': learning_rate,
'loss': loss,
'train_seq_dist': sequence_dist,
'sequence_lengths_pred': sequence_lengths_pred,
'label_lengths_pred': label_lengths_pred}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=10)
dist_hook = tf.train.LoggingTensorHook(
tensors=dist_to_log, every_n_iter=1000)
train_hooks = [logging_hook, dist_hook]
seq_dist_sum = tf.summary.scalar(name='Seq_Dist', tensor=sequence_dist)
lr_sum = tf.summary.scalar(name='Learning_rate', tensor=learning_rate)
summaries = [seq_dist_sum, lr_sum]
summary_hook = tf.train.SummarySaverHook(
save_steps=1000,
output_dir='/data/output/',
summary_op=summaries)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
if params.sync:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer, replicas_to_aggregate=num_workers)
sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
train_hooks.append(sync_replicas_hook)
# Create single grouped train op
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step())
]
train_op.extend(update_ops)
train_op = tf.group(*train_op)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
training_hooks=train_hooks)
return _shadownet_fun
def _tower_fn(is_training, feature, label, batch_size, l_size):
seq_len=l_size
shadownet = crnn_model.ShadowNet(phase='Train', hidden_nums=256, layers_nums=2, seq_length=seq_len,
num_classes=config.cfg.TRAIN.CLASSES_NUMS, rnn_cell_type='lstm')
imgs = tf.image.resize_images(feature, (32, l_size*4), method=0)
input_imgs = tf.cast(x=imgs, dtype=tf.float32)
with tf.variable_scope('shadow', reuse=False):
net_out, tensor_dict = shadownet.build_shadownet(inputdata=input_imgs)
cost = tf.reduce_mean(tf.nn.ctc_loss(labels=label, inputs=net_out,
sequence_length=seq_len*np.ones(batch_size)))
#lstm l2 normalization loss
lstm_tv = tf.trainable_variables(scope='LSTMLayers')
r_lambda = 0.001
regularization_cost = r_lambda * tf.reduce_sum([tf.nn.l2_loss(v) for v in lstm_tv])
cost = cost + regularization_cost
model_params = tf.trainable_variables()
tower_grad = tf.gradients(cost, model_params)
return cost, zip(tower_grad, model_params), net_out, tensor_dict, seq_len
def input_fn(data_dir,
subset,
num_shards,
batch_size,
use_distortion_for_training=True):
"""Create input graph for model.
Args:
data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validation' and 'eval'.
num_shards: num of towers participating in data-parallel training.
batch_size: total batch size for training to be divided by the number of
shards.
use_distortion_for_training: True to use distortions.
Returns:
    two lists: per-shard feature tensors and per-shard sparse label tensors
"""
with tf.device('/cpu:0'):
use_distortion = subset == 'train' and use_distortion_for_training
dataset = shadownet.ShadownetDataSet(data_dir, subset, use_distortion)
inputdata, input_labels = dataset.make_batch(batch_size)
if num_shards <= 1:
# No GPU available or only 1 GPU.
num_shards = 1
feature_shards = tf.split(inputdata, num_shards)
label_shards = tf.sparse_split(sp_input=input_labels, num_split=num_shards, axis=0)
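        #e.g. (illustrative): batch_size=512 with num_shards=4 gives four
        # shards of 128 examples each; labels are split along the sparse
        # batch dimension so they stay aligned with the feature shards.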
return feature_shards, label_shards
def get_experiment_fn(data_dir,
num_gpus,
use_distortion_for_training=True):
def _experiment_fn(run_config, hparams):
"""Returns an Experiment."""
# Create estimator.
train_input_fn = functools.partial(
input_fn,
data_dir,
subset='train',
num_shards=num_gpus,
batch_size=hparams.batch_size,
use_distortion_for_training=use_distortion_for_training)
eval_input_fn = functools.partial(
input_fn,
data_dir,
subset='validation',
batch_size=hparams.batch_size,
num_shards=num_gpus)
train_steps = hparams.steps
eval_steps = 2048 // hparams.batch_size
variable_strategy = 'CPU'
classifier = tf.estimator.Estimator(
model_fn=get_shadownet_fn(num_gpus,
variable_strategy,
run_config.num_worker_replicas or 1),
config=run_config,
params=hparams)
# Create experiment.
return tf.contrib.learn.Experiment(
classifier,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps,
min_eval_frequency=100)
return _experiment_fn
def main(num_gpus, log_device_placement, num_intra_threads, data_dir, output_dir, tfrecord_dir, **hparams):
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
data_dir = os.path.join(data_dir, tfrecord_dir)
# Session configuration.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=log_device_placement,
intra_op_parallelism_threads=num_intra_threads,
gpu_options=tf.GPUOptions(force_gpu_compatible=True))
config = tf.contrib.learn.RunConfig(session_config=sess_config, model_dir=output_dir)
tf.contrib.learn.learn_runner.run(
get_experiment_fn(data_dir, num_gpus),
run_config=config,
hparams=tf.contrib.training.HParams(
is_chief=config.is_chief,
**hparams))
if __name__ == '__main__':
# init args
# args = init_args()
#if not ops.exists(args.dataset_dir):
# raise ValueError('{:s} doesn\'t exist'.format(args.dataset_dir))
#train_shadownet(args.dataset_dir, args.weights_path)
# if args.weights_path is not None and 'two_stage' in args.weights_path:
# train_shadownet(args.dataset_dir, args.weights_path, restore_from_cnn_subnet_work=False)
# elif args.weights_path is not None and 'cnnsub' in args.weights_path:
# train_shadownet(args.dataset_dir, args.weights_path, restore_from_cnn_subnet_work=True)
# else:
# train_shadownet(args.dataset_dir)
parser = argparse.ArgumentParser()
parser.add_argument(
'--num_gpus',
type=int,
default=1,
help='UAI-SDK related. The number of gpus used.')
parser.add_argument(
'--log-device-placement',
action='store_true',
default=False,
help='Whether to log device placement.')
parser.add_argument(
'--num-intra-threads',
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""")
parser.add_argument(
'--num-inter-threads',
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""")
parser.add_argument(
'--sync',
action='store_true',
default=False,
help="""\
If present when running in a distributed environment will run on sync mode.\
""")
parser.add_argument(
'--work_dir',
type=str,
default='/data/',
help='UAI SDK related.')
parser.add_argument(
'--data_dir',
type=str,
required=True,
        help='UAI-SDK related. The directory where the input TFRecord data is stored.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='UAI-SDK related. The directory where the model will be stored.')
parser.add_argument(
'--log_dir',
type=str,
default='/data/data/',
help='UAI SDK related.')
parser.add_argument(
'--l_size',
type=int,
default=10,
help="""l_batch_label, how many labels CNN net work will output into LSTM""")
parser.add_argument(
'--learning_rate',
type=float,
default=0.1)
parser.add_argument(
'--decay_rate',
type=float,
default=0.1)
parser.add_argument(
'--decay_steps',
type=int,
default=40000)
parser.add_argument(
'--steps',
type=int,
default=200000)
parser.add_argument(
'--batch_size',
type=int,
default=512)
parser.add_argument(
'--tfrecord_dir',
type=str,
default='tfrecords')
args = parser.parse_args()
main(**vars(args))
print('Done')
|
[
"itertools.chain",
"tensorflow.image.resize_images",
"tensorflow.shape",
"tensorflow.split",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.gradients",
"tensorflow.group",
"local_utils.log_utils.init_logger",
"tensorflow.reduce_mean",
"tensorflow.cast",
"sys.path.append",
"shadownet.build_shadownet",
"tensorflow.GPUOptions",
"argparse.ArgumentParser",
"tensorflow.contrib.learn.RunConfig",
"tensorflow.concat",
"tensorflow.train.get_global_step",
"tensorflow.train.SyncReplicasOptimizer",
"tensorflow.train.exponential_decay",
"tensorflow.trainable_variables",
"tensorflow.summary.scalar",
"tensorflow.stack",
"six.callable",
"tensorflow.device",
"shadownet.ShadownetDataSet",
"tensorflow.variable_scope",
"numpy.ones",
"tensorflow.cumsum",
"tensorflow.nn.l2_loss",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.train.AdadeltaOptimizer",
"crnn_model.crnn_model.ShadowNet",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"os.path.join",
"tensorflow.contrib.learn.Experiment",
"tensorflow.add_n",
"tensorflow.name_scope",
"functools.partial",
"tensorflow.reduce_join",
"tensorflow.map_fn",
"tensorflow.python.training.device_setter._RoundRobinStrategy",
"tensorflow.train.SummarySaverHook",
"tensorflow.contrib.training.HParams",
"six.iteritems",
"tensorflow.sparse_split",
"tensorflow.train.LoggingTensorHook",
"tensorflow.get_collection"
] |
[((291, 316), 'sys.path.append', 'sys.path.append', (['"""/data/"""'], {}), "('/data/')\n", (306, 316), False, 'import sys\n'), ((692, 770), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', '"""/data/data/tfrecords"""', '"""data path"""'], {}), "('dataset_dir', '/data/data/tfrecords', 'data path')\n", (718, 770), True, 'import tensorflow as tf\n'), ((769, 832), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""weights_path"""', 'None', '"""weight path"""'], {}), "('weights_path', None, 'weight path')\n", (795, 832), True, 'import tensorflow as tf\n'), ((868, 891), 'local_utils.log_utils.init_logger', 'log_utils.init_logger', ([], {}), '()\n', (889, 891), False, 'from local_utils import data_utils, log_utils, tensorboard_vis_summary\n'), ((9093, 9253), 'crnn_model.crnn_model.ShadowNet', 'crnn_model.ShadowNet', ([], {'phase': '"""Train"""', 'hidden_nums': '(256)', 'layers_nums': '(2)', 'seq_length': 'seq_len', 'num_classes': 'config.cfg.TRAIN.CLASSES_NUMS', 'rnn_cell_type': '"""lstm"""'}), "(phase='Train', hidden_nums=256, layers_nums=2,\n seq_length=seq_len, num_classes=config.cfg.TRAIN.CLASSES_NUMS,\n rnn_cell_type='lstm')\n", (9113, 9253), False, 'from crnn_model import crnn_model\n'), ((9413, 9472), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['feature', '(32, l_size * 4)'], {'method': '(0)'}), '(feature, (32, l_size * 4), method=0)\n', (9435, 9472), True, 'import tensorflow as tf\n'), ((9579, 9612), 'tensorflow.cast', 'tf.cast', ([], {'x': 'imgs', 'dtype': 'tf.float32'}), '(x=imgs, dtype=tf.float32)\n', (9586, 9612), True, 'import tensorflow as tf\n'), ((10225, 10267), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': '"""LSTMLayers"""'}), "(scope='LSTMLayers')\n", (10247, 10267), True, 'import tensorflow as tf\n'), ((10736, 10760), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10758, 10760), True, 'import tensorflow as tf\n'), ((10778, 10810), 'tensorflow.gradients', 'tf.gradients', (['cost', 'model_params'], {}), '(cost, model_params)\n', (10790, 10810), True, 'import tensorflow as tf\n'), ((13845, 13881), 'os.path.join', 'os.path.join', (['data_dir', 'tfrecord_dir'], {}), '(data_dir, tfrecord_dir)\n', (13857, 13881), False, 'import os\n'), ((14163, 14239), 'tensorflow.contrib.learn.RunConfig', 'tf.contrib.learn.RunConfig', ([], {'session_config': 'sess_config', 'model_dir': 'output_dir'}), '(session_config=sess_config, model_dir=output_dir)\n', (14189, 14239), True, 'import tensorflow as tf\n'), ((15126, 15151), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15149, 15151), False, 'import argparse\n'), ((1239, 1285), 'tensorflow.python.training.device_setter._RoundRobinStrategy', 'device_setter._RoundRobinStrategy', (['num_devices'], {}), '(num_devices)\n', (1272, 1285), False, 'from tensorflow.python.training import device_setter\n'), ((1297, 1322), 'six.callable', 'six.callable', (['ps_strategy'], {}), '(ps_strategy)\n', (1309, 1322), False, 'import six\n'), ((1441, 1486), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (["(op.device or '')"], {}), "(op.device or '')\n", (1469, 1486), True, 'from tensorflow.python.framework import device as pydev\n'), ((2185, 2209), 'tensorflow.name_scope', 'tf.name_scope', ([], {'name': 'name'}), '(name=name)\n', (2198, 2209), True, 'import tensorflow as tf\n'), ((8822, 8921), 'tensorflow.estimator.EstimatorSpec', 
'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op', 'training_hooks': 'train_hooks'}), '(mode=mode, loss=loss, train_op=train_op,\n training_hooks=train_hooks)\n', (8848, 8921), True, 'import tensorflow as tf\n'), ((9736, 9776), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""shadow"""'], {'reuse': '(False)'}), "('shadow', reuse=False)\n", (9753, 9776), True, 'import tensorflow as tf\n'), ((9809, 9856), 'shadownet.build_shadownet', 'shadownet.build_shadownet', ([], {'inputdata': 'input_imgs'}), '(inputdata=input_imgs)\n', (9834, 9856), False, 'import shadownet\n'), ((11593, 11612), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (11602, 11612), True, 'import tensorflow as tf\n'), ((11707, 11767), 'shadownet.ShadownetDataSet', 'shadownet.ShadownetDataSet', (['data_dir', 'subset', 'use_distortion'], {}), '(data_dir, subset, use_distortion)\n', (11733, 11767), False, 'import shadownet\n'), ((11961, 11992), 'tensorflow.split', 'tf.split', (['inputdata', 'num_shards'], {}), '(inputdata, num_shards)\n', (11969, 11992), True, 'import tensorflow as tf\n'), ((12016, 12084), 'tensorflow.sparse_split', 'tf.sparse_split', ([], {'sp_input': 'input_labels', 'num_split': 'num_shards', 'axis': '(0)'}), '(sp_input=input_labels, num_split=num_shards, axis=0)\n', (12031, 12084), True, 'import tensorflow as tf\n'), ((12386, 12557), 'functools.partial', 'functools.partial', (['input_fn', 'data_dir'], {'subset': '"""train"""', 'num_shards': 'num_gpus', 'batch_size': 'hparams.batch_size', 'use_distortion_for_training': 'use_distortion_for_training'}), "(input_fn, data_dir, subset='train', num_shards=num_gpus,\n batch_size=hparams.batch_size, use_distortion_for_training=\n use_distortion_for_training)\n", (12403, 12557), False, 'import functools\n'), ((12647, 12762), 'functools.partial', 'functools.partial', (['input_fn', 'data_dir'], {'subset': '"""validation"""', 'batch_size': 'hparams.batch_size', 'num_shards': 'num_gpus'}), "(input_fn, data_dir, subset='validation', batch_size=\n hparams.batch_size, num_shards=num_gpus)\n", (12664, 12762), False, 'import functools\n'), ((13261, 13441), 'tensorflow.contrib.learn.Experiment', 'tf.contrib.learn.Experiment', (['classifier'], {'train_input_fn': 'train_input_fn', 'eval_input_fn': 'eval_input_fn', 'train_steps': 'train_steps', 'eval_steps': 'eval_steps', 'min_eval_frequency': '(100)'}), '(classifier, train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn, train_steps=train_steps, eval_steps=\n eval_steps, min_eval_frequency=100)\n', (13288, 13441), True, 'import tensorflow as tf\n'), ((1876, 1925), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (["(worker_device or '')"], {}), "(worker_device or '')\n", (1904, 1925), True, 'from tensorflow.python.framework import device as pydev\n'), ((2270, 2322), 'tensorflow.reduce_join', 'tf.reduce_join', (['characters_list[coords[0]:coords[1]]'], {}), '(characters_list[coords[0]:coords[1]])\n', (2284, 2322), True, 'import tensorflow as tf\n'), ((2389, 2416), 'tensorflow.cumsum', 'tf.cumsum', (['sequence_lengths'], {}), '(sequence_lengths)\n', (2398, 2416), True, 'import tensorflow as tf\n'), ((2444, 2485), 'tensorflow.concat', 'tf.concat', (['[[0], end_coords[:-1]]'], {'axis': '(0)'}), '([[0], end_coords[:-1]], axis=0)\n', (2453, 2485), True, 'import tensorflow as tf\n'), ((2507, 2551), 'tensorflow.stack', 'tf.stack', (['[start_coords, end_coords]'], {'axis': '(1)'}), '([start_coords, end_coords], 
axis=1)\n', (2515, 2551), True, 'import tensorflow as tf\n'), ((2573, 2604), 'tensorflow.cast', 'tf.cast', (['coords'], {'dtype': 'tf.int32'}), '(coords, dtype=tf.int32)\n', (2580, 2604), True, 'import tensorflow as tf\n'), ((2624, 2678), 'tensorflow.map_fn', 'tf.map_fn', (['join_charcaters_fn', 'coords'], {'dtype': 'tf.string'}), '(join_charcaters_fn, coords, dtype=tf.string)\n', (2633, 2678), True, 'import tensorflow as tf\n'), ((2737, 2784), 'tensorflow.reduce_join', 'tf.reduce_join', (['characters_list'], {'keep_dims': '(True)'}), '(characters_list, keep_dims=True)\n', (2751, 2784), True, 'import tensorflow as tf\n'), ((5078, 5113), 'tensorflow.name_scope', 'tf.name_scope', (['"""gradient_averaging"""'], {}), "('gradient_averaging')\n", (5091, 5113), True, 'import tensorflow as tf\n'), ((5171, 5203), 'itertools.chain', 'itertools.chain', (['*tower_gradvars'], {}), '(*tower_gradvars)\n', (5186, 5203), False, 'import itertools\n'), ((5335, 5359), 'six.iteritems', 'six.iteritems', (['all_grads'], {}), '(all_grads)\n', (5348, 5359), False, 'import six\n'), ((5884, 5915), 'tensorflow.device', 'tf.device', (['consolidation_device'], {}), '(consolidation_device)\n', (5893, 5915), True, 'import tensorflow as tf\n'), ((5943, 5969), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (5967, 5969), True, 'import tensorflow as tf\n'), ((6055, 6177), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['starter_learning_rate', 'global_step', 'params.decay_steps', 'params.decay_rate'], {'staircase': '(True)'}), '(starter_learning_rate, global_step, params.\n decay_steps, params.decay_rate, staircase=True)\n', (6081, 6177), True, 'import tensorflow as tf\n'), ((6302, 6343), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_losses'], {'name': '"""loss"""'}), "(tower_losses, name='loss')\n", (6316, 6343), True, 'import tensorflow as tf\n'), ((7517, 7584), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', ([], {'tensors': 'tensors_to_log', 'every_n_iter': '(10)'}), '(tensors=tensors_to_log, every_n_iter=10)\n', (7543, 7584), True, 'import tensorflow as tf\n'), ((7626, 7692), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', ([], {'tensors': 'dist_to_log', 'every_n_iter': '(1000)'}), '(tensors=dist_to_log, every_n_iter=1000)\n', (7652, 7692), True, 'import tensorflow as tf\n'), ((7791, 7847), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""Seq_Dist"""', 'tensor': 'sequence_dist'}), "(name='Seq_Dist', tensor=sequence_dist)\n", (7808, 7847), True, 'import tensorflow as tf\n'), ((7869, 7930), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""Learning_rate"""', 'tensor': 'learning_rate'}), "(name='Learning_rate', tensor=learning_rate)\n", (7886, 7930), True, 'import tensorflow as tf\n'), ((8006, 8102), 'tensorflow.train.SummarySaverHook', 'tf.train.SummarySaverHook', ([], {'save_steps': '(1000)', 'output_dir': '"""/data/output/"""', 'summary_op': 'summaries'}), "(save_steps=1000, output_dir='/data/output/',\n summary_op=summaries)\n", (8031, 8102), True, 'import tensorflow as tf\n'), ((8173, 8228), 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (8199, 8228), True, 'import tensorflow as tf\n'), ((8786, 8805), 'tensorflow.group', 'tf.group', (['*train_op'], {}), '(*train_op)\n', (8794, 8805), True, 'import tensorflow as tf\n'), ((14108, 14148), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], 
{'force_gpu_compatible': '(True)'}), '(force_gpu_compatible=True)\n', (14121, 14148), True, 'import tensorflow as tf\n'), ((14370, 14434), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'is_chief': 'config.is_chief'}), '(is_chief=config.is_chief, **hparams)\n', (14397, 14434), True, 'import tensorflow as tf\n'), ((6770, 6813), 'tensorflow.cast', 'tf.cast', (['decoded[0].indices[:, 0]', 'tf.int32'], {}), '(decoded[0].indices[:, 0], tf.int32)\n', (6777, 6813), True, 'import tensorflow as tf\n'), ((6948, 6990), 'tensorflow.cast', 'tf.cast', (['labels[0].indices[:, 0]', 'tf.int32'], {}), '(labels[0].indices[:, 0], tf.int32)\n', (6955, 6990), True, 'import tensorflow as tf\n'), ((8285, 8361), 'tensorflow.train.SyncReplicasOptimizer', 'tf.train.SyncReplicasOptimizer', (['optimizer'], {'replicas_to_aggregate': 'num_workers'}), '(optimizer, replicas_to_aggregate=num_workers)\n', (8315, 8361), True, 'import tensorflow as tf\n'), ((10444, 10460), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (10457, 10460), True, 'import tensorflow as tf\n'), ((2810, 2836), 'tensorflow.shape', 'tf.shape', (['sequence_lengths'], {}), '(sequence_lengths)\n', (2818, 2836), True, 'import tensorflow as tf\n'), ((3838, 3867), 'tensorflow.name_scope', 'tf.name_scope', (["('tower_%d' % i)"], {}), "('tower_%d' % i)\n", (3851, 3867), True, 'import tensorflow as tf\n'), ((5454, 5475), 'tensorflow.device', 'tf.device', (['var.device'], {}), '(var.device)\n', (5463, 5475), True, 'import tensorflow as tf\n'), ((6501, 6526), 'numpy.ones', 'np.ones', (['tower_batch_size'], {}), '(tower_batch_size)\n', (6508, 6526), True, 'import numpy as np\n'), ((6672, 6701), 'tensorflow.cast', 'tf.cast', (['decoded[0]', 'tf.int32'], {}), '(decoded[0], tf.int32)\n', (6679, 6701), True, 'import tensorflow as tf\n'), ((10079, 10098), 'numpy.ones', 'np.ones', (['batch_size'], {}), '(batch_size)\n', (10086, 10098), True, 'import numpy as np\n'), ((3908, 3932), 'tensorflow.device', 'tf.device', (['device_setter'], {}), '(device_setter)\n', (3917, 3932), True, 'import tensorflow as tf\n'), ((6873, 6898), 'tensorflow.shape', 'tf.shape', (['tower_labels[0]'], {}), '(tower_labels[0])\n', (6881, 6898), True, 'import tensorflow as tf\n'), ((7050, 7075), 'tensorflow.shape', 'tf.shape', (['tower_labels[0]'], {}), '(tower_labels[0])\n', (7058, 7075), True, 'import tensorflow as tf\n'), ((8681, 8707), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (8705, 8707), True, 'import tensorflow as tf\n'), ((4880, 4934), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'name_scope'], {}), '(tf.GraphKeys.UPDATE_OPS, name_scope)\n', (4897, 4934), True, 'import tensorflow as tf\n'), ((5634, 5649), 'tensorflow.add_n', 'tf.add_n', (['grads'], {}), '(grads)\n', (5642, 5649), True, 'import tensorflow as tf\n')]
|
from common import small_buffer
import pytest
import numpy as np
import pyarrow as pa
import vaex
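# Note: `small_buffer(df, n)` comes from the shared test helpers; it presumably
# shrinks the evaluation buffer to n rows so that unique()/return_inverse are
# exercised across many small chunks rather than in a single pass.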
def test_unique_arrow(df_factory):
ds = df_factory(x=vaex.string_column(['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a']))
with small_buffer(ds, 2):
assert set(ds.unique(ds.x)) == {'a', 'b'}
values, index = ds.unique(ds.x, return_inverse=True)
assert np.array(values)[index].tolist() == ds.x.tolist()
def test_unique(df_factory):
ds = df_factory(colors=['red', 'green', 'blue', 'green'])
with small_buffer(ds, 2):
assert set(ds.unique(ds.colors)) == {'red', 'green', 'blue'}
values, index = ds.unique(ds.colors, return_inverse=True)
assert np.array(values)[index].tolist() == ds.colors.tolist()
ds = df_factory(x=['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a'])
with small_buffer(ds, 2):
assert set(ds.unique(ds.x)) == {'a', 'b'}
values, index = ds.unique(ds.x, return_inverse=True)
assert np.array(values)[index].tolist() == ds.x.tolist()
def test_unique_f4(df_factory):
x = np.array([np.nan, 0, 1, np.nan, 2, np.nan], dtype='f4')
df = df_factory(x=x)
assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
def test_unique_nan(df_factory):
x = [np.nan, 0, 1, np.nan, 2, np.nan]
df = df_factory(x=x)
assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
with small_buffer(df, 2):
values, indices = df.unique(df.x, return_inverse=True)
values = np.array(values)
values = values[indices]
mask = np.isnan(values)
assert values[~mask].tolist() == df.x.to_numpy()[~mask].tolist()
# assert indices.tolist() == [0, 1, 2, 0, 3, 0]
def test_unique_missing(df_factory):
    # Create test data
x = np.array([None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.nan, np.nan, np.nan])
df = df_factory(x=x)
uniques = df.x.unique(dropnan=True)
assert set(uniques) == set(['', 'A', 'B', -1, 0, 2, None])
def test_unique_missing_numeric(array_factory):
df = vaex.from_arrays(x=array_factory([1, None]))
values = df.x.unique()
assert set(values) == {1, None}
# assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
def test_unique_string_missing(df_factory):
x = ['John', None, 'Sally', None, '0.0']
df = df_factory(x=x)
result = df.x.unique()
assert len(result) == 4
    assert 'John' in result
    assert None in result
    assert 'Sally' in result
def test_unique_list(df_types):
df = df_types
assert set(df.string_list.unique()) == {'aap', 'noot', 'mies', None}
assert set(df.int_list.unique()) == {1, 2, 3, 4, 5, None}
@pytest.mark.parametrize("future", [False, True])
def test_unique_categorical(df_factory, future):
df = df_factory(x=vaex.string_column(['a', 'c', 'b', 'a', 'a']))
df = df.ordinal_encode('x')
df = df._future() if future else df
if future:
assert df.x.dtype == str
assert set(df.x.unique()) == {'a', 'b', 'c'}
assert df.x.nunique() == 3
else:
assert df.x.dtype == int
assert set(df.x.unique()) == {0, 1, 2}
assert df.x.nunique() == 3
|
[
"common.small_buffer",
"pytest.mark.parametrize",
"numpy.array",
"vaex.string_column",
"numpy.isnan"
] |
[((2694, 2742), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""future"""', '[False, True]'], {}), "('future', [False, True])\n", (2717, 2742), False, 'import pytest\n'), ((1088, 1143), 'numpy.array', 'np.array', (['[np.nan, 0, 1, np.nan, 2, np.nan]'], {'dtype': '"""f4"""'}), "([np.nan, 0, 1, np.nan, 2, np.nan], dtype='f4')\n", (1096, 1143), True, 'import numpy as np\n'), ((1800, 1899), 'numpy.array', 'np.array', (["[None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.nan, np.nan,\n np.nan]"], {}), "([None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.\n nan, np.nan, np.nan])\n", (1808, 1899), True, 'import numpy as np\n'), ((240, 259), 'common.small_buffer', 'small_buffer', (['ds', '(2)'], {}), '(ds, 2)\n', (252, 259), False, 'from common import small_buffer\n'), ((539, 558), 'common.small_buffer', 'small_buffer', (['ds', '(2)'], {}), '(ds, 2)\n', (551, 558), False, 'from common import small_buffer\n'), ((849, 868), 'common.small_buffer', 'small_buffer', (['ds', '(2)'], {}), '(ds, 2)\n', (861, 868), False, 'from common import small_buffer\n'), ((1416, 1435), 'common.small_buffer', 'small_buffer', (['df', '(2)'], {}), '(df, 2)\n', (1428, 1435), False, 'from common import small_buffer\n'), ((1517, 1533), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1525, 1533), True, 'import numpy as np\n'), ((1582, 1598), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (1590, 1598), True, 'import numpy as np\n'), ((159, 229), 'vaex.string_column', 'vaex.string_column', (["['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a']"], {}), "(['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a'])\n", (177, 229), False, 'import vaex\n'), ((2814, 2859), 'vaex.string_column', 'vaex.string_column', (["['a', 'c', 'b', 'a', 'a']"], {}), "(['a', 'c', 'b', 'a', 'a'])\n", (2832, 2859), False, 'import vaex\n'), ((387, 403), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (395, 403), True, 'import numpy as np\n'), ((710, 726), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (718, 726), True, 'import numpy as np\n'), ((996, 1012), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1004, 1012), True, 'import numpy as np\n')]
|
# utility functions for frequency related stuff
import numpy as np
import numpy.fft as fft
import math
def getFrequencyArray(fs, samples):
    # frequencies go from 0 to the nyquist frequency
nyquist = fs/2
return np.linspace(0, nyquist, samples)
# use this function for all FFT calculations
# then if the FFT implementation changes later (e.g. FFTW), just replace this one function
def forwardFFT(data, **kwargs):
if "norm" in kwargs and not kwargs["norm"]:
return fft.rfft(data, axis=0)
return fft.rfft(data, norm='ortho', axis=0)
def inverseFFT(data, length, **kwargs):
if "norm" in kwargs and not kwargs["norm"]:
return fft.irfft(data, n=length)
return fft.irfft(data, n=length, norm='ortho')
def padNextPower2(size):
next2Power = math.ceil(math.log(size,2))
next2Size = math.pow(2, int(next2Power))
return int(next2Size) - size
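# Illustrative usage sketch (not part of the original module): pad a signal to
# the next power of two, take the forward FFT, and invert it back to the padded
# length. With matching `norm` settings the round trip recovers the input to
# floating-point precision.
if __name__ == "__main__":
    fs = 128.0
    signal = np.random.randn(1000)
    padded = np.pad(signal, (0, padNextPower2(signal.size)), mode="constant")
    spectrum = forwardFFT(padded)
    freqs = getFrequencyArray(fs, spectrum.size)
    recovered = inverseFFT(spectrum, padded.size)
    assert np.allclose(padded, recovered)
    print(freqs[0], freqs[-1])  # 0 Hz up to the Nyquist frequency (fs / 2)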
|
[
"numpy.fft.rfft",
"numpy.linspace",
"numpy.fft.irfft",
"math.log"
] |
[((200, 232), 'numpy.linspace', 'np.linspace', (['(0)', 'nyquist', 'samples'], {}), '(0, nyquist, samples)\n', (211, 232), True, 'import numpy as np\n'), ((462, 498), 'numpy.fft.rfft', 'fft.rfft', (['data'], {'norm': '"""ortho"""', 'axis': '(0)'}), "(data, norm='ortho', axis=0)\n", (470, 498), True, 'import numpy.fft as fft\n'), ((628, 667), 'numpy.fft.irfft', 'fft.irfft', (['data'], {'n': 'length', 'norm': '"""ortho"""'}), "(data, n=length, norm='ortho')\n", (637, 667), True, 'import numpy.fft as fft\n'), ((431, 453), 'numpy.fft.rfft', 'fft.rfft', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (439, 453), True, 'import numpy.fft as fft\n'), ((594, 619), 'numpy.fft.irfft', 'fft.irfft', (['data'], {'n': 'length'}), '(data, n=length)\n', (603, 619), True, 'import numpy.fft as fft\n'), ((718, 735), 'math.log', 'math.log', (['size', '(2)'], {}), '(size, 2)\n', (726, 735), False, 'import math\n')]
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from real.camera import Camera
from robot import Robot
from subprocess import Popen, PIPE
def get_camera_to_robot_transformation(camera):
color_img, depth_img = camera.get_data()
cv2.imwrite("real/temp.jpg", color_img)
p = Popen(['./real/detect-from-file', "real/temp.jpg"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
tag_info = output.decode("utf-8")
tag_info = tag_info.split("\n")[:4]
for i, info in enumerate(tag_info):
tag_info[i] = info.split(" ")
print(tag_info)
tag_info = np.array(tag_info, dtype=np.float32)
assert(tag_info.shape == (4, 3))
tag_loc_camera = tag_info
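    # Known (x, y) positions of the four calibration tags in the robot frame,
    # keyed by tag id; the raw numbers appear to be millimetres converted to metres.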
tag_loc_robot = {
22: (270.15 / 1000, -637.0 / 1000),
7: (255.35 / 1000, -247.6 / 1000),
4: (-272.7 / 1000, -660.9 / 1000),
2: (-289.8 / 1000, -274.2 / 1000)
}
camera_to_robot = cv2.getPerspectiveTransform(
np.float32([tag[1:] for tag in tag_loc_camera]),
np.float32([tag_loc_robot[tag[0]] for tag in tag_loc_camera]))
return camera_to_robot
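# Helper sketch (not part of the original script): the matrix returned above is
# a 3x3 homography, so mapping a camera pixel (u, v) into robot-frame (x, y)
# needs the homogeneous divide shown here. The same arithmetic is repeated
# inline in mouseclick_callback below.
def camera_pixel_to_robot_xy(camera_to_robot, u, v):
    pt = np.dot(camera_to_robot, np.array([u, v, 1.0]))
    return pt[0] / pt[2], pt[1] / pt[2]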
# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '172.16.31.10' # IP and port to robot arm as TCP client (UR5)
tcp_host_ip = "172.19.97.157"
tcp_port = 30002
rtc_host_ip = '172.16.31.10' # IP and port to robot arm as real-time client (UR5)
rtc_host_ip = "172.19.97.157"
rtc_port = 30003
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])
workspace_limits = np.asarray([[-0.237, 0.211], [-0.683, -0.235], [0.18, 0.4]])
# workspace_limits = np.asarray([[-0.224, 0.224], [-0.674, -0.226], [0.18, 0.4]])
# Tool orientation (rx, ry, rz)
tool_orientation = [2.22, -2.22, 0]
tool_orientation = [0, -3.14, 0]
# ---------------------------------------------
# Move robot to home pose
robot = Robot(False, None, None, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
False, None, None)
robot.open_gripper()
transformation_matrix = get_camera_to_robot_transformation(robot.camera)
# Slow down robot
robot.joint_acc = 1.4
robot.joint_vel = 1.05
# Callback function for clicking on OpenCV window
click_point_pix = ()
camera_color_img, camera_depth_img = robot.get_camera_data()
def mouseclick_callback(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
global camera, robot, click_point_pix
click_point_pix = (x, y)
# Get click point in camera coordinates
# click_z = camera_depth_img[y][x] * robot.cam_depth_scale
# click_x = np.multiply(x-robot.cam_intrinsics[0][2],click_z/robot.cam_intrinsics[0][0])
# click_y = np.multiply(y-robot.cam_intrinsics[1][2],click_z/robot.cam_intrinsics[1][1])
# if click_z == 0:
# return
# click_point = np.asarray([click_x,click_y,click_z])
# click_point.shape = (3,1)
# # Convert camera to robot coordinates
# # camera2robot = np.linalg.inv(robot.cam_pose)
# camera2robot = robot.cam_pose
# target_position = np.dot(camera2robot[0:3,0:3],click_point) + camera2robot[0:3,3:]
# target_position = target_position[0:3,0]
# print(target_position)
camera_pt = np.array([x, y, 1])
robot_pt = np.dot(transformation_matrix, camera_pt)
robot_pt = np.array([robot_pt[0], robot_pt[1]]) / robot_pt[2]
print([robot_pt[0], robot_pt[1], -0.1])
print(robot.parse_tcp_state_data(robot.get_state(), "cartesian_info"))
robot.move_to([robot_pt[0], robot_pt[1], 0.3], tool_orientation)
# Show color and depth frames
cv2.namedWindow('color')
cv2.setMouseCallback('color', mouseclick_callback)
cv2.namedWindow('depth')
while True:
camera_color_img, camera_depth_img = robot.get_camera_data()
bgr_data = cv2.cvtColor(camera_color_img, cv2.COLOR_RGB2BGR)
if len(click_point_pix) != 0:
bgr_data = cv2.circle(bgr_data, click_point_pix, 7, (0, 0, 255), 2)
cv2.imshow('color', bgr_data)
camera_depth_img[camera_depth_img < 0.19] = 0
cv2.imshow('depth', camera_depth_img)
if cv2.waitKey(1) == ord('c'):
break
cv2.destroyAllWindows()
|
[
"cv2.setMouseCallback",
"cv2.imwrite",
"numpy.float32",
"robot.Robot",
"subprocess.Popen",
"numpy.asarray",
"cv2.imshow",
"numpy.array",
"numpy.dot",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((1585, 1644), 'numpy.asarray', 'np.asarray', (['[[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]]'], {}), '([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])\n', (1595, 1644), True, 'import numpy as np\n'), ((1664, 1724), 'numpy.asarray', 'np.asarray', (['[[-0.237, 0.211], [-0.683, -0.235], [0.18, 0.4]]'], {}), '([[-0.237, 0.211], [-0.683, -0.235], [0.18, 0.4]])\n', (1674, 1724), True, 'import numpy as np\n'), ((2036, 2147), 'robot.Robot', 'Robot', (['(False)', 'None', 'None', 'workspace_limits', 'tcp_host_ip', 'tcp_port', 'rtc_host_ip', 'rtc_port', '(False)', 'None', 'None'], {}), '(False, None, None, workspace_limits, tcp_host_ip, tcp_port,\n rtc_host_ip, rtc_port, False, None, None)\n', (2041, 2147), False, 'from robot import Robot\n'), ((3821, 3845), 'cv2.namedWindow', 'cv2.namedWindow', (['"""color"""'], {}), "('color')\n", (3836, 3845), False, 'import cv2\n'), ((3846, 3896), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""color"""', 'mouseclick_callback'], {}), "('color', mouseclick_callback)\n", (3866, 3896), False, 'import cv2\n'), ((3897, 3921), 'cv2.namedWindow', 'cv2.namedWindow', (['"""depth"""'], {}), "('depth')\n", (3912, 3921), False, 'import cv2\n'), ((4352, 4375), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4373, 4375), False, 'import cv2\n'), ((286, 325), 'cv2.imwrite', 'cv2.imwrite', (['"""real/temp.jpg"""', 'color_img'], {}), "('real/temp.jpg', color_img)\n", (297, 325), False, 'import cv2\n'), ((334, 427), 'subprocess.Popen', 'Popen', (["['./real/detect-from-file', 'real/temp.jpg']"], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['./real/detect-from-file', 'real/temp.jpg'], stdin=PIPE, stdout=PIPE,\n stderr=PIPE)\n", (339, 427), False, 'from subprocess import Popen, PIPE\n'), ((649, 685), 'numpy.array', 'np.array', (['tag_info'], {'dtype': 'np.float32'}), '(tag_info, dtype=np.float32)\n', (657, 685), True, 'import numpy as np\n'), ((4015, 4064), 'cv2.cvtColor', 'cv2.cvtColor', (['camera_color_img', 'cv2.COLOR_RGB2BGR'], {}), '(camera_color_img, cv2.COLOR_RGB2BGR)\n', (4027, 4064), False, 'import cv2\n'), ((4179, 4208), 'cv2.imshow', 'cv2.imshow', (['"""color"""', 'bgr_data'], {}), "('color', bgr_data)\n", (4189, 4208), False, 'import cv2\n'), ((4263, 4300), 'cv2.imshow', 'cv2.imshow', (['"""depth"""', 'camera_depth_img'], {}), "('depth', camera_depth_img)\n", (4273, 4300), False, 'import cv2\n'), ((1012, 1059), 'numpy.float32', 'np.float32', (['[tag[1:] for tag in tag_loc_camera]'], {}), '([tag[1:] for tag in tag_loc_camera])\n', (1022, 1059), True, 'import numpy as np\n'), ((1069, 1130), 'numpy.float32', 'np.float32', (['[tag_loc_robot[tag[0]] for tag in tag_loc_camera]'], {}), '([tag_loc_robot[tag[0]] for tag in tag_loc_camera])\n', (1079, 1130), True, 'import numpy as np\n'), ((3437, 3456), 'numpy.array', 'np.array', (['[x, y, 1]'], {}), '([x, y, 1])\n', (3445, 3456), True, 'import numpy as np\n'), ((3476, 3516), 'numpy.dot', 'np.dot', (['transformation_matrix', 'camera_pt'], {}), '(transformation_matrix, camera_pt)\n', (3482, 3516), True, 'import numpy as np\n'), ((4118, 4174), 'cv2.circle', 'cv2.circle', (['bgr_data', 'click_point_pix', '(7)', '(0, 0, 255)', '(2)'], {}), '(bgr_data, click_point_pix, 7, (0, 0, 255), 2)\n', (4128, 4174), False, 'import cv2\n'), ((4309, 4323), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4320, 4323), False, 'import cv2\n'), ((3536, 3572), 'numpy.array', 'np.array', (['[robot_pt[0], robot_pt[1]]'], {}), '([robot_pt[0], robot_pt[1]])\n', (3544, 3572), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_calibration.CalibratedForecastDistributionParameters`
class.
"""
import unittest
import numpy as np
from iris.cube import CubeList
from iris.tests import IrisTest
from numpy.testing import assert_array_almost_equal
from improver.calibration.ensemble_calibration import (
CalibratedForecastDistributionParameters as Plugin,
)
from improver.calibration.ensemble_calibration import (
EstimateCoefficientsForEnsembleCalibration,
)
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.warnings_handler import ManageWarnings
from .helper_functions import EnsembleCalibrationAssertions, SetupCubes
from .test_EstimateCoefficientsForEnsembleCalibration import SetupExpectedCoefficients
class SetupCoefficientsCubes(SetupCubes, SetupExpectedCoefficients):
"""Set up coefficients cubes for testing."""
@ManageWarnings(
ignored_messages=[
"Collapsing a non-contiguous coordinate.",
"invalid escape sequence",
],
warning_types=[UserWarning, DeprecationWarning],
)
def setUp(self):
"""Set up coefficients cubes for when either the ensemble mean or the
ensemble realizations have been used as the predictor. The coefficients
have been constructed from the same underlying set of ensemble
realizations, so application of these coefficients would be expected
to give similar results. The values for the coefficients used to
construct the coefficients cubes are taken from the
SetupExpectedCoefficients class. These coefficients are the
expected outputs from the tests to estimate the coefficients."""
super().setUp()
# Set up a coefficients cube when using the ensemble mean as the
# predictor.
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", desired_units="Celsius"
)
self.coeffs_from_mean = estimator.create_coefficients_cubelist(
self.expected_mean_pred_norm,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
# Set up a timeshifted coefficients cube using the ensemble mean as a
# predictor.
forecast_timeshift_cube = self.historic_temperature_forecast_cube.copy()
for coord_name in ["time", "forecast_period"]:
forecast_timeshift_cube.coord(coord_name).points = [
_ + 3600 for _ in forecast_timeshift_cube.coord(coord_name).points
]
self.coeffs_from_mean_timeshift = estimator.create_coefficients_cubelist(
self.expected_mean_pred_norm,
forecast_timeshift_cube,
CubeList([forecast_timeshift_cube]),
)
# Set up a coefficients cube when using the ensemble mean as the
# predictor and separate coefficients at each point.
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", point_by_point=True, desired_units="Celsius"
)
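        # Replicate the four expected coefficients at every point of the 3x3
        # grid, giving an array of shape (n_coefficients, y, x) for the
        # point-by-point estimator.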
point_by_point_predictor = np.stack(
[self.expected_mean_pred_norm] * 9
).T.reshape(4, 3, 3)
self.coeffs_from_mean_point_by_point = estimator.create_coefficients_cubelist(
point_by_point_predictor,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
# Set up a coefficients cube when using the ensemble realization as the
# predictor.
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", desired_units="Celsius", predictor="realizations"
)
self.coeffs_from_realizations = estimator.create_coefficients_cubelist(
self.expected_realizations_norm,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
# Set up a coefficients cube when using the ensemble realization as the
# predictor and separate coefficients at each point.
expected_realizations_each_site = [
array if array.ndim == 1 else np.squeeze(array)
for array in list(self.expected_realizations_each_site.values())
]
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", predictor="realizations", point_by_point=True
)
self.coeffs_from_realizations_sites = estimator.create_coefficients_cubelist(
expected_realizations_each_site,
self.historic_forecast_spot_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
        # Set up a coefficients cube when using an additional predictor.
self.altitude = set_up_variable_cube(
np.ones((3, 3), dtype=np.float32), name="surface_altitude", units="m"
)
for coord in ["time", "forecast_reference_time", "forecast_period"]:
self.altitude.remove_coord(coord)
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", desired_units="Celsius"
)
self.coeffs_from_mean_alt = estimator.create_coefficients_cubelist(
self.expected_mean_pred_norm_alt,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube, self.altitude]),
)
# Some expected data that are used in various tests.
self.expected_loc_param_mean = np.array(
[
[273.7014, 274.6534, 275.4469],
[276.9385, 277.7636, 278.5570],
[279.6996, 280.1122, 281.2547],
],
dtype=np.float32,
)
self.expected_scale_param_mean = np.array(
[
[0.2316, 0.2342, 0.0168],
[0.0271, 0.0237, 0.0168],
[0.0634, 0.1151, 0.0116],
],
dtype=np.float32,
)
self.expected_loc_param_realizations = np.array(
[
[274.388, 275.3053, 275.4492],
[277.1295, 277.3866, 278.4672],
[280.2007, 280.3929, 281.2602],
],
dtype=np.float32,
)
self.expected_loc_param_realizations_sites = np.array(
[277.7531, 277.4529, 277.553, 277.2528], dtype=np.float32,
)
self.expected_scale_param_realizations_sites = np.array(
[0, 0, 0, 0], dtype=np.float32
)
self.expected_loc_param_mean_alt = np.array(
[
[275.18134, 276.18134, 277.01465],
[278.58133, 279.44797, 280.2813],
[281.48132, 281.91464, 283.11465],
],
dtype=np.float32,
)
self.expected_scale_param_mean_alt = np.array(
[
[0.4347, 0.4396, 0.0308],
[0.0503, 0.0438, 0.0308],
[0.1184, 0.2157, 0.0211],
],
dtype=np.float32,
)
# Create output cubes with the expected data.
self.expected_loc_param_mean_cube = set_up_variable_cube(
self.expected_loc_param_mean,
name="location_parameter",
units="K",
attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
)
self.expected_scale_param_mean_cube = set_up_variable_cube(
self.expected_scale_param_mean,
name="scale_parameter",
units="Kelvin^2",
attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
)
class Test__init__(IrisTest):
"""Test the __init__ method."""
def test_basic(self):
"""Test without specifying a predictor."""
plugin = Plugin()
self.assertEqual(plugin.predictor, "mean")
def test_with_predictor(self):
"""Test specifying the predictor."""
plugin = Plugin(predictor="realizations")
self.assertEqual(plugin.predictor, "realizations")
class Test__repr__(IrisTest):
"""Test the __repr__ method."""
def test_basic(self):
"""Test without the predictor."""
result = str(Plugin())
msg = "<CalibratedForecastDistributionParameters: " "predictor: mean>"
self.assertEqual(result, msg)
def test_with_predictor(self):
"""Test specifying the predictor."""
result = str(Plugin(predictor="realizations"))
msg = "<CalibratedForecastDistributionParameters: " "predictor: realizations>"
self.assertEqual(result, msg)
class Test__spatial_domain_match(SetupCoefficientsCubes):
""" Test the _spatial_domain_match method."""
def setUp(self):
super().setUp()
self.plugin = Plugin()
def test_matching(self):
"""Test case in which spatial domains match."""
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
self.plugin._spatial_domain_match()
def test_unmatching_x_axis_points(self):
"""Test when the points of the x dimension do not match."""
self.current_temperature_forecast_cube.coord(axis="x").bounds = (
self.current_temperature_forecast_cube.coord(axis="x").bounds + 2.0
)
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
msg = "The points or bounds of the x axis given by the current forecast"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._spatial_domain_match()
def test_unmatching_x_axis_bounds(self):
"""Test when the bounds of the x dimension do not match."""
self.current_temperature_forecast_cube.coord(axis="x").bounds = [
[-35, -5],
[-5, 5],
[5, 35],
]
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
msg = "The points or bounds of the x axis given by the current forecast"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._spatial_domain_match()
def test_unmatching_y_axis(self):
"""Test case in which the y-dimensions of the domains do not match."""
self.current_temperature_forecast_cube.coord(axis="y").bounds = (
self.current_temperature_forecast_cube.coord(axis="y").bounds + 2.0
)
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
msg = "The points or bounds of the y axis given by the current forecast"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._spatial_domain_match()
def test_skipping_spot_forecast(self):
"""Test passing a spot forecast. In this case, the spatial domain
is not checked."""
self.plugin.current_forecast = self.current_forecast_spot_cube
self.plugin._spatial_domain_match()
class Test__calculate_location_parameter_from_mean(
SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
"""Test the __calculate_location_parameter_from_mean method."""
def setUp(self):
"""Set-up coefficients and plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test that the expected values for the location parameter are
calculated when using the ensemble mean. These expected values are
compared to the results when using the ensemble realizations to ensure
that the results are similar."""
location_parameter = self.plugin._calculate_location_parameter_from_mean()
self.assertCalibratedVariablesAlmostEqual(
location_parameter, self.expected_loc_param_mean
)
assert_array_almost_equal(
location_parameter, self.expected_loc_param_realizations, decimal=0,
)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_missing_additional_predictor(self):
"""Test that an error is raised if an additional predictor is expected
based on the contents of the coefficients cube."""
self.plugin.coefficients_cubelist = self.coeffs_from_mean_alt
msg = "The number of forecast predictors must equal the number"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._calculate_location_parameter_from_mean()
class Test__calculate_location_parameter_from_realizations(
SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
"""Test the _calculate_location_parameter_from_realizations method."""
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def setUp(self):
"""Set-up coefficients and plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test that the expected values for the location parameter are
calculated when using the ensemble realizations. These expected values
are compared to the results when using the ensemble mean to ensure
that the results are similar."""
self.plugin.coefficients_cubelist = self.coeffs_from_realizations
location_parameter = (
self.plugin._calculate_location_parameter_from_realizations()
)
self.assertCalibratedVariablesAlmostEqual(
location_parameter, self.expected_loc_param_realizations
)
assert_array_almost_equal(
location_parameter, self.expected_loc_param_mean, decimal=0
)
class Test__calculate_scale_parameter(
SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
"""Test the _calculate_scale_parameter method."""
def setUp(self):
"""Set-up the plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test the scale parameter is calculated correctly."""
self.plugin.coefficients_cubelist = self.coeffs_from_mean
scale_parameter = self.plugin._calculate_scale_parameter()
self.assertCalibratedVariablesAlmostEqual(
scale_parameter, self.expected_scale_param_mean
)
class Test__create_output_cubes(SetupCoefficientsCubes, EnsembleCalibrationAssertions):
"""Test the _create_output_cubes method."""
def setUp(self):
"""Set-up the plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test that the cubes created containing the location and scale
parameter are formatted as expected."""
(
location_parameter_cube,
scale_parameter_cube,
) = self.plugin._create_output_cubes(
self.expected_loc_param_mean, self.expected_scale_param_mean
)
self.assertEqual(location_parameter_cube, self.expected_loc_param_mean_cube)
self.assertEqual(scale_parameter_cube, self.expected_scale_param_mean_cube)
class Test_process(SetupCoefficientsCubes, EnsembleCalibrationAssertions):
"""Test the process plugin."""
def setUp(self):
"""Set-up the plugin for testing."""
super().setUp()
self.plugin = Plugin()
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_diagnostic_match(self):
"""Test that an error is raised if the diagnostic_standard_name does
not match when comparing a forecast cube and coefficients cubelist."""
msg = "The forecast diagnostic"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(
self.current_wind_speed_forecast_cube, self.coeffs_from_mean
)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_time_match(self):
"""Test that an error is raised if the time coordinates do
not match when comparing a forecast cube and coefficients cubelist."""
msg = "rounded forecast_period hours"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean_timeshift
)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_time_match_tolerate(self):
"""Test that no error is raised when using a coefficients file with
a mismatching forecast_period coordinate, if the
tolerate_time_mismatch option is enabled."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube,
self.coeffs_from_mean_timeshift,
tolerate_time_mismatch=True,
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_variable_setting(self):
"""Test that the cubes passed into the plugin are allocated to
plugin variables appropriately."""
_, _ = self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean
)
self.assertEqual(
self.current_temperature_forecast_cube, self.plugin.current_forecast
)
self.assertEqual(self.coeffs_from_mean, self.plugin.coefficients_cubelist)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end(self):
"""An example end-to-end calculation. This repeats the test elements
above but all grouped together."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_point_by_point(self):
"""An example end-to-end calculation when a separate set of
coefficients are computed for each grid point. This repeats the test
elements above but all grouped together."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean_point_by_point
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_point_by_point_sites_realizations(self):
"""An example end-to-end calculation when a separate set of
coefficients are computed for each site using the realizations as the
predictor. This repeats the test elements above but all grouped together."""
plugin = Plugin(predictor="realizations")
calibrated_forecast_predictor, calibrated_forecast_var = plugin.process(
self.current_forecast_spot_cube, self.coeffs_from_realizations_sites
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data,
self.expected_loc_param_realizations_sites,
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_realizations_sites
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_with_additional_predictor(self):
"""Test that the expected calibrated forecast is generated, if an
additional predictor is provided."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube,
self.coeffs_from_mean_alt,
additional_fields=CubeList([self.altitude]),
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean_alt
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean_alt
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_with_mask(self):
"""An example end-to-end calculation, but making sure that the
areas that are masked within the landsea mask, are masked at the
end."""
# Construct a mask and encapsulate as a cube.
mask = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
mask_cube = self.current_temperature_forecast_cube[0].copy(data=mask)
# Convention for IMPROVER is that land points are ones and sea points
# are zeros in land-sea masks. In this case we want to mask sea points.
expected_mask = np.array(
[[False, True, True], [True, False, True], [True, True, False]]
)
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube,
self.coeffs_from_mean,
landsea_mask=mask_cube,
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data.data, self.expected_loc_param_mean
)
self.assertArrayEqual(calibrated_forecast_predictor.data.mask, expected_mask)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data.data, self.expected_scale_param_mean
)
self.assertArrayEqual(calibrated_forecast_var.data.mask, expected_mask)
if __name__ == "__main__":
unittest.main()
|
[
"improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters",
"numpy.testing.assert_array_almost_equal",
"iris.cube.CubeList",
"numpy.ones",
"improver.synthetic_data.set_up_test_cubes.set_up_variable_cube",
"improver.calibration.ensemble_calibration.EstimateCoefficientsForEnsembleCalibration",
"numpy.squeeze",
"numpy.array",
"numpy.stack",
"unittest.main",
"improver.utilities.warnings_handler.ManageWarnings"
] |
[((2638, 2799), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.', 'invalid escape sequence']", 'warning_types': '[UserWarning, DeprecationWarning]'}), "(ignored_messages=['Collapsing a non-contiguous coordinate.',\n 'invalid escape sequence'], warning_types=[UserWarning, DeprecationWarning]\n )\n", (2652, 2799), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((13255, 13331), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (13269, 13331), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((13962, 14038), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (13976, 14038), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((14693, 14769), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (14707, 14769), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((14989, 15065), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (15003, 15065), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((16158, 16234), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (16172, 16234), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((16924, 17000), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (16938, 17000), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((17768, 17844), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (17782, 17844), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((18262, 18338), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (18276, 18338), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((18757, 18833), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (18771, 18833), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((19646, 19722), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous 
coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (19660, 19722), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((20201, 20277), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (20215, 20277), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((20952, 21028), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (20966, 21028), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((21810, 21886), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (21824, 21886), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((22799, 22875), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (22813, 22875), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((23657, 23733), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'ignored_messages': "['Collapsing a non-contiguous coordinate.']"}), "(ignored_messages=['Collapsing a non-contiguous coordinate.'])\n", (23671, 23733), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((25107, 25122), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25120, 25122), False, 'import unittest\n'), ((3588, 3663), 'improver.calibration.ensemble_calibration.EstimateCoefficientsForEnsembleCalibration', 'EstimateCoefficientsForEnsembleCalibration', (['"""norm"""'], {'desired_units': '"""Celsius"""'}), "('norm', desired_units='Celsius')\n", (3630, 3663), False, 'from improver.calibration.ensemble_calibration import EstimateCoefficientsForEnsembleCalibration\n'), ((4702, 4802), 'improver.calibration.ensemble_calibration.EstimateCoefficientsForEnsembleCalibration', 'EstimateCoefficientsForEnsembleCalibration', (['"""norm"""'], {'point_by_point': '(True)', 'desired_units': '"""Celsius"""'}), "('norm', point_by_point=True,\n desired_units='Celsius')\n", (4744, 4802), False, 'from improver.calibration.ensemble_calibration import EstimateCoefficientsForEnsembleCalibration\n'), ((5317, 5422), 'improver.calibration.ensemble_calibration.EstimateCoefficientsForEnsembleCalibration', 'EstimateCoefficientsForEnsembleCalibration', (['"""norm"""'], {'desired_units': '"""Celsius"""', 'predictor': '"""realizations"""'}), "('norm', desired_units='Celsius',\n predictor='realizations')\n", (5359, 5422), False, 'from improver.calibration.ensemble_calibration import EstimateCoefficientsForEnsembleCalibration\n'), ((6048, 6149), 'improver.calibration.ensemble_calibration.EstimateCoefficientsForEnsembleCalibration', 'EstimateCoefficientsForEnsembleCalibration', (['"""norm"""'], {'predictor': '"""realizations"""', 'point_by_point': '(True)'}), "('norm', predictor='realizations',\n point_by_point=True)\n", (6090, 6149), False, 'from improver.calibration.ensemble_calibration import EstimateCoefficientsForEnsembleCalibration\n'), ((6778, 
6853), 'improver.calibration.ensemble_calibration.EstimateCoefficientsForEnsembleCalibration', 'EstimateCoefficientsForEnsembleCalibration', (['"""norm"""'], {'desired_units': '"""Celsius"""'}), "('norm', desired_units='Celsius')\n", (6820, 6853), False, 'from improver.calibration.ensemble_calibration import EstimateCoefficientsForEnsembleCalibration\n'), ((7242, 7370), 'numpy.array', 'np.array', (['[[273.7014, 274.6534, 275.4469], [276.9385, 277.7636, 278.557], [279.6996, \n 280.1122, 281.2547]]'], {'dtype': 'np.float32'}), '([[273.7014, 274.6534, 275.4469], [276.9385, 277.7636, 278.557], [\n 279.6996, 280.1122, 281.2547]], dtype=np.float32)\n', (7250, 7370), True, 'import numpy as np\n'), ((7506, 7617), 'numpy.array', 'np.array', (['[[0.2316, 0.2342, 0.0168], [0.0271, 0.0237, 0.0168], [0.0634, 0.1151, 0.0116]]'], {'dtype': 'np.float32'}), '([[0.2316, 0.2342, 0.0168], [0.0271, 0.0237, 0.0168], [0.0634, \n 0.1151, 0.0116]], dtype=np.float32)\n', (7514, 7617), True, 'import numpy as np\n'), ((7758, 7886), 'numpy.array', 'np.array', (['[[274.388, 275.3053, 275.4492], [277.1295, 277.3866, 278.4672], [280.2007, \n 280.3929, 281.2602]]'], {'dtype': 'np.float32'}), '([[274.388, 275.3053, 275.4492], [277.1295, 277.3866, 278.4672], [\n 280.2007, 280.3929, 281.2602]], dtype=np.float32)\n', (7766, 7886), True, 'import numpy as np\n'), ((8033, 8100), 'numpy.array', 'np.array', (['[277.7531, 277.4529, 277.553, 277.2528]'], {'dtype': 'np.float32'}), '([277.7531, 277.4529, 277.553, 277.2528], dtype=np.float32)\n', (8041, 8100), True, 'import numpy as np\n'), ((8180, 8220), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {'dtype': 'np.float32'}), '([0, 0, 0, 0], dtype=np.float32)\n', (8188, 8220), True, 'import numpy as np\n'), ((8287, 8424), 'numpy.array', 'np.array', (['[[275.18134, 276.18134, 277.01465], [278.58133, 279.44797, 280.2813], [\n 281.48132, 281.91464, 283.11465]]'], {'dtype': 'np.float32'}), '([[275.18134, 276.18134, 277.01465], [278.58133, 279.44797, \n 280.2813], [281.48132, 281.91464, 283.11465]], dtype=np.float32)\n', (8295, 8424), True, 'import numpy as np\n'), ((8564, 8675), 'numpy.array', 'np.array', (['[[0.4347, 0.4396, 0.0308], [0.0503, 0.0438, 0.0308], [0.1184, 0.2157, 0.0211]]'], {'dtype': 'np.float32'}), '([[0.4347, 0.4396, 0.0308], [0.0503, 0.0438, 0.0308], [0.1184, \n 0.2157, 0.0211]], dtype=np.float32)\n', (8572, 8675), True, 'import numpy as np\n'), ((8868, 9002), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['self.expected_loc_param_mean'], {'name': '"""location_parameter"""', 'units': '"""K"""', 'attributes': 'MANDATORY_ATTRIBUTE_DEFAULTS'}), "(self.expected_loc_param_mean, name=\n 'location_parameter', units='K', attributes=MANDATORY_ATTRIBUTE_DEFAULTS)\n", (8888, 9002), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((9103, 9242), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (['self.expected_scale_param_mean'], {'name': '"""scale_parameter"""', 'units': '"""Kelvin^2"""', 'attributes': 'MANDATORY_ATTRIBUTE_DEFAULTS'}), "(self.expected_scale_param_mean, name='scale_parameter',\n units='Kelvin^2', attributes=MANDATORY_ATTRIBUTE_DEFAULTS)\n", (9123, 9242), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((9462, 9470), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (9468, 9470), True, 'from improver.calibration.ensemble_calibration import 
CalibratedForecastDistributionParameters as Plugin\n'), ((9620, 9652), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {'predictor': '"""realizations"""'}), "(predictor='realizations')\n", (9626, 9652), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((10438, 10446), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (10444, 10446), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((13096, 13104), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (13102, 13104), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((13838, 13937), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['location_parameter', 'self.expected_loc_param_realizations'], {'decimal': '(0)'}), '(location_parameter, self.\n expected_loc_param_realizations, decimal=0)\n', (13863, 13937), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14896, 14904), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (14902, 14904), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((15686, 15776), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['location_parameter', 'self.expected_loc_param_mean'], {'decimal': '(0)'}), '(location_parameter, self.expected_loc_param_mean,\n decimal=0)\n', (15711, 15776), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16065, 16073), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (16071, 16073), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((16831, 16839), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (16837, 16839), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((17753, 17761), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (17759, 17761), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((22200, 22232), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {'predictor': '"""realizations"""'}), "(predictor='realizations')\n", (22206, 22232), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((24005, 24048), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (24013, 24048), True, 'import numpy as np\n'), ((24309, 24382), 'numpy.array', 'np.array', (['[[False, True, True], [True, False, True], [True, True, False]]'], {}), '([[False, True, True], [True, False, True], [True, True, False]])\n', (24317, 24382), True, 'import numpy as np\n'), ((3865, 3916), 'iris.cube.CubeList', 'CubeList', (['[self.historic_temperature_forecast_cube]'], {}), '([self.historic_temperature_forecast_cube])\n', (3873, 3916), False, 'from iris.cube import CubeList\n'), 
((4500, 4535), 'iris.cube.CubeList', 'CubeList', (['[forecast_timeshift_cube]'], {}), '([forecast_timeshift_cube])\n', (4508, 4535), False, 'from iris.cube import CubeList\n'), ((5132, 5183), 'iris.cube.CubeList', 'CubeList', (['[self.historic_temperature_forecast_cube]'], {}), '([self.historic_temperature_forecast_cube])\n', (5140, 5183), False, 'from iris.cube import CubeList\n'), ((5631, 5682), 'iris.cube.CubeList', 'CubeList', (['[self.historic_temperature_forecast_cube]'], {}), '([self.historic_temperature_forecast_cube])\n', (5639, 5682), False, 'from iris.cube import CubeList\n'), ((6357, 6408), 'iris.cube.CubeList', 'CubeList', (['[self.historic_temperature_forecast_cube]'], {}), '([self.historic_temperature_forecast_cube])\n', (6365, 6408), False, 'from iris.cube import CubeList\n'), ((6554, 6587), 'numpy.ones', 'np.ones', (['(3, 3)'], {'dtype': 'np.float32'}), '((3, 3), dtype=np.float32)\n', (6561, 6587), True, 'import numpy as np\n'), ((7063, 7129), 'iris.cube.CubeList', 'CubeList', (['[self.historic_temperature_forecast_cube, self.altitude]'], {}), '([self.historic_temperature_forecast_cube, self.altitude])\n', (7071, 7129), False, 'from iris.cube import CubeList\n'), ((9871, 9879), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {}), '()\n', (9877, 9879), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((10100, 10132), 'improver.calibration.ensemble_calibration.CalibratedForecastDistributionParameters', 'Plugin', ([], {'predictor': '"""realizations"""'}), "(predictor='realizations')\n", (10106, 10132), True, 'from improver.calibration.ensemble_calibration import CalibratedForecastDistributionParameters as Plugin\n'), ((5922, 5939), 'numpy.squeeze', 'np.squeeze', (['array'], {}), '(array)\n', (5932, 5939), True, 'import numpy as np\n'), ((23259, 23284), 'iris.cube.CubeList', 'CubeList', (['[self.altitude]'], {}), '([self.altitude])\n', (23267, 23284), False, 'from iris.cube import CubeList\n'), ((4856, 4900), 'numpy.stack', 'np.stack', (['([self.expected_mean_pred_norm] * 9)'], {}), '([self.expected_mean_pred_norm] * 9)\n', (4864, 4900), True, 'import numpy as np\n')]
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
from tqdm import tqdm
import sys
import distributed as dist
import utils
from models.vqvae import VQVAE, VQVAE_Blob2Full
from models.discriminator import discriminator
visual_folder = '/home2/bipasha31/python_scripts/CurrentWork/samples/VQVAE'
os.makedirs(visual_folder, exist_ok=True)
verbose = False
save_idx_global = 0
save_at = 100
did = 0
models = {
'gan': 0,
'vae': 1
}
model_to_train = models['vae']
results = {
'n_updates': 0,
'recon_errors': [],
'loss_vals': [],
'perplexities': [],
'd_loss': []
}
device = 'cuda:0'
def main(args):
"""
Set up VQ-VAE model with components defined in ./models/ folder
"""
model = VQVAE(args.n_hiddens, args.n_residual_hiddens,
args.n_residual_layers, args.n_embeddings,
args.embedding_dim, args.beta, device)
if args.ckpt:
model.load_state_dict(torch.load(args.ckpt)['model'])
model = model.to(device)
if args.test:
loader = utils.load_data_and_data_loaders(args.dataset, args.batch_size, test=True)
test(loader, model)
return
"""
Load data and define batch data loaders
"""
items = utils.load_data_and_data_loaders(args.dataset, args.batch_size)
training_loader, validation_loader = items[2], items[3]
x_train_var = items[4]
"""
Set up optimizer and training loop
"""
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, amsgrad=True)
model.train()
if model_to_train == models['gan']:
train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer)
else:
train(args, training_loader, validation_loader, x_train_var, model, optimizer)
def test(loader, model):
for i, data in enumerate(tqdm(loader)):
x, _ = data
x = x.to(device)
with torch.no_grad():
_ = model(x, save_idx=f'{i}', visual_folder=visual_folder)
def train(args, training_loader, validation_loader, x_train_var, model, optimizer):
global save_idx_global
for i in range(args.n_updates):
(x, _) = next(iter(training_loader))
x = x.to(device)
optimizer.zero_grad()
save_idx = None
embedding_loss, x_hat, perplexity = model(x)
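        # Reconstruction MSE normalised by the training-set variance, a choice
        # used in common VQ-VAE reference implementations.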
recon_loss = torch.mean((x_hat - x)**2) / x_train_var
loss = recon_loss + embedding_loss
loss.backward()
optimizer.step()
results["recon_errors"].append(recon_loss.cpu().detach().numpy())
results["perplexities"].append(perplexity.cpu().detach().numpy())
results["loss_vals"].append(loss.cpu().detach().numpy())
results["n_updates"] = i
if i % save_at == 0:
save_idx = save_idx_global
save_idx_global += 1
model.eval()
with torch.no_grad():
for vi in tqdm(range(10)):
(x, _) = next(iter(validation_loader))
x = x.to(device)
_, _, _ = model(x, verbose=verbose, save_idx=f'{save_idx}_{vi}', visual_folder=visual_folder)
model.train()
if i % args.log_interval == 0 and dist.is_primary():
"""
save model and print values
"""
if args.save:
hyperparameters = args.__dict__
utils.save_model_and_results(
model, optimizer, results, hyperparameters, args.filename)
print('Update #', i, 'Recon Error:',
np.mean(results["recon_errors"][-args.log_interval:]),
'Loss', np.mean(results["loss_vals"][-args.log_interval:]),
'Perplexity:', np.mean(results["perplexities"][-args.log_interval:]))
def train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer):
global save_idx_global
c_mse = nn.MSELoss()
disc = discriminator().to(device)
optim_D = optim.Adam(disc.parameters(), lr=args.learning_rate, amsgrad=True)
for i in range(args.n_updates):
(x, _) = next(iter(training_loader))
x = x.to(device)
optimizer.zero_grad()
optim_D.zero_grad()
save_idx = None
if i % save_at == 0 and i > 0:
save_idx = save_idx_global
save_idx_global += 1
embedding_loss, x_hat, perplexity = \
model(x, verbose=verbose, save_idx=save_idx, visual_folder=visual_folder)
recon_loss = torch.mean((x_hat - x)**2) / x_train_var
loss = recon_loss + embedding_loss
        '''
        Adversarial patch loss: MSE between discriminator scores and
        targets of 1 (real) / 0 (reconstructed).
        '''
B = args.batch_size
D = 16 * 16
ones = torch.ones((B, D), dtype=torch.float32, device=device)
zeros = torch.zeros((B, D), dtype=torch.float32, device=device)
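        # Alternating update: the VQ-VAE is optimised every step on its
        # reconstruction + codebook loss; on even steps an adversarial term
        # (discriminator scores pushed towards 1) is added to that loss, and on
        # odd steps the discriminator itself is trained with MSE targets of 1
        # for real and 0 for reconstructed images (an LSGAN-style objective).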
if i % 2 == 0:
fake = disc(x_hat).view(B, D)
loss += c_mse(fake, ones)
else:
fake = disc(x_hat.clone().detach()).view(B, D)
real = disc(x).view(B, D)
d_loss = c_mse(real, ones) + c_mse(fake, zeros)
results["d_loss"].append(d_loss.cpu().detach().numpy())
d_loss.backward()
optim_D.step()
loss.backward()
optimizer.step()
results["recon_errors"].append(recon_loss.cpu().detach().numpy())
results["perplexities"].append(perplexity.cpu().detach().numpy())
results["loss_vals"].append(loss.cpu().detach().numpy())
results["n_updates"] = i
if i % args.log_interval == 0:
"""
save model and print values
"""
if args.save:
hyperparameters = args.__dict__
utils.save_model_and_results(
model, optimizer, results, hyperparameters, args.filename)
print('Update #', i, 'Recon Error:',
np.mean(results["recon_errors"][-args.log_interval:]),
'Loss', np.mean(results["loss_vals"][-args.log_interval:]),
'Discriminator Loss', np.mean(results['d_loss'][-args.log_interval:]),
'Perplexity:', np.mean(results["perplexities"][-args.log_interval:]), flush=True)
if __name__ == "__main__":
# train_vqgan()
# train_blob2full()
parser = argparse.ArgumentParser()
"""
Hyperparameters
"""
timestamp = utils.readable_timestamp()
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_updates", type=int, default=50000)
parser.add_argument("--n_hiddens", type=int, default=128)
parser.add_argument("--n_residual_hiddens", type=int, default=32)
parser.add_argument("--n_residual_layers", type=int, default=2)
parser.add_argument("--embedding_dim", type=int, default=64)
parser.add_argument("--n_embeddings", type=int, default=512)
parser.add_argument("--beta", type=float, default=.25)
parser.add_argument("--learning_rate", type=float, default=3e-4)
parser.add_argument("--ckpt", type=str)
parser.add_argument("--log_interval", type=int, default=3)
parser.add_argument("--save_at", type=int, default=100)
parser.add_argument("--device_id", type=int, default=0)
parser.add_argument("--dataset", type=str, default='HandGestures')
parser.add_argument("--test", action='store_true')
# whether or not to save model
parser.add_argument("-save", action="store_true")
parser.add_argument("--filename", type=str, default=timestamp)
args = parser.parse_args()
args.save = True
if args.save and dist.is_primary():
print('Results will be saved in ./results/vqvae_' + args.filename + '.pth')
args.n_gpu = torch.cuda.device_count()
port = (
2 ** 15
+ 2 ** 14
+ hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
)+1
print(f'port: {port}')
print(args)
dist.launch(main, args.n_gpu, 1, 0, f"tcp://127.0.0.1:{port}", args=(args,))
|
[
"os.getuid",
"torch.cuda.device_count",
"torch.nn.MSELoss",
"models.vqvae.VQVAE",
"numpy.mean",
"models.discriminator.discriminator",
"argparse.ArgumentParser",
"torch.mean",
"utils.load_data_and_data_loaders",
"utils.readable_timestamp",
"utils.save_model_and_results",
"os.makedirs",
"distributed.launch",
"distributed.is_primary",
"torch.load",
"tqdm.tqdm",
"torch.no_grad",
"torch.zeros",
"torch.ones"
] |
[((356, 397), 'os.makedirs', 'os.makedirs', (['visual_folder'], {'exist_ok': '(True)'}), '(visual_folder, exist_ok=True)\n', (367, 397), False, 'import os\n'), ((783, 916), 'models.vqvae.VQVAE', 'VQVAE', (['args.n_hiddens', 'args.n_residual_hiddens', 'args.n_residual_layers', 'args.n_embeddings', 'args.embedding_dim', 'args.beta', 'device'], {}), '(args.n_hiddens, args.n_residual_hiddens, args.n_residual_layers, args\n .n_embeddings, args.embedding_dim, args.beta, device)\n', (788, 916), False, 'from models.vqvae import VQVAE, VQVAE_Blob2Full\n'), ((1289, 1352), 'utils.load_data_and_data_loaders', 'utils.load_data_and_data_loaders', (['args.dataset', 'args.batch_size'], {}), '(args.dataset, args.batch_size)\n', (1321, 1352), False, 'import utils\n'), ((3980, 3992), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3990, 3992), True, 'import torch.nn as nn\n'), ((6442, 6467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6465, 6467), False, 'import argparse\n'), ((6521, 6547), 'utils.readable_timestamp', 'utils.readable_timestamp', ([], {}), '()\n', (6545, 6547), False, 'import utils\n'), ((7844, 7869), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (7867, 7869), False, 'import torch\n'), ((8048, 8124), 'distributed.launch', 'dist.launch', (['main', 'args.n_gpu', '(1)', '(0)', 'f"""tcp://127.0.0.1:{port}"""'], {'args': '(args,)'}), "(main, args.n_gpu, 1, 0, f'tcp://127.0.0.1:{port}', args=(args,))\n", (8059, 8124), True, 'import distributed as dist\n'), ((1096, 1170), 'utils.load_data_and_data_loaders', 'utils.load_data_and_data_loaders', (['args.dataset', 'args.batch_size'], {'test': '(True)'}), '(args.dataset, args.batch_size, test=True)\n', (1128, 1170), False, 'import utils\n'), ((1891, 1903), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (1895, 1903), False, 'from tqdm import tqdm\n'), ((4826, 4880), 'torch.ones', 'torch.ones', (['(B, D)'], {'dtype': 'torch.float32', 'device': 'device'}), '((B, D), dtype=torch.float32, device=device)\n', (4836, 4880), False, 'import torch\n'), ((4897, 4952), 'torch.zeros', 'torch.zeros', (['(B, D)'], {'dtype': 'torch.float32', 'device': 'device'}), '((B, D), dtype=torch.float32, device=device)\n', (4908, 4952), False, 'import torch\n'), ((7723, 7740), 'distributed.is_primary', 'dist.is_primary', ([], {}), '()\n', (7738, 7740), True, 'import distributed as dist\n'), ((1966, 1981), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1979, 1981), False, 'import torch\n'), ((2403, 2431), 'torch.mean', 'torch.mean', (['((x_hat - x) ** 2)'], {}), '((x_hat - x) ** 2)\n', (2413, 2431), False, 'import torch\n'), ((3270, 3287), 'distributed.is_primary', 'dist.is_primary', ([], {}), '()\n', (3285, 3287), True, 'import distributed as dist\n'), ((4005, 4020), 'models.discriminator.discriminator', 'discriminator', ([], {}), '()\n', (4018, 4020), False, 'from models.discriminator import discriminator\n'), ((4583, 4611), 'torch.mean', 'torch.mean', (['((x_hat - x) ** 2)'], {}), '((x_hat - x) ** 2)\n', (4593, 4611), False, 'import torch\n'), ((998, 1019), 'torch.load', 'torch.load', (['args.ckpt'], {}), '(args.ckpt)\n', (1008, 1019), False, 'import torch\n'), ((2930, 2945), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2943, 2945), False, 'import torch\n'), ((3451, 3542), 'utils.save_model_and_results', 'utils.save_model_and_results', (['model', 'optimizer', 'results', 'hyperparameters', 'args.filename'], {}), '(model, optimizer, results, hyperparameters,\n args.filename)\n', (3479, 3542), False, 
'import utils\n'), ((3628, 3681), 'numpy.mean', 'np.mean', (["results['recon_errors'][-args.log_interval:]"], {}), "(results['recon_errors'][-args.log_interval:])\n", (3635, 3681), True, 'import numpy as np\n'), ((3709, 3759), 'numpy.mean', 'np.mean', (["results['loss_vals'][-args.log_interval:]"], {}), "(results['loss_vals'][-args.log_interval:])\n", (3716, 3759), True, 'import numpy as np\n'), ((3794, 3847), 'numpy.mean', 'np.mean', (["results['perplexities'][-args.log_interval:]"], {}), "(results['perplexities'][-args.log_interval:])\n", (3801, 3847), True, 'import numpy as np\n'), ((5857, 5948), 'utils.save_model_and_results', 'utils.save_model_and_results', (['model', 'optimizer', 'results', 'hyperparameters', 'args.filename'], {}), '(model, optimizer, results, hyperparameters,\n args.filename)\n', (5885, 5948), False, 'import utils\n'), ((6034, 6087), 'numpy.mean', 'np.mean', (["results['recon_errors'][-args.log_interval:]"], {}), "(results['recon_errors'][-args.log_interval:])\n", (6041, 6087), True, 'import numpy as np\n'), ((6115, 6165), 'numpy.mean', 'np.mean', (["results['loss_vals'][-args.log_interval:]"], {}), "(results['loss_vals'][-args.log_interval:])\n", (6122, 6165), True, 'import numpy as np\n'), ((6207, 6254), 'numpy.mean', 'np.mean', (["results['d_loss'][-args.log_interval:]"], {}), "(results['d_loss'][-args.log_interval:])\n", (6214, 6254), True, 'import numpy as np\n'), ((6289, 6342), 'numpy.mean', 'np.mean', (["results['perplexities'][-args.log_interval:]"], {}), "(results['perplexities'][-args.log_interval:])\n", (6296, 6342), True, 'import numpy as np\n'), ((7933, 7944), 'os.getuid', 'os.getuid', ([], {}), '()\n', (7942, 7944), False, 'import os\n')]
|
import numpy as np
import scipy.stats as stats
from UQpy.Distributions.baseclass.Distribution import Distribution
class DistributionContinuous1D(Distribution):
"""
Parent class for univariate continuous probability distributions.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def _check_x_dimension(x):
"""
Check the dimension of input x - must be an ndarray of shape (npoints,) or (npoints, 1)
"""
x = np.atleast_1d(x)
if len(x.shape) > 2 or (len(x.shape) == 2 and x.shape[1] != 1):
raise ValueError('Wrong dimension in x.')
return x.reshape((-1,))
def _construct_from_scipy(self, scipy_name=stats.rv_continuous):
self.cdf = lambda x: scipy_name.cdf(x=self._check_x_dimension(x), **self.params)
self.pdf = lambda x: scipy_name.pdf(x=self._check_x_dimension(x), **self.params)
self.log_pdf = lambda x: scipy_name.logpdf(x=self._check_x_dimension(x), **self.params)
self.icdf = lambda x: scipy_name.ppf(q=self._check_x_dimension(x), **self.params)
self.moments = lambda moments2return='mvsk': scipy_name.stats(moments=moments2return, **self.params)
self.rvs = lambda nsamples=1, random_state=None: scipy_name.rvs(
size=nsamples, random_state=random_state, **self.params).reshape((nsamples, 1))
def tmp_fit(dist, data):
data = self._check_x_dimension(data)
fixed_params = {}
for key, value in dist.params.items():
if value is not None:
fixed_params['f' + key] = value
params_fitted = scipy_name.fit(data=data, **fixed_params)
return dict(zip(dist.order_params, params_fitted))
self.fit = lambda data: tmp_fit(self, data)
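# Illustrative sketch only (not part of this module): a concrete distribution
# would typically pass its parameters to the Distribution base class and then
# delegate cdf/pdf/rvs/fit to the matching scipy.stats object through
# _construct_from_scipy. The exact base-class __init__ signature (how params
# and order_params are populated) is assumed here, not taken from this file.
#
# class Normal(DistributionContinuous1D):
#     def __init__(self, loc=0., scale=1.):
#         super().__init__(loc=loc, scale=scale, order_params=('loc', 'scale'))
#         self._construct_from_scipy(scipy_name=stats.norm)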
|
[
"numpy.atleast_1d"
] |
[((499, 515), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (512, 515), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_util_matrix
@author: jdiedrichsen
"""
import unittest
import pyrsa.util as rsu
import numpy as np
class TestIndicator(unittest.TestCase):
def test_indicator(self):
a = np.array(range(0, 5))
a = np.concatenate((a, a))
X = rsu.matrix.indicator(a)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 5)
self.assertEqual(X[0, 0], 1.0)
def test_indicator_pos(self):
a = np.array(range(0, 5))
a = np.concatenate((a, a))
X = rsu.matrix.indicator(a, positive=True)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 4)
self.assertEqual(X[0, 0], 0.0)
def test_pairwise(self):
a = np.array(range(0, 5))
X = rsu.matrix.pairwise_contrast(a)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 5)
self.assertEqual(X[0, 0], 1.0)
def test_centering(self):
X = rsu.matrix.centering(10)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 10)
if __name__ == '__main__':
unittest.main()
|
[
"pyrsa.util.matrix.indicator",
"pyrsa.util.matrix.pairwise_contrast",
"numpy.concatenate",
"unittest.main",
"pyrsa.util.matrix.centering"
] |
[((1224, 1239), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1237, 1239), False, 'import unittest\n'), ((275, 297), 'numpy.concatenate', 'np.concatenate', (['(a, a)'], {}), '((a, a))\n', (289, 297), True, 'import numpy as np\n'), ((310, 333), 'pyrsa.util.matrix.indicator', 'rsu.matrix.indicator', (['a'], {}), '(a)\n', (330, 333), True, 'import pyrsa.util as rsu\n'), ((556, 578), 'numpy.concatenate', 'np.concatenate', (['(a, a)'], {}), '((a, a))\n', (570, 578), True, 'import numpy as np\n'), ((591, 629), 'pyrsa.util.matrix.indicator', 'rsu.matrix.indicator', (['a'], {'positive': '(True)'}), '(a, positive=True)\n', (611, 629), True, 'import pyrsa.util as rsu\n'), ((847, 878), 'pyrsa.util.matrix.pairwise_contrast', 'rsu.matrix.pairwise_contrast', (['a'], {}), '(a)\n', (875, 878), True, 'import pyrsa.util as rsu\n'), ((1063, 1087), 'pyrsa.util.matrix.centering', 'rsu.matrix.centering', (['(10)'], {}), '(10)\n', (1083, 1087), True, 'import pyrsa.util as rsu\n')]
|
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout
import tensorflow.keras as keras
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
def data_prep(path, img_rows, img_cols, color):
"""
A function to preprocess the input data for a CNN.
    The images are resized, normalised to have pixel values between 0 and 1, converted into greyscale if required and put into a numpy array.
    Each class label is turned into a one-hot vector and added to a numpy array ordered so that each label matches its image.
    The data is shuffled so that each batch is representative of the overall data during training, which reduces overfitting to any single batch.
    This function requires that the images for each class are in a separate directory.
param:
- path, a string of the path to the directory containing the images
- img_rows, an integer for the number of rows the resized image should have
- img_cols, an integer for the number of columns the resized image should have
- color, a boolean that is set to true if the image should be in RGB colour space or false for greyscale
return:
- images, a numpy array of images with pixel values normalised to be between 0 and 1.
                  numpy array dimensions are [number of images, number of rows, number of columns, number of channels]
        - labels, a numpy array of labels associated with each image (labels are one-hot numpy arrays such as [1, 0, 0, ...] or [0, 1, 0, ...], etc.)
"""
images = []
labels = []
for image_class in os.listdir(path):
print('image_class =', image_class)
path_to_class_directory = os.path.join(path, image_class)
for img_name in os.listdir(path_to_class_directory):
true_path = os.path.join(path_to_class_directory, img_name)
if color:
images.append(cv2.imread(true_path, 1)/255.0)
else:
images.append(cv2.imread(true_path, 0)/255.0) # greyscale
labels.append(os.listdir(path).index(image_class))
data = list(zip(images, labels))
np.random.shuffle(data)
images, labels = zip(*data)
images = [cv2.resize(img, (img_rows, img_cols), cv2.INTER_AREA) for img in images] # resize images to all be the same
if color:
images = np.array(images).reshape(len(images), img_rows, img_cols, 3)
else:
images = np.array(images).reshape(len(images), img_rows, img_cols, 1)
labels = keras.utils.to_categorical(labels, num_classes=len(os.listdir(path)))
return images, labels
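# Usage sketch (illustrative, mirroring the call made in __main__ below): with a
# directory layout such as data/classA/*.jpg and data/classB/*.jpg, the call
#     images, labels = data_prep('data', 150, 150, color=True)
# returns images of shape (num_images, 150, 150, 3) with values in [0, 1] and
# labels of shape (num_images, num_classes) as one-hot rows.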
def build_CNN(img_rows, img_cols, color=False):
model = Sequential()
if color:
model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(img_rows, img_cols, 3)))
else:
model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(img_rows, img_cols, 1)))
model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu'))
model.add(Flatten())
#model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
return model
def decode_labels(coded, class_names):
"""
    A function to get the name of the class by decoding a one-hot array.
    Uses a list comprehension and integer (fancy) indexing.
    The list comprehension returns the index of the highest value in each one-hot array.
    That list of indices is then used to index a numpy array of class_names, giving the class name for each label in coded.
Param:
- coded, a numpy array of coded labels
- class_names, a list of the class_names in the same order they were coded (alphabetical)
Return:
- numpy array of class names for each label in coded
"""
return np.array(class_names)[[np.argmax(example) for example in coded]]
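# Worked example (hypothetical inputs): with
#     coded = np.array([[0, 1, 0], [1, 0, 0]])
#     class_names = ['cat', 'dog', 'bird']
# np.argmax picks indices [1, 0], so decode_labels(coded, class_names)
# returns array(['dog', 'cat']).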
def calc_accuracy(pred, real):
"""
A function to calculate the accuracy of a CNN when given a list of predicted classes and a list of the real classes
Param:
- pred, a numpy array of predicted classes
- real, a numpy array of the real classes
Return:
- Accuracy as a decimal
"""
return sum(pred==real) / len(pred)
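# Worked example (hypothetical inputs): with pred = np.array(['a', 'b', 'a']) and
# real = np.array(['a', 'a', 'a']), two of the three predictions match, so
# calc_accuracy(pred, real) returns 2/3.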
if __name__ == '__main__':
path = 'data'
img_rows = 150
img_cols = 150
is_color = True
model_filename = 'flare_cnn'
print('\nloading training data\n')
num_classes = len(os.listdir(path))
x, y = data_prep(path, img_rows, img_cols, color=is_color)
x_train, x_test, y_train, y_test = train_test_split(x, y)
print('\nbuilding model\n')
cnn = build_CNN(img_rows, img_cols, color=is_color)
print('\ntraining model\n')
cnn.fit(x_train, y_train, batch_size=50, epochs=1, validation_split=0.2)
print('\nsaving model\n')
if is_color:
model_filename = model_filename + '_RGB' + '.h5'
else:
model_filename = model_filename + '_grey' + '.h5'
cnn.save(model_filename)
print('\nsaved model to file {}\n'.format(model_filename))
print('\nloading model\n')
loaded_cnn = keras.models.load_model(model_filename)
print('\ngenerating predictions\n')
predictions = loaded_cnn.predict(x_test)
dec_preds = decode_labels(predictions, os.listdir(path))
dec_ytest = decode_labels(y_test, os.listdir(path))
    # F1 score would probably be a better metric due to skew of training examples (num B > num C)
print('\naccuracy =', calc_accuracy(dec_preds, dec_ytest))
|
[
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Sequential",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Flatten",
"cv2.resize",
"cv2.imread",
"numpy.random.shuffle"
] |
[((1704, 1720), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1714, 1720), False, 'import os\n'), ((2245, 2268), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (2262, 2268), True, 'import numpy as np\n'), ((2773, 2785), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (2783, 2785), False, 'from tensorflow.keras import Sequential\n'), ((4835, 4857), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {}), '(x, y)\n', (4851, 4857), False, 'from sklearn.model_selection import train_test_split\n'), ((5379, 5418), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['model_filename'], {}), '(model_filename)\n', (5402, 5418), True, 'import tensorflow.keras as keras\n'), ((1800, 1831), 'os.path.join', 'os.path.join', (['path', 'image_class'], {}), '(path, image_class)\n', (1812, 1831), False, 'import os\n'), ((1856, 1891), 'os.listdir', 'os.listdir', (['path_to_class_directory'], {}), '(path_to_class_directory)\n', (1866, 1891), False, 'import os\n'), ((2315, 2368), 'cv2.resize', 'cv2.resize', (['img', '(img_rows, img_cols)', 'cv2.INTER_AREA'], {}), '(img, (img_rows, img_cols), cv2.INTER_AREA)\n', (2325, 2368), False, 'import cv2\n'), ((3058, 3118), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(20)'], {'kernel_size': '(3, 3)', 'strides': '(1)', 'activation': '"""relu"""'}), "(20, kernel_size=(3, 3), strides=1, activation='relu')\n", (3064, 3118), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout\n'), ((3134, 3143), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3141, 3143), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout\n'), ((3189, 3218), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (3194, 3218), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout\n'), ((3234, 3274), 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (3239, 3274), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout\n'), ((4071, 4092), 'numpy.array', 'np.array', (['class_names'], {}), '(class_names)\n', (4079, 4092), True, 'import numpy as np\n'), ((4715, 4731), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4725, 4731), False, 'import os\n'), ((5548, 5564), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5558, 5564), False, 'import os\n'), ((5604, 5620), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5614, 5620), False, 'import os\n'), ((1917, 1964), 'os.path.join', 'os.path.join', (['path_to_class_directory', 'img_name'], {}), '(path_to_class_directory, img_name)\n', (1929, 1964), False, 'import os\n'), ((2818, 2920), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(20)'], {'kernel_size': '(3, 3)', 'strides': '(1)', 'activation': '"""relu"""', 'input_shape': '(img_rows, img_cols, 3)'}), "(20, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(\n img_rows, img_cols, 3))\n", (2824, 2920), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout\n'), ((2945, 3047), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(20)'], {'kernel_size': '(3, 3)', 'strides': '(1)', 'activation': '"""relu"""', 'input_shape': '(img_rows, img_cols, 1)'}), "(20, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(\n img_rows, img_cols, 1))\n", (2951, 3047), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, 
Dropout\n'), ((4094, 4112), 'numpy.argmax', 'np.argmax', (['example'], {}), '(example)\n', (4103, 4112), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2462, 2470), True, 'import numpy as np\n'), ((2542, 2558), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2550, 2558), True, 'import numpy as np\n'), ((2667, 2683), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2677, 2683), False, 'import os\n'), ((2017, 2041), 'cv2.imread', 'cv2.imread', (['true_path', '(1)'], {}), '(true_path, 1)\n', (2027, 2041), False, 'import cv2\n'), ((2097, 2121), 'cv2.imread', 'cv2.imread', (['true_path', '(0)'], {}), '(true_path, 0)\n', (2107, 2121), False, 'import cv2\n'), ((2167, 2183), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2177, 2183), False, 'import os\n')]
|
# STL imports
import random
import logging
import string
import time
import datetime
import random
import struct
import sys
from functools import wraps
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
logging.getLogger('faker').setLevel(logging.ERROR)
sys.path.append('.')
# grpc
from milvus.grpc_gen import milvus_pb2
def gen_vectors(num, dim):
return [[random.random() for _ in range(dim)] for _ in range(num)]
def gen_single_vector(dim):
return [[random.random() for _ in range(dim)]]
def gen_vector(nb, d, seed=np.random.RandomState(1234)):
xb = seed.rand(nb, d).astype("float32")
return xb.tolist()
def gen_unique_str(str=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return prefix if str is None else str + "_" + prefix
def get_current_day():
return time.strftime('%Y-%m-%d', time.localtime())
def get_last_day(day):
tmp = datetime.datetime.now() - datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def get_next_day(day):
tmp = datetime.datetime.now() + datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def gen_long_str(num):
    s = ''
    for _ in range(num):
        s += random.choice('tomorrow')
    return s
def gen_one_binary(topk):
ids = [random.randrange(10000000, 99999999) for _ in range(topk)]
distances = [random.random() for _ in range(topk)]
return milvus_pb2.TopKQueryResult(struct.pack(str(topk) + 'l', *ids), struct.pack(str(topk) + 'd', *distances))
def gen_nq_binaries(nq, topk):
return [gen_one_binary(topk) for _ in range(nq)]
def fake_query_bin_result(nq, topk):
return gen_nq_binaries(nq, topk)
class FakerProvider(BaseProvider):
def collection_name(self):
return 'collection_names' + str(random.randint(1000, 9999))
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def collection_name_factory():
return fake.collection_name()
def records_factory(dimension, nq):
return [[random.random() for _ in range(dimension)] for _ in range(nq)]
def binary_records_factory(dimension, nq):
def binary_record(bsize):
s_m = "abcdefghijklmnopqrstuvwxyz"
s_list = [s_m[random.randint(0, 25)] for _ in range(bsize)]
s = "".join(s_list)
return bytes(s, encoding="ASCII")
bs = dimension // 8
return [binary_record(bs) for _ in range(nq)]
def integer_factory(nq):
return [random.randint(0, 128) for _ in range(nq)]
def time_it(func):
@wraps(func)
def inner(*args, **kwrgs):
pref = time.perf_counter()
result = func(*args, **kwrgs)
delt = time.perf_counter() - pref
print(f"[{func.__name__}][{delt:.4}s]")
return result
return inner
|
[
"logging.getLogger",
"random.choice",
"random.randrange",
"time.perf_counter",
"functools.wraps",
"datetime.timedelta",
"faker.Faker",
"datetime.datetime.now",
"random.random",
"time.localtime",
"sys.path.append",
"random.randint",
"numpy.random.RandomState"
] |
[((301, 321), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (316, 321), False, 'import sys\n'), ((2030, 2043), 'faker.Faker', 'faker.Faker', ([], {}), '()\n', (2041, 2043), False, 'import faker\n'), ((578, 605), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (599, 605), True, 'import numpy as np\n'), ((2697, 2708), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2702, 2708), False, 'from functools import wraps\n'), ((249, 275), 'logging.getLogger', 'logging.getLogger', (['"""faker"""'], {}), "('faker')\n", (266, 275), False, 'import logging\n'), ((918, 934), 'time.localtime', 'time.localtime', ([], {}), '()\n', (932, 934), False, 'import time\n'), ((971, 994), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (992, 994), False, 'import datetime\n'), ((997, 1025), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'day'}), '(days=day)\n', (1015, 1025), False, 'import datetime\n'), ((1097, 1120), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1118, 1120), False, 'import datetime\n'), ((1123, 1151), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'day'}), '(days=day)\n', (1141, 1151), False, 'import datetime\n'), ((1269, 1294), 'random.choice', 'random.choice', (['"""tomorrow"""'], {}), "('tomorrow')\n", (1282, 1294), False, 'import random\n'), ((1357, 1393), 'random.randrange', 'random.randrange', (['(10000000)', '(99999999)'], {}), '(10000000, 99999999)\n', (1373, 1393), False, 'import random\n'), ((1433, 1448), 'random.random', 'random.random', ([], {}), '()\n', (1446, 1448), False, 'import random\n'), ((1998, 2020), 'random.randint', 'random.randint', (['(0)', '(999)'], {}), '(0, 999)\n', (2012, 2020), False, 'import random\n'), ((2628, 2650), 'random.randint', 'random.randint', (['(0)', '(128)'], {}), '(0, 128)\n', (2642, 2650), False, 'import random\n'), ((2755, 2774), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2772, 2774), False, 'import time\n'), ((410, 425), 'random.random', 'random.random', ([], {}), '()\n', (423, 425), False, 'import random\n'), ((511, 526), 'random.random', 'random.random', ([], {}), '()\n', (524, 526), False, 'import random\n'), ((728, 779), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (741, 779), False, 'import random\n'), ((2195, 2210), 'random.random', 'random.random', ([], {}), '()\n', (2208, 2210), False, 'import random\n'), ((2828, 2847), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2845, 2847), False, 'import time\n'), ((1858, 1884), 'random.randint', 'random.randint', (['(1000)', '(9999)'], {}), '(1000, 9999)\n', (1872, 1884), False, 'import random\n'), ((1935, 1961), 'random.randint', 'random.randint', (['(1000)', '(9999)'], {}), '(1000, 9999)\n', (1949, 1961), False, 'import random\n'), ((2398, 2419), 'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (2412, 2419), False, 'import random\n')]
|
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pickle
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj
tasks = [
'com_pos', 'com_vel', 'chassis_quat', 'chassis_ang_vel', 'toeFL_pos',
'toeFL_vel', 'toeFR_pos', 'toeFR_vel', 'toeRR_pos', 'toeRR_vel',
'toeRL_pos', 'toeRL_vel'
]
weights = [
'w_com', 'w_chassis_ori', 'w_toeFL', 'w_toeFR', 'w_toeRR', 'w_toeRL'
]
rf_z = ['rf_z_max_toeFL', 'rf_z_max_toeFR', 'rf_z_max_toeRR', 'rf_z_max_toeRL']
time = []
phase = []
rf_cmd = []
des, act = dict(), dict()
for topic in tasks:
des[topic] = []
act[topic] = []
w = dict()
for topic in weights:
w[topic] = []
rf_z_max = dict()
for topic in rf_z:
rf_z_max[topic] = []
with open('data/pnc.pkl', 'rb') as file:
while True:
try:
d = pickle.load(file)
time.append(d['time'])
phase.append(d['phase'])
for topic in tasks:
des[topic].append(d[topic + '_des'])
act[topic].append(d[topic])
for topic in weights:
w[topic].append(d[topic])
for topic in rf_z:
rf_z_max[topic].append(d[topic])
rf_cmd.append(d['rf_cmd'])
except EOFError:
break
for k, v in des.items():
des[k] = np.stack(v, axis=0)
for k, v in act.items():
act[k] = np.stack(v, axis=0)
rf_cmd = np.stack(rf_cmd, axis=0)
phase = np.stack(phase, axis=0)
## =============================================================================
## Plot Task
## =============================================================================
plot_task(time, des['com_pos'], act['com_pos'], des['com_vel'], act['com_vel'],
phase, 'com lin')
plot_task(time, des['chassis_quat'], act['chassis_quat'],
des['chassis_ang_vel'], act['chassis_ang_vel'], phase, 'pelvis ori')
plot_task(time, des['toeFL_pos'], act['toeFL_pos'], des['toeFL_vel'],
          act['toeFL_vel'], phase, 'front left foot lin')
plot_task(time, des['toeFR_pos'], act['toeFR_pos'], des['toeFR_vel'],
          act['toeFR_vel'], phase, 'front right foot lin')
plot_task(time, des['toeRR_pos'], act['toeRR_pos'], des['toeRR_vel'],
          act['toeRR_vel'], phase, 'rear right foot lin')
plot_task(time, des['toeRL_pos'], act['toeRL_pos'], des['toeRL_vel'],
          act['toeRL_vel'], phase, 'rear left foot lin')
## =============================================================================
## Plot WBC Solutions
## =============================================================================
plot_rf_quad(time, rf_cmd, phase)
## =============================================================================
## Plot Weights and Max Reaction Force Z
## =============================================================================
plot_weights(time, w, phase)
plot_rf_z_max(time, rf_z_max, phase)
plt.show()
|
[
"matplotlib.use",
"plot.helper.plot_rf_z_max",
"pickle.load",
"os.getcwd",
"numpy.stack",
"plot.helper.plot_weights",
"plot.helper.plot_task",
"sys.path.append",
"plot.helper.plot_rf_quad",
"matplotlib.pyplot.show"
] |
[((27, 38), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (36, 38), False, 'import os\n'), ((39, 59), 'sys.path.append', 'sys.path.append', (['cwd'], {}), '(cwd)\n', (54, 59), False, 'import sys\n'), ((112, 135), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (126, 135), False, 'import matplotlib\n'), ((1524, 1548), 'numpy.stack', 'np.stack', (['rf_cmd'], {'axis': '(0)'}), '(rf_cmd, axis=0)\n', (1532, 1548), True, 'import numpy as np\n'), ((1557, 1580), 'numpy.stack', 'np.stack', (['phase'], {'axis': '(0)'}), '(phase, axis=0)\n', (1565, 1580), True, 'import numpy as np\n'), ((1758, 1860), 'plot.helper.plot_task', 'plot_task', (['time', "des['com_pos']", "act['com_pos']", "des['com_vel']", "act['com_vel']", 'phase', '"""com lin"""'], {}), "(time, des['com_pos'], act['com_pos'], des['com_vel'], act[\n 'com_vel'], phase, 'com lin')\n", (1767, 1860), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((1867, 1998), 'plot.helper.plot_task', 'plot_task', (['time', "des['chassis_quat']", "act['chassis_quat']", "des['chassis_ang_vel']", "act['chassis_ang_vel']", 'phase', '"""pelvis ori"""'], {}), "(time, des['chassis_quat'], act['chassis_quat'], des[\n 'chassis_ang_vel'], act['chassis_ang_vel'], phase, 'pelvis ori')\n", (1876, 1998), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2005, 2121), 'plot.helper.plot_task', 'plot_task', (['time', "des['toeFL_pos']", "act['toeFL_pos']", "des['toeFL_vel']", "act['toeFL_vel']", 'phase', '"""left foot lin"""'], {}), "(time, des['toeFL_pos'], act['toeFL_pos'], des['toeFL_vel'], act[\n 'toeFL_vel'], phase, 'left foot lin')\n", (2014, 2121), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2128, 2244), 'plot.helper.plot_task', 'plot_task', (['time', "des['toeFR_pos']", "act['toeFR_pos']", "des['toeFR_vel']", "act['toeFR_vel']", 'phase', '"""left foot ori"""'], {}), "(time, des['toeFR_pos'], act['toeFR_pos'], des['toeFR_vel'], act[\n 'toeFR_vel'], phase, 'left foot ori')\n", (2137, 2244), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2251, 2368), 'plot.helper.plot_task', 'plot_task', (['time', "des['toeRR_pos']", "act['toeRR_pos']", "des['toeRR_vel']", "act['toeRR_vel']", 'phase', '"""right foot lin"""'], {}), "(time, des['toeRR_pos'], act['toeRR_pos'], des['toeRR_vel'], act[\n 'toeRR_vel'], phase, 'right foot lin')\n", (2260, 2368), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2375, 2492), 'plot.helper.plot_task', 'plot_task', (['time', "des['toeRL_pos']", "act['toeRL_pos']", "des['toeRL_vel']", "act['toeRL_vel']", 'phase', '"""right foot ori"""'], {}), "(time, des['toeRL_pos'], act['toeRL_pos'], des['toeRL_vel'], act[\n 'toeRL_vel'], phase, 'right foot ori')\n", (2384, 2492), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2683, 2716), 'plot.helper.plot_rf_quad', 'plot_rf_quad', (['time', 'rf_cmd', 'phase'], {}), '(time, rf_cmd, phase)\n', (2695, 2716), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2921, 2949), 'plot.helper.plot_weights', 'plot_weights', (['time', 'w', 'phase'], {}), '(time, w, phase)\n', (2933, 2949), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, 
plot_vector_traj\n'), ((2951, 2987), 'plot.helper.plot_rf_z_max', 'plot_rf_z_max', (['time', 'rf_z_max', 'phase'], {}), '(time, rf_z_max, phase)\n', (2964, 2987), False, 'from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj\n'), ((2989, 2999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2997, 2999), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1456), 'numpy.stack', 'np.stack', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (1445, 1456), True, 'import numpy as np\n'), ((1495, 1514), 'numpy.stack', 'np.stack', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (1503, 1514), True, 'import numpy as np\n'), ((941, 958), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (952, 958), False, 'import pickle\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
class CFG:
n = 10
mean = 0.0
variance = 1.0
t = 1000
esp = [0, 0.01, 0.05, 0.1, 0.15, 0.2]
n_try = 2000
class bandit():
def __init__(self, m, v):
self.m = m
self.v = v
self.mean = 0.0
self.cnt = 0
def reset(self):
self.mean = 0.0
self.cnt = 0
def get_reward(self):
reward = self.v * np.random.randn() + self.m
return reward
def update(self, reward):
self.cnt += 1
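        # incremental running mean: new_mean = old_mean + (reward - old_mean) / n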
self.mean = self.mean + 1/self.cnt * (reward - self.mean)
def get_result(e):
bandits = [bandit(np.random.randn(),CFG.variance) for i in range(CFG.n)]
res = []
global cnt
for _ in range(CFG.t):
if (np.random.random()<e):
choose = np.random.choice(CFG.n)
else:
choose = np.argmax([ban.mean for ban in bandits])
val = bandits[choose].get_reward()
res.append(val)
bandits[choose].update(val)
# print(res)
return res
plt.figure(figsize=(20, 10))
for e in CFG.esp:
res = np.zeros(CFG.t)
for tr in trange(CFG.n_try):
res += get_result(e)
print(res.shape)
res /= CFG.n_try
# print(res)
plt.plot(res, label = e)
print(f'done {e}')
plt.xlabel('step')
plt.ylabel('average reward')
plt.legend()
plt.savefig('figure_2_1.png')
plt.show()
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"numpy.random.choice",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argmax",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.random.randn",
"tqdm.trange",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1088, 1116), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1098, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1354), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), "('step')\n", (1346, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1355, 1383), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average reward"""'], {}), "('average reward')\n", (1365, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1396), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1394, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1426), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure_2_1.png"""'], {}), "('figure_2_1.png')\n", (1408, 1426), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1435, 1437), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1161), 'numpy.zeros', 'np.zeros', (['CFG.t'], {}), '(CFG.t)\n', (1154, 1161), True, 'import numpy as np\n'), ((1176, 1193), 'tqdm.trange', 'trange', (['CFG.n_try'], {}), '(CFG.n_try)\n', (1182, 1193), False, 'from tqdm import trange\n'), ((1287, 1309), 'matplotlib.pyplot.plot', 'plt.plot', (['res'], {'label': 'e'}), '(res, label=e)\n', (1295, 1309), True, 'import matplotlib.pyplot as plt\n'), ((671, 688), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (686, 688), True, 'import numpy as np\n'), ((794, 812), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (810, 812), True, 'import numpy as np\n'), ((838, 861), 'numpy.random.choice', 'np.random.choice', (['CFG.n'], {}), '(CFG.n)\n', (854, 861), True, 'import numpy as np\n'), ((897, 937), 'numpy.argmax', 'np.argmax', (['[ban.mean for ban in bandits]'], {}), '([ban.mean for ban in bandits])\n', (906, 937), True, 'import numpy as np\n'), ((460, 477), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (475, 477), True, 'import numpy as np\n')]
|
__description__ = \
"""
Fitter subclass for performing Bayesian (MCMC) fits.
"""
__author__ = "<NAME>"
__date__ = "2017-05-10"
from .base import Fitter
import emcee, corner
import numpy as np
import scipy.optimize as optimize
import multiprocessing
class BayesianFitter(Fitter):
"""
"""
def __init__(self,num_walkers=100,initial_walker_spread=1e-4,ml_guess=True,
num_steps=100,burn_in=0.1,num_threads=1):
"""
Initialize the bayesian fitter
Parameters
----------
num_walkers : int > 0
how many markov chains to have in the analysis
initial_walker_spread : float
each walker is initialized with parameters sampled from normal
distributions with mean equal to the initial guess and a standard
deviation of guess*initial_walker_spread
ml_guess : bool
if true, do an ML optimization to get the initial guess
num_steps:
number of steps to run the markov chains
burn_in : float between 0 and 1
fraction of samples to discard from the start of the run
num_threads : int or `"max"`
number of threads to use. if `"max"`, use the total number of
cpus. [NOT YET IMPLEMENTED]
"""
Fitter.__init__(self)
self._num_walkers = num_walkers
self._initial_walker_spread = initial_walker_spread
self._ml_guess = ml_guess
self._num_steps = num_steps
self._burn_in = burn_in
self._num_threads = num_threads
if self._num_threads == "max":
self._num_threads = multiprocessing.cpu_count()
        if not (isinstance(self._num_threads, int) and self._num_threads > 0):
err = "num_threads must be 'max' or a positive integer\n"
raise ValueError(err)
if self._num_threads != 1:
err = "multithreading has not yet been (fully) implemented.\n"
raise NotImplementedError(err)
self._success = None
self.fit_type = "bayesian"
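    # Construction sketch (illustrative, arbitrary values): e.g.
    # BayesianFitter(num_walkers=50, num_steps=500, burn_in=0.1) runs 50
    # walkers for 500 steps and discards the first 10% of each chain.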
def ln_prior(self,param):
"""
Log prior of fit parameters. Priors are uniform between bounds and
set to -np.inf outside of bounds.
Parameters
----------
param : array of floats
parameters to fit
Returns
-------
float value for log of priors.
"""
        # If a parameter falls outside of the bounds, make the prior -infinity
if np.sum(param < self._bounds[0,:]) > 0 or np.sum(param > self._bounds[1,:]) > 0:
return -np.inf
# otherwise, uniform
return 0.0
def ln_prob(self,param):
"""
Posterior probability of model parameters.
Parameters
----------
param : array of floats
parameters to fit
Returns
-------
        float value for log posterior probability
"""
        # Calculate prior. If not finite, this solution has a log likelihood
        # of -infinity
ln_prior = self.ln_prior(param)
if not np.isfinite(ln_prior):
return -np.inf
        # Calculate likelihood. If not finite, this solution has a log
        # likelihood of -infinity
ln_like = self.ln_like(param)
if not np.isfinite(ln_like):
return -np.inf
# log posterior is log prior plus log likelihood
return ln_prior + ln_like
def fit(self,model,parameters,bounds,y_obs,y_err=None,param_names=None):
"""
Fit the parameters.
Parameters
----------
model : callable
model to fit. model should take "parameters" as its only argument.
this should (usually) be GlobalFit._y_calc
parameters : array of floats
parameters to be optimized. usually constructed by GlobalFit._prep_fit
bounds : list
list of two lists containing lower and upper bounds
y_obs : array of floats
observations in an concatenated array
y_err : array of floats or None
standard deviation of each observation. if None, each observation
is assigned an error of 1/num_obs
param_names : array of str
names of parameters. If None, parameters assigned names p0,p1,..pN
"""
self._model = model
self._y_obs = y_obs
# Convert the bounds (list of lower and upper lists) into a 2d numpy array
self._bounds = np.array(bounds)
# If no error is specified, assign the error as 1/N, identical for all
# points
self._y_err = y_err
if y_err is None:
self._y_err = np.array([1/len(self._y_obs) for i in range(len(self._y_obs))])
if param_names is None:
self._param_names = ["p{}".format(i) for i in range(len(parameters))]
else:
self._param_names = param_names[:]
        # Make initial guess (ML or just whatever the parameters sent in were)
if self._ml_guess:
fn = lambda *args: -self.weighted_residuals(*args)
ml_fit = optimize.least_squares(fn,x0=parameters,bounds=self._bounds)
self._initial_guess = np.copy(ml_fit.x)
else:
self._initial_guess = np.copy(parameters)
# Create walker positions
# Size of perturbation in parameter depends on the scale of the parameter
perturb_size = self._initial_guess*self._initial_walker_spread
ndim = len(parameters)
pos = [self._initial_guess + np.random.randn(ndim)*perturb_size
for i in range(self._num_walkers)]
# Sample using walkers
self._fit_result = emcee.EnsembleSampler(self._num_walkers, ndim, self.ln_prob,
threads=self._num_threads)
self._fit_result.run_mcmc(pos, self._num_steps)
# Create list of samples
to_discard = int(round(self._burn_in*self._num_steps,0))
self._samples = self._fit_result.chain[:,to_discard:,:].reshape((-1,ndim))
self._lnprob = self._fit_result.lnprobability[:,:].reshape(-1)
# Get mean and standard deviation
self._estimate = np.mean(self._samples,axis=0)
self._stdev = np.std(self._samples,axis=0)
# Calculate 95% confidence intervals
self._ninetyfive = []
lower = int(round(0.025*self._samples.shape[0],0))
upper = int(round(0.975*self._samples.shape[0],0))
for i in range(self._samples.shape[1]):
nf = np.sort(self._samples[:,i])
self._ninetyfive.append([nf[lower],nf[upper]])
self._ninetyfive = np.array(self._ninetyfive)
self._success = True
@property
def fit_info(self):
"""
Information about the Bayesian run.
"""
output = {}
output["Num walkers"] = self._num_walkers
output["Initial walker spread"] = self._initial_walker_spread
output["Use ML guess"] = self._ml_guess
output["Num steps"] = self._num_steps
output["Burn in"] = self._burn_in
output["Final sample number"] = len(self._samples[:,0])
output["Num threads"] = self._num_threads
return output
@property
def samples(self):
"""
Bayesian samples.
"""
return self._samples
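# Usage sketch (illustrative only; the model callable, bounds and observations
# below are hypothetical placeholders, normally supplied by GlobalFit):
#     fitter = BayesianFitter(num_walkers=50, num_steps=500)
#     fitter.fit(model=some_y_calc,                 # callable taking the parameter array
#                parameters=np.array([1.0, 0.1]),   # initial guess
#                bounds=[[0.0, 0.0], [10.0, 1.0]],  # [lower bounds, upper bounds]
#                y_obs=observed_y)
#     posterior_samples = fitter.samples            # shape (num_kept_samples, num_params)
#     run_info = fitter.fit_info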
|
[
"numpy.mean",
"numpy.copy",
"scipy.optimize.least_squares",
"numpy.sort",
"multiprocessing.cpu_count",
"emcee.EnsembleSampler",
"numpy.array",
"numpy.sum",
"numpy.isfinite",
"numpy.std",
"numpy.random.randn"
] |
[((4549, 4565), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (4557, 4565), True, 'import numpy as np\n'), ((5769, 5861), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['self._num_walkers', 'ndim', 'self.ln_prob'], {'threads': 'self._num_threads'}), '(self._num_walkers, ndim, self.ln_prob, threads=self.\n _num_threads)\n', (5790, 5861), False, 'import emcee, corner\n'), ((6284, 6314), 'numpy.mean', 'np.mean', (['self._samples'], {'axis': '(0)'}), '(self._samples, axis=0)\n', (6291, 6314), True, 'import numpy as np\n'), ((6336, 6365), 'numpy.std', 'np.std', (['self._samples'], {'axis': '(0)'}), '(self._samples, axis=0)\n', (6342, 6365), True, 'import numpy as np\n'), ((6739, 6765), 'numpy.array', 'np.array', (['self._ninetyfive'], {}), '(self._ninetyfive)\n', (6747, 6765), True, 'import numpy as np\n'), ((1658, 1685), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1683, 1685), False, 'import multiprocessing\n'), ((3126, 3147), 'numpy.isfinite', 'np.isfinite', (['ln_prior'], {}), '(ln_prior)\n', (3137, 3147), True, 'import numpy as np\n'), ((3335, 3355), 'numpy.isfinite', 'np.isfinite', (['ln_like'], {}), '(ln_like)\n', (3346, 3355), True, 'import numpy as np\n'), ((5175, 5237), 'scipy.optimize.least_squares', 'optimize.least_squares', (['fn'], {'x0': 'parameters', 'bounds': 'self._bounds'}), '(fn, x0=parameters, bounds=self._bounds)\n', (5197, 5237), True, 'import scipy.optimize as optimize\n'), ((5270, 5287), 'numpy.copy', 'np.copy', (['ml_fit.x'], {}), '(ml_fit.x)\n', (5277, 5287), True, 'import numpy as np\n'), ((5336, 5355), 'numpy.copy', 'np.copy', (['parameters'], {}), '(parameters)\n', (5343, 5355), True, 'import numpy as np\n'), ((6624, 6652), 'numpy.sort', 'np.sort', (['self._samples[:, i]'], {}), '(self._samples[:, i])\n', (6631, 6652), True, 'import numpy as np\n'), ((2524, 2558), 'numpy.sum', 'np.sum', (['(param < self._bounds[0, :])'], {}), '(param < self._bounds[0, :])\n', (2530, 2558), True, 'import numpy as np\n'), ((2565, 2599), 'numpy.sum', 'np.sum', (['(param > self._bounds[1, :])'], {}), '(param > self._bounds[1, :])\n', (2571, 2599), True, 'import numpy as np\n'), ((5625, 5646), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (5640, 5646), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# We must always import the relevant libraries for our problem at hand. NumPy and TensorFlow are required for this example.
# https://www.kaggle.com/c/costa-rican-household-poverty-prediction/data#_=_
import numpy as np
np.set_printoptions(threshold=np.inf)
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
def toInt(x):
    if x == 'yes':
        return 1
    elif x == 'no':
        return 0
    else:
        return x
costa_rica_household = pd.read_csv('data/train.csv')
#x1 =
costa_rica_household.describe()
#x1["v2a1"]
costa_rica_household.head()
list(costa_rica_household.dtypes)
#costa_rica_household = costa_rica_household.fillna(0)
costa_rica_household = costa_rica_household.fillna(costa_rica_household.mean())
#costa_rica_household["idhogar"] = costa_rica_household["idhogar"].apply(lambda x: int(x, 16))
#costa_rica_household["dependency"] = costa_rica_household["dependency"].apply(lambda x: toInt(x))
#costa_rica_household["edjefe"] = costa_rica_household["edjefe"].apply(lambda x: toInt(x))//edjefa
#costa_rica_household.loc[costa_rica_household['dependency'] == "'<='"]
#v1 = costa_rica_household[costa_rica_household['dependency'].apply(lambda x: type(x) == str)]['dependency']
#col_name = costa_rica_household.columns
#print(list(col_name))
#costa_rica_household[["age", "SQBage", "agesq", "r4h1", "r4h2"]]
cols_to_norm = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1',
'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother', 'pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso', 'techocane', 'techootro',
'cielorazo', 'abastaguadentro', 'abastaguafuera', 'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1', 'sanitario2', 'sanitario3',
'sanitario5', 'sanitario6', 'energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',
'elimbasu5', 'elimbasu6', 'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3', 'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female',
'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',
'parentesco12', 'hogar_nin', 'hogar_adul', 'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1',
'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer', 'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2', 'lugar3', 'lugar4',
'lugar5', 'lugar6', 'area1', 'area2', 'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin', 'SQBovercrowding', 'SQBdependency',
'SQBmeaned', 'agesq']
cat_cols_to_norm = ['r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3']
cols_of_interest = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1', 'r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3',
'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother', 'pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso', 'techocane', 'techootro',
'cielorazo', 'abastaguadentro', 'abastaguafuera', 'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1', 'sanitario2', 'sanitario3',
'sanitario5', 'sanitario6', 'energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',
'elimbasu5', 'elimbasu6', 'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3', 'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female',
'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',
'parentesco12', 'hogar_nin', 'hogar_adul', 'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1',
'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer', 'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2', 'lugar3', 'lugar4',
'lugar5', 'lugar6', 'area1', 'area2', 'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin', 'SQBovercrowding', 'SQBdependency',
'SQBmeaned', 'agesq']
#costa_rica_household[cols_to_norm] = costa_rica_household[cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
#costa_rica_household[cat_cols_to_norm] = costa_rica_household[cat_cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
costa_rica_household[cols_of_interest] = costa_rica_household[cols_of_interest].apply(lambda x: (x - x.min())/(x.max() - x.min()))
feat_cols = []
for col_name in cols_to_norm:
col_name = tf.feature_column.numeric_column(col_name)
feat_cols.append(col_name)
age_range_count = [1,2,3,4,5,7]
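# The household person-count columns (r4h*, r4m*, r4t*) are bucketized with the
# age_range_count boundaries above and then crossed (hashed into 100 buckets) so
# the linear model can capture interactions between the individual counts.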
r4h1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h1'), boundaries=age_range_count)
r4h2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h2'), boundaries=age_range_count)
r4h3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h3'), boundaries=age_range_count)
crossed_r4h = tf.feature_column.crossed_column([r4h1_bucket, r4h2_bucket, r4h3_bucket], 100)
#fc = [r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h]
r4m1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m1'), boundaries=age_range_count)
r4m2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m2'), boundaries=age_range_count)
r4m3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m3'), boundaries=age_range_count)
crossed_r4m = tf.feature_column.crossed_column([r4m1_bucket, r4m2_bucket, r4m3_bucket], 100)
r4t1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t1'), boundaries=age_range_count)
r4t2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t2'), boundaries=age_range_count)
r4t3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t3'), boundaries=age_range_count)
crossed_r4t = tf.feature_column.crossed_column([r4t1_bucket, r4t2_bucket, r4t3_bucket], 100)
feat_cols.extend([r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h, r4m1_bucket, r4m2_bucket, r4m3_bucket, crossed_r4m, r4t1_bucket, r4t2_bucket, r4t3_bucket, crossed_r4t])
len(feat_cols)
feat_cols[138]
estimator = tf.estimator.LinearClassifier(feature_columns=feat_cols, n_classes=4)
#costa_rica_household[(costa_rica_household.Target == 4)]
x_data = costa_rica_household.drop('Id', axis=1).drop('edjefa', axis=1).drop('idhogar', axis=1).drop('dependency', axis=1).drop('Target', axis=1)
#x_data['idhogar']
#x_data.describe()
#x_data.head()
labels = costa_rica_household['Target']
labels.head()
from sklearn.model_selection import train_test_split
X_train, X_eval, y_train, y_eval = train_test_split(x_data, labels, test_size=0.3, random_state=101)
print(X_train.shape, y_eval.shape)
input_func = tf.estimator.inputs.pandas_input_fn(x=X_train, y=y_train, batch_size=10, num_epochs=100, shuffle=True)
estimator.train(input_fn=input_func,steps=1000)
eval_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, y=y_eval, batch_size=10, num_epochs=1, shuffle=False)
eval_metrics = estimator.evaluate(input_fn=eval_input_func)
print('Eval metrics')
print(eval_metrics)
pred_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, shuffle=False)
predictions = []
for predict in estimator.predict(input_fn=pred_input_func):
predictions.append(predict)
predictions
#categorical_columun_voc = tf.feature_column.embedding_column(categorical_columun_voc, 4)
dnn_classifier = tf.estimator.DNNClassifier(hidden_units=[10, 10, 10], feature_columns=feat_cols, n_classes=2)
dnn_classifier.train(input_fn=input_func,steps=1000)
dnn_eval_metrics = dnn_classifier.evaluate(input_fn=eval_input_func)
dnn_eval_metrics
|
[
"tensorflow.feature_column.crossed_column",
"tensorflow.estimator.DNNClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"tensorflow.estimator.LinearClassifier",
"tensorflow.feature_column.numeric_column",
"tensorflow.estimator.inputs.pandas_input_fn",
"numpy.set_printoptions"
] |
[((245, 281), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '"""nan"""'}), "(threshold='nan')\n", (264, 281), True, 'import numpy as np\n'), ((521, 550), 'pandas.read_csv', 'pd.read_csv', (['"""data/train.csv"""'], {}), "('data/train.csv')\n", (532, 550), True, 'import pandas as pd\n'), ((6250, 6328), 'tensorflow.feature_column.crossed_column', 'tf.feature_column.crossed_column', (['[r4h1_bucket, r4h2_bucket, r4h3_bucket]', '(100)'], {}), '([r4h1_bucket, r4h2_bucket, r4h3_bucket], 100)\n', (6282, 6328), True, 'import tensorflow as tf\n'), ((6763, 6841), 'tensorflow.feature_column.crossed_column', 'tf.feature_column.crossed_column', (['[r4m1_bucket, r4m2_bucket, r4m3_bucket]', '(100)'], {}), '([r4m1_bucket, r4m2_bucket, r4m3_bucket], 100)\n', (6795, 6841), True, 'import tensorflow as tf\n'), ((7217, 7295), 'tensorflow.feature_column.crossed_column', 'tf.feature_column.crossed_column', (['[r4t1_bucket, r4t2_bucket, r4t3_bucket]', '(100)'], {}), '([r4t1_bucket, r4t2_bucket, r4t3_bucket], 100)\n', (7249, 7295), True, 'import tensorflow as tf\n'), ((7517, 7586), 'tensorflow.estimator.LinearClassifier', 'tf.estimator.LinearClassifier', ([], {'feature_columns': 'feat_cols', 'n_classes': '(4)'}), '(feature_columns=feat_cols, n_classes=4)\n', (7546, 7586), True, 'import tensorflow as tf\n'), ((7989, 8054), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_data', 'labels'], {'test_size': '(0.3)', 'random_state': '(101)'}), '(x_data, labels, test_size=0.3, random_state=101)\n', (8005, 8054), False, 'from sklearn.model_selection import train_test_split\n'), ((8104, 8210), 'tensorflow.estimator.inputs.pandas_input_fn', 'tf.estimator.inputs.pandas_input_fn', ([], {'x': 'X_train', 'y': 'y_train', 'batch_size': '(10)', 'num_epochs': '(100)', 'shuffle': '(True)'}), '(x=X_train, y=y_train, batch_size=10,\n num_epochs=100, shuffle=True)\n', (8139, 8210), True, 'import tensorflow as tf\n'), ((8274, 8377), 'tensorflow.estimator.inputs.pandas_input_fn', 'tf.estimator.inputs.pandas_input_fn', ([], {'x': 'X_eval', 'y': 'y_eval', 'batch_size': '(10)', 'num_epochs': '(1)', 'shuffle': '(False)'}), '(x=X_eval, y=y_eval, batch_size=10,\n num_epochs=1, shuffle=False)\n', (8309, 8377), True, 'import tensorflow as tf\n'), ((8498, 8558), 'tensorflow.estimator.inputs.pandas_input_fn', 'tf.estimator.inputs.pandas_input_fn', ([], {'x': 'X_eval', 'shuffle': '(False)'}), '(x=X_eval, shuffle=False)\n', (8533, 8558), True, 'import tensorflow as tf\n'), ((8789, 8887), 'tensorflow.estimator.DNNClassifier', 'tf.estimator.DNNClassifier', ([], {'hidden_units': '[10, 10, 10]', 'feature_columns': 'feat_cols', 'n_classes': '(2)'}), '(hidden_units=[10, 10, 10], feature_columns=\n feat_cols, n_classes=2)\n', (8815, 8887), True, 'import tensorflow as tf\n'), ((5768, 5810), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['col_name'], {}), '(col_name)\n', (5800, 5810), True, 'import tensorflow as tf\n'), ((5926, 5966), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4h1"""'], {}), "('r4h1')\n", (5958, 5966), True, 'import tensorflow as tf\n'), ((6046, 6086), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4h2"""'], {}), "('r4h2')\n", (6078, 6086), True, 'import tensorflow as tf\n'), ((6166, 6206), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4h3"""'], {}), "('r4h3')\n", (6198, 6206), True, 'import tensorflow as tf\n'), ((6439, 6479), 
'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4m1"""'], {}), "('r4m1')\n", (6471, 6479), True, 'import tensorflow as tf\n'), ((6559, 6599), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4m2"""'], {}), "('r4m2')\n", (6591, 6599), True, 'import tensorflow as tf\n'), ((6679, 6719), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4m3"""'], {}), "('r4m3')\n", (6711, 6719), True, 'import tensorflow as tf\n'), ((6893, 6933), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4t1"""'], {}), "('r4t1')\n", (6925, 6933), True, 'import tensorflow as tf\n'), ((7013, 7053), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4t2"""'], {}), "('r4t2')\n", (7045, 7053), True, 'import tensorflow as tf\n'), ((7133, 7173), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""r4t3"""'], {}), "('r4t3')\n", (7165, 7173), True, 'import tensorflow as tf\n')]
|
# Adapted by <NAME>, 2019
#
# Based on Detectron.pytorch/lib/roi_data/fast_rcnn.py
# Original license text:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Construct minibatches for Fast R-CNN training. Handles the minibatch blobs
that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.
are handled by their respective roi_data modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numpy.random as npr
import logging
from core.config import cfg
import utils_rel.boxes_rel as box_utils_rel
import utils.blob as blob_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def add_rel_blobs(blobs, im_scales, roidb):
"""Add blobs needed for training Fast R-CNN style models."""
# Sample training RoIs from each image and append them to the blob lists
for im_i, entry in enumerate(roidb):
frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i)
for k, v in frcn_blobs.items():
blobs[k].append(v)
# Concat the training blob lists into tensors
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
_add_rel_multilevel_rois(blobs)
return True
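# Hedged usage sketch (not part of the original pipeline): add_rel_blobs()
# expects a dict whose values are lists, one per blob name, so the per-image
# arrays can be appended before the final concatenation above.  The helper
# below only illustrates that contract; roidb and im_scales are whatever the
# surrounding data loader produces.
def _example_build_rel_blobs(roidb, im_scales):
    from collections import defaultdict
    blobs = defaultdict(list)  # every new blob name starts out as an empty list
    add_rel_blobs(blobs, im_scales, roidb)
    return blobs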
def _sample_pairs(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM
    pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION)  # need many more total pairs since the number of candidate pairs is quadratic in the number of boxes
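    # Worked example of the sizing above (the config values here are purely
    # illustrative, not the repository defaults): with FG_REL_SIZE_PER_IM = 512
    # and FG_REL_FRACTION = 0.25, at most 512 foreground pairs and
    # 512 / 0.25 = 2048 total pairs are sampled per image, leaving up to 1536
    # slots for background pairs.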
max_pair_overlaps = roidb['max_pair_overlaps']
if cfg.MODEL.MULTI_RELATION:
prd_gt_overlaps = roidb['prd_gt_overlaps'].toarray()
prd_class_num = prd_gt_overlaps.shape[1]
gt_pair_inds, gt_pair_class = np.where(prd_gt_overlaps > 1.0 - 1e-4)
fg_pair_inds, fg_pair_class = np.where((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) &
(prd_gt_overlaps <= 1.0 - 1e-4))
hash_gt_pair_inds = prd_class_num * gt_pair_inds + gt_pair_class
hash_fg_pair_inds = prd_class_num * fg_pair_inds + fg_pair_class
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, hash_gt_pair_inds.size + hash_fg_pair_inds.size)
if hash_fg_pair_inds.size > 0 and fg_pairs_per_this_image > hash_gt_pair_inds.size:
hash_fg_pair_inds = npr.choice(
hash_fg_pair_inds, size=(fg_pairs_per_this_image - hash_gt_pair_inds.size), replace=False)
hash_fg_pair_inds = np.append(hash_fg_pair_inds, hash_gt_pair_inds)
elif fg_pairs_per_this_image <= hash_gt_pair_inds.size:
hash_gt_pair_inds = npr.choice(
hash_gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
hash_fg_pair_inds = hash_gt_pair_inds
else:
hash_fg_pair_inds = hash_gt_pair_inds
blob_dict = {}
if cfg.MODEL.USE_BG:
bg_pair_inds, bg_pair_class_inds = np.where((prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI))
hash_bg_pair_inds = prd_class_num * bg_pair_inds + bg_pair_class_inds
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, hash_bg_pair_inds.size)
if hash_bg_pair_inds.size > 0:
hash_bg_pair_inds = npr.choice(
hash_bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
hash_keep_pair_inds = np.append(hash_fg_pair_inds, hash_bg_pair_inds)
multi_prd_labels = np.zeros(hash_keep_pair_inds.size, dtype=np.int32)
multi_prd_labels[:hash_fg_pair_inds.size] = 1.0 #fg_multi_prd_labels
keep_pair_inds = np.append(hash_fg_pair_inds // prd_class_num, hash_bg_pair_inds // prd_class_num)
keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num, hash_bg_pair_inds % prd_class_num)
else:
            multi_prd_labels = np.ones(hash_fg_pair_inds.size, dtype=np.int32)  # all kept pairs are foreground
            keep_pair_inds = hash_fg_pair_inds // prd_class_num
            keep_pair_class = hash_fg_pair_inds % prd_class_num
blob_dict['multi_prd_labels_int32'] = multi_prd_labels.astype(np.int32, copy=False)
blob_dict['keep_pair_class_int32'] = keep_pair_class.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([hash_fg_pair_inds.size], dtype=np.int32)
else:
gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]
fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps <= 1.0 - 1e-4))[0]
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)
# Sample foreground regions without replacement
if fg_pair_inds.size > 0 and fg_pairs_per_this_image > gt_pair_inds.size:
fg_pair_inds = npr.choice(
fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)
fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)
elif fg_pairs_per_this_image <= gt_pair_inds.size:
gt_pair_inds = npr.choice(
gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
fg_pair_inds = gt_pair_inds
else:
fg_pair_inds = gt_pair_inds
# Label is the class each RoI has max overlap with
fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]
blob_dict = dict(
fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))
if cfg.MODEL.USE_BG:
bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)
            # Sample background regions without replacement
if bg_pair_inds.size > 0:
bg_pair_inds = npr.choice(
bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
# logger.info('{} : {}'.format(fg_pair_inds.size, bg_pair_inds.size))
keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)
all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)
all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1
else:
keep_pair_inds = fg_pair_inds
all_prd_labels = fg_prd_labels
blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn
sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]
sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]
sampled_all_boxes = roidb['all_boxes']
det_labels = roidb['det_labels']
sampled_sbj_inds = roidb['sbj_id'][keep_pair_inds]
sampled_obj_inds = roidb['obj_id'][keep_pair_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois = sampled_sbj_boxes * im_scale
sampled_obj_rois = sampled_obj_boxes * im_scale
sampled_all_rois = sampled_all_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))
all_boxes_repeated_batch_idx = batch_idx * blob_utils.ones((sampled_all_boxes.shape[0], 1))
sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))
sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))
sampled_all_rois = np.hstack((all_boxes_repeated_batch_idx, sampled_all_rois))
int_repeated_batch_idx = batch_idx * np.ones((keep_pair_inds.shape[0], 1), dtype=np.int)
blob_dict['sbj_inds'] = np.hstack((repeated_batch_idx, sampled_sbj_inds.reshape(-1, 1)))
blob_dict['obj_inds'] = np.hstack((repeated_batch_idx, sampled_obj_inds.reshape(-1, 1)))
blob_dict['sbj_rois'] = sampled_sbj_rois
blob_dict['obj_rois'] = sampled_obj_rois
blob_dict['det_rois'] = sampled_all_rois
blob_dict['det_labels'] = det_labels
sampled_rel_rois = box_utils_rel.rois_union(sampled_sbj_rois, sampled_obj_rois)
blob_dict['rel_rois'] = sampled_rel_rois
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat = box_utils_rel.get_spt_features(
sampled_sbj_boxes, sampled_obj_boxes, roidb['width'], roidb['height'])
blob_dict['spt_feat'] = sampled_spt_feat
if cfg.MODEL.USE_FREQ_BIAS:
sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]
obj_labels = roidb['max_obj_classes'][keep_pair_inds]
blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False)
blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False)
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
nodes_per_image = cfg.MODEL.NODE_SAMPLE_SIZE
max_sbj_overlaps = roidb['max_sbj_overlaps']
max_obj_overlaps = roidb['max_obj_overlaps']
# sbj
        # A natural assumption here is that each positive sbj has at least one positive obj
sbj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
sbj_pos_obj_pos_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
sbj_pos_obj_neg_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if sbj_pos_pair_pos_inds.size > 0:
sbj_pos_pair_pos_inds = npr.choice(
sbj_pos_pair_pos_inds,
size=int(min(nodes_per_image, sbj_pos_pair_pos_inds.size)),
replace=False)
if sbj_pos_obj_pos_pair_neg_inds.size > 0:
sbj_pos_obj_pos_pair_neg_inds = npr.choice(
sbj_pos_obj_pos_pair_neg_inds,
size=int(min(nodes_per_image, sbj_pos_obj_pos_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = sbj_pos_obj_pos_pair_neg_inds
if nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size > 0 and sbj_pos_obj_neg_pair_neg_inds.size > 0:
sbj_pos_obj_neg_pair_neg_inds = npr.choice(
sbj_pos_obj_neg_pair_neg_inds,
size=int(min(nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size, sbj_pos_obj_neg_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = np.append(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds)
sbj_pos_inds = np.append(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds)
binary_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
binary_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_sbj_pos_int32'] = binary_labels_sbj_pos.astype(np.int32, copy=False)
prd_pos_labels_sbj_pos = roidb['max_prd_classes'][sbj_pos_pair_pos_inds]
prd_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
prd_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = prd_pos_labels_sbj_pos + 1
blob_dict['prd_labels_sbj_pos_int32'] = prd_labels_sbj_pos.astype(np.int32, copy=False)
sbj_labels_sbj_pos = roidb['max_sbj_classes'][sbj_pos_inds] + 1
# 1. set all obj labels > 0
obj_labels_sbj_pos = roidb['max_obj_classes'][sbj_pos_inds] + 1
# 2. find those negative obj
max_obj_overlaps_sbj_pos = roidb['max_obj_overlaps'][sbj_pos_inds]
obj_neg_inds_sbj_pos = np.where(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)[0]
obj_labels_sbj_pos[obj_neg_inds_sbj_pos] = 0
blob_dict['sbj_labels_sbj_pos_int32'] = sbj_labels_sbj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_int32'] = obj_labels_sbj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_sbj_pos_fg_int32'] = roidb['max_sbj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_fg_int32'] = roidb['max_obj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_sbj_pos = roidb['sbj_boxes'][sbj_pos_inds]
sampled_obj_boxes_sbj_pos = roidb['obj_boxes'][sbj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_sbj_pos = sampled_sbj_boxes_sbj_pos * im_scale
sampled_obj_rois_sbj_pos = sampled_obj_boxes_sbj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((sbj_pos_inds.shape[0], 1))
sampled_sbj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_sbj_pos))
sampled_obj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_sbj_pos))
blob_dict['sbj_rois_sbj_pos'] = sampled_sbj_rois_sbj_pos
blob_dict['obj_rois_sbj_pos'] = sampled_obj_rois_sbj_pos
sampled_rel_rois_sbj_pos = box_utils_rel.rois_union(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos)
blob_dict['rel_rois_sbj_pos'] = sampled_rel_rois_sbj_pos
_, inds_unique_sbj_pos, inds_reverse_sbj_pos = np.unique(
sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_sbj_pos.shape[0] == sampled_sbj_rois_sbj_pos.shape[0]
blob_dict['inds_unique_sbj_pos'] = inds_unique_sbj_pos
blob_dict['inds_reverse_sbj_pos'] = inds_reverse_sbj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_sbj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_sbj_pos, sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_sbj_pos'] = sampled_spt_feat_sbj_pos
# obj
        # A natural assumption here is that each positive obj has at least one positive sbj
obj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
obj_pos_sbj_pos_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
obj_pos_sbj_neg_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if obj_pos_pair_pos_inds.size > 0:
obj_pos_pair_pos_inds = npr.choice(
obj_pos_pair_pos_inds,
size=int(min(nodes_per_image, obj_pos_pair_pos_inds.size)),
replace=False)
if obj_pos_sbj_pos_pair_neg_inds.size > 0:
obj_pos_sbj_pos_pair_neg_inds = npr.choice(
obj_pos_sbj_pos_pair_neg_inds,
size=int(min(nodes_per_image, obj_pos_sbj_pos_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = obj_pos_sbj_pos_pair_neg_inds
        if nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size > 0 and obj_pos_sbj_neg_pair_neg_inds.size > 0:
obj_pos_sbj_neg_pair_neg_inds = npr.choice(
obj_pos_sbj_neg_pair_neg_inds,
size=int(min(nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size, obj_pos_sbj_neg_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = np.append(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds)
obj_pos_inds = np.append(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds)
binary_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
binary_labels_obj_pos[:obj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_obj_pos_int32'] = binary_labels_obj_pos.astype(np.int32, copy=False)
prd_pos_labels_obj_pos = roidb['max_prd_classes'][obj_pos_pair_pos_inds]
prd_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
prd_labels_obj_pos[:obj_pos_pair_pos_inds.size] = prd_pos_labels_obj_pos + 1
blob_dict['prd_labels_obj_pos_int32'] = prd_labels_obj_pos.astype(np.int32, copy=False)
obj_labels_obj_pos = roidb['max_obj_classes'][obj_pos_inds] + 1
# 1. set all sbj labels > 0
sbj_labels_obj_pos = roidb['max_sbj_classes'][obj_pos_inds] + 1
# 2. find those negative sbj
max_sbj_overlaps_obj_pos = roidb['max_sbj_overlaps'][obj_pos_inds]
sbj_neg_inds_obj_pos = np.where(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)[0]
sbj_labels_obj_pos[sbj_neg_inds_obj_pos] = 0
blob_dict['sbj_labels_obj_pos_int32'] = sbj_labels_obj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_int32'] = obj_labels_obj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_obj_pos_fg_int32'] = roidb['max_sbj_classes'][obj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_fg_int32'] = roidb['max_obj_classes'][obj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_obj_pos = roidb['sbj_boxes'][obj_pos_inds]
sampled_obj_boxes_obj_pos = roidb['obj_boxes'][obj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_obj_pos = sampled_sbj_boxes_obj_pos * im_scale
sampled_obj_rois_obj_pos = sampled_obj_boxes_obj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((obj_pos_inds.shape[0], 1))
sampled_sbj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_obj_pos))
sampled_obj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_obj_pos))
blob_dict['sbj_rois_obj_pos'] = sampled_sbj_rois_obj_pos
blob_dict['obj_rois_obj_pos'] = sampled_obj_rois_obj_pos
sampled_rel_rois_obj_pos = box_utils_rel.rois_union(sampled_sbj_rois_obj_pos, sampled_obj_rois_obj_pos)
blob_dict['rel_rois_obj_pos'] = sampled_rel_rois_obj_pos
_, inds_unique_obj_pos, inds_reverse_obj_pos = np.unique(
sampled_obj_rois_obj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_obj_pos.shape[0] == sampled_obj_rois_obj_pos.shape[0]
blob_dict['inds_unique_obj_pos'] = inds_unique_obj_pos
blob_dict['inds_reverse_obj_pos'] = inds_reverse_obj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_obj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_obj_pos, sampled_obj_boxes_obj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_obj_pos'] = sampled_spt_feat_obj_pos
return blob_dict
def _add_rel_multilevel_rois(blobs):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
    according to the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_names):
"""Distribute rois over the different FPN levels."""
# Get target level for each roi
# Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take
# the box coordinates from columns 1:5
lowest_target_lvls = None
for rois_blob_name in rois_blob_names:
target_lvls = fpn_utils.map_rois_to_fpn_levels(
blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)
if lowest_target_lvls is None:
lowest_target_lvls = target_lvls
else:
lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls)
for rois_blob_name in rois_blob_names:
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
fpn_utils.add_multilevel_roi_blobs(
blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min,
lvl_max)
_distribute_rois_over_fpn_levels(['sbj_rois'])
_distribute_rois_over_fpn_levels(['obj_rois'])
_distribute_rois_over_fpn_levels(['rel_rois'])
_distribute_rois_over_fpn_levels(['det_rois'])
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
_distribute_rois_over_fpn_levels(['sbj_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['obj_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['rel_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['sbj_rois_obj_pos'])
_distribute_rois_over_fpn_levels(['obj_rois_obj_pos'])
_distribute_rois_over_fpn_levels(['rel_rois_obj_pos'])
|
[
"logging.getLogger",
"numpy.minimum",
"numpy.ones",
"numpy.hstack",
"numpy.where",
"utils_rel.boxes_rel.get_spt_features",
"numpy.unique",
"numpy.random.choice",
"utils_rel.boxes_rel.rois_union",
"numpy.append",
"numpy.array",
"numpy.zeros",
"utils.fpn.add_multilevel_roi_blobs",
"numpy.concatenate",
"utils.blob.ones",
"utils.fpn.map_rois_to_fpn_levels"
] |
[((1404, 1431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1421, 1431), False, 'import logging\n'), ((8692, 8741), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_sbj_rois)'], {}), '((repeated_batch_idx, sampled_sbj_rois))\n', (8701, 8741), True, 'import numpy as np\n'), ((8765, 8814), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_obj_rois)'], {}), '((repeated_batch_idx, sampled_obj_rois))\n', (8774, 8814), True, 'import numpy as np\n'), ((8838, 8897), 'numpy.hstack', 'np.hstack', (['(all_boxes_repeated_batch_idx, sampled_all_rois)'], {}), '((all_boxes_repeated_batch_idx, sampled_all_rois))\n', (8847, 8897), True, 'import numpy as np\n'), ((9386, 9446), 'utils_rel.boxes_rel.rois_union', 'box_utils_rel.rois_union', (['sampled_sbj_rois', 'sampled_obj_rois'], {}), '(sampled_sbj_rois, sampled_obj_rois)\n', (9410, 9446), True, 'import utils_rel.boxes_rel as box_utils_rel\n'), ((2669, 2709), 'numpy.where', 'np.where', (['(prd_gt_overlaps > 1.0 - 0.0001)'], {}), '(prd_gt_overlaps > 1.0 - 0.0001)\n', (2677, 2709), True, 'import numpy as np\n'), ((2746, 2836), 'numpy.where', 'np.where', (['((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) & (prd_gt_overlaps <= 1.0 - 0.0001))'], {}), '((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) & (prd_gt_overlaps <= 1.0 -\n 0.0001))\n', (2754, 2836), True, 'import numpy as np\n'), ((3081, 3160), 'numpy.minimum', 'np.minimum', (['fg_pairs_per_image', '(hash_gt_pair_inds.size + hash_fg_pair_inds.size)'], {}), '(fg_pairs_per_image, hash_gt_pair_inds.size + hash_fg_pair_inds.size)\n', (3091, 3160), True, 'import numpy as np\n'), ((5375, 5425), 'numpy.array', 'np.array', (['[hash_fg_pair_inds.size]'], {'dtype': 'np.int32'}), '([hash_fg_pair_inds.size], dtype=np.int32)\n', (5383, 5425), True, 'import numpy as np\n'), ((5702, 5771), 'numpy.minimum', 'np.minimum', (['fg_pairs_per_image', '(gt_pair_inds.size + fg_pair_inds.size)'], {}), '(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)\n', (5712, 5771), True, 'import numpy as np\n'), ((7838, 7883), 'numpy.array', 'np.array', (['[fg_pair_inds.size]'], {'dtype': 'np.int32'}), '([fg_pair_inds.size], dtype=np.int32)\n', (7846, 7883), True, 'import numpy as np\n'), ((8522, 8567), 'utils.blob.ones', 'blob_utils.ones', (['(keep_pair_inds.shape[0], 1)'], {}), '((keep_pair_inds.shape[0], 1))\n', (8537, 8567), True, 'import utils.blob as blob_utils\n'), ((8615, 8663), 'utils.blob.ones', 'blob_utils.ones', (['(sampled_all_boxes.shape[0], 1)'], {}), '((sampled_all_boxes.shape[0], 1))\n', (8630, 8663), True, 'import utils.blob as blob_utils\n'), ((8944, 8995), 'numpy.ones', 'np.ones', (['(keep_pair_inds.shape[0], 1)'], {'dtype': 'np.int'}), '((keep_pair_inds.shape[0], 1), dtype=np.int)\n', (8951, 8995), True, 'import numpy as np\n'), ((9554, 9660), 'utils_rel.boxes_rel.get_spt_features', 'box_utils_rel.get_spt_features', (['sampled_sbj_boxes', 'sampled_obj_boxes', "roidb['width']", "roidb['height']"], {}), "(sampled_sbj_boxes, sampled_obj_boxes, roidb[\n 'width'], roidb['height'])\n", (9584, 9660), True, 'import utils_rel.boxes_rel as box_utils_rel\n'), ((12174, 12229), 'numpy.append', 'np.append', (['sbj_pos_pair_pos_inds', 'sbj_pos_pair_neg_inds'], {}), '(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds)\n', (12183, 12229), True, 'import numpy as np\n'), ((12262, 12305), 'numpy.zeros', 'np.zeros', (['sbj_pos_inds.size'], {'dtype': 'np.int32'}), '(sbj_pos_inds.size, dtype=np.int32)\n', (12270, 12305), True, 'import numpy as np\n'), ((12581, 12624), 'numpy.zeros', 'np.zeros', 
(['sbj_pos_inds.size'], {'dtype': 'np.int32'}), '(sbj_pos_inds.size, dtype=np.int32)\n', (12589, 12624), True, 'import numpy as np\n'), ((14187, 14244), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_sbj_rois_sbj_pos)'], {}), '((repeated_batch_idx, sampled_sbj_rois_sbj_pos))\n', (14196, 14244), True, 'import numpy as np\n'), ((14280, 14337), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_obj_rois_sbj_pos)'], {}), '((repeated_batch_idx, sampled_obj_rois_sbj_pos))\n', (14289, 14337), True, 'import numpy as np\n'), ((14503, 14579), 'utils_rel.boxes_rel.rois_union', 'box_utils_rel.rois_union', (['sampled_sbj_rois_sbj_pos', 'sampled_obj_rois_sbj_pos'], {}), '(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos)\n', (14527, 14579), True, 'import utils_rel.boxes_rel as box_utils_rel\n'), ((14700, 14787), 'numpy.unique', 'np.unique', (['sampled_sbj_rois_sbj_pos'], {'return_index': '(True)', 'return_inverse': '(True)', 'axis': '(0)'}), '(sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True,\n axis=0)\n', (14709, 14787), True, 'import numpy as np\n'), ((17119, 17174), 'numpy.append', 'np.append', (['obj_pos_pair_pos_inds', 'obj_pos_pair_neg_inds'], {}), '(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds)\n', (17128, 17174), True, 'import numpy as np\n'), ((17207, 17250), 'numpy.zeros', 'np.zeros', (['obj_pos_inds.size'], {'dtype': 'np.int32'}), '(obj_pos_inds.size, dtype=np.int32)\n', (17215, 17250), True, 'import numpy as np\n'), ((17526, 17569), 'numpy.zeros', 'np.zeros', (['obj_pos_inds.size'], {'dtype': 'np.int32'}), '(obj_pos_inds.size, dtype=np.int32)\n', (17534, 17569), True, 'import numpy as np\n'), ((19132, 19189), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_sbj_rois_obj_pos)'], {}), '((repeated_batch_idx, sampled_sbj_rois_obj_pos))\n', (19141, 19189), True, 'import numpy as np\n'), ((19225, 19282), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_obj_rois_obj_pos)'], {}), '((repeated_batch_idx, sampled_obj_rois_obj_pos))\n', (19234, 19282), True, 'import numpy as np\n'), ((19448, 19524), 'utils_rel.boxes_rel.rois_union', 'box_utils_rel.rois_union', (['sampled_sbj_rois_obj_pos', 'sampled_obj_rois_obj_pos'], {}), '(sampled_sbj_rois_obj_pos, sampled_obj_rois_obj_pos)\n', (19472, 19524), True, 'import utils_rel.boxes_rel as box_utils_rel\n'), ((19645, 19732), 'numpy.unique', 'np.unique', (['sampled_obj_rois_obj_pos'], {'return_index': '(True)', 'return_inverse': '(True)', 'axis': '(0)'}), '(sampled_obj_rois_obj_pos, return_index=True, return_inverse=True,\n axis=0)\n', (19654, 19732), True, 'import numpy as np\n'), ((1948, 1965), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (1962, 1965), True, 'import numpy as np\n'), ((3285, 3388), 'numpy.random.choice', 'npr.choice', (['hash_fg_pair_inds'], {'size': '(fg_pairs_per_this_image - hash_gt_pair_inds.size)', 'replace': '(False)'}), '(hash_fg_pair_inds, size=fg_pairs_per_this_image -\n hash_gt_pair_inds.size, replace=False)\n', (3295, 3388), True, 'import numpy.random as npr\n'), ((3436, 3483), 'numpy.append', 'np.append', (['hash_fg_pair_inds', 'hash_gt_pair_inds'], {}), '(hash_fg_pair_inds, hash_gt_pair_inds)\n', (3445, 3483), True, 'import numpy as np\n'), ((3894, 3944), 'numpy.where', 'np.where', (['(prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI)'], {}), '(prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI)\n', (3902, 3944), True, 'import numpy as np\n'), ((4173, 4232), 'numpy.minimum', 'np.minimum', (['bg_pairs_per_this_image', 'hash_bg_pair_inds.size'], {}), 
'(bg_pairs_per_this_image, hash_bg_pair_inds.size)\n', (4183, 4232), True, 'import numpy as np\n'), ((4442, 4489), 'numpy.append', 'np.append', (['hash_fg_pair_inds', 'hash_bg_pair_inds'], {}), '(hash_fg_pair_inds, hash_bg_pair_inds)\n', (4451, 4489), True, 'import numpy as np\n'), ((4529, 4579), 'numpy.zeros', 'np.zeros', (['hash_keep_pair_inds.size'], {'dtype': 'np.int32'}), '(hash_keep_pair_inds.size, dtype=np.int32)\n', (4537, 4579), True, 'import numpy as np\n'), ((4703, 4788), 'numpy.append', 'np.append', (['(hash_fg_pair_inds // prd_class_num)', '(hash_bg_pair_inds // prd_class_num)'], {}), '(hash_fg_pair_inds // prd_class_num, hash_bg_pair_inds //\n prd_class_num)\n', (4712, 4788), True, 'import numpy as np\n'), ((4815, 4894), 'numpy.append', 'np.append', (['(hash_fg_pair_inds % prd_class_num)', '(hash_bg_pair_inds % prd_class_num)'], {}), '(hash_fg_pair_inds % prd_class_num, hash_bg_pair_inds % prd_class_num)\n', (4824, 4894), True, 'import numpy as np\n'), ((4940, 4989), 'numpy.ones', 'np.ones', (['fg_multi_prd_labels.size'], {'dtype': 'np.int32'}), '(fg_multi_prd_labels.size, dtype=np.int32)\n', (4947, 4989), True, 'import numpy as np\n'), ((5040, 5085), 'numpy.append', 'np.append', (['(hash_fg_pair_inds // prd_class_num)'], {}), '(hash_fg_pair_inds // prd_class_num)\n', (5049, 5085), True, 'import numpy as np\n'), ((5116, 5160), 'numpy.append', 'np.append', (['(hash_fg_pair_inds % prd_class_num)'], {}), '(hash_fg_pair_inds % prd_class_num)\n', (5125, 5160), True, 'import numpy as np\n'), ((5468, 5510), 'numpy.where', 'np.where', (['(max_pair_overlaps > 1.0 - 0.0001)'], {}), '(max_pair_overlaps > 1.0 - 0.0001)\n', (5476, 5510), True, 'import numpy as np\n'), ((5535, 5629), 'numpy.where', 'np.where', (['((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps <= 1.0 - \n 0.0001))'], {}), '((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps <=\n 1.0 - 0.0001))\n', (5543, 5629), True, 'import numpy as np\n'), ((5937, 6030), 'numpy.random.choice', 'npr.choice', (['fg_pair_inds'], {'size': '(fg_pairs_per_this_image - gt_pair_inds.size)', 'replace': '(False)'}), '(fg_pair_inds, size=fg_pairs_per_this_image - gt_pair_inds.size,\n replace=False)\n', (5947, 6030), True, 'import numpy.random as npr\n'), ((6073, 6110), 'numpy.append', 'np.append', (['fg_pair_inds', 'gt_pair_inds'], {}), '(fg_pair_inds, gt_pair_inds)\n', (6082, 6110), True, 'import numpy as np\n'), ((6992, 7046), 'numpy.minimum', 'np.minimum', (['bg_pairs_per_this_image', 'bg_pair_inds.size'], {}), '(bg_pairs_per_this_image, bg_pair_inds.size)\n', (7002, 7046), True, 'import numpy as np\n'), ((7380, 7417), 'numpy.append', 'np.append', (['fg_pair_inds', 'bg_pair_inds'], {}), '(fg_pair_inds, bg_pair_inds)\n', (7389, 7417), True, 'import numpy as np\n'), ((7447, 7492), 'numpy.zeros', 'np.zeros', (['keep_pair_inds.size'], {'dtype': 'np.int32'}), '(keep_pair_inds.size, dtype=np.int32)\n', (7455, 7492), True, 'import numpy as np\n'), ((10493, 10543), 'numpy.where', 'np.where', (['(max_pair_overlaps >= cfg.TRAIN.FG_THRESH)'], {}), '(max_pair_overlaps >= cfg.TRAIN.FG_THRESH)\n', (10501, 10543), True, 'import numpy as np\n'), ((10589, 10735), 'numpy.where', 'np.where', (['((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_obj_overlaps >= cfg.TRAIN\n .FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))'], {}), '((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_obj_overlaps >=\n cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))\n', (10597, 10735), True, 'import numpy as np\n'), ((10873, 
11018), 'numpy.where', 'np.where', (['((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_obj_overlaps < cfg.TRAIN.\n FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))'], {}), '((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_obj_overlaps <\n cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))\n', (10881, 11018), True, 'import numpy as np\n'), ((12087, 12150), 'numpy.append', 'np.append', (['sbj_pos_pair_neg_inds', 'sbj_pos_obj_neg_pair_neg_inds'], {}), '(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds)\n', (12096, 12150), True, 'import numpy as np\n'), ((13129, 13185), 'numpy.where', 'np.where', (['(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)'], {}), '(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)\n', (13137, 13185), True, 'import numpy as np\n'), ((14108, 14151), 'utils.blob.ones', 'blob_utils.ones', (['(sbj_pos_inds.shape[0], 1)'], {}), '((sbj_pos_inds.shape[0], 1))\n', (14123, 14151), True, 'import utils.blob as blob_utils\n'), ((15085, 15206), 'utils_rel.boxes_rel.get_spt_features', 'box_utils_rel.get_spt_features', (['sampled_sbj_boxes_sbj_pos', 'sampled_obj_boxes_sbj_pos', "roidb['width']", "roidb['height']"], {}), "(sampled_sbj_boxes_sbj_pos,\n sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height'])\n", (15115, 15206), True, 'import utils_rel.boxes_rel as box_utils_rel\n'), ((15442, 15492), 'numpy.where', 'np.where', (['(max_pair_overlaps >= cfg.TRAIN.FG_THRESH)'], {}), '(max_pair_overlaps >= cfg.TRAIN.FG_THRESH)\n', (15450, 15492), True, 'import numpy as np\n'), ((15538, 15684), 'numpy.where', 'np.where', (['((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_sbj_overlaps >= cfg.TRAIN\n .FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))'], {}), '((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_sbj_overlaps >=\n cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))\n', (15546, 15684), True, 'import numpy as np\n'), ((15822, 15967), 'numpy.where', 'np.where', (['((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_sbj_overlaps < cfg.TRAIN.\n FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))'], {}), '((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) & (max_sbj_overlaps <\n cfg.TRAIN.FG_THRESH) & (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))\n', (15830, 15967), True, 'import numpy as np\n'), ((17032, 17095), 'numpy.append', 'np.append', (['obj_pos_pair_neg_inds', 'obj_pos_sbj_neg_pair_neg_inds'], {}), '(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds)\n', (17041, 17095), True, 'import numpy as np\n'), ((18074, 18130), 'numpy.where', 'np.where', (['(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)'], {}), '(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)\n', (18082, 18130), True, 'import numpy as np\n'), ((19053, 19096), 'utils.blob.ones', 'blob_utils.ones', (['(obj_pos_inds.shape[0], 1)'], {}), '((obj_pos_inds.shape[0], 1))\n', (19068, 19096), True, 'import utils.blob as blob_utils\n'), ((20030, 20151), 'utils_rel.boxes_rel.get_spt_features', 'box_utils_rel.get_spt_features', (['sampled_sbj_boxes_obj_pos', 'sampled_obj_boxes_obj_pos', "roidb['width']", "roidb['height']"], {}), "(sampled_sbj_boxes_obj_pos,\n sampled_obj_boxes_obj_pos, roidb['width'], roidb['height'])\n", (20060, 20151), True, 'import utils_rel.boxes_rel as box_utils_rel\n'), ((21019, 21104), 'utils.fpn.map_rois_to_fpn_levels', 'fpn_utils.map_rois_to_fpn_levels', (['blobs[rois_blob_name][:, 1:5]', 'lvl_min', 'lvl_max'], {}), '(blobs[rois_blob_name][:, 1:5], lvl_min,\n lvl_max)\n', (21051, 21104), True, 'import utils.fpn as fpn_utils\n'), ((21448, 21571), 
'utils.fpn.add_multilevel_roi_blobs', 'fpn_utils.add_multilevel_roi_blobs', (['blobs', 'rois_blob_name', 'blobs[rois_blob_name]', 'lowest_target_lvls', 'lvl_min', 'lvl_max'], {}), '(blobs, rois_blob_name, blobs[\n rois_blob_name], lowest_target_lvls, lvl_min, lvl_max)\n', (21482, 21571), True, 'import utils.fpn as fpn_utils\n'), ((3580, 3654), 'numpy.random.choice', 'npr.choice', (['hash_gt_pair_inds'], {'size': 'fg_pairs_per_this_image', 'replace': '(False)'}), '(hash_gt_pair_inds, size=fg_pairs_per_this_image, replace=False)\n', (3590, 3654), True, 'import numpy.random as npr\n'), ((4312, 4386), 'numpy.random.choice', 'npr.choice', (['hash_bg_pair_inds'], {'size': 'bg_pairs_per_this_image', 'replace': '(False)'}), '(hash_bg_pair_inds, size=bg_pairs_per_this_image, replace=False)\n', (4322, 4386), True, 'import numpy.random as npr\n'), ((6197, 6266), 'numpy.random.choice', 'npr.choice', (['gt_pair_inds'], {'size': 'fg_pairs_per_this_image', 'replace': '(False)'}), '(gt_pair_inds, size=fg_pairs_per_this_image, replace=False)\n', (6207, 6266), True, 'import numpy.random as npr\n'), ((6667, 6719), 'numpy.where', 'np.where', (['(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI)'], {}), '(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI)\n', (6675, 6719), True, 'import numpy as np\n'), ((7176, 7245), 'numpy.random.choice', 'npr.choice', (['bg_pair_inds'], {'size': 'bg_pairs_per_this_image', 'replace': '(False)'}), '(bg_pair_inds, size=bg_pairs_per_this_image, replace=False)\n', (7186, 7245), True, 'import numpy.random as npr\n'), ((21265, 21308), 'numpy.minimum', 'np.minimum', (['lowest_target_lvls', 'target_lvls'], {}), '(lowest_target_lvls, target_lvls)\n', (21275, 21308), True, 'import numpy as np\n')]
|
# encoding: utf-8
import torch
import cv2
import numpy as np
import pdb
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (tensor) [batch, num_gt, 5]
batch of annotations stacked on their 0 dim
annotations for a given image are stacked on 1 dim
"""
targets = []
imgs = []
    # each sample[1] is a numpy array of ground-truth boxes with shape [num_gt, 5]
num_gts = [sample[1].shape[0] for sample in batch]
max_num_gt = max(num_gts)
for sample in batch:
imgs.append(sample[0])
size_gt = sample[1].shape
num_gt = size_gt[0]
aug_size = list(size_gt[:])
aug_size[0] = max_num_gt
aug_gt = np.zeros(aug_size, dtype=sample[1].dtype)
aug_gt[:num_gt] = sample[1]
targets.append(torch.FloatTensor(aug_gt))
return torch.stack(imgs, 0), torch.stack(targets, 0)
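# Hedged usage sketch: detection_collate is intended to be passed as the
# collate_fn of a torch DataLoader, so per-image ground-truth arrays with
# different numbers of boxes get zero-padded to a common [max_num_gt, 5] shape.
# The dataset argument is a placeholder for anything yielding
# (image_tensor, gt_numpy_array) pairs.
def example_detection_loader(dataset, batch_size=8):
    import torch.utils.data as data
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
                           collate_fn=detection_collate)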
def base_transform(image, size, mean):
x = cv2.resize(image, (size, size)).astype(np.float32)
x -= mean
x = x.astype(np.float32)
return x
class BaseTransform:
"""
For evaluation and testing.
"""
def __init__(self, size, mean):
self.size = size
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image, boxes=None, labels=None):
return base_transform(image, self.size, self.mean), boxes, labels
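# Hedged usage sketch: at test time a frame is resized and mean-subtracted by
# BaseTransform before being converted to a CHW tensor for the network.  The
# size and per-channel mean below are common SSD/VGG defaults, used here only
# as placeholders.
def example_eval_preprocess(frame_path='frame.jpg'):
    transform = BaseTransform(300, (104.0, 117.0, 123.0))
    x, _, _ = transform(cv2.imread(frame_path))
    return torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0)  # 1 x C x H x W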
|
[
"torch.stack",
"numpy.array",
"numpy.zeros",
"cv2.resize",
"torch.FloatTensor"
] |
[((948, 989), 'numpy.zeros', 'np.zeros', (['aug_size'], {'dtype': 'sample[1].dtype'}), '(aug_size, dtype=sample[1].dtype)\n', (956, 989), True, 'import numpy as np\n'), ((1087, 1107), 'torch.stack', 'torch.stack', (['imgs', '(0)'], {}), '(imgs, 0)\n', (1098, 1107), False, 'import torch\n'), ((1109, 1132), 'torch.stack', 'torch.stack', (['targets', '(0)'], {}), '(targets, 0)\n', (1120, 1132), False, 'import torch\n'), ((1441, 1473), 'numpy.array', 'np.array', (['mean'], {'dtype': 'np.float32'}), '(mean, dtype=np.float32)\n', (1449, 1473), True, 'import numpy as np\n'), ((1049, 1074), 'torch.FloatTensor', 'torch.FloatTensor', (['aug_gt'], {}), '(aug_gt)\n', (1066, 1074), False, 'import torch\n'), ((1182, 1213), 'cv2.resize', 'cv2.resize', (['image', '(size, size)'], {}), '(image, (size, size))\n', (1192, 1213), False, 'import cv2\n')]
|
# Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
import numpy as np
import torch
class Optimizer(object):
def __init__(self, policy, use_gpu=False):
self.networks = self._init_networks(policy.input_dim, policy.output_dim)
networks = self.networks.copy()
networks['policy'] = policy
self.optimizers = self._init_optimizers(networks)
self.use_gpu = use_gpu
if self.use_gpu:
self.networks = {k: v.cuda() for k, v in self.networks.items()}
@classmethod
def _init_networks(cls, obs_dim, action_dim):
raise NotImplementedError
def process_batch(self, policy, batch, update_policy_args):
states, actions, rewards, masks = unpack_batch(batch)
if self.use_gpu:
states, actions, rewards, masks = map(
lambda x: x.cuda(), [states, actions, rewards, masks])
policy = self.update_networks(
policy, actions, masks, rewards, states,
batch["num_episodes"], *update_policy_args)
return policy
def update_networks(self, policy,
actions, masks, rewards, states, num_episodes,
*args, **step_kwargs):
raise NotImplementedError
@staticmethod
def _init_optimizers(networks, lr_rates=None):
return init_optimizers(networks, lr_rates=lr_rates)
def init_optimizers(networks, lr_rates=None):
args = {key: [network] for key, network in networks.items()}
if lr_rates is not None:
for key in args.keys():
args[key].append(lr_rates[key])
optimizers = {key: init_optimizer(*args[key])
for key in networks.keys()}
return optimizers
def unpack_batch(batch):
states = torch.from_numpy(np.array(batch["states"], dtype=np.float32))
rewards = torch.from_numpy(np.array(batch["rewards"], dtype=np.float32))
masks = torch.from_numpy(np.array(batch["masks"], dtype=np.float32))
actions = torch.from_numpy(np.array(batch["actions"]))
return states, actions, rewards, masks
def init_optimizer(network, lr_rate=0.01):
return torch.optim.Adam(network.parameters(), lr=lr_rate)
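# Hedged sketch of the intended subclassing pattern; ExampleOptimizer and the
# tiny critic below are placeholders, not part of the original code.  A
# concrete optimizer supplies its auxiliary networks through _init_networks and
# fills in update_networks; __init__ then builds one Adam optimizer per
# auxiliary network plus one for the policy.
class _ExampleValueNet(torch.nn.Module):
    def __init__(self, obs_dim):
        super(_ExampleValueNet, self).__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(obs_dim, 64), torch.nn.Tanh(), torch.nn.Linear(64, 1))

    def forward(self, states):
        return self.net(states)


class ExampleOptimizer(Optimizer):
    @classmethod
    def _init_networks(cls, obs_dim, action_dim):
        return {'value': _ExampleValueNet(obs_dim)}

    def update_networks(self, policy, actions, masks, rewards, states,
                        num_episodes, *args, **step_kwargs):
        # a real subclass would compute its losses here and step
        # self.optimizers['value'] and self.optimizers['policy']
        return policy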
|
[
"numpy.array"
] |
[((1868, 1911), 'numpy.array', 'np.array', (["batch['states']"], {'dtype': 'np.float32'}), "(batch['states'], dtype=np.float32)\n", (1876, 1911), True, 'import numpy as np\n'), ((1944, 1988), 'numpy.array', 'np.array', (["batch['rewards']"], {'dtype': 'np.float32'}), "(batch['rewards'], dtype=np.float32)\n", (1952, 1988), True, 'import numpy as np\n'), ((2019, 2061), 'numpy.array', 'np.array', (["batch['masks']"], {'dtype': 'np.float32'}), "(batch['masks'], dtype=np.float32)\n", (2027, 2061), True, 'import numpy as np\n'), ((2094, 2120), 'numpy.array', 'np.array', (["batch['actions']"], {}), "(batch['actions'])\n", (2102, 2120), True, 'import numpy as np\n')]
|
from attempt.ddpg import HERDDPG, DDPG
import gym
import os
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
if __name__ == "__main__":
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
env = gym.make('FetchReach-v1')
agent = HERDDPG(env)
for epoch in range(2):
for cycle in tqdm(range(10)):
agent.gather_cycle()
# target_agent.train()
agent.test_env(10)
env.close()
plt.plot(np.vstack(agent.rewards))
plt.title('Rewards')
plt.show()
plt.plot(np.vstack(agent.policy_losses))
plt.title('Policy Losses')
plt.show()
plt.plot(np.vstack(agent.value_losses))
plt.title('Value Losses')
plt.show()
|
[
"attempt.ddpg.HERDDPG",
"numpy.vstack",
"matplotlib.pyplot.title",
"gym.make",
"matplotlib.pyplot.show"
] |
[((219, 244), 'gym.make', 'gym.make', (['"""FetchReach-v1"""'], {}), "('FetchReach-v1')\n", (227, 244), False, 'import gym\n'), ((257, 269), 'attempt.ddpg.HERDDPG', 'HERDDPG', (['env'], {}), '(env)\n', (264, 269), False, 'from attempt.ddpg import HERDDPG, DDPG\n'), ((488, 508), 'matplotlib.pyplot.title', 'plt.title', (['"""Rewards"""'], {}), "('Rewards')\n", (497, 508), True, 'import matplotlib.pyplot as plt\n'), ((513, 523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (521, 523), True, 'import matplotlib.pyplot as plt\n'), ((574, 600), 'matplotlib.pyplot.title', 'plt.title', (['"""Policy Losses"""'], {}), "('Policy Losses')\n", (583, 600), True, 'import matplotlib.pyplot as plt\n'), ((605, 615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (613, 615), True, 'import matplotlib.pyplot as plt\n'), ((665, 690), 'matplotlib.pyplot.title', 'plt.title', (['"""Value Losses"""'], {}), "('Value Losses')\n", (674, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((458, 482), 'numpy.vstack', 'np.vstack', (['agent.rewards'], {}), '(agent.rewards)\n', (467, 482), True, 'import numpy as np\n'), ((538, 568), 'numpy.vstack', 'np.vstack', (['agent.policy_losses'], {}), '(agent.policy_losses)\n', (547, 568), True, 'import numpy as np\n'), ((630, 659), 'numpy.vstack', 'np.vstack', (['agent.value_losses'], {}), '(agent.value_losses)\n', (639, 659), True, 'import numpy as np\n')]
|
import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
HAVE_PYGSL = False
try:
import pygsl.integrate
import pygsl.sf
HAVE_PYGSL = True
except ImportError:
pass
class BinEB(object):
def __init__(
self, tmin, tmax, Nb, windows=None, linear=False, useArcmin=True, fname=None
):
if fname is not None:
self.read_data(fname)
else:
# set basic params
if useArcmin:
am2r = np.pi / 180.0 / 60.0
else:
am2r = 1.0
self.Nb = Nb
self.L = tmin * am2r
self.H = tmax * am2r
if linear:
self.Lb = (self.H - self.L) / Nb * np.arange(Nb) + self.L
self.Hb = (self.H - self.L) / Nb * (np.arange(Nb) + 1.0) + self.L
else:
self.Lb = np.exp(np.log(self.H / self.L) / Nb * np.arange(Nb)) * self.L
self.Hb = (
np.exp(np.log(self.H / self.L) / Nb * (np.arange(Nb) + 1.0))
* self.L
)
self.have_ell_win = False
# make the bin window functions
if windows is None:
def _make_geomwin(L, H):
return lambda x: 2.0 * x / (H * H - L * L)
self.windows = []
for i in range(self.Nb):
self.windows.append(_make_geomwin(self.Lb[i], self.Hb[i]))
else:
def _make_normwin(winf, norm):
return lambda x: winf(x / am2r) / norm
self.windows = []
assert (
len(windows) == Nb
), "binEB requires as many windows as angular bins!"
for i in range(self.Nb):
twin = _make_normwin(windows[i], 1.0)
norm, err = scipy.integrate.quad(twin, self.Lb[i], self.Hb[i])
self.windows.append(_make_normwin(windows[i], norm))
# get fa and fb
self.fa = np.zeros(self.Nb)
self.fa[:] = 1.0
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def fb_int(x, args):
win = args[0]
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(fb_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.fb[i] = val
else:
def fb_int(x, win):
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
fb_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
self.fb[i] = val
self.fa_on = self.fa / np.sqrt(np.sum(self.fa * self.fa))
self.fb_on = self.fb - self.fa * np.sum(self.fa * self.fb) / np.sum(
self.fa * self.fa
)
self.fb_on = self.fb_on / np.sqrt(np.sum(self.fb_on * self.fb_on))
# get Mplus matrix
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def knorm_int(x, args):
win = args[0]
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(knorm_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, args):
win = args[0]
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv2_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv2[i] = val
def inv4_int(x, args):
win = args[0]
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv4_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv4[i] = val
else:
def knorm_int(x, win):
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
knorm_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, win):
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv2_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv2[i] = val
def inv4_int(x, win):
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv4_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv4[i] = val
if HAVE_PYGSL:
def _mp_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if p > t:
val = (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mp_int(p, t, k, i):
if p > t:
return (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mp = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i < k:
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[i] * self.Hb[i]
- self.Lb[i] * self.Lb[i]
)
* np.log(self.Hb[k] / self.Lb[k])
+ 3.0
/ 2.0
* (
np.power(self.Hb[i], 4.0)
- np.power(self.Lb[i], 4.0)
)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
if k == i:
self.mp[k, i] += 1.0
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
-0.5
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
- 2.0
* self.Lb[i]
* self.Lb[i]
* np.log(self.Hb[k] / self.Lb[k])
- 3.0
/ 2.0
* np.power(self.Lb[i], 4.0)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
else:
if k == i:
self.mp[k, i] += 1.0
val = dblquad(
_mp_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mp[k, i] += val / knorm[k]
if i < k:
self.mp[k, i] = (
4.0 * inv2[k] - 12.0 * inv4[k] * self.fb[i]
) / knorm[k]
# sys.stdout.write("\n")
if HAVE_PYGSL:
def _mm_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if t > p:
val = (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mm_int(p, t, k, i):
if t > p:
return (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mm = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i > k:
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
* np.log(self.Hb[i] / self.Lb[i])
+ 3.0
/ 2.0
* (
np.power(self.Hb[k], 4.0)
- np.power(self.Lb[k], 4.0)
)
* (
1.0 / self.Hb[i] / self.Hb[i]
- 1.0 / self.Lb[i] / self.Lb[i]
)
)
)
if k == i:
self.mm[k, i] += 1.0
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
0.5
* (
-1.0 * self.Hb[k] * self.Hb[k]
+ self.Lb[k]
* self.Lb[k]
* (
4.0
- 3.0
* self.Lb[k]
* self.Lb[k]
/ self.Hb[i]
/ self.Hb[i]
- 4.0 * np.log(self.Hb[i] / self.Lb[k])
)
)
)
)
else:
if k == i:
self.mm[k, i] += 1.0
val = dblquad(
_mm_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mm[k, i] += val / knorm[k]
if i > k:
self.mm[k, i] = (
4.0 * inv2[i] - 12.0 * inv4[i] * self.fb[k]
) / knorm[k]
# sys.stdout.write("\n")
# compute the ell windows
self.comp_ell_windows()
def comp_ell_windows(self):
# get the windows in ell
self.have_ell_win = True
if HAVE_PYGSL:
def ellwin_int(theta, args):
ell = args[0]
win = args[1]
n = args[2]
return (pygsl.sf.bessel_Jn(n, ell * theta))[0] * win(theta)
else:
def ellwin_int(theta, ell, win, n):
return scipy.special.jn(n, ell * theta) * win(theta)
self.ellv = np.logspace(0.0, 5.5, 1500)
self.ellwindowsJ0 = np.zeros((self.Nb, len(self.ellv)))
self.ellwindowsJ4 = np.zeros((self.Nb, len(self.ellv)))
for i in range(self.Nb):
sys.stdout.write("|")
sys.stdout.flush()
if HAVE_PYGSL:
epsabs = 1e-6
epsrel = 1e-6
limit = 1000
w = pygsl.integrate.workspace(limit)
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 0]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,
# limit,pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ0[i, j] = val
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 4]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,limit,
# pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ4[i, j] = val
else:
win0 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 0),
)
)[0]
for ell in self.ellv
]
)
win4 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 4),
)
)[0]
for ell in self.ellv
]
)
self.ellwindowsJ0[i, :] = win0
self.ellwindowsJ4[i, :] = win4
sys.stdout.write("\n")
def write_data(self, fname):
"""
writes a simple text file with object info
# N L H
100 1.0 400.0
# Lb
1.0 1.2 ... 398.0
# Hb
1.2 1.4 ... 400.0
# fa
1.0 1.0 .... 1.0
# fb
blah blah ... blah
# fa_on
blah blah ... blah
# fb_on
blah blah ... blah
# invnorm
blah blah ... blah
# Mplus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# Mminus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellv
blah blah ... blah
# ellwinJ0
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellwinJ4
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
"""
def write_vec(fp, vec):
for val in vec:
fp.write("%.20lg " % val)
fp.write("\n#\n")
def write_mat(fp, mat):
shape = mat.shape
for i in range(shape[0]):
for val in mat[i, :]:
fp.write("%.20lg " % val)
fp.write("\n")
fp.write("#\n")
fp = open(fname, "w")
fp.write("# N L H\n")
fp.write("%ld %.20lg %.20lg\n" % (self.Nb, self.L, self.H))
fp.write("# Lb\n")
write_vec(fp, self.Lb)
fp.write("# Hb\n")
write_vec(fp, self.Hb)
fp.write("# fa\n")
write_vec(fp, self.fa)
fp.write("# fb\n")
write_vec(fp, self.fb)
fp.write("# fa_on\n")
write_vec(fp, self.fa_on)
fp.write("# fb_on\n")
write_vec(fp, self.fb_on)
fp.write("# invnorm\n")
write_vec(fp, self.invnorm)
fp.write("# Mplus\n")
write_mat(fp, self.mp)
fp.write("# Mminus\n")
write_mat(fp, self.mm)
fp.write("# ellv\n")
write_vec(fp, self.ellv)
fp.write("# ellwinJ0\n")
write_mat(fp, self.ellwindowsJ0)
fp.write("# ellwinJ4\n")
write_mat(fp, self.ellwindowsJ4)
fp.close()
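    # Hedged usage sketch of the round trip provided by write_data/read_data
    # (the bin parameters and file name are placeholders):
    #   eb = BinEB(1.0, 400.0, 20)
    #   eb.write_data('bineb_cache.txt')
    #   eb2 = BinEB(1.0, 400.0, 20, fname='bineb_cache.txt')  # re-reads, skips the integrals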
def read_data(self, fname):
def read_vec(fp):
line = fp.readline()
line = line.strip()
val = np.array([float(tag) for tag in line.split()])
line = fp.readline()
return val
def read_mat(fp):
mat = []
line = fp.readline()
while line[0] != "#":
line = line.strip()
mat.append([float(tag) for tag in line.split()])
line = fp.readline()
mat = np.array(mat)
return mat
fp = open(fname, "r")
line = fp.readline()
line = fp.readline()
line = line.strip()
line = line.split()
self.Nb = int(line[0])
self.L = float(line[1])
self.H = float(line[2])
line = fp.readline()
self.Lb = read_vec(fp)
line = fp.readline()
self.Hb = read_vec(fp)
line = fp.readline()
self.fa = read_vec(fp)
line = fp.readline()
self.fb = read_vec(fp)
line = fp.readline()
self.fa_on = read_vec(fp)
line = fp.readline()
self.fb_on = read_vec(fp)
line = fp.readline()
self.invnorm = read_vec(fp)
line = fp.readline()
self.mp = read_mat(fp)
line = fp.readline()
self.mm = read_mat(fp)
line = fp.readline()
self.ellv = read_vec(fp)
line = fp.readline()
self.ellwindowsJ0 = read_mat(fp)
line = fp.readline()
self.ellwindowsJ4 = read_mat(fp)
self.have_ell_win = True
fp.close()
def fplusminus(self, fptest):
fp = fptest - np.sum(fptest * self.fa_on) * self.fa_on
fp = fp - np.sum(fp * self.fb_on) * self.fb_on
fm = np.dot(self.mp, fp)
"""
code to test
fm = np.zeros(len(fp))
for i in range(len(fp)):
for j in range(len(fp)):
fm[i] += self.mp[i,j]*fp[j]
print fm-np.dot(self.mp,fp)
"""
return fp, fm
def wplus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5
def wminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum - msum) * 0.5
def wplusminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5, (psum - msum) * 0.5
|
[
"numpy.power",
"numpy.log",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"sys.stdout.flush",
"numpy.logspace",
"numpy.arange",
"sys.stdout.write"
] |
[((15257, 15284), 'numpy.logspace', 'np.logspace', (['(0.0)', '(5.5)', '(1500)'], {}), '(0.0, 5.5, 1500)\n', (15268, 15284), True, 'import numpy as np\n'), ((17918, 17940), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (17934, 17940), False, 'import sys\n'), ((22001, 22020), 'numpy.dot', 'np.dot', (['self.mp', 'fp'], {}), '(self.mp, fp)\n', (22007, 22020), True, 'import numpy as np\n'), ((2093, 2110), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (2101, 2110), True, 'import numpy as np\n'), ((7288, 7316), 'numpy.zeros', 'np.zeros', (['(self.Nb, self.Nb)'], {}), '((self.Nb, self.Nb))\n', (7296, 7316), True, 'import numpy as np\n'), ((11470, 11498), 'numpy.zeros', 'np.zeros', (['(self.Nb, self.Nb)'], {}), '((self.Nb, self.Nb))\n', (11478, 11498), True, 'import numpy as np\n'), ((15458, 15479), 'sys.stdout.write', 'sys.stdout.write', (['"""|"""'], {}), "('|')\n", (15474, 15479), False, 'import sys\n'), ((15492, 15510), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15508, 15510), False, 'import sys\n'), ((20743, 20756), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (20751, 20756), True, 'import numpy as np\n'), ((2449, 2466), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (2457, 2466), True, 'import numpy as np\n'), ((2938, 2955), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (2946, 2955), True, 'import numpy as np\n'), ((3814, 3831), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (3822, 3831), True, 'import numpy as np\n'), ((4357, 4374), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (4365, 4374), True, 'import numpy as np\n'), ((4869, 4886), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (4877, 4886), True, 'import numpy as np\n'), ((5363, 5380), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (5371, 5380), True, 'import numpy as np\n'), ((5757, 5774), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (5765, 5774), True, 'import numpy as np\n'), ((6120, 6137), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (6128, 6137), True, 'import numpy as np\n'), ((21892, 21919), 'numpy.sum', 'np.sum', (['(fptest * self.fa_on)'], {}), '(fptest * self.fa_on)\n', (21898, 21919), True, 'import numpy as np\n'), ((21951, 21974), 'numpy.sum', 'np.sum', (['(fp * self.fb_on)'], {}), '(fp * self.fb_on)\n', (21957, 21974), True, 'import numpy as np\n'), ((22407, 22443), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ0[:, i] * fp)'], {}), '(self.ellwindowsJ0[:, i] * fp)\n', (22413, 22443), True, 'import numpy as np\n'), ((22524, 22560), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ4[:, i] * fm)'], {}), '(self.ellwindowsJ4[:, i] * fm)\n', (22530, 22560), True, 'import numpy as np\n'), ((22795, 22831), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ0[:, i] * fp)'], {}), '(self.ellwindowsJ0[:, i] * fp)\n', (22801, 22831), True, 'import numpy as np\n'), ((22912, 22948), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ4[:, i] * fm)'], {}), '(self.ellwindowsJ4[:, i] * fm)\n', (22918, 22948), True, 'import numpy as np\n'), ((23187, 23223), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ0[:, i] * fp)'], {}), '(self.ellwindowsJ0[:, i] * fp)\n', (23193, 23223), True, 'import numpy as np\n'), ((23304, 23340), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ4[:, i] * fm)'], {}), '(self.ellwindowsJ4[:, i] * fm)\n', (23310, 23340), True, 'import numpy as np\n'), ((3233, 3258), 'numpy.sum', 'np.sum', (['(self.fa * self.fa)'], {}), '(self.fa * self.fa)\n', (3239, 3258), True, 'import 
numpy as np\n'), ((3333, 3358), 'numpy.sum', 'np.sum', (['(self.fa * self.fa)'], {}), '(self.fa * self.fa)\n', (3339, 3358), True, 'import numpy as np\n'), ((3435, 3466), 'numpy.sum', 'np.sum', (['(self.fb_on * self.fb_on)'], {}), '(self.fb_on * self.fb_on)\n', (3441, 3466), True, 'import numpy as np\n'), ((751, 764), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (760, 764), True, 'import numpy as np\n'), ((3305, 3330), 'numpy.sum', 'np.sum', (['(self.fa * self.fb)'], {}), '(self.fa * self.fb)\n', (3311, 3330), True, 'import numpy as np\n'), ((826, 839), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (835, 839), True, 'import numpy as np\n'), ((938, 951), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (947, 951), True, 'import numpy as np\n'), ((907, 930), 'numpy.log', 'np.log', (['(self.H / self.L)'], {}), '(self.H / self.L)\n', (913, 930), True, 'import numpy as np\n'), ((1017, 1040), 'numpy.log', 'np.log', (['(self.H / self.L)'], {}), '(self.H / self.L)\n', (1023, 1040), True, 'import numpy as np\n'), ((1049, 1062), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (1058, 1062), True, 'import numpy as np\n'), ((8000, 8031), 'numpy.log', 'np.log', (['(self.Hb[k] / self.Lb[k])'], {}), '(self.Hb[k] / self.Lb[k])\n', (8006, 8031), True, 'import numpy as np\n'), ((12182, 12213), 'numpy.log', 'np.log', (['(self.Hb[i] / self.Lb[i])'], {}), '(self.Hb[i] / self.Lb[i])\n', (12188, 12213), True, 'import numpy as np\n'), ((9328, 9359), 'numpy.log', 'np.log', (['(self.Hb[k] / self.Lb[k])'], {}), '(self.Hb[k] / self.Lb[k])\n', (9334, 9359), True, 'import numpy as np\n'), ((9482, 9507), 'numpy.power', 'np.power', (['self.Lb[i]', '(4.0)'], {}), '(self.Lb[i], 4.0)\n', (9490, 9507), True, 'import numpy as np\n'), ((8196, 8221), 'numpy.power', 'np.power', (['self.Hb[i]', '(4.0)'], {}), '(self.Hb[i], 4.0)\n', (8204, 8221), True, 'import numpy as np\n'), ((8264, 8289), 'numpy.power', 'np.power', (['self.Lb[i]', '(4.0)'], {}), '(self.Lb[i], 4.0)\n', (8272, 8289), True, 'import numpy as np\n'), ((12378, 12403), 'numpy.power', 'np.power', (['self.Hb[k]', '(4.0)'], {}), '(self.Hb[k], 4.0)\n', (12386, 12403), True, 'import numpy as np\n'), ((12446, 12471), 'numpy.power', 'np.power', (['self.Lb[k]', '(4.0)'], {}), '(self.Lb[k], 4.0)\n', (12454, 12471), True, 'import numpy as np\n'), ((13763, 13794), 'numpy.log', 'np.log', (['(self.Hb[i] / self.Lb[k])'], {}), '(self.Hb[i] / self.Lb[k])\n', (13769, 13794), True, 'import numpy as np\n')]
|
import batoid
import numpy as np
import math
from test_helpers import timer, do_pickle, all_obj_diff
@timer
def test_properties():
import random
random.seed(5)
for i in range(100):
R = random.gauss(0.7, 0.8)
sphere = batoid.Sphere(R)
assert sphere.R == R
do_pickle(sphere)
@timer
def test_sag():
import random
random.seed(57)
for i in range(100):
R = random.gauss(4.2, 0.3)
sphere = batoid.Sphere(R)
for j in range(10):
x = random.uniform(-0.7*R, 0.7*R)
y = random.uniform(-0.7*R, 0.7*R)
result = sphere.sag(x, y)
np.testing.assert_allclose(result, R*(1-math.sqrt(1.0-(x*x + y*y)/R/R)))
# Check that it returned a scalar float and not an array
assert isinstance(result, float)
# Check vectorization
x = np.random.uniform(-0.7*R, 0.7*R, size=(10, 10))
y = np.random.uniform(-0.7*R, 0.7*R, size=(10, 10))
np.testing.assert_allclose(sphere.sag(x, y), R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)))
# Make sure non-unit stride arrays also work
np.testing.assert_allclose(
sphere.sag(x[::5,::2], y[::5,::2]),
(R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)))[::5, ::2]
)
@timer
def test_intersect():
import random
random.seed(577)
for i in range(100):
R = random.gauss(10.0, 0.1)
sphere = batoid.Sphere(R)
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
# If we shoot rays straight up, then it's easy to predict the
# intersection points.
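            # The vertical ray should hit the surface at (x, y, sphere.sag(x, y)),
            # which is what the asserts below check.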
r0 = batoid.Ray(x, y, -1000, 0, 0, 1, 0)
r = sphere.intersect(r0)
np.testing.assert_allclose(r.r[0], x)
np.testing.assert_allclose(r.r[1], y)
np.testing.assert_allclose(r.r[2], sphere.sag(x, y), rtol=0, atol=1e-9)
    # Check normal for R=0 sphere (a plane)
sphere = batoid.Sphere(0.0)
np.testing.assert_array_equal(sphere.normal(0.1,0.1), [0,0,1])
@timer
def test_intersect_vectorized():
import random
random.seed(5772)
r0s = [batoid.Ray([random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(10.0, 0.1)],
[random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(-1.0, 0.1)],
random.gauss(0.0, 0.1))
for i in range(1000)]
r0s = batoid.RayVector(r0s)
for i in range(100):
R = random.gauss(0.05, 0.01)
sphere = batoid.Sphere(R)
r1s = sphere.intersect(r0s)
r2s = batoid.RayVector([sphere.intersect(r0) for r0 in r0s])
assert r1s == r2s
@timer
def test_ne():
objs = [
batoid.Sphere(1.0),
batoid.Sphere(2.0),
batoid.Plane()
]
all_obj_diff(objs)
@timer
def test_fail():
sphere = batoid.Sphere(1.0)
ray = batoid.Ray([0,0,-1], [0,0,-1])
ray = sphere.intersect(ray)
assert ray.failed
ray = batoid.Ray([0,0,-1], [0,0,-1])
sphere.intersectInPlace(ray)
assert ray.failed
if __name__ == '__main__':
test_properties()
test_sag()
test_intersect()
test_intersect_vectorized()
test_ne()
test_fail()
|
[
"batoid.Ray",
"batoid.Plane",
"random.uniform",
"numpy.sqrt",
"test_helpers.do_pickle",
"numpy.testing.assert_allclose",
"batoid.RayVector",
"math.sqrt",
"random.seed",
"test_helpers.all_obj_diff",
"numpy.random.uniform",
"batoid.Sphere",
"random.gauss"
] |
[((155, 169), 'random.seed', 'random.seed', (['(5)'], {}), '(5)\n', (166, 169), False, 'import random\n'), ((366, 381), 'random.seed', 'random.seed', (['(57)'], {}), '(57)\n', (377, 381), False, 'import random\n'), ((1331, 1347), 'random.seed', 'random.seed', (['(577)'], {}), '(577)\n', (1342, 1347), False, 'import random\n'), ((1995, 2013), 'batoid.Sphere', 'batoid.Sphere', (['(0.0)'], {}), '(0.0)\n', (2008, 2013), False, 'import batoid\n'), ((2145, 2162), 'random.seed', 'random.seed', (['(5772)'], {}), '(5772)\n', (2156, 2162), False, 'import random\n'), ((2538, 2559), 'batoid.RayVector', 'batoid.RayVector', (['r0s'], {}), '(r0s)\n', (2554, 2559), False, 'import batoid\n'), ((2914, 2932), 'test_helpers.all_obj_diff', 'all_obj_diff', (['objs'], {}), '(objs)\n', (2926, 2932), False, 'from test_helpers import timer, do_pickle, all_obj_diff\n'), ((2972, 2990), 'batoid.Sphere', 'batoid.Sphere', (['(1.0)'], {}), '(1.0)\n', (2985, 2990), False, 'import batoid\n'), ((3001, 3035), 'batoid.Ray', 'batoid.Ray', (['[0, 0, -1]', '[0, 0, -1]'], {}), '([0, 0, -1], [0, 0, -1])\n', (3011, 3035), False, 'import batoid\n'), ((3097, 3131), 'batoid.Ray', 'batoid.Ray', (['[0, 0, -1]', '[0, 0, -1]'], {}), '([0, 0, -1], [0, 0, -1])\n', (3107, 3131), False, 'import batoid\n'), ((207, 229), 'random.gauss', 'random.gauss', (['(0.7)', '(0.8)'], {}), '(0.7, 0.8)\n', (219, 229), False, 'import random\n'), ((247, 263), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (260, 263), False, 'import batoid\n'), ((301, 318), 'test_helpers.do_pickle', 'do_pickle', (['sphere'], {}), '(sphere)\n', (310, 318), False, 'from test_helpers import timer, do_pickle, all_obj_diff\n'), ((419, 441), 'random.gauss', 'random.gauss', (['(4.2)', '(0.3)'], {}), '(4.2, 0.3)\n', (431, 441), False, 'import random\n'), ((459, 475), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (472, 475), False, 'import batoid\n'), ((875, 926), 'numpy.random.uniform', 'np.random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {'size': '(10, 10)'}), '(-0.7 * R, 0.7 * R, size=(10, 10))\n', (892, 926), True, 'import numpy as np\n'), ((935, 986), 'numpy.random.uniform', 'np.random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {'size': '(10, 10)'}), '(-0.7 * R, 0.7 * R, size=(10, 10))\n', (952, 986), True, 'import numpy as np\n'), ((1385, 1408), 'random.gauss', 'random.gauss', (['(10.0)', '(0.1)'], {}), '(10.0, 0.1)\n', (1397, 1408), False, 'import random\n'), ((1426, 1442), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (1439, 1442), False, 'import batoid\n'), ((2598, 2622), 'random.gauss', 'random.gauss', (['(0.05)', '(0.01)'], {}), '(0.05, 0.01)\n', (2610, 2622), False, 'import random\n'), ((2640, 2656), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (2653, 2656), False, 'import batoid\n'), ((2833, 2851), 'batoid.Sphere', 'batoid.Sphere', (['(1.0)'], {}), '(1.0)\n', (2846, 2851), False, 'import batoid\n'), ((2861, 2879), 'batoid.Sphere', 'batoid.Sphere', (['(2.0)'], {}), '(2.0)\n', (2874, 2879), False, 'import batoid\n'), ((2889, 2903), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (2901, 2903), False, 'import batoid\n'), ((520, 553), 'random.uniform', 'random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {}), '(-0.7 * R, 0.7 * R)\n', (534, 553), False, 'import random\n'), ((566, 599), 'random.uniform', 'random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {}), '(-0.7 * R, 0.7 * R)\n', (580, 599), False, 'import random\n'), ((1487, 1509), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1499, 1509), False, 'import random\n'), 
((1526, 1548), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1538, 1548), False, 'import random\n'), ((1676, 1711), 'batoid.Ray', 'batoid.Ray', (['x', 'y', '(-1000)', '(0)', '(0)', '(1)', '(0)'], {}), '(x, y, -1000, 0, 0, 1, 0)\n', (1686, 1711), False, 'import batoid\n'), ((1761, 1798), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r.r[0]', 'x'], {}), '(r.r[0], x)\n', (1787, 1798), True, 'import numpy as np\n'), ((1811, 1848), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r.r[1]', 'y'], {}), '(r.r[1], y)\n', (1837, 1848), True, 'import numpy as np\n'), ((2471, 2493), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2483, 2493), False, 'import random\n'), ((2186, 2208), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2198, 2208), False, 'import random\n'), ((2233, 2255), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2245, 2255), False, 'import random\n'), ((2280, 2303), 'random.gauss', 'random.gauss', (['(10.0)', '(0.1)'], {}), '(10.0, 0.1)\n', (2292, 2303), False, 'import random\n'), ((2329, 2351), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2341, 2351), False, 'import random\n'), ((2376, 2398), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2388, 2398), False, 'import random\n'), ((2423, 2446), 'random.gauss', 'random.gauss', (['(-1.0)', '(0.1)'], {}), '(-1.0, 0.1)\n', (2435, 2446), False, 'import random\n'), ((1041, 1079), 'numpy.sqrt', 'np.sqrt', (['(1.0 - (x * x + y * y) / R / R)'], {}), '(1.0 - (x * x + y * y) / R / R)\n', (1048, 1079), True, 'import numpy as np\n'), ((686, 726), 'math.sqrt', 'math.sqrt', (['(1.0 - (x * x + y * y) / R / R)'], {}), '(1.0 - (x * x + y * y) / R / R)\n', (695, 726), False, 'import math\n'), ((1227, 1265), 'numpy.sqrt', 'np.sqrt', (['(1.0 - (x * x + y * y) / R / R)'], {}), '(1.0 - (x * x + y * y) / R / R)\n', (1234, 1265), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:28:54 2020
@author: Dr <NAME> (CIMAT-CONACYT, Mexico) jac at cimat.mx
Instantaneous reproduction numbers calculations.
Rts_P, Implementation of Cori et al (2013)
Rts_AR, new filtering version using an autoregressive linear model of Capistrán, Capella and Christen (2020):
https://arxiv.org/abs/2012.02168, 05DIC2021
01FEB2021: Some bugs were corrected to avoid errors when counts are too low, and for prediction when g=1.
Go directly to __main__ for examples.
"""
import os
from datetime import date, timedelta
from pickle import load, dump
from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones
from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt
from numpy import sum as np_sum
from scipy.stats import erlang, gamma, nbinom, uniform, beta
from scipy.stats import t as t_student
from matplotlib.pyplot import subplots, rcParams, close
from matplotlib.dates import drange
from pytwalk import pytwalk
from plotfrozen import PlotFrozenDist
def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt as in:
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
       q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. of R_t.
       If q is a single integer, return a simulation of the Rts of size q, for each Rt.
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rt = zeros(( len(q), n))
simulate = False
    else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
if q == 2: # return a and b of post gamma
rt = zeros(( q, n))
else:
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
for t in range(max(m-n,0), m):
S1 = 0.0
S2 = 0.0
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
S2 += data[(t-k)]
S1 += sum(I * w[(m-(t-k)):]) #\Gamma_k
#print( (Rt_pr_a+S2) * (1/(S1 + 1/Rt_pr_b)), (Rt_pr_a+S2), 1/(S1 + 1/Rt_pr_b))
if simulate:
if q == 2: #Return Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b)
rt[:,t-(m-n)] = Rt_pr_a+S2, 1/(S1 + 1/Rt_pr_b)
else:
rt[:,t-(m-n)] = gamma.rvs( Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b), size=q)
else:
rt[:,t-(m-n)] = gamma.ppf( q, Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b))
return rt
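# Minimal usage sketch for Rts_P (hypothetical; `cases` stands for any 1-D incidence array):
#   cases = loadtxt('9-01.csv')[:,1]         # second column holds the daily incidence
#   rts_q = Rts_P( cases, tau=7, n=30)       # rows: quantiles 10,25,50,75,90; columns: days
#   rts_sim = Rts_P( cases, n=30, q=500)     # 500 posterior draws of R_t per day instead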
def PlotRts_P( data_fnam, init_date, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_P.
       csv_fnam is an optional file name to save the Rts info.
       ax is an Axes handle for the plot; if None, it creates one and returns it.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_P(data=data[:,1],\
tau=tau, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=alpha)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=alpha)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
"""
def loglikelihood_NB( x, mu, psi):
mu_psi = mu/psi
return -gammaln(x + 1) + gammaln(x + psi) - gammaln(psi)\
-(x + psi)*log(1 + mu_psi) + x*log(mu_psi)
"""
def loglikelihood_NB( x, mu, psi):
return beta.logcdf(x, mu*psi, (1-mu)*psi)
def Rts_NB( data, n=30, tau=7, psi=10, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt Using a Negative Binomial instead of Poisson.
Here one needs to fix psi = 1/theta (= 10).
Extension of (not documented):
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
       q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. of R_t.
       If q is a single integer, return a simulation of the Rts, for each Rt.
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
quantiles = zeros(len(q))
rt = zeros(( len(q), n))
simulate = False
    else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
R = linspace( 0.1, 3.0, num=100)
DeltaR = R[1]-R[0]
#omega = 1
#theta = THETA_MEAN #0.01
#psi = 1/theta
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for t in range(max(m-n,0), m):
#S1 = 0.0
        log_likelihood_I = zeros(R.shape) ## log-likelihood evaluated at each value of the R grid
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
Gammak = I @ w[(m-(t-k)):] #\Gamma_k
#S1 += Gammak
I_k = data[(t-k)]
log_likelihood_I += loglikelihood_NB( I_k, R*Gammak, psi)
log_post = log_likelihood_I + gamma.logpdf( R, Rt_pr_a, scale=1/Rt_pr_b)
pdf = exp(log_post)
pdf /= sum(pdf)*DeltaR
cdf = cumsum(pdf)*DeltaR
if simulate:
u = uniform.rvs()
rt[:,t-(m-n)] = R[where(cdf < u)[0][-1]]
else:
for i,qua in enumerate(q):
quantiles[i] = R[where(cdf < qua)[0][-1]]
rt[:,t-(m-n)] = quantiles
return rt
def PlotRts_NB( data_fnam, init_date, psi, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_NB.
       csv_fnam is an optional file name to save the Rts info.
       ax is an Axes handle for the plot; if None, it creates one and returns it.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_NB(data=data[:,1],\
tau=tau, psi=psi, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color='red' )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
class Rts_NB_psi:
def __init__( self, data_fnam, init_date, trim=0, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90], workdir="./../"):
"""Calculate Rt Using a Negative Binomial with unknown psi = 1/theta.
Here one needs to run the MCMC first, RunMCMC.
See example below.
Extension of (not documented):
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
           q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. of R_t.
           If q is a single integer, return a simulation of the Rts of size q, for each Rt.
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
#convolve
self.init_date = init_date
self.m = len(data)
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
self.n = min(self.m, n)
self.tau = tau
self.Rt_pr_a = Rt_pr_a
self.Rt_pr_b = Rt_pr_b
self.prior = gamma( self.Rt_pr_a, scale=1/self.Rt_pr_b)
#omega = 1
self.psi = 100
self.psi_prior = gamma( 3, scale=self.psi/3)
for t in range( self.m - self.n, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
        self.Gammak = zeros(self.m) ## Precompute all the Gamma_k in advance:
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
if os.path.isfile(workdir + 'output/' + self.data_fnam + '_rts.pkl'): # samples file exists
print("File with rts and psi samples exists, loading rts ...", end=' ')
self.rts = load(open(workdir + 'output/' + self.data_fnam + '_rts.pkl', 'rb'))
self.psi_samples = load(open(workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'rb'))
else:
print("File with rts and psi samples does not exist, run RunMCMC first.")
def logpost( self, Rs, psi):
log_post = 0.0
for t in range( self.m - self.n, self.m):
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)]) +\
                np_sum(loglikelihood_NB( self.data[(t-self.tau+1):t], Rs[t-(self.m - self.n)]*self.Gammak[(t-self.tau+1):t], psi))
#log_post += sum([loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi) for s in range( t-self.tau+1, t)])
"""
for k in range(self.tau):
s = t-k
#I = self.data[:s] ## window of reports
#Gammak = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
#I_k = self.data[s]
log_post += loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi)
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)])
"""
return log_post
def sim_init(self):
"""Simulate initial values from the Rts_NB and the prior for psi."""
# Shake the Rts_NB simulation to avoid repeated values
#shake = Rts_NB( self.data*self.Z, tau=self.tau, n=self.n, IP_dist=self.IP_dist,\
# Rt_pr_a=self.Rt_pr_a, Rt_pr_b=self.Rt_pr_b, q=1) + 0.001*uniform.rvs(size=self.n)
shake = ones(self.n) + 0.001*uniform.rvs(size=self.n)
return append( shake, self.psi_prior.rvs(size=1))
#Simulate intial values from the prior.
#return append(self.prior.rvs(size=self.n),self.psi_prior.rvs(size=1))
def support(self, x):
rt = all( (0.1 <= x[:-1]) * (x[:-1] <= 40) ) #Rt's
rt &= (x[-1] > 0.0)
return rt
def RunMCMC( self, T, burnin=5000, q=[10,25,50,75,90]):
"""Run twalk MCMC, T = number of iterations.
        burnin; thinning = IAT (integrated autocorrelation time).
"""
#self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], self.psi), Supp =self.support) #Ignore x[-1] = psi
self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], x[-1]) - self.prior.logpdf(x[-1]), Supp =self.support)
self.twalk.Run( T=T, x0 = self.sim_init(), xp0 = self.sim_init())
self.burnin = burnin
self.Rts(q=q)
dump( self.rts, open(self.workdir + 'output/' + self.data_fnam + '_rts.pkl', 'wb'))
self.psi_samples = self.twalk.Output[self.burnin:, self.n]
dump( self.psi_samples, open(self.workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'wb'))
def PlotPostPsi( self, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.psi_samples, density=True)
ax.set_xlabel(r'$\psi$')
def PlotPostRt( self, i, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
#PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.twalk.Output[self.burnin:,i], density=True)
ax.set_xlabel(r'$R_%d$' % (i))
def Rts( self, q=[10,25,50,75,90]):
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rts = zeros(( len(q), self.n))
simulate = False
        else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
rts = zeros(( q, self.n))
simulate = True
self.q = q
self.simulate = simulate
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for i in range(self.n):
if simulate:
#u = uniform.rvs()
rts[:,i] = self.twalk.Output[self.burnin+0,i]
else:
rts[:,i] = quantile( self.twalk.Output[self.burnin:,i], q=q)
self.rts = rts
return rts
def PlotRts( self, color='blue', median_color='red', csv_fnam=None, ax=None):
"""Makes a board with the Rt evolution.
csv_fnam is an optional file name to save the Rts info.
        ax is an Axes handle for the plot; if None, it creates one and returns it.
"""
#self.rts already been produced after running RunMCMC
last_date = self.init_date + timedelta(self.m)
if ax == None:
fig, ax = subplots(figsize=( self.n/3, 3.5) )
for i in range(self.n):
h = self.rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(self.n))
ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(self.n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(self.q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = self.rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
class Rts_AR:
def __init__( self, data_fnam, init_date, trim=0,\
IP_dist=erlang( a=3, scale=8/3), tau=7, m0=0, c_a_0=1, w_a_t=2/7, n0=2, s0=3,\
n=30, pred=0, workdir="./../"):
"""Calculate Rt Using a log autoregressive time series on the logs.
See: ...
See example below.
Parameters:
data_fnam: file name = workdir + 'data/' + data_fnam + '.csv'
or array with case incidence.
            init_date: initial date for the first datum, e.g. date(2020, 2, 27).
trim: (negative) cut trim days at the end of data.
            tau: number of days to learn from the past (default 7, see paper).
n: calculate n R_t's to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
            m0=0, c_a_0=1, w_a_t=2/7, n0=2, s0=3: prior hyperparameters
              m_0, c_0^*, w_t^*, n_0, s_0 (see paper).
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
self.init_date = init_date
self.m = len(self.data) ##Data size
### Calculate the serial time distribution
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
### Calculation range
        self.shift = 5*tau #Number of days to start the calculation before the first Rt.
self.n = min(self.m, n) #Number of Rt's to calculate, from the present into the past.
self.N = n+self.shift #Total range (into the past) for calculation
#If self.N is larger than the whole data set
if self.N > (self.m-1):
self.n -= self.N - (self.m-1)#Reduce self.n accordingly
self.N = n+self.shift
if self.n < 0:
raise ValueError("ERROR: Not enough data to calculate Rts: 5*tau > %d (data size)" % (self.m,))
print("Not enough data to calculate Rts: 5*tau + n > %d (data size)" % (self.m,))
print("Reducing to n=%d" % (self.n,))
for t in range(self.n):
if self.data[self.m-(self.n - t)] >= 10:
break
else:
self.n -= 1 #Reduce n if the counts have not reached 10
print("Incidence below 10, reducing n to %d." % (self.n,))
self.N = self.n+self.shift
### Setting prior parameters
self.delta = 1-(1/tau)
self.tau = tau
self.pred = pred
self.g = 1 #exp(-2/tau)
self.m0 = m0
self.c_a_0 = c_a_0
self.w_a_t = w_a_t
self.n0 = n0
self.s0 = s0
"""
### Calculation range
for t in range( self.m - self.N, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.N = min(self.m, n+self.shift)
"""
        ### Precompute all the Gamma_k in advance:
self.Gammak = zeros(self.m)
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
### Calculate the log data:
        ### We add 1e-6 for convenience, since very early data may be zero.
        ### This makes no difference at the end.
self.y = log(self.data + 1e-6) - log(self.Gammak + 1e-6)
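        ### y_t = log(I_t) - log(Gamma_t) is the "observed" log reproduction number;
        ### exp(self.y) is the grey curve drawn by PlotRts.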
def sim_data( self, R, I0):
pass
def CalculateRts( self, q=[10,25,50,75,90]):
"""Calculate the posterior distribution and the Rt's quantiles.
           q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. of R_t.
           If q is a single integer, return a simulation of the Rts of size q, for each Rt.
If q=2, save the mean and dispersion parameter of the posterior for Rt
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
self.rts = zeros(( len(q), self.n))
self.rts_pred = zeros((len(q), self.pred))
simulate = False
        else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
self.rts = zeros(( q, self.n))
self.rts_pred = zeros(( q, self.pred))
simulate = True
self.q = q
self.simulate = simulate
### nt, at, rt, qt, st, mt, ct # hiperparameters
### 0 1 2 3 4 5 6
self.hiper = zeros(( self.N+1, 7))
### nt, at, rt, qt, st, mt, ct # hiperparameters
self.hiper[0,:] = self.n0, -1, -1, -1, self.s0, self.m0, self.s0*self.c_a_0
for t in range( self.N ):
r_a_t = self.g**2 * self.hiper[t,6] + self.w_a_t #r^*_t
At = r_a_t/(r_a_t + 1)
self.hiper[t+1,0] = self.delta*self.hiper[t,0] + 1 #nt
self.hiper[t+1,1] = self.g * self.hiper[t,5] #at
et = self.y[self.m-(self.N - t)] - self.hiper[t+1,1]
self.hiper[t+1,2] = self.hiper[t,4]*r_a_t #rt
self.hiper[t+1,3] = self.hiper[t,4]*(r_a_t + 1) #qt
# st:
self.hiper[t+1,4] = self.delta*(self.hiper[t,0]/self.hiper[t+1,0])*self.hiper[t,4] +\
self.hiper[t,4]/self.hiper[t+1,0] * (et**2/self.hiper[t+1,3])
self.hiper[t+1,5] = self.hiper[t+1,1] + At*et #mt
#ct
self.hiper[t+1,6] = (self.hiper[t+1,4]/self.hiper[t,4]) * (self.hiper[t+1,2]- self.hiper[t+1,3]*At**2)
if t >= self.shift:
if self.simulate:
self.rts[:,t-self.shift] = exp(t_student.rvs( size=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
else:
self.rts[:,t-self.shift] = exp(t_student.ppf( q=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
if self.pred>0:
t = self.N
self.pred_hiper = zeros(( self.pred, 2)) # a_t^k and r_t^k
for k in range(self.pred):
self.pred_hiper[k,0] = self.g**(k+1) * self.hiper[t,5] #a_t^k
if self.g == 1:
self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * (k+1) #r_t^k
else:
self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * ((1-self.g**(2*(k+1)))/(1-self.g**2)) #r_t^k
if self.simulate:
self.rts_pred[:,k] = exp(t_student.rvs( size=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
else:
self.rts_pred[:,k] = exp(t_student.ppf( q=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
def PlotPostRt( self, i, ax=None, color='black'):
"""Plot the i-th Rt posterior distribution."""
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
t = i+self.tau
y = linspace( 0.01, 4, num=500)
### Transformed pdf using the Jacobian y^{-1}
pdf = (y**-1) * t_student.pdf( log(y), df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) )
ax.plot( y, pdf, '-', color=color)
ax.set_ylabel("Density")
ax.set_xlabel(r'$R_{%d}$' % (i))
def PlotRts( self, color='blue', median_color='red', x_jump=1, plot_area=[0.4,2.2], alpha=0.25, csv_fnam=None, ax=None):
"""Makes a board with the Rt evolution.
csv_fnam: optional file name to save the Rts info: workdir/csv/csv_fnam.csv
           ax: Axes handle for the plot; if None, it creates one and returns it.
x_jump: put ticks every x_jump days.
           plot_area: ([0.4,2.2]), y-axis interval (Rt values) for the plot area.
"""
#self.rts already been produced after running CalculateRts
last_date = self.init_date + timedelta(self.m)
if ax == None:
fig, ax = subplots(figsize=( self.n/3, 3.5) )
### Plot the Rt's posterior quantiles
for i in range(self.n):
h = self.rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
### Plot the observed Rt's
ax.plot( exp(self.y[self.m-self.n:]), '-', color='grey')
### Plot the predictions
if self.pred >0:
for k in range(self.pred):
h = self.rts_pred[:,k]
i=self.n+k
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color='light'+color, alpha=alpha)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color='light'+color, alpha=alpha)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(0,self.n,x_jump))
ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(0,self.n,x_jump)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim(plot_area)
ax.set_yticks(arange( plot_area[0], plot_area[1], step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(self.q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = self.rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
savetxt( self.workdir + "csv/" + csv_fnam + ".csv", sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
##### Dictionary with general information for the metro zone or region to be analyzed:
##### id Name not used Population init date
ZMs = { "9-01": ["Mexico city", 2, 21.942666e6, date(2020, 2, 27)],\
"15-02": ["Toluca", 1, 2.377828e6, date(2020, 3, 7)],\
"31-01": ["Mérida", 2, 1.237697e6, date(2020, 3, 7)],\
"17-02": ["Cuernavaca", 1, 1.059521e6, date(2020, 3, 2)],\
"12-01": ["Acapulco", 2, 0.919726e6, date(2020, 3, 11)],\
"25-01": ["Culiacán", 2, 0.962871e6, date(2020, 3, 1)],\
"23-01": ["Cancun", 2, 0.867768e6, date(2020, 3, 1)]}
### The corresponding data files have two columns separated by a space: deaths and incidence.
### Each row is one day.
### The file for clave="9-01" (Mexico city) is ../data/9-01.csv, etc.
if __name__=='__main__':
rcParams.update({'font.size': 14})
close('all')
#Plot the imputed serial time distribution for covid: erlang( a=3, scale=8/3 )
fig, ax = subplots( num=30, figsize=( 4.5, 3.5))
PlotFrozenDist( erlang( a=3, scale=8/3 ), ax=ax)
    ### Plot the erlang( a=5, scale=9/5 ) alternative
PlotFrozenDist( erlang( a=5, scale=9/5 ), color='grey', ax=ax)
ax.set_xlim((0,20))
ax.grid(color='grey', linestyle='--', linewidth=0.5)
ax.set_ylabel(r"Density")
ax.set_xlabel("days")
ax.set_title("")
fig.tight_layout()
fig.savefig("../figs/Covid19_SerialTimeDist.png")
    ### Plot the Rt's estimates. Only Mérida, '31-01', and Mexico city, '9-01', are in the paper
claves = ['15-02', '17-02', '23-01', '25-01', '12-01', "31-01", '9-01']
n=60 ## Number of days to calculate the Rt's
    trim=0 ## Number of days to cut from the end of the data; negative, e.g. -10 cuts the last 10 days
x_jump = 7 ## For ploting, put ticks every x_jump days.
for i,clave in enumerate(claves):
print(clave)
### Open an instance of the Rts_AR class:
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=5, n=n)
        tst.CalculateRts() # Must be called before plotting the Rt's
### Plot the Rts:
fig, ax = subplots( num=i+1, figsize=( 8, 3.5))
### Plot Cori et al (2013) Poisson model version:
PlotRts_P( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3]+timedelta(days=4),\
n=tst.n, trim=trim, ax=ax, color='green', alpha=0.5, median_color='black')
### Plot ours:
tst.PlotRts( ax=ax, x_jump=x_jump, plot_area=[0.4,2.2], csv_fnam=clave)
ax.set_title("")
ax.set_ylabel(r"$R_t$")
ax.set_xlabel("")
ax.set_title(ZMs[clave][0] + ", Mexico")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
if clave == '9-01':
m_max = tst.m
ax.set_xlabel("day.month, 2020")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
### Figure with Cori et al (2013) posterior distributions of '31-01' and '9-01'
fig1, ax1 = subplots( num=20, nrows=1, ncols=2, figsize=( 10, 3.5))
color = [ "red", "black", "darkred"]
for i,clave in enumerate([ '31-01', '9-01']):
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
a, b = Rts_P( tst.data, tau=7, n=30, q=2)
ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data, '.-', color=color[i], label=ZMs[clave][0])
PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[i])
last_date = tst.init_date + timedelta(tst.m)
ax1[0].set_xlabel('')
ax1[0].set_xticks(range(0,tst.m,x_jump*2))
ax1[0].set_xticklabels([(last_date-timedelta(tst.m-i)).strftime("%d.%m") for i in range(0,tst.m,x_jump*2)], ha='right')
ax1[0].tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax1[0].set_xlabel("day.month, 2020")
#ax1[0].set_ylim((0,1.1*max(tst.data[-n:])))
ax1[0].grid(color='grey', linestyle='--', linewidth=0.5)
ax1[0].set_ylabel(r"Incidence")
ax1[0].legend(loc=0, shadow = False)
### Add '31-01', with incidence multiplied by 10
clave = '31-01'
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
a, b = Rts_P( tst.data*10, tau=7, n=30, q=2)
ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data*10, '.-', color=color[2])
PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[2])
ax1[1].set_xticks(arange(0.8,1.4,0.2))
ax1[1].set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
ax1[1].grid(color='grey', linestyle='--', linewidth=0.5)
fig1.tight_layout()
fig1.savefig("../figs/Rts_Compare.png")
### Comparison of results changing the serial time distribution
fig, ax = subplots( num=31, figsize=( 4.5, 3.5))
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
tst.CalculateRts()
tst.PlotPostRt( i=n, ax=ax)
#### Here we change the serial time: Any other positive density could be used.
tst = Rts_AR( clave, IP_dist=erlang( a=5, scale=9/5), init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
tst.CalculateRts()
tst.PlotPostRt( i=n, ax=ax, color='grey')
ax.set_xlim((0.5,2.5))
ax.set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
ax.grid(color='grey', linestyle='--', linewidth=0.5)
ax.set_title("")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_Compare.png" % (clave,))
"""
################# Example of use of Rts_NB_psi and Rts_NB (not documented)
T=100000
for clave in claves: #Instance of the object and run the MCMC
tst = Rts_NB_psi( clave, init_date=ZMs[clave][3], n=n)
if T > 0:
tst.RunMCMC(T=T)
### Plot the Rts
close(1)
fig, ax = subplots( num=1, figsize=( 10, 3.5) )
tst.PlotRts( ax=ax)
ax.set_title( ZMs[clave][0] + r", $R_t$ NB_psi.")
fig.savefig("../figs/%s_Rts_NB_psi.png" % (clave,))
### Plot the posterior distribution of \psi
close(3)
fig, ax = subplots( num=3, figsize=( 5,5) )
tst.PlotPostPsi(ax=ax)
ax.set_title(ZMs[clave][0])
fig.savefig("../figs/%s_Rts_NB_Post_psi.png" % clave)
        ### Fix \psi at the posterior expected value and use that for PlotRts_NB
close(2)
fig, ax = subplots( num=2, figsize=( 10, 3.5) )
psi = mean(tst.psi_samples) #Posterior mean of psi
PlotRts_NB( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3],\
n=n, psi=psi, ax=ax)
ax.set_title( ZMs[clave][0] + r", $R_t$ NB, fixed $\psi$.")
fig.savefig("../figs/%s_Rts.png" % (clave,))
"""
|
[
"scipy.stats.erlang",
"scipy.stats.gamma.rvs",
"numpy.sqrt",
"plotfrozen.PlotFrozenDist",
"numpy.log",
"scipy.stats.beta.logcdf",
"numpy.array",
"datetime.timedelta",
"scipy.stats.uniform.rvs",
"numpy.arange",
"numpy.flip",
"numpy.where",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.linspace",
"datetime.date",
"numpy.ones",
"scipy.stats.gamma.ppf",
"os.path.isfile",
"numpy.savetxt",
"scipy.stats.gamma",
"matplotlib.pyplot.rcParams.update",
"numpy.zeros",
"numpy.quantile",
"scipy.stats.gamma.logpdf",
"numpy.cumsum",
"numpy.loadtxt",
"matplotlib.pyplot.subplots"
] |
[((1114, 1138), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (1120, 1138), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((2711, 2718), 'numpy.flip', 'flip', (['w'], {}), '(w)\n', (2715, 2718), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((3574, 3598), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (3580, 3598), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((6319, 6359), 'scipy.stats.beta.logcdf', 'beta.logcdf', (['x', '(mu * psi)', '((1 - mu) * psi)'], {}), '(x, mu * psi, (1 - mu) * psi)\n', (6330, 6359), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((6403, 6427), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (6409, 6427), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((8071, 8078), 'numpy.flip', 'flip', (['w'], {}), '(w)\n', (8075, 8078), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8092, 8119), 'numpy.linspace', 'linspace', (['(0.1)', '(3.0)'], {'num': '(100)'}), '(0.1, 3.0, num=100)\n', (8100, 8119), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((9281, 9305), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (9287, 9305), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((32903, 32937), 'matplotlib.pyplot.rcParams.update', 'rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (32918, 32937), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((32942, 32954), 'matplotlib.pyplot.close', 'close', (['"""all"""'], {}), "('all')\n", (32947, 32954), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((33052, 33088), 'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(30)', 'figsize': '(4.5, 3.5)'}), '(num=30, figsize=(4.5, 3.5))\n', (33060, 33088), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((35058, 35111), 'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(20)', 'nrows': '(1)', 'ncols': '(2)', 'figsize': '(10, 3.5)'}), '(num=20, nrows=1, ncols=2, figsize=(10, 3.5))\n', (35066, 35111), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((36814, 36850), 'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(31)', 'figsize': '(4.5, 3.5)'}), '(num=31, figsize=(4.5, 3.5))\n', (36822, 36850), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((4058, 4076), 'numpy.loadtxt', 'loadtxt', (['data_fnam'], {}), '(data_fnam)\n', (4065, 4076), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((4372, 4384), 'datetime.timedelta', 'timedelta', (['m'], {}), '(m)\n', (4381, 4384), False, 'from datetime import date, timedelta\n'), ((4422, 4452), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(n / 3, 3.5)'}), '(figsize=(n / 3, 3.5))\n', (4430, 4452), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((5188, 5214), 'numpy.arange', 'arange', (['(0.4)', '(3.4)'], {'step': '(0.2)'}), '(0.4, 3.4, step=0.2)\n', (5194, 5214), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((5976, 6080), 'numpy.savetxt', 'savetxt', (['csv_fnam', 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, 
month, day, ' + q_str)", 'comments': '""""""'}), "(csv_fnam, sv, delimiter=', ', fmt='%.1f', header=\n 'year, month, day, ' + q_str, comments='')\n", (5983, 6080), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((7937, 7950), 'numpy.zeros', 'zeros', (['(q, n)'], {}), '((q, n))\n', (7942, 7950), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8362, 8376), 'numpy.zeros', 'zeros', (['R.shape'], {}), '(R.shape)\n', (8367, 8376), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8848, 8861), 'numpy.exp', 'exp', (['log_post'], {}), '(log_post)\n', (8851, 8861), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((9734, 9752), 'numpy.loadtxt', 'loadtxt', (['data_fnam'], {}), '(data_fnam)\n', (9741, 9752), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((10058, 10070), 'datetime.timedelta', 'timedelta', (['m'], {}), '(m)\n', (10067, 10070), False, 'from datetime import date, timedelta\n'), ((10108, 10138), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(n / 3, 3.5)'}), '(figsize=(n / 3, 3.5))\n', (10116, 10138), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((10865, 10891), 'numpy.arange', 'arange', (['(0.4)', '(3.4)'], {'step': '(0.2)'}), '(0.4, 3.4, step=0.2)\n', (10871, 10891), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((11653, 11757), 'numpy.savetxt', 'savetxt', (['csv_fnam', 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(csv_fnam, sv, delimiter=', ', fmt='%.1f', header=\n 'year, month, day, ' + q_str, comments='')\n", (11660, 11757), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((11865, 11889), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (11871, 11889), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((13244, 13291), 'numpy.loadtxt', 'loadtxt', (["(workdir + 'data/' + data_fnam + '.csv')"], {}), "(workdir + 'data/' + data_fnam + '.csv')\n", (13251, 13291), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((13646, 13658), 'numpy.flip', 'flip', (['self.w'], {}), '(self.w)\n', (13650, 13658), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((13797, 13840), 'scipy.stats.gamma', 'gamma', (['self.Rt_pr_a'], {'scale': '(1 / self.Rt_pr_b)'}), '(self.Rt_pr_a, scale=1 / self.Rt_pr_b)\n', (13802, 13840), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((13907, 13935), 'scipy.stats.gamma', 'gamma', (['(3)'], {'scale': '(self.psi / 3)'}), '(3, scale=self.psi / 3)\n', (13912, 13935), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((14188, 14201), 'numpy.zeros', 'zeros', (['self.m'], {}), '(self.m)\n', (14193, 14201), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((14359, 14424), 'os.path.isfile', 'os.path.isfile', (["(workdir + 'output/' + self.data_fnam + '_rts.pkl')"], {}), "(workdir + 'output/' + self.data_fnam + '_rts.pkl')\n", (14373, 14424), False, 'import os\n'), ((17421, 17473), 'plotfrozen.PlotFrozenDist', 'PlotFrozenDist', (['self.psi_prior'], {'color': '"""green"""', 'ax': 'ax'}), "(self.psi_prior, color='green', ax=ax)\n", 
(17435, 17473), False, 'from plotfrozen import PlotFrozenDist\n'), ((21043, 21067), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (21049, 21067), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((22149, 22196), 'numpy.loadtxt', 'loadtxt', (["(workdir + 'data/' + data_fnam + '.csv')"], {}), "(workdir + 'data/' + data_fnam + '.csv')\n", (22156, 22196), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((22601, 22613), 'numpy.flip', 'flip', (['self.w'], {}), '(self.w)\n', (22605, 22613), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((24358, 24371), 'numpy.zeros', 'zeros', (['self.m'], {}), '(self.m)\n', (24363, 24371), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25805, 25827), 'numpy.zeros', 'zeros', (['(self.N + 1, 7)'], {}), '((self.N + 1, 7))\n', (25810, 25827), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((28421, 28447), 'numpy.linspace', 'linspace', (['(0.01)', '(4)'], {'num': '(500)'}), '(0.01, 4, num=500)\n', (28429, 28447), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((32195, 32212), 'datetime.date', 'date', (['(2020)', '(2)', '(27)'], {}), '(2020, 2, 27)\n', (32199, 32212), False, 'from datetime import date, timedelta\n'), ((32273, 32289), 'datetime.date', 'date', (['(2020)', '(3)', '(7)'], {}), '(2020, 3, 7)\n', (32277, 32289), False, 'from datetime import date, timedelta\n'), ((32352, 32368), 'datetime.date', 'date', (['(2020)', '(3)', '(7)'], {}), '(2020, 3, 7)\n', (32356, 32368), False, 'from datetime import date, timedelta\n'), ((32429, 32445), 'datetime.date', 'date', (['(2020)', '(3)', '(2)'], {}), '(2020, 3, 2)\n', (32433, 32445), False, 'from datetime import date, timedelta\n'), ((32507, 32524), 'datetime.date', 'date', (['(2020)', '(3)', '(11)'], {}), '(2020, 3, 11)\n', (32511, 32524), False, 'from datetime import date, timedelta\n'), ((32586, 32602), 'datetime.date', 'date', (['(2020)', '(3)', '(1)'], {}), '(2020, 3, 1)\n', (32590, 32602), False, 'from datetime import date, timedelta\n'), ((32663, 32679), 'datetime.date', 'date', (['(2020)', '(3)', '(1)'], {}), '(2020, 3, 1)\n', (32667, 32679), False, 'from datetime import date, timedelta\n'), ((33111, 33135), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (33117, 33135), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((33219, 33243), 'scipy.stats.erlang', 'erlang', ([], {'a': '(5)', 'scale': '(9 / 5)'}), '(a=5, scale=9 / 5)\n', (33225, 33243), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((34194, 34231), 'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(i + 1)', 'figsize': '(8, 3.5)'}), '(num=i + 1, figsize=(8, 3.5))\n', (34202, 34231), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((35575, 35591), 'datetime.timedelta', 'timedelta', (['tst.m'], {}), '(tst.m)\n', (35584, 35591), False, 'from datetime import date, timedelta\n'), ((36334, 36365), 'numpy.arange', 'arange', (['(m_max - tst.m)', 'm_max', '(1)'], {}), '(m_max - tst.m, m_max, 1)\n', (36340, 36365), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((36420, 36445), 'scipy.stats.gamma', 'gamma', (['a[-1]'], {'scale': 'b[-1]'}), '(a[-1], scale=b[-1])\n', (36425, 36445), False, 'from 
scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((36497, 36518), 'numpy.arange', 'arange', (['(0.8)', '(1.4)', '(0.2)'], {}), '(0.8, 1.4, 0.2)\n', (36503, 36518), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2300, 2308), 'numpy.array', 'array', (['q'], {}), '(q)\n', (2305, 2308), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2531, 2544), 'numpy.zeros', 'zeros', (['(q, n)'], {}), '((q, n))\n', (2536, 2544), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2577, 2590), 'numpy.zeros', 'zeros', (['(q, n)'], {}), '((q, n))\n', (2582, 2590), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2669, 2685), 'numpy.arange', 'arange', (['(0)', '(m + 1)'], {}), '(0, m + 1)\n', (2675, 2685), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((3424, 3480), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['q', '(Rt_pr_a + S2)'], {'scale': '(1 / (S1 + 1 / Rt_pr_b))'}), '(q, Rt_pr_a + S2, scale=1 / (S1 + 1 / Rt_pr_b))\n', (3433, 3480), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((5440, 5457), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5449, 5457), False, 'from datetime import date, timedelta\n'), ((7726, 7734), 'numpy.array', 'array', (['q'], {}), '(q)\n', (7731, 7734), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8029, 8045), 'numpy.arange', 'arange', (['(0)', '(m + 1)'], {}), '(0, m + 1)\n', (8035, 8045), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8791, 8834), 'scipy.stats.gamma.logpdf', 'gamma.logpdf', (['R', 'Rt_pr_a'], {'scale': '(1 / Rt_pr_b)'}), '(R, Rt_pr_a, scale=1 / Rt_pr_b)\n', (8803, 8834), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((8907, 8918), 'numpy.cumsum', 'cumsum', (['pdf'], {}), '(pdf)\n', (8913, 8918), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((8963, 8976), 'scipy.stats.uniform.rvs', 'uniform.rvs', ([], {}), '()\n', (8974, 8976), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((11117, 11134), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (11126, 11134), False, 'from datetime import date, timedelta\n'), ((16109, 16121), 'numpy.ones', 'ones', (['self.n'], {}), '(self.n)\n', (16113, 16121), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((17387, 17411), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (17395, 17411), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((17641, 17665), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (17649, 17665), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((18156, 18174), 'numpy.zeros', 'zeros', (['(q, self.n)'], {}), '((q, self.n))\n', (18161, 18174), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((19021, 19038), 'datetime.timedelta', 'timedelta', (['self.m'], {}), '(self.m)\n', (19030, 19038), False, 'from datetime import date, timedelta\n'), ((19093, 19128), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(self.n / 3, 3.5)'}), '(figsize=(self.n / 3, 3.5))\n', (19101, 19128), False, 'from 
matplotlib.pyplot import subplots, rcParams, close\n'), ((19953, 19979), 'numpy.arange', 'arange', (['(0.4)', '(3.4)'], {'step': '(0.2)'}), '(0.4, 3.4, step=0.2)\n', (19959, 19979), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((20829, 20933), 'numpy.savetxt', 'savetxt', (['csv_fnam', 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(csv_fnam, sv, delimiter=', ', fmt='%.1f', header=\n 'year, month, day, ' + q_str, comments='')\n", (20836, 20933), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((24656, 24678), 'numpy.log', 'log', (['(self.data + 1e-06)'], {}), '(self.data + 1e-06)\n', (24659, 24678), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((24680, 24704), 'numpy.log', 'log', (['(self.Gammak + 1e-06)'], {}), '(self.Gammak + 1e-06)\n', (24683, 24704), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25518, 25536), 'numpy.zeros', 'zeros', (['(q, self.n)'], {}), '((q, self.n))\n', (25523, 25536), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25566, 25587), 'numpy.zeros', 'zeros', (['(q, self.pred)'], {}), '((q, self.pred))\n', (25571, 25587), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((27344, 27365), 'numpy.zeros', 'zeros', (['(self.pred, 2)'], {}), '((self.pred, 2))\n', (27349, 27365), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((28360, 28384), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (28368, 28384), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((29344, 29361), 'datetime.timedelta', 'timedelta', (['self.m'], {}), '(self.m)\n', (29353, 29361), False, 'from datetime import date, timedelta\n'), ((29416, 29451), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(self.n / 3, 3.5)'}), '(figsize=(self.n / 3, 3.5))\n', (29424, 29451), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((29877, 29906), 'numpy.exp', 'exp', (['self.y[self.m - self.n:]'], {}), '(self.y[self.m - self.n:])\n', (29880, 29906), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((30914, 30958), 'numpy.arange', 'arange', (['plot_area[0]', 'plot_area[1]'], {'step': '(0.2)'}), '(plot_area[0], plot_area[1], step=0.2)\n', (30920, 30958), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((31808, 31945), 'numpy.savetxt', 'savetxt', (["(self.workdir + 'csv/' + csv_fnam + '.csv')", 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(self.workdir + 'csv/' + csv_fnam + '.csv', sv, delimiter=', ', fmt=\n '%.1f', header='year, month, day, ' + q_str, comments='')\n", (31815, 31945), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((35380, 35411), 'numpy.arange', 'arange', (['(m_max - tst.m)', 'm_max', '(1)'], {}), '(m_max - tst.m, m_max, 1)\n', (35386, 35411), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((35488, 35513), 'scipy.stats.gamma', 'gamma', (['a[-1]'], {'scale': 'b[-1]'}), '(a[-1], scale=b[-1])\n', (35493, 35513), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), 
((37119, 37143), 'scipy.stats.erlang', 'erlang', ([], {'a': '(5)', 'scale': '(9 / 5)'}), '(a=5, scale=9 / 5)\n', (37125, 37143), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((3325, 3386), 'scipy.stats.gamma.rvs', 'gamma.rvs', (['(Rt_pr_a + S2)'], {'scale': '(1 / (S1 + 1 / Rt_pr_b))', 'size': 'q'}), '(Rt_pr_a + S2, scale=1 / (S1 + 1 / Rt_pr_b), size=q)\n', (3334, 3386), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((5415, 5427), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (5424, 5427), False, 'from datetime import date, timedelta\n'), ((11092, 11104), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (11101, 11104), False, 'from datetime import date, timedelta\n'), ((13576, 13597), 'numpy.arange', 'arange', (['(0)', '(self.m + 1)'], {}), '(0, self.m + 1)\n', (13582, 13597), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((16130, 16154), 'scipy.stats.uniform.rvs', 'uniform.rvs', ([], {'size': 'self.n'}), '(size=self.n)\n', (16141, 16154), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((17956, 17964), 'numpy.array', 'array', (['q'], {}), '(q)\n', (17961, 17964), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((18519, 18568), 'numpy.quantile', 'quantile', (['self.twalk.Output[self.burnin:, i]'], {'q': 'q'}), '(self.twalk.Output[self.burnin:, i], q=q)\n', (18527, 18568), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((20230, 20247), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (20239, 20247), False, 'from datetime import date, timedelta\n'), ((22531, 22552), 'numpy.arange', 'arange', (['(0)', '(self.m + 1)'], {}), '(0, self.m + 1)\n', (22537, 22552), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25253, 25261), 'numpy.array', 'array', (['q'], {}), '(q)\n', (25258, 25261), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((28542, 28548), 'numpy.log', 'log', (['y'], {}), '(y)\n', (28545, 28548), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((31209, 31226), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (31218, 31226), False, 'from datetime import date, timedelta\n'), ((36225, 36242), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (36234, 36242), False, 'from datetime import date, timedelta\n'), ((36902, 36919), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (36911, 36919), False, 'from datetime import date, timedelta\n'), ((37168, 37185), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (37177, 37185), False, 'from datetime import date, timedelta\n'), ((20200, 20217), 'datetime.timedelta', 'timedelta', (['self.n'], {}), '(self.n)\n', (20209, 20217), False, 'from datetime import date, timedelta\n'), ((28601, 28627), 'numpy.sqrt', 'sqrt', (['self.hiper[t + 1, 6]'], {}), '(self.hiper[t + 1, 6])\n', (28605, 28627), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((31179, 31196), 'datetime.timedelta', 'timedelta', (['self.n'], {}), '(self.n)\n', (31188, 31196), False, 'from datetime import date, timedelta\n'), ((34039, 34056), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (34048, 34056), False, 'from datetime import date, 
timedelta\n'), ((34362, 34379), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (34371, 34379), False, 'from datetime import date, timedelta\n'), ((35266, 35283), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (35275, 35283), False, 'from datetime import date, timedelta\n'), ((4895, 4911), 'datetime.timedelta', 'timedelta', (['(n - i)'], {}), '(n - i)\n', (4904, 4911), False, 'from datetime import date, timedelta\n'), ((9007, 9021), 'numpy.where', 'where', (['(cdf < u)'], {}), '(cdf < u)\n', (9012, 9021), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((10572, 10588), 'datetime.timedelta', 'timedelta', (['(n - i)'], {}), '(n - i)\n', (10581, 10588), False, 'from datetime import date, timedelta\n'), ((35704, 35724), 'datetime.timedelta', 'timedelta', (['(tst.m - i)'], {}), '(tst.m - i)\n', (35713, 35724), False, 'from datetime import date, timedelta\n'), ((36564, 36576), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (36573, 36576), False, 'from datetime import date, timedelta\n'), ((37349, 37361), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (37358, 37361), False, 'from datetime import date, timedelta\n'), ((9116, 9132), 'numpy.where', 'where', (['(cdf < qua)'], {}), '(cdf < qua)\n', (9121, 9132), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((19626, 19647), 'datetime.timedelta', 'timedelta', (['(self.n - i)'], {}), '(self.n - i)\n', (19635, 19647), False, 'from datetime import date, timedelta\n'), ((30578, 30599), 'datetime.timedelta', 'timedelta', (['(self.n - i)'], {}), '(self.n - i)\n', (30587, 30599), False, 'from datetime import date, timedelta\n'), ((27064, 27090), 'numpy.sqrt', 'sqrt', (['self.hiper[t + 1, 6]'], {}), '(self.hiper[t + 1, 6])\n', (27068, 27090), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((27240, 27266), 'numpy.sqrt', 'sqrt', (['self.hiper[t + 1, 6]'], {}), '(self.hiper[t + 1, 6])\n', (27244, 27266), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((27982, 28009), 'numpy.sqrt', 'sqrt', (['self.pred_hiper[k, 1]'], {}), '(self.pred_hiper[k, 1])\n', (27986, 28009), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((28159, 28186), 'numpy.sqrt', 'sqrt', (['self.pred_hiper[k, 1]'], {}), '(self.pred_hiper[k, 1])\n', (28163, 28186), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n')]
|
import numpy as np
from polynomial_regression import PolynomialRegression
from generate_regression_data import generate_regression_data
from metrics import mean_squared_error # mse
from math import log # use if scale too large to see error
from k_nearest_neighbor import KNearestNeighbor
try:
import matplotlib.pyplot as plt
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Number 7, split A
degree = 4
N = 100
x, y = generate_regression_data(degree, N, amount_of_noise=0.1)
rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
x_training, y_training = x[rand_sampl[:10]], y[rand_sampl[:10]]
x_test, y_test = x[rand_sampl[10:]], y[rand_sampl[10:]]
plots = []
mse_training = []
mse_test = []
# to 9 degrees
for i in range(9):
poly = PolynomialRegression(i)
poly.fit(x_training, y_training)
poly.visualize(x_training, y_training, path=f"../plots_N7_splitA/training_plot_degree_{i}",
title=f"Training Plot Degree {i}")
# test will be red
poly.visualize(x_test, y_test, path=f"../plots_N7_splitA/test_plot_degree_{i}",
title=f"Test Plot Degree {i}", color='r')
y_hat_training = poly.predict(x_training) # predicted value
mse_training.append(mean_squared_error(y_training, y_hat_training))
y_hat_test = poly.predict(x_test)
mse_test.append(mean_squared_error(y_test, y_hat_test))
plots.append(poly)
plt.clf() # clear figure
plt.figure()
# log was needed to scale
plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
plt.title("Error as a Function of Degree")
plt.xlabel("degree")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N7_splitA/error_as_a_function_of_degree.png")
# get the two lowest errors
low_test_err_degree = mse_test.index(min(mse_test))
low_training_err_degree = mse_training.index(min(mse_training))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
plt.title("Lowest Training and Test Errors")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N7_splitA/lowest_training_and_test_error.png")
# Number 10, split A
    k = [1, 3, 5, 7, 9]  # list (not set) so the order of k values is deterministic
kplots = []
mse_training_k = []
mse_test_k = []
kx_training = np.reshape(x_training, (-1,2))
ky_training = np.reshape(y_training, (-1,2))
kx_test = np.reshape(x_test, (-1, 2))
ky_test = np.reshape(y_test, (-1,2))
#print(kx_training)
#print(kx_training.shape)
for i in k:
knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
knn.fit(kx_training, ky_training)
#print(f"x_training = {x_training.shape}")
k_training = knn.predict(kx_training)
mse_training_k.append(mean_squared_error(ky_training, k_training))
k_test = knn.predict(kx_test)
mse_test_k.append(mean_squared_error(ky_test, k_test))
kplots.append(knn)
plt.clf() # clear figure
plt.figure()
    plt.plot(k, mse_training_k, label="training error")
    plt.plot(k, mse_test_k, label="test error")
plt.title("Error as a Function of k")
plt.xlabel("k")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitA/error_as_a_function_of_k.png")
low_test_err_k = mse_test_k.index(min(mse_test_k))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
    # plot the best-k model's predictions over the reshaped test inputs
    k_best_pred = kplots[low_test_err_k].predict(kx_test)
    plt.scatter(kx_test.flatten(), k_best_pred.flatten(), c='r',
                label=f"lowest test error predictions with k = {k[low_test_err_k]}")
plt.title("Lowest Test Error")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitA/lowest_test_error.png")
# Number 9, split B
rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
x_training, y_training = x[rand_sampl[:50]], y[rand_sampl[:50]]
x_test, y_test = x[rand_sampl[50:]], y[rand_sampl[50:]]
plots = []
mse_training = []
mse_test = []
# to 9 degrees
for i in range(9):
poly = PolynomialRegression(i)
poly.fit(x_training, y_training)
poly.visualize(x_training, y_training, path=f"../plots_N9_splitB/training_plot_degree_{i}",
title=f"Training Plot Degree {i}")
# test will be red
poly.visualize(x_test, y_test, path=f"../plots_N9_splitB/test_plot_degree_{i}",
title=f"Test Plot Degree {i}", color='r')
y_hat_training = poly.predict(x_training) # predicted value
mse_training.append(mean_squared_error(y_training, y_hat_training))
y_hat_test = poly.predict(x_test)
mse_test.append(mean_squared_error(y_test, y_hat_test))
plots.append(poly)
plt.clf() # clear figure
plt.figure()
# log was needed to scale
plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
plt.title("Error as a Function of Degree")
plt.xlabel("degree")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N9_splitB/error_as_a_function_of_degree.png")
# get the two lowest errors
low_test_err_degree = mse_test.index(min(mse_test))
low_training_err_degree = mse_training.index(min(mse_training))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
plt.title("Lowest Training and Test Errors")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N9_splitB/lowest_training_and_test_error.png")
# Number 10, split B
    k = [1, 3, 5, 7, 9]  # list (not set) so the order of k values is deterministic
kplots = []
mse_training_k = []
mse_test_k = []
kx_training = np.reshape(x_training, (-1,2))
ky_training = np.reshape(y_training, (-1,2))
kx_test = np.reshape(x_test, (-1, 2))
ky_test = np.reshape(y_test, (-1,2))
#print(kx_training)
#print(kx_training.shape)
for i in k:
knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
knn.fit(kx_training, ky_training)
#print(f"x_training = {x_training.shape}")
k_training = knn.predict(kx_training)
mse_training_k.append(mean_squared_error(ky_training, k_training))
k_test = knn.predict(kx_test)
mse_test_k.append(mean_squared_error(ky_test, k_test))
        kplots.append(knn)
plt.clf() # clear figure
plt.figure()
    plt.plot(k, mse_training_k, label="training error")
    plt.plot(k, mse_test_k, label="test error")
plt.title("Error as a Function of k")
plt.xlabel("k")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitB/error_as_a_function_of_k.png")
low_test_err_k = mse_test_k.index(min(mse_test_k))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
    # plot the best-k model's predictions over the reshaped test inputs
    k_best_pred = kplots[low_test_err_k].predict(kx_test)
    plt.scatter(kx_test.flatten(), k_best_pred.flatten(), c='r',
                label=f"lowest test error predictions with k = {k[low_test_err_k]}")
plt.title("Lowest Test Error")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitB/lowest_test_error.png")
|
[
"generate_regression_data.generate_regression_data",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"numpy.random.choice",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"polynomial_regression.PolynomialRegression",
"numpy.sort",
"k_nearest_neighbor.KNearestNeighbor",
"math.log",
"matplotlib.pyplot.figure",
"metrics.mean_squared_error",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend"
] |
[((637, 693), 'generate_regression_data.generate_regression_data', 'generate_regression_data', (['degree', 'N'], {'amount_of_noise': '(0.1)'}), '(degree, N, amount_of_noise=0.1)\n', (661, 693), False, 'from generate_regression_data import generate_regression_data\n'), ((712, 749), 'numpy.random.choice', 'np.random.choice', (['N', 'N'], {'replace': '(False)'}), '(N, N, replace=False)\n', (728, 749), True, 'import numpy as np\n'), ((1756, 1765), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1763, 1765), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1798), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1796, 1798), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2045), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of Degree"""'], {}), "('Error as a Function of Degree')\n", (2012, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2070), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""degree"""'], {}), "('degree')\n", (2060, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2094), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (2085, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2111), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2109, 2111), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2130), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2124, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2202), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N7_splitA/error_as_a_function_of_degree.png"""'], {}), "('../plots_N7_splitA/error_as_a_function_of_degree.png')\n", (2146, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2374), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2372, 2374), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2407), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2405, 2407), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2447), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (2423, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2834), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Training and Test Errors"""'], {}), "('Lowest Training and Test Errors')\n", (2799, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2854), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2849, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2869, 2874), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2891), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2889, 2891), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2910), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2904, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2983), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N7_splitA/lowest_training_and_test_error.png"""'], {}), "('../plots_N7_splitA/lowest_training_and_test_error.png')\n", (2926, 2983), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3144), 'numpy.reshape', 'np.reshape', (['x_training', '(-1, 2)'], {}), '(x_training, (-1, 2))\n', (3123, 3144), True, 'import numpy as np\n'), ((3162, 3193), 'numpy.reshape', 'np.reshape', (['y_training', '(-1, 2)'], {}), '(y_training, (-1, 2))\n', (3172, 3193), True, 'import numpy as np\n'), ((3207, 3234), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 
2)'], {}), '(x_test, (-1, 2))\n', (3217, 3234), True, 'import numpy as np\n'), ((3249, 3276), 'numpy.reshape', 'np.reshape', (['y_test', '(-1, 2)'], {}), '(y_test, (-1, 2))\n', (3259, 3276), True, 'import numpy as np\n'), ((3781, 3790), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3788, 3790), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3821, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3996, 4033), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of k"""'], {}), "('Error as a Function of k')\n", (4005, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (4048, 4053), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4077), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (4068, 4077), True, 'import matplotlib.pyplot as plt\n'), ((4082, 4094), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4092, 4094), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4113), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4107, 4113), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4181), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitA/error_as_a_function_of_k.png"""'], {}), "('../plots_N10_splitA/error_as_a_function_of_k.png')\n", (4129, 4181), True, 'import matplotlib.pyplot as plt\n'), ((4243, 4252), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4250, 4252), True, 'import matplotlib.pyplot as plt\n'), ((4273, 4285), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4283, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4290, 4325), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (4301, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4460, 4490), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Test Error"""'], {}), "('Lowest Test Error')\n", (4469, 4490), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4510), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4505, 4510), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (4525, 4530), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4547), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4545, 4547), True, 'import matplotlib.pyplot as plt\n'), ((4552, 4566), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4560, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4627), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitA/lowest_test_error.png"""'], {}), "('../plots_N10_splitA/lowest_test_error.png')\n", (4582, 4627), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4716), 'numpy.random.choice', 'np.random.choice', (['N', 'N'], {'replace': '(False)'}), '(N, N, replace=False)\n', (4695, 4716), True, 'import numpy as np\n'), ((5723, 5732), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5730, 5732), True, 'import matplotlib.pyplot as plt\n'), ((5753, 5765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5763, 5765), True, 'import matplotlib.pyplot as plt\n'), ((5970, 6012), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of Degree"""'], {}), "('Error as a Function of Degree')\n", (5979, 6012), True, 'import matplotlib.pyplot as plt\n'), ((6017, 6037), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""degree"""'], {}), "('degree')\n", (6027, 6037), True, 'import matplotlib.pyplot as plt\n'), ((6042, 6061), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (6052, 6061), True, 'import matplotlib.pyplot as plt\n'), ((6066, 6078), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6076, 6078), True, 'import matplotlib.pyplot as plt\n'), ((6083, 6097), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6091, 6097), True, 'import matplotlib.pyplot as plt\n'), ((6102, 6169), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N9_splitB/error_as_a_function_of_degree.png"""'], {}), "('../plots_N9_splitB/error_as_a_function_of_degree.png')\n", (6113, 6169), True, 'import matplotlib.pyplot as plt\n'), ((6332, 6341), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6339, 6341), True, 'import matplotlib.pyplot as plt\n'), ((6362, 6374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6372, 6374), True, 'import matplotlib.pyplot as plt\n'), ((6379, 6414), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (6390, 6414), True, 'import matplotlib.pyplot as plt\n'), ((6757, 6801), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Training and Test Errors"""'], {}), "('Lowest Training and Test Errors')\n", (6766, 6801), True, 'import matplotlib.pyplot as plt\n'), ((6806, 6821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (6816, 6821), True, 'import matplotlib.pyplot as plt\n'), ((6826, 6841), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (6836, 6841), True, 'import matplotlib.pyplot as plt\n'), ((6846, 6858), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6856, 6858), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6877), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6871, 6877), True, 'import matplotlib.pyplot as plt\n'), ((6882, 6950), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N9_splitB/lowest_training_and_test_error.png"""'], {}), "('../plots_N9_splitB/lowest_training_and_test_error.png')\n", (6893, 6950), True, 'import matplotlib.pyplot as plt\n'), ((7076, 7107), 'numpy.reshape', 'np.reshape', (['x_training', '(-1, 2)'], {}), '(x_training, (-1, 2))\n', (7086, 7107), True, 'import numpy as np\n'), ((7125, 7156), 'numpy.reshape', 'np.reshape', (['y_training', '(-1, 2)'], {}), '(y_training, (-1, 2))\n', (7135, 7156), True, 'import numpy as np\n'), ((7170, 7197), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 2)'], {}), '(x_test, (-1, 2))\n', (7180, 7197), True, 'import numpy as np\n'), ((7212, 7239), 'numpy.reshape', 'np.reshape', (['y_test', '(-1, 2)'], {}), '(y_test, (-1, 2))\n', (7222, 7239), True, 'import numpy as np\n'), ((7745, 7754), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7752, 7754), True, 'import matplotlib.pyplot as plt\n'), ((7775, 7787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7785, 7787), True, 'import matplotlib.pyplot as plt\n'), ((7960, 7997), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of k"""'], {}), "('Error as a Function of k')\n", (7969, 7997), True, 'import matplotlib.pyplot as plt\n'), ((8002, 8017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (8012, 8017), True, 'import matplotlib.pyplot as plt\n'), ((8022, 8041), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", 
(8032, 8041), True, 'import matplotlib.pyplot as plt\n'), ((8046, 8058), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8056, 8058), True, 'import matplotlib.pyplot as plt\n'), ((8063, 8077), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8071, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8082, 8145), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitB/error_as_a_function_of_k.png"""'], {}), "('../plots_N10_splitB/error_as_a_function_of_k.png')\n", (8093, 8145), True, 'import matplotlib.pyplot as plt\n'), ((8207, 8216), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8214, 8216), True, 'import matplotlib.pyplot as plt\n'), ((8237, 8249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8247, 8249), True, 'import matplotlib.pyplot as plt\n'), ((8254, 8289), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (8265, 8289), True, 'import matplotlib.pyplot as plt\n'), ((8437, 8467), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Test Error"""'], {}), "('Lowest Test Error')\n", (8446, 8467), True, 'import matplotlib.pyplot as plt\n'), ((8472, 8487), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8482, 8487), True, 'import matplotlib.pyplot as plt\n'), ((8492, 8507), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (8502, 8507), True, 'import matplotlib.pyplot as plt\n'), ((8512, 8524), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8522, 8524), True, 'import matplotlib.pyplot as plt\n'), ((8529, 8543), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8537, 8543), True, 'import matplotlib.pyplot as plt\n'), ((8548, 8604), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitB/lowest_test_error.png"""'], {}), "('../plots_N10_splitB/lowest_test_error.png')\n", (8559, 8604), True, 'import matplotlib.pyplot as plt\n'), ((488, 509), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (502, 509), False, 'import matplotlib\n'), ((1020, 1043), 'polynomial_regression.PolynomialRegression', 'PolynomialRegression', (['i'], {}), '(i)\n', (1040, 1043), False, 'from polynomial_regression import PolynomialRegression\n'), ((2461, 2511), 'numpy.sort', 'np.sort', (['plots[low_training_err_degree].X_training'], {}), '(plots[low_training_err_degree].X_training)\n', (2468, 2511), True, 'import numpy as np\n'), ((2638, 2684), 'numpy.sort', 'np.sort', (['plots[low_test_err_degree].X_training'], {}), '(plots[low_test_err_degree].X_training)\n', (2645, 2684), True, 'import numpy as np\n'), ((3361, 3429), 'k_nearest_neighbor.KNearestNeighbor', 'KNearestNeighbor', (['i'], {'distance_measure': '"""euclidean"""', 'aggregator': '"""mean"""'}), "(i, distance_measure='euclidean', aggregator='mean')\n", (3377, 3429), False, 'from k_nearest_neighbor import KNearestNeighbor\n'), ((4339, 4370), 'numpy.sort', 'np.sort', (['kplots[low_test_err_k]'], {}), '(kplots[low_test_err_k])\n', (4346, 4370), True, 'import numpy as np\n'), ((4987, 5010), 'polynomial_regression.PolynomialRegression', 'PolynomialRegression', (['i'], {}), '(i)\n', (5007, 5010), False, 'from polynomial_regression import PolynomialRegression\n'), ((6428, 6478), 'numpy.sort', 'np.sort', (['plots[low_training_err_degree].X_training'], {}), '(plots[low_training_err_degree].X_training)\n', (6435, 6478), True, 'import numpy as np\n'), ((6605, 6651), 'numpy.sort', 'np.sort', 
(['plots[low_test_err_degree].X_training'], {}), '(plots[low_test_err_degree].X_training)\n', (6612, 6651), True, 'import numpy as np\n'), ((7324, 7392), 'k_nearest_neighbor.KNearestNeighbor', 'KNearestNeighbor', (['i'], {'distance_measure': '"""euclidean"""', 'aggregator': '"""mean"""'}), "(i, distance_measure='euclidean', aggregator='mean')\n", (7340, 7392), False, 'from k_nearest_neighbor import KNearestNeighbor\n'), ((8303, 8345), 'numpy.sort', 'np.sort', (['kplots[low_test_err_k].X_training'], {}), '(kplots[low_test_err_k].X_training)\n', (8310, 8345), True, 'import numpy as np\n'), ((1570, 1616), 'metrics.mean_squared_error', 'mean_squared_error', (['y_training', 'y_hat_training'], {}), '(y_training, y_hat_training)\n', (1588, 1616), False, 'from metrics import mean_squared_error\n'), ((1684, 1722), 'metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_hat_test'], {}), '(y_test, y_hat_test)\n', (1702, 1722), False, 'from metrics import mean_squared_error\n'), ((1853, 1873), 'math.log', 'log', (['mse_training[i]'], {}), '(mse_training[i])\n', (1856, 1873), False, 'from math import log\n'), ((1942, 1958), 'math.log', 'log', (['mse_test[i]'], {}), '(mse_test[i])\n', (1945, 1958), False, 'from math import log\n'), ((3599, 3642), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_training', 'k_training'], {}), '(ky_training, k_training)\n', (3617, 3642), False, 'from metrics import mean_squared_error\n'), ((3708, 3743), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_test', 'k_test'], {}), '(ky_test, k_test)\n', (3726, 3743), False, 'from metrics import mean_squared_error\n'), ((5537, 5583), 'metrics.mean_squared_error', 'mean_squared_error', (['y_training', 'y_hat_training'], {}), '(y_training, y_hat_training)\n', (5555, 5583), False, 'from metrics import mean_squared_error\n'), ((5651, 5689), 'metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_hat_test'], {}), '(y_test, y_hat_test)\n', (5669, 5689), False, 'from metrics import mean_squared_error\n'), ((5820, 5840), 'math.log', 'log', (['mse_training[i]'], {}), '(mse_training[i])\n', (5823, 5840), False, 'from math import log\n'), ((5909, 5925), 'math.log', 'log', (['mse_test[i]'], {}), '(mse_test[i])\n', (5912, 5925), False, 'from math import log\n'), ((7562, 7605), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_training', 'k_training'], {}), '(ky_training, k_training)\n', (7580, 7605), False, 'from metrics import mean_squared_error\n'), ((7671, 7706), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_test', 'k_test'], {}), '(ky_test, k_test)\n', (7689, 7706), False, 'from metrics import mean_squared_error\n')]
|
#
# Solver class using Scipy's adaptive time stepper
#
import casadi
import pybamm
import scipy.integrate as it
import numpy as np
class ScipySolver(pybamm.BaseSolver):
    """Solve a discretised model, using scipy.integrate.solve_ivp.
Parameters
----------
method : str, optional
The method to use in solve_ivp (default is "BDF")
rtol : float, optional
The relative tolerance for the solver (default is 1e-6).
atol : float, optional
The absolute tolerance for the solver (default is 1e-6).
"""
def __init__(self, method="BDF", rtol=1e-6, atol=1e-6):
super().__init__(method, rtol, atol)
self.ode_solver = True
self.name = "Scipy solver ({})".format(method)
pybamm.citations.register("virtanen2020scipy")
def _integrate(self, model, t_eval, inputs=None):
"""
Solve a model defined by dydt with initial conditions y0.
Parameters
----------
model : :class:`pybamm.BaseModel`
The model whose solution to calculate.
t_eval : :class:`numpy.array`, size (k,)
The times at which to compute the solution
inputs : dict, optional
Any input parameters to pass to the model when solving
Returns
-------
object
An object containing the times and values of the solution, as well as
various diagnostic messages.
"""
if model.convert_to_format == "casadi":
inputs = casadi.vertcat(*[x for x in inputs.values()])
extra_options = {"rtol": self.rtol, "atol": self.atol}
# check for user-supplied Jacobian
implicit_methods = ["Radau", "BDF", "LSODA"]
if np.any([self.method in implicit_methods]):
if model.jacobian_eval:
extra_options.update(
{"jac": lambda t, y: model.jacobian_eval(t, y, inputs)}
)
# make events terminal so that the solver stops when they are reached
if model.terminate_events_eval:
def event_wrapper(event):
def event_fn(t, y):
return event(t, y, inputs)
event_fn.terminal = True
return event_fn
events = [event_wrapper(event) for event in model.terminate_events_eval]
extra_options.update({"events": events})
sol = it.solve_ivp(
lambda t, y: model.rhs_eval(t, y, inputs),
(t_eval[0], t_eval[-1]),
model.y0,
t_eval=t_eval,
method=self.method,
dense_output=True,
**extra_options
)
if sol.success:
# Set the reason for termination
if sol.message == "A termination event occurred.":
termination = "event"
t_event = []
for time in sol.t_events:
if time.size > 0:
t_event = np.append(t_event, np.max(time))
t_event = np.array([np.max(t_event)])
y_event = sol.sol(t_event)
elif sol.message.startswith("The solver successfully reached the end"):
termination = "final time"
t_event = None
y_event = np.array(None)
return pybamm.Solution(sol.t, sol.y, t_event, y_event, termination)
else:
raise pybamm.SolverError(sol.message)
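

# Usage sketch (illustrative, not part of this module): the solver above is
# normally handed to pybamm's Simulation API rather than called directly.
# The model choice and time window below are placeholder assumptions.
#
#     solver = pybamm.ScipySolver(method="BDF", rtol=1e-6, atol=1e-8)
#     sim = pybamm.Simulation(pybamm.lithium_ion.SPM(), solver=solver)
#     solution = sim.solve([0, 3600])  # simulate one hour of operation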
|
[
"pybamm.SolverError",
"pybamm.citations.register",
"numpy.any",
"numpy.max",
"numpy.array",
"pybamm.Solution"
] |
[((748, 794), 'pybamm.citations.register', 'pybamm.citations.register', (['"""virtanen2020scipy"""'], {}), "('virtanen2020scipy')\n", (773, 794), False, 'import pybamm\n'), ((1734, 1775), 'numpy.any', 'np.any', (['[self.method in implicit_methods]'], {}), '([self.method in implicit_methods])\n', (1740, 1775), True, 'import numpy as np\n'), ((3332, 3392), 'pybamm.Solution', 'pybamm.Solution', (['sol.t', 'sol.y', 't_event', 'y_event', 'termination'], {}), '(sol.t, sol.y, t_event, y_event, termination)\n', (3347, 3392), False, 'import pybamm\n'), ((3425, 3456), 'pybamm.SolverError', 'pybamm.SolverError', (['sol.message'], {}), '(sol.message)\n', (3443, 3456), False, 'import pybamm\n'), ((3298, 3312), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (3306, 3312), True, 'import numpy as np\n'), ((3053, 3068), 'numpy.max', 'np.max', (['t_event'], {}), '(t_event)\n', (3059, 3068), True, 'import numpy as np\n'), ((3003, 3015), 'numpy.max', 'np.max', (['time'], {}), '(time)\n', (3009, 3015), True, 'import numpy as np\n')]
|
import sys, os
import nltk
import numpy as np
class Patch():
def __init__(self):
self.id = -1
self.parent_code = ''
self.child_code = ''
self.patches = []
self.verdict = False
self.distance = 0
self.verdict_token = False
pass
def __repr__(self):
return str(self.id) + '\n' + ' '.join(self.parent_code) + '\n' + ' '.join(self.child_code) \
+ '\n' + str(self.distance) + '\n' + str(self.verdict)
def read_patch(file_path, size):
num_line_per_patch = size * 2 + 9
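    # Record layout (as parsed below): each patch block spans num_line_per_patch
    # lines -- line 0 is the example id, line 1 the parent code tokens, line 3
    # the child (target) code tokens, lines 7 .. 7 + 2*size - 1 alternate between
    # a generated candidate and its edit distance, and the second-to-last line
    # holds the True/False verdict.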
patches_lines = []
with open(file_path) as f:
patch = []
for ln, line in enumerate(f):
line = line.strip()
if (ln % num_line_per_patch == 0) and (ln != 0):
patches_lines.append([l for l in patch])
patch = []
patch.append(line)
patches_lines.append(patch)
patches = []
for lines in patches_lines:
ex = Patch()
ex.id = int(lines[0])
ex.parent_code = [token.strip() for token in lines[1].split()]
ex.child_code = [token.strip() for token in lines[3].split()]
ex.patches = []
for gen_idx in range(size):
cidx = gen_idx * 2
didx = cidx + 1
ex.patches.append([lines[cidx + 7], int(lines[didx + 7])])
verdict = lines[-2].strip()
if verdict == 'True':
ex.verdict = True
else:
ex.verdict = False
# print(verdict)
ex.distance = nltk.edit_distance([token.strip() for token in ex.parent_code],
[token.strip() for token in ex.child_code])
patches.append(ex)
return np.asarray(patches)
def de_duplicate_patches(patches):
patch_map = {}
for pidx, patch in enumerate(patches):
key = ' '.join(patch.parent_code) + ' '.join(patch.child_code)
if key not in patch_map.keys():
patch_map[key] = []
patch_map[key].append([patch, pidx])
unique_indices = []
for key in patch_map:
ps = patch_map[key]
if len(ps) == 1:
unique_indices.append(ps[0][1])
else:
idx = -1
for pi, p in enumerate(ps):
if p[0].verdict:
idx = pi
unique_indices.append(ps[idx][1])
return unique_indices
pass
if __name__ == '__main__':
result_base = '/home/sc2nf/codit-clone'
option = 'token' # 'token
size = 10
# if option == 'tree':
# file_name = 'codit-all-concrete_' + str(size) + '.2_' + str(2*size) + '_decode_res.txt'
# else:
# file_name = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
file_name_tree = 'codit-all-concrete_' + str(size) + '.2_' + str(2 * size) + '_decode_res.txt'
file_path_tree = result_base + '/' + file_name_tree
patches_tree = read_patch(file_path_tree, size)
unique_indices = de_duplicate_patches(patches_tree)
# unique_patches_tree = patches_tree[unique_indices]
# unique_count = len(unique_patches_tree)
file_name_token = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
file_path_token = result_base + '/' + file_name_token
patches_token = read_patch(file_path_token, size)
# unique_patches = patches_token[unique_indices]
unified_patches = []
for idx, (p_tree, p_token) in enumerate(zip(patches_tree, patches_token)):
if idx in unique_indices:
assert isinstance(p_tree, Patch) and isinstance(p_token, Patch)
p_tree.verdict_token = p_token.verdict
unified_patches.append(p_tree)
tree_count = np.sum([1 if p.verdict else 0 for p in unified_patches])
token_count = np.sum([1 if p.verdict_token else 0 for p in unified_patches])
tree_indices = set()
token_indices = set()
for i, p in enumerate(unified_patches):
if p.verdict:
tree_indices.add(i)
if p.verdict_token:
token_indices.add(i)
only_tree = tree_indices.difference(token_indices)
only_token = token_indices.difference(tree_indices)
common = tree_indices.intersection(token_indices)
print(tree_count, token_count, len(only_token), len(only_tree), len(common), len(unified_patches))
#
# total_success_tree = np.sum([1 if p.verdict else 0 for p in unique_patches])
# print(unique_patches, total_success_tree)
# tree_success_indices_in_unique = set()
# for idx, p in enumerate(unique_patches):
# if p.verdict:
# tree_success_indices_in_unique.add(idx)
#
#
#
# total_success_token = np.sum([1 if p.verdict else 0 for p in unique_patches])
# print(tree_count, total_success_token)
|
[
"numpy.sum",
"numpy.asarray"
] |
[((1726, 1745), 'numpy.asarray', 'np.asarray', (['patches'], {}), '(patches)\n', (1736, 1745), True, 'import numpy as np\n'), ((3701, 3759), 'numpy.sum', 'np.sum', (['[(1 if p.verdict else 0) for p in unified_patches]'], {}), '([(1 if p.verdict else 0) for p in unified_patches])\n', (3707, 3759), True, 'import numpy as np\n'), ((3776, 3840), 'numpy.sum', 'np.sum', (['[(1 if p.verdict_token else 0) for p in unified_patches]'], {}), '([(1 if p.verdict_token else 0) for p in unified_patches])\n', (3782, 3840), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
#Dahlquist test
#sol1ex = lambda t: np.exp(-t)
#sol2ex = lambda t: np.exp(-2*t)
#oscillator 1
sol1ex = lambda t: np.cos(t**2/2)
sol2ex = lambda t: np.sin(t**2/2)
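# Note: (y1, y2) = (cos(t^2/2), sin(t^2/2)) solves y1' = -t*y2, y2' = t*y1 with
# y(0) = (1, 0); presumably this is the "Osc1" problem integrated by the solver.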
#oscillator 2
#sol1ex = lambda t: np.exp(np.sin(t**2))
#sol2ex = lambda t: np.exp(np.cos(t**2))
name = 'Osc1'
t = np.fromfile('../out/%s_snap_t' % name)
nsnap = len(t)
sol1 = np.zeros((nsnap,))
sol2 = sol1.copy()
for i in range(nsnap):
s = np.fromfile('../out/%s_snap_%d' % (name,i))
sol1[i] = s[0]
sol2[i] = s[1]
fig, axs = plt.subplots(2, 3, figsize=(10,5))
axs = [item for sublist in axs for item in sublist]
tdense = np.linspace(min(t), max(t), 2500)
axs[0].plot(tdense, sol1ex(tdense), 'k', linewidth=0.5, label='$y_1$ exact')
axs[0].plot(t, sol1, 'C0.', label='$y_1$ numerical')
axs[0].set_title('Solutions')
axs[0].set_ylabel('$y_1$')
axs[0].legend()
axs[3].plot(tdense, sol2ex(tdense), 'k', linewidth=0.5, label='$y_2$ exact')
axs[3].plot(t, sol2, 'C1.', label='$y_2$ numerical')
axs[3].set_ylabel('$y_2$')
axs[3].legend()
axs[1].semilogy(t, np.abs(sol1 - sol1ex(t)), 'C0.', label='$y_1$ abs err')
axs[4].semilogy(t, np.abs(sol2 - sol2ex(t)), 'C1.', label='$y_2$ abs err')
axs[1].set_title('Absolute Error')
axs[2].semilogy(t, np.abs((sol1 - sol1ex(t))/sol1ex(t)), 'C0.', label='$y_1$ rel err')
axs[5].semilogy(t, np.abs((sol2 - sol2ex(t))/sol1ex(t)), 'C1.', label='$y_2$ rel err')
axs[2].set_title('Relative Error')
axs[3].set_xlabel('t')
axs[4].set_xlabel('t')
axs[5].set_xlabel('t')
plt.tight_layout()
plt.show()
|
[
"numpy.fromfile",
"numpy.zeros",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((329, 367), 'numpy.fromfile', 'np.fromfile', (["('../out/%s_snap_t' % name)"], {}), "('../out/%s_snap_t' % name)\n", (340, 367), True, 'import numpy as np\n'), ((390, 408), 'numpy.zeros', 'np.zeros', (['(nsnap,)'], {}), '((nsnap,))\n', (398, 408), True, 'import numpy as np\n'), ((553, 588), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(10, 5)'}), '(2, 3, figsize=(10, 5))\n', (565, 588), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1546), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1544, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1557), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1555, 1557), True, 'import matplotlib.pyplot as plt\n'), ((165, 183), 'numpy.cos', 'np.cos', (['(t ** 2 / 2)'], {}), '(t ** 2 / 2)\n', (171, 183), True, 'import numpy as np\n'), ((199, 217), 'numpy.sin', 'np.sin', (['(t ** 2 / 2)'], {}), '(t ** 2 / 2)\n', (205, 217), True, 'import numpy as np\n'), ((459, 503), 'numpy.fromfile', 'np.fromfile', (["('../out/%s_snap_%d' % (name, i))"], {}), "('../out/%s_snap_%d' % (name, i))\n", (470, 503), True, 'import numpy as np\n')]
|
"""
@author: yuboya
"""
### pins position to be sent to robot
## from TransformationCalculation:
import numpy as np
import math
def PointsToRobot(alpha, deltax,deltay,deltaz,xyzc):
sina = math.sin(alpha)
cosa = math.cos(alpha)
pointrs = []
for pointc in xyzc:
# METHOD 2: matrix calculation
pc = pointc.reshape(3,1)
R = np.array([cosa, -sina, 0, sina, cosa, 0, 0,0,1])
R = R.reshape(3,3)
T= np.array([deltax,deltay,deltaz])
T = T.reshape(3,1)
pr = np.dot(np.transpose(R),pc)+T
pointr = pr.reshape(1,3)
pointrs.append(pointr)
return pointrs
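

# Usage sketch (illustrative values only): rotate two camera-frame pin
# positions by 90 degrees about the z axis and shift them by (10, 20, 0).
#
#     xyzc = [np.array([100.0, 0.0, 50.0]), np.array([0.0, 200.0, 50.0])]
#     pins_robot = PointsToRobot(math.pi / 2, 10.0, 20.0, 0.0, xyzc)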
|
[
"math.cos",
"numpy.array",
"numpy.transpose",
"math.sin"
] |
[((222, 237), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (230, 237), False, 'import math\n'), ((250, 265), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (258, 265), False, 'import math\n'), ((396, 446), 'numpy.array', 'np.array', (['[cosa, -sina, 0, sina, cosa, 0, 0, 0, 1]'], {}), '([cosa, -sina, 0, sina, cosa, 0, 0, 0, 1])\n', (404, 446), True, 'import numpy as np\n'), ((486, 520), 'numpy.array', 'np.array', (['[deltax, deltay, deltaz]'], {}), '([deltax, deltay, deltaz])\n', (494, 520), True, 'import numpy as np\n'), ((568, 583), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (580, 583), True, 'import numpy as np\n')]
|
"""
This is the script containing the calibration module, basically calculating homography matrix.
This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
# The license is only for non-commercial use (commercial licenses can be obtained from Stanford).
# The material is provided as-is, with no warranties whatsoever.
# If you publish any code, data, or scientific work based on this, please cite our work.
Technical Paper:
<NAME>, <NAME>, <NAME>, <NAME>. Neural Holography with Camera-in-the-loop Training. ACM TOG (SIGGRAPH Asia), 2020.
"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
def circle_detect(captured_img, num_circles, spacing, pad_pixels=(0., 0.), show_preview=True):
"""
    Detects the circles of a circle board pattern
:param captured_img: captured image
:param num_circles: a tuple of integers, (num_circle_x, num_circle_y)
    :param spacing: a tuple of integers, in pixels, (space between circles in x, space between circles in y)
    :param show_preview: boolean, default True
    :param pad_pixels: coordinates of the top-left corner of the warped image,
        assuming the same amount of padding on the opposite side.
:return: a tuple, (found_dots, H)
found_dots: boolean, indicating success of calibration
H: a 3x3 homography matrix (numpy)
"""
# Binarization
# org_copy = org.copy() # Otherwise, we write on the original image!
img = (captured_img.copy() * 255).astype(np.uint8)
if len(img.shape) > 2:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 15)
img_gray = img.copy()
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 121, 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
img = 255 - img
# Blob detection
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.filterByColor = True
params.minThreshold = 128
# Filter by Area.
params.filterByArea = True
params.minArea = 50
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.785
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = False
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
# Detecting keypoints
# this is redundant for what comes next, but gives us access to the detected dots for debug
keypoints = detector.detect(img)
found_dots, centers = cv2.findCirclesGrid(img, num_circles,
blobDetector=detector, flags=cv2.CALIB_CB_SYMMETRIC_GRID)
# Drawing the keypoints
cv2.drawChessboardCorners(captured_img, num_circles, centers, found_dots)
img_gray = cv2.drawKeypoints(img_gray, keypoints, np.array([]), (0, 255, 0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Find transformation
H = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=np.float32)
if found_dots:
# Generate reference points to compute the homography
ref_pts = np.zeros((num_circles[0] * num_circles[1], 1, 2), np.float32)
pos = 0
for i in range(0, num_circles[1]):
for j in range(0, num_circles[0]):
ref_pts[pos, 0, :] = spacing * np.array([j, i]) + np.array(pad_pixels)
pos += 1
H, mask = cv2.findHomography(centers, ref_pts, cv2.RANSAC, 1)
if show_preview:
dsize = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(num_circles, spacing, pad_pixels)]
captured_img_warp = cv2.warpPerspective(captured_img, H, tuple(dsize))
if show_preview:
fig = plt.figure()
ax = fig.add_subplot(223)
ax.imshow(img_gray, cmap='gray')
ax2 = fig.add_subplot(221)
ax2.imshow(img, cmap='gray')
ax3 = fig.add_subplot(222)
ax3.imshow(captured_img, cmap='gray')
if found_dots:
ax4 = fig.add_subplot(224)
ax4.imshow(captured_img_warp, cmap='gray')
plt.show()
return found_dots, H
class Calibration:
def __init__(self, num_circles=(21, 12), spacing_size=(80, 80), pad_pixels=(0, 0)):
self.num_circles = num_circles
self.spacing_size = spacing_size
self.pad_pixels = pad_pixels
self.h_transform = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
def calibrate(self, img, show_preview=True):
found_corners, self.h_transform = circle_detect(img, self.num_circles,
self.spacing_size, self.pad_pixels, show_preview)
return found_corners
def get_transform(self):
return self.h_transform
def __call__(self, input_img, img_size=None):
"""
This forward pass returns the warped image.
:param input_img: A numpy grayscale image shape of [H, W].
:param img_size: output size, default None.
:return: output_img: warped image with pre-calculated homography and destination size.
"""
if img_size is None:
img_size = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(self.num_circles, self.spacing_size, self.pad_pixels)]
output_img = cv2.warpPerspective(input_img, self.h_transform, tuple(img_size))
return output_img
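

# Usage sketch (illustrative; the image path and [0, 1] normalisation are
# assumptions about how captured frames are loaded):
#
#     calib = Calibration(num_circles=(21, 12), spacing_size=(80, 80))
#     captured = cv2.imread('captured_pattern.png', cv2.IMREAD_GRAYSCALE) / 255.0
#     if calib.calibrate(captured, show_preview=False):
#         warped = calib(captured)  # image warped by the estimated homography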
|
[
"cv2.findCirclesGrid",
"cv2.SimpleBlobDetector_create",
"cv2.findHomography",
"cv2.medianBlur",
"cv2.morphologyEx",
"cv2.adaptiveThreshold",
"cv2.SimpleBlobDetector_Params",
"numpy.array",
"numpy.zeros",
"cv2.cvtColor",
"matplotlib.pyplot.figure",
"cv2.drawChessboardCorners",
"cv2.getStructuringElement",
"matplotlib.pyplot.show"
] |
[((1681, 1704), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(15)'], {}), '(img, 15)\n', (1695, 1704), False, 'import cv2\n'), ((1742, 1837), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['img', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(121)', '(0)'], {}), '(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 121, 0)\n', (1763, 1837), False, 'import cv2\n'), ((1846, 1900), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(15, 15)'], {}), '(cv2.MORPH_ELLIPSE, (15, 15))\n', (1871, 1900), False, 'import cv2\n'), ((1911, 1956), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (1927, 1956), False, 'import cv2\n'), ((2012, 2043), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (2041, 2043), False, 'import cv2\n'), ((2514, 2551), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (2543, 2551), False, 'import cv2\n'), ((2738, 2838), 'cv2.findCirclesGrid', 'cv2.findCirclesGrid', (['img', 'num_circles'], {'blobDetector': 'detector', 'flags': 'cv2.CALIB_CB_SYMMETRIC_GRID'}), '(img, num_circles, blobDetector=detector, flags=cv2.\n CALIB_CB_SYMMETRIC_GRID)\n', (2757, 2838), False, 'import cv2\n'), ((2913, 2986), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['captured_img', 'num_circles', 'centers', 'found_dots'], {}), '(captured_img, num_circles, centers, found_dots)\n', (2938, 2986), False, 'import cv2\n'), ((3180, 3259), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (3188, 3259), True, 'import numpy as np\n'), ((1632, 1669), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1644, 1669), False, 'import cv2\n'), ((3041, 3053), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3049, 3053), True, 'import numpy as np\n'), ((3386, 3447), 'numpy.zeros', 'np.zeros', (['(num_circles[0] * num_circles[1], 1, 2)', 'np.float32'], {}), '((num_circles[0] * num_circles[1], 1, 2), np.float32)\n', (3394, 3447), True, 'import numpy as np\n'), ((3685, 3736), 'cv2.findHomography', 'cv2.findHomography', (['centers', 'ref_pts', 'cv2.RANSAC', '(1)'], {}), '(centers, ref_pts, cv2.RANSAC, 1)\n', (3703, 3736), False, 'import cv2\n'), ((4040, 4052), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4050, 4052), True, 'import matplotlib.pyplot as plt\n'), ((4411, 4421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4419, 4421), True, 'import matplotlib.pyplot as plt\n'), ((4701, 4762), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (4709, 4762), True, 'import numpy as np\n'), ((3620, 3640), 'numpy.array', 'np.array', (['pad_pixels'], {}), '(pad_pixels)\n', (3628, 3640), True, 'import numpy as np\n'), ((3601, 3617), 'numpy.array', 'np.array', (['[j, i]'], {}), '([j, i])\n', (3609, 3617), True, 'import numpy as np\n')]
|
"""Learn ideal points with the text-based ideal point model (TBIP).
Let y_{dv} denote the counts of word v in document d. Let x_d refer to the
ideal point of the author of document d. Then we model:
theta, beta ~ Gamma(alpha, alpha)
x, eta ~ N(0, 1)
y_{dv} ~ Pois(sum_k theta_dk beta_kv exp(x_d * eta_kv)).
We perform variational inference to provide estimates for the posterior
distribution of each latent variable. We take reparameterization gradients,
using a lognormal variational family for the positive variables (theta, beta)
and a normal variational family for the real variables (x, eta).
The directory `data/{data_name}/clean/` should have the following four files:
1. `counts.npz`: a [num_documents, num_words] sparse matrix containing the
word counts for each document.
2. `author_indices.npy`: a [num_documents] vector where each entry is an
integer in the set {0, 1, ..., num_authors - 1}, indicating the author of
the corresponding document in `counts.npz`.
3. `vocabulary.txt`: a [num_words]-length file where each line is a string
denoting the corresponding word in the vocabulary.
4. `author_map.txt`: a [num_authors]-length file where each line is a string
denoting the name of an author in the corpus.
We provide more details in our paper [1].
#### References
[1]: <NAME>, <NAME>, <NAME>. Text-Based Ideal Points. In
_Conference of the Association for Computational Linguistics_, 2020.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import flags
import numpy as np
import scipy.sparse as sparse
import tensorflow as tf
import tensorflow_probability as tfp
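# --- Added illustration (not part of the original TBIP script) ---
# A tiny NumPy sketch of the generative process described in the module
# docstring, with made-up sizes (2 documents, 3 topics, 4 words). It is never
# called by the training code below; it only shows how theta, beta, x, and eta
# combine into the Poisson rate sum_k theta_dk * beta_kv * exp(x_d * eta_kv),
# using the Gamma(0.3, 0.3) prior that `get_log_prior` below assumes.
def _toy_generative_example(seed=0):
  """Draw one toy corpus from theta, beta ~ Gamma(0.3, 0.3), x, eta ~ N(0, 1)."""
  rng = np.random.RandomState(seed)
  num_documents, num_topics, num_words = 2, 3, 4
  theta = rng.gamma(shape=0.3, scale=1. / 0.3, size=(num_documents, num_topics))
  beta = rng.gamma(shape=0.3, scale=1. / 0.3, size=(num_topics, num_words))
  x = rng.randn(num_documents)  # ideal point of each document's author
  eta = rng.randn(num_topics, num_words)
  rate = np.einsum("dk,kv,dkv->dv", theta, beta,
                   np.exp(x[:, None, None] * eta[None, :, :]))
  return rng.poisson(rate)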
flags.DEFINE_float("learning_rate",
default=0.01,
help="Adam learning rate.")
flags.DEFINE_integer("max_steps",
default=1000000,
help="Number of training steps to run.")
flags.DEFINE_integer("num_topics",
default=50,
help="Number of topics.")
flags.DEFINE_integer("batch_size",
default=1024,
help="Batch size.")
flags.DEFINE_integer("num_samples",
default=1,
help="Number of samples to use for ELBO approximation.")
flags.DEFINE_enum("counts_transformation",
default="nothing",
enum_values=["nothing", "binary", "sqrt", "log"],
help="Transformation used on counts data.")
flags.DEFINE_boolean("pre_initialize_parameters",
default=True,
help="Whether to use pre-initialized document and topic "
"intensities (with Poisson factorization).")
flags.DEFINE_string("data",
default="senate-speeches-114",
help="Data source being used.")
flags.DEFINE_integer("senate_session",
default=113,
help="Senate session (used only when data is "
"'senate-speech-comparisons'.")
flags.DEFINE_integer("print_steps",
default=500,
help="Number of steps to print and save results.")
flags.DEFINE_integer("seed",
default=123,
help="Random seed to be used.")
FLAGS = flags.FLAGS
def build_input_pipeline(data_dir,
batch_size,
random_state,
counts_transformation="nothing"):
"""Load data and build iterator for minibatches.
Args:
data_dir: The directory where the data is located. There must be four
      files inside the directory: `counts.npz`, `author_indices.npy`,
`author_map.txt`, and `vocabulary.txt`.
batch_size: The batch size to use for training.
random_state: A NumPy `RandomState` object, used to shuffle the data.
counts_transformation: A string indicating how to transform the counts.
One of "nothing", "binary", "log", or "sqrt".
"""
counts = sparse.load_npz(os.path.join(data_dir, "counts.npz"))
num_documents, num_words = counts.shape
author_indices = np.load(
os.path.join(data_dir, "author_indices.npy")).astype(np.int32)
num_authors = np.max(author_indices + 1)
author_map = np.loadtxt(os.path.join(data_dir, "author_map.txt"),
dtype=str,
delimiter="\n",
encoding='latin-1')
# Shuffle data.
documents = random_state.permutation(num_documents)
shuffled_author_indices = author_indices[documents]
shuffled_counts = counts[documents]
# Apply counts transformation.
if counts_transformation == "nothing":
count_values = shuffled_counts.data
elif counts_transformation == "binary":
count_values = np.int32(shuffled_counts.data > 0)
elif counts_transformation == "log":
count_values = np.round(np.log(1 + shuffled_counts.data))
elif counts_transformation == "sqrt":
count_values = np.round(np.sqrt(shuffled_counts.data))
else:
raise ValueError("Unrecognized counts transformation.")
# Store counts as sparse tensor so it occupies less memory.
shuffled_counts = tf.SparseTensor(
indices=np.array(shuffled_counts.nonzero()).T,
values=count_values,
dense_shape=shuffled_counts.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(documents, shuffled_counts, shuffled_author_indices))
batches = dataset.repeat().batch(batch_size).prefetch(batch_size)
iterator = batches.make_one_shot_iterator()
vocabulary = np.loadtxt(os.path.join(data_dir, "vocabulary.txt"),
dtype=str,
delimiter="\n",
comments="<!-")
total_counts_per_author = np.bincount(
author_indices,
weights=np.array(np.sum(counts, axis=1)).flatten())
counts_per_document_per_author = (
total_counts_per_author / np.bincount(author_indices))
  # Author weights measure how verbose each author is relative to the average
  # document length.
author_weights = (counts_per_document_per_author /
np.mean(np.sum(counts, axis=1))).astype(np.float32)
return (iterator, author_weights, vocabulary, author_map,
num_documents, num_words, num_authors)
def build_lognormal_variational_parameters(initial_document_loc,
initial_objective_topic_loc,
num_documents,
num_words,
num_topics):
"""
Build document and objective topic lognormal variational parameters.
Args:
initial_document_loc: A [num_documents, num_topics] NumPy array containing
the initial document intensity means.
initial_objective_topic_loc: A [num_topics, num_words] NumPy array
containing the initial objective topic means.
num_documents: Number of documents in the data set.
num_words: Number of words in the data set.
num_topics: Number of topics.
Returns:
document_loc: A Variable object with shape [num_documents, num_topics].
document_scale: A positive Variable object with shape [num_documents,
num_topics].
objective_topic_loc: A Variable object with shape [num_topics, num_words].
objective_topic_scale: A positive Variable object with shape [num_topics,
num_words].
"""
document_loc = tf.get_variable(
"document_loc",
initializer=tf.constant(np.log(initial_document_loc)))
objective_topic_loc = tf.get_variable(
"objective_topic_loc",
initializer=tf.constant(np.log(initial_objective_topic_loc)))
document_scale_logit = tf.get_variable(
"document_scale_logit",
shape=[num_documents, num_topics],
initializer=tf.initializers.random_normal(mean=0, stddev=1.),
dtype=tf.float32)
objective_topic_scale_logit = tf.get_variable(
"objective_topic_scale_logit",
shape=[num_topics, num_words],
initializer=tf.initializers.random_normal(mean=0, stddev=1.),
dtype=tf.float32)
document_scale = tf.nn.softplus(document_scale_logit)
objective_topic_scale = tf.nn.softplus(objective_topic_scale_logit)
tf.summary.histogram("params/document_loc", document_loc)
tf.summary.histogram("params/objective_topic_loc", objective_topic_loc)
tf.summary.histogram("params/document_scale", document_scale)
tf.summary.histogram("params/objective_topic_scale", objective_topic_scale)
return (document_loc, document_scale,
objective_topic_loc, objective_topic_scale)
def print_topics(neutral_mean, negative_mean, positive_mean, vocabulary):
"""Get neutral and ideological topics to be used for Tensorboard.
Args:
neutral_mean: The mean of the neutral topics, a NumPy matrix with shape
[num_topics, num_words].
negative_mean: The mean of the negative topics, a NumPy matrix with shape
[num_topics, num_words].
positive_mean: The mean of the positive topics, a NumPy matrix with shape
[num_topics, num_words].
vocabulary: A list of the vocabulary with shape [num_words].
Returns:
topic_strings: A list of the negative, neutral, and positive topics.
"""
num_topics, num_words = neutral_mean.shape
words_per_topic = 10
top_neutral_words = np.argsort(-neutral_mean, axis=1)
top_negative_words = np.argsort(-negative_mean, axis=1)
top_positive_words = np.argsort(-positive_mean, axis=1)
topic_strings = []
for topic_idx in range(num_topics):
neutral_start_string = "Neutral {}:".format(topic_idx)
neutral_row = [vocabulary[word] for word in
top_neutral_words[topic_idx, :words_per_topic]]
neutral_row_string = ", ".join(neutral_row)
neutral_string = " ".join([neutral_start_string, neutral_row_string])
positive_start_string = "Positive {}:".format(topic_idx)
positive_row = [vocabulary[word] for word in
top_positive_words[topic_idx, :words_per_topic]]
positive_row_string = ", ".join(positive_row)
positive_string = " ".join([positive_start_string, positive_row_string])
negative_start_string = "Negative {}:".format(topic_idx)
negative_row = [vocabulary[word] for word in
top_negative_words[topic_idx, :words_per_topic]]
negative_row_string = ", ".join(negative_row)
negative_string = " ".join([negative_start_string, negative_row_string])
topic_strings.append(" \n".join(
[negative_string, neutral_string, positive_string]))
return np.array(topic_strings)
def print_ideal_points(ideal_point_loc, author_map):
"""Print ideal point ordering for Tensorboard."""
return ", ".join(author_map[np.argsort(ideal_point_loc)])
def get_log_prior(samples, prior):
"""Return log prior of sampled Gaussians.
Args:
samples: A `Tensor` with shape `[num_samples, :, :]`.
prior: String representing prior distribution.
Returns:
log_prior: A `Tensor` with shape `[num_samples]`, with the log priors
summed across latent dimensions.
"""
if prior == 'normal':
prior_distribution = tfp.distributions.Normal(loc=0., scale=1.)
elif prior == 'gamma':
prior_distribution = tfp.distributions.Gamma(concentration=0.3, rate=0.3)
log_prior = tf.reduce_sum(prior_distribution.log_prob(samples),
axis=[1, 2])
return log_prior
def get_elbo(counts,
document_indices,
author_indices,
author_weights,
document_distribution,
objective_topic_distribution,
ideological_topic_distribution,
ideal_point_distribution,
num_documents,
batch_size,
num_samples=1):
"""Approximate variational Lognormal ELBO using reparameterization.
Args:
counts: A matrix with shape `[batch_size, num_words]`.
document_indices: An int-vector with shape `[batch_size]`.
author_indices: An int-vector with shape `[batch_size]`.
    author_weights: A vector with shape `[num_authors]`, indicating how
      verbose each author is relative to the average.
document_distribution: A positive `Distribution` object with parameter
shape `[num_documents, num_topics]`.
objective_topic_distribution: A positive `Distribution` object with
parameter shape `[num_topics, num_words]`.
ideological_topic_distribution: A positive `Distribution` object with
parameter shape `[num_topics, num_words]`.
ideal_point_distribution: A `Distribution` object over [0, 1] with
parameter_shape `[num_authors]`.
num_documents: The number of documents in the total data set (used to
calculate log-likelihood scale).
batch_size: Batch size (used to calculate log-likelihood scale).
num_samples: Number of Monte-Carlo samples.
Returns:
elbo: A scalar representing a Monte-Carlo sample of the ELBO. This value is
averaged across samples and summed across batches.
"""
document_samples = document_distribution.sample(num_samples)
objective_topic_samples = objective_topic_distribution.sample(num_samples)
ideological_topic_samples = ideological_topic_distribution.sample(
num_samples)
ideal_point_samples = ideal_point_distribution.sample(num_samples)
_, num_topics, _ = objective_topic_samples.get_shape().as_list()
ideal_point_log_prior = tfp.distributions.Normal(
loc=0.,
scale=1.)
ideal_point_log_prior = tf.reduce_sum(
ideal_point_log_prior.log_prob(ideal_point_samples), axis=[1,2])
document_log_prior = get_log_prior(document_samples, 'gamma')
objective_topic_log_prior = get_log_prior(objective_topic_samples, 'gamma')
ideological_topic_log_prior = get_log_prior(ideological_topic_samples,
'normal')
log_prior = (document_log_prior +
objective_topic_log_prior +
ideological_topic_log_prior +
ideal_point_log_prior)
selected_document_samples = tf.gather(document_samples,
document_indices,
axis=1)
selected_ideal_points = tf.gather(ideal_point_samples,
author_indices,
axis=1)
selected_ideological_topic_samples = tf.exp(
      # exp(x_d * eta_kv) for each sample, document, topic, and word.
selected_ideal_points[:, :, :, tf.newaxis] *
ideological_topic_samples[:, tf.newaxis, :, :])
  # Weight by each author's verbosity relative to the average document length.
selected_author_weights = tf.gather(author_weights, author_indices)
selected_ideological_topic_samples = (
selected_author_weights[tf.newaxis, :, tf.newaxis, tf.newaxis] *
selected_ideological_topic_samples)
document_entropy = -tf.reduce_sum(
document_distribution.log_prob(document_samples),
axis=[1, 2])
objective_topic_entropy = -tf.reduce_sum(
objective_topic_distribution.log_prob(objective_topic_samples),
axis=[1, 2])
ideological_topic_entropy = -tf.reduce_sum(
ideological_topic_distribution.log_prob(ideological_topic_samples),
axis=[1, 2])
ideal_point_entropy = -tf.reduce_sum(
ideal_point_distribution.log_prob(ideal_point_samples),
axis=1)
entropy = (document_entropy +
objective_topic_entropy +
ideological_topic_entropy +
ideal_point_entropy)
rate = tf.reduce_sum(
selected_document_samples[:, :, :, tf.newaxis] *
objective_topic_samples[:, tf.newaxis, :, :] *
selected_ideological_topic_samples[:, :, :, :],
axis=2)
count_distribution = tfp.distributions.Poisson(rate=rate)
# Need to un-sparsify the counts to evaluate log-likelihood.
count_log_likelihood = count_distribution.log_prob(
tf.sparse.to_dense(counts))
count_log_likelihood = tf.reduce_sum(count_log_likelihood, axis=[1, 2])
# Adjust for the fact that we're only using a minibatch.
count_log_likelihood = count_log_likelihood * (num_documents / batch_size)
elbo = log_prior + count_log_likelihood + entropy
elbo = tf.reduce_mean(elbo)
tf.summary.scalar("elbo/elbo", elbo)
tf.summary.scalar("elbo/log_prior", tf.reduce_mean(log_prior))
tf.summary.scalar("elbo/count_log_likelihood",
tf.reduce_mean(count_log_likelihood))
tf.summary.scalar("elbo/entropy", tf.reduce_mean(entropy))
return elbo
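# --- Added illustration (not part of the original TBIP script) ---
# A minimal sketch of the reparameterization trick that `get_elbo` relies on,
# reduced to a single lognormal variational factor q(z) = LogNormal(loc, scale)
# with the Gamma(0.3, 0.3) prior used above. Because the lognormal sample is a
# differentiable function of `loc` and `scale`, gradients of this Monte-Carlo
# estimate flow back into the variational parameters. Never called by the
# script; all names are illustrative.
def _toy_reparameterized_elbo_term(loc, scale, num_samples=8):
  """Monte-Carlo estimate of E_q[log p(z)] + entropy for one latent variable."""
  variational = tfp.distributions.LogNormal(loc=loc, scale=scale)
  samples = variational.sample(num_samples)
  log_prior = tfp.distributions.Gamma(concentration=0.3, rate=0.3).log_prob(samples)
  entropy = -variational.log_prob(samples)
  return tf.reduce_mean(log_prior + entropy)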
def main(argv):
del argv
tf.set_random_seed(FLAGS.seed)
random_state = np.random.RandomState(FLAGS.seed)
project_dir = os.path.abspath(os.path.dirname(__file__))
source_dir = os.path.join(project_dir, "data/{}".format(FLAGS.data))
# For model comparisons, we must also specify a Senate session.
if FLAGS.data == "senate-speech-comparisons":
source_dir = os.path.join(
source_dir, "tbip/{}".format(FLAGS.senate_session))
# As described in the docstring, the data directory must have the following
# files: counts.npz, author_indices.npy, vocabulary.txt, author_map.txt.
data_dir = os.path.join(source_dir, "clean")
save_dir = os.path.join(source_dir, "tbip-fits")
if tf.gfile.Exists(save_dir):
tf.logging.warn("Deleting old log directory at {}".format(save_dir))
tf.gfile.DeleteRecursively(save_dir)
tf.gfile.MakeDirs(save_dir)
(iterator, author_weights, vocabulary, author_map,
num_documents, num_words, num_authors) = build_input_pipeline(
data_dir,
FLAGS.batch_size,
random_state,
FLAGS.counts_transformation)
document_indices, counts, author_indices = iterator.get_next()
if FLAGS.pre_initialize_parameters:
fit_dir = os.path.join(source_dir, "pf-fits")
fitted_document_shape = np.load(
os.path.join(fit_dir, "document_shape.npy")).astype(np.float32)
fitted_document_rate = np.load(
os.path.join(fit_dir, "document_rate.npy")).astype(np.float32)
fitted_topic_shape = np.load(
os.path.join(fit_dir, "topic_shape.npy")).astype(np.float32)
fitted_topic_rate = np.load(
os.path.join(fit_dir, "topic_rate.npy")).astype(np.float32)
initial_document_loc = fitted_document_shape / fitted_document_rate
initial_objective_topic_loc = fitted_topic_shape / fitted_topic_rate
else:
initial_document_loc = np.float32(
np.exp(random_state.randn(num_documents, FLAGS.num_topics)))
initial_objective_topic_loc = np.float32(
np.exp(random_state.randn(FLAGS.num_topics, num_words)))
# Initialize lognormal variational parameters.
(document_loc, document_scale, objective_topic_loc,
objective_topic_scale) = build_lognormal_variational_parameters(
initial_document_loc,
initial_objective_topic_loc,
num_documents,
num_words,
FLAGS.num_topics)
document_distribution = tfp.distributions.LogNormal(
loc=document_loc,
scale=document_scale)
objective_topic_distribution = tfp.distributions.LogNormal(
loc=objective_topic_loc,
scale=objective_topic_scale)
ideological_topic_loc = tf.get_variable(
"ideological_topic_loc",
shape=[FLAGS.num_topics, num_words],
dtype=tf.float32)
ideological_topic_scale_logit = tf.get_variable(
"ideological_topic_scale_logit",
shape=[FLAGS.num_topics, num_words],
dtype=tf.float32)
ideological_topic_scale = tf.nn.softplus(ideological_topic_scale_logit)
tf.summary.histogram("params/ideological_topic_loc", ideological_topic_loc)
tf.summary.histogram("params/ideological_topic_scale",
ideological_topic_scale)
ideological_topic_distribution = tfp.distributions.Normal(
loc=ideological_topic_loc,
scale=ideological_topic_scale)
ideal_point_loc = tf.get_variable(
"ideal_point_loc",
shape=[num_authors],
dtype=tf.float32)
ideal_point_scale_logit = tf.get_variable(
"ideal_point_scale_logit",
initializer=tf.initializers.random_normal(mean=0, stddev=1.),
shape=[num_authors],
dtype=tf.float32)
ideal_point_scale = tf.nn.softplus(ideal_point_scale_logit)
ideal_point_distribution = tfp.distributions.Normal(
loc=ideal_point_loc,
scale=ideal_point_scale)
tf.summary.histogram("params/ideal_point_loc",
tf.reshape(ideal_point_loc, [-1]))
tf.summary.histogram("params/ideal_point_scale",
tf.reshape(ideal_point_scale, [-1]))
elbo = get_elbo(counts,
document_indices,
author_indices,
author_weights,
document_distribution,
objective_topic_distribution,
ideological_topic_distribution,
ideal_point_distribution,
num_documents,
FLAGS.batch_size,
num_samples=FLAGS.num_samples)
loss = -elbo
tf.summary.scalar("loss", loss)
optim = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
train_op = optim.minimize(loss)
"""
For each (k,v), we want to evaluate E[beta_kv], E[beta_kv * exp(eta_kv)],
and E[beta_kv * exp(-eta_kv)], where the expectations are with respect to the
  variational distributions. As in the paper, beta refers to the objective topic
  and eta refers to the ideological topic.
  Dropping the indices and denoting by mu_b the objective topic location and
  sigma_b the objective topic scale, we have E[beta] = exp(mu_b + sigma_b^2 / 2),
using the mean of a lognormal distribution.
Denoting by mu_e the ideological topic location and sigma_e the ideological
topic scale, we have E[beta * exp(eta)] = E[beta]E[exp(eta)] by the
mean-field assumption. exp(eta) is lognormal distributed, so E[exp(eta)] =
exp(mu_e + sigma_e^2 / 2). Thus, E[beta * exp(eta)] =
exp(mu_b + mu_e + (sigma_b^2 + sigma_e^2) / 2).
Finally, E[beta * exp(-eta)] =
exp(mu_b - mu_e + (sigma_b^2 + sigma_e^2) / 2).
  Because we only care about orderings and exp is monotone, we can drop the
  exponents from the means.
"""
neutral_mean = objective_topic_loc + objective_topic_scale ** 2 / 2
positive_mean = (objective_topic_loc +
ideological_topic_loc +
(objective_topic_scale ** 2 +
ideological_topic_scale ** 2) / 2)
negative_mean = (objective_topic_loc -
ideological_topic_loc +
(objective_topic_scale ** 2 +
ideological_topic_scale ** 2) / 2)
positive_mean_at_two = (objective_topic_loc +
2*ideological_topic_loc +
(objective_topic_scale ** 2 +
2*ideological_topic_scale ** 2) / 2)
negative_mean_at_two = (objective_topic_loc -
2*ideological_topic_loc +
(objective_topic_scale ** 2 +
2*ideological_topic_scale ** 2) / 2)
topics = tf.py_func(
functools.partial(print_topics, vocabulary=vocabulary),
[neutral_mean, negative_mean, positive_mean],
tf.string,
stateful=False)
ideal_point_list = tf.py_func(
functools.partial(print_ideal_points, author_map=author_map),
[ideal_point_loc],
tf.string, stateful=False)
tf.summary.text("topics", topics)
tf.summary.text("ideal_points", ideal_point_list)
summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(save_dir, sess.graph)
sess.run(init)
start_time = time.time()
for step in range(FLAGS.max_steps):
(_, elbo_val) = sess.run([train_op, elbo])
duration = (time.time() - start_time) / (step + 1)
if step % FLAGS.print_steps == 0:
print("Step: {:>3d} ELBO: {:.3f} ({:.3f} sec)".format(
step, elbo_val, duration))
summary_str = sess.run(summary)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
if step % 1000 == 0 or step == FLAGS.max_steps - 1:
param_save_dir = os.path.join(save_dir, "params/")
if not tf.gfile.Exists(param_save_dir):
tf.gfile.MakeDirs(param_save_dir)
(ideological_topic_loc_val, ideological_topic_scale_val,
ideal_point_loc_val, ideal_point_scale_val) = sess.run([
ideological_topic_loc, ideological_topic_scale,
ideal_point_loc, ideal_point_scale])
(document_loc_val, document_scale_val, objective_topic_loc_val,
objective_topic_scale_val, ideological_topic_loc_val,
ideological_topic_scale_val, ideal_point_loc_val,
ideal_point_scale_val) = sess.run([
document_loc, document_scale, objective_topic_loc,
objective_topic_scale, ideological_topic_loc,
ideological_topic_scale, ideal_point_loc, ideal_point_scale])
np.save(os.path.join(param_save_dir, "document_loc"),
document_loc_val)
np.save(os.path.join(param_save_dir, "document_scale"),
document_scale_val)
np.save(os.path.join(param_save_dir, "objective_topic_loc"),
objective_topic_loc_val)
np.save(os.path.join(param_save_dir, "objective_topic_scale"),
objective_topic_scale_val)
np.save(os.path.join(param_save_dir, "ideological_topic_loc"),
ideological_topic_loc_val)
np.save(os.path.join(param_save_dir, "ideological_topic_scale"),
ideological_topic_scale_val)
np.save(os.path.join(param_save_dir, "ideal_point_loc"),
ideal_point_loc_val)
np.save(os.path.join(param_save_dir, "ideal_point_scale"),
ideal_point_scale_val)
if __name__ == "__main__":
tf.app.run()
|
[
"numpy.sqrt",
"tensorflow.get_variable",
"tensorflow.initializers.random_normal",
"tensorflow.reduce_sum",
"numpy.int32",
"numpy.log",
"numpy.argsort",
"numpy.array",
"tensorflow.nn.softplus",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_mean",
"tensorflow.sparse.to_dense",
"tensorflow.set_random_seed",
"absl.flags.DEFINE_enum",
"absl.flags.DEFINE_float",
"numpy.random.RandomState",
"tensorflow.app.run",
"tensorflow_probability.distributions.LogNormal",
"tensorflow.gfile.Exists",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.Session",
"tensorflow.gfile.DeleteRecursively",
"absl.flags.DEFINE_boolean",
"numpy.max",
"tensorflow_probability.distributions.Gamma",
"tensorflow.summary.scalar",
"tensorflow.train.AdamOptimizer",
"tensorflow_probability.distributions.Poisson",
"tensorflow_probability.distributions.Normal",
"tensorflow.summary.merge_all",
"os.path.dirname",
"tensorflow.summary.histogram",
"tensorflow.gather",
"tensorflow.summary.text",
"tensorflow.reshape",
"tensorflow.summary.FileWriter",
"absl.flags.DEFINE_string",
"numpy.bincount",
"time.time",
"absl.flags.DEFINE_integer",
"os.path.join",
"tensorflow.global_variables_initializer",
"numpy.sum",
"functools.partial",
"tensorflow.exp"
] |
[((1743, 1820), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.01)', 'help': '"""Adam learning rate."""'}), "('learning_rate', default=0.01, help='Adam learning rate.')\n", (1761, 1820), False, 'from absl import flags\n'), ((1859, 1955), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_steps"""'], {'default': '(1000000)', 'help': '"""Number of training steps to run."""'}), "('max_steps', default=1000000, help=\n 'Number of training steps to run.')\n", (1879, 1955), False, 'from absl import flags\n'), ((1993, 2065), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_topics"""'], {'default': '(50)', 'help': '"""Number of topics."""'}), "('num_topics', default=50, help='Number of topics.')\n", (2013, 2065), False, 'from absl import flags\n'), ((2108, 2176), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""'], {'default': '(1024)', 'help': '"""Batch size."""'}), "('batch_size', default=1024, help='Batch size.')\n", (2128, 2176), False, 'from absl import flags\n'), ((2219, 2327), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_samples"""'], {'default': '(1)', 'help': '"""Number of samples to use for ELBO approximation."""'}), "('num_samples', default=1, help=\n 'Number of samples to use for ELBO approximation.')\n", (2239, 2327), False, 'from absl import flags\n'), ((2365, 2530), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""counts_transformation"""'], {'default': '"""nothing"""', 'enum_values': "['nothing', 'binary', 'sqrt', 'log']", 'help': '"""Transformation used on counts data."""'}), "('counts_transformation', default='nothing', enum_values=[\n 'nothing', 'binary', 'sqrt', 'log'], help=\n 'Transformation used on counts data.')\n", (2382, 2530), False, 'from absl import flags\n'), ((2575, 2748), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""pre_initialize_parameters"""'], {'default': '(True)', 'help': '"""Whether to use pre-initialized document and topic intensities (with Poisson factorization)."""'}), "('pre_initialize_parameters', default=True, help=\n 'Whether to use pre-initialized document and topic intensities (with Poisson factorization).'\n )\n", (2595, 2748), False, 'from absl import flags\n'), ((2810, 2905), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data"""'], {'default': '"""senate-speeches-114"""', 'help': '"""Data source being used."""'}), "('data', default='senate-speeches-114', help=\n 'Data source being used.')\n", (2829, 2905), False, 'from absl import flags\n'), ((2941, 3073), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""senate_session"""'], {'default': '(113)', 'help': '"""Senate session (used only when data is \'senate-speech-comparisons\'."""'}), '(\'senate_session\', default=113, help=\n "Senate session (used only when data is \'senate-speech-comparisons\'.")\n', (2961, 3073), False, 'from absl import flags\n'), ((3140, 3244), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""print_steps"""'], {'default': '(500)', 'help': '"""Number of steps to print and save results."""'}), "('print_steps', default=500, help=\n 'Number of steps to print and save results.')\n", (3160, 3244), False, 'from absl import flags\n'), ((3282, 3355), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""'], {'default': '(123)', 'help': '"""Random seed to be used."""'}), "('seed', default=123, help='Random seed to be used.')\n", (3302, 3355), False, 'from absl import flags\n'), ((4321, 4347), 'numpy.max', 'np.max', 
(['(author_indices + 1)'], {}), '(author_indices + 1)\n', (4327, 4347), True, 'import numpy as np\n'), ((5423, 5516), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(documents, shuffled_counts, shuffled_author_indices)'], {}), '((documents, shuffled_counts,\n shuffled_author_indices))\n', (5457, 5516), True, 'import tensorflow as tf\n'), ((8197, 8233), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['document_scale_logit'], {}), '(document_scale_logit)\n', (8211, 8233), True, 'import tensorflow as tf\n'), ((8260, 8303), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['objective_topic_scale_logit'], {}), '(objective_topic_scale_logit)\n', (8274, 8303), True, 'import tensorflow as tf\n'), ((8309, 8366), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/document_loc"""', 'document_loc'], {}), "('params/document_loc', document_loc)\n", (8329, 8366), True, 'import tensorflow as tf\n'), ((8369, 8440), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/objective_topic_loc"""', 'objective_topic_loc'], {}), "('params/objective_topic_loc', objective_topic_loc)\n", (8389, 8440), True, 'import tensorflow as tf\n'), ((8443, 8504), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/document_scale"""', 'document_scale'], {}), "('params/document_scale', document_scale)\n", (8463, 8504), True, 'import tensorflow as tf\n'), ((8507, 8582), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/objective_topic_scale"""', 'objective_topic_scale'], {}), "('params/objective_topic_scale', objective_topic_scale)\n", (8527, 8582), True, 'import tensorflow as tf\n'), ((9405, 9438), 'numpy.argsort', 'np.argsort', (['(-neutral_mean)'], {'axis': '(1)'}), '(-neutral_mean, axis=1)\n', (9415, 9438), True, 'import numpy as np\n'), ((9462, 9496), 'numpy.argsort', 'np.argsort', (['(-negative_mean)'], {'axis': '(1)'}), '(-negative_mean, axis=1)\n', (9472, 9496), True, 'import numpy as np\n'), ((9520, 9554), 'numpy.argsort', 'np.argsort', (['(-positive_mean)'], {'axis': '(1)'}), '(-positive_mean, axis=1)\n', (9530, 9554), True, 'import numpy as np\n'), ((10648, 10671), 'numpy.array', 'np.array', (['topic_strings'], {}), '(topic_strings)\n', (10656, 10671), True, 'import numpy as np\n'), ((13495, 13539), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': '(0.0)', 'scale': '(1.0)'}), '(loc=0.0, scale=1.0)\n', (13519, 13539), True, 'import tensorflow_probability as tfp\n'), ((14133, 14186), 'tensorflow.gather', 'tf.gather', (['document_samples', 'document_indices'], {'axis': '(1)'}), '(document_samples, document_indices, axis=1)\n', (14142, 14186), True, 'import tensorflow as tf\n'), ((14295, 14349), 'tensorflow.gather', 'tf.gather', (['ideal_point_samples', 'author_indices'], {'axis': '(1)'}), '(ideal_point_samples, author_indices, axis=1)\n', (14304, 14349), True, 'import tensorflow as tf\n'), ((14468, 14571), 'tensorflow.exp', 'tf.exp', (['(selected_ideal_points[:, :, :, tf.newaxis] * ideological_topic_samples[:,\n tf.newaxis, :, :])'], {}), '(selected_ideal_points[:, :, :, tf.newaxis] *\n ideological_topic_samples[:, tf.newaxis, :, :])\n', (14474, 14571), True, 'import tensorflow as tf\n'), ((14688, 14729), 'tensorflow.gather', 'tf.gather', (['author_weights', 'author_indices'], {}), '(author_weights, author_indices)\n', (14697, 14729), True, 'import tensorflow as tf\n'), ((15545, 15718), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(selected_document_samples[:, :, :, tf.newaxis] * 
objective_topic_samples[:,\n tf.newaxis, :, :] * selected_ideological_topic_samples[:, :, :, :])'], {'axis': '(2)'}), '(selected_document_samples[:, :, :, tf.newaxis] *\n objective_topic_samples[:, tf.newaxis, :, :] *\n selected_ideological_topic_samples[:, :, :, :], axis=2)\n', (15558, 15718), True, 'import tensorflow as tf\n'), ((15763, 15799), 'tensorflow_probability.distributions.Poisson', 'tfp.distributions.Poisson', ([], {'rate': 'rate'}), '(rate=rate)\n', (15788, 15799), True, 'import tensorflow_probability as tfp\n'), ((15976, 16024), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['count_log_likelihood'], {'axis': '[1, 2]'}), '(count_log_likelihood, axis=[1, 2])\n', (15989, 16024), True, 'import tensorflow as tf\n'), ((16223, 16243), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['elbo'], {}), '(elbo)\n', (16237, 16243), True, 'import tensorflow as tf\n'), ((16247, 16283), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""elbo/elbo"""', 'elbo'], {}), "('elbo/elbo', elbo)\n", (16264, 16283), True, 'import tensorflow as tf\n'), ((16563, 16593), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (16581, 16593), True, 'import tensorflow as tf\n'), ((16611, 16644), 'numpy.random.RandomState', 'np.random.RandomState', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (16632, 16644), True, 'import numpy as np\n'), ((17150, 17183), 'os.path.join', 'os.path.join', (['source_dir', '"""clean"""'], {}), "(source_dir, 'clean')\n", (17162, 17183), False, 'import os\n'), ((17197, 17234), 'os.path.join', 'os.path.join', (['source_dir', '"""tbip-fits"""'], {}), "(source_dir, 'tbip-fits')\n", (17209, 17234), False, 'import os\n'), ((17240, 17265), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['save_dir'], {}), '(save_dir)\n', (17255, 17265), True, 'import tensorflow as tf\n'), ((17383, 17410), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['save_dir'], {}), '(save_dir)\n', (17400, 17410), True, 'import tensorflow as tf\n'), ((18903, 18970), 'tensorflow_probability.distributions.LogNormal', 'tfp.distributions.LogNormal', ([], {'loc': 'document_loc', 'scale': 'document_scale'}), '(loc=document_loc, scale=document_scale)\n', (18930, 18970), True, 'import tensorflow_probability as tfp\n'), ((19018, 19104), 'tensorflow_probability.distributions.LogNormal', 'tfp.distributions.LogNormal', ([], {'loc': 'objective_topic_loc', 'scale': 'objective_topic_scale'}), '(loc=objective_topic_loc, scale=\n objective_topic_scale)\n', (19045, 19104), True, 'import tensorflow_probability as tfp\n'), ((19143, 19243), 'tensorflow.get_variable', 'tf.get_variable', (['"""ideological_topic_loc"""'], {'shape': '[FLAGS.num_topics, num_words]', 'dtype': 'tf.float32'}), "('ideological_topic_loc', shape=[FLAGS.num_topics, num_words\n ], dtype=tf.float32)\n", (19158, 19243), True, 'import tensorflow as tf\n'), ((19292, 19399), 'tensorflow.get_variable', 'tf.get_variable', (['"""ideological_topic_scale_logit"""'], {'shape': '[FLAGS.num_topics, num_words]', 'dtype': 'tf.float32'}), "('ideological_topic_scale_logit', shape=[FLAGS.num_topics,\n num_words], dtype=tf.float32)\n", (19307, 19399), True, 'import tensorflow as tf\n'), ((19443, 19488), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['ideological_topic_scale_logit'], {}), '(ideological_topic_scale_logit)\n', (19457, 19488), True, 'import tensorflow as tf\n'), ((19491, 19566), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/ideological_topic_loc"""', 'ideological_topic_loc'], {}), "('params/ideological_topic_loc', 
ideological_topic_loc)\n", (19511, 19566), True, 'import tensorflow as tf\n'), ((19569, 19648), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/ideological_topic_scale"""', 'ideological_topic_scale'], {}), "('params/ideological_topic_scale', ideological_topic_scale)\n", (19589, 19648), True, 'import tensorflow as tf\n'), ((19708, 19795), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'ideological_topic_loc', 'scale': 'ideological_topic_scale'}), '(loc=ideological_topic_loc, scale=\n ideological_topic_scale)\n', (19732, 19795), True, 'import tensorflow_probability as tfp\n'), ((19827, 19900), 'tensorflow.get_variable', 'tf.get_variable', (['"""ideal_point_loc"""'], {'shape': '[num_authors]', 'dtype': 'tf.float32'}), "('ideal_point_loc', shape=[num_authors], dtype=tf.float32)\n", (19842, 19900), True, 'import tensorflow as tf\n'), ((20139, 20178), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['ideal_point_scale_logit'], {}), '(ideal_point_scale_logit)\n', (20153, 20178), True, 'import tensorflow as tf\n'), ((20208, 20278), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'ideal_point_loc', 'scale': 'ideal_point_scale'}), '(loc=ideal_point_loc, scale=ideal_point_scale)\n', (20232, 20278), True, 'import tensorflow_probability as tfp\n'), ((20963, 20994), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (20980, 20994), True, 'import tensorflow as tf\n'), ((21006, 21063), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'FLAGS.learning_rate'}), '(learning_rate=FLAGS.learning_rate)\n', (21028, 21063), True, 'import tensorflow as tf\n'), ((23316, 23349), 'tensorflow.summary.text', 'tf.summary.text', (['"""topics"""', 'topics'], {}), "('topics', topics)\n", (23331, 23349), True, 'import tensorflow as tf\n'), ((23352, 23401), 'tensorflow.summary.text', 'tf.summary.text', (['"""ideal_points"""', 'ideal_point_list'], {}), "('ideal_points', ideal_point_list)\n", (23367, 23401), True, 'import tensorflow as tf\n'), ((23418, 23440), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (23438, 23440), True, 'import tensorflow as tf\n'), ((23450, 23483), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (23481, 23483), True, 'import tensorflow as tf\n'), ((25890, 25902), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (25900, 25902), True, 'import tensorflow as tf\n'), ((4127, 4163), 'os.path.join', 'os.path.join', (['data_dir', '"""counts.npz"""'], {}), "(data_dir, 'counts.npz')\n", (4139, 4163), False, 'import os\n'), ((4374, 4414), 'os.path.join', 'os.path.join', (['data_dir', '"""author_map.txt"""'], {}), "(data_dir, 'author_map.txt')\n", (4386, 4414), False, 'import os\n'), ((5660, 5700), 'os.path.join', 'os.path.join', (['data_dir', '"""vocabulary.txt"""'], {}), "(data_dir, 'vocabulary.txt')\n", (5672, 5700), False, 'import os\n'), ((6017, 6044), 'numpy.bincount', 'np.bincount', (['author_indices'], {}), '(author_indices)\n', (6028, 6044), True, 'import numpy as np\n'), ((11223, 11267), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': '(0.0)', 'scale': '(1.0)'}), '(loc=0.0, scale=1.0)\n', (11247, 11267), True, 'import tensorflow_probability as tfp\n'), ((15923, 15949), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['counts'], {}), '(counts)\n', (15941, 15949), True, 'import tensorflow as tf\n'), 
((16322, 16347), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['log_prior'], {}), '(log_prior)\n', (16336, 16347), True, 'import tensorflow as tf\n'), ((16419, 16455), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['count_log_likelihood'], {}), '(count_log_likelihood)\n', (16433, 16455), True, 'import tensorflow as tf\n'), ((16493, 16516), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['entropy'], {}), '(entropy)\n', (16507, 16516), True, 'import tensorflow as tf\n'), ((16680, 16705), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (16695, 16705), False, 'import os\n'), ((17344, 17380), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['save_dir'], {}), '(save_dir)\n', (17370, 17380), True, 'import tensorflow as tf\n'), ((17751, 17786), 'os.path.join', 'os.path.join', (['source_dir', '"""pf-fits"""'], {}), "(source_dir, 'pf-fits')\n", (17763, 17786), False, 'import os\n'), ((20365, 20398), 'tensorflow.reshape', 'tf.reshape', (['ideal_point_loc', '[-1]'], {}), '(ideal_point_loc, [-1])\n', (20375, 20398), True, 'import tensorflow as tf\n'), ((20475, 20510), 'tensorflow.reshape', 'tf.reshape', (['ideal_point_scale', '[-1]'], {}), '(ideal_point_scale, [-1])\n', (20485, 20510), True, 'import tensorflow as tf\n'), ((23008, 23062), 'functools.partial', 'functools.partial', (['print_topics'], {'vocabulary': 'vocabulary'}), '(print_topics, vocabulary=vocabulary)\n', (23025, 23062), False, 'import functools\n'), ((23194, 23254), 'functools.partial', 'functools.partial', (['print_ideal_points'], {'author_map': 'author_map'}), '(print_ideal_points, author_map=author_map)\n', (23211, 23254), False, 'import functools\n'), ((23492, 23504), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (23502, 23504), True, 'import tensorflow as tf\n'), ((23535, 23578), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['save_dir', 'sess.graph'], {}), '(save_dir, sess.graph)\n', (23556, 23578), True, 'import tensorflow as tf\n'), ((23615, 23626), 'time.time', 'time.time', ([], {}), '()\n', (23624, 23626), False, 'import time\n'), ((4884, 4918), 'numpy.int32', 'np.int32', (['(shuffled_counts.data > 0)'], {}), '(shuffled_counts.data > 0)\n', (4892, 4918), True, 'import numpy as np\n'), ((7889, 7938), 'tensorflow.initializers.random_normal', 'tf.initializers.random_normal', ([], {'mean': '(0)', 'stddev': '(1.0)'}), '(mean=0, stddev=1.0)\n', (7918, 7938), True, 'import tensorflow as tf\n'), ((8104, 8153), 'tensorflow.initializers.random_normal', 'tf.initializers.random_normal', ([], {'mean': '(0)', 'stddev': '(1.0)'}), '(mean=0, stddev=1.0)\n', (8133, 8153), True, 'import tensorflow as tf\n'), ((10809, 10836), 'numpy.argsort', 'np.argsort', (['ideal_point_loc'], {}), '(ideal_point_loc)\n', (10819, 10836), True, 'import numpy as np\n'), ((11316, 11368), 'tensorflow_probability.distributions.Gamma', 'tfp.distributions.Gamma', ([], {'concentration': '(0.3)', 'rate': '(0.3)'}), '(concentration=0.3, rate=0.3)\n', (11339, 11368), True, 'import tensorflow_probability as tfp\n'), ((20016, 20065), 'tensorflow.initializers.random_normal', 'tf.initializers.random_normal', ([], {'mean': '(0)', 'stddev': '(1.0)'}), '(mean=0, stddev=1.0)\n', (20045, 20065), True, 'import tensorflow as tf\n'), ((4241, 4285), 'os.path.join', 'os.path.join', (['data_dir', '"""author_indices.npy"""'], {}), "(data_dir, 'author_indices.npy')\n", (4253, 4285), False, 'import os\n'), ((7589, 7617), 'numpy.log', 'np.log', (['initial_document_loc'], {}), '(initial_document_loc)\n', (7595, 7617), True, 
'import numpy as np\n'), ((7720, 7755), 'numpy.log', 'np.log', (['initial_objective_topic_loc'], {}), '(initial_objective_topic_loc)\n', (7726, 7755), True, 'import numpy as np\n'), ((24152, 24185), 'os.path.join', 'os.path.join', (['save_dir', '"""params/"""'], {}), "(save_dir, 'params/')\n", (24164, 24185), False, 'import os\n'), ((4986, 5018), 'numpy.log', 'np.log', (['(1 + shuffled_counts.data)'], {}), '(1 + shuffled_counts.data)\n', (4992, 5018), True, 'import numpy as np\n'), ((6206, 6228), 'numpy.sum', 'np.sum', (['counts'], {'axis': '(1)'}), '(counts, axis=1)\n', (6212, 6228), True, 'import numpy as np\n'), ((17832, 17875), 'os.path.join', 'os.path.join', (['fit_dir', '"""document_shape.npy"""'], {}), "(fit_dir, 'document_shape.npy')\n", (17844, 17875), False, 'import os\n'), ((17940, 17982), 'os.path.join', 'os.path.join', (['fit_dir', '"""document_rate.npy"""'], {}), "(fit_dir, 'document_rate.npy')\n", (17952, 17982), False, 'import os\n'), ((18045, 18085), 'os.path.join', 'os.path.join', (['fit_dir', '"""topic_shape.npy"""'], {}), "(fit_dir, 'topic_shape.npy')\n", (18057, 18085), False, 'import os\n'), ((18147, 18186), 'os.path.join', 'os.path.join', (['fit_dir', '"""topic_rate.npy"""'], {}), "(fit_dir, 'topic_rate.npy')\n", (18159, 18186), False, 'import os\n'), ((23734, 23745), 'time.time', 'time.time', ([], {}), '()\n', (23743, 23745), False, 'import time\n'), ((24201, 24232), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['param_save_dir'], {}), '(param_save_dir)\n', (24216, 24232), True, 'import tensorflow as tf\n'), ((24244, 24277), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['param_save_dir'], {}), '(param_save_dir)\n', (24261, 24277), True, 'import tensorflow as tf\n'), ((24999, 25043), 'os.path.join', 'os.path.join', (['param_save_dir', '"""document_loc"""'], {}), "(param_save_dir, 'document_loc')\n", (25011, 25043), False, 'import os\n'), ((25096, 25142), 'os.path.join', 'os.path.join', (['param_save_dir', '"""document_scale"""'], {}), "(param_save_dir, 'document_scale')\n", (25108, 25142), False, 'import os\n'), ((25197, 25248), 'os.path.join', 'os.path.join', (['param_save_dir', '"""objective_topic_loc"""'], {}), "(param_save_dir, 'objective_topic_loc')\n", (25209, 25248), False, 'import os\n'), ((25308, 25361), 'os.path.join', 'os.path.join', (['param_save_dir', '"""objective_topic_scale"""'], {}), "(param_save_dir, 'objective_topic_scale')\n", (25320, 25361), False, 'import os\n'), ((25423, 25476), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideological_topic_loc"""'], {}), "(param_save_dir, 'ideological_topic_loc')\n", (25435, 25476), False, 'import os\n'), ((25538, 25593), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideological_topic_scale"""'], {}), "(param_save_dir, 'ideological_topic_scale')\n", (25550, 25593), False, 'import os\n'), ((25657, 25704), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideal_point_loc"""'], {}), "(param_save_dir, 'ideal_point_loc')\n", (25669, 25704), False, 'import os\n'), ((25760, 25809), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideal_point_scale"""'], {}), "(param_save_dir, 'ideal_point_scale')\n", (25772, 25809), False, 'import os\n'), ((5088, 5117), 'numpy.sqrt', 'np.sqrt', (['shuffled_counts.data'], {}), '(shuffled_counts.data)\n', (5095, 5117), True, 'import numpy as np\n'), ((5913, 5935), 'numpy.sum', 'np.sum', (['counts'], {'axis': '(1)'}), '(counts, axis=1)\n', (5919, 5935), True, 'import numpy as np\n')]
|
import copy
import logging
import numpy as np
import six
import tensorflow as tf
from functools import wraps
from contextlib import contextmanager
from .backend_base import BackendBase, FunctionBase, DeviceDecorator
try:
from tensorflow.contrib.distributions import fill_triangular
except ImportError:
print("Cannot find fill_triangular")
class TensorflowFunction(FunctionBase):
def __init__(self, *args, **kwargs):
super(TensorflowFunction, self).__init__(*args, **kwargs)
with tf.control_dependencies(self.outputs):
self.updates = [tf.assign(k, v) for k, v in self.updates]
def __call__(self, *inputs):
feed_dict = self.feed_dict(*inputs)
result = self.session.get_current_session().run(self.outputs + self.updates, feed_dict=feed_dict)
if len(self.outputs) == 1:
return result[0]
return result[:len(self.outputs)]
@six.add_metaclass(DeviceDecorator)
class TensorflowBackend(BackendBase):
def __init__(self, **kwargs):
super(TensorflowBackend, self).__init__(**kwargs)
self.core = tf
self._sessions = []
self.set_default_device(self.gpu() if tf.test.is_gpu_available() else self.cpu())
# General purpose methods
@classmethod
def use_device(cls, method):
@wraps(method)
def func(self, *args, **kwargs):
with tf.device(self.get_current_device()):
result = method(self, *args, **kwargs)
return result
return func
def enable_eager(self):
tf.enable_eager_execution()
def cpu(self, id=0):
        return '/cpu:%u' % id
def gpu(self, id=0):
        return '/gpu:%u' % id
@property
def int32(self):
return tf.int32
@property
def float32(self):
return tf.float32
def _placeholder(self, dtype=None, shape=None, name=None):
with self._device(self.get_current_device()):
return tf.placeholder(dtype, shape=shape, name=name)
def _variable(self, initial_value=None, trainable=True, name=None):
with self._device(self.get_current_device()):
return tf.Variable(initial_value=initial_value, trainable=trainable, name=name)
def _device(self, name):
return tf.device(name)
def create_session(self, graph=None, **kwargs):
allow_growth = kwargs.pop('allow_growth', False)
config_proto = tf.ConfigProto(**kwargs)
config_proto.gpu_options.allow_growth = allow_growth
sess = tf.Session(graph=graph, config=config_proto)
self._initialize(sess)
return sess
@contextmanager
def session(self, **kwargs):
with self.create_session(**kwargs) as sess:
self._sessions.append(sess)
self._initialize(sess)
yield sess
self._sessions.pop()
def interactive_session(self, graph=None, **kwargs):
config_proto = tf.ConfigProto(**kwargs)
sess = tf.InteractiveSession(config=config_proto, graph=graph)
self._initialize(sess)
return sess
def get_current_session(self):
if len(self._sessions) == 0:
raise Exception('No current session')
return self._sessions[-1]
def _initialize(self, sess):
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
# Unified interface
def cast(self, x, dtype):
return tf.cast(x, dtype)
def dtype(self, x):
return x.dtype
def shape(self, x):
return tf.shape(x)
def rank(self, x):
return tf.rank(x)
def abs(self, x):
return tf.abs(x)
def set_value(self, x, value):
tf.assign(x, np.asarray(value)).op.run(session=self.get_current_session())
def zeros(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.zeros(shape, dtype=dtype, name=name)
def zeros_like(self, x, dtype=None, name=None):
return tf.zeros_like(x, dtype=dtype, name=name)
def ones(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.ones(shape, dtype=dtype, name=name)
def ones_like(self, x, dtype=None, name=None):
return tf.ones_like(x, dtype=dtype, name=name)
def random_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.truncated_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_uniform(self, shape, minval=0, maxval=None, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_binomial(self, shape, p=0.5, dtype=None):
dtype = dtype or self.floatx()
return tf.where(tf.random_uniform(shape, dtype=dtype) <= p,
tf.ones(shape, dtype=dtype),
tf.zeros(shape, dtype=dtype))
def random_gamma(self, shape, alpha, beta=None):
return tf.random_gamma(shape, alpha, beta=beta)
def tanh(self, x, name=None):
return tf.tanh(x, name=name)
def sigmoid(self, x, name=None):
return tf.sigmoid(x, name=name)
def relu(self, x, alpha=0., name=None):
return tf.nn.relu(x, name=name)
def softmax(self, x, T=1.0):
return tf.nn.softmax(x)
def softplus(self, x):
return tf.nn.softplus(x)
def dropout(self, x, p, seed=None):
retain_prob = 1. - p
if seed is None:
seed = np.random.randint(10e6)
return tf.nn.dropout(x * 1., retain_prob, seed=seed)
def conv2d(self, x, kernel, strides=(1, 1), border_mode='same',
image_shape=None, filter_shape=None):
        '''
        2-D convolution; runs on cuDNN if available.
        border_mode: string, "same" or "valid".
        Inputs use TensorFlow "NHWC" dimension ordering and kernels use
        "HWIO" (height, width, in_channels, out_channels) ordering.
        '''
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
# strides = strides# + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
kernel = tf.cast(kernel, 'float32')
x = tf.nn.convolution(input=x, filter=kernel, strides=strides, padding=padding,
data_format='NHWC')
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
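    # Added usage sketch (illustrative; `backend` and the shapes below are not
    # part of this module). For a batch of 8 RGB images in NHWC order and 16
    # filters of size 3x3, the kernel is laid out as [h, w, in, out]:
    #   images = backend.placeholder(backend.float32, shape=[8, 32, 32, 3])
    #   kernel = backend.variable(np.zeros((3, 3, 3, 16), dtype='float32'))
    #   out = backend.conv2d(images, kernel, border_mode='same')  # [8, 32, 32, 16]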
def conv2d_transpose(self, x, kernel, dim_out, strides=(1, 1), border_mode='same'):
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
output_shape = [self.shape(x)[0]] + list(dim_out)
strides = (1,) + strides + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
kernel = tf.cast(kernel, 'float32')
x = tf.nn.conv2d_transpose(x, kernel, output_shape, strides, padding=padding)
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def pool2d(self, x, pool_size, strides=(1, 1),
border_mode='valid', pool_mode='max'):
        '''
        2-D pooling.
        pool_size: tuple of 2 integers.
        strides: tuple of 2 integers.
        border_mode: one of "valid", "same".
        pool_mode: one of "max", "avg".
        '''
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
if pool_mode == 'max':
x = tf.nn.max_pool(x, pool_size, strides, padding=padding)
elif pool_mode == 'avg':
x = tf.nn.avg_pool(x, pool_size, strides, padding=padding)
else:
raise Exception('Invalid pooling mode: ' + str(pool_mode))
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def flatten(self, x, leading=1):
leading_dim = self.shape(x)[:leading]
new_shape = tf.concat([leading_dim, [-1]], 0)
return tf.reshape(x, new_shape)
    def split(self, x, num_splits, axis=None):
        if axis is None:
            axis = -1  # default to the last axis
        axis = axis % len(x.get_shape())
        return tf.split(x, num_splits, axis=axis)
def reshape(self, x, shape):
return tf.reshape(x, shape)
def sum(self, x, axis=None, keepdims=False):
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_sum(x, axis=axis, keepdims=keepdims)
def prod(self, x, axis=None, keepdims=False):
return tf.reduce_prod(x, axis=axis, keepdims=keepdims)
def mean(self, x, axis=None, keepdims=False):
if axis is not None and axis < 0:
axis = axis % len(x.get_shape())
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_mean(x, axis=axis, keepdims=keepdims)
def batch_norm(self, x, beta, gamma):
mean, variance = tf.nn.moments(x, [0])
normed = tf.nn.batch_normalization(tf.identity(x), mean, variance, beta, gamma, self.epsilon())
return normed
def log(self, x):
return tf.log(x)
def log1p(self, x):
return tf.log1p(x)
def exp(self, x):
return tf.exp(x)
def pow(self, x, a):
return tf.pow(x, a)
def mul(self, x, y):
return tf.multiply(x, y)
def sqrt(self, x):
x = tf.clip_by_value(x,
tf.cast(0., dtype=self.floatx()),
tf.cast(np.inf, dtype=self.floatx()))
return tf.sqrt(x)
def categorical_crossentropy(self, output, target, from_logits=False, axis=-1):
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output = output / tf.reduce_sum(output, axis, True)
# manual computation of crossentropy
output = tf.clip_by_value(output, self.epsilon(), 1. - self.epsilon())
return -tf.reduce_sum(target * tf.log(output), axis)
else:
return tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=target)
def binary_crossentropy(self, output, target, from_logits=False):
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
else:
raise NotImplementedError
def concatenate(self, tensors, axis=-1):
return tf.concat(tensors, axis=axis)
def sort(self, tensor):
values, indices = tf.nn.top_k(-tensor, k=tf.shape(tensor)[0])
return -values, indices
def argmin(self, tensor, axis=0):
return tf.argmin(tensor, axis=axis)
def map(self, function, input):
return tf.map_fn(function, input)
def rnn(self, step_function, input, initial_states, **kwargs):
num_dims = self.rank(input)
perm = self.concat([[1, 0], self.range(2, num_dims)])
input = self.transpose(input, perm)
def step(state, input_):
output, state = step_function(input_, state, **kwargs)
return state
result = tf.scan(step, input, initial_states)[0]
return self.transpose(result, perm)
def while_loop(self, condition, body, loop_vars, **kwargs):
return tf.while_loop(condition, body, loop_vars)
def scan(self, fn, elems, initializer=None):
return tf.scan(fn, elems, initializer=initializer, back_prop=True)
def logdet(self, A, **kwargs):
A = (A + self.matrix_transpose(A)) / 2.
term = tf.log(tf.matrix_diag_part(self.cholesky(A, **kwargs)))
return 2 * tf.reduce_sum(term, -1)
def einsum(self, subscripts, *operands):
return tf.einsum(subscripts, *operands)
def cholesky(self, A, lower=True, warn=True, correct=False):
assert lower is True
# Gradient through py_func adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({'PyFunc': rnd_name, 'PyFuncStateless': rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def correction(A):
A_new, del_ = A.copy(), 1e-4
while True:
try:
np.linalg.cholesky(A_new)
break
except np.linalg.linalg.LinAlgError:
if warn:
logging.warn('[Cholesky] singular matrix, adding diagonal {}'.format(del_))
A_new = A + del_ * np.eye(A.shape[-1]).astype(self.floatx())
del_ *= 2
return A_new
def _correction_grad(op, grad):
A = op.inputs[0]
return grad
if correct:
shape = A.get_shape()
A = py_func(correction, [A], A.dtype, grad=_correction_grad)
A.set_shape(shape)
return tf.cholesky(A)
# Tensorflow interface
def placeholder(self, dtype, shape=None, name=None):
return self._placeholder(dtype=dtype, shape=shape, name=name)
def variable(self, initial_value=None, trainable=True, name=None):
return self._variable(initial_value=initial_value, trainable=trainable, name=name)
def assign(self, a, b):
return tf.assign(a, b)
def to_float(self, x):
return tf.cast(x, self.floatx())
def constant(self, value, dtype=None, shape=None):
return tf.constant(value, dtype=dtype, shape=shape)
def get_shape(self, x):
return [a.value for a in tf.convert_to_tensor(x).get_shape()]
def get_value(self, variable):
return self.get_current_session().run(variable)
def concat(self, values, axis=-1):
return tf.concat(values, axis=axis)
def gather(self, params, indices):
return tf.gather(params, indices)
def gather_nd(self, params, indices):
return tf.gather_nd(params, indices)
def equal(self, x, y):
return tf.equal(x, y)
def logical_and(self, x, y):
return tf.logical_and(x, y)
def matmul(self, a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, name=name)
def trace(self, a):
return tf.trace(a)
def transpose(self, a, perm=None):
return tf.transpose(a, perm=perm)
def matrix_transpose(self, a):
return tf.matrix_transpose(a)
def matrix_diag(self, a):
return tf.matrix_diag(a)
def matrix_diag_part(self, a):
return tf.matrix_diag_part(a)
def set_diag(self, input, diagonal):
return tf.linalg.set_diag(input, diagonal)
def band_part(self, input, num_lower, num_upper):
return tf.linalg.band_part(input, num_lower, num_upper)
def vec(self, A):
A = self.matrix_transpose(A)
leading_dim = self.shape(A)[:-2]
return self.reshape(A, self.concat([
leading_dim,
[-1]
], 0))
def unvec(self, v, m, n):
leading_dim = self.shape(v)[:-1]
return self.matrix_transpose(self.reshape(v, self.concat([
leading_dim,
[n, m]
], 0)))
def kronecker(self, A, B):
C = (A[..., None, None] * B[..., None, None, :, :])
blocks = [
tf.unstack(a, axis=-3 % len(a.shape)) for a in
tf.unstack(C, axis=-4 % len(C.shape))
]
return tf.concat([
tf.concat(a, -1) for a in blocks
], -2)
def block_sum(self, X, m, n):
leading_dim = self.shape(X)[:-2]
block_sum = self.zeros(self.concat([leading_dim, [m, m]], 0))
for i in range(n):
block_sum += X[..., i*m:(i+1)*m, i*m:(i+1)*m]
return block_sum
def block_trace(self, X, m, n):
blocks = []
for i in range(n):
blocks.append([])
for j in range(n):
block = self.trace(X[..., i*m:(i+1)*m, j*m:(j+1)*m])
blocks[-1].append(block)
return self.pack([
self.pack([
b for b in block
])
for block in blocks
])
def kronecker_vec(self, X, m, n):
leading_dim = tf.shape(X)[:-2]
blocks = []
for i in range(n):
blocks.append([])
for j in range(m):
idx = i * m + j
block = tf.matrix_transpose(tf.reshape(X[..., idx, :], tf.concat([leading_dim, [n, m]], 0)))
blocks[-1].append(block)
return tf.concat([tf.concat(b, -2) for b in blocks], -1)
def lower_triangular(self, a):
return fill_triangular(a)
def matrix_inverse(self, a):
return tf.matrix_inverse(a)
def expand_dims(self, x, dim=-1):
return tf.expand_dims(x, dim)
def tile(self, input, multiples):
return tf.tile(input, multiples)
def gradients(self, loss, variables):
return tf.gradients(loss, variables)
def square(self, x):
return tf.square(x)
def clip_by_value(self, x, low, high):
return tf.clip_by_value(x, low, high)
def stack(self, values, axis=0, name='stack'):
return tf.stack(values, axis=axis, name=name)
def unstack(self, values, num=None, axis=0, name='unstack'):
return tf.unstack(values, num=num, axis=axis, name=name)
def pack(self, *args, **kwargs):
return self.stack(*args, **kwargs)
def unpack(self, *args, **kwargs):
return self.unstack(*args, **kwargs)
def reduce_max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def reduce_logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def matrix_solve(self, matrix, rhs, adjoint=None):
return tf.matrix_solve(matrix, rhs, adjoint=adjoint)
# Theano interface
def dim(self, x):
return len(x.get_shape())
def scalar(self, name=None, dtype=None, shape=[]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def vector(self, name=None, dtype=None, shape=[None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def matrix(self, name=None, dtype=None, shape=[None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor3(self, name=None, dtype=None, shape=[None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor4(self, name=None, dtype=None, shape=[None, None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def shared(self, value, name=None):
return self._variable(initial_value=value, name=name)
def arange(self, start, stop=None, step=None):
return self.range(start, limit=stop, delta=step if step is not None else 1)
def sparse_dot(self, x, y):
return tf.sparse_tensor_dense_matmul(x, y)
def dot(self, x, y):
if len(x.get_shape()) != len(y.get_shape()):
len_y = len(y.get_shape())
new_y_shape = tf.concat([tf.shape(x)[:-len_y], tf.shape(y)], 0)
y = tf.broadcast_to(y, new_y_shape)
return tf.matmul(x, y)
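# When the ranks differ, `dot` broadcasts y up to x's leading batch
# dimensions before the matmul, so e.g. x of shape (B, n, m) times y of
# shape (m, k) gives a result of shape (B, n, k).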
def outer(self, x, y):
if len(x.get_shape()) == 0:
return x * y
return x[...,:,None] * y[...,None,:]
def eye(self, d, batch_shape=None):
return tf.eye(d, batch_shape=batch_shape)
def function(self, inputs, outputs, updates=[]):
return TensorflowFunction(self, inputs, outputs, updates)
def grad(self, loss, variables):
return tf.gradients(loss, variables)
def sqr(self, x):
return tf.square(x)
def argmax(self, x, axis=None):
return tf.argmax(x, axis=axis)
def max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def switch(self, condition, then_expression, else_expression):
'''Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
'''
return tf.where(condition, then_expression, else_expression)
def alloc(self, value, shape, unbroadcast=None, dtype=None):
dtype = dtype or self.floatx()
vals = tf.fill(tf.stack(shape), np.array(value).astype(dtype))
new_shape = []
for s in shape:
if isinstance(s, tf.Tensor):
new_shape.append(None)
else:
new_shape.append(s)
vals.set_shape(new_shape)
return vals
def range(self, start, limit=None, delta=1):
if limit is None:
return tf.range(start, delta=delta)
return tf.range(start, limit, delta=delta)
def solve(self, a, b):
return tf.matrix_solve(a, b)
def one_hot(self, indices, depth):
return tf.one_hot(indices, depth)
# Science methods
def gammaln(self, x):
return tf.lgamma(x)
def multigammaln(self, a, p):
p = self.to_float(p)
p_ = self.cast(p, 'int32')
a = a[..., None]
i = self.to_float(self.range(1, p_ + 1))
term1 = p * (p - 1) / 4. * self.log(np.pi)
term2 = self.gammaln(a - (i - 1) / 2.)
return term1 + self.sum(term2, axis=-1)
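# multigammaln computes the multivariate log-gamma function
#   log Gamma_p(a) = p*(p-1)/4 * log(pi) + sum_{j=1..p} log Gamma(a + (1-j)/2)
# with the sum taken over the extra trailing axis introduced by a[..., None].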
def digamma(self, a):
return tf.digamma(a)
|
[
"tensorflow.tile",
"tensorflow.matrix_diag_part",
"tensorflow.multiply",
"tensorflow.einsum",
"tensorflow.gradients",
"tensorflow.nn.softplus",
"tensorflow.nn.conv2d_transpose",
"tensorflow.while_loop",
"tensorflow.scan",
"tensorflow.pow",
"tensorflow.Session",
"functools.wraps",
"tensorflow.matrix_solve",
"tensorflow.convert_to_tensor",
"tensorflow.py_func",
"tensorflow.get_default_graph",
"tensorflow.one_hot",
"tensorflow.device",
"tensorflow.Variable",
"tensorflow.sqrt",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.contrib.distributions.fill_triangular",
"tensorflow.nn.max_pool",
"tensorflow.reduce_prod",
"numpy.random.randint",
"tensorflow.sparse_tensor_dense_matmul",
"numpy.linalg.cholesky",
"tensorflow.gather_nd",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.split",
"tensorflow.enable_eager_execution",
"tensorflow.nn.dropout",
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.clip_by_value",
"tensorflow.ConfigProto",
"tensorflow.zeros",
"tensorflow.log1p",
"tensorflow.InteractiveSession",
"tensorflow.nn.avg_pool",
"tensorflow.logical_and",
"tensorflow.reduce_max",
"tensorflow.range",
"tensorflow.gather",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.broadcast_to",
"tensorflow.map_fn",
"tensorflow.unstack",
"tensorflow.local_variables_initializer",
"tensorflow.tanh",
"tensorflow.matrix_diag",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.ones_like",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.eye",
"tensorflow.matrix_transpose",
"tensorflow.linalg.band_part",
"tensorflow.test.is_gpu_available",
"tensorflow.square",
"tensorflow.zeros_like",
"tensorflow.stack",
"tensorflow.trace",
"tensorflow.where",
"tensorflow.lgamma",
"tensorflow.argmin",
"tensorflow.ones",
"six.add_metaclass",
"tensorflow.random_uniform",
"tensorflow.constant",
"tensorflow.identity",
"tensorflow.RegisterGradient",
"tensorflow.abs",
"tensorflow.linalg.set_diag",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"numpy.array",
"tensorflow.cholesky",
"numpy.asarray",
"tensorflow.rank",
"tensorflow.concat",
"tensorflow.assign",
"tensorflow.random_gamma",
"numpy.eye",
"tensorflow.nn.convolution",
"tensorflow.matrix_inverse",
"tensorflow.digamma",
"tensorflow.sigmoid",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.truncated_normal",
"tensorflow.nn.relu",
"tensorflow.reduce_logsumexp",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.exp"
] |
[((899, 933), 'six.add_metaclass', 'six.add_metaclass', (['DeviceDecorator'], {}), '(DeviceDecorator)\n', (916, 933), False, 'import six\n'), ((1297, 1310), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (1302, 1310), False, 'from functools import wraps\n'), ((1545, 1572), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (1570, 1572), True, 'import tensorflow as tf\n'), ((2256, 2271), 'tensorflow.device', 'tf.device', (['name'], {}), '(name)\n', (2265, 2271), True, 'import tensorflow as tf\n'), ((2405, 2429), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '(**kwargs)\n', (2419, 2429), True, 'import tensorflow as tf\n'), ((2506, 2550), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph', 'config': 'config_proto'}), '(graph=graph, config=config_proto)\n', (2516, 2550), True, 'import tensorflow as tf\n'), ((2920, 2944), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '(**kwargs)\n', (2934, 2944), True, 'import tensorflow as tf\n'), ((2960, 3015), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'config_proto', 'graph': 'graph'}), '(config=config_proto, graph=graph)\n', (2981, 3015), True, 'import tensorflow as tf\n'), ((3433, 3450), 'tensorflow.cast', 'tf.cast', (['x', 'dtype'], {}), '(x, dtype)\n', (3440, 3450), True, 'import tensorflow as tf\n'), ((3539, 3550), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3547, 3550), True, 'import tensorflow as tf\n'), ((3590, 3600), 'tensorflow.rank', 'tf.rank', (['x'], {}), '(x)\n', (3597, 3600), True, 'import tensorflow as tf\n'), ((3639, 3648), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (3645, 3648), True, 'import tensorflow as tf\n'), ((3874, 3913), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'dtype', 'name': 'name'}), '(shape, dtype=dtype, name=name)\n', (3882, 3913), True, 'import tensorflow as tf\n'), ((3982, 4022), 'tensorflow.zeros_like', 'tf.zeros_like', (['x'], {'dtype': 'dtype', 'name': 'name'}), '(x, dtype=dtype, name=name)\n', (3995, 4022), True, 'import tensorflow as tf\n'), ((4128, 4166), 'tensorflow.ones', 'tf.ones', (['shape'], {'dtype': 'dtype', 'name': 'name'}), '(shape, dtype=dtype, name=name)\n', (4135, 4166), True, 'import tensorflow as tf\n'), ((4234, 4273), 'tensorflow.ones_like', 'tf.ones_like', (['x'], {'dtype': 'dtype', 'name': 'name'}), '(x, dtype=dtype, name=name)\n', (4246, 4273), True, 'import tensorflow as tf\n'), ((4410, 4483), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'mean': 'mean', 'stddev': 'stddev', 'dtype': 'dtype', 'seed': 'seed'}), '(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)\n', (4426, 4483), True, 'import tensorflow as tf\n'), ((4630, 4706), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'mean': 'mean', 'stddev': 'stddev', 'dtype': 'dtype', 'seed': 'seed'}), '(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)\n', (4649, 4706), True, 'import tensorflow as tf\n'), ((4845, 4923), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': 'minval', 'maxval': 'maxval', 'dtype': 'dtype', 'seed': 'seed'}), '(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)\n', (4862, 4923), True, 'import tensorflow as tf\n'), ((5303, 5343), 'tensorflow.random_gamma', 'tf.random_gamma', (['shape', 'alpha'], {'beta': 'beta'}), '(shape, alpha, beta=beta)\n', (5318, 5343), True, 'import tensorflow as tf\n'), ((5407, 5428), 'tensorflow.tanh', 'tf.tanh', (['x'], {'name': 'name'}), '(x, name=name)\n', (5414, 5428), True, 'import tensorflow 
as tf\n'), ((5482, 5506), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {'name': 'name'}), '(x, name=name)\n', (5492, 5506), True, 'import tensorflow as tf\n'), ((5567, 5591), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': 'name'}), '(x, name=name)\n', (5577, 5591), True, 'import tensorflow as tf\n'), ((5641, 5657), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['x'], {}), '(x)\n', (5654, 5657), True, 'import tensorflow as tf\n'), ((5701, 5718), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['x'], {}), '(x)\n', (5715, 5718), True, 'import tensorflow as tf\n'), ((5872, 5918), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['(x * 1.0)', 'retain_prob'], {'seed': 'seed'}), '(x * 1.0, retain_prob, seed=seed)\n', (5885, 5918), True, 'import tensorflow as tf\n'), ((6651, 6750), 'tensorflow.nn.convolution', 'tf.nn.convolution', ([], {'input': 'x', 'filter': 'kernel', 'strides': 'strides', 'padding': 'padding', 'data_format': '"""NHWC"""'}), "(input=x, filter=kernel, strides=strides, padding=padding,\n data_format='NHWC')\n", (6668, 6750), True, 'import tensorflow as tf\n'), ((7415, 7488), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'kernel', 'output_shape', 'strides'], {'padding': 'padding'}), '(x, kernel, output_shape, strides, padding=padding)\n', (7437, 7488), True, 'import tensorflow as tf\n'), ((8748, 8781), 'tensorflow.concat', 'tf.concat', (['[leading_dim, [-1]]', '(0)'], {}), '([leading_dim, [-1]], 0)\n', (8757, 8781), True, 'import tensorflow as tf\n'), ((8797, 8821), 'tensorflow.reshape', 'tf.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (8807, 8821), True, 'import tensorflow as tf\n'), ((8926, 8960), 'tensorflow.split', 'tf.split', (['x', 'num_splits'], {'axis': 'axis'}), '(x, num_splits, axis=axis)\n', (8934, 8960), True, 'import tensorflow as tf\n'), ((9010, 9030), 'tensorflow.reshape', 'tf.reshape', (['x', 'shape'], {}), '(x, shape)\n', (9020, 9030), True, 'import tensorflow as tf\n'), ((9180, 9226), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (9193, 9226), True, 'import tensorflow as tf\n'), ((9293, 9340), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (9307, 9340), True, 'import tensorflow as tf\n'), ((9578, 9625), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (9592, 9625), True, 'import tensorflow as tf\n'), ((9694, 9715), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0]'], {}), '(x, [0])\n', (9707, 9715), True, 'import tensorflow as tf\n'), ((9880, 9889), 'tensorflow.log', 'tf.log', (['x'], {}), '(x)\n', (9886, 9889), True, 'import tensorflow as tf\n'), ((9930, 9941), 'tensorflow.log1p', 'tf.log1p', (['x'], {}), '(x)\n', (9938, 9941), True, 'import tensorflow as tf\n'), ((9980, 9989), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (9986, 9989), True, 'import tensorflow as tf\n'), ((10031, 10043), 'tensorflow.pow', 'tf.pow', (['x', 'a'], {}), '(x, a)\n', (10037, 10043), True, 'import tensorflow as tf\n'), ((10085, 10102), 'tensorflow.multiply', 'tf.multiply', (['x', 'y'], {}), '(x, y)\n', (10096, 10102), True, 'import tensorflow as tf\n'), ((10304, 10314), 'tensorflow.sqrt', 'tf.sqrt', (['x'], {}), '(x)\n', (10311, 10314), True, 'import tensorflow as tf\n'), ((11167, 11196), 'tensorflow.concat', 'tf.concat', (['tensors'], {'axis': 'axis'}), '(tensors, axis=axis)\n', (11176, 11196), 
True, 'import tensorflow as tf\n'), ((11382, 11410), 'tensorflow.argmin', 'tf.argmin', (['tensor'], {'axis': 'axis'}), '(tensor, axis=axis)\n', (11391, 11410), True, 'import tensorflow as tf\n'), ((11463, 11489), 'tensorflow.map_fn', 'tf.map_fn', (['function', 'input'], {}), '(function, input)\n', (11472, 11489), True, 'import tensorflow as tf\n'), ((12006, 12047), 'tensorflow.while_loop', 'tf.while_loop', (['condition', 'body', 'loop_vars'], {}), '(condition, body, loop_vars)\n', (12019, 12047), True, 'import tensorflow as tf\n'), ((12113, 12172), 'tensorflow.scan', 'tf.scan', (['fn', 'elems'], {'initializer': 'initializer', 'back_prop': '(True)'}), '(fn, elems, initializer=initializer, back_prop=True)\n', (12120, 12172), True, 'import tensorflow as tf\n'), ((12432, 12464), 'tensorflow.einsum', 'tf.einsum', (['subscripts', '*operands'], {}), '(subscripts, *operands)\n', (12441, 12464), True, 'import tensorflow as tf\n'), ((13852, 13866), 'tensorflow.cholesky', 'tf.cholesky', (['A'], {}), '(A)\n', (13863, 13866), True, 'import tensorflow as tf\n'), ((14230, 14245), 'tensorflow.assign', 'tf.assign', (['a', 'b'], {}), '(a, b)\n', (14239, 14245), True, 'import tensorflow as tf\n'), ((14386, 14430), 'tensorflow.constant', 'tf.constant', (['value'], {'dtype': 'dtype', 'shape': 'shape'}), '(value, dtype=dtype, shape=shape)\n', (14397, 14430), True, 'import tensorflow as tf\n'), ((14677, 14705), 'tensorflow.concat', 'tf.concat', (['values'], {'axis': 'axis'}), '(values, axis=axis)\n', (14686, 14705), True, 'import tensorflow as tf\n'), ((14761, 14787), 'tensorflow.gather', 'tf.gather', (['params', 'indices'], {}), '(params, indices)\n', (14770, 14787), True, 'import tensorflow as tf\n'), ((14846, 14875), 'tensorflow.gather_nd', 'tf.gather_nd', (['params', 'indices'], {}), '(params, indices)\n', (14858, 14875), True, 'import tensorflow as tf\n'), ((14919, 14933), 'tensorflow.equal', 'tf.equal', (['x', 'y'], {}), '(x, y)\n', (14927, 14933), True, 'import tensorflow as tf\n'), ((14983, 15003), 'tensorflow.logical_and', 'tf.logical_and', (['x', 'y'], {}), '(x, y)\n', (14997, 15003), True, 'import tensorflow as tf\n'), ((15135, 15240), 'tensorflow.matmul', 'tf.matmul', (['a', 'b'], {'transpose_a': 'transpose_a', 'transpose_b': 'transpose_b', 'a_is_sparse': 'a_is_sparse', 'name': 'name'}), '(a, b, transpose_a=transpose_a, transpose_b=transpose_b,\n a_is_sparse=a_is_sparse, name=name)\n', (15144, 15240), True, 'import tensorflow as tf\n'), ((15277, 15288), 'tensorflow.trace', 'tf.trace', (['a'], {}), '(a)\n', (15285, 15288), True, 'import tensorflow as tf\n'), ((15344, 15370), 'tensorflow.transpose', 'tf.transpose', (['a'], {'perm': 'perm'}), '(a, perm=perm)\n', (15356, 15370), True, 'import tensorflow as tf\n'), ((15422, 15444), 'tensorflow.matrix_transpose', 'tf.matrix_transpose', (['a'], {}), '(a)\n', (15441, 15444), True, 'import tensorflow as tf\n'), ((15491, 15508), 'tensorflow.matrix_diag', 'tf.matrix_diag', (['a'], {}), '(a)\n', (15505, 15508), True, 'import tensorflow as tf\n'), ((15560, 15582), 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['a'], {}), '(a)\n', (15579, 15582), True, 'import tensorflow as tf\n'), ((15640, 15675), 'tensorflow.linalg.set_diag', 'tf.linalg.set_diag', (['input', 'diagonal'], {}), '(input, diagonal)\n', (15658, 15675), True, 'import tensorflow as tf\n'), ((15746, 15794), 'tensorflow.linalg.band_part', 'tf.linalg.band_part', (['input', 'num_lower', 'num_upper'], {}), '(input, num_lower, num_upper)\n', (15765, 15794), True, 'import tensorflow as tf\n'), 
((17651, 17669), 'tensorflow.contrib.distributions.fill_triangular', 'fill_triangular', (['a'], {}), '(a)\n', (17666, 17669), False, 'from tensorflow.contrib.distributions import fill_triangular\n'), ((17719, 17739), 'tensorflow.matrix_inverse', 'tf.matrix_inverse', (['a'], {}), '(a)\n', (17736, 17739), True, 'import tensorflow as tf\n'), ((17794, 17816), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', 'dim'], {}), '(x, dim)\n', (17808, 17816), True, 'import tensorflow as tf\n'), ((17871, 17896), 'tensorflow.tile', 'tf.tile', (['input', 'multiples'], {}), '(input, multiples)\n', (17878, 17896), True, 'import tensorflow as tf\n'), ((17955, 17984), 'tensorflow.gradients', 'tf.gradients', (['loss', 'variables'], {}), '(loss, variables)\n', (17967, 17984), True, 'import tensorflow as tf\n'), ((18026, 18038), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (18035, 18038), True, 'import tensorflow as tf\n'), ((18098, 18128), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', 'low', 'high'], {}), '(x, low, high)\n', (18114, 18128), True, 'import tensorflow as tf\n'), ((18196, 18234), 'tensorflow.stack', 'tf.stack', (['values'], {'axis': 'axis', 'name': 'name'}), '(values, axis=axis, name=name)\n', (18204, 18234), True, 'import tensorflow as tf\n'), ((18316, 18365), 'tensorflow.unstack', 'tf.unstack', (['values'], {'num': 'num', 'axis': 'axis', 'name': 'name'}), '(values, num=num, axis=axis, name=name)\n', (18326, 18365), True, 'import tensorflow as tf\n'), ((18604, 18650), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (18617, 18650), True, 'import tensorflow as tf\n'), ((18729, 18781), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (18748, 18781), True, 'import tensorflow as tf\n'), ((18853, 18898), 'tensorflow.matrix_solve', 'tf.matrix_solve', (['matrix', 'rhs'], {'adjoint': 'adjoint'}), '(matrix, rhs, adjoint=adjoint)\n', (18868, 18898), True, 'import tensorflow as tf\n'), ((20117, 20152), 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['x', 'y'], {}), '(x, y)\n', (20146, 20152), True, 'import tensorflow as tf\n'), ((20410, 20425), 'tensorflow.matmul', 'tf.matmul', (['x', 'y'], {}), '(x, y)\n', (20419, 20425), True, 'import tensorflow as tf\n'), ((20616, 20650), 'tensorflow.eye', 'tf.eye', (['d'], {'batch_shape': 'batch_shape'}), '(d, batch_shape=batch_shape)\n', (20622, 20650), True, 'import tensorflow as tf\n'), ((20824, 20853), 'tensorflow.gradients', 'tf.gradients', (['loss', 'variables'], {}), '(loss, variables)\n', (20836, 20853), True, 'import tensorflow as tf\n'), ((20892, 20904), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (20901, 20904), True, 'import tensorflow as tf\n'), ((20957, 20980), 'tensorflow.argmax', 'tf.argmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (20966, 20980), True, 'import tensorflow as tf\n'), ((21046, 21092), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (21059, 21092), True, 'import tensorflow as tf\n'), ((21164, 21216), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (21183, 21216), True, 'import tensorflow as tf\n'), ((21677, 21730), 'tensorflow.where', 'tf.where', (['condition', 'then_expression', 'else_expression'], {}), '(condition, 
then_expression, else_expression)\n', (21685, 21730), True, 'import tensorflow as tf\n'), ((22281, 22316), 'tensorflow.range', 'tf.range', (['start', 'limit'], {'delta': 'delta'}), '(start, limit, delta=delta)\n', (22289, 22316), True, 'import tensorflow as tf\n'), ((22360, 22381), 'tensorflow.matrix_solve', 'tf.matrix_solve', (['a', 'b'], {}), '(a, b)\n', (22375, 22381), True, 'import tensorflow as tf\n'), ((22437, 22463), 'tensorflow.one_hot', 'tf.one_hot', (['indices', 'depth'], {}), '(indices, depth)\n', (22447, 22463), True, 'import tensorflow as tf\n'), ((22529, 22541), 'tensorflow.lgamma', 'tf.lgamma', (['x'], {}), '(x)\n', (22538, 22541), True, 'import tensorflow as tf\n'), ((22903, 22916), 'tensorflow.digamma', 'tf.digamma', (['a'], {}), '(a)\n', (22913, 22916), True, 'import tensorflow as tf\n'), ((498, 535), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['self.outputs'], {}), '(self.outputs)\n', (521, 535), True, 'import tensorflow as tf\n'), ((1946, 1991), 'tensorflow.placeholder', 'tf.placeholder', (['dtype'], {'shape': 'shape', 'name': 'name'}), '(dtype, shape=shape, name=name)\n', (1960, 1991), True, 'import tensorflow as tf\n'), ((2138, 2210), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'initial_value', 'trainable': 'trainable', 'name': 'name'}), '(initial_value=initial_value, trainable=trainable, name=name)\n', (2149, 2210), True, 'import tensorflow as tf\n'), ((3275, 3307), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3305, 3307), True, 'import tensorflow as tf\n'), ((3326, 3359), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3357, 3359), True, 'import tensorflow as tf\n'), ((5132, 5159), 'tensorflow.ones', 'tf.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5139, 5159), True, 'import tensorflow as tf\n'), ((5204, 5232), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5212, 5232), True, 'import tensorflow as tf\n'), ((5833, 5862), 'numpy.random.randint', 'np.random.randint', (['(10000000.0)'], {}), '(10000000.0)\n', (5850, 5862), True, 'import numpy as np\n'), ((6568, 6589), 'tensorflow.cast', 'tf.cast', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (6575, 6589), True, 'import tensorflow as tf\n'), ((6611, 6637), 'tensorflow.cast', 'tf.cast', (['kernel', '"""float32"""'], {}), "(kernel, 'float32')\n", (6618, 6637), True, 'import tensorflow as tf\n'), ((6833, 6854), 'tensorflow.cast', 'tf.cast', (['x', '"""float64"""'], {}), "(x, 'float64')\n", (6840, 6854), True, 'import tensorflow as tf\n'), ((7332, 7353), 'tensorflow.cast', 'tf.cast', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (7339, 7353), True, 'import tensorflow as tf\n'), ((7375, 7401), 'tensorflow.cast', 'tf.cast', (['kernel', '"""float32"""'], {}), "(kernel, 'float32')\n", (7382, 7401), True, 'import tensorflow as tf\n'), ((7545, 7566), 'tensorflow.cast', 'tf.cast', (['x', '"""float64"""'], {}), "(x, 'float64')\n", (7552, 7566), True, 'import tensorflow as tf\n'), ((8235, 8256), 'tensorflow.cast', 'tf.cast', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (8242, 8256), True, 'import tensorflow as tf\n'), ((8305, 8359), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x', 'pool_size', 'strides'], {'padding': 'padding'}), '(x, pool_size, strides, padding=padding)\n', (8319, 8359), True, 'import tensorflow as tf\n'), ((8605, 8626), 'tensorflow.cast', 'tf.cast', (['x', '"""float64"""'], {}), "(x, 'float64')\n", 
(8612, 8626), True, 'import tensorflow as tf\n'), ((9759, 9773), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (9770, 9773), True, 'import tensorflow as tf\n'), ((10797, 10869), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'output', 'labels': 'target'}), '(logits=output, labels=target)\n', (10839, 10869), True, 'import tensorflow as tf\n'), ((10984, 11053), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'target', 'logits': 'output'}), '(labels=target, logits=output)\n', (11023, 11053), True, 'import tensorflow as tf\n'), ((11842, 11878), 'tensorflow.scan', 'tf.scan', (['step', 'input', 'initial_states'], {}), '(step, input, initial_states)\n', (11849, 11878), True, 'import tensorflow as tf\n'), ((12347, 12370), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['term', '(-1)'], {}), '(term, -1)\n', (12360, 12370), True, 'import tensorflow as tf\n'), ((12883, 12905), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12903, 12905), True, 'import tensorflow as tf\n'), ((17228, 17239), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (17236, 17239), True, 'import tensorflow as tf\n'), ((20363, 20394), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['y', 'new_y_shape'], {}), '(y, new_y_shape)\n', (20378, 20394), True, 'import tensorflow as tf\n'), ((21859, 21874), 'tensorflow.stack', 'tf.stack', (['shape'], {}), '(shape)\n', (21867, 21874), True, 'import tensorflow as tf\n'), ((22237, 22265), 'tensorflow.range', 'tf.range', (['start'], {'delta': 'delta'}), '(start, delta=delta)\n', (22245, 22265), True, 'import tensorflow as tf\n'), ((565, 580), 'tensorflow.assign', 'tf.assign', (['k', 'v'], {}), '(k, v)\n', (574, 580), True, 'import tensorflow as tf\n'), ((1162, 1188), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (1186, 1188), True, 'import tensorflow as tf\n'), ((5045, 5082), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5062, 5082), True, 'import tensorflow as tf\n'), ((8409, 8463), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['x', 'pool_size', 'strides'], {'padding': 'padding'}), '(x, pool_size, strides, padding=padding)\n', (8423, 8463), True, 'import tensorflow as tf\n'), ((10533, 10566), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['output', 'axis', '(True)'], {}), '(output, axis, True)\n', (10546, 10566), True, 'import tensorflow as tf\n'), ((12831, 12860), 'tensorflow.RegisterGradient', 'tf.RegisterGradient', (['rnd_name'], {}), '(rnd_name)\n', (12850, 12860), True, 'import tensorflow as tf\n'), ((13022, 13079), 'tensorflow.py_func', 'tf.py_func', (['func', 'inp', 'Tout'], {'stateful': 'stateful', 'name': 'name'}), '(func, inp, Tout, stateful=stateful, name=name)\n', (13032, 13079), True, 'import tensorflow as tf\n'), ((16466, 16482), 'tensorflow.concat', 'tf.concat', (['a', '(-1)'], {}), '(a, -1)\n', (16475, 16482), True, 'import tensorflow as tf\n'), ((17561, 17577), 'tensorflow.concat', 'tf.concat', (['b', '(-2)'], {}), '(b, -2)\n', (17570, 17577), True, 'import tensorflow as tf\n'), ((11275, 11291), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (11283, 11291), True, 'import tensorflow as tf\n'), ((12791, 12824), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100000000.0)'], {}), '(0, 100000000.0)\n', (12808, 12824), True, 'import numpy as np\n'), ((13214, 13239), 'numpy.linalg.cholesky', 
'np.linalg.cholesky', (['A_new'], {}), '(A_new)\n', (13232, 13239), True, 'import numpy as np\n'), ((20330, 20341), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (20338, 20341), True, 'import tensorflow as tf\n'), ((21876, 21891), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (21884, 21891), True, 'import numpy as np\n'), ((3706, 3723), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (3716, 3723), True, 'import numpy as np\n'), ((10742, 10756), 'tensorflow.log', 'tf.log', (['output'], {}), '(output)\n', (10748, 10756), True, 'import tensorflow as tf\n'), ((14493, 14516), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (14513, 14516), True, 'import tensorflow as tf\n'), ((17456, 17491), 'tensorflow.concat', 'tf.concat', (['[leading_dim, [n, m]]', '(0)'], {}), '([leading_dim, [n, m]], 0)\n', (17465, 17491), True, 'import tensorflow as tf\n'), ((20308, 20319), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (20316, 20319), True, 'import tensorflow as tf\n'), ((13487, 13506), 'numpy.eye', 'np.eye', (['A.shape[-1]'], {}), '(A.shape[-1])\n', (13493, 13506), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function
import cv2
import pandas as pd
import numpy as np
import six
import ubelt as ub
from six.moves import zip_longest
from os.path import join, dirname
import warnings
def multi_plot(xdata=None, ydata=[], **kwargs):
r"""
Plots multiple lines, bars, etc.
This is the workhorse function that implements almost all of the heavy
lifting in this file. Any plotting helper not using it should probably find
a way to do so. It is fairly general and relatively clean.
Args:
xdata (ndarray): can also be a list of arrays
ydata (list or dict of ndarrays): can also be a single array
**kwargs:
Misc:
fnum, pnum, use_legend, legend_loc
Labels:
xlabel, ylabel, title, figtitle
ticksize, titlesize, legendsize, labelsize
Grid:
gridlinewidth, gridlinestyle
Ticks:
num_xticks, num_yticks, tickwidth, ticklength, ticksize
Data:
xmin, xmax, ymin, ymax, spread_list
# can append _list to any of these
# these can be dictionaries if ydata was also a dict
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle']
any plot_kw key can be a scalar (corresponding to all ydatas),
a list if ydata was specified as a list, or a dict if ydata was
specified as a dict.
kind = ['bar', 'plot', ...]
if kind='plot':
spread
if kind='bar':
stacked, width
References:
matplotlib.org/examples/api/barchart_demo.html
CommandLine:
python -m netharn.util.mplutil multi_plot:0 --show
python -m netharn.util.mplutil multi_plot:1 --show
Example:
>>> autompl()
>>> xdata = [1, 2, 3, 4, 5]
>>> ydata_list = [[1, 2, 3, 4, 5], [3, 3, 3, 3, 3], [5, 4, np.nan, 2, 1], [4, 3, np.nan, 1, 0]]
>>> kwargs = {'label': ['spamΣ', 'eggs', 'jamµ', 'pram'], 'linestyle': '-'}
>>> #fig = multi_plot(xdata, ydata_list, title='$\phi_1(\\vec{x})$', xlabel='\nfds', **kwargs)
>>> fig = multi_plot(xdata, ydata_list, title='ΣΣΣµµµ', xlabel='\nfdsΣΣΣµµµ', **kwargs)
>>> show_if_requested()
Example:
>>> autompl()
>>> fig1 = multi_plot([1, 2, 3], [4, 5, 6])
>>> fig2 = multi_plot([1, 2, 3], [4, 5, 6], fnum=4)
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
ydata_list = ydata
if isinstance(ydata_list, dict):
# Special case where ydata is a dictionary
if isinstance(xdata, six.string_types):
# Special-er case where xdata is specified in ydata
xkey = xdata
ykeys = set(ydata_list.keys()) - {xkey}
xdata = ydata_list[xkey]
else:
ykeys = list(ydata_list.keys())
# Normalize input
ydata_list = list(ub.take(ydata_list, ykeys))
kwargs['label_list'] = kwargs.get('label_list', ykeys)
else:
ykeys = None
def is_listlike(data):
flag = isinstance(data, (list, np.ndarray, tuple, pd.Series))
flag &= hasattr(data, '__getitem__') and hasattr(data, '__len__')
return flag
def is_list_of_scalars(data):
if is_listlike(data):
if len(data) > 0 and not is_listlike(data[0]):
return True
return False
def is_list_of_lists(data):
if is_listlike(data):
if len(data) > 0 and is_listlike(data[0]):
return True
return False
# allow ydata_list to be passed without a container
if is_list_of_scalars(ydata_list):
ydata_list = [np.array(ydata_list)]
if xdata is None:
xdata = list(range(len(ydata_list[0])))
num_lines = len(ydata_list)
# Transform xdata into xdata_list
if is_list_of_lists(xdata):
xdata_list = [np.array(xd, copy=True) for xd in xdata]
else:
xdata_list = [np.array(xdata, copy=True)] * num_lines
fnum = ensure_fnum(kwargs.get('fnum', None))
pnum = kwargs.get('pnum', None)
kind = kwargs.get('kind', 'plot')
transpose = kwargs.get('transpose', False)
def parsekw_list(key, kwargs, num_lines=num_lines, ykeys=ykeys):
""" copies relevant plot commands into plot_list_kw """
if key in kwargs:
val_list = kwargs[key]
elif key + '_list' in kwargs:
warnings.warn('*_list is deprecated, just use kwarg {}'.format(key))
val_list = kwargs[key + '_list']
elif key + 's' in kwargs:
# hack, multiple ways to do something
warnings.warn('*s is deprecated, just use kwarg {}'.format(key))
val_list = kwargs[key + 's']
else:
val_list = None
if val_list is not None:
if isinstance(val_list, dict):
if ykeys is None:
raise ValueError('ydata is not a dict, but a property was.')
else:
val_list = [val_list[key] for key in ykeys]
if not isinstance(val_list, list):
val_list = [val_list] * num_lines
return val_list
# Parse out arguments to ax.plot
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle', 'alpha']
# hackish / extra args that dont go to plot, but help
extra_plot_kw_keys = ['spread_alpha', 'autolabel', 'edgecolor', 'fill']
plot_kw_keys += extra_plot_kw_keys
plot_ks_vals = [parsekw_list(key, kwargs) for key in plot_kw_keys]
plot_list_kw = dict([
(key, vals)
for key, vals in zip(plot_kw_keys, plot_ks_vals) if vals is not None
])
if 'color' not in plot_list_kw:
plot_list_kw['color'] = distinct_colors(num_lines)
if kind == 'plot':
if 'marker' not in plot_list_kw:
plot_list_kw['marker'] = distinct_markers(num_lines)
if 'spread_alpha' not in plot_list_kw:
plot_list_kw['spread_alpha'] = [.2] * num_lines
if kind == 'bar':
# Remove non-bar kwargs
for key in ['markeredgewidth', 'linewidth', 'marker', 'markersize', 'linestyle']:
plot_list_kw.pop(key, None)
stacked = kwargs.get('stacked', False)
width_key = 'height' if transpose else 'width'
if 'width_list' in kwargs:
plot_list_kw[width_key] = kwargs['width_list']
else:
width = kwargs.get('width', .9)
# if width is None:
# # HACK: need variable width
# # width = np.mean(np.diff(xdata_list[0]))
# width = .9
if not stacked:
width /= num_lines
#plot_list_kw['orientation'] = ['horizontal'] * num_lines
plot_list_kw[width_key] = [width] * num_lines
spread_list = kwargs.get('spread_list', None)
if spread_list is None:
pass
# nest into a list of dicts for each line in the multiplot
valid_keys = list(set(plot_list_kw.keys()) - set(extra_plot_kw_keys))
valid_vals = list(ub.dict_take(plot_list_kw, valid_keys))
plot_kw_list = [dict(zip(valid_keys, vals)) for vals in zip(*valid_vals)]
extra_kw_keys = [key for key in extra_plot_kw_keys if key in plot_list_kw]
extra_kw_vals = list(ub.dict_take(plot_list_kw, extra_kw_keys))
extra_kw_list = [dict(zip(extra_kw_keys, vals)) for vals in zip(*extra_kw_vals)]
# Get passed in axes or setup a new figure
ax = kwargs.get('ax', None)
if ax is None:
doclf = kwargs.get('doclf', False)
fig = figure(fnum=fnum, pnum=pnum, docla=False, doclf=doclf)
ax = plt.gca()
else:
plt.sca(ax)
fig = ax.figure
# +---------------
# Draw plot lines
ydata_list = np.array(ydata_list)
if transpose:
if kind == 'bar':
plot_func = ax.barh
elif kind == 'plot':
def plot_func(_x, _y, **kw):
return ax.plot(_y, _x, **kw)
else:
plot_func = getattr(ax, kind) # usually ax.plot
assert len(ydata_list) > 0, 'no ydata'
#assert len(extra_kw_list) == len(plot_kw_list), 'bad length'
#assert len(extra_kw_list) == len(ydata_list), 'bad length'
_iter = enumerate(zip_longest(xdata_list, ydata_list, plot_kw_list, extra_kw_list))
for count, (_xdata, _ydata, plot_kw, extra_kw) in _iter:
ymask = np.isfinite(_ydata)
ydata_ = _ydata.compress(ymask)
xdata_ = _xdata.compress(ymask)
if kind == 'bar':
if stacked:
# Plot bars on top of each other
xdata_ = xdata_
else:
# Plot bars side by side
baseoffset = (width * num_lines) / 2
lineoffset = (width * count)
offset = baseoffset - lineoffset # FIXME for more histogram bars
xdata_ = xdata_ - offset
# width_key = 'height' if transpose else 'width'
# plot_kw[width_key] = np.diff(xdata)
objs = plot_func(xdata_, ydata_, **plot_kw)
if kind == 'bar':
if extra_kw is not None and 'edgecolor' in extra_kw:
for rect in objs:
rect.set_edgecolor(extra_kw['edgecolor'])
if extra_kw is not None and extra_kw.get('autolabel', False):
# FIXME: probably a more canonical way to include bar
# autolabeling with transpose support, but this is a hack that
# works for now
for rect in objs:
if transpose:
numlbl = width = rect.get_width()
xpos = width + ((_xdata.max() - _xdata.min()) * .005)
ypos = rect.get_y() + rect.get_height() / 2.
ha, va = 'left', 'center'
else:
numlbl = height = rect.get_height()
xpos = rect.get_x() + rect.get_width() / 2.
ypos = 1.05 * height
ha, va = 'center', 'bottom'
barlbl = '%.3f' % (numlbl,)
ax.text(xpos, ypos, barlbl, ha=ha, va=va)
# print('extra_kw = %r' % (extra_kw,))
if kind == 'plot' and extra_kw.get('fill', False):
ax.fill_between(_xdata, ydata_, alpha=plot_kw.get('alpha', 1.0),
color=plot_kw.get('color', None)) # , zorder=0)
if spread_list is not None:
# Plots a spread around plot lines usually indicating standard
# deviation
_xdata = np.array(_xdata)
spread = spread_list[count]
ydata_ave = np.array(ydata_)
y_data_dev = np.array(spread)
y_data_max = ydata_ave + y_data_dev
y_data_min = ydata_ave - y_data_dev
ax = plt.gca()
spread_alpha = extra_kw['spread_alpha']
ax.fill_between(_xdata, y_data_min, y_data_max, alpha=spread_alpha,
color=plot_kw.get('color', None)) # , zorder=0)
# L________________
#max_y = max(np.max(y_data), max_y)
#min_y = np.min(y_data) if min_y is None else min(np.min(y_data), min_y)
ydata = _ydata # HACK
xdata = _xdata # HACK
if transpose:
#xdata_list = ydata_list
ydata = xdata
# Hack / Fix any transpose issues
def transpose_key(key):
if key.startswith('x'):
return 'y' + key[1:]
elif key.startswith('y'):
return 'x' + key[1:]
elif key.startswith('num_x'):
# hackier, fixme to use regex or something
return 'num_y' + key[5:]
elif key.startswith('num_y'):
# hackier, fixme to use regex or something
return 'num_x' + key[5:]
else:
return key
kwargs = {transpose_key(key): val for key, val in kwargs.items()}
# Setup axes labeling
title = kwargs.get('title', None)
xlabel = kwargs.get('xlabel', '')
ylabel = kwargs.get('ylabel', '')
def none_or_unicode(text):
return None if text is None else ub.ensure_unicode(text)
xlabel = none_or_unicode(xlabel)
ylabel = none_or_unicode(ylabel)
title = none_or_unicode(title)
# Initial integration with mpl rcParams standards
mplrc = mpl.rcParams.copy()
mplrc.update({
# 'legend.fontsize': custom_figure.LEGEND_SIZE,
# 'axes.titlesize': custom_figure.TITLE_SIZE,
# 'axes.labelsize': custom_figure.LABEL_SIZE,
# 'legend.facecolor': 'w',
# 'font.family': 'sans-serif',
# 'xtick.labelsize': custom_figure.TICK_SIZE,
# 'ytick.labelsize': custom_figure.TICK_SIZE,
})
mplrc.update(kwargs.get('rcParams', {}))
titlesize = kwargs.get('titlesize', mplrc['axes.titlesize'])
labelsize = kwargs.get('labelsize', mplrc['axes.labelsize'])
legendsize = kwargs.get('legendsize', mplrc['legend.fontsize'])
xticksize = kwargs.get('ticksize', mplrc['xtick.labelsize'])
yticksize = kwargs.get('ticksize', mplrc['ytick.labelsize'])
family = kwargs.get('fontfamily', mplrc['font.family'])
tickformat = kwargs.get('tickformat', None)
ytickformat = kwargs.get('ytickformat', tickformat)
xtickformat = kwargs.get('xtickformat', tickformat)
# 'DejaVu Sans','Verdana', 'Arial'
weight = kwargs.get('fontweight', None)
if weight is None:
weight = 'normal'
labelkw = {
'fontproperties': mpl.font_manager.FontProperties(
weight=weight,
family=family, size=labelsize)
}
ax.set_xlabel(xlabel, **labelkw)
ax.set_ylabel(ylabel, **labelkw)
tick_fontprop = mpl.font_manager.FontProperties(family=family,
weight=weight)
if tick_fontprop is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontproperties(tick_fontprop)
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontproperties(tick_fontprop)
if xticksize is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontsize(xticksize)
if yticksize is not None:
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontsize(yticksize)
if xtickformat is not None:
# mpl.ticker.StrMethodFormatter # newstyle
# mpl.ticker.FormatStrFormatter # oldstyle
ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(ytickformat))
xtick_kw = ytick_kw = {
'width': kwargs.get('tickwidth', None),
'length': kwargs.get('ticklength', None),
}
xtick_kw = {k: v for k, v in xtick_kw.items() if v is not None}
ytick_kw = {k: v for k, v in ytick_kw.items() if v is not None}
ax.xaxis.set_tick_params(**xtick_kw)
ax.yaxis.set_tick_params(**ytick_kw)
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
# Setup axes limits
if 'xlim' in kwargs:
xlim = kwargs['xlim']
if xlim is not None:
if 'xmin' not in kwargs and 'xmax' not in kwargs:
kwargs['xmin'] = xlim[0]
kwargs['xmax'] = xlim[1]
else:
raise ValueError('use xmax, xmin instead of xlim')
if 'ylim' in kwargs:
ylim = kwargs['ylim']
if ylim is not None:
if 'ymin' not in kwargs and 'ymax' not in kwargs:
kwargs['ymin'] = ylim[0]
kwargs['ymax'] = ylim[1]
else:
raise ValueError('use ymax, ymin instead of ylim')
xmin = kwargs.get('xmin', ax.get_xlim()[0])
xmax = kwargs.get('xmax', ax.get_xlim()[1])
ymin = kwargs.get('ymin', ax.get_ylim()[0])
ymax = kwargs.get('ymax', ax.get_ylim()[1])
text_type = six.text_type
if text_type(xmax) == 'data':
xmax = max([xd.max() for xd in xdata_list])
if text_type(xmin) == 'data':
xmin = min([xd.min() for xd in xdata_list])
# Setup axes ticks
num_xticks = kwargs.get('num_xticks', None)
num_yticks = kwargs.get('num_yticks', None)
if num_xticks is not None:
# TODO check if xdata is integral
if xdata.dtype.kind == 'i':
xticks = np.linspace(np.ceil(xmin), np.floor(xmax),
num_xticks).astype(np.int32)
else:
xticks = np.linspace((xmin), (xmax), num_xticks)
ax.set_xticks(xticks)
if num_yticks is not None:
if ydata.dtype.kind == 'i':
yticks = np.linspace(np.ceil(ymin), np.floor(ymax),
num_yticks).astype(np.int32)
else:
yticks = np.linspace((ymin), (ymax), num_yticks)
ax.set_yticks(yticks)
force_xticks = kwargs.get('force_xticks', None)
if force_xticks is not None:
xticks = np.array(sorted(ax.get_xticks().tolist() + force_xticks))
ax.set_xticks(xticks)
yticklabels = kwargs.get('yticklabels', None)
if yticklabels is not None:
# Hack ONLY WORKS WHEN TRANSPOSE = True
# Overrides num_yticks
ax.set_yticks(ydata)
ax.set_yticklabels(yticklabels)
xticklabels = kwargs.get('xticklabels', None)
if xticklabels is not None:
# Overrides num_xticks
ax.set_xticks(xdata)
ax.set_xticklabels(xticklabels)
xtick_rotation = kwargs.get('xtick_rotation', None)
if xtick_rotation is not None:
[lbl.set_rotation(xtick_rotation)
for lbl in ax.get_xticklabels()]
ytick_rotation = kwargs.get('ytick_rotation', None)
if ytick_rotation is not None:
[lbl.set_rotation(ytick_rotation)
for lbl in ax.get_yticklabels()]
# Axis padding
xpad = kwargs.get('xpad', None)
ypad = kwargs.get('ypad', None)
xpad_factor = kwargs.get('xpad_factor', None)
ypad_factor = kwargs.get('ypad_factor', None)
if xpad is None and xpad_factor is not None:
xpad = (xmax - xmin) * xpad_factor
if ypad is None and ypad_factor is not None:
ypad = (ymax - ymin) * ypad_factor
xpad = 0 if xpad is None else xpad
ypad = 0 if ypad is None else ypad
ypad_high = kwargs.get('ypad_high', ypad)
ypad_low = kwargs.get('ypad_low', ypad)
xpad_high = kwargs.get('xpad_high', xpad)
xpad_low = kwargs.get('xpad_low', xpad)
xmin, xmax = (xmin - xpad_low), (xmax + xpad_high)
ymin, ymax = (ymin - ypad_low), (ymax + ypad_high)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
xscale = kwargs.get('xscale', None)
yscale = kwargs.get('yscale', None)
if yscale is not None:
ax.set_yscale(yscale)
if xscale is not None:
ax.set_xscale(xscale)
gridlinestyle = kwargs.get('gridlinestyle', None)
gridlinewidth = kwargs.get('gridlinewidth', None)
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
if gridlinestyle:
for line in gridlines:
line.set_linestyle(gridlinestyle)
if gridlinewidth:
for line in gridlines:
line.set_linewidth(gridlinewidth)
# Setup title
if title is not None:
titlekw = {
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=titlesize)
}
ax.set_title(title, **titlekw)
use_legend = kwargs.get('use_legend', 'label' in valid_keys)
legend_loc = kwargs.get('legend_loc', 'best')
legend_alpha = kwargs.get('legend_alpha', 1.0)
if use_legend:
legendkw = {
'alpha': legend_alpha,
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=legendsize)
}
legend(loc=legend_loc, ax=ax, **legendkw)
figtitle = kwargs.get('figtitle', None)
if figtitle is not None:
set_figtitle(figtitle, fontfamily=family, fontweight=weight,
size=kwargs.get('figtitlesize'))
use_darkbackground = kwargs.get('use_darkbackground', None)
lightbg = kwargs.get('lightbg', None)
if lightbg is None:
lightbg = True
if use_darkbackground is None:
use_darkbackground = not lightbg
if use_darkbackground:
_dark_background(force=use_darkbackground is True)
# TODO: return better info
return fig
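# A small illustrative sketch of the dict-style calling convention described
# in the docstring (names and values here are made up, not part of the
# doctests):
#   ydata = {'epoch': [0, 1, 2], 'train': [0.9, 0.7, 0.5], 'vali': [1.0, 0.8, 0.7]}
#   multi_plot(xdata='epoch', ydata=ydata, xlabel='epoch', ylabel='loss',
#              marker='o', ymin=0, ymax=1)
# When xdata is given as a string, the x values are pulled out of the ydata
# dict and the remaining keys become the plotted (and legended) series.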
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False,
docla=False, projection=None, **kwargs):
"""
http://matplotlib.org/users/gridspec.html
Args:
fnum (int): fignum = figure number
pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple
title (str): (default = None)
figtitle (None): (default = None)
docla (bool): (default = False)
doclf (bool): (default = False)
Returns:
mpl.Figure: fig
CommandLine:
python -m netharn.util.mplutil figure:0 --show
Example:
>>> autompl()
>>> import matplotlib.pyplot as plt
>>> fnum = 1
>>> fig = figure(fnum, (2, 2, 1))
>>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
>>> fig = figure(fnum, (2, 2, 2))
>>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
>>> show_if_requested()
Example:
>>> autompl()
>>> import matplotlib.pyplot as plt
>>> fnum = 1
>>> fig = figure(fnum, (2, 2, 1))
>>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
>>> fig = figure(fnum, (2, 2, 2))
>>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
>>> fig = figure(fnum, (2, 4, (1, slice(1, None))))
>>> plt.gca().text(0.5, 0.5, "ax3", va="center", ha="center")
>>> show_if_requested()
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def ensure_fig(fnum=None):
if fnum is None:
try:
fig = plt.gcf()
except Exception as ex:
fig = plt.figure()
else:
try:
fig = plt.figure(fnum)
except Exception as ex:
fig = plt.gcf()
return fig
def _convert_pnum_int_to_tup(int_pnum):
# Convert pnum to tuple format if in integer format
nr = int_pnum // 100
nc = int_pnum // 10 - (nr * 10)
px = int_pnum - (nr * 100) - (nc * 10)
pnum = (nr, nc, px)
return pnum
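# e.g. _convert_pnum_int_to_tup(221) -> (2, 2, 1), mirroring matplotlib's
# three-digit subplot shorthand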
def _pnum_to_subspec(pnum):
if isinstance(pnum, six.string_types):
pnum = list(pnum)
nrow, ncols, plotnum = pnum
# if kwargs.get('use_gridspec', True):
# Convert old pnums to gridspec
gs = gridspec.GridSpec(nrow, ncols)
if isinstance(plotnum, (tuple, slice, list)):
subspec = gs[plotnum]
else:
subspec = gs[plotnum - 1]
return (subspec,)
def _setup_subfigure(pnum):
if isinstance(pnum, int):
pnum = _convert_pnum_int_to_tup(pnum)
axes_list = fig.get_axes()
if docla or len(axes_list) == 0:
if pnum is not None:
assert pnum[0] > 0, 'nRows must be > 0: pnum=%r' % (pnum,)
assert pnum[1] > 0, 'nCols must be > 0: pnum=%r' % (pnum,)
subspec = _pnum_to_subspec(pnum)
ax = fig.add_subplot(*subspec, projection=projection)
if len(axes_list) > 0:
ax.cla()
else:
ax = plt.gca()
else:
if pnum is not None:
subspec = _pnum_to_subspec(pnum)
ax = plt.subplot(*subspec)
else:
ax = plt.gca()
fig = ensure_fig(fnum)
if doclf:
fig.clf()
if pnum is not None:
_setup_subfigure(pnum)
# Set the title / figtitle
if title is not None:
ax = plt.gca()
ax.set_title(title)
if figtitle is not None:
fig.suptitle(figtitle)
return fig
def pandas_plot_matrix(df, rot=90, ax=None, grid=True, label=None,
zerodiag=False,
cmap='viridis', showvals=False, logscale=True):
import matplotlib as mpl
import copy
from matplotlib import pyplot as plt
if ax is None:
fig = figure(fnum=1, pnum=(1, 1, 1))
fig.clear()
ax = plt.gca()
ax = plt.gca()
values = df.values
if zerodiag:
values = values.copy()
values = values - np.diag(np.diag(values))
# aximg = ax.imshow(values, interpolation='none', cmap='viridis')
if logscale:
from matplotlib.colors import LogNorm
vmin = df[df > 0].min().min()
norm = LogNorm(vmin=vmin, vmax=values.max())
else:
norm = None
cmap = copy.copy(mpl.cm.get_cmap(cmap)) # copy the default cmap
cmap.set_bad((0, 0, 0))
aximg = ax.matshow(values, interpolation='none', cmap=cmap, norm=norm)
# aximg = ax.imshow(values, interpolation='none', cmap='viridis', norm=norm)
# ax.imshow(values, interpolation='none', cmap='viridis')
ax.grid(False)
cax = plt.colorbar(aximg, ax=ax)
if label is not None:
cax.set_label(label)
ax.set_xticks(list(range(len(df.index))))
ax.set_xticklabels([lbl[0:100] for lbl in df.index])
for lbl in ax.get_xticklabels():
lbl.set_rotation(rot)
for lbl in ax.get_xticklabels():
lbl.set_horizontalalignment('center')
ax.set_yticks(list(range(len(df.columns))))
ax.set_yticklabels([lbl[0:100] for lbl in df.columns])
for lbl in ax.get_yticklabels():
lbl.set_horizontalalignment('right')
for lbl in ax.get_yticklabels():
lbl.set_verticalalignment('center')
# Grid lines around the pixels
if grid:
offset = -.5
xlim = [-.5, len(df.columns)]
ylim = [-.5, len(df.index)]
segments = []
for x in range(ylim[1]):
xdata = [x + offset, x + offset]
ydata = ylim
segment = list(zip(xdata, ydata))
segments.append(segment)
for y in range(xlim[1]):
xdata = xlim
ydata = [y + offset, y + offset]
segment = list(zip(xdata, ydata))
segments.append(segment)
bingrid = mpl.collections.LineCollection(segments, color='w', linewidths=1)
ax.add_collection(bingrid)
if showvals:
x_basis = np.arange(len(df.columns))
y_basis = np.arange(len(df.index))
x, y = np.meshgrid(x_basis, y_basis)
for c, r in zip(x.flatten(), y.flatten()):
val = df.iloc[r, c]
ax.text(c, r, val, va='center', ha='center', color='white')
return ax
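# Rough usage sketch (illustrative only): given a square DataFrame of counts,
# e.g. a confusion matrix `cm_df`,
#   pandas_plot_matrix(cm_df, rot=45, showvals=True, logscale=False)
# draws the matrix with tick labels taken from the DataFrame index and columns.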
def axes_extent(axs, pad=0.0):
"""
Get the full extent of a group of axes, including axes labels, tick labels,
and titles.
"""
import itertools as it
import matplotlib as mpl
def axes_parts(ax):
yield ax
for label in ax.get_xticklabels():
if label.get_text():
yield label
for label in ax.get_yticklabels():
if label.get_text():
yield label
xlabel = ax.get_xaxis().get_label()
ylabel = ax.get_yaxis().get_label()
for label in (xlabel, ylabel, ax.title):
if label.get_text():
yield label
items = it.chain.from_iterable(axes_parts(ax) for ax in axs)
extents = [item.get_window_extent() for item in items]
#mpl.transforms.Affine2D().scale(1.1)
extent = mpl.transforms.Bbox.union(extents)
extent = extent.expanded(1.0 + pad, 1.0 + pad)
return extent
def extract_axes_extents(fig, combine=False, pad=0.0):
# Make sure we draw the axes first so we can
# extract positions from the text objects
import matplotlib as mpl
fig.canvas.draw()
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
dpi_scale_trans_inv = fig.dpi_scale_trans.inverted()
axes_bboxes_ = [axes_extent(axs, pad) for axs in atomic_axes]
axes_extents_ = [extent.transformed(dpi_scale_trans_inv) for extent in axes_bboxes_]
# axes_extents_ = axes_bboxes_
if combine:
# Grab include extents of figure text as well
# FIXME: This might break on OSX
# http://stackoverflow.com/questions/22667224/bbox-backend
renderer = fig.canvas.get_renderer()
for mpl_text in fig.texts:
bbox = mpl_text.get_window_extent(renderer=renderer)
extent_ = bbox.expanded(1.0 + pad, 1.0 + pad)
extent = extent_.transformed(dpi_scale_trans_inv)
# extent = extent_
axes_extents_.append(extent)
axes_extents = mpl.transforms.Bbox.union(axes_extents_)
else:
axes_extents = axes_extents_
# if True:
# axes_extents.x0 = 0
# # axes_extents.y1 = 0
return axes_extents
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
hspace=None, fig=None):
"""
Kwargs:
left (float): left side of the subplots of the figure
right (float): right side of the subplots of the figure
bottom (float): bottom of the subplots of the figure
top (float): top of the subplots of the figure
wspace (float): width reserved for blank space between subplots
hspace (float): height reserved for blank space between subplots
"""
from matplotlib import pyplot as plt
kwargs = dict(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if fig is None:
fig = plt.gcf()
subplotpars = fig.subplotpars
adjust_dict = subplotpars.__dict__.copy()
del adjust_dict['validate']
adjust_dict.update(kwargs)
fig.subplots_adjust(**adjust_dict)
def render_figure_to_image(fig, **savekw):
import io
import cv2
import matplotlib as mpl
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
with io.BytesIO() as stream:
# This call takes 23% - 15% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
# fig.savefig(stream, **savekw)
stream.seek(0)
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
im_bgra = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
return im_bgra
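# The decoded array comes back exactly as OpenCV reads the saved buffer
# (typically BGRA with PNG output, hence the `im_bgra` name). A rough usage
# sketch (illustrative): im = render_figure_to_image(fig, transparent=True, dpi=150)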
def savefig2(fig, fpath, **kwargs):
"""
Saves the figure cropped to the union of its axes extents, with transparency enabled by default
"""
import matplotlib as mpl
if 'transparent' not in kwargs:
kwargs['transparent'] = True
if 'bbox_inches' not in kwargs:
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
kwargs['bbox_inches'] = extent
fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
"""
References:
https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
"""
print('Copying figure %d to the clipboard' % fig.number)
import matplotlib as mpl
app = mpl.backends.backend_qt5.qApp
QtGui = mpl.backends.backend_qt5.QtGui
im_bgra = render_figure_to_image(fig, transparent=True)
im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
im = im_rgba
QImage = QtGui.QImage
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
clipboard = app.clipboard()
clipboard.setImage(qim)
# size = fig.canvas.size()
# width, height = size.width(), size.height()
# qim = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
# QtWidgets = mpl.backends.backend_qt5.QtWidgets
# pixmap = QtWidgets.QWidget.grab(fig.canvas)
# clipboard.setPixmap(pixmap)
def dict_intersection(dict1, dict2):
r"""
Args:
dict1 (dict):
dict2 (dict):
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> mergedict_ = dict_intersection(dict1, dict2)
>>> print(ub.repr2(mergedict_, nl=0))
{'b': 2, 'c': 3}
"""
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
# maintain order if possible
if isinstance(dict1, ub.odict):
isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
_dict_cls = ub.odict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(
(k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
)
return dict_isect
def _dark_background(ax=None, doubleit=False, force=False):
r"""
Args:
ax (None): (default = None)
doubleit (bool): (default = False)
CommandLine:
python -m .draw_func2 --exec-_dark_background --show
Example:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> fig = figure()
>>> _dark_background()
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
def is_using_style(style):
style_dict = mpl.style.library[style]
return len(dict_intersection(style_dict, mpl.rcParams)) == len(style_dict)
if force:
from mpl_toolkits.mplot3d import Axes3D
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
# Should use mpl style dark background instead
bgcolor = BLACK * .9
if ax is None:
ax = plt.gca()
if isinstance(ax, Axes3D):
ax.set_axis_bgcolor(bgcolor)
ax.tick_params(colors='white')
return
xy, width, height = _get_axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= (doubleit + 1)
height *= (doubleit + 1)
rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(bgcolor)
rect.set_zorder(-99999999999)
rect = ax.add_patch(rect)
def _get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
""" gets geometry of a subplot """
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
_LEGEND_LOCATION = {
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True,
size=None, fontfamily=None, fontweight=None,
fig=None):
r"""
Args:
figtitle (?):
subtitle (str): (default = '')
forcefignum (bool): (default = True)
incanvas (bool): (default = True)
fontfamily (None): (default = None)
fontweight (None): (default = None)
size (None): (default = None)
fig (None): (default = None)
CommandLine:
python -m .custom_figure set_figtitle --show
Example:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> fig = figure(fnum=1, doclf=True)
>>> result = set_figtitle(figtitle='figtitle', fig=fig)
>>> # xdoc: +REQUIRES(--show)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
if figtitle is None:
figtitle = ''
if fig is None:
fig = plt.gcf()
figtitle = ub.ensure_unicode(figtitle)
subtitle = ub.ensure_unicode(subtitle)
if incanvas:
if subtitle != '':
subtitle = '\n' + subtitle
prop = {
'family': fontfamily,
'weight': fontweight,
'size': size,
}
prop = {k: v for k, v in prop.items() if v is not None}
sup = fig.suptitle(figtitle + subtitle)
if prop:
fontproperties = sup.get_fontproperties().copy()
for key, val in prop.items():
getattr(fontproperties, 'set_' + key)(val)
sup.set_fontproperties(fontproperties)
# fontproperties = mpl.font_manager.FontProperties(**prop)
else:
fig.suptitle('')
# Set title in the window
window_figtitle = ('fig(%d) ' % fig.number) + figtitle
window_figtitle = window_figtitle.replace('\n', ' ')
fig.canvas.set_window_title(window_figtitle)
def legend(loc='best', fontproperties=None, size=None, fc='w', alpha=1,
ax=None, handles=None):
r"""
Args:
loc (str): (default = 'best')
fontproperties (None): (default = None)
size (None): (default = None)
Ignore:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> loc = 'best'
>>> xdata = np.linspace(-6, 6)
>>> ydata = np.sin(xdata)
>>> plt.plot(xdata, ydata, label='sin')
>>> fontproperties = None
>>> size = None
>>> result = legend(loc, fontproperties, size)
>>> print(result)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
assert loc in _LEGEND_LOCATION or loc == 'best', (
'invalid loc. try one of %r' % (_LEGEND_LOCATION,))
if ax is None:
ax = plt.gca()
if fontproperties is None:
prop = {}
if size is not None:
prop['size'] = size
# prop['weight'] = 'normal'
# prop['family'] = 'sans-serif'
else:
prop = fontproperties
legendkw = dict(loc=loc)
if prop:
legendkw['prop'] = prop
if handles is not None:
legendkw['handles'] = handles
legend = ax.legend(**legendkw)
if legend:
legend.get_frame().set_fc(fc)
legend.get_frame().set_alpha(alpha)
def distinct_colors(N, brightness=.878, randomize=True, hue_range=(0.0, 1.0), cmap_seed=None):
r"""
Args:
N (int):
brightness (float):
Returns:
list: RGB_tuples
CommandLine:
python -m color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
python -m color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
python -m color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
python -m .color_funcs --test-distinct_colors --N 3 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 4 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 6 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 20 --show
References:
http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
CommandLine:
python -m .color_funcs --exec-distinct_colors --show
python -m .color_funcs --exec-distinct_colors --show --no-randomize --N 50
python -m .color_funcs --exec-distinct_colors --show --cmap_seed=foobar
Ignore:
>>> # build test data
>>> autompl()
>>> N = ub.smartcast(ub.get_argval('--N', default=2), int) # FIXME
>>> randomize = not ub.argflag('--no-randomize')
>>> brightness = 0.878
>>> # execute function
>>> cmap_seed = ub.get_argval('--cmap_seed', default=None)
>>> hue_range = ub.smartcast(ub.get_argval('--hue-range', default=(0.00, 1.0)), list) #FIXME
>>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
>>> # verify results
>>> assert len(RGB_tuples) == N
>>> result = str(RGB_tuples)
>>> print(result)
>>> # xdoctest: +REQUIRES(--show)
>>> color_list = RGB_tuples
>>> testshow_colors(color_list)
>>> show_if_requested()
"""
# TODO: Add sin wave modulation to the sat and value
# HACK for white figures
from matplotlib import pyplot as plt
import colorsys
remove_yellow = True
use_jet = False
if use_jet:
cmap = plt.cm.jet
RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
elif cmap_seed is not None:
# Randomized map based on a seed
#cmap_ = 'Set1'
#cmap_ = 'Dark2'
choices = [
#'Set1', 'Dark2',
'jet',
#'gist_rainbow',
#'rainbow',
#'gnuplot',
#'Accent'
]
cmap_hack = ub.argval('--cmap-hack', default=None)
ncolor_hack = ub.argval('--ncolor-hack', default=None)
if cmap_hack is not None:
choices = [cmap_hack]
if ncolor_hack is not None:
N = int(ncolor_hack)
N_ = N
seed = sum(list(map(ord, ub.hash_data(cmap_seed))))
rng = np.random.RandomState(seed + 48930)
cmap_str = rng.choice(choices, 1)[0]
#print('cmap_str = %r' % (cmap_str,))
cmap = plt.cm.get_cmap(cmap_str)
#.hashstr27(cmap_seed)
#cmap_seed = 0
#pass
jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N ** 2)))
range_ = np.linspace(0, 1, N, endpoint=False)
#print('range_ = %r' % (range_,))
range_ = range_ + jitter
#print('range_ = %r' % (range_,))
while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
range_[range_ < 0] = np.abs(range_[range_ < 0] )
range_[range_ > 1] = 2 - range_[range_ > 1]
#print('range_ = %r' % (range_,))
shift = rng.rand()
range_ = (range_ + shift) % 1
#print('jitter = %r' % (jitter,))
#print('shift = %r' % (shift,))
#print('range_ = %r' % (range_,))
if ncolor_hack is not None:
range_ = range_[0:N_]
RGB_tuples = list(map(tuple, cmap(range_)))
else:
sat = brightness
val = brightness
hmin, hmax = hue_range
if remove_yellow:
hue_skips = [(.13, .24)]
else:
hue_skips = []
hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
total_skip = sum(hue_skip_ranges)
hmax_ = hmax - total_skip
hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=np.float)
# Remove colors (like hard to see yellows) in specified ranges
for skip, range_ in zip(hue_skips, hue_skip_ranges):
hue_list = [hue if hue <= skip[0] else hue + range_ for hue in hue_list]
HSV_tuples = [(hue, sat, val) for hue in hue_list]
RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
if randomize:
deterministic_shuffle(RGB_tuples)
return RGB_tuples
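# Hedged usage sketch (not part of the original module): distinct_colors
# returns N RGB tuples in the 0-1 range; with randomize=False the result is
# reproducible, which is handy when plot colors must stay stable across runs.
def _demo_distinct_colors():
    colors_a = distinct_colors(5, randomize=False)
    colors_b = distinct_colors(5, randomize=False)
    assert len(colors_a) == 5
    assert all(len(c) == 3 for c in colors_a)
    assert colors_a == colors_b  # deterministic when randomize=False
    return colors_a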
def distinct_markers(num, style='astrisk', total=None, offset=0):
r"""
Args:
num (?):
CommandLine:
python -m .draw_func2 --exec-distinct_markers --show
python -m .draw_func2 --exec-distinct_markers --style=star --show
python -m .draw_func2 --exec-distinct_markers --style=polygon --show
Ignore:
>>> autompl()
>>> style = ub.get_argval('--style', type_=str, default='astrisk')
>>> marker_list = distinct_markers(10, style)
>>> x_data = np.arange(0, 3)
>>> for count, (marker) in enumerate(marker_list):
>>> plt.plot(x_data, [count] * len(x_data), marker=marker, markersize=10, linestyle='', label=str(marker))
>>> legend()
>>> show_if_requested()
"""
num_sides = 3
style_num = {
'astrisk': 2,
'star': 1,
'polygon': 0,
'circle': 3
}[style]
if total is None:
total = num
total_degrees = 360 / num_sides
marker_list = [
(num_sides, style_num, total_degrees * (count + offset) / total)
for count in range(num)
]
return marker_list
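# Hedged usage sketch (not part of the original module): each marker is a
# matplotlib tuple-style marker (num_sides, style, rotation_in_degrees); the
# markers differ only by rotation, which is what keeps them distinguishable.
def _demo_distinct_markers():
    markers = distinct_markers(4, style='star')
    assert len(markers) == 4
    assert all(m[0] == 3 and m[1] == 1 for m in markers)  # 3 sides, star style
    # usage: plt.plot(x, y, marker=markers[0], linestyle='', markersize=10)
    return markers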
def deterministic_shuffle(list_, rng=0):
r"""
Args:
list_ (list):
seed (int):
Returns:
list: list_
Example:
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6]
"""
from netharn import util
rng = util.ensure_rng(rng)
rng.shuffle(list_)
return list_
_BASE_FNUM = 9001
def next_fnum(new_base=None):
global _BASE_FNUM
if new_base is not None:
_BASE_FNUM = new_base
_BASE_FNUM += 1
return _BASE_FNUM
def ensure_fnum(fnum):
if fnum is None:
return next_fnum()
return fnum
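# Hedged usage sketch (not part of the original module): next_fnum hands out
# monotonically increasing figure numbers from a module-level counter, and
# ensure_fnum only consults that counter when no explicit number is given.
def _demo_fnum_counter():
    first = next_fnum(new_base=9000)  # resets the counter and returns 9001
    assert first == 9001
    assert ensure_fnum(None) == 9002   # pulls the next number
    assert ensure_fnum(42) == 42       # explicit figure numbers pass through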
def _save_requested(fpath_, save_parts):
    raise NotImplementedError("haven't done this yet")
# dpi = ub.argval('--dpi', type_=int, default=200)
from os.path import expanduser
from matplotlib import pyplot as plt
dpi = 200
fpath_ = expanduser(fpath_)
print('Figure save was requested')
# arg_dict = ut.get_arg_dict(prefix_list=['--', '-'],
# type_hints={'t': list, 'a': list})
arg_dict = {}
# HACK
arg_dict = {
key: (val[0] if len(val) == 1 else '[' + ']['.join(val) + ']')
if isinstance(val, list) else val
for key, val in arg_dict.items()
}
fpath_ = fpath_.format(**arg_dict)
fpath_ = fpath_.replace(' ', '').replace('\'', '').replace('"', '')
dpath = ub.argval('--dpath', type_=str, default=None)
if dpath is None:
gotdpath = False
dpath = '.'
else:
gotdpath = True
fpath = join(dpath, fpath_)
if not gotdpath:
dpath = dirname(fpath_)
print('dpath = %r' % (dpath,))
fig = plt.gcf()
fig.dpi = dpi
fpath_strict = ub.truepath(fpath)
CLIP_WHITE = ub.argflag('--clipwhite')
from netharn import util
if save_parts:
# TODO: call save_parts instead, but we still need to do the
# special grouping.
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
div = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
if div is not None:
df2_div_axes = _get_plotdat_dict(ax).get('df2_div_axes', [])
seen_.add(ax)
seen_.update(set(df2_div_axes))
atomic_axes.append([ax] + df2_div_axes)
# TODO: pad these a bit
else:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
hack_axes_group_row = ub.argflag('--grouprows')
if hack_axes_group_row:
groupid_list = []
for axs in atomic_axes:
for ax in axs:
groupid = ax.colNum
groupid_list.append(groupid)
groups = ub.group_items(atomic_axes, groupid_list)
new_groups = list(map(ub.flatten, groups.values()))
atomic_axes = new_groups
#[[(ax.rowNum, ax.colNum) for ax in axs] for axs in atomic_axes]
# save all rows of each column
subpath_list = save_parts(fig=fig, fpath=fpath_strict,
grouped_axes=atomic_axes, dpi=dpi)
absfpath_ = subpath_list[-1]
if CLIP_WHITE:
for subpath in subpath_list:
# remove white borders
util.clipwhite_ondisk(subpath, subpath)
else:
savekw = {}
# savekw['transparent'] = fpath.endswith('.png') and not noalpha
savekw['transparent'] = ub.argflag('--alpha')
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
savekw['bbox_inches'] = extract_axes_extents(fig, combine=True) # replaces need for clipwhite
absfpath_ = ub.truepath(fpath)
fig.savefig(absfpath_, **savekw)
if CLIP_WHITE:
# remove white borders
fpath_in = fpath_out = absfpath_
util.clipwhite_ondisk(fpath_in, fpath_out)
if ub.argflag(('--diskshow', '--ds')):
# show what we wrote
ub.startfile(absfpath_)
def show_if_requested(N=1):
"""
Used at the end of tests. Handles command line arguments for saving figures
    References:
http://stackoverflow.com/questions/4325733/save-a-subplot-in-matplotlib
"""
import matplotlib.pyplot as plt
# Process figures adjustments from command line before a show or a save
    # update_adjust_subplots()
# if use_argv:
# # hack to take args from commandline
# adjust_dict = ut.parse_dict_from_argv(adjust_dict)
# adjust_subplots(use_argv=True)
# def update_figsize():
# """ updates figsize based on command line """
# figsize = ub.argval('--figsize', type_=list, default=None)
# if figsize is not None:
# # Enforce inches and DPI
# fig = plt.gcf()
# figsize = [eval(term) if isinstance(term, str) else term
# for term in figsize]
# figw, figh = figsize[0], figsize[1]
# print('get_size_inches = %r' % (fig.get_size_inches(),))
# print('fig w,h (inches) = %r, %r' % (figw, figh))
# fig.set_size_inches(figw, figh)
# #print('get_size_inches = %r' % (fig.get_size_inches(),))
# update_figsize()
save_parts = ub.argflag('--saveparts')
fpath_ = ub.argval('--save', default=None)
if fpath_ is None:
fpath_ = ub.argval('--saveparts', default=None)
save_parts = True
if fpath_ is not None:
_save_requested(fpath_, save_parts)
# elif ub.argflag('--cmd'):
# pass
if ub.argflag('--show'):
# if ub.argflag('--tile'):
# if ut.get_computer_name().lower() in ['hyrule']:
# fig_presenter.all_figures_tile(percent_w=.5, monitor_num=0)
# else:
# fig_presenter.all_figures_tile()
# if ub.argflag('--present'):
# fig_presenter.present()
# for fig in fig_presenter.get_all_figures():
# fig.set_dpi(80)
plt.show()
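# Hedged usage sketch (not part of the original module): a typical script tail.
# With `--show` on the command line the figure is displayed; with
# `--save out.png` (or `--saveparts out.png`) it is written to disk instead.
def _demo_show_if_requested():
    from matplotlib import pyplot as plt
    plt.plot([0, 1, 2], [0, 1, 4], label='demo')
    legend()
    show_if_requested()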
def save_parts(fig, fpath, grouped_axes=None, dpi=None):
"""
FIXME: this works in mpl 2.0.0, but not 2.0.2
Args:
fig (?):
fpath (str): file path string
dpi (None): (default = None)
Returns:
list: subpaths
CommandLine:
python -m draw_func2 save_parts
Ignore:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> def testimg(fname):
>>> return plt.imread(mpl.cbook.get_sample_data(fname))
>>> fnames = ['grace_hopper.png', 'ada.png'] * 4
>>> fig = plt.figure(1)
>>> for c, fname in enumerate(fnames, start=1):
>>> ax = fig.add_subplot(3, 4, c)
>>> ax.imshow(testimg(fname))
>>> ax.set_title(fname[0:3] + str(c))
>>> ax.set_xticks([])
>>> ax.set_yticks([])
>>> ax = fig.add_subplot(3, 1, 3)
>>> ax.plot(np.sin(np.linspace(0, np.pi * 2)))
>>> ax.set_xlabel('xlabel')
>>> ax.set_ylabel('ylabel')
>>> ax.set_title('title')
>>> fpath = 'test_save_parts.png'
>>> adjust_subplots(fig=fig, wspace=.3, hspace=.3, top=.9)
>>> subpaths = save_parts(fig, fpath, dpi=300)
>>> fig.savefig(fpath)
>>> ub.startfile(subpaths[0])
>>> ub.startfile(fpath)
"""
if dpi:
# Need to set figure dpi before we draw
fig.dpi = dpi
# We need to draw the figure before calling get_window_extent
# (or we can figure out how to set the renderer object)
# if getattr(fig.canvas, 'renderer', None) is None:
fig.canvas.draw()
# Group axes that belong together
if grouped_axes is None:
grouped_axes = []
for ax in fig.axes:
grouped_axes.append([ax])
subpaths = []
_iter = enumerate(grouped_axes, start=0)
_iter = ub.ProgIter(list(_iter), label='save subfig')
for count, axs in _iter:
subpath = ub.augpath(fpath, suffix=chr(count + 65))
extent = axes_extent(axs).transformed(fig.dpi_scale_trans.inverted())
savekw = {}
savekw['transparent'] = ub.argflag('--alpha')
if dpi is not None:
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
fig.savefig(subpath, bbox_inches=extent, **savekw)
subpaths.append(subpath)
return subpaths
_qtensured = False
def _current_ipython_session():
"""
Returns a reference to the current IPython session, if one is running
"""
try:
__IPYTHON__
except NameError:
return None
else:
import IPython
ipython = IPython.get_ipython()
# if ipython is None we must have exited ipython at some point
return ipython
def qtensure():
"""
If you are in an IPython session, ensures that your backend is Qt.
"""
global _qtensured
if not _qtensured:
ipython = _current_ipython_session()
if ipython:
import sys
if 'PyQt4' in sys.modules:
ipython.magic('pylab qt4 --no-import-all')
_qtensured = True
else:
ipython.magic('pylab qt5 --no-import-all')
_qtensured = True
def aggensure():
"""
Ensures that you are in agg mode as long as IPython is not running
This might help prevent errors in tmux like:
qt.qpa.screen: QXcbConnection: Could not connect to display localhost:10.0
Could not connect to any X display.
"""
import matplotlib as mpl
current_backend = mpl.get_backend()
if current_backend != 'agg':
ipython = _current_ipython_session()
if not ipython:
set_mpl_backend('agg')
def set_mpl_backend(backend):
"""
Args:
backend (str): name of backend to use (e.g. Agg, PyQt)
"""
import sys
import matplotlib as mpl
if backend.lower().startswith('qt'):
# handle interactive qt case
qtensure()
if backend != mpl.get_backend():
# If we have already imported pyplot, then we need to use experimental
# behavior. Otherwise, we can just set the backend.
if 'matplotlib.pyplot' in sys.modules:
from matplotlib import pyplot as plt
plt.switch_backend(backend)
else:
mpl.use(backend)
def autompl():
"""
Uses platform heuristics to automatically set the mpl backend.
If no display is available it will be set to agg, otherwise we will try to
use the cross-platform Qt5Agg backend.
"""
import os
import sys
if sys.platform.startswith('win32'):
# TODO: something reasonable
pass
else:
DISPLAY = os.environ.get('DISPLAY', '')
if not DISPLAY:
set_mpl_backend('agg')
else:
set_mpl_backend('Qt5Agg')
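# Hedged usage sketch (not part of the original module): call autompl() once,
# before pyplot draws anything, so headless sessions (no DISPLAY) fall back to
# 'agg' while desktop sessions get the interactive Qt5Agg backend.
def _demo_autompl():
    import matplotlib as mpl
    autompl()
    return mpl.get_backend()  # whichever backend the heuristics picked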
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
interpolation='nearest', cmap=None, heatmap=False,
data_colorbar=False, xlabel=None, redraw_image=True,
colorspace='bgr', ax=None, alpha=None, norm=None, **kwargs):
r"""
Args:
img (ndarray): image data
fnum (int): figure number
colorspace (str): if the data is 3-4 channels, this indicates the colorspace
            1-channel data is assumed grayscale; 4-channel data is assumed to have alpha.
title (str):
figtitle (None):
pnum (tuple): plot number
interpolation (str): other interpolations = nearest, bicubic, bilinear
cmap (None):
heatmap (bool):
data_colorbar (bool):
darken (None):
        redraw_image (bool): used when calling imshow over and over; if False,
            doesn't redraw the image itself.
Returns:
tuple: (fig, ax)
Kwargs:
docla, doclf, projection
Ignore:
>>> autompl()
>>> img_fpath = ut.grab_test_imgpath('carl.jpg')
>>> img = util.imread(img_fpath)
>>> (fig, ax) = imshow(img)
>>> result = ('(fig, ax) = %s' % (str((fig, ax)),))
>>> print(result)
>>> ut.show_if_requested()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
if ax is not None:
fig = ax.figure
nospecial = True
else:
fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
ax = plt.gca()
nospecial = False
#ax.set_xticks([])
#ax.set_yticks([])
#return fig, ax
if not redraw_image:
return fig, ax
if isinstance(img, six.string_types):
# Allow for path to image to be specified
from netharn import util
img_fpath = img
img = util.imread(img_fpath)
plt_imshow_kwargs = {
'interpolation': interpolation,
#'cmap': plt.get_cmap('gray'),
}
if alpha is not None:
plt_imshow_kwargs['alpha'] = alpha
if norm is not None:
if norm is True:
norm = mpl.colors.Normalize()
plt_imshow_kwargs['norm'] = norm
else:
if cmap is None and not heatmap and not nospecial:
plt_imshow_kwargs['vmin'] = 0
plt_imshow_kwargs['vmax'] = 255
if heatmap:
cmap = 'hot'
# Handle tensor chw format in most cases
if img.ndim == 3:
if img.shape[0] == 3 or img.shape[0] == 1:
if img.shape[2] > 4:
# probably in chw format
img = img.transpose(1, 2, 0)
try:
if len(img.shape) == 3 and (img.shape[2] == 3 or img.shape[2] == 4):
# img is in a color format
from netharn import util
dst_space = 'rgb'
if img.shape[2] == 4:
colorspace += 'a'
dst_space += 'a'
imgRGB = util.convert_colorspace(img, dst_space=dst_space,
src_space=colorspace)
if imgRGB.dtype.kind == 'f':
maxval = imgRGB.max()
if maxval > 1.01 and maxval < 256:
imgRGB = np.array(imgRGB, dtype=np.uint8)
ax.imshow(imgRGB, **plt_imshow_kwargs)
elif len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[2] == 1):
# img is in grayscale
if len(img.shape) == 3:
imgGRAY = img.reshape(img.shape[0:2])
else:
imgGRAY = img
if cmap is None:
cmap = plt.get_cmap('gray')
if isinstance(cmap, six.string_types):
cmap = plt.get_cmap(cmap)
# for some reason gray floats aren't working right
if imgGRAY.max() <= 1.01 and imgGRAY.min() >= -1E-9:
imgGRAY = (imgGRAY * 255).astype(np.uint8)
ax.imshow(imgGRAY, cmap=cmap, **plt_imshow_kwargs)
else:
raise AssertionError(
'unknown image format. img.dtype=%r, img.shape=%r' %
(img.dtype, img.shape))
except TypeError as te:
print('[df2] imshow ERROR %r' % (te,))
raise
except Exception as ex:
print('!!!!!!!!!!!!!!WARNING!!!!!!!!!!!')
print('[df2] type(img) = %r' % type(img))
if not isinstance(img, np.ndarray):
            print('!!!!!!!!!!!!!!ERROR!!!!!!!!!!!')
pass
#print('img = %r' % (img,))
print('[df2] img.dtype = %r' % (img.dtype,))
print('[df2] type(img) = %r' % (type(img),))
print('[df2] img.shape = %r' % (img.shape,))
print('[df2] imshow ERROR %r' % ex)
raise
#plt.set_cmap('gray')
ax.set_xticks([])
ax.set_yticks([])
if data_colorbar is True:
scores = np.unique(img.flatten())
if cmap is None:
cmap = 'hot'
colors = scores_to_color(scores, cmap)
colorbar(scores, colors)
if xlabel is not None:
ax.set_xlabel(xlabel)
if figtitle is not None:
set_figtitle(figtitle)
return fig, ax
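# Hedged usage sketch (not part of the original module): imshow accepts HWC
# uint8 or float images (BGR by default) and also tolerates small channel-first
# (3xHxW) tensors, which it transposes before display.
def _demo_imshow():
    import numpy as np
    img = (np.random.rand(32, 48, 3) * 255).astype(np.uint8)  # HWC, BGR
    fig, ax = imshow(img, fnum=next_fnum(), title='imshow demo')
    return fig, ax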
def colorbar(scalars, colors, custom=False, lbl=None, ticklabels=None,
float_format='%.2f', **kwargs):
"""
adds a color bar next to the axes based on specific scalars
Args:
scalars (ndarray):
colors (ndarray):
custom (bool): use custom ticks
Kwargs:
See plt.colorbar
Returns:
cb : matplotlib colorbar object
Ignore:
>>> autompl()
>>> scalars = np.array([-1, -2, 1, 1, 2, 7, 10])
>>> cmap_ = 'plasma'
>>> logscale = False
>>> custom = True
>>> reverse_cmap = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
>>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale, reverse_cmap=reverse_cmap, val2_customcolor=val2_customcolor)
>>> colorbar(scalars, colors, custom=custom)
>>> df2.present()
>>> show_if_requested()
Ignore:
>>> # ENABLE_DOCTEST
>>> scalars = np.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> logscale = False
>>> custom = False
>>> reverse_cmap = False
>>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale,
>>> reverse_cmap=reverse_cmap)
>>> colors = [lighten_rgb(c, .3) for c in colors]
>>> colorbar(scalars, colors, custom=custom)
>>> df2.present()
>>> show_if_requested()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
assert len(scalars) == len(colors), 'scalars and colors must be corresponding'
if len(scalars) == 0:
return None
# Parameters
ax = plt.gca()
divider = _ensure_divider(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
xy, width, height = _get_axis_xy_width_height(ax)
#orientation = ['vertical', 'horizontal'][0]
TICK_FONTSIZE = 8
#
# Create scalar mappable with cmap
if custom:
# FIXME: clean this code up and change the name custom
# to be meaningful. It is more like: display unique colors
unique_scalars, unique_idx = np.unique(scalars, return_index=True)
unique_colors = np.array(colors)[unique_idx]
#max_, min_ = unique_scalars.max(), unique_scalars.min()
#extent_ = max_ - min_
#bounds = np.linspace(min_, max_ + 1, extent_ + 2)
listed_cmap = mpl.colors.ListedColormap(unique_colors)
#norm = mpl.colors.BoundaryNorm(bounds, listed_cmap.N)
#sm = mpl.cm.ScalarMappable(cmap=listed_cmap, norm=norm)
sm = mpl.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(np.linspace(0, 1, len(unique_scalars) + 1))
else:
sorted_scalars = sorted(scalars)
listed_cmap = scores_to_cmap(scalars, colors)
sm = plt.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(sorted_scalars)
    # Use mappable object to create the colorbar
#COLORBAR_SHRINK = .42 # 1
#COLORBAR_PAD = .01 # 1
#COLORBAR_ASPECT = np.abs(20 * height / (width)) # 1
cb = plt.colorbar(sm, cax=cax, **kwargs)
## Add the colorbar to the correct label
#axis = cb.ax.yaxis # if orientation == 'horizontal' else cb.ax.yaxis
#position = 'bottom' if orientation == 'horizontal' else 'right'
#axis.set_ticks_position(position)
# This line alone removes data
# axis.set_ticks([0, .5, 1])
if custom:
ticks = np.linspace(0, 1, len(unique_scalars) + 1)
if len(ticks) < 2:
ticks += .5
else:
# SO HACKY
ticks += (ticks[1] - ticks[0]) / 2
if isinstance(unique_scalars, np.ndarray) and unique_scalars.dtype.kind == 'f':
ticklabels = [float_format % scalar for scalar in unique_scalars]
else:
ticklabels = unique_scalars
cb.set_ticks(ticks) # tick locations
cb.set_ticklabels(ticklabels) # tick labels
elif ticklabels is not None:
ticks_ = cb.ax.get_yticks()
mx = ticks_.max()
mn = ticks_.min()
ticks = np.linspace(mn, mx, len(ticklabels))
cb.set_ticks(ticks) # tick locations
cb.set_ticklabels(ticklabels)
#cb.ax.get_yticks()
#cb.set_ticks(ticks) # tick locations
#cb.set_ticklabels(ticklabels) # tick labels
# _set_plotdat(cb.ax, 'viztype', 'colorbar-%s' % (lbl,))
# _set_plotdat(cb.ax, 'sm', sm)
# FIXME: Figure out how to make a maximum number of ticks
# and to enforce them to be inside the data bounds
cb.ax.tick_params(labelsize=TICK_FONTSIZE)
# Sets current axis
plt.sca(ax)
if lbl is not None:
cb.set_label(lbl)
return cb
_DF2_DIVIDER_KEY = '_df2_divider'
def _get_plotdat(ax, key, default=None):
""" returns internal property from a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
val = _plotdat.get(key, default)
return val
def _set_plotdat(ax, key, val):
""" sets internal property to a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
_plotdat[key] = val
def _del_plotdat(ax, key):
""" sets internal property to a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
if key in _plotdat:
del _plotdat[key]
def _get_plotdat_dict(ax):
""" sets internal property to a matplotlib axis """
if '_plotdat' not in ax.__dict__:
ax.__dict__['_plotdat'] = {}
plotdat_dict = ax.__dict__['_plotdat']
return plotdat_dict
def _ensure_divider(ax):
""" Returns previously constructed divider or creates one """
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
if divider is None:
divider = make_axes_locatable(ax)
_set_plotdat(ax, _DF2_DIVIDER_KEY, divider)
orig_append_axes = divider.append_axes
def df2_append_axes(divider, position, size, pad=None, add_to_figure=True, **kwargs):
""" override divider add axes to register the divided axes """
div_axes = _get_plotdat(ax, 'df2_div_axes', [])
new_ax = orig_append_axes(position, size, pad=pad, add_to_figure=add_to_figure, **kwargs)
div_axes.append(new_ax)
_set_plotdat(ax, 'df2_div_axes', div_axes)
return new_ax
new_method = df2_append_axes.__get__(divider, divider.__class__)
setattr(divider, 'append_axes', new_method)
# ut.inject_func_as_method(divider, df2_append_axes, 'append_axes', allow_override=True)
return divider
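# Hedged sketch (not part of the original module): _ensure_divider patches
# append_axes so that every axis split off a parent (e.g. a colorbar cax) is
# recorded under the 'df2_div_axes' key; save_parts later uses that record to
# keep a colorbar in the same cropped image as its parent axes.
def _demo_divider_tracking(ax):
    divider = _ensure_divider(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    assert cax in _get_plotdat(ax, 'df2_div_axes', [])
    return cax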
def scores_to_cmap(scores, colors=None, cmap_='hot'):
import matplotlib as mpl
if colors is None:
colors = scores_to_color(scores, cmap_=cmap_)
scores = np.array(scores)
colors = np.array(colors)
sortx = scores.argsort()
sorted_colors = colors[sortx]
# Make a listed colormap and mappable object
listed_cmap = mpl.colors.ListedColormap(sorted_colors)
return listed_cmap
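# Hedged usage sketch (not part of the original module): scores_to_cmap sorts
# the per-score colors by score and packs them into a ListedColormap, which is
# what colorbar() wraps in a ScalarMappable.
def _demo_scores_to_cmap():
    import numpy as np
    scores = np.array([0.2, 0.9, 0.5])
    cmap = scores_to_cmap(scores)
    assert cmap.N == 3  # one colormap entry per score
    return cmap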
def scores_to_color(score_list, cmap_='hot', logscale=False, reverse_cmap=False,
custom=False, val2_customcolor=None, score_range=None,
cmap_range=(.1, .9)):
"""
Other good colormaps are 'spectral', 'gist_rainbow', 'gist_ncar', 'Set1',
'Set2', 'Accent'
# TODO: plasma
Args:
score_list (list):
cmap_ (str): defaults to hot
logscale (bool):
cmap_range (tuple): restricts to only a portion of the cmap to avoid extremes
Returns:
        list: a list of RGBA color tuples, one per score
Ignore:
>>> ut.exec_funckw(scores_to_color, globals())
>>> score_list = np.array([-1, -2, 1, 1, 2, 10])
>>> # score_list = np.array([0, .1, .11, .12, .13, .8])
>>> # score_list = np.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> colors = scores_to_color(score_list, cmap_)
>>> imgRGB = util.atleast_nd(np.array(colors)[:, 0:3], 3, tofront=True)
>>> imgRGB = imgRGB.astype(np.float32)
>>> imgBGR = util.convert_colorspace(imgRGB, 'BGR', 'RGB')
>>> imshow(imgBGR)
>>> show_if_requested()
Ignore:
>>> score_list = np.array([-1, -2, 1, 1, 2, 10])
>>> cmap_ = 'hot'
>>> logscale = False
>>> reverse_cmap = True
>>> custom = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
"""
import matplotlib.pyplot as plt
assert len(score_list.shape) == 1, 'score must be 1d'
if len(score_list) == 0:
return []
def apply_logscale(scores):
scores = np.array(scores)
above_zero = scores >= 0
scores_ = scores.copy()
scores_[above_zero] = scores_[above_zero] + 1
scores_[~above_zero] = scores_[~above_zero] - 1
scores_ = np.log2(scores_)
return scores_
if logscale:
# Hack
score_list = apply_logscale(score_list)
#if loglogscale
#score_list = np.log2(np.log2(score_list + 2) + 1)
#if isinstance(cmap_, six.string_types):
cmap = plt.get_cmap(cmap_)
#else:
# cmap = cmap_
if reverse_cmap:
cmap = reverse_colormap(cmap)
#if custom:
# base_colormap = cmap
# data = score_list
# cmap = customize_colormap(score_list, base_colormap)
if score_range is None:
min_ = score_list.min()
max_ = score_list.max()
else:
min_ = score_range[0]
max_ = score_range[1]
if logscale:
min_, max_ = apply_logscale([min_, max_])
if cmap_range is None:
cmap_scale_min, cmap_scale_max = 0., 1.
else:
cmap_scale_min, cmap_scale_max = cmap_range
extent_ = max_ - min_
if extent_ == 0:
colors = [cmap(.5) for fx in range(len(score_list))]
else:
if False and logscale:
# hack
def score2_01(score):
return np.log2(
1 + cmap_scale_min + cmap_scale_max *
(float(score) - min_) / (extent_))
score_list = np.array(score_list)
#rank_multiplier = score_list.argsort() / len(score_list)
#normscore = np.array(list(map(score2_01, score_list))) * rank_multiplier
normscore = np.array(list(map(score2_01, score_list)))
colors = list(map(cmap, normscore))
else:
def score2_01(score):
return cmap_scale_min + cmap_scale_max * (float(score) - min_) / (extent_)
colors = [cmap(score2_01(score)) for score in score_list]
if val2_customcolor is not None:
colors = [
np.array(val2_customcolor.get(score, color))
for color, score in zip(colors, score_list)]
return colors
def reverse_colormap(cmap):
"""
References:
http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
"""
import matplotlib as mpl
if isinstance(cmap, mpl.colors.ListedColormap):
return mpl.colors.ListedColormap(cmap.colors[::-1])
else:
reverse = []
k = []
for key, channel in six.iteritems(cmap._segmentdata):
data = []
for t in channel:
data.append((1 - t[0], t[1], t[2]))
k.append(key)
reverse.append(sorted(data))
cmap_reversed = mpl.colors.LinearSegmentedColormap(
cmap.name + '_reversed', dict(zip(k, reverse)))
return cmap_reversed
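# Hedged usage sketch (not part of the original module): reversing a colormap
# swaps which end of the scale gets the "hot" colors; the endpoints trade places.
def _demo_reverse_colormap():
    import numpy as np
    import matplotlib.pyplot as plt
    cmap = plt.get_cmap('viridis')  # a ListedColormap
    rcmap = reverse_colormap(cmap)
    assert np.allclose(rcmap(0.0), cmap(1.0))
    assert np.allclose(rcmap(1.0), cmap(0.0))
    return rcmap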
class PlotNums(object):
"""
    Convenience class for dealing with plot numberings (pnums)
Example:
>>> pnum_ = PlotNums(nRows=2, nCols=2)
>>> # Indexable
>>> print(pnum_[0])
(2, 2, 1)
>>> # Iterable
>>> print(ub.repr2(list(pnum_), nl=0, nobr=True))
(2, 2, 1), (2, 2, 2), (2, 2, 3), (2, 2, 4)
>>> # Callable (iterates through a default iterator)
>>> print(pnum_())
(2, 2, 1)
>>> print(pnum_())
(2, 2, 2)
"""
def __init__(self, nRows=None, nCols=None, nSubplots=None, start=0):
nRows, nCols = self._get_num_rc(nSubplots, nRows, nCols)
self.nRows = nRows
self.nCols = nCols
base = 0
self.offset = 0 if base == 1 else 1
self.start = start
self._iter = None
def __getitem__(self, px):
return (self.nRows, self.nCols, px + self.offset)
def __call__(self):
"""
replacement for make_pnum_nextgen
Example:
>>> import itertools as it
>>> pnum_ = PlotNums(nSubplots=9)
>>> pnum_list = list( (pnum_() for _ in it.count()) )
>>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
>>> print(result)
Example:
>>> import itertools as it
>>> for nRows, nCols, nSubplots in it.product([None, 3], [None, 3], [None, 9]):
>>> start = 0
>>> pnum_ = PlotNums(nRows, nCols, nSubplots, start)
>>> pnum_list = list( (pnum_() for _ in it.count()) )
>>> print((nRows, nCols, nSubplots))
>>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
>>> print(result)
"""
if self._iter is None:
self._iter = iter(self)
return six.next(self._iter)
def __iter__(self):
r"""
Yields:
tuple : pnum
Example:
>>> pnum_ = iter(PlotNums(nRows=3, nCols=2))
>>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 1),
(3, 2, 2),
(3, 2, 3),
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
Example:
>>> nRows = 3
>>> nCols = 2
>>> pnum_ = iter(PlotNums(nRows, nCols, start=3))
>>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
"""
for px in range(self.start, len(self)):
yield self[px]
def __len__(self):
total_plots = self.nRows * self.nCols
return total_plots
@classmethod
def _get_num_rc(PlotNums, nSubplots=None, nRows=None, nCols=None):
r"""
Gets a constrained row column plot grid
Args:
nSubplots (None): (default = None)
nRows (None): (default = None)
nCols (None): (default = None)
Returns:
tuple: (nRows, nCols)
Example:
>>> cases = [
>>> dict(nRows=None, nCols=None, nSubplots=None),
>>> dict(nRows=2, nCols=None, nSubplots=5),
>>> dict(nRows=None, nCols=2, nSubplots=5),
>>> dict(nRows=None, nCols=None, nSubplots=5),
>>> ]
>>> for kw in cases:
>>> print('----')
>>> size = PlotNums._get_num_rc(**kw)
>>> if kw['nSubplots'] is not None:
>>> assert size[0] * size[1] >= kw['nSubplots']
>>> print('**kw = %s' % (ub.repr2(kw),))
>>> print('size = %r' % (size,))
"""
if nSubplots is None:
if nRows is None:
nRows = 1
if nCols is None:
nCols = 1
else:
if nRows is None and nCols is None:
nRows, nCols = PlotNums._get_square_row_cols(nSubplots)
elif nRows is not None:
nCols = int(np.ceil(nSubplots / nRows))
elif nCols is not None:
nRows = int(np.ceil(nSubplots / nCols))
return nRows, nCols
def _get_square_row_cols(nSubplots, max_cols=None, fix=False, inclusive=True):
r"""
Args:
nSubplots (int):
max_cols (int):
Returns:
tuple: (int, int)
Example:
>>> nSubplots = 9
>>> nSubplots_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>> max_cols = None
>>> rc_list = [PlotNums._get_square_row_cols(nSubplots, fix=True) for nSubplots in nSubplots_list]
>>> print(repr(np.array(rc_list).T))
array([[1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3],
[1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4]])
"""
if nSubplots == 0:
return 0, 0
if inclusive:
rounder = np.ceil
else:
rounder = np.floor
if fix:
# This function is very broken, but it might have dependencies
# this is the correct version
nCols = int(rounder(np.sqrt(nSubplots)))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
else:
# This is the clamped num cols version
# probably used in ibeis.viz
if max_cols is None:
max_cols = 5
if nSubplots in [4]:
max_cols = 2
if nSubplots in [5, 6, 7]:
max_cols = 3
if nSubplots in [8]:
max_cols = 4
nCols = int(min(nSubplots, max_cols))
#nCols = int(min(rounder(np.sqrt(nrids)), 5))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
def draw_border(ax, color, lw=2, offset=None, adjust=True):
'draws rectangle border around a subplot'
if adjust:
xy, width, height = _get_axis_xy_width_height(ax, -.7, -.2, 1, .4)
else:
xy, width, height = _get_axis_xy_width_height(ax)
if offset is not None:
xoff, yoff = offset
xy = [xoff, yoff]
height = - height - yoff
width = width - xoff
import matplotlib as mpl
rect = mpl.patches.Rectangle(xy, width, height, lw=lw)
rect = ax.add_patch(rect)
rect.set_clip_on(False)
rect.set_fill(False)
rect.set_edgecolor(color)
return rect
def draw_boxes(boxes, box_format='xywh', color='blue', labels=None,
textkw=None, ax=None):
"""
Args:
        boxes (list): list of coordinates in xywh, tlbr, or cxywh format
        box_format (str): specifies how the boxes are formatted
            xywh is the top-left x and y, then the pixel width and height
            cxywh is the center x and y, then the pixel width and height
            tlbr is the top-left xy and the bottom-right xy
color (str): edge color of the boxes
labels (list): if specified, plots a text annotation on each box
Example:
>>> from netharn.util.mplutil import *
>>> autompl()
>>> bboxes = [[.1, .1, .6, .3], [.3, .5, .5, .6]]
>>> col = draw_boxes(bboxes)
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
from netharn import util
if isinstance(boxes, util.Boxes):
box_format = boxes.format
boxes = boxes.data
if not len(boxes):
return
boxes = np.asarray(boxes)
if box_format == 'xywh':
xywh = boxes
elif box_format == 'cxywh':
cx, cy, w, h = boxes.T[0:4]
x1 = cx - (w / 2)
y1 = cy - (h / 2)
xywh = np.vstack([x1, y1, w, h]).T
elif box_format == 'tlbr':
x1, y1 = boxes.T[0:2]
w, h = boxes.T[2:4] - boxes.T[0:2]
xywh = np.vstack([x1, y1, w, h]).T
else:
raise KeyError(box_format)
edgecolor = Color(color).as01('rgba')
facecolor = Color((0, 0, 0, 0)).as01('rgba')
rectkw = dict(ec=edgecolor, fc=facecolor, lw=2, linestyle='solid')
patches = [mpl.patches.Rectangle((x, y), w, h, **rectkw)
for x, y, w, h in xywh]
col = mpl.collections.PatchCollection(patches, match_original=True)
ax.add_collection(col)
if labels:
texts = []
default_textkw = {
'horizontalalignment': 'left',
'verticalalignment': 'top',
'backgroundcolor': (0, 0, 0, .3),
'color': 'white',
'fontproperties': mpl.font_manager.FontProperties(
size=6, family='monospace'),
}
tkw = default_textkw.copy()
if textkw is not None:
tkw.update(textkw)
for (x1, y1, w, h), label in zip(xywh, labels):
texts.append((x1, y1, label, tkw))
for (x1, y1, catname, tkw) in texts:
ax.text(x1, y1, catname, **tkw)
return col
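# Hedged usage sketch (not part of the original module): the same physical box
# expressed in each supported format (top-left (10, 20), width 30, height 40):
#   xywh  -> [10, 20, 30, 40]
#   cxywh -> [25, 40, 30, 40]   (center x, center y, width, height)
#   tlbr  -> [10, 20, 40, 60]   (top-left xy, bottom-right xy)
def _demo_draw_boxes_formats(ax=None):
    draw_boxes([[10, 20, 30, 40]], box_format='xywh', color='blue', ax=ax)
    draw_boxes([[25, 40, 30, 40]], box_format='cxywh', color='green', ax=ax)
    draw_boxes([[10, 20, 40, 60]], box_format='tlbr', color='red', ax=ax)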
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
"""
draws `N` line segments between `N` pairs of points
Args:
pts1 (ndarray): Nx2
pts2 (ndarray): Nx2
ax (None): (default = None)
**kwargs: lw, alpha, colors
CommandLine:
python -m netharn.util.mplutil draw_line_segments --show
Example:
>>> pts1 = np.array([(.1, .8), (.6, .8)])
>>> pts2 = np.array([(.6, .7), (.4, .1)])
>>> figure(fnum=None)
>>> draw_line_segments(pts1, pts2)
>>> # xdoc: +REQUIRES(--show)
>>> import matplotlib.pyplot as plt
>>> ax = plt.gca()
>>> ax.set_xlim(0, 1)
>>> ax.set_ylim(0, 1)
>>> show_if_requested()
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
if ax is None:
ax = plt.gca()
assert len(pts1) == len(pts2), 'unaligned'
segments = [(xy1, xy2) for xy1, xy2 in zip(pts1, pts2)]
linewidth = kwargs.pop('lw', kwargs.pop('linewidth', 1.0))
alpha = kwargs.pop('alpha', 1.0)
if 'color' in kwargs:
kwargs['colors'] = kwargs['color']
# mpl.colors.ColorConverter().to_rgb(kwargs['color'])
line_group = mpl.collections.LineCollection(segments, linewidths=linewidth,
alpha=alpha, **kwargs)
ax.add_collection(line_group)
def make_heatmask(probs, cmap='plasma', with_alpha=True):
"""
Colorizes a single-channel intensity mask (with an alpha channel)
"""
import matplotlib as mpl
from netharn.util import imutil
assert len(probs.shape) == 2
cmap_ = mpl.cm.get_cmap(cmap)
probs = imutil.ensure_float01(probs)
heatmask = cmap_(probs)
if with_alpha:
heatmask[:, :, 0:3] = heatmask[:, :, 0:3][:, :, ::-1]
heatmask[:, :, 3] = probs
return heatmask
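# Hedged usage sketch (not part of the original module): a heatmask is an RGBA
# float image whose alpha channel is the probability map itself, so it can be
# alpha-blended over the source image to visualize per-pixel confidence.
def _demo_make_heatmask():
    import numpy as np
    probs = np.random.rand(8, 8).astype(np.float32)
    heatmask = make_heatmask(probs)
    assert heatmask.shape == (8, 8, 4)
    assert np.allclose(heatmask[:, :, 3], probs)
    return heatmask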
def colorbar_image(domain, cmap='plasma', dpi=96, shape=(200, 20), transparent=False):
"""
Notes:
shape is approximate
Ignore:
domain = np.linspace(-30, 200)
cmap='plasma'
dpi = 80
dsize = (20, 200)
        util.imwrite('foo.png', util.colorbar_image(np.arange(0, 1), shape=(400, 80)))
import plottool as pt
pt.qtensure()
import matplotlib as mpl
mpl.style.use('ggplot')
util.imwrite('foo.png', util.colorbar_image(np.linspace(0, 1, 100), dpi=200, shape=(1000, 40), transparent=1))
ub.startfile('foo.png')
"""
import matplotlib as mpl
mpl.use('agg', force=False, warn=False)
from matplotlib import pyplot as plt
fig = plt.figure(dpi=dpi)
w, h = shape[1] / dpi, shape[0] / dpi
# w, h = 1, 10
fig.set_size_inches(w, h)
ax = fig.add_subplot('111')
sm = plt.cm.ScalarMappable(cmap=plt.get_cmap(cmap))
sm.set_array(domain)
plt.colorbar(sm, cax=ax)
cb_img = render_figure_to_image(fig, dpi=dpi, transparent=transparent)
plt.close(fig)
return cb_img
class Color(ub.NiceRepr):
"""
move to colorutil?
Example:
>>> from netharn.util.mplutil import *
>>> print(Color('g'))
>>> print(Color('orangered'))
>>> print(Color('#AAAAAA').as255())
>>> print(Color([0, 255, 0]))
>>> print(Color([1, 1, 1.]))
>>> print(Color([1, 1, 1]))
>>> print(Color(Color([1, 1, 1])).as255())
>>> print(Color(Color([1., 0, 1, 0])).ashex())
>>> print(Color([1, 1, 1], alpha=255))
>>> print(Color([1, 1, 1], alpha=255, space='lab'))
"""
def __init__(self, color, alpha=None, space=None):
if isinstance(color, Color):
assert alpha is None
assert space is None
space = color.space
color = color.color01
else:
color = self._ensure_color01(color)
if alpha is not None:
alpha = self._ensure_color01([alpha])[0]
if space is None:
space = 'rgb'
# always normalize the color down to 01
color01 = list(color)
if alpha is not None:
if len(color01) not in [1, 3]:
raise ValueError('alpha already in color')
color01 = color01 + [alpha]
# correct space if alpha is given
if len(color01) in [2, 4]:
if not space.endswith('a'):
space += 'a'
self.color01 = color01
self.space = space
def __nice__(self):
colorpart = ', '.join(['{:.2f}'.format(c) for c in self.color01])
return self.space + ': ' + colorpart
def ashex(self, space=None):
c255 = self.as255(space)
return '#' + ''.join(['{:02x}'.format(c) for c in c255])
def as255(self, space=None):
color = (np.array(self.as01(space)) * 255).astype(np.uint8)
return tuple(map(int, color))
def as01(self, space=None):
"""
self = mplutil.Color('red')
mplutil.Color('green').as01('rgba')
"""
color = tuple(self.color01)
if space is not None:
if space == self.space:
pass
elif space == 'rgba' and self.space == 'rgb':
color = color + (1,)
elif space == 'bgr' and self.space == 'rgb':
color = color[::-1]
elif space == 'rgb' and self.space == 'bgr':
color = color[::-1]
else:
assert False
return tuple(map(float, color))
@classmethod
    def _is_base01(cls, channels):
""" check if a color is in base 01 """
def _test_base01(channels):
tests01 = {
'is_float': all([isinstance(c, (float, np.float64)) for c in channels]),
'is_01': all([c >= 0.0 and c <= 1.0 for c in channels]),
}
return tests01
if isinstance(channels, six.string_types):
return False
return all(_test_base01(channels).values())
@classmethod
def _is_base255(Color, channels):
""" there is a one corner case where all pixels are 1 or less """
if (all(c > 0.0 and c <= 255.0 for c in channels) and any(c > 1.0 for c in channels)):
            # Definitely in 255 space
return True
else:
# might be in 01 or 255
return all(isinstance(c, int) for c in channels)
@classmethod
def _hex_to_01(Color, hex_color):
"""
hex_color = '#6A5AFFAF'
"""
assert hex_color.startswith('#'), 'not a hex string %r' % (hex_color,)
parts = hex_color[1:].strip()
color255 = tuple(int(parts[i: i + 2], 16) for i in range(0, len(parts), 2))
assert len(color255) in [3, 4], 'must be length 3 or 4'
return Color._255_to_01(color255)
def _ensure_color01(Color, color):
""" Infer what type color is and normalize to 01 """
if isinstance(color, six.string_types):
color = Color._string_to_01(color)
elif Color._is_base255(color):
color = Color._255_to_01(color)
return color
@classmethod
def _255_to_01(Color, color255):
""" converts base 255 color to base 01 color """
return [channel / 255.0 for channel in color255]
@classmethod
def _string_to_01(Color, color):
"""
mplutil.Color._string_to_01('green')
mplutil.Color._string_to_01('red')
"""
from matplotlib import colors as mcolors
if color in mcolors.BASE_COLORS:
color01 = mcolors.BASE_COLORS[color]
elif color in mcolors.CSS4_COLORS:
color_hex = mcolors.CSS4_COLORS[color]
color01 = Color._hex_to_01(color_hex)
elif color.startswith('#'):
color01 = Color._hex_to_01(color)
else:
raise ValueError('unknown color=%r' % (color,))
return color01
@classmethod
    def named_colors(cls):
from matplotlib import colors as mcolors
names = sorted(list(mcolors.BASE_COLORS.keys()) + list(mcolors.CSS4_COLORS.keys()))
return names
@classmethod
def distinct(Color, num, space='rgb'):
"""
Make multiple distinct colors
"""
import matplotlib as mpl
import matplotlib._cm as _cm
cm = mpl.colors.LinearSegmentedColormap.from_list(
'gist_rainbow', _cm.datad['gist_rainbow'],
mpl.rcParams['image.lut'])
distinct_colors = [
np.array(cm(i / num)).tolist()[0:3]
for i in range(num)
]
if space == 'rgb':
return distinct_colors
else:
return [Color(c, space='rgb').as01(space=space) for c in distinct_colors]
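# Hedged usage sketch (not part of the original module): Color normalizes any
# supported input (name, hex string, 0-255 ints, 0-1 floats) down to 0-1 floats
# and converts back out via as01 / as255 / ashex.
def _demo_color_inputs():
    assert Color([255, 0, 0]).as01() == (1.0, 0.0, 0.0)
    assert Color('red').as01('rgba') == (1.0, 0.0, 0.0, 1.0)
    assert Color((1.0, 0.0, 0.0)).ashex() == '#ff0000'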
if __name__ == '__main__':
r"""
CommandLine:
python -m netharn.util.mplutil
"""
import xdoctest
xdoctest.doctest_module(__file__)
|
[
"numpy.sqrt",
"sys.platform.startswith",
"io.BytesIO",
"colorsys.hsv_to_rgb",
"matplotlib.collections.LineCollection",
"numpy.array",
"matplotlib.colors.CSS4_COLORS.keys",
"numpy.isfinite",
"cv2.imdecode",
"matplotlib.pyplot.switch_backend",
"netharn.util.imutil.ensure_float01",
"netharn.util.clipwhite_ondisk",
"numpy.random.RandomState",
"ubelt.argflag",
"netharn.util.convert_colorspace",
"matplotlib.get_backend",
"numpy.asarray",
"ubelt.hash_data",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"matplotlib.cm.ScalarMappable",
"ubelt.dict_take",
"numpy.linspace",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.vstack",
"numpy.meshgrid",
"matplotlib.cm.get_cmap",
"os.path.expanduser",
"ubelt.truepath",
"matplotlib.rcParams.copy",
"IPython.get_ipython",
"matplotlib.pyplot.cm.ScalarMappable",
"six.next",
"numpy.abs",
"ubelt.startfile",
"numpy.ceil",
"matplotlib.use",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"ubelt.group_items",
"xdoctest.doctest_module",
"numpy.floor",
"os.path.dirname",
"matplotlib.transforms.Bbox.union",
"cv2.cvtColor",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.cm.get_cmap",
"numpy.log2",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.get_cmap",
"ubelt.argval",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"numpy.unique",
"matplotlib.font_manager.FontProperties",
"netharn.util.imread",
"matplotlib.pyplot.colorbar",
"os.path.join",
"matplotlib.pyplot.sca",
"os.environ.get",
"matplotlib.collections.PatchCollection",
"ubelt.take",
"numpy.diag",
"matplotlib.pyplot.figure",
"netharn.util.ensure_rng",
"ubelt.ensure_unicode",
"six.moves.zip_longest",
"numpy.all",
"six.iteritems",
"matplotlib.pyplot.subplot",
"matplotlib.colors.BASE_COLORS.keys"
] |
[((8034, 8054), 'numpy.array', 'np.array', (['ydata_list'], {}), '(ydata_list)\n', (8042, 8054), True, 'import numpy as np\n'), ((12667, 12686), 'matplotlib.rcParams.copy', 'mpl.rcParams.copy', ([], {}), '()\n', (12684, 12686), True, 'import matplotlib as mpl\n'), ((14038, 14099), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'family': 'family', 'weight': 'weight'}), '(family=family, weight=weight)\n', (14069, 14099), True, 'import matplotlib as mpl\n'), ((24865, 24874), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24872, 24874), True, 'from matplotlib import pyplot as plt\n'), ((25599, 25625), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['aximg'], {'ax': 'ax'}), '(aximg, ax=ax)\n', (25611, 25625), True, 'from matplotlib import pyplot as plt\n'), ((28012, 28046), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['extents'], {}), '(extents)\n', (28037, 28046), True, 'import matplotlib as mpl\n'), ((30635, 30674), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents'], {}), '(axes_extents)\n', (30660, 30674), True, 'import matplotlib as mpl\n'), ((30977, 31017), 'cv2.imdecode', 'cv2.imdecode', (['data', 'cv2.IMREAD_UNCHANGED'], {}), '(data, cv2.IMREAD_UNCHANGED)\n', (30989, 31017), False, 'import cv2\n'), ((31876, 31918), 'cv2.cvtColor', 'cv2.cvtColor', (['im_bgra', 'cv2.COLOR_BGRA2RGBA'], {}), '(im_bgra, cv2.COLOR_BGRA2RGBA)\n', (31888, 31918), False, 'import cv2\n'), ((36533, 36560), 'ubelt.ensure_unicode', 'ub.ensure_unicode', (['figtitle'], {}), '(figtitle)\n', (36550, 36560), True, 'import ubelt as ub\n'), ((36576, 36603), 'ubelt.ensure_unicode', 'ub.ensure_unicode', (['subtitle'], {}), '(subtitle)\n', (36593, 36603), True, 'import ubelt as ub\n'), ((45094, 45114), 'netharn.util.ensure_rng', 'util.ensure_rng', (['rng'], {}), '(rng)\n', (45109, 45114), False, 'from netharn import util\n'), ((45674, 45692), 'os.path.expanduser', 'expanduser', (['fpath_'], {}), '(fpath_)\n', (45684, 45692), False, 'from os.path import expanduser\n'), ((46187, 46232), 'ubelt.argval', 'ub.argval', (['"""--dpath"""'], {'type_': 'str', 'default': 'None'}), "('--dpath', type_=str, default=None)\n", (46196, 46232), True, 'import ubelt as ub\n'), ((46347, 46366), 'os.path.join', 'join', (['dpath', 'fpath_'], {}), '(dpath, fpath_)\n', (46351, 46366), False, 'from os.path import join, dirname\n'), ((46466, 46475), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (46473, 46475), True, 'from matplotlib import pyplot as plt\n'), ((46514, 46532), 'ubelt.truepath', 'ub.truepath', (['fpath'], {}), '(fpath)\n', (46525, 46532), True, 'import ubelt as ub\n'), ((46550, 46575), 'ubelt.argflag', 'ub.argflag', (['"""--clipwhite"""'], {}), "('--clipwhite')\n", (46560, 46575), True, 'import ubelt as ub\n'), ((48775, 48809), 'ubelt.argflag', 'ub.argflag', (["('--diskshow', '--ds')"], {}), "(('--diskshow', '--ds'))\n", (48785, 48809), True, 'import ubelt as ub\n'), ((50122, 50147), 'ubelt.argflag', 'ub.argflag', (['"""--saveparts"""'], {}), "('--saveparts')\n", (50132, 50147), True, 'import ubelt as ub\n'), ((50162, 50195), 'ubelt.argval', 'ub.argval', (['"""--save"""'], {'default': 'None'}), "('--save', default=None)\n", (50171, 50195), True, 'import ubelt as ub\n'), ((50427, 50447), 'ubelt.argflag', 'ub.argflag', (['"""--show"""'], {}), "('--show')\n", (50437, 50447), True, 'import ubelt as ub\n'), ((54480, 54497), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (54495, 54497), True, 'import matplotlib as mpl\n'), 
((55510, 55542), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (55533, 55542), False, 'import sys\n'), ((62645, 62654), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (62652, 62654), True, 'from matplotlib import pyplot as plt\n'), ((64029, 64064), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax'}), '(sm, cax=cax, **kwargs)\n', (64041, 64064), True, 'from matplotlib import pyplot as plt\n'), ((65568, 65579), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (65575, 65579), True, 'from matplotlib import pyplot as plt\n'), ((67660, 67676), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (67668, 67676), True, 'import numpy as np\n'), ((67690, 67706), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (67698, 67706), True, 'import numpy as np\n'), ((67837, 67877), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['sorted_colors'], {}), '(sorted_colors)\n', (67862, 67877), True, 'import matplotlib as mpl\n'), ((70011, 70030), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap_'], {}), '(cmap_)\n', (70023, 70030), True, 'from matplotlib import pyplot as plt\n'), ((78751, 78798), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['xy', 'width', 'height'], {'lw': 'lw'}), '(xy, width, height, lw=lw)\n', (78772, 78798), True, 'import matplotlib as mpl\n'), ((79964, 79981), 'numpy.asarray', 'np.asarray', (['boxes'], {}), '(boxes)\n', (79974, 79981), True, 'import numpy as np\n'), ((80663, 80724), 'matplotlib.collections.PatchCollection', 'mpl.collections.PatchCollection', (['patches'], {'match_original': '(True)'}), '(patches, match_original=True)\n', (80694, 80724), True, 'import matplotlib as mpl\n'), ((82598, 82687), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['segments'], {'linewidths': 'linewidth', 'alpha': 'alpha'}), '(segments, linewidths=linewidth, alpha=alpha,\n **kwargs)\n', (82628, 82687), True, 'import matplotlib as mpl\n'), ((83022, 83043), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (83037, 83043), True, 'import matplotlib as mpl\n'), ((83056, 83084), 'netharn.util.imutil.ensure_float01', 'imutil.ensure_float01', (['probs'], {}), '(probs)\n', (83077, 83084), False, 'from netharn.util import imutil\n'), ((83903, 83942), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {'force': '(False)', 'warn': '(False)'}), "('agg', force=False, warn=False)\n", (83910, 83942), True, 'import matplotlib as mpl\n'), ((83995, 84014), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': 'dpi'}), '(dpi=dpi)\n', (84005, 84014), True, 'from matplotlib import pyplot as plt\n'), ((84227, 84251), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'ax'}), '(sm, cax=ax)\n', (84239, 84251), True, 'from matplotlib import pyplot as plt\n'), ((84333, 84347), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (84342, 84347), True, 'from matplotlib import pyplot as plt\n'), ((90223, 90256), 'xdoctest.doctest_module', 'xdoctest.doctest_module', (['__file__'], {}), '(__file__)\n', (90246, 90256), False, 'import xdoctest\n'), ((7332, 7370), 'ubelt.dict_take', 'ub.dict_take', (['plot_list_kw', 'valid_keys'], {}), '(plot_list_kw, valid_keys)\n', (7344, 7370), True, 'import ubelt as ub\n'), ((7555, 7596), 'ubelt.dict_take', 'ub.dict_take', (['plot_list_kw', 'extra_kw_keys'], {}), '(plot_list_kw, extra_kw_keys)\n', (7567, 7596), True, 'import ubelt as ub\n'), ((7907, 7916), 'matplotlib.pyplot.gca', 'plt.gca', ([], 
{}), '()\n', (7914, 7916), True, 'from matplotlib import pyplot as plt\n'), ((7935, 7946), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (7942, 7946), True, 'from matplotlib import pyplot as plt\n'), ((8510, 8574), 'six.moves.zip_longest', 'zip_longest', (['xdata_list', 'ydata_list', 'plot_kw_list', 'extra_kw_list'], {}), '(xdata_list, ydata_list, plot_kw_list, extra_kw_list)\n', (8521, 8574), False, 'from six.moves import zip_longest\n'), ((8653, 8672), 'numpy.isfinite', 'np.isfinite', (['_ydata'], {}), '(_ydata)\n', (8664, 8672), True, 'import numpy as np\n'), ((13834, 13911), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'weight': 'weight', 'family': 'family', 'size': 'labelsize'}), '(weight=weight, family=family, size=labelsize)\n', (13865, 13911), True, 'import matplotlib as mpl\n'), ((23188, 23218), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['nrow', 'ncols'], {}), '(nrow, ncols)\n', (23205, 23218), True, 'import matplotlib.gridspec as gridspec\n'), ((24371, 24380), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24378, 24380), True, 'from matplotlib import pyplot as plt\n'), ((24846, 24855), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24853, 24855), True, 'from matplotlib import pyplot as plt\n'), ((25274, 25295), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (25289, 25295), True, 'import matplotlib as mpl\n'), ((26762, 26827), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['segments'], {'color': '"""w"""', 'linewidths': '(1)'}), "(segments, color='w', linewidths=1)\n", (26792, 26827), True, 'import matplotlib as mpl\n'), ((26984, 27013), 'numpy.meshgrid', 'np.meshgrid', (['x_basis', 'y_basis'], {}), '(x_basis, y_basis)\n', (26995, 27013), True, 'import numpy as np\n'), ((29300, 29340), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents_'], {}), '(axes_extents_)\n', (29325, 29340), True, 'import matplotlib as mpl\n'), ((30282, 30291), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (30289, 30291), True, 'from matplotlib import pyplot as plt\n'), ((30684, 30696), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (30694, 30696), False, 'import io\n'), ((31353, 31392), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents'], {}), '(axes_extents)\n', (31378, 31392), True, 'import matplotlib as mpl\n'), ((34692, 34748), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['xy', 'width', 'height'], {'lw': '(0)', 'zorder': '(0)'}), '(xy, width, height, lw=0, zorder=0)\n', (34713, 34748), True, 'import matplotlib as mpl\n'), ((35098, 35107), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35105, 35107), True, 'from matplotlib import pyplot as plt\n'), ((36508, 36517), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (36515, 36517), True, 'from matplotlib import pyplot as plt\n'), ((38281, 38290), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38288, 38290), True, 'from matplotlib import pyplot as plt\n'), ((46404, 46419), 'os.path.dirname', 'dirname', (['fpath_'], {}), '(fpath_)\n', (46411, 46419), False, 'from os.path import join, dirname\n'), ((47348, 47373), 'ubelt.argflag', 'ub.argflag', (['"""--grouprows"""'], {}), "('--grouprows')\n", (47358, 47373), True, 'import ubelt as ub\n'), ((48338, 48359), 'ubelt.argflag', 'ub.argflag', (['"""--alpha"""'], {}), "('--alpha')\n", (48348, 48359), True, 'import ubelt as ub\n'), ((48548, 48566), 'ubelt.truepath', 'ub.truepath', 
(['fpath'], {}), '(fpath)\n', (48559, 48566), True, 'import ubelt as ub\n'), ((48848, 48871), 'ubelt.startfile', 'ub.startfile', (['absfpath_'], {}), '(absfpath_)\n', (48860, 48871), True, 'import ubelt as ub\n'), ((50236, 50274), 'ubelt.argval', 'ub.argval', (['"""--saveparts"""'], {'default': 'None'}), "('--saveparts', default=None)\n", (50245, 50274), True, 'import ubelt as ub\n'), ((50864, 50874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50872, 50874), True, 'from matplotlib import pyplot as plt\n'), ((53051, 53072), 'ubelt.argflag', 'ub.argflag', (['"""--alpha"""'], {}), "('--alpha')\n", (53061, 53072), True, 'import ubelt as ub\n'), ((53549, 53570), 'IPython.get_ipython', 'IPython.get_ipython', ([], {}), '()\n', (53568, 53570), False, 'import IPython\n'), ((54915, 54932), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (54930, 54932), True, 'import matplotlib as mpl\n'), ((55622, 55651), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (55636, 55651), False, 'import os\n'), ((57335, 57344), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (57342, 57344), True, 'from matplotlib import pyplot as plt\n'), ((57662, 57684), 'netharn.util.imread', 'util.imread', (['img_fpath'], {}), '(img_fpath)\n', (57673, 57684), False, 'from netharn import util\n'), ((63102, 63139), 'numpy.unique', 'np.unique', (['scalars'], {'return_index': '(True)'}), '(scalars, return_index=True)\n', (63111, 63139), True, 'import numpy as np\n'), ((63370, 63410), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['unique_colors'], {}), '(unique_colors)\n', (63395, 63410), True, 'import matplotlib as mpl\n'), ((63552, 63591), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'cmap': 'listed_cmap'}), '(cmap=listed_cmap)\n', (63573, 63591), True, 'import matplotlib as mpl\n'), ((63775, 63814), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'listed_cmap'}), '(cmap=listed_cmap)\n', (63796, 63814), True, 'from matplotlib import pyplot as plt\n'), ((66673, 66696), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (66692, 66696), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((69541, 69557), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (69549, 69557), True, 'import numpy as np\n'), ((69751, 69767), 'numpy.log2', 'np.log2', (['scores_'], {}), '(scores_)\n', (69758, 69767), True, 'import numpy as np\n'), ((71964, 72008), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['cmap.colors[::-1]'], {}), '(cmap.colors[::-1])\n', (71989, 72008), True, 'import matplotlib as mpl\n'), ((72083, 72115), 'six.iteritems', 'six.iteritems', (['cmap._segmentdata'], {}), '(cmap._segmentdata)\n', (72096, 72115), False, 'import six\n'), ((74272, 74292), 'six.next', 'six.next', (['self._iter'], {}), '(self._iter)\n', (74280, 74292), False, 'import six\n'), ((79773, 79782), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (79780, 79782), True, 'from matplotlib import pyplot as plt\n'), ((80568, 80613), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['(x, y)', 'w', 'h'], {}), '((x, y), w, h, **rectkw)\n', (80589, 80613), True, 'import matplotlib as mpl\n'), ((82233, 82242), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (82240, 82242), True, 'from matplotlib import pyplot as plt\n'), ((89678, 89797), 'matplotlib.colors.LinearSegmentedColormap.from_list', 
'mpl.colors.LinearSegmentedColormap.from_list', (['"""gist_rainbow"""', "_cm.datad['gist_rainbow']", "mpl.rcParams['image.lut']"], {}), "('gist_rainbow', _cm.datad[\n 'gist_rainbow'], mpl.rcParams['image.lut'])\n", (89722, 89797), True, 'import matplotlib as mpl\n'), ((3125, 3151), 'ubelt.take', 'ub.take', (['ydata_list', 'ykeys'], {}), '(ydata_list, ykeys)\n', (3132, 3151), True, 'import ubelt as ub\n'), ((3897, 3917), 'numpy.array', 'np.array', (['ydata_list'], {}), '(ydata_list)\n', (3905, 3917), True, 'import numpy as np\n'), ((4116, 4139), 'numpy.array', 'np.array', (['xd'], {'copy': '(True)'}), '(xd, copy=True)\n', (4124, 4139), True, 'import numpy as np\n'), ((10873, 10889), 'numpy.array', 'np.array', (['_xdata'], {}), '(_xdata)\n', (10881, 10889), True, 'import numpy as np\n'), ((10954, 10970), 'numpy.array', 'np.array', (['ydata_'], {}), '(ydata_)\n', (10962, 10970), True, 'import numpy as np\n'), ((10996, 11012), 'numpy.array', 'np.array', (['spread'], {}), '(spread)\n', (11004, 11012), True, 'import numpy as np\n'), ((11126, 11135), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11133, 11135), True, 'from matplotlib import pyplot as plt\n'), ((12466, 12489), 'ubelt.ensure_unicode', 'ub.ensure_unicode', (['text'], {}), '(text)\n', (12483, 12489), True, 'import ubelt as ub\n'), ((14813, 14855), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['xtickformat'], {}), '(xtickformat)\n', (14842, 14855), True, 'import matplotlib as mpl\n'), ((14926, 14968), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['ytickformat'], {}), '(ytickformat)\n', (14955, 14968), True, 'import matplotlib as mpl\n'), ((16827, 16862), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'num_xticks'], {}), '(xmin, xmax, num_xticks)\n', (16838, 16862), True, 'import numpy as np\n'), ((17125, 17160), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'num_yticks'], {}), '(ymin, ymax, num_yticks)\n', (17136, 17160), True, 'import numpy as np\n'), ((19626, 19703), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'family': 'family', 'weight': 'weight', 'size': 'titlesize'}), '(family=family, weight=weight, size=titlesize)\n', (19657, 19703), True, 'import matplotlib as mpl\n'), ((20078, 20156), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'family': 'family', 'weight': 'weight', 'size': 'legendsize'}), '(family=family, weight=weight, size=legendsize)\n', (20109, 20156), True, 'import matplotlib as mpl\n'), ((34075, 34099), 'numpy.array', 'np.array', (['(0, 0, 0, 255)'], {}), '((0, 0, 0, 255))\n', (34083, 34099), True, 'import numpy as np\n'), ((34238, 34247), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34245, 34247), True, 'from matplotlib import pyplot as plt\n'), ((41356, 41394), 'ubelt.argval', 'ub.argval', (['"""--cmap-hack"""'], {'default': 'None'}), "('--cmap-hack', default=None)\n", (41365, 41394), True, 'import ubelt as ub\n'), ((41417, 41457), 'ubelt.argval', 'ub.argval', (['"""--ncolor-hack"""'], {'default': 'None'}), "('--ncolor-hack', default=None)\n", (41426, 41457), True, 'import ubelt as ub\n'), ((41688, 41723), 'numpy.random.RandomState', 'np.random.RandomState', (['(seed + 48930)'], {}), '(seed + 48930)\n', (41709, 41723), True, 'import numpy as np\n'), ((41830, 41855), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap_str'], {}), '(cmap_str)\n', (41845, 41855), True, 'from matplotlib import pyplot as plt\n'), ((42033, 42069), 'numpy.linspace', 
'np.linspace', (['(0)', '(1)', 'N'], {'endpoint': '(False)'}), '(0, 1, N, endpoint=False)\n', (42044, 42069), True, 'import numpy as np\n'), ((43071, 43130), 'numpy.linspace', 'np.linspace', (['hmin', 'hmax_', 'N'], {'endpoint': '(False)', 'dtype': 'np.float'}), '(hmin, hmax_, N, endpoint=False, dtype=np.float)\n', (43082, 43130), True, 'import numpy as np\n'), ((47610, 47651), 'ubelt.group_items', 'ub.group_items', (['atomic_axes', 'groupid_list'], {}), '(atomic_axes, groupid_list)\n', (47624, 47651), True, 'import ubelt as ub\n'), ((48724, 48766), 'netharn.util.clipwhite_ondisk', 'util.clipwhite_ondisk', (['fpath_in', 'fpath_out'], {}), '(fpath_in, fpath_out)\n', (48745, 48766), False, 'from netharn import util\n'), ((55181, 55208), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['backend'], {}), '(backend)\n', (55199, 55208), True, 'from matplotlib import pyplot as plt\n'), ((55235, 55251), 'matplotlib.use', 'mpl.use', (['backend'], {}), '(backend)\n', (55242, 55251), True, 'import matplotlib as mpl\n'), ((57936, 57958), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {}), '()\n', (57956, 57958), True, 'import matplotlib as mpl\n'), ((58747, 58818), 'netharn.util.convert_colorspace', 'util.convert_colorspace', (['img'], {'dst_space': 'dst_space', 'src_space': 'colorspace'}), '(img, dst_space=dst_space, src_space=colorspace)\n', (58770, 58818), False, 'from netharn import util\n'), ((63164, 63180), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (63172, 63180), True, 'import numpy as np\n'), ((71004, 71024), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (71012, 71024), True, 'import numpy as np\n'), ((81003, 81062), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'size': '(6)', 'family': '"""monospace"""'}), "(size=6, family='monospace')\n", (81034, 81062), True, 'import matplotlib as mpl\n'), ((84177, 84195), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (84189, 84195), True, 'from matplotlib import pyplot as plt\n'), ((4189, 4215), 'numpy.array', 'np.array', (['xdata'], {'copy': '(True)'}), '(xdata, copy=True)\n', (4197, 4215), True, 'import numpy as np\n'), ((22435, 22444), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22442, 22444), True, 'from matplotlib import pyplot as plt\n'), ((22569, 22585), 'matplotlib.pyplot.figure', 'plt.figure', (['fnum'], {}), '(fnum)\n', (22579, 22585), True, 'from matplotlib import pyplot as plt\n'), ((23987, 23996), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (23994, 23996), True, 'from matplotlib import pyplot as plt\n'), ((24114, 24135), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*subspec'], {}), '(*subspec)\n', (24125, 24135), True, 'from matplotlib import pyplot as plt\n'), ((24175, 24184), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24182, 24184), True, 'from matplotlib import pyplot as plt\n'), ((24980, 24995), 'numpy.diag', 'np.diag', (['values'], {}), '(values)\n', (24987, 24995), True, 'import numpy as np\n'), ((42285, 42311), 'numpy.abs', 'np.abs', (['range_[range_ < 0]'], {}), '(range_[range_ < 0])\n', (42291, 42311), True, 'import numpy as np\n'), ((43429, 43452), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (43448, 43452), False, 'import colorsys\n'), ((48163, 48202), 'netharn.util.clipwhite_ondisk', 'util.clipwhite_ondisk', (['subpath', 'subpath'], {}), '(subpath, subpath)\n', (48184, 48202), False, 'from netharn import util\n'), ((80168, 80193), 'numpy.vstack', 
'np.vstack', (['[x1, y1, w, h]'], {}), '([x1, y1, w, h])\n', (80177, 80193), True, 'import numpy as np\n'), ((22503, 22515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22513, 22515), True, 'from matplotlib import pyplot as plt\n'), ((22644, 22653), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22651, 22653), True, 'from matplotlib import pyplot as plt\n'), ((41012, 41032), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (41023, 41032), True, 'import numpy as np\n'), ((42206, 42225), 'numpy.all', 'np.all', (['(range_ >= 0)'], {}), '(range_ >= 0)\n', (42212, 42225), True, 'import numpy as np\n'), ((42230, 42249), 'numpy.all', 'np.all', (['(range_ <= 1)'], {}), '(range_ <= 1)\n', (42236, 42249), True, 'import numpy as np\n'), ((59023, 59055), 'numpy.array', 'np.array', (['imgRGB'], {'dtype': 'np.uint8'}), '(imgRGB, dtype=np.uint8)\n', (59031, 59055), True, 'import numpy as np\n'), ((59413, 59433), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (59425, 59433), True, 'from matplotlib import pyplot as plt\n'), ((59508, 59526), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (59520, 59526), True, 'from matplotlib import pyplot as plt\n'), ((77625, 77643), 'numpy.sqrt', 'np.sqrt', (['nSubplots'], {}), '(nSubplots)\n', (77632, 77643), True, 'import numpy as np\n'), ((80315, 80340), 'numpy.vstack', 'np.vstack', (['[x1, y1, w, h]'], {}), '([x1, y1, w, h])\n', (80324, 80340), True, 'import numpy as np\n'), ((89386, 89412), 'matplotlib.colors.BASE_COLORS.keys', 'mcolors.BASE_COLORS.keys', ([], {}), '()\n', (89410, 89412), True, 'from matplotlib import colors as mcolors\n'), ((89421, 89447), 'matplotlib.colors.CSS4_COLORS.keys', 'mcolors.CSS4_COLORS.keys', ([], {}), '()\n', (89445, 89447), True, 'from matplotlib import colors as mcolors\n'), ((16699, 16712), 'numpy.ceil', 'np.ceil', (['xmin'], {}), '(xmin)\n', (16706, 16712), True, 'import numpy as np\n'), ((16714, 16728), 'numpy.floor', 'np.floor', (['xmax'], {}), '(xmax)\n', (16722, 16728), True, 'import numpy as np\n'), ((16997, 17010), 'numpy.ceil', 'np.ceil', (['ymin'], {}), '(ymin)\n', (17004, 17010), True, 'import numpy as np\n'), ((17012, 17026), 'numpy.floor', 'np.floor', (['ymax'], {}), '(ymax)\n', (17020, 17026), True, 'import numpy as np\n'), ((41647, 41670), 'ubelt.hash_data', 'ub.hash_data', (['cmap_seed'], {}), '(cmap_seed)\n', (41659, 41670), True, 'import ubelt as ub\n'), ((76518, 76544), 'numpy.ceil', 'np.ceil', (['(nSubplots / nRows)'], {}), '(nSubplots / nRows)\n', (76525, 76544), True, 'import numpy as np\n'), ((76610, 76636), 'numpy.ceil', 'np.ceil', (['(nSubplots / nCols)'], {}), '(nSubplots / nCols)\n', (76617, 76636), True, 'import numpy as np\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 20:11:19 2016
@author: stephen
"""
from __future__ import print_function
from keras.models import Model
from keras.utils import np_utils
import numpy as np
import os
from keras.callbacks import ModelCheckpoint
import pandas as pd
import sys
import keras
from keras.callbacks import ReduceLROnPlateau
def readucr(filename):
data = np.loadtxt(filename, delimiter = ',')
Y = data[:,0]
X = data[:,1:]
return X, Y
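# A minimal usage sketch of readucr (the dataset name below is illustrative only):
# each UCR file is a comma-separated matrix whose first column holds the class
# label and the remaining columns hold the time-series values, e.g.
#   X, Y = readucr('Adiac/Adiac_TRAIN')
#   # X.shape == (n_samples, series_length); Y contains the raw class labels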
nb_epochs = 300
#flist = ['Adiac', 'Beef', 'CBF', 'ChlorineConcentration', 'CinC_ECG_torso', 'Coffee', 'Cricket_X', 'Cricket_Y', 'Cricket_Z',
#'DiatomSizeReduction', 'ECGFiveDays', 'FaceAll', 'FaceFour', 'FacesUCR', '50words', 'FISH', 'Gun_Point', 'Haptics',
#'InlineSkate', 'ItalyPowerDemand', 'Lighting2', 'Lighting7', 'MALLAT', 'MedicalImages', 'MoteStrain', 'NonInvasiveFatalECG_Thorax1',
#'NonInvasiveFatalECG_Thorax2', 'OliveOil', 'OSULeaf', 'SonyAIBORobotSurface', 'SonyAIBORobotSurfaceII', 'StarLightCurves', 'SwedishLeaf', 'Symbols',
#'synthetic_control', 'Trace', 'TwoLeadECG', 'Two_Patterns', 'uWaveGestureLibrary_X', 'uWaveGestureLibrary_Y', 'uWaveGestureLibrary_Z', 'wafer', 'WordsSynonyms', 'yoga']
flist = [ sys.argv[1] ]
for each in flist:
fname = each
x_train, y_train = readucr(fname+'/'+fname+'_TRAIN')
x_test, y_test = readucr(fname+'/'+fname+'_TEST')
nb_classes = len(np.unique(y_test))
batch_size = int(min(x_train.shape[0]/10, 16))
y_train = (y_train - y_train.min())/(y_train.max()-y_train.min())*(nb_classes-1)
y_test = (y_test - y_test.min())/(y_test.max()-y_test.min())*(nb_classes-1)
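    # For example, raw labels such as {1, 2, 3} (or {-1, 1}) are rescaled above to
    # {0, 1, 2} (or {0, 1}) so that np_utils.to_categorical below can one-hot
    # encode them directly; this assumes the test labels span the same range as training.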
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
x_train_mean = x_train.mean()
x_train_std = x_train.std()
x_train = (x_train - x_train_mean)/(x_train_std)
x_test = (x_test - x_train_mean)/(x_train_std)
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
print ("class:"+each+", number of classes: "+str(nb_classes))
x = keras.layers.Input(x_train.shape[1:])
# drop_out = Dropout(0.2)(x)
conv1 = keras.layers.Conv1D(filters=32, kernel_size=8, strides=1, activation='relu', input_shape=(32,1))(x)
conv1 = keras.layers.normalization.BatchNormalization()(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
# drop_out = Dropout(0.2)(conv1)
conv2 = keras.layers.Conv1D(filters=64, kernel_size=5, border_mode='same')(conv1)
conv2 = keras.layers.normalization.BatchNormalization()(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
# drop_out = Dropout(0.2)(conv2)
conv3 = keras.layers.Conv1D(filters=32, kernel_size=3, border_mode='same')(conv2)
conv3 = keras.layers.normalization.BatchNormalization()(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
full = keras.layers.pooling.GlobalAveragePooling1D()(conv3)
out = keras.layers.Dense(nb_classes, activation='softmax')(full)
model = Model(input=x, output=out)
optimizer = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor=0.5,
patience=50, min_lr=0.0001)
# if os.path.isfile(fname+"_best.hdf5"):
# model.load_weights(fname+'_best.hdf5')
# model.load_weights(fname+'_shapelet_best.hdf5')
checkpointer = ModelCheckpoint(filepath=fname+"_best.hdf5",
monitor = 'val_accuracy',
verbose=2,
save_best_only=True)
# hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
# verbose=1, callbacks=[reduce_lr], validation_data=(x_test, Y_test))
hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
verbose=1, callbacks=[checkpointer,reduce_lr], validation_data=(x_test, Y_test))
    # Print the test metrics from the epoch with the lowest training loss.
log = pd.DataFrame(hist.history)
print (log.loc[log['loss'].idxmin]['loss'], log.loc[log['loss'].idxmin])
|
[
"keras.optimizers.Adam",
"keras.layers.pooling.GlobalAveragePooling1D",
"numpy.unique",
"keras.callbacks.ModelCheckpoint",
"keras.layers.normalization.BatchNormalization",
"keras.callbacks.ReduceLROnPlateau",
"keras.layers.Dense",
"keras.layers.Input",
"keras.utils.np_utils.to_categorical",
"keras.models.Model",
"keras.layers.Activation",
"pandas.DataFrame",
"numpy.loadtxt",
"keras.layers.Conv1D"
] |
[((420, 455), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (430, 455), True, 'import numpy as np\n'), ((1683, 1727), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'nb_classes'], {}), '(y_train, nb_classes)\n', (1706, 1727), False, 'from keras.utils import np_utils\n'), ((1741, 1784), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'nb_classes'], {}), '(y_test, nb_classes)\n', (1764, 1784), False, 'from keras.utils import np_utils\n'), ((2141, 2178), 'keras.layers.Input', 'keras.layers.Input', (['x_train.shape[1:]'], {}), '(x_train.shape[1:])\n', (2159, 2178), False, 'import keras\n'), ((3096, 3122), 'keras.models.Model', 'Model', ([], {'input': 'x', 'output': 'out'}), '(input=x, output=out)\n', (3101, 3122), False, 'from keras.models import Model\n'), ((3145, 3168), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (3166, 3168), False, 'import keras\n'), ((3321, 3394), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.5)', 'patience': '(50)', 'min_lr': '(0.0001)'}), "(monitor='loss', factor=0.5, patience=50, min_lr=0.0001)\n", (3338, 3394), False, 'from keras.callbacks import ReduceLROnPlateau\n'), ((3584, 3690), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(fname + '_best.hdf5')", 'monitor': '"""val_accuracy"""', 'verbose': '(2)', 'save_best_only': '(True)'}), "(filepath=fname + '_best.hdf5', monitor='val_accuracy',\n verbose=2, save_best_only=True)\n", (3599, 3690), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4137, 4163), 'pandas.DataFrame', 'pd.DataFrame', (['hist.history'], {}), '(hist.history)\n', (4149, 4163), True, 'import pandas as pd\n'), ((1428, 1445), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (1437, 1445), True, 'import numpy as np\n'), ((2224, 2325), 'keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(8)', 'strides': '(1)', 'activation': '"""relu"""', 'input_shape': '(32, 1)'}), "(filters=32, kernel_size=8, strides=1, activation='relu',\n input_shape=(32, 1))\n", (2243, 2325), False, 'import keras\n'), ((2336, 2383), 'keras.layers.normalization.BatchNormalization', 'keras.layers.normalization.BatchNormalization', ([], {}), '()\n', (2381, 2383), False, 'import keras\n'), ((2403, 2434), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2426, 2434), False, 'import keras\n'), ((2495, 2561), 'keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'border_mode': '"""same"""'}), "(filters=64, kernel_size=5, border_mode='same')\n", (2514, 2561), False, 'import keras\n'), ((2581, 2628), 'keras.layers.normalization.BatchNormalization', 'keras.layers.normalization.BatchNormalization', ([], {}), '()\n', (2626, 2628), False, 'import keras\n'), ((2648, 2679), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2671, 2679), False, 'import keras\n'), ((2740, 2806), 'keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'border_mode': '"""same"""'}), "(filters=32, kernel_size=3, border_mode='same')\n", (2759, 2806), False, 'import keras\n'), ((2826, 2873), 'keras.layers.normalization.BatchNormalization', 'keras.layers.normalization.BatchNormalization', ([], {}), '()\n', (2871, 2873), False, 'import keras\n'), ((2893, 2924), 'keras.layers.Activation', 
'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2916, 2924), False, 'import keras\n'), ((2948, 2993), 'keras.layers.pooling.GlobalAveragePooling1D', 'keras.layers.pooling.GlobalAveragePooling1D', ([], {}), '()\n', (2991, 2993), False, 'import keras\n'), ((3015, 3067), 'keras.layers.Dense', 'keras.layers.Dense', (['nb_classes'], {'activation': '"""softmax"""'}), "(nb_classes, activation='softmax')\n", (3033, 3067), False, 'import keras\n')]
|
#!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Probability(object):
"""Abstract base class for probability representation (grid, particle, etc)
long description of Probability
Parameters
----------
bounds : Array-like
Bounding coordinates for the probability map.
res : float
Resolution used for discretization of the probability map.
"""
def __init__(self, bounds, res):
self.bounds = bounds
self.ndims = int(len(bounds) / 2)
self.res = res
def entropy(self):
"""
"""
# <>TODO: figure this out. Look at papers!
# http://www-personal.acfr.usyd.edu.au/tbailey/papers/mfi08_huber.pdf
if not hasattr(self, 'pos'):
self._discretize()
if not hasattr(self, 'prob'):
self.pdf()
p_i = self.prob #TODO: change to 4 dims.
H = -np.nansum(p_i * np.log(p_i)) * self.res ** self.ndims # sum of elementwise entropy values
return H
def compute_kld(self, other_gm):
"""Computes the KLD of self from another GM.
Use a truth GM as other_gm.
"""
q_i = self.prob
p_i = other_gm.prob
kld = np.nansum(p_i * np.log(p_i / q_i)) * self.res ** self.ndims
return kld
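    # A small numeric sketch of the two discrete approximations above, assuming
    # both distributions were evaluated on the same grid with resolution `res`
    # (the values below are illustrative only):
    #   p = np.array([0.2, 0.5, 0.3]); q = np.array([0.25, 0.45, 0.30]); res = 1.0
    #   H   = -np.nansum(p * np.log(p)) * res ** 1         # entropy of p
    #   kld =  np.nansum(p * np.log(p / q)) * res ** 1     # KLD(p || q)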
# def _discretize(self, bounds=None, res=None, all_dims=False):
# if res is not None:
# self.res = res
# if bounds is None and self.bounds is None:
# b = [-10, 10] # bounds in any dimension
# bounds = [[d] * self.ndims for d in b] # apply bounds to each dim
# self.bounds = [d for dim in bounds for d in dim] # flatten bounds
# elif self.bounds is None:
# self.bounds = bounds
# # Create grid
# if self.ndims == 1:
# x = np.arange(self.bounds[0], self.bounds[1], res)
# self.x = x
# self.pos = x
# elif self.ndims == 2:
# X, Y = np.mgrid[self.bounds[0]:self.bounds[2] + self.res:self.res,
# self.bounds[1]:self.bounds[3] + self.res:self.res]
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X; pos[:, :, 1] = Y
# self.X = X; self.Y = Y
# self.pos = pos
# elif self.ndims > 2:
# logging.debug('Using first two variables as x and y')
# X, Y = np.mgrid[self.bounds[0]:self.bounds[2]
# + res:res,
# self.bounds[1]:self.bounds[3]
# + res:res]
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X; pos[:, :, 1] = Y
# self.X = X; self.Y = Y
# self.pos = pos
# if all_dims:
# #<>TODO: use more than the ndims == 4 case
# full_bounds = self.bounds[0:2] + [-0.5, -0.5] \
# + self.bounds[2:] + [0.5, 0.5]
# v_spacing = 0.1
# grid = np.mgrid[full_bounds[0]:full_bounds[4] + res:res,
# full_bounds[1]:full_bounds[5] + res:res,
# full_bounds[2]:full_bounds[6] + v_spacing:v_spacing,
# full_bounds[3]:full_bounds[7] + v_spacing:v_spacing,
# ]
# pos = np.empty(grid[0].shape + (4,))
# pos[:, :, :, :, 0] = grid[0]
# pos[:, :, :, :, 1] = grid[1]
# pos[:, :, :, :, 2] = grid[2]
# pos[:, :, :, :, 3] = grid[3]
# self.pos_all = pos
# else:
# logging.error('This should be impossible, a gauss mixture with no variables')
# raise ValueError
def plot(self, title=None, alpha=1.0, show_colorbar=True, **kwargs):
if not hasattr(self,'ax') or 'ax' in kwargs:
self.plot_setup(**kwargs)
if title is None:
title = self.__str__()
self.contourf = self.ax.contourf(self.X, self.Y,
self.prob,
levels=self.levels,
# cmap=plt.get_cmap('jet'),
alpha=alpha,
interpolation='none',
antialiased=False
)
if show_colorbar and not hasattr(self, 'cbar'):
divider = make_axes_locatable(self.ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(self.contourf, cax)
cbar.ax.tick_params(labelsize=20)
self.cbar = cbar
self.ax.set_title(title, fontsize=20)
if self.show_ellipses:
if hasattr(self.distribution, 'camera_viewcone'):
poly = self.distribution.camera_viewcone
else:
poly = None
            self.ellipse_patches = self.distribution.plot_ellipses(ax=self.ax,
poly=poly)
return self.contourf
def plot_setup(self, fig=None, ax=None, bounds=None, levels=None,
num_levels=50, resolution=0.1, show_ellipses=False):
self.show_ellipses = show_ellipses
if fig is None:
self.fig = plt.gcf()
else:
self.fig = fig
if ax is None:
self.ax = plt.gca()
else:
self.ax = ax
if bounds is None:
bounds = self.bounds
if not hasattr(self,'pos'):
self._discretize(bounds=bounds)
# Set levels
if levels is None:
_, max_prob = self.find_MAP()
self.levels = np.linspace(0, max_prob * 1.2, num_levels)
else:
self.levels = levels
# Set bounds
plt.axis('scaled')
self.ax.set_xlim([bounds[0], bounds[2]])
self.ax.set_ylim([bounds[1], bounds[3]])
def plot_remove(self):
"""Removes all plotted elements related to this gaussian mixture.
"""
if hasattr(self,'contourf'):
for collection in self.contourf.collections:
collection.remove()
del self.contourf
if hasattr(self, 'ellipse_patches'):
for patch in self.ellipse_patches:
patch.remove()
del self.ellipse_patches
def update_plot(self, i=0, **kwargs):
logging.debug('Probability update {}'.format(i))
self.plot_remove()
self.plot(**kwargs)
def copy(self):
return deepcopy(self)
|
[
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.colorbar",
"numpy.log",
"numpy.linspace",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"copy.deepcopy",
"matplotlib.pyplot.axis"
] |
[((6354, 6372), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (6362, 6372), True, 'import matplotlib.pyplot as plt\n'), ((7099, 7113), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (7107, 7113), False, 'from copy import deepcopy\n'), ((4944, 4972), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['self.ax'], {}), '(self.ax)\n', (4963, 4972), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5059, 5091), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['self.contourf', 'cax'], {}), '(self.contourf, cax)\n', (5071, 5091), True, 'import matplotlib.pyplot as plt\n'), ((5821, 5830), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5828, 5830), True, 'import matplotlib.pyplot as plt\n'), ((5918, 5927), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5925, 5927), True, 'import matplotlib.pyplot as plt\n'), ((6226, 6268), 'numpy.linspace', 'np.linspace', (['(0)', '(max_prob * 1.2)', 'num_levels'], {}), '(0, max_prob * 1.2, num_levels)\n', (6237, 6268), True, 'import numpy as np\n'), ((1629, 1646), 'numpy.log', 'np.log', (['(p_i / q_i)'], {}), '(p_i / q_i)\n', (1635, 1646), True, 'import numpy as np\n'), ((1314, 1325), 'numpy.log', 'np.log', (['p_i'], {}), '(p_i)\n', (1320, 1325), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn import metrics
class Options(object):
"""Options used by the model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.embedding_size = 32
# The initial learning rate.
self.learning_rate = 1.
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = 100
# Number of examples for one training step.
self.batch_size = 128
self.log_path = './ctr.log'
def read_file(path, infinite=True):
while True:
fi = open(path,'r')
for line in fi:
yield map(int,line.replace('\n', '').split(' '))
if infinite == False:
break
yield None
def ctr_batch_generator(opts, train=True):
if train:
file_reader = read_file(opts.train_path, True)
else:
file_reader = read_file(opts.test_path, False)
while True:
batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
labels = np.ndarray(shape=(opts.batch_size))
for i in xrange(opts.batch_size):
single_sample = file_reader.next()
if single_sample is None:
break
target = single_sample[0]
temp = single_sample[1:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
assert len(temp) == opts.sequence_length
batch[i] = temp
labels[i] = target
if len(labels) == opts.batch_size and single_sample is not None:
yield np.array(batch), labels
else:
break
def get_substitute_cate(sample, target_index, opts):
field_i = opts.fields_index_inverse.get(sample[target_index])
if field_i is None:
field_i = np.random.choice(opts.fields_index.keys(),1)[0]
field_cates = opts.fields_index[field_i]
rst = np.random.choice(field_cates,1)[0]
if len(field_cates) == 1:
rst = np.random.randint(opts.vocabulary_size)
return rst
def generate_fake_sample(temp, opts):
temp_sequence_length = len(temp)
temp = temp[0:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
else:
temp_sequence_length = opts.sequence_length
assert len(temp) == opts.sequence_length
targets_to_avoid = set(temp)
indices_to_avoid = set()
substitute_index = np.random.randint(temp_sequence_length)
substitute_target = get_substitute_cate(temp, substitute_index, opts)
for _ in range(opts.substitute_num):
while substitute_index in indices_to_avoid:
substitute_index = np.random.randint(temp_sequence_length)
indices_to_avoid.add(substitute_index)
count = 0
while substitute_target in targets_to_avoid:
if count > 5:
break
substitute_target = get_substitute_cate(temp, substitute_index, opts)
count += 1
targets_to_avoid.add(substitute_target)
temp[substitute_index] = substitute_target
return temp
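# A hedged illustration of generate_fake_sample (the numbers are made up): given a
# real category sequence temp = [12, 40, 7, ...], the function pads/truncates it to
# opts.sequence_length and then overwrites opts.substitute_num randomly chosen
# positions with categories drawn from the same feature field, producing a "fake"
# sample that the discriminant batches below label with 0.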
def generate_discriminant_batch(opts, is_train=True, rate=0.5):
data_index = 0
if is_train:
file_reader = read_file(opts.train_path)
else:
file_reader = read_file(opts.test_path)
while True:
batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
labels = []
for i in xrange(opts.batch_size):
if np.random.random() > rate:
single_sample = file_reader.next()
temp = single_sample[1:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
assert len(temp) == opts.sequence_length
batch[i] = temp
labels.append(1.)
else:
single_sample = file_reader.next()
temp = single_sample[1:opts.sequence_length]
batch[i] = generate_fake_sample(temp, opts)
labels.append(0.)
yield batch, np.array(labels)
def read_feat_index(opts):
vocabulary_size = 0
reverse_dictionary_raw = np.array(pd.read_csv(opts.featindex, sep='\t', header=None))
reverse_dictionary = {}
dictionary = {}
for item in reverse_dictionary_raw:
reverse_dictionary[int(item[1])] = item[0]
dictionary[item[0]] = int(item[1])
if item[1] > vocabulary_size:
vocabulary_size = item[1]
vocabulary_size = len(dictionary.keys())
print('vocabulary_size: ',vocabulary_size)
return reverse_dictionary, dictionary, vocabulary_size
def eval_auc(model, opts, target=None, get_prob=None):
testing_batch_generator = ctr_batch_generator(opts,train=False)
batch_num = 0
y = []
pred = []
for batch, labels in testing_batch_generator:
if target is None or get_prob is None:
probs = model.predict_proba(batch, batch_size=opts.batch_size, verbose=0)
else:
probs = get_prob([batch])[0]
y.extend(labels)
pred.extend([p[0] for p in probs])
batch_num += 1
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
loss = metrics.log_loss(y, pred)
print("Total testing sample: ", len(y), " Positive sample: ", sum(y))
opts.auc = auc
opts.loss = loss
with open(opts.log_path, 'a') as f:
f.write(str(opts.__dict__)+'\r')
print("AUC:", auc, ', log loss: ', loss)
|
[
"pandas.read_csv",
"numpy.random.choice",
"sklearn.metrics.auc",
"numpy.random.random",
"numpy.array",
"numpy.random.randint",
"sklearn.metrics.log_loss",
"sklearn.metrics.roc_curve",
"numpy.ndarray"
] |
[((2763, 2802), 'numpy.random.randint', 'np.random.randint', (['temp_sequence_length'], {}), '(temp_sequence_length)\n', (2780, 2802), True, 'import numpy as np\n'), ((5627, 5666), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y', 'pred'], {'pos_label': '(1)'}), '(y, pred, pos_label=1)\n', (5644, 5666), False, 'from sklearn import metrics\n'), ((5677, 5698), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (5688, 5698), False, 'from sklearn import metrics\n'), ((5710, 5735), 'sklearn.metrics.log_loss', 'metrics.log_loss', (['y', 'pred'], {}), '(y, pred)\n', (5726, 5735), False, 'from sklearn import metrics\n'), ((1101, 1158), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(opts.batch_size, opts.sequence_length)'}), '(shape=(opts.batch_size, opts.sequence_length))\n', (1111, 1158), True, 'import numpy as np\n'), ((1176, 1209), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'opts.batch_size'}), '(shape=opts.batch_size)\n', (1186, 1209), True, 'import numpy as np\n'), ((2147, 2179), 'numpy.random.choice', 'np.random.choice', (['field_cates', '(1)'], {}), '(field_cates, 1)\n', (2163, 2179), True, 'import numpy as np\n'), ((2226, 2265), 'numpy.random.randint', 'np.random.randint', (['opts.vocabulary_size'], {}), '(opts.vocabulary_size)\n', (2243, 2265), True, 'import numpy as np\n'), ((2520, 2546), 'numpy.array', 'np.array', (['(temp + [0] * gap)'], {}), '(temp + [0] * gap)\n', (2528, 2546), True, 'import numpy as np\n'), ((3725, 3782), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(opts.batch_size, opts.sequence_length)'}), '(shape=(opts.batch_size, opts.sequence_length))\n', (3735, 3782), True, 'import numpy as np\n'), ((4650, 4700), 'pandas.read_csv', 'pd.read_csv', (['opts.featindex'], {'sep': '"""\t"""', 'header': 'None'}), "(opts.featindex, sep='\\t', header=None)\n", (4661, 4700), True, 'import pandas as pd\n'), ((3017, 3056), 'numpy.random.randint', 'np.random.randint', (['temp_sequence_length'], {}), '(temp_sequence_length)\n', (3034, 3056), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.array', 'np.array', (['(temp + [0] * gap)'], {}), '(temp + [0] * gap)\n', (1592, 1610), True, 'import numpy as np\n'), ((3860, 3878), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3876, 3878), True, 'import numpy as np\n'), ((4535, 4551), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4543, 4551), True, 'import numpy as np\n'), ((1814, 1829), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (1822, 1829), True, 'import numpy as np\n'), ((4139, 4165), 'numpy.array', 'np.array', (['(temp + [0] * gap)'], {}), '(temp + [0] * gap)\n', (4147, 4165), True, 'import numpy as np\n')]
|
from __future__ import division
import pandas as pd
import numpy as np
import calendar
import os.path as op
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from scipy.stats import percentileofscore
from scipy.stats import scoreatpercentile, pearsonr
from math import *
import time
from BCSD_stats_functions import *
import xarray as xr
import os, errno
def CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, MONTH_NAME, count_grid, BC_VAR, TINY):
CORRECT_FCST_COARSE = np.ones(((TARGET_FCST_EYR-TARGET_FCST_SYR)+1, LEAD_FINAL, ENS_NUM))*-999
for LEAD_NUM in range(0, LEAD_FINAL): ## Loop from lead =0 to Final Lead
TARGET_MONTH = MON + LEAD_NUM; ## This is the target forecast month
## Check for the cases when the target forecast month is in the next year (e.g. February 1983 forecast initialized in December 1982)
if (TARGET_MONTH>12):
TARGET_MONTH-=12 #subtracting 12 so 13 becomes 1 meaning the month of January and so on.
## Just checking if the lead and target month combination is working as expected
if (count_grid==0): #Only printing the following for the first grid cell, no need to repeat
print ("Initial forecast month is {} Lead is {} and Target month is {}".format(MONTH_NAME, LEAD_NUM, calendar.month_name[TARGET_MONTH]))
        # Retrieving the observed and forecast time series for the given target month
OBS_QUANT_TS, OBS_CLIM_TS = OBS_CLIM_ALL[0, :], OBS_CLIM_ALL[TARGET_MONTH, :] ## Note that the first column is quantile time series
FCST_QUANT_TS, FCST_CLIM_TS = FCST_CLIM_ALL[0, :], FCST_CLIM_ALL[LEAD_NUM+1, :] ## Note that the first column is quantile time series
## Now calculating mean, standard deviation and skew of both observed and forecast time series
obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS, TINY)
fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS, TINY)
#obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS.values, TINY)
#fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS.values, TINY)
## Ok, now getting started on the bias correction
        ## Note that bias correction is done separately for each ensemble member of all years
for fcst_yr in range(TARGET_FCST_SYR-FCST_SYR, (TARGET_FCST_EYR-FCST_SYR)+1):
for ens_num in range (0, ENS_NUM):
TARGET_FCST_VAL = TARGET_FCST_VAL_ARR[fcst_yr, LEAD_NUM, ens_num]
## First determine the quantile for given target forecast value
TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS, FCST_QUANT_TS, len(FCST_CLIM_TS), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY);
#TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS.values, FCST_QUANT_TS.values, len(FCST_CLIM_TS.values), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY);
                ## Also note that QUAN helps the function lookup determine whether we are converting a value to a quantile or vice versa
## For converting a value to quantile use 'QUAN' for converting quantile to value use 'DATA'
## Now using the quantile above determine the corresponding value from the observed climatology
BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS, OBS_CLIM_TS, len(OBS_CLIM_TS), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY);
#BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS.values, OBS_CLIM_TS.values, len(OBS_CLIM_TS.values), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY);
if (BC_VAR=='PRCP') and (BIAS_CORRECTED_VALUE<0): ## This is just a hack to check we are not getting negative value of precipitation
print (TARGET_FCST_VAL, TARGET_FCST_QUANT, fcst_yr, LEAD_NUM, ens_num)
## Now storing the bias corrected anomaly
CORRECT_FCST_COARSE[fcst_yr, LEAD_NUM, ens_num] = BIAS_CORRECTED_VALUE
return CORRECT_FCST_COARSE
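# In short, CALC_BCSD performs empirical quantile mapping: for every lead, year and
# ensemble member it (1) looks up the quantile of the raw forecast value in the
# forecast climatology, then (2) maps that quantile back onto the observed
# climatology. A hedged one-cell call sketch (argument values are assumptions):
#   corrected = CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL,
#                         TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, TARGET_FCST_EYR,
#                         FCST_SYR, ENS_NUM, MON, 'January', 0, 'PRCP', 1e-6)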
def latlon_calculations(ilat_min, ilat_max, ilon_min, ilon_max, nlats, nlons, \
np_OBS_CLIM_ARRAY, np_FCST_CLIM_ARRAY, \
LEAD_FINAL, TARGET_FCST_EYR, TARGET_FCST_SYR, FCST_SYR, ENS_NUM, MON, \
MONTH_NAME, BC_VAR, TINY, FCST_COARSE):
CORRECT_FCST_COARSE = np.ones(((TARGET_FCST_EYR-TARGET_FCST_SYR)+1, LEAD_FINAL, ENS_NUM, nlats, nlons))*-999
num_lats = ilat_max-ilat_min+1
num_lons = ilon_max-ilon_min+1
print("num_lats = ", num_lats, np_OBS_CLIM_ARRAY.shape)
print("num_lons = ", num_lons, FCST_COARSE.shape)
for ilat in range(num_lats):
lat_num = ilat_min + ilat
for ilon in range(num_lons):
lon_num = ilon_min + ilon
count_grid = ilon + ilat*num_lons
OBS_CLIM_ALL = np_OBS_CLIM_ARRAY[:, :, ilat, ilon]
FCST_CLIM_ALL = np_FCST_CLIM_ARRAY[:, :, ilat, ilon]
TARGET_FCST_VAL_ARR = FCST_COARSE[:, :, :, lat_num, lon_num]
CORRECT_FCST_COARSE[:, :, :, lat_num, lon_num] = CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, \
TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, \
TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, \
MONTH_NAME, count_grid, BC_VAR, TINY)
return CORRECT_FCST_COARSE
|
[
"numpy.ones"
] |
[((598, 667), 'numpy.ones', 'np.ones', (['(TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM)'], {}), '((TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM))\n', (605, 667), True, 'import numpy as np\n'), ((4583, 4670), 'numpy.ones', 'np.ones', (['(TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM, nlats, nlons)'], {}), '((TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM, nlats,\n nlons))\n', (4590, 4670), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
obspy.io.nied.knet - K-NET/KiK-net read support for ObsPy
=========================================================
Reading of the K-NET and KiK-net ASCII format as defined on
http://www.kyoshin.bosai.go.jp.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import re
import numpy as np
from obspy import UTCDateTime, Stream, Trace
from obspy.core.trace import Stats
class KNETException(Exception):
pass
def _buffer_proxy(filename_or_buf, function, reset_fp=True,
file_mode="rb", *args, **kwargs):
"""
Calls a function with an open file or file-like object as the first
argument. If the file originally was a filename, the file will be
opened, otherwise it will just be passed to the underlying function.
:param filename_or_buf: File to pass.
:type filename_or_buf: str, open file, or file-like object.
:param function: The function to call.
:param reset_fp: If True, the file pointer will be set to the initial
position after the function has been called.
:type reset_fp: bool
:param file_mode: Mode to open file in if necessary.
"""
try:
position = filename_or_buf.tell()
is_buffer = True
except AttributeError:
is_buffer = False
if is_buffer is True:
ret_val = function(filename_or_buf, *args, **kwargs)
if reset_fp:
filename_or_buf.seek(position, 0)
return ret_val
else:
with open(filename_or_buf, file_mode) as fh:
return function(fh, *args, **kwargs)
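# A hedged usage sketch of _buffer_proxy: it lets the readers below accept either a
# filename or an already-open file object transparently. For a path (hypothetical
# file name):
#   st = _buffer_proxy('example.EW', _internal_read_knet_ascii)
# is equivalent to opening the file in 'rb' mode and calling the function on it.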
def _is_knet_ascii(filename_or_buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param filename_or_buf: File to test.
:type filename_or_buf: str or file-like object.
"""
try:
return _buffer_proxy(filename_or_buf, _internal_is_knet_ascii,
reset_fp=True)
# Happens for example when passing the data as a string which would be
# interpreted as a filename.
except (OSError, UnicodeDecodeError):
return False
def _internal_is_knet_ascii(buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
first_string = buf.read(11).decode()
# File has less than 11 characters
if len(first_string) != 11:
return False
if first_string == 'Origin Time':
return True
return False
def _prep_hdr_line(name, line):
"""
Helper function to check the contents of a header line and split it.
:param name: String that the line should start with.
:type name: str
:param line: Line to check and split.
:type line: str
"""
if not line.startswith(name):
raise KNETException("Expected line to start with %s but got %s "
% (name, line))
else:
return line.split()
def _read_knet_hdr(hdrlines, convert_stnm=False, **kwargs):
"""
Read the header values into a dictionary.
    :param hdrlines: List of the header lines of a K-NET/KiK-net ASCII file
:type hdrlines: list
:param convert_stnm: For station names with 6 letters write the last two
letters of the station code to the 'location' field
:type convert_stnm: bool
"""
hdrdict = {'knet': {}}
hdrnames = ['Origin Time', 'Lat.', 'Long.', 'Depth. (km)', 'Mag.',
'Station Code', 'Station Lat.', 'Station Long.',
'Station Height(m)', 'Record Time', 'Sampling Freq(Hz)',
'Duration Time(s)', 'Dir.', 'Scale Factor', 'Max. Acc. (gal)',
'Last Correction', 'Memo.']
_i = 0
# Event information
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['evot'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lat = float(flds[1])
hdrdict['knet']['evla'] = lat
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lon = float(flds[1])
hdrdict['knet']['evlo'] = lon
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dp = float(flds[2])
hdrdict['knet']['evdp'] = dp
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
mag = float(flds[1])
hdrdict['knet']['mag'] = mag
# Station information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
# K-NET and KiK-Net station names can be more than 5 characters long
# which will cause the station name to be truncated when writing the
# the trace as miniSEED; if convert_stnm is enabled, the last two
# letters of the station code are written to the 'location' field
stnm = flds[2]
location = ''
if convert_stnm and len(stnm) > 5:
location = stnm[-2:]
stnm = stnm[:-2]
if len(stnm) > 7:
raise KNETException(
"Station name can't be more than 7 characters long!")
hdrdict['station'] = stnm
hdrdict['location'] = location
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stla'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stlo'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stel'] = float(flds[2])
# Data information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
    # A 15 s delay is added to the record time by the
    # K-NET and KiK-net data logger
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S') - 15.0
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['starttime'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
freqstr = flds[2]
m = re.search('[0-9]*', freqstr)
freq = int(m.group())
hdrdict['sampling_rate'] = freq
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['duration'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
channel = flds[1].replace('-', '')
kiknetcomps = {'1': 'NS1', '2': 'EW1', '3': 'UD1',
'4': 'NS2', '5': 'EW2', '6': 'UD2'}
if channel.strip() in kiknetcomps.keys(): # kiknet directions are 1-6
channel = kiknetcomps[channel.strip()]
hdrdict['channel'] = channel
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
eqn = flds[2]
num, denom = eqn.split('/')
num = float(re.search('[0-9]*', num).group())
denom = float(denom)
# convert the calibration from gal to m/s^2
hdrdict['calib'] = 0.01 * num / denom
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
acc = float(flds[3])
hdrdict['knet']['accmax'] = acc
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['last correction'] = dt
# The comment ('Memo') field is optional
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
if len(flds) > 1:
hdrdict['knet']['comment'] = ' '.join(flds[1:])
if len(hdrlines) != _i + 1:
raise KNETException("Expected %d header lines but got %d"
% (_i + 1, len(hdrlines)))
return hdrdict
def _read_knet_ascii(filename_or_buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param filename: K-NET/KiK-net ASCII file to be read.
:type filename: str or file-like object.
"""
return _buffer_proxy(filename_or_buf, _internal_read_knet_ascii, **kwargs)
def _internal_read_knet_ascii(buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
data = []
hdrdict = {}
cur_pos = buf.tell()
buf.seek(0, 2)
size = buf.tell()
buf.seek(cur_pos, 0)
# First read the headerlines
headerlines = []
while buf.tell() < size:
line = buf.readline().decode()
headerlines.append(line)
if line.startswith('Memo'):
hdrdict = _read_knet_hdr(headerlines, **kwargs)
break
while buf.tell() < size:
line = buf.readline()
parts = line.strip().split()
data += [float(p) for p in parts]
hdrdict['npts'] = len(data)
# The FDSN network code for the National Research Institute for Earth
    # Science and Disaster Prevention (NIED JAPAN) is BO (Bosai-Ken Network)
hdrdict['network'] = 'BO'
data = np.array(data)
stats = Stats(hdrdict)
trace = Trace(data, header=stats)
return Stream([trace])
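# A hedged usage sketch: because _is_knet_ascii / _read_knet_ascii register with
# ObsPy's plugin machinery, K-NET/KiK-net files are normally read through
# obspy.read (the file name is illustrative, and the 'KNET' format key is assumed
# from this module's registration):
#   from obspy import read
#   st = read('AKT0069601080443.EW', format='KNET', convert_stnm=True)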
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
|
[
"obspy.Stream",
"obspy.UTCDateTime.strptime",
"numpy.array",
"doctest.testmod",
"obspy.Trace",
"obspy.core.trace.Stats",
"re.search"
] |
[((3927, 3972), 'obspy.UTCDateTime.strptime', 'UTCDateTime.strptime', (['dt', '"""%Y/%m/%d %H:%M:%S"""'], {}), "(dt, '%Y/%m/%d %H:%M:%S')\n", (3947, 3972), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((6132, 6160), 're.search', 're.search', (['"""[0-9]*"""', 'freqstr'], {}), "('[0-9]*', freqstr)\n", (6141, 6160), False, 'import re\n'), ((7229, 7274), 'obspy.UTCDateTime.strptime', 'UTCDateTime.strptime', (['dt', '"""%Y/%m/%d %H:%M:%S"""'], {}), "(dt, '%Y/%m/%d %H:%M:%S')\n", (7249, 7274), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((9412, 9426), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9420, 9426), True, 'import numpy as np\n'), ((9439, 9453), 'obspy.core.trace.Stats', 'Stats', (['hdrdict'], {}), '(hdrdict)\n', (9444, 9453), False, 'from obspy.core.trace import Stats\n'), ((9466, 9491), 'obspy.Trace', 'Trace', (['data'], {'header': 'stats'}), '(data, header=stats)\n', (9471, 9491), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((9503, 9518), 'obspy.Stream', 'Stream', (['[trace]'], {}), '([trace])\n', (9509, 9518), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((9571, 9606), 'doctest.testmod', 'doctest.testmod', ([], {'exclude_empty': '(True)'}), '(exclude_empty=True)\n', (9586, 9606), False, 'import doctest\n'), ((5856, 5901), 'obspy.UTCDateTime.strptime', 'UTCDateTime.strptime', (['dt', '"""%Y/%m/%d %H:%M:%S"""'], {}), "(dt, '%Y/%m/%d %H:%M:%S')\n", (5876, 5901), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((6843, 6867), 're.search', 're.search', (['"""[0-9]*"""', 'num'], {}), "('[0-9]*', num)\n", (6852, 6867), False, 'import re\n')]
|
from sweeps.sweepFunctions import *
import sys
import numpy as np
def SMTBFSweep(SMTBFSweepInput,ourInput):
myRange = SMTBFSweepInput["range"] if dictHasKey(SMTBFSweepInput,"range") else False
myStickyRange=SMTBFSweepInput["sticky-range"] if dictHasKey(SMTBFSweepInput,"sticky-range") else False
sticky=False if type(myStickyRange) == bool else True
myFormula = SMTBFSweepInput["formula"] if dictHasKey(SMTBFSweepInput,"formula") else False
fixedToNode = SMTBFSweepInput["compute-SMTBF-from-NMTBF"] if dictHasKey(SMTBFSweepInput,"compute-SMTBF-from-NMTBF") else False
if type(myRange) == bool and type(myStickyRange) == bool:
#ok so we are going to have a min,max,step
minimum = float(SMTBFSweepInput["min"])
maximum = float(SMTBFSweepInput["max"])
step = float(SMTBFSweepInput["step"])
if myFormula:
#ok so we have a formula
formula_range = list(np.arange(minimum,maximum+step,step))
SMTBFRange = [eval(myFormula) for i in formula_range]
else:
SMTBFRange = list(np.arange(minimum,maximum+step,step))
elif myFormula:
if sticky:
formula_range = myStickyRange
else:
formula_range = myRange
SMTBFRange = [eval(myFormula) for i in formula_range]
else:
if sticky:
SMTBFRange = myStickyRange
else:
SMTBFRange = myRange
currentExperiments = len(ourInput.keys())
if sticky and not(len(SMTBFRange) == currentExperiments):
print("chose sticky-range for SMTBF but length of sticky-range does not match length of currentExperiments\n"+"SMTBFRange: "+str(len(SMTBFRange))
+" currentExperiments: "+ str(currentExperiments))
raise ValueError("chose sticky-range for SMTBF but length of sticky-range does not match length of currentExperiments\n"+"SMTBFRange: "+str(len(SMTBFRange))
+" currentExperiments: "+ str(currentExperiments))
#if there were no sweeps before. Notice compute-SMTBF-from-NMTBF doesn't make sense if this is the case since there will be no nodes
if currentExperiments == 0:
count = 1
for i in SMTBFRange:
ourInput["experiment_{count}".format(count=count)]={"SMTBF":i}
count+=1
#there were sweeps before
else:
tmpInput = ourInput.copy()
count = 1
# update the current experiments first, if sticky ONLY update the current experiments
for i in ourInput.keys():
data = ourInput[i]
if fixedToNode == True:
nodes = data["nodes"] if dictHasKey(data,"nodes") else False
if type(nodes) == bool:
print("compute-SMTBF-from-NMTBF set but no nodes set")
sys.exit(1)
if sticky:
data["SMTBF"] = SMTBFRange[count-1]/nodes
else:
data["SMTBF"] = SMTBFRange[0]/nodes
else:
data["SMTBF"] = SMTBFRange[0]
ourInput[i] = data
count+=1
if not sticky:
for i in SMTBFRange:
if not i == SMTBFRange[0]: #skip the first, we already did it
for j in tmpInput.keys():
data = tmpInput[j].copy()
if fixedToNode == True:
nodes = data["nodes"] if dictHasKey(data,"nodes") else False
if type(nodes) == bool:
print("compute-SMTBF-from-NMTBF set but no nodes set")
sys.exit(1)
data["SMTBF"] = i/nodes
else:
data["SMTBF"] = i
ourInput["experiment_{count}".format(count=count)] = data
count+=1
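# A hedged illustration of the min/max/step + formula path above (numbers are made
# up): with min=100, max=300, step=100 and formula="i*3600", formula_range is
# [100.0, 200.0, 300.0] and SMTBFRange becomes [360000.0, 720000.0, 1080000.0],
# i.e. each value scaled from hours to seconds.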
|
[
"numpy.arange"
] |
[((926, 966), 'numpy.arange', 'np.arange', (['minimum', '(maximum + step)', 'step'], {}), '(minimum, maximum + step, step)\n', (935, 966), True, 'import numpy as np\n'), ((1074, 1114), 'numpy.arange', 'np.arange', (['minimum', '(maximum + step)', 'step'], {}), '(minimum, maximum + step, step)\n', (1083, 1114), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# coding: utf-8
# author: <NAME> <<EMAIL>>
import pandas as pd
import numpy as np
from itertools import islice
from sklearn.utils.validation import check_X_y
class KTopScoringPair:
""" K-Top Scoring Pair classifier.
    This classifier computes maximum-likelihood estimates of P(X_i < X_j | Y),
    with (X_i, X_j) a pair of features, given a class Y. K determines how many
    pairs are evaluated. Pairs are then ranked by the primary score:
    s = P(X_i < X_j | 0) - P(X_i < X_j | 1)
    Further details can be found in [1].
    By its nature this is a binary classifier, but it will not raise an error
    if more than two labels are found; the score is simply computed between the
    first and second class. Multi-class classification can be achieved by using
    sklearn's multiclass wrappers.
Parameters
----------
    pairs : list of tuples with the indices of the features to be considered.
        The features will be tested in order, that is (X_i, X_j) will be counted
        for X_i < X_j.
K : int. How many pairs will contribute to classification.
It should be chosen as an odd int, to allow majority voting.
t : int, optional (default=0)
It can be used to adjust accuracy/specificity. By default it means that
score_{ij} = (P(X_i < X_j | 0) - P(X_i < X_j | 1)) > t
Attributes
----------
estimated_proba_ : 2d array of float
Estimated probability computed from training.
    rules_ : list of length K
        Human-readable rules found during training.
    References
    ----------
.. [1] AFSARI, Bahman, et al. Rank discriminants for predicting phenotypes
from RNA expression. The Annals of Applied Statistics, 2014, 8.3: 1469-1491.
"""
def __init__(self, pairs, K, t=0):
self.pairs = pairs
self.K = K
self.t = t
self._estimator_type = "classifier"
# Defined after fitting
self.estimated_proba_ = None
self.rules_ = []
self.classes_ = []
def fit(self, X, y):
""" Train the classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
y : array-like of shape = [n_samples]
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y) # Assert input is safe
# Determine class and convert y accordingly
self.classes_, y = np.unique(y, return_inverse=True)
# Main statistics gathering
Frequencies, Sizes = self._fit(X, y, self.pairs)
# Compute likelihood probabilities
self._compute_proba(Frequencies, Sizes)
return self
def _fit(self, X, y, pairs):
        # Instantiate dictionary as counter for (X_i, X_j) = |{X_i < X_j | Y}|
pairs_dict = {l: dict() for l in range(len(self.classes_))}
class_size = {l: 0 for l in range(len(self.classes_))}
# Class loop
for label in pairs_dict.keys():
X_given_y = X[y==label]
class_size[label] = X_given_y.shape[0]
class_pairs = pairs_dict[label]
# Pairs loop
for X_i, X_j in pairs:
class_pairs[(X_i, X_j)] = sum(X_given_y[:, X_i] < X_given_y[:, X_j])
# Return statistics in a convenient format
Freq, Size = pd.DataFrame(pairs_dict), pd.Series(class_size)
return Freq, Size
def predict(self, X, K=None, t=None):
""" Predict the provided X.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
K : int, optional.
Once estimated_proba_ were computed there is no problem to vary K and
use K-rules different from __init__ time K
t : int, optional
Same as above
Returns
-------
y : array-like of shape = [n_samples]
"""
P = self.predict_proba(X, K)
# Translate most probable class with its label
return self.classes_[np.argmax(P, axis=1)]
def predict_proba(self, X, K=None, t=None):
""" Predict the provided X with probabilities.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
K : int, optional.
Once estimated_proba_ were computed there is no problem to vary K and
use K-rules different from __init__ time K
t : int, optional
Same as above
Returns
-------
P : array of shape = [n_samples, n_class]
"""
def vote_for(x):
return [r['i<j'] if x[r['i']] < x[r['j']] else r['j<i'] for r in self.rules_]
# Rebuild rules if K or t is different from __init__ time K
if (K is not None and K != self.K) or (t is not None and t != self.t):
P = self.estimated_proba_
self.K = self.K if K is None else K
self.t = self.t if t is None else t
self.rules_ = self._scorer(P, self.K, self.t, P.columns[0], P.columns[1])
# Gather votes for every sample -> V = (n, k)
V = [vote_for(x) for _, x in X.iterrows()]
# Group votes by class -> P (n, c)
P = [{k: v for k, v in zip(*np.unique(v, return_counts=True))} for v in V]
P = pd.DataFrame(P, columns=self.classes_).fillna(0)
        # Normalize the counts to emit probabilities
        return (P / self.K).values
def partial_fit(self, X_batch, y_batch, classes):
""" Train the classifier by chunk. This can take advantage of multiprocessing
computation. Choose chunk dimension it is your discretion.
Parameters
----------
X_batch : iterator for an {array-like, sparse matrix} of
shape = [n_samples, n_features]
y_batch : iterator for an array-like of shape = [n_samples]
classes : array-like, shape (n_classes,)
Can't be inferred, then classes need to be passed as argument.
Returns
-------
self : returns an instance of self.
"""
from multiprocessing import Pool
self.classes_ = np.array(sorted(classes))
pool = Pool()
# Process mapping (zip is needed because map can handle only one argument)
Freq_chunks, Size_chunks = zip(*pool.map(self._chunk_worker, zip(X_batch, y_batch)))
# Concatenate resultant dictionary for missing pairs, then group-by and
# aggregate totals with a sum
F, S = pd.concat(Freq_chunks), pd.concat(Size_chunks)
Frequencies, Sizes = F.groupby(level=[0, 1]).sum(), S.groupby(S.index).sum()
# Now statistics are complete, compute as normal fit
self._compute_proba(Frequencies, Sizes)
return self
def _chunk_worker(self, X_y):
# Assert input safely
X, y = X_y
X, y = check_X_y(X, y)
# Translate y as label
d = {k:v for k,v in zip(self.classes_, range(len(self.classes_)))}
y = np.array(list(map(lambda x: d[x], y)))
# Count frequencies-sizes for this chunk
return self._fit(X, y, self.pairs)
def _scorer(self, P, K, t, minus, plus):
        # Not the most efficient approach, but it produces human-readable rules.
def formatted_rule(i, j, isPositive, score):
if isPositive:
return {"i":i, "j":j, "i<j":minus, "j<i":plus, "score":score}
else:
return {"i":i, "j":j, "i<j":plus, "j<i":minus, "score":score}
        # The sign of the score depends on which class is subtracted from which
scores = P[minus] - P[plus]
ranked = scores.abs().sort_values(ascending=False)
# Compute rules, ranked by descending score
rules = [formatted_rule(k[0], k[1], scores[k] > t, scores[k])
for k in islice(iter(ranked.keys()), K)]
return rules
def _compute_proba(self, Frequencies, Sizes):
# Mainly for debugging purposes
self.frequencies_, self.sizes_ = Frequencies, Sizes
        # Compute P = |{X_i < X_j | Y}| / |Y|
P = Frequencies / Sizes
self.estimated_proba_ = P
# Build rules
self.rules_ = self._scorer(P, self.K, self.t, P.columns[0], P.columns[1])
def get_params(self, deep=True):
return {"pairs": self.pairs, "K": self.K, "t": self.t}
def set_params(self, **parameters):
for parameter, value in parameters.items():
            setattr(self, parameter, value)
return self
def human_rules(self, features):
""" Allow rules convertion for human reading.
Parameters
----------
features : list of feature name corresponding to i,j indexing
Returns
-------
hr_rules : list of rules, with label converted according to input
"""
import copy as cp
hr_rules = cp.deepcopy(self.rules_)
for d in hr_rules:
d['i'], d['j'] = features[d['i']], features[d['j']]
d['i<j'], d['j<i'] = self.classes_[d['i<j']], self.classes_[d['j<i']]
return hr_rules
|
[
"pandas.Series",
"numpy.unique",
"numpy.argmax",
"multiprocessing.Pool",
"copy.deepcopy",
"pandas.DataFrame",
"pandas.concat",
"sklearn.utils.validation.check_X_y"
] |
[((2475, 2490), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (2484, 2490), False, 'from sklearn.utils.validation import check_X_y\n'), ((2594, 2627), 'numpy.unique', 'np.unique', (['y'], {'return_inverse': '(True)'}), '(y, return_inverse=True)\n', (2603, 2627), True, 'import numpy as np\n'), ((6463, 6469), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (6467, 6469), False, 'from multiprocessing import Pool\n'), ((7139, 7154), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (7148, 7154), False, 'from sklearn.utils.validation import check_X_y\n'), ((9142, 9166), 'copy.deepcopy', 'cp.deepcopy', (['self.rules_'], {}), '(self.rules_)\n', (9153, 9166), True, 'import copy as cp\n'), ((3485, 3509), 'pandas.DataFrame', 'pd.DataFrame', (['pairs_dict'], {}), '(pairs_dict)\n', (3497, 3509), True, 'import pandas as pd\n'), ((3511, 3532), 'pandas.Series', 'pd.Series', (['class_size'], {}), '(class_size)\n', (3520, 3532), True, 'import pandas as pd\n'), ((4222, 4242), 'numpy.argmax', 'np.argmax', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (4231, 4242), True, 'import numpy as np\n'), ((6779, 6801), 'pandas.concat', 'pd.concat', (['Freq_chunks'], {}), '(Freq_chunks)\n', (6788, 6801), True, 'import pandas as pd\n'), ((6803, 6825), 'pandas.concat', 'pd.concat', (['Size_chunks'], {}), '(Size_chunks)\n', (6812, 6825), True, 'import pandas as pd\n'), ((5542, 5580), 'pandas.DataFrame', 'pd.DataFrame', (['P'], {'columns': 'self.classes_'}), '(P, columns=self.classes_)\n', (5554, 5580), True, 'import pandas as pd\n'), ((5483, 5515), 'numpy.unique', 'np.unique', (['v'], {'return_counts': '(True)'}), '(v, return_counts=True)\n', (5492, 5515), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
""" A data clustering widget for the Orange3.
This is a data clustering widget for Orange3 that implements the OPTICS algorithm.
OPTICS stands for "Ordering Points To Identify the Clustering Structure".
It is a very useful algorithm for clustering unlabeled data with non-flat geometry,
uneven cluster sizes, or variable cluster density.
The package used is called "sklearn". Source: https://scikit-learn.org/stable/index.html
To run the addon, just install it using 'pip install -e .' from its package folder.
Don't forget to first activate the orange environment.
__author__ = <NAME>
__date__ = Feb 2020
__version__ = 0.1.0
__type__ = Orange Addon
__platform__ = Windows (Orange environment)
__email__ = '<NAME>' <<EMAIL>>
__status__ = Dev
"""
import numpy as np
from AnyQt.QtCore import Qt
from AnyQt.QtGui import QColor
from Orange.widgets import widget, gui
from Orange.widgets import settings
from Orange.widgets.widget import Msg
from Orange.widgets.utils.signals import Input, Output
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.utils.slidergraph import SliderGraph
from Orange.data import Table, Domain, DiscreteVariable
from pyqtgraph import mkPen
from pyqtgraph.functions import intColor
from sklearn.cluster import OPTICS
from sklearn.neighbors import VALID_METRICS
""" OPTICS Parameters
class sklearn.cluster.OPTICS(
* min_samples=5, {default=5 or int > 1}, title: Min samples
max_eps=inf, {default=np.inf}, not changed
* metric='minkowski', {default='minkowski' or [1]}, title: Metric
p=2, {default=2}, not changed
cluster_method='xi', {default='xi'}, not changed
eps=None, {default=None}, not changed
* xi=0.05, {default=0.05 or float, between 0 and 1}, title: Minimum steepness
predecessor_correction=True, {default=True}, not changed
min_cluster_size=None, {default=None}, not changed
* algorithm='auto', {default=auto or ball_tree, kd_tree, brute, auto}, title: Algorithm for nearest neighbors:
leaf_size=30, {default=30}, not changed
n_jobs=None, {default=None}, not changed
)
[1] Valid values for metric are:
from scikit-learn: [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’, ‘manhattan’]
from scipy.spatial.distance: [‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’,
‘kulsinski’, ‘mahalanobis’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’]
See the documentation for scipy.spatial.distance for details on these metrics.
"""
OPTICS_METRICS = [
("cityblock", "cityblock"),
("cosine", "cosine"),
("euclidean", "euclidean"),
("l1", "l1"),
("l2", "l2"),
("manhattan", "manhattan"),
("braycurtis", "braycurtis"),
("canberra", "canberra"),
("chebyshev", "chebyshev"),
("correlation", "correlation"),
("hamming", "hamming"),
("minkowski", "minkowski"),
("sqeuclidean", "sqeuclidean"),
]
OPTICS_ALGORITHM = [
("Auto","auto"),
("Ball Tree","ball_tree"),
("kd Tree","kd_tree"),
("Brute","brute"),
]
class OPTICS_w(widget.OWWidget):
name = "OPTICS"
    description = "dynamically clustering unlabeled data by density"
icon = "icons/OPTICS.svg"
priority = 20
class Inputs:
data = Input("Data", Table)
class Outputs:
annotated_data = Output("Data", Table)
class Error(widget.OWWidget.Error):
not_enough_instances = Msg("Not enough unique data instances. "
"At least two are required.")
minimum_samples = settings.Setting(5)
metric_methode = settings.Setting(11)
xi_value = settings.Setting(0.05)
algorithm_base = settings.Setting(0)
auto_commit = settings.Setting(False)
cut_point = xi_value
want_main_area = True
def __init__(self):
super().__init__()
self.data = None
self.dataset = None
self.annotated_data = None
# GUI
infobox = gui.widgetBox(self.controlArea, "Info")
self.infoa = gui.widgetLabel(infobox, "No data on input yet, waiting to get something.")
self.infob = gui.widgetLabel(infobox, "")
self.infoc = gui.widgetLabel(infobox, "")
self.infod = gui.widgetLabel(infobox, "")
self.optionsBox = gui.widgetBox(self.controlArea, "OPTICS Options")
gui.spin(
self.optionsBox,
self,
"minimum_samples",
minv=1,
maxv=100,
step=1,
label="Core point neighbors ",
callback=self._min_samples_changed
)
gui.comboBox(
self.optionsBox,
self,
"metric_methode",
orientation=Qt.Horizontal,
label="Distance metric: ",
items=[d[0] for d in OPTICS_METRICS],
callback=self._metric_changed
)
gui.doubleSpin(
self.optionsBox,
self,
"xi_value",
minv=(0.000),
maxv=(0.999),
step=(0.001),
label="Minimum steepness: ",
callback=self._xi_changed
)
gui.comboBox(
self.optionsBox,
self,
"algorithm_base",
orientation=Qt.Horizontal,
label="neighborhood algorithm: ",
items=[d[0] for d in OPTICS_ALGORITHM],
callback=self._algorithm_changed
)
self.optionsBox.setDisabled(True)
gui.auto_apply(self.controlArea, self, "auto_commit")
gui.rubber(self.controlArea)
self.controlArea.layout().addStretch()
self.plot = SliderGraph(
x_axis_label="Ordering of the points as processed by OPTICS",
y_axis_label="Reachability distance (epsilon distance)",
callback=self._on_changed
)
self.mainArea.layout().addWidget(self.plot)
def check_data_size(self, data):
if data is None:
return False
if len(data) < 2:
self.Error.not_enough_instances()
return False
return True
def normalizing(self,model):
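        # Append the OPTICS cluster labels to the data as a "Cluster" meta attribute;
        # noise points (label -1) become missing values.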
clusters = [c if c >= 0 else np.nan for c in model.labels_]
k = len(set(clusters) - {np.nan})
clusters = np.array(clusters).reshape(len(self.data), 1)
clust_var = DiscreteVariable("Cluster", values=["C%d" % (x + 1) for x in range(k)])
domain = self.data.domain
attributes, classes = domain.attributes, domain.class_vars
meta_attrs = domain.metas
x, y, metas = self.data.X, self.data.Y, self.data.metas
meta_attrs += (clust_var, )
metas = np.hstack((metas, clusters))
domain = Domain(attributes, classes, meta_attrs)
new_table = Table(domain, x, y, metas, self.data.W)
# self.Outputs.annotated_data.send(new_table)
return new_table
def commit(self):
self.cluster()
return
def cluster(self):
if not self.check_data_size(self.data):
return
model = OPTICS(min_samples=self.minimum_samples,
metric=OPTICS_METRICS[self.metric_methode][1],
xi=self.xi_value,
algorithm=OPTICS_ALGORITHM[self.algorithm_base][1],
)
model.fit(self.data.X)
self._plot_graph(model)
self.result_OPTICS = self.normalizing(model)
self.send_data()
def _plot_graph(self,model):
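        # Plot the reachability profile in OPTICS processing order: one colour per cluster,
        # noise points (label -1) drawn in black.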
reachability = model.reachability_[model.ordering_]
space = np.arange(len(reachability))
reachability[reachability == np.inf] = np.nanmax(reachability[reachability != np.inf])
labels = model.labels_[model.ordering_]
cluster_count = (len(np.unique(labels[labels[:]>=0])))
self.infoc.setText("%d values in the cluster outcome" % cluster_count)
noisy_counter = len(space[labels==-1])
self.infod.setText("%d noisy samples in the leaf cluster" % noisy_counter)
x_plot = space
y_plot = reachability
self.plot.clear_plot()
colors = np.arange(150, (150+cluster_count))
for klaster, color in zip(range(0, cluster_count), colors):
Xk = space[labels == klaster]
Rk = reachability[labels == klaster]
self.plot.plot(Xk, Rk, pen=mkPen(intColor(color), width=2), antialias=True)
self.plot.plot(x_plot[labels==-1], y_plot[labels==-1], pen=mkPen(QColor('black'), width=2), antialias=True)
@Inputs.data
def set_data(self, dataset):
self.Error.clear()
if not self.check_data_size(dataset):
self.optionsBox.setDisabled(True)
self.plot.clear_plot()
self.infoa.setText(
"No data on input yet, waiting to get something.")
self.infob.setText('')
self.infoc.setText('')
self.infod.setText('')
self.dataset = None
self.annotated_data = None
self.Outputs.annotated_data.send(None)
return
self.data = dataset
self.optionsBox.setDisabled(False)
self.numberOfInputInstances = len(self.data)
self.infoa.setText("%d instances in input data set" % self.numberOfInputInstances)
numOfclasses = len(self.data.domain.class_var.values)
self.infob.setText("%d values in the categorical outcome" % numOfclasses)
self.commit()
def checkCommit(self):
if self.commitOnChange:
self.commit()
def send_data(self):
self.Outputs.annotated_data.send(self.result_OPTICS)
def _min_samples_changed(self):
if self.data is None:
return
self.commit()
def _metric_changed(self):
if self.data is None:
return
self.algorithm_base = 0
self.commit()
def _xi_changed(self):
self.commit()
def _algorithm_changed(self):
if self.data is None:
return
if self.algorithm_base != 0:
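            # The selected metric must be supported by the chosen neighbour-search algorithm;
            # otherwise fall back to 'auto'.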
if OPTICS_METRICS[self.metric_methode][1] not in VALID_METRICS[OPTICS_ALGORITHM[self.algorithm_base][1]]:
self.algorithm_base = 0
self.commit()
def _on_changed(self, value):
self.cut_point = value
if __name__ == "__main__":
WidgetPreview(OPTICS_w).run(Table("iris-imbalanced"))
|
[
"Orange.widgets.utils.signals.Input",
"numpy.hstack",
"numpy.array",
"Orange.widgets.utils.widgetpreview.WidgetPreview",
"numpy.arange",
"Orange.widgets.utils.slidergraph.SliderGraph",
"Orange.widgets.utils.signals.Output",
"pyqtgraph.functions.intColor",
"Orange.widgets.settings.Setting",
"Orange.widgets.gui.rubber",
"numpy.nanmax",
"Orange.data.Domain",
"Orange.widgets.gui.widgetLabel",
"sklearn.cluster.OPTICS",
"Orange.widgets.gui.comboBox",
"Orange.widgets.gui.doubleSpin",
"Orange.widgets.gui.widgetBox",
"Orange.data.Table",
"numpy.unique",
"AnyQt.QtGui.QColor",
"Orange.widgets.widget.Msg",
"Orange.widgets.gui.auto_apply",
"Orange.widgets.gui.spin"
] |
[((3836, 3855), 'Orange.widgets.settings.Setting', 'settings.Setting', (['(5)'], {}), '(5)\n', (3852, 3855), False, 'from Orange.widgets import settings\n'), ((3877, 3897), 'Orange.widgets.settings.Setting', 'settings.Setting', (['(11)'], {}), '(11)\n', (3893, 3897), False, 'from Orange.widgets import settings\n'), ((3913, 3935), 'Orange.widgets.settings.Setting', 'settings.Setting', (['(0.05)'], {}), '(0.05)\n', (3929, 3935), False, 'from Orange.widgets import settings\n'), ((3957, 3976), 'Orange.widgets.settings.Setting', 'settings.Setting', (['(0)'], {}), '(0)\n', (3973, 3976), False, 'from Orange.widgets import settings\n'), ((3995, 4018), 'Orange.widgets.settings.Setting', 'settings.Setting', (['(False)'], {}), '(False)\n', (4011, 4018), False, 'from Orange.widgets import settings\n'), ((3547, 3567), 'Orange.widgets.utils.signals.Input', 'Input', (['"""Data"""', 'Table'], {}), "('Data', Table)\n", (3552, 3567), False, 'from Orange.widgets.utils.signals import Input, Output\n'), ((3613, 3634), 'Orange.widgets.utils.signals.Output', 'Output', (['"""Data"""', 'Table'], {}), "('Data', Table)\n", (3619, 3634), False, 'from Orange.widgets.utils.signals import Input, Output\n'), ((3707, 3774), 'Orange.widgets.widget.Msg', 'Msg', (['"""Not enough unique data instances. At least two are required."""'], {}), "('Not enough unique data instances. At least two are required.')\n", (3710, 3774), False, 'from Orange.widgets.widget import Msg\n'), ((4249, 4288), 'Orange.widgets.gui.widgetBox', 'gui.widgetBox', (['self.controlArea', '"""Info"""'], {}), "(self.controlArea, 'Info')\n", (4262, 4288), False, 'from Orange.widgets import widget, gui\n'), ((4310, 4385), 'Orange.widgets.gui.widgetLabel', 'gui.widgetLabel', (['infobox', '"""No data on input yet, waiting to get something."""'], {}), "(infobox, 'No data on input yet, waiting to get something.')\n", (4325, 4385), False, 'from Orange.widgets import widget, gui\n'), ((4407, 4435), 'Orange.widgets.gui.widgetLabel', 'gui.widgetLabel', (['infobox', '""""""'], {}), "(infobox, '')\n", (4422, 4435), False, 'from Orange.widgets import widget, gui\n'), ((4457, 4485), 'Orange.widgets.gui.widgetLabel', 'gui.widgetLabel', (['infobox', '""""""'], {}), "(infobox, '')\n", (4472, 4485), False, 'from Orange.widgets import widget, gui\n'), ((4507, 4535), 'Orange.widgets.gui.widgetLabel', 'gui.widgetLabel', (['infobox', '""""""'], {}), "(infobox, '')\n", (4522, 4535), False, 'from Orange.widgets import widget, gui\n'), ((4563, 4612), 'Orange.widgets.gui.widgetBox', 'gui.widgetBox', (['self.controlArea', '"""OPTICS Options"""'], {}), "(self.controlArea, 'OPTICS Options')\n", (4576, 4612), False, 'from Orange.widgets import widget, gui\n'), ((4621, 4768), 'Orange.widgets.gui.spin', 'gui.spin', (['self.optionsBox', 'self', '"""minimum_samples"""'], {'minv': '(1)', 'maxv': '(100)', 'step': '(1)', 'label': '"""Core point neighbors """', 'callback': 'self._min_samples_changed'}), "(self.optionsBox, self, 'minimum_samples', minv=1, maxv=100, step=1,\n label='Core point neighbors ', callback=self._min_samples_changed)\n", (4629, 4768), False, 'from Orange.widgets import widget, gui\n'), ((4879, 5064), 'Orange.widgets.gui.comboBox', 'gui.comboBox', (['self.optionsBox', 'self', '"""metric_methode"""'], {'orientation': 'Qt.Horizontal', 'label': '"""Distance metric: """', 'items': '[d[0] for d in OPTICS_METRICS]', 'callback': 'self._metric_changed'}), "(self.optionsBox, self, 'metric_methode', orientation=Qt.\n Horizontal, label='Distance metric: ', items=[d[0] for d in\n 
OPTICS_METRICS], callback=self._metric_changed)\n", (4891, 5064), False, 'from Orange.widgets import widget, gui\n'), ((5158, 5301), 'Orange.widgets.gui.doubleSpin', 'gui.doubleSpin', (['self.optionsBox', 'self', '"""xi_value"""'], {'minv': '(0.0)', 'maxv': '(0.999)', 'step': '(0.001)', 'label': '"""Minimum steepness: """', 'callback': 'self._xi_changed'}), "(self.optionsBox, self, 'xi_value', minv=0.0, maxv=0.999,\n step=0.001, label='Minimum steepness: ', callback=self._xi_changed)\n", (5172, 5301), False, 'from Orange.widgets import widget, gui\n'), ((5420, 5617), 'Orange.widgets.gui.comboBox', 'gui.comboBox', (['self.optionsBox', 'self', '"""algorithm_base"""'], {'orientation': 'Qt.Horizontal', 'label': '"""neighborhood algorithm: """', 'items': '[d[0] for d in OPTICS_ALGORITHM]', 'callback': 'self._algorithm_changed'}), "(self.optionsBox, self, 'algorithm_base', orientation=Qt.\n Horizontal, label='neighborhood algorithm: ', items=[d[0] for d in\n OPTICS_ALGORITHM], callback=self._algorithm_changed)\n", (5432, 5617), False, 'from Orange.widgets import widget, gui\n'), ((5762, 5815), 'Orange.widgets.gui.auto_apply', 'gui.auto_apply', (['self.controlArea', 'self', '"""auto_commit"""'], {}), "(self.controlArea, self, 'auto_commit')\n", (5776, 5815), False, 'from Orange.widgets import widget, gui\n'), ((5824, 5852), 'Orange.widgets.gui.rubber', 'gui.rubber', (['self.controlArea'], {}), '(self.controlArea)\n', (5834, 5852), False, 'from Orange.widgets import widget, gui\n'), ((5922, 6088), 'Orange.widgets.utils.slidergraph.SliderGraph', 'SliderGraph', ([], {'x_axis_label': '"""Ordering of the points as processed by OPTICS"""', 'y_axis_label': '"""Reachability distance (epsilon distance)"""', 'callback': 'self._on_changed'}), "(x_axis_label='Ordering of the points as processed by OPTICS',\n y_axis_label='Reachability distance (epsilon distance)', callback=self.\n _on_changed)\n", (5933, 6088), False, 'from Orange.widgets.utils.slidergraph import SliderGraph\n'), ((6939, 6967), 'numpy.hstack', 'np.hstack', (['(metas, clusters)'], {}), '((metas, clusters))\n', (6948, 6967), True, 'import numpy as np\n'), ((6986, 7025), 'Orange.data.Domain', 'Domain', (['attributes', 'classes', 'meta_attrs'], {}), '(attributes, classes, meta_attrs)\n', (6992, 7025), False, 'from Orange.data import Table, Domain, DiscreteVariable\n'), ((7046, 7085), 'Orange.data.Table', 'Table', (['domain', 'x', 'y', 'metas', 'self.data.W'], {}), '(domain, x, y, metas, self.data.W)\n', (7051, 7085), False, 'from Orange.data import Table, Domain, DiscreteVariable\n'), ((7335, 7502), 'sklearn.cluster.OPTICS', 'OPTICS', ([], {'min_samples': 'self.minimum_samples', 'metric': 'OPTICS_METRICS[self.metric_methode][1]', 'xi': 'self.xi_value', 'algorithm': 'OPTICS_ALGORITHM[self.algorithm_base][1]'}), '(min_samples=self.minimum_samples, metric=OPTICS_METRICS[self.\n metric_methode][1], xi=self.xi_value, algorithm=OPTICS_ALGORITHM[self.\n algorithm_base][1])\n', (7341, 7502), False, 'from sklearn.cluster import OPTICS\n'), ((7963, 8010), 'numpy.nanmax', 'np.nanmax', (['reachability[reachability != np.inf]'], {}), '(reachability[reachability != np.inf])\n', (7972, 8010), True, 'import numpy as np\n'), ((8442, 8477), 'numpy.arange', 'np.arange', (['(150)', '(150 + cluster_count)'], {}), '(150, 150 + cluster_count)\n', (8451, 8477), True, 'import numpy as np\n'), ((10691, 10715), 'Orange.data.Table', 'Table', (['"""iris-imbalanced"""'], {}), "('iris-imbalanced')\n", (10696, 10715), False, 'from Orange.data import Table, Domain, 
DiscreteVariable\n'), ((8088, 8121), 'numpy.unique', 'np.unique', (['labels[labels[:] >= 0]'], {}), '(labels[labels[:] >= 0])\n', (8097, 8121), True, 'import numpy as np\n'), ((10663, 10686), 'Orange.widgets.utils.widgetpreview.WidgetPreview', 'WidgetPreview', (['OPTICS_w'], {}), '(OPTICS_w)\n', (10676, 10686), False, 'from Orange.widgets.utils.widgetpreview import WidgetPreview\n'), ((6547, 6565), 'numpy.array', 'np.array', (['clusters'], {}), '(clusters)\n', (6555, 6565), True, 'import numpy as np\n'), ((8798, 8813), 'AnyQt.QtGui.QColor', 'QColor', (['"""black"""'], {}), "('black')\n", (8804, 8813), False, 'from AnyQt.QtGui import QColor\n'), ((8682, 8697), 'pyqtgraph.functions.intColor', 'intColor', (['color'], {}), '(color)\n', (8690, 8697), False, 'from pyqtgraph.functions import intColor\n')]
|
# LSTM (GRU) example: predicting the KODEX200 stock price (2010 ~ present).
# Using the KODEX200 closing price and its 10-day and 40-day moving averages, predict the closing price over the next 10 days.
# The model learns the pattern of the past 20 days (step = 20) of closing prices and moving averages.
# Is prediction even feasible for daily stock prices??
#
# 2018.11.22, Amateur Quant (조성현)
# --------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from MyUtil import YahooData
nInput = 3
nOutput = 3
nStep = 20
nNeuron = 50
# Build training batches from a 2-D array of time series data.
# return : xBatch - RNN input
#          yBatch - RNN output
#
# For example, if step = 2 and n = 3:
# xData = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], ...]
# xBatch = [[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]], ...]
# yBatch = [[[4,5,6], [7,8,9]], [[10,11,12], [13,14,15]], ...]
def createTrainData(xData, step, n=nInput):
m = np.arange(len(xData) - step)
np.random.shuffle(m)
x, y = [], []
for i in m:
a = xData[i:(i+step)]
x.append(a)
xBatch = np.reshape(np.array(x), (len(m), step, n))
for i in m+1:
a = xData[i:(i+step)]
y.append(a)
yBatch = np.reshape(np.array(y), (len(m), step, n))
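    # yBatch is xBatch shifted forward by one time step, i.e. the next-step targets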
return xBatch, yBatch
# Stock price data
#df = YahooData.getStockDataYahoo('^KS11', start='2007-01-01')
df = pd.read_csv('StockData/^KS11.csv', index_col=0, parse_dates=True)
df = pd.DataFrame(df['Close'])
df['ma_10'] = pd.DataFrame(df['Close']).rolling(window=10).mean()
df['ma_40'] = pd.DataFrame(df['Close']).rolling(window=40).mean()
df = df.dropna()
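# Standardize each column (z-score) so the closing price and moving averages share a common scale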
df = (df - df.mean()) / df.std()
# Build the training data.
data = np.array(df)
xBatch, yBatch = createTrainData(data, nStep)
# Build the RNN graph (Wx, Wh). xBatch is fed into the RNN.
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, nStep, nInput])
rnn = tf.nn.rnn_cell.LSTMCell(nNeuron)
#rnn = tf.nn.rnn_cell.GRUCell(nNeuron)
output, state = tf.nn.dynamic_rnn(rnn, x, dtype=tf.float32)
# Build a feed-forward network (Wy) that takes the RNN output and produces the 3 outputs y.
y = tf.placeholder(tf.float32, [None, nStep, nOutput])
inFC = tf.reshape(output, [-1, nNeuron])
fc1 = tf.contrib.layers.fully_connected(inputs=inFC, num_outputs=nNeuron)
predY = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=nOutput, activation_fn=None)
predY = tf.reshape(predY, [-1, nStep, nOutput])
# Define the loss as the mean squared error (MSE): feeding xBatch should reproduce yBatch.
loss = tf.reduce_sum(tf.square(predY - y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
minLoss = optimizer.minimize(loss)
# Run the graph and train (updates Wx, Wh, Wy).
sess = tf.Session()
sess.run(tf.global_variables_initializer())
lossHist = []
for i in range(300):
sess.run(minLoss, feed_dict={x: xBatch, y: yBatch})
if i % 5 == 0:
ploss = sess.run(loss, feed_dict={x: xBatch, y: yBatch})
lossHist.append(ploss)
print(i, "\tLoss:", ploss)
# Predict the next 10 periods: predict one period ahead, then feed the prediction back in to predict the second period,
# and continue in this way up to 10 periods.
nFuture = 10
if len(data) > 100:
    lastData = np.copy(data[-100:])    # plot only the last 100 points of the original data
else:
lastData = np.copy(data)
dx = np.copy(lastData)
estimate = [dx[-1]]
for i in range(nFuture):
    # Predict the next value using the last nStep points as input
px = dx[-nStep:,]
px = np.reshape(px, (1, nStep, nInput))
    # Predict the next value.
yHat = sess.run(predY, feed_dict={x: px})[0][-1]
    # Store the prediction
estimate.append(yHat)
    # Append the prediction so it can be used as input for predicting the following value
dx = np.vstack([dx, yHat])
# Plot the loss history
plt.figure(figsize=(8, 3))
plt.plot(lossHist, color='red')
plt.title("Loss History")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
# Plot the stock price chart and the moving averages.
plt.figure(figsize=(8, 3))
plt.plot(df['Close'], color='red')
plt.plot(df['ma_10'], color='blue')
plt.plot(df['ma_40'], color='green')
plt.title("KODEX-200 stock price")
plt.show()
# Plot the original time series and the predicted time series
CLOSE = 0         # we predict the closing price
estimate = np.array(estimate)
ax1 = np.arange(1, len(lastData[:, CLOSE]) + 1)
ax2 = np.arange(len(lastData), len(lastData) + len(estimate))
plt.figure(figsize=(8, 3))
plt.plot(ax1, lastData[:, CLOSE], 'b-o', color='blue', markersize=4, label='Stock price', linewidth=1)
plt.plot(ax2, estimate[:, CLOSE], 'b-o', color='red', markersize=4, label='Estimate')
plt.axvline(x=ax1[-1], linestyle='dashed', linewidth=1)
plt.legend()
plt.title("KODEX-200 prediction")
plt.show()
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.reshape",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.nn.dynamic_rnn",
"matplotlib.pyplot.xlabel",
"numpy.vstack",
"tensorflow.square",
"pandas.DataFrame",
"tensorflow.train.AdamOptimizer",
"tensorflow.reshape",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.copy",
"tensorflow.reset_default_graph",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axvline",
"numpy.random.shuffle"
] |
[((1239, 1304), 'pandas.read_csv', 'pd.read_csv', (['"""StockData/^KS11.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('StockData/^KS11.csv', index_col=0, parse_dates=True)\n", (1250, 1304), True, 'import pandas as pd\n'), ((1310, 1335), 'pandas.DataFrame', 'pd.DataFrame', (["df['Close']"], {}), "(df['Close'])\n", (1322, 1335), True, 'import pandas as pd\n'), ((1542, 1554), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (1550, 1554), True, 'import numpy as np\n'), ((1647, 1671), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1669, 1671), True, 'import tensorflow as tf\n'), ((1676, 1725), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, nStep, nInput]'], {}), '(tf.float32, [None, nStep, nInput])\n', (1690, 1725), True, 'import tensorflow as tf\n'), ((1734, 1766), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['nNeuron'], {}), '(nNeuron)\n', (1757, 1766), True, 'import tensorflow as tf\n'), ((1822, 1865), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['rnn', 'x'], {'dtype': 'tf.float32'}), '(rnn, x, dtype=tf.float32)\n', (1839, 1865), True, 'import tensorflow as tf\n'), ((1940, 1990), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, nStep, nOutput]'], {}), '(tf.float32, [None, nStep, nOutput])\n', (1954, 1990), True, 'import tensorflow as tf\n'), ((1998, 2031), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, nNeuron]'], {}), '(output, [-1, nNeuron])\n', (2008, 2031), True, 'import tensorflow as tf\n'), ((2048, 2115), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'inFC', 'num_outputs': 'nNeuron'}), '(inputs=inFC, num_outputs=nNeuron)\n', (2081, 2115), True, 'import tensorflow as tf\n'), ((2124, 2214), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'fc1', 'num_outputs': 'nOutput', 'activation_fn': 'None'}), '(inputs=fc1, num_outputs=nOutput,\n activation_fn=None)\n', (2157, 2214), True, 'import tensorflow as tf\n'), ((2223, 2262), 'tensorflow.reshape', 'tf.reshape', (['predY', '[-1, nStep, nOutput]'], {}), '(predY, [-1, nStep, nOutput])\n', (2233, 2262), True, 'import tensorflow as tf\n'), ((2392, 2435), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2414, 2435), True, 'import tensorflow as tf\n'), ((2527, 2539), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2537, 2539), True, 'import tensorflow as tf\n'), ((3051, 3068), 'numpy.copy', 'np.copy', (['lastData'], {}), '(lastData)\n', (3058, 3068), True, 'import numpy as np\n'), ((3446, 3472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (3456, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3504), 'matplotlib.pyplot.plot', 'plt.plot', (['lossHist'], {'color': '"""red"""'}), "(lossHist, color='red')\n", (3481, 3504), True, 'import matplotlib.pyplot as plt\n'), ((3505, 3530), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss History"""'], {}), "('Loss History')\n", (3514, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3531, 3550), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3541, 3550), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (3561, 3569), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(3578, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3628), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (3612, 3628), True, 'import matplotlib.pyplot as plt\n'), ((3629, 3663), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Close']"], {'color': '"""red"""'}), "(df['Close'], color='red')\n", (3637, 3663), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3699), 'matplotlib.pyplot.plot', 'plt.plot', (["df['ma_10']"], {'color': '"""blue"""'}), "(df['ma_10'], color='blue')\n", (3672, 3699), True, 'import matplotlib.pyplot as plt\n'), ((3700, 3736), 'matplotlib.pyplot.plot', 'plt.plot', (["df['ma_40']"], {'color': '"""green"""'}), "(df['ma_40'], color='green')\n", (3708, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3737, 3771), 'matplotlib.pyplot.title', 'plt.title', (['"""KODEX-200 stock price"""'], {}), "('KODEX-200 stock price')\n", (3746, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3780, 3782), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3862), 'numpy.array', 'np.array', (['estimate'], {}), '(estimate)\n', (3852, 3862), True, 'import numpy as np\n'), ((3973, 3999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (3983, 3999), True, 'import matplotlib.pyplot as plt\n'), ((4000, 4107), 'matplotlib.pyplot.plot', 'plt.plot', (['ax1', 'lastData[:, CLOSE]', '"""b-o"""'], {'color': '"""blue"""', 'markersize': '(4)', 'label': '"""Stock price"""', 'linewidth': '(1)'}), "(ax1, lastData[:, CLOSE], 'b-o', color='blue', markersize=4, label=\n 'Stock price', linewidth=1)\n", (4008, 4107), True, 'import matplotlib.pyplot as plt\n'), ((4103, 4193), 'matplotlib.pyplot.plot', 'plt.plot', (['ax2', 'estimate[:, CLOSE]', '"""b-o"""'], {'color': '"""red"""', 'markersize': '(4)', 'label': '"""Estimate"""'}), "(ax2, estimate[:, CLOSE], 'b-o', color='red', markersize=4, label=\n 'Estimate')\n", (4111, 4193), True, 'import matplotlib.pyplot as plt\n'), ((4189, 4244), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'ax1[-1]', 'linestyle': '"""dashed"""', 'linewidth': '(1)'}), "(x=ax1[-1], linestyle='dashed', linewidth=1)\n", (4200, 4244), True, 'import matplotlib.pyplot as plt\n'), ((4246, 4258), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4256, 4258), True, 'import matplotlib.pyplot as plt\n'), ((4259, 4292), 'matplotlib.pyplot.title', 'plt.title', (['"""KODEX-200 prediction"""'], {}), "('KODEX-200 prediction')\n", (4268, 4292), True, 'import matplotlib.pyplot as plt\n'), ((4293, 4303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4301, 4303), True, 'import matplotlib.pyplot as plt\n'), ((835, 855), 'numpy.random.shuffle', 'np.random.shuffle', (['m'], {}), '(m)\n', (852, 855), True, 'import numpy as np\n'), ((2354, 2374), 'tensorflow.square', 'tf.square', (['(predY - y)'], {}), '(predY - y)\n', (2363, 2374), True, 'import tensorflow as tf\n'), ((2549, 2582), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2580, 2582), True, 'import tensorflow as tf\n'), ((2965, 2985), 'numpy.copy', 'np.copy', (['data[-100:]'], {}), '(data[-100:])\n', (2972, 2985), True, 'import numpy as np\n'), ((3032, 3045), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (3039, 3045), True, 'import numpy as np\n'), ((3181, 3215), 'numpy.reshape', 'np.reshape', (['px', '(1, nStep, nInput)'], {}), '(px, (1, nStep, nInput))\n', (3191, 3215), True, 'import numpy as 
np\n'), ((3403, 3424), 'numpy.vstack', 'np.vstack', (['[dx, yHat]'], {}), '([dx, yHat])\n', (3412, 3424), True, 'import numpy as np\n'), ((969, 980), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (977, 980), True, 'import numpy as np\n'), ((1098, 1109), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1106, 1109), True, 'import numpy as np\n'), ((1350, 1375), 'pandas.DataFrame', 'pd.DataFrame', (["df['Close']"], {}), "(df['Close'])\n", (1362, 1375), True, 'import pandas as pd\n'), ((1416, 1441), 'pandas.DataFrame', 'pd.DataFrame', (["df['Close']"], {}), "(df['Close'])\n", (1428, 1441), True, 'import pandas as pd\n')]
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model_utils import *
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
filter size for the convolution filter. input N would create
a N x N filter.
"""
super(down, self).__init__()
# Initialize convolutional layers.
# self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
# self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=outChannels, kernel_size=filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv2 = MetaConv2dLayer(in_channels=outChannels, out_channels=outChannels, kernel_size=filterSize, stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x, params=None):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Average pooling with kernel size 2 (2 x 2).
x = F.avg_pool2d(x, 2)
# (Convolution + Leaky ReLU) x 2
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
x = F.leaky_relu(self.conv2(x, params=param_dict['conv2']), negative_slope = 0.1)
else:
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
x = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
return x
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used for setting input and output channels for
the second convolutional layer.
"""
super(up, self).__init__()
# Initialize convolutional layers.
# self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
# (2 * outChannels) is used for accommodating skip connection.
# self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = MetaConv2dLayer(in_channels=2 * outChannels, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
def forward(self, x, skpCn, params=None):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Bilinear interpolation with scaling 2.
x = F.interpolate(x, scale_factor=2, mode='bilinear')
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1), params=param_dict['conv2']), negative_slope = 0.1)
else:
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope = 0.1)
return x
class UNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
class backWarp(nn.Module):
"""
A class for creating a backwarping object.
This is used for backwarping to an image:
Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
it generates I0 <-- backwarp(F_0_1, I1).
...
Methods
-------
    forward(img, flow)
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
"""
def __init__(self, W, H, device):
"""
Parameters
----------
W : int
width of the image.
H : int
height of the image.
device : device
computation device (cpu/cuda).
"""
super(backWarp, self).__init__()
# create a grid
gridX, gridY = np.meshgrid(np.arange(W), np.arange(H))
self.W = W
self.H = H
self.gridX = torch.tensor(gridX, requires_grad=False, device=device)
self.gridY = torch.tensor(gridY, requires_grad=False, device=device)
def forward(self, img, flow):
"""
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
I0 = backwarp(I1, F_0_1)
Parameters
----------
img : tensor
frame I1.
flow : tensor
optical flow from I0 and I1: F_0_1.
Returns
-------
tensor
frame I0.
"""
# Extract horizontal and vertical flows.
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
x = self.gridX.unsqueeze(0).expand_as(u).float() + u
y = self.gridY.unsqueeze(0).expand_as(v).float() + v
# range -1 to 1
x = 2*(x/self.W - 0.5)
y = 2*(y/self.H - 0.5)
# stacking X and Y
grid = torch.stack((x,y), dim=3)
# Sample pixels using bilinear interpolation.
imgOut = torch.nn.functional.grid_sample(img, grid)
return imgOut
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
t = np.linspace(0.125, 0.875, 7)
def getFlowCoeff (indices, device):
"""
Gets flow coefficients used for calculating intermediate optical
flows from optical flows between I0 and I1: F_0_1 and F_1_0.
F_t_0 = C00 x F_0_1 + C01 x F_1_0
F_t_1 = C10 x F_0_1 + C11 x F_1_0
where,
C00 = -(1 - t) x t
C01 = t x t
C10 = (1 - t) x (1 - t)
C11 = -t x (1 - t)
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C00, C01, C10, C11.
"""
# Convert indices tensor to numpy array
ind = indices.detach().numpy()
C11 = C00 = - (1 - (t[ind])) * (t[ind])
C01 = (t[ind]) * (t[ind])
C10 = (1 - (t[ind])) * (1 - (t[ind]))
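    # Reshape each coefficient vector to (N, 1, 1, 1) so it broadcasts over the (N, C, H, W) flow tensors.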
return torch.Tensor(C00)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C01)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C10)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C11)[None, None, None, :].permute(3, 0, 1, 2).to(device)
def getWarpCoeff (indices, device):
"""
Gets coefficients used for calculating final intermediate
frame `It_gen` from backwarped images using flows F_t_0 and F_t_1.
It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1) / (C0 x V_t_0 + C1 x V_t_1)
where,
C0 = 1 - t
C1 = t
V_t_0, V_t_1 --> visibility maps
g_I_0_F_t_0, g_I_1_F_t_1 --> backwarped intermediate frames
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C0 and C1.
"""
# Convert indices tensor to numpy array
ind = indices.detach().numpy()
C0 = 1 - t[ind]
C1 = t[ind]
return torch.Tensor(C0)[None, None, None, :].permute(3, 0, 1, 2).to(device), torch.Tensor(C1)[None, None, None, :].permute(3, 0, 1, 2).to(device)
class SuperSloMoModel(nn.Module):
def __init__(self, device):
super(SuperSloMoModel, self).__init__()
self.device = device
self.flowComp = UNet(6, 4)
self.arbTimeFlowIntrp = UNet(20, 5)
self.backwarp = None
def forward(self, I0, I1, ind):
w, h = I0.size(3), I0.size(2)
s = 6 # bits to shift
padW, padH = 0, 0
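        # Pad the frames so that width and height are multiples of 2**6 = 64;
        # the padding is removed again by paddingOutput below.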
if w != ((w >> s) << s):
padW = (((w >> s) + 1) << s) - w
if h != ((h >> s) << s):
padH = (((h >> s) + 1) << s) - h
paddingInput = nn.ReflectionPad2d(padding=[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2])
paddingOutput = nn.ReflectionPad2d(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH])
I0 = paddingInput(I0)
I1 = paddingInput(I1)
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = self.backwarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.backwarp(I1, F_t_1_f)
wCoeff = getWarpCoeff(ind, self.device)
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
warped_I0, warped_I1 = self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)
Ft_p = paddingOutput(Ft_p)
F_0_1, F_1_0 = paddingOutput(F_0_1), paddingOutput(F_1_0)
g_I0_F_t_0, g_I1_F_t_1 = paddingOutput(g_I0_F_t_0), paddingOutput(g_I1_F_t_1)
warped_I0, warped_I1 = paddingOutput(warped_I0), paddingOutput(warped_I1)
#return Ft_p, # output image
# (F_0_1, F_1_0), # bidirectional flow maps
# (g_I0_F_t_0, g_I1_F_t_1), # warped intermediate images
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)) # warped input image (0-1, 1-0)
return Ft_p, \
(F_0_1, F_1_0), \
(g_I0_F_t_0, g_I1_F_t_1), \
(warped_I0, warped_I1)
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1))
class MetaUNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(MetaUNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = MetaConv2dLayer(in_channels=inChannels, out_channels=32, kernel_size=7, stride=1, padding=3)
self.conv2 = MetaConv2dLayer(in_channels=32, out_channels=32, kernel_size=7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = MetaConv2dLayer(in_channels=32, out_channels=outChannels, kernel_size=3, stride=1, padding=1)
def forward(self, x, params=None):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
x = F.leaky_relu(self.conv1(x, params=param_dict['conv1']), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x, params=param_dict['conv2']), negative_slope = 0.1)
s2 = self.down1(s1, params=param_dict['down1'])
s3 = self.down2(s2, params=param_dict['down2'])
s4 = self.down3(s3, params=param_dict['down3'])
s5 = self.down4(s4, params=param_dict['down4'])
x = self.down5(s5, params=param_dict['down5'])
x = self.up1(x, s5, params=param_dict['up1'])
x = self.up2(x, s4, params=param_dict['up2'])
x = self.up3(x, s3, params=param_dict['up3'])
x = self.up4(x, s2, params=param_dict['up4'])
x = self.up5(x, s1, params=param_dict['up5'])
x = F.leaky_relu(self.conv3(x, params=param_dict['conv3']), negative_slope = 0.1)
else:
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
class MetaSuperSloMo(nn.Module):
def __init__(self, device, resume=False):
super(MetaSuperSloMo, self).__init__()
self.device = device
self.flowComp = MetaUNet(6, 4)
self.arbTimeFlowIntrp = MetaUNet(20, 5)
self.backwarp = None
if resume:
print('Loading model: pretrained_models/superslomo_base.pth')
# checkpoint = torch.load('pretrained_models/meta_superslomo.pth')
checkpoint = torch.load('pretrained_models/superslomo_base.pth')
self.flowComp.load_state_dict(checkpoint['state_dictFC'])
self.arbTimeFlowIntrp.load_state_dict(checkpoint['state_dictAT'])
def forward(self, I0, I1, ind=3, params=None, **kwargs):
ind = ind * torch.ones(I0.size(0), dtype=int)
w, h = I0.size(3), I0.size(2)
s = 6 # bits to shift
padW, padH = 0, 0
if w != ((w >> s) << s):
padW = (((w >> s) + 1) << s) - w
if h != ((h >> s) << s):
padH = (((h >> s) + 1) << s) - h
paddingInput = nn.ReflectionPad2d(padding=[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2])
paddingOutput = nn.ReflectionPad2d(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH])
I0 = paddingInput(I0)
I1 = paddingInput(I1)
param_dict = dict()
if params is not None:
param_dict = extract_top_level_dict(current_dict=params)
flowOut = self.flowComp(torch.cat((I0, I1), dim=1), params=param_dict['flowComp'])
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1),
params=param_dict['arbTimeFlowIntrp'])
else:
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:, :2, :, :]
F_1_0 = flowOut[:, 2:, :, :]
fCoeff = getFlowCoeff(ind, self.device)
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
if self.backwarp is None or self.backwarp.W != I0.size(3) or self.backwarp.H != I0.size(2):
self.backwarp = backWarp(I0.size(3), I0.size(2), self.device) # make grid
g_I0_F_t_0 = self.backwarp(I0, F_t_0)
g_I1_F_t_1 = self.backwarp(I1, F_t_1)
intrpOut = self.arbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = self.backwarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.backwarp(I1, F_t_1_f)
wCoeff = getWarpCoeff(ind, self.device)
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
warped_I0, warped_I1 = self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)
Ft_p = paddingOutput(Ft_p)
F_0_1, F_1_0 = paddingOutput(F_0_1), paddingOutput(F_1_0)
g_I0_F_t_0, g_I1_F_t_1 = paddingOutput(g_I0_F_t_0), paddingOutput(g_I1_F_t_1)
warped_I0, warped_I1 = paddingOutput(warped_I0), paddingOutput(warped_I1)
#return Ft_p, # output image
# (F_0_1, F_1_0), # bidirectional flow maps
# (g_I0_F_t_0, g_I1_F_t_1), # warped intermediate images
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1)) # warped input image (0-1, 1-0)
return Ft_p, {
'bidirectional_flow': (F_0_1, F_1_0),
'warped_intermediate_frames': (g_I0_F_t_0, g_I1_F_t_1),
'warped_input_frames': (warped_I0, warped_I1)}
# (self.backwarp(I0, F_1_0), self.backwarp(I1, F_0_1))
# return Ft_p
def zero_grad(self, params=None):
if params is None:
for param in self.parameters():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
else:
for name, param in params.items():
if param.requires_grad == True:
if param.grad is not None:
if torch.sum(param.grad) > 0:
print(param.grad)
param.grad.zero_()
params[name].grad = None
def restore_backup_stats(self):
"""
Reset stored batch statistics from the stored backup.
"""
pass # no batch statistics used
|
[
"torch.nn.functional.grid_sample",
"torch.nn.ReflectionPad2d",
"torch.load",
"torch.stack",
"torch.Tensor",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.functional.sigmoid",
"torch.tensor",
"numpy.linspace",
"torch.cat",
"torch.sum",
"torch.nn.functional.interpolate",
"numpy.arange"
] |
[((9659, 9687), 'numpy.linspace', 'np.linspace', (['(0.125)', '(0.875)', '(7)'], {}), '(0.125, 0.875, 7)\n', (9670, 9687), True, 'import numpy as np\n'), ((2256, 2274), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2268, 2274), True, 'import torch.nn.functional as F\n'), ((4759, 4808), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(x, scale_factor=2, mode='bilinear')\n", (4772, 4808), True, 'import torch.nn.functional as F\n'), ((6207, 6256), 'torch.nn.Conv2d', 'nn.Conv2d', (['inChannels', '(32)', '(7)'], {'stride': '(1)', 'padding': '(3)'}), '(inChannels, 32, 7, stride=1, padding=3)\n', (6216, 6256), True, 'import torch.nn as nn\n'), ((6278, 6319), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(7)'], {'stride': '(1)', 'padding': '(3)'}), '(32, 32, 7, stride=1, padding=3)\n', (6287, 6319), True, 'import torch.nn as nn\n'), ((6700, 6750), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'outChannels', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(32, outChannels, 3, stride=1, padding=1)\n', (6709, 6750), True, 'import torch.nn as nn\n'), ((8448, 8503), 'torch.tensor', 'torch.tensor', (['gridX'], {'requires_grad': '(False)', 'device': 'device'}), '(gridX, requires_grad=False, device=device)\n', (8460, 8503), False, 'import torch\n'), ((8525, 8580), 'torch.tensor', 'torch.tensor', (['gridY'], {'requires_grad': '(False)', 'device': 'device'}), '(gridY, requires_grad=False, device=device)\n', (8537, 8580), False, 'import torch\n'), ((9388, 9414), 'torch.stack', 'torch.stack', (['(x, y)'], {'dim': '(3)'}), '((x, y), dim=3)\n', (9399, 9414), False, 'import torch\n'), ((9485, 9527), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['img', 'grid'], {}), '(img, grid)\n', (9516, 9527), False, 'import torch\n'), ((12444, 12535), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', ([], {'padding': '[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2]'}), '(padding=[padW // 2, padW - padW // 2, padH // 2, padH - \n padH // 2])\n', (12462, 12535), True, 'import torch.nn as nn\n'), ((12555, 12653), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', ([], {'padding': '[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH]'}), '(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2,\n padH // 2 - padH])\n', (12573, 12653), True, 'import torch.nn as nn\n'), ((13520, 13553), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['intrpOut[:, 4:5, :, :]'], {}), '(intrpOut[:, 4:5, :, :])\n', (13529, 13553), True, 'import torch.nn.functional as F\n'), ((19138, 19229), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', ([], {'padding': '[padW // 2, padW - padW // 2, padH // 2, padH - padH // 2]'}), '(padding=[padW // 2, padW - padW // 2, padH // 2, padH - \n padH // 2])\n', (19156, 19229), True, 'import torch.nn as nn\n'), ((19249, 19347), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', ([], {'padding': '[0 - padW // 2, padW // 2 - padW, 0 - padH // 2, padH // 2 - padH]'}), '(padding=[0 - padW // 2, padW // 2 - padW, 0 - padH // 2,\n padH // 2 - padH])\n', (19267, 19347), True, 'import torch.nn as nn\n'), ((21223, 21256), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['intrpOut[:, 4:5, :, :]'], {}), '(intrpOut[:, 4:5, :, :])\n', (21232, 21256), True, 'import torch.nn.functional as F\n'), ((8361, 8373), 'numpy.arange', 'np.arange', (['W'], {}), '(W)\n', (8370, 8373), True, 'import numpy as np\n'), ((8375, 8387), 'numpy.arange', 'np.arange', (['H'], {}), '(H)\n', (8384, 8387), True, 
'import numpy as np\n'), ((12745, 12771), 'torch.cat', 'torch.cat', (['(I0, I1)'], {'dim': '(1)'}), '((I0, I1), dim=1)\n', (12754, 12771), False, 'import torch\n'), ((13326, 13404), 'torch.cat', 'torch.cat', (['(I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0)'], {'dim': '(1)'}), '((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1)\n', (13335, 13404), False, 'import torch\n'), ((18546, 18597), 'torch.load', 'torch.load', (['"""pretrained_models/superslomo_base.pth"""'], {}), "('pretrained_models/superslomo_base.pth')\n", (18556, 18597), False, 'import torch\n'), ((19571, 19597), 'torch.cat', 'torch.cat', (['(I0, I1)'], {'dim': '(1)'}), '((I0, I1), dim=1)\n', (19580, 19597), False, 'import torch\n'), ((20223, 20301), 'torch.cat', 'torch.cat', (['(I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0)'], {'dim': '(1)'}), '((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1)\n', (20232, 20301), False, 'import torch\n'), ((20408, 20434), 'torch.cat', 'torch.cat', (['(I0, I1)'], {'dim': '(1)'}), '((I0, I1), dim=1)\n', (20417, 20434), False, 'import torch\n'), ((21029, 21107), 'torch.cat', 'torch.cat', (['(I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0)'], {'dim': '(1)'}), '((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1)\n', (21038, 21107), False, 'import torch\n'), ((5176, 5200), 'torch.cat', 'torch.cat', (['(x, skpCn)', '(1)'], {}), '((x, skpCn), 1)\n', (5185, 5200), False, 'import torch\n'), ((5469, 5493), 'torch.cat', 'torch.cat', (['(x, skpCn)', '(1)'], {}), '((x, skpCn), 1)\n', (5478, 5493), False, 'import torch\n'), ((10586, 10603), 'torch.Tensor', 'torch.Tensor', (['C00'], {}), '(C00)\n', (10598, 10603), False, 'import torch\n'), ((10657, 10674), 'torch.Tensor', 'torch.Tensor', (['C01'], {}), '(C01)\n', (10669, 10674), False, 'import torch\n'), ((10728, 10745), 'torch.Tensor', 'torch.Tensor', (['C10'], {}), '(C10)\n', (10740, 10745), False, 'import torch\n'), ((10799, 10816), 'torch.Tensor', 'torch.Tensor', (['C11'], {}), '(C11)\n', (10811, 10816), False, 'import torch\n'), ((11735, 11751), 'torch.Tensor', 'torch.Tensor', (['C0'], {}), '(C0)\n', (11747, 11751), False, 'import torch\n'), ((11805, 11821), 'torch.Tensor', 'torch.Tensor', (['C1'], {}), '(C1)\n', (11817, 11821), False, 'import torch\n'), ((22850, 22871), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (22859, 22871), False, 'import torch\n'), ((23153, 23174), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (23162, 23174), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
This file contains all the functions needed to calculate an adaptive fractionation treatment plan. value_eval and result_calc_BEDNT are the only functions that should be called directly.
This file requires all sparing factors to be known; it is therefore not suited to active treatment planning but to analyzing patient data.
value_eval and result_calc_BEDNT are the most essential functions: the results from value_eval can be used to calculate a treatment plan with result_calc_BEDNT.
The optimal policies for each fraction can also be extracted manually (pol4 = first fraction; in pol, the first index is the last fraction and the last index is the first fraction), but one must know which index represents which sparing factor.
Note: although all sparing factors are known up front, the treatment planning is simulated as if a new sparing factor were obtained at each fraction!
This program uses a discrete state space and does not interpolate between states. It is therefore less precise than the interpolation programs.
"""
import numpy as np
from scipy.stats import truncnorm
import time
from scipy.stats import invgamma
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
'''produces a truncated normal distribution'''
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
def std_calc(measured_data,alpha,beta):
'''calculates the most likely standard deviation for a list of k sparing factors and an inverse-gamma conjugate prior
measured_data: list/array with k sparing factors
alpha: shape of inverse-gamma distribution
    beta: scale of inverse-gamma distribution
return: most likely std based on the measured data and inverse-gamma prior'''
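    # The grid search below evaluates the unnormalized posterior of the variance
    # (inverse-gamma prior x normal likelihood, using the sample variance of the data)
    # and returns the square root of the maximizing variance, i.e. the MAP estimate of the std.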
n = len(measured_data)
var_values = np.arange(0.00001,0.25,0.00001)
likelihood_values = np.zeros(len(var_values))
for index,value in enumerate(var_values):
likelihood_values[index] = value**(-alpha-1)/value**(n/2)*np.exp(-beta/value)*np.exp(-np.var(measured_data)*n/(2*value))
std = (np.sqrt(var_values[np.argmax(likelihood_values)]))
return std
def distribution_update(sparing_factors, alpha, beta):
    '''produces the updated probability distribution for each fraction based on a variance prior
    sparing_factors: list/array of k sparing factors
    alpha: shape of inverse-gamma distribution
    beta: scale of inverse-gamma distribution
return: k-1 dimensional mean and std arrays starting from the second sparing factor (index 1)
'''
means = np.zeros(len(sparing_factors))
stds = np.zeros(len(sparing_factors))
for i in range(len(sparing_factors)):
means[i] = np.mean(sparing_factors[:(i+1)])
stds[i] = std_calc(sparing_factors[:(i+1)],alpha,beta)
means = np.delete(means,0)
stds = np.delete(stds,0) #we get rid of the first value as it is only the planning value and not used in a fraction
return [means,stds]
def updated_distribution_calc(data,sparing_factors):
    '''calculates the updated distribution based on prior data that is used to set up an inverse-gamma distribution
    data shape: nxk where n is the number of patients and k the number of sparing factors per patient
sparing_factors shape: list/array with k entries with the first sparing factor being the planning sparing factor, therefore not being included in the treatment
return: updated means and stds for k-1 fractions.'''
variances = data.var(axis = 1)
alpha,loc,beta = invgamma.fit(variances, floc = 0) #here beta is the scale parameter
[means,stds] = distribution_update(sparing_factors,alpha,beta)
return[means,stds]
def probdistributions(means,stds):
'''produces the truncated normal distribution for several means and standard deviations
means: list/array of n means
stds: list/array of n standard deviations
    return: n discretized probability distributions over sparing-factor values in [0, 1.40] (bin width 0.01)'''
distributions = np.zeros(141*len(means)).reshape(len(means),141)
for i in range(len(means)):
X = get_truncated_normal(means[i], stds[i], low=0, upp=1.4)
for index,value in enumerate(np.arange(0,1.41,0.01)):
distributions[i][index] = X.cdf(value+0.004999999999999999999)-X.cdf(value-0.005)
return distributions
def BED_calc0( dose, ab,sparing = 1):
BED = sparing*dose*(1+(sparing*dose)/ab)
return BED
def BED_calc( sf, ab,actionspace):
BED = np.outer(sf,actionspace)*(1+np.outer(sf,actionspace)/ab) #produces a sparing factors x actions space array
return BED
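
# Worked example of the BED formulas above (added for illustration, not part of the original file):
# a single 6 Gy fraction on the tumor (sparing = 1, alpha/beta = 10 Gy) gives
#     BED = 1 * 6 * (1 + (1 * 6) / 10) = 6 * 1.6 = 9.6 Gy,
# while the same fraction seen by the OAR with sparing factor 0.9 and alpha/beta = 3 Gy gives
#     BED = 0.9 * 6 * (1 + (0.9 * 6) / 3) = 5.4 * 2.8 = 15.12 Gy.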
def value_eval(sparing_factors,data,abt = 10,abn = 3,bound = 90,riskfactor = 0):
'''calculates the best policy for a list of k sparing factors with k-1 fractions based on a dynamic programming algorithm. Estimation of the probability distribution is based on prior patient data
sparing_factors: list/array of k sparing factors. A planning sparing factor is necessary!
data: nxk dimensional data of n prior patients with k sparing factors.
abt: alpha beta ratio of tumor
abn: alpha beta ratio of Organ at risk
bound: upper limit of BED in OAR
riskfactor: "risk reducing" factor of zero is a full adaptive fractionation algorithm while a sparing factor of 0.1 slightly forces the algorithm to stay close to the 6Gy per fraction plan. a risk factor of 1 results in a 6Gy per fraction plan.
return:
Values: a sparing_factor-2 x BEDT x sf dimensional matrix with the value of each BEDT/sf state
Values4: Values of the first fraction
policy: a sparing_factor-2 x BEDT x sf dimensional matrix with the policy of each BEDT/sf state. fourth index = first fraction, first index = last fraction
policy4: policy of the first fraction'''
sf= np.arange(0,1.41,0.01) #list of all possible sparing factors
BEDT = np.arange(0,90.3,0.1) #list of all possible Biological effective doses
Values = np.zeros(len(BEDT)*len(sf)*4).reshape(4,len(BEDT),len(sf)) #2d values list with first indice being the BED and second being the sf
actionspace = np.arange(0,22.4,0.1) #list of all possible dose actions
[means,stds] =updated_distribution_calc(data,sparing_factors)
distributions = probdistributions(means,stds)
policy = np.zeros((4,len(BEDT),len(sf)))
upperbound = 90.2
start = time.time()
#here we add the calculation of the distance to the standard treatment
useless,calculator = np.meshgrid(np.zeros(len(actionspace)),sf) #calculator is matrix that has the correct sparing factors
actionspace_expand,useless = np.meshgrid(actionspace,sf)
risk_penalty = abs(6/calculator-actionspace_expand)
delivered_doses = np.round(BED_calc(sf,abn,actionspace),1)
    BEDT_rew = BED_calc(1, abt,actionspace) #this is the reward: the BED deposited in the tumor for each possible dose action.
BEDT_transformed, meaningless = np.meshgrid(BEDT_rew,np.zeros(len(sf)))
risk_penalty[0] = risk_penalty[1]
for update_loop in range (0,5):
prob = distributions[update_loop]
for state in range(0,5-update_loop): #We have five fractionations with 2 special cases 0 and 4
print(str(state+1) +' loop done')
if state == 4: #first state with no prior dose delivered so we dont loop through BEDT
future_bed = delivered_doses
                future_bed[future_bed > upperbound] = upperbound #any dose surpassing the upper bound is clipped to it. Doses at the upper bound are penalized so strongly that the program avoids them at all costs. (The upper bound can be adapted.)
future_values_prob = (Values[state-1][(future_bed*10).astype(int)]*prob).sum(axis = 2) #in this array are all future values multiplied with the probability of getting there. shape = sparing factors x actionspace
penalties = np.zeros(future_bed.shape)
penalties[future_bed > bound] = -(future_bed[future_bed > bound]-bound)*5
Vs = future_values_prob + BEDT_transformed + penalties - risk_penalty*riskfactor
policy4 = Vs.argmax(axis=1)
Values4 = Vs.max(axis=1)
else:
future_values_prob_all = (Values[state-1]*prob).sum(axis = 1)
for bed in range(len(BEDT)): #this and the next for loop allow us to loop through all states
future_bed = delivered_doses + bed/10
                    future_bed[future_bed > upperbound] = upperbound #any dose surpassing the upper bound is clipped to it.
if state == 0: #last state no more further values to add
penalties = np.zeros(future_bed.shape)
penalties[future_bed > bound] = -(future_bed[future_bed > bound]-bound)*5
penalties[future_bed == upperbound] = -10000 #here we produced the penalties for all the values surpassing the limit
Vs = BEDT_transformed + penalties# Value of each sparing factor for each action
else:
penalties = np.zeros(future_bed.shape)
penalties[future_bed == upperbound] = -100
future_values_prob = (future_values_prob_all[(future_bed*10).astype(int)])#in this array are all future values multiplied with the probability of getting there. shape = sparing factors x actionspace
Vs = future_values_prob + BEDT_transformed + penalties - risk_penalty*riskfactor
best_action = Vs.argmax(axis=1)
valer = Vs.max(axis=1)
policy[state][bed] = best_action
Values[state][bed] = valer
end = time.time()
print('time elapsed = ' +str(end - start))
return [Values,policy,Values4,policy4]
def result_calc_BEDNT(pol4,pol,sparing_factors,abt = 10,abn = 3): #this function calculates the fractionation plan according to the reinforcement learning
    '''This function gives the treatment plan for a set of sparing factors, based on the sparing factors that have been used to calculate the optimal policy.
    The pol4 and pol matrices are the ones that are returned by the value_eval function.
pol4: first fraction policy
pol: second - fifth fraction policy
sparing_factors: sparing factors that should be used to make a plan. list starting from first fraction'''
actionspace = np.arange(0,22.4,0.1) #list of all possible dose actions
total_bedt = BED_calc0(actionspace[pol4[round(sparing_factors[0]*100)]],abt)
total_bednt = BED_calc0(actionspace[pol4[round(sparing_factors[0]*100)]],abn,sparing_factors[0])
print('fraction 1 dose delivered: ',actionspace[pol4[round(sparing_factors[0]*100)]])
print('total accumulated biological effective dose in tumor; fraction 1 = ',round(total_bedt,1))
print('total accumulated biological effective dose in normal tissue; fraction 1 = ',round(total_bednt,1))
for index,fraction in enumerate(range(3,-1,-1)):
if fraction == 0:
dose_action = (-sparing_factors[index+1]+np.sqrt(sparing_factors[index+1]**2+4*sparing_factors[index+1]**2*(90-total_bednt)/abn))/(2*sparing_factors[index+1]**2/abn)
else:
dose_action = actionspace[pol[fraction][(round(total_bednt,1)*10).astype(int)][round(sparing_factors[index+1]*100)].astype(int)]
dose_delivered = BED_calc0(dose_action,abt)
total_bedt += dose_delivered
total_bednt += BED_calc0(dose_action,abn,sparing_factors[index+1])
print('fraction ', index+2, 'dose delivered: ', round(dose_action,1))
print('total accumulated dose in tumor; fraction ', index+2, '=', round(total_bedt,1))
print('total accumulated dose in normal tissue; fraction ', index+2, '=', round(total_bednt,1))
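
# Hedged usage sketch (added for illustration, not part of the original file): it follows the
# workflow described in the module docstring, with synthetic prior data and synthetic sparing
# factors. All numbers are made up, and value_eval is compute-heavy (it can take minutes).
if __name__ == '__main__':
    prior_data = np.random.normal(0.9, 0.04, size=(50, 6))      # 50 hypothetical patients x 6 sparing factors
    sparing_factors = [0.90, 0.92, 0.88, 0.91, 0.90, 0.89]    # planning factor + 5 fraction factors
    [Values, policy, Values4, policy4] = value_eval(sparing_factors, prior_data)
    result_calc_BEDNT(policy4, policy, sparing_factors)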
|
[
"numpy.mean",
"numpy.sqrt",
"scipy.stats.invgamma.fit",
"numpy.delete",
"numpy.argmax",
"numpy.exp",
"numpy.zeros",
"numpy.outer",
"numpy.var",
"scipy.stats.truncnorm",
"numpy.meshgrid",
"time.time",
"numpy.arange"
] |
[((1290, 1357), 'scipy.stats.truncnorm', 'truncnorm', (['((low - mean) / sd)', '((upp - mean) / sd)'], {'loc': 'mean', 'scale': 'sd'}), '((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)\n', (1299, 1357), False, 'from scipy.stats import truncnorm\n'), ((1803, 1832), 'numpy.arange', 'np.arange', (['(1e-05)', '(0.25)', '(1e-05)'], {}), '(1e-05, 0.25, 1e-05)\n', (1812, 1832), True, 'import numpy as np\n'), ((2814, 2833), 'numpy.delete', 'np.delete', (['means', '(0)'], {}), '(means, 0)\n', (2823, 2833), True, 'import numpy as np\n'), ((2845, 2863), 'numpy.delete', 'np.delete', (['stds', '(0)'], {}), '(stds, 0)\n', (2854, 2863), True, 'import numpy as np\n'), ((3548, 3579), 'scipy.stats.invgamma.fit', 'invgamma.fit', (['variances'], {'floc': '(0)'}), '(variances, floc=0)\n', (3560, 3579), False, 'from scipy.stats import invgamma\n'), ((5833, 5857), 'numpy.arange', 'np.arange', (['(0)', '(1.41)', '(0.01)'], {}), '(0, 1.41, 0.01)\n', (5842, 5857), True, 'import numpy as np\n'), ((5906, 5929), 'numpy.arange', 'np.arange', (['(0)', '(90.3)', '(0.1)'], {}), '(0, 90.3, 0.1)\n', (5915, 5929), True, 'import numpy as np\n'), ((6141, 6164), 'numpy.arange', 'np.arange', (['(0)', '(22.4)', '(0.1)'], {}), '(0, 22.4, 0.1)\n', (6150, 6164), True, 'import numpy as np\n'), ((6399, 6410), 'time.time', 'time.time', ([], {}), '()\n', (6408, 6410), False, 'import time\n'), ((6655, 6683), 'numpy.meshgrid', 'np.meshgrid', (['actionspace', 'sf'], {}), '(actionspace, sf)\n', (6666, 6683), True, 'import numpy as np\n'), ((9843, 9854), 'time.time', 'time.time', ([], {}), '()\n', (9852, 9854), False, 'import time\n'), ((10554, 10577), 'numpy.arange', 'np.arange', (['(0)', '(22.4)', '(0.1)'], {}), '(0, 22.4, 0.1)\n', (10563, 10577), True, 'import numpy as np\n'), ((2704, 2736), 'numpy.mean', 'np.mean', (['sparing_factors[:i + 1]'], {}), '(sparing_factors[:i + 1])\n', (2711, 2736), True, 'import numpy as np\n'), ((4495, 4520), 'numpy.outer', 'np.outer', (['sf', 'actionspace'], {}), '(sf, actionspace)\n', (4503, 4520), True, 'import numpy as np\n'), ((2094, 2122), 'numpy.argmax', 'np.argmax', (['likelihood_values'], {}), '(likelihood_values)\n', (2103, 2122), True, 'import numpy as np\n'), ((4197, 4221), 'numpy.arange', 'np.arange', (['(0)', '(1.41)', '(0.01)'], {}), '(0, 1.41, 0.01)\n', (4206, 4221), True, 'import numpy as np\n'), ((2000, 2021), 'numpy.exp', 'np.exp', (['(-beta / value)'], {}), '(-beta / value)\n', (2006, 2021), True, 'import numpy as np\n'), ((4523, 4548), 'numpy.outer', 'np.outer', (['sf', 'actionspace'], {}), '(sf, actionspace)\n', (4531, 4548), True, 'import numpy as np\n'), ((7912, 7938), 'numpy.zeros', 'np.zeros', (['future_bed.shape'], {}), '(future_bed.shape)\n', (7920, 7938), True, 'import numpy as np\n'), ((11236, 11345), 'numpy.sqrt', 'np.sqrt', (['(sparing_factors[index + 1] ** 2 + 4 * sparing_factors[index + 1] ** 2 * (\n 90 - total_bednt) / abn)'], {}), '(sparing_factors[index + 1] ** 2 + 4 * sparing_factors[index + 1] **\n 2 * (90 - total_bednt) / abn)\n', (11243, 11345), True, 'import numpy as np\n'), ((8757, 8783), 'numpy.zeros', 'np.zeros', (['future_bed.shape'], {}), '(future_bed.shape)\n', (8765, 8783), True, 'import numpy as np\n'), ((9195, 9221), 'numpy.zeros', 'np.zeros', (['future_bed.shape'], {}), '(future_bed.shape)\n', (9203, 9221), True, 'import numpy as np\n'), ((2028, 2049), 'numpy.var', 'np.var', (['measured_data'], {}), '(measured_data)\n', (2034, 2049), True, 'import numpy as np\n')]
|
import numpy as np
np.deprecate(1) # E: No overload variant
np.deprecate_with_doc(1) # E: incompatible type
np.byte_bounds(1) # E: incompatible type
np.who(1) # E: incompatible type
np.lookfor(None) # E: incompatible type
np.safe_eval(None) # E: incompatible type
|
[
"numpy.deprecate_with_doc",
"numpy.deprecate",
"numpy.lookfor",
"numpy.who",
"numpy.byte_bounds",
"numpy.safe_eval"
] |
[((20, 35), 'numpy.deprecate', 'np.deprecate', (['(1)'], {}), '(1)\n', (32, 35), True, 'import numpy as np\n'), ((63, 87), 'numpy.deprecate_with_doc', 'np.deprecate_with_doc', (['(1)'], {}), '(1)\n', (84, 87), True, 'import numpy as np\n'), ((113, 130), 'numpy.byte_bounds', 'np.byte_bounds', (['(1)'], {}), '(1)\n', (127, 130), True, 'import numpy as np\n'), ((156, 165), 'numpy.who', 'np.who', (['(1)'], {}), '(1)\n', (162, 165), True, 'import numpy as np\n'), ((191, 207), 'numpy.lookfor', 'np.lookfor', (['None'], {}), '(None)\n', (201, 207), True, 'import numpy as np\n'), ((233, 251), 'numpy.safe_eval', 'np.safe_eval', (['None'], {}), '(None)\n', (245, 251), True, 'import numpy as np\n')]
|
import argparse
from pathlib import Path
import numpy as np
import yaml
# this script takes in a folder path and then recursively collects all
# results.yaml files in that directory. It averages them and prints
# summary statistics
parser = argparse.ArgumentParser(description="Analyze the results")
parser.add_argument("path", type=str, help="path to the folder containing the results")
args = parser.parse_args()
results = []
keys = set()
for path in Path(args.path).rglob("results.yaml"):
with open(path, "r") as file:
results.append(yaml.safe_load(file))
keys = keys.union(results[-1].keys())
print(f"Found {len(results)} files with {len(keys)} different metrics\n")
output = {}
for key in keys:
vals = [result[key] for result in results if key in result]
n = len(vals)
mean = float(np.mean(vals))
std = float(np.std(vals))
output[key] = {
"N runs": n,
"mean": round(mean, 3),
"std": round(std, 3)
}
print(yaml.dump(output))
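
# Hedged example (hypothetical paths and metric names, not part of the original script):
#   python analyze_results.py runs/experiment_1
# where every results.yaml found under runs/experiment_1 contains flat metric entries such as
#   accuracy: 0.91
#   loss: 0.34
# and the script prints, per metric, the number of runs, the mean and the std.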
|
[
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path",
"yaml.dump",
"yaml.safe_load",
"numpy.std"
] |
[((244, 302), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Analyze the results"""'}), "(description='Analyze the results')\n", (267, 302), False, 'import argparse\n'), ((988, 1005), 'yaml.dump', 'yaml.dump', (['output'], {}), '(output)\n', (997, 1005), False, 'import yaml\n'), ((459, 474), 'pathlib.Path', 'Path', (['args.path'], {}), '(args.path)\n', (463, 474), False, 'from pathlib import Path\n'), ((827, 840), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (834, 840), True, 'import numpy as np\n'), ((858, 870), 'numpy.std', 'np.std', (['vals'], {}), '(vals)\n', (864, 870), True, 'import numpy as np\n'), ((555, 575), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (569, 575), False, 'import yaml\n')]
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
# Mon 18 Nov 21:38:19 2013
"""Extension building for using this package
"""
import numpy
from pkg_resources import resource_filename
from bob.extension import Extension as BobExtension
# forward the build_ext command from bob.extension
from bob.extension import build_ext, Library as BobLibrary
from distutils.version import LooseVersion
class Extension(BobExtension):
"""Extension building with pkg-config packages and blitz.array.
See the documentation for :py:class:`distutils.extension.Extension` for more
details on input parameters.
"""
def __init__(self, *args, **kwargs):
"""Initialize the extension with parameters.
This extension adds ``blitz>=0.10`` as a requirement for extensions derived
from this class.
See the help for :py:class:`bob.extension.Extension` for more details on
options.
"""
require = ['blitz>=0.10', 'boost']
kwargs.setdefault('packages', []).extend(require)
self_include_dir = resource_filename(__name__, 'include')
kwargs.setdefault('system_include_dirs', []).append(numpy.get_include())
kwargs.setdefault('include_dirs', []).append(self_include_dir)
macros = [
("PY_ARRAY_UNIQUE_SYMBOL", "BOB_NUMPY_C_API"),
("NO_IMPORT_ARRAY", "1"),
]
if LooseVersion(numpy.__version__) >= LooseVersion('1.7'):
macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
kwargs.setdefault('define_macros', []).extend(macros)
# Run the constructor for the base class
BobExtension.__init__(self, *args, **kwargs)
class Library (BobLibrary):
"""Pure C++ library building with blitz array.
See the documentation for :py:class:`bob.extension.Extension` for more
details on input parameters.
"""
def __init__(self, *args, **kwargs):
"""Initialize the library with parameters.
    This library adds ``blitz>=0.10`` as a requirement for libraries derived
from this class.
See the help for :py:class:`bob.extension.Library` for more details on
options.
"""
require = ['blitz>=0.10', 'boost']
kwargs.setdefault('packages', []).extend(require)
self_include_dir = resource_filename(__name__, 'include')
kwargs.setdefault('system_include_dirs', []).append(numpy.get_include())
kwargs.setdefault('include_dirs', []).append(self_include_dir)
# TODO: are these macros required for pure C++ builds?
macros = [
("PY_ARRAY_UNIQUE_SYMBOL", "BOB_NUMPY_C_API"),
("NO_IMPORT_ARRAY", "1"),
]
if LooseVersion(numpy.__version__) >= LooseVersion('1.7'):
macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
kwargs.setdefault('define_macros', []).extend(macros)
# Run the constructor for the base class
BobLibrary.__init__(self, *args, **kwargs)
|
[
"bob.extension.Extension.__init__",
"bob.extension.Library.__init__",
"pkg_resources.resource_filename",
"numpy.get_include",
"distutils.version.LooseVersion"
] |
[((1037, 1075), 'pkg_resources.resource_filename', 'resource_filename', (['__name__', '"""include"""'], {}), "(__name__, 'include')\n", (1054, 1075), False, 'from pkg_resources import resource_filename\n'), ((1584, 1628), 'bob.extension.Extension.__init__', 'BobExtension.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1605, 1628), True, 'from bob.extension import Extension as BobExtension\n'), ((2219, 2257), 'pkg_resources.resource_filename', 'resource_filename', (['__name__', '"""include"""'], {}), "(__name__, 'include')\n", (2236, 2257), False, 'from pkg_resources import resource_filename\n'), ((2825, 2867), 'bob.extension.Library.__init__', 'BobLibrary.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (2844, 2867), True, 'from bob.extension import build_ext, Library as BobLibrary\n'), ((1132, 1151), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1149, 1151), False, 'import numpy\n'), ((1349, 1380), 'distutils.version.LooseVersion', 'LooseVersion', (['numpy.__version__'], {}), '(numpy.__version__)\n', (1361, 1380), False, 'from distutils.version import LooseVersion\n'), ((1384, 1403), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.7"""'], {}), "('1.7')\n", (1396, 1403), False, 'from distutils.version import LooseVersion\n'), ((2314, 2333), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2331, 2333), False, 'import numpy\n'), ((2590, 2621), 'distutils.version.LooseVersion', 'LooseVersion', (['numpy.__version__'], {}), '(numpy.__version__)\n', (2602, 2621), False, 'from distutils.version import LooseVersion\n'), ((2625, 2644), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.7"""'], {}), "('1.7')\n", (2637, 2644), False, 'from distutils.version import LooseVersion\n')]
|
import numpy as np
import math
import time
class PulsedProgramming:
"""
    This class contains all the parameters for the pulsed programming on a memristor model.
    After initializing the parameter values, start the simulation with self.simulate().
Parameters
----------
max_voltage : float
        The max voltage (V) of a pulse. If 0, no limit is applied.
pulse_algorithm : string
        The pulse algorithm used. These are the available choices (sources in the methods). Default is 'fabien'.
        'fabien' : Use fabien_convergence()
        'log' : Use log_convergence()
tolerance : float
        The tolerance_value input is an int that represents the absolute tolerance (Ohm) from the res_states the
        pulsed programming will find. Smaller is more precise, but too small a value may never converge.
is_relative_tolerance : bool
        If true, the tolerance_value is interpreted as a percentage instead of Ohm. Ex: 10 means 10% if true, 10 Ohm if false.
variability_write : iterable[float]
A gaussian distribution with (mu=0, sigma=variance_write)
index_variability : int
Index of the current variability. If over 1000, reset to 0.
variance_write : float
Variance of the gaussian distribution on the memristor write. See variability.
graph_resistance : List[Union[float, int]]
Contains all resistance of the simulation. It's used in the creation of plots.
graph_voltages : List[Union[float, int]]
Contains all voltages of the simulation. It's used in the creation of plots.
number_of_reading : int
        The number of correct values read before passing to the next state.
max_pulse : int
The max number of pulses.
"""
def __init__(self, memristor_simulation, pulse_algorithm='fabien', max_voltage=0, tolerance=0, is_relative_tolerance=False,
variance_write=0, number_of_reading=1, max_pulse=20000, verbose=False, plot_memristor=0):
self.memristor_simulation = memristor_simulation
self.pulse_algorithm = pulse_algorithm
self.tolerance = tolerance
self.max_voltage = max_voltage
self.is_relative_tolerance = is_relative_tolerance
self.variance_write = variance_write
self.number_of_reading = number_of_reading
self.max_pulse = max_pulse
self.verbose = verbose
self.voltage_output = {}
self.plot_memristor = plot_memristor
self.index_variability = 0
self.variability_write = np.random.normal(0, variance_write, 1000)
self.graph_resistance = []
self.graph_voltages = []
def print(self):
print(self.pulse_algorithm)
print(self.tolerance)
print(self.max_voltage)
print(self.voltage_output)
print(self.is_relative_tolerance)
print(self.variance_write)
print(self.number_of_reading)
print(self.max_pulse)
print(self.verbose)
print(np.array(self.graph_resistance))
print(np.array(self.graph_voltages))
def write_resistance(self, memristor, voltage, t_pulse):
"""
        This function changes the resistance of the memristor by applying a voltage for t_pulse.
Parameters
----------
memristor : Memristor
The memristor wrote.
voltage : float
The voltage (V) applied.
t_pulse : float
The time of the writing pulse. (s)
Returns
----------
"""
t = int(t_pulse / memristor.time_series_resolution)
signal = [voltage] * t
memristor.simulate(signal)
self.index_variability = self.index_variability + 1 if self.index_variability < len(self.variability_write) - 1 else 0
memristor.g = 1 / (1 / memristor.g + (1 / memristor.g) * self.variability_write[self.index_variability])
def find_number_iteration(self):
"""
        This function finds the number of iterations needed to create the resistance list depending on the distribution type.
Returns
----------
number_iteration : int
number of iteration
"""
number_iteration = 1
if self.distribution_type == 'full_spread':
number_iteration = self.circuit.number_of_memristor
return number_iteration
def simulate(self, voltages_target, precision=None):
"""
        This function will set the memristors to the wanted resistances of each voltages_target package.
Parameters
----------
voltages_target : dict
dict with keys as voltage and package as list of resistance
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
if self.pulse_algorithm != 'fabien' and self.pulse_algorithm != 'log':
raise(Exception(f'Pulse algorithm not supported: {self.pulse_algorithm}'))
# voltages_target_list = list(voltages_target.keys())
# resolution = voltages_target_list[1] - voltages_target_list[0]
index = 1
conf_done = 0
start_time = time.time()
diff_voltage = {}
for v in list(voltages_target.keys()):
if index == 1:
start_time_ = time.time()
self.simulate_list_memristor(voltages_target[v], precision)
self.voltage_output[self.memristor_simulation.circuit.current_v_out()] = [i.read() for i in self.memristor_simulation.circuit.list_memristor]
diff_voltage[abs(v - self.memristor_simulation.circuit.current_v_out())] = [round(1 / np.sum([1/res for res in voltages_target[v]]), 4), round(1 / self.memristor_simulation.circuit.current_conductance(), 4)]
if index == 50 and self.verbose:
conf_done += index
print(f'Conf done: {conf_done}\tTook: {round(time.time() - start_time_, 2)} s\tTime left: {round((time.time() - start_time_) * (len(voltages_target.keys()) - conf_done) / 50, 2)} s')
index = 0
index += 1
if self.verbose:
print(f'Total time: {time.time() - start_time}')
print()
for key in diff_voltage.keys():
print(f'{round(key*1000, 4)} mV\t{diff_voltage.get(key)[0]}\t{diff_voltage.get(key)[1]} (Ohm)')
print(f'Mean diff: {np.mean(list(diff_voltage.keys()))}')
print(f'Min diff: {np.min(list(diff_voltage.keys()))}\tMax diff: {np.max(list(diff_voltage.keys()))}')
return self.voltage_output
def simulate_list_memristor(self, list_resistance, precision):
"""
        This function will set the memristors to the wanted resistances in list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method.
"""
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if i == self.plot_memristor else False
if self.pulse_algorithm == 'fabien':
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
elif self.pulse_algorithm == 'log':
self.log_convergence(self.memristor_simulation.circuit.list_memristor[i], list_resistance[i], plot=plot)
self.balance(list_resistance, precision)
def balance(self, list_resistance, precision):
"""
        This function will set the memristors to the wanted resistances in list_resistance.
Parameters
----------
list_resistance : list
list of the wanted resistance for the memristor.
precision : list
[[macro_tune, is_relative_variability], [fine_tune, is_relative_variability]] for the balance() method. If 0,
won't do it.
"""
final_g = np.sum([1 / i for i in list_resistance])
delta_g = final_g - self.memristor_simulation.circuit.current_conductance()
for i in range(self.memristor_simulation.circuit.number_of_memristor):
plot = True if -(i+1) == self.plot_memristor else False
final_res = 1 / (self.memristor_simulation.circuit.list_memristor[-(i+1)].g + delta_g)
if self.memristor_simulation.circuit.memristor_model.r_on <= final_res <= self.memristor_simulation.circuit.memristor_model.r_off:
p_tolerance, p_relative = self.tolerance, self.is_relative_tolerance
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
if precision[0][0] != 0 or precision is not None:
self.tolerance, self.is_relative_tolerance = precision[0][0], precision[0][1]
self.fabien_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
if precision[1][0] != 0 or precision is not None:
self.tolerance, self.is_relative_tolerance = precision[1][0], precision[1][1]
self.small_convergence(self.memristor_simulation.circuit.list_memristor[-(i+1)], final_res, plot)
# print(f'{final_res}\t{1 / self.memristor_simulation.circuit.list_memristor[-(i+1)].g}\t{final_g - self.memristor_simulation.circuit.current_conductance()}')
self.tolerance, self.is_relative_tolerance = p_tolerance, p_relative
break
def small_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set the target_res for the memristor using a
        very small increment.
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
step = 0.001
positive_voltage = voltage_set = 0.1
negative_voltage = voltage_reset = -0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
while not flag_finish:
current_res = memristor.read()
if res_min <= current_res <= res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res < res_min:
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
negative_voltage -= step
positive_voltage = voltage_set
elif current_res > res_max:
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
positive_voltage += step
negative_voltage = voltage_reset
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print(f'Got max pulse {self.max_pulse}')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
counter += 1
def log_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set the target_res for the memristor.
From : https://arxiv.org/abs/2103.09931
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
# additional parameters
min_shift = 0.005
max_shift = 0.2
a = 0.1
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
r_shift = 1
current_res = memristor.read()
while not flag_finish:
if res_min < current_res < res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res > res_max:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
positive_voltage += a * np.log10(abs(target_res - current_res) / r_shift)
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
positive_voltage = voltage_set
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
elif current_res < res_min:
if r_shift < min_shift * (memristor.r_off - memristor.r_on):
negative_voltage -= a * np.log10(abs((target_res - current_res) / r_shift))
elif r_shift > max_shift * (memristor.r_off - memristor.r_on):
negative_voltage = voltage_reset
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print('Got max pulse')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
counter += 1
previous_res = current_res
current_res = memristor.read()
r_shift = abs(current_res - previous_res) if abs(current_res - previous_res) != 0 else 1
def fabien_convergence(self, memristor, target_res, plot=False):
"""
        This function runs the pulsed programming with a variable voltage to set the target_res for the memristor.
From : https://iopscience.iop.org/article/10.1088/0957-4484/23/7/075201
Parameters
----------
memristor : Memristor
The memristor object
target_res : float
The target resistance
"""
step = 0.005
positive_voltage = voltage_set = 0.5
negative_voltage = voltage_reset = -0.5
if self.is_relative_tolerance:
res_max = target_res + self.tolerance * target_res / 100
res_min = target_res - self.tolerance * target_res / 100
else:
res_max = target_res + self.tolerance
res_min = target_res - self.tolerance
start_len_res = len(self.graph_resistance)
start_len_v = len(self.graph_voltages)
counter = 0
action = 'read'
flag_finish = False
counter_read = 0
while not flag_finish:
current_res = memristor.read()
if res_min <= current_res <= res_max:
counter_read += 1
if plot:
action = 'read'
self.graph_voltages.append([0.2, counter + start_len_v, action])
elif current_res < res_min:
if self.max_voltage != 0:
negative_voltage = -self.max_voltage if negative_voltage <= -self.max_voltage else negative_voltage
self.write_resistance(memristor, negative_voltage, 200e-9)
if plot:
action = 'reset'
self.graph_voltages.append([negative_voltage, counter + start_len_v, action])
negative_voltage -= step
positive_voltage = voltage_set
elif current_res > res_max:
if self.max_voltage != 0:
positive_voltage = self.max_voltage if positive_voltage >= self.max_voltage else positive_voltage
self.write_resistance(memristor, positive_voltage, 200e-9)
if plot:
action = 'set'
self.graph_voltages.append([positive_voltage, counter + start_len_v, action])
positive_voltage += step
negative_voltage = voltage_reset
if counter_read == self.number_of_reading:
flag_finish = not flag_finish
if counter >= self.max_pulse:
flag_finish = not flag_finish
print('Got max pulse')
if plot:
self.graph_resistance.append([current_res, counter + start_len_res, action, flag_finish])
# print(f'{self.graph_resistance[-1]}\t{self.graph_voltages[-1]}')
counter += 1
|
[
"numpy.random.normal",
"numpy.sum",
"numpy.array",
"time.time"
] |
[((2523, 2564), 'numpy.random.normal', 'np.random.normal', (['(0)', 'variance_write', '(1000)'], {}), '(0, variance_write, 1000)\n', (2539, 2564), True, 'import numpy as np\n'), ((5167, 5178), 'time.time', 'time.time', ([], {}), '()\n', (5176, 5178), False, 'import time\n'), ((8053, 8095), 'numpy.sum', 'np.sum', (['[(1 / i) for i in list_resistance]'], {}), '([(1 / i) for i in list_resistance])\n', (8059, 8095), True, 'import numpy as np\n'), ((2976, 3007), 'numpy.array', 'np.array', (['self.graph_resistance'], {}), '(self.graph_resistance)\n', (2984, 3007), True, 'import numpy as np\n'), ((3023, 3052), 'numpy.array', 'np.array', (['self.graph_voltages'], {}), '(self.graph_voltages)\n', (3031, 3052), True, 'import numpy as np\n'), ((5309, 5320), 'time.time', 'time.time', ([], {}), '()\n', (5318, 5320), False, 'import time\n'), ((5645, 5694), 'numpy.sum', 'np.sum', (['[(1 / res) for res in voltages_target[v]]'], {}), '([(1 / res) for res in voltages_target[v]])\n', (5651, 5694), True, 'import numpy as np\n'), ((6154, 6165), 'time.time', 'time.time', ([], {}), '()\n', (6163, 6165), False, 'import time\n'), ((5908, 5919), 'time.time', 'time.time', ([], {}), '()\n', (5917, 5919), False, 'import time\n'), ((5961, 5972), 'time.time', 'time.time', ([], {}), '()\n', (5970, 5972), False, 'import time\n')]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
from mmdet.datasets.pipelines import Compose
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_img_metas(cfg, ori_wh):
w, h = ori_wh
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
test_pipeline = Compose(cfg.data.test.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8)}
data = test_pipeline(data)
img_metas = data['img_metas'][0].data
return img_metas
def process_img(frame_resize, img_metas, device):
assert frame_resize.shape == img_metas['pad_shape']
frame_cuda = torch.from_numpy(frame_resize).to(device).float()
frame_cuda = frame_cuda.permute(2, 0, 1) # HWC to CHW
mean = torch.from_numpy(img_metas['img_norm_cfg']['mean']).to(device)
std = torch.from_numpy(img_metas['img_norm_cfg']['std']).to(device)
frame_cuda = F.normalize(frame_cuda, mean=mean, std=std, inplace=True)
frame_cuda = frame_cuda[None, :, :, :] # NCHW
data = {'img': [frame_cuda], 'img_metas': [[img_metas]]}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
img_metas = prefetch_img_metas(model.cfg,
(video_origin.width, video_origin.height))
resize_wh = img_metas['pad_shape'][1::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft',
pix_fmt='rgb24')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for frame_resize, frame_origin in zip(
mmcv.track_iter_progress(video_resize), video_origin):
data = process_img(frame_resize, img_metas, args.device)
result = model(return_loss=False, rescale=True, **data)[0]
frame_mask = model.show_result(
frame_origin, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
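
# Hedged example invocation (hypothetical file paths, not part of the original demo):
#   python <this_script>.py demo.mp4 path/to/config.py path/to/checkpoint.pth --out result.mp4 --nvdecode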
|
[
"mmcv.track_iter_progress",
"argparse.ArgumentParser",
"mmdet.apis.init_detector",
"ffmpegcv.VideoWriter",
"torch.from_numpy",
"mmdet.datasets.pipelines.Compose",
"mmcv.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"torch.no_grad",
"torchvision.transforms.functional.normalize",
"cv2.namedWindow"
] |
[((425, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MMDetection video demo with GPU acceleration"""'}), "(description=\n 'MMDetection video demo with GPU acceleration')\n", (448, 513), False, 'import argparse\n'), ((1446, 1477), 'mmdet.datasets.pipelines.Compose', 'Compose', (['cfg.data.test.pipeline'], {}), '(cfg.data.test.pipeline)\n', (1453, 1477), False, 'from mmdet.datasets.pipelines import Compose\n'), ((2025, 2082), 'torchvision.transforms.functional.normalize', 'F.normalize', (['frame_cuda'], {'mean': 'mean', 'std': 'std', 'inplace': '(True)'}), '(frame_cuda, mean=mean, std=std, inplace=True)\n', (2036, 2082), True, 'from torchvision.transforms import functional as F\n'), ((2420, 2483), 'mmdet.apis.init_detector', 'init_detector', (['args.config', 'args.checkpoint'], {'device': 'args.device'}), '(args.config, args.checkpoint, device=args.device)\n', (2433, 2483), False, 'from mmdet.apis import init_detector\n'), ((3828, 3851), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3849, 3851), False, 'import cv2\n'), ((1497, 1532), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.uint8'}), '((h, w, 3), dtype=np.uint8)\n', (1505, 1532), True, 'import numpy as np\n'), ((3063, 3115), 'ffmpegcv.VideoWriter', 'ffmpegcv.VideoWriter', (['args.out'], {'fps': 'video_origin.fps'}), '(args.out, fps=video_origin.fps)\n', (3083, 3115), False, 'import ffmpegcv\n'), ((3126, 3141), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3139, 3141), False, 'import torch\n'), ((1873, 1924), 'torch.from_numpy', 'torch.from_numpy', (["img_metas['img_norm_cfg']['mean']"], {}), "(img_metas['img_norm_cfg']['mean'])\n", (1889, 1924), False, 'import torch\n'), ((1946, 1996), 'torch.from_numpy', 'torch.from_numpy', (["img_metas['img_norm_cfg']['std']"], {}), "(img_metas['img_norm_cfg']['std'])\n", (1962, 1996), False, 'import torch\n'), ((3206, 3244), 'mmcv.track_iter_progress', 'mmcv.track_iter_progress', (['video_resize'], {}), '(video_resize)\n', (3230, 3244), False, 'import mmcv\n'), ((3551, 3578), 'cv2.namedWindow', 'cv2.namedWindow', (['"""video"""', '(0)'], {}), "('video', 0)\n", (3566, 3578), False, 'import cv2\n'), ((3595, 3643), 'mmcv.imshow', 'mmcv.imshow', (['frame_mask', '"""video"""', 'args.wait_time'], {}), "(frame_mask, 'video', args.wait_time)\n", (3606, 3643), False, 'import mmcv\n'), ((1753, 1783), 'torch.from_numpy', 'torch.from_numpy', (['frame_resize'], {}), '(frame_resize)\n', (1769, 1783), False, 'import torch\n')]
|
import numpy as np
import torch
import matplotlib.pyplot as plt
from icecream import ic
def visualize_vector_field(policy, device, min_max = [[-1,-1],[1,1]], fig_number=1):
min_x = min_max[0][0]
max_x = min_max[1][0]
min_y = min_max[0][1]
max_y = min_max[1][1]
n_sample = 100
x = np.linspace(min_x, max_x, n_sample)
y = np.linspace(min_y, max_y, n_sample)
xy = np.meshgrid(x, y)
h = np.concatenate(xy[0])
v = np.concatenate(xy[1])
hv = torch.Tensor(np.stack([h, v]).T).float()
if device is not None:
hv = hv.to(device)
vel = policy(hv)
#vel = to_numpy(vel)
    # convert the network output to a NumPy array before plotting (assumes `policy` returns a torch tensor)
    vel = np.nan_to_num(vel.detach().cpu().numpy())
vel_x = np.reshape(vel[:, 0], (n_sample, n_sample))
vel_y = np.reshape(vel[:, 1], (n_sample, n_sample))
speed = np.sqrt(vel_x ** 2 + vel_y ** 2)
speed = speed/np.max(speed)
plt.streamplot(xy[0], xy[1], vel_x, vel_y, color=speed)
w = 5
Y, X = np.mgrid[-w:w:5j, -w:w:5j]
ic(Y)
ic(X)
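
# Hedged usage sketch (illustrative; normally a learned policy network would be passed in):
#     visualize_vector_field(lambda xy: -xy, device=None)
#     plt.show()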
import numpy as np
import matplotlib.pyplot as plt
# # Creating dataset
# x = np.arange(0, 10)
# y = np.arange(0, 10)
#
# # Creating grids
# X, Y = np.meshgrid(x, y)
# # ic(X)
# # ic(Y)
#
# # x-component to the right
# u = np.ones((15, 10))
#
# # y-component zero
# v = -np.ones((10, 10))
#
# fig = plt.figure(figsize=(12, 7))
#
# # Plotting stream plot
# plt.streamplot(X, Y, u, v, density=0.5)
#
# # show plot
# # plt.show()
import numpy as np
import matplotlib.pyplot as plt
# Creating data set
w = 3
Y, X = np.mgrid[-w:w:100j, -w:w:100j]
U1 = -1 - X ** 2 + Y
ic(type(U1))
ic(np.shape(U1))
V1 = 1 + X - Y ** 2
ic(np.shape(V1))
U2 = -1.1 - X ** 2 + Y
ic(np.shape(U1))
V2 = 2.1 + X - Y ** 2
# speed = np.sqrt(U ** 2 + V ** 2)
# Creating plot
fig = plt.figure(figsize=(12, 7))
plt.streamplot(X, Y, U1, V1, density=1)
plt.streamplot(X, Y, U2, V2, density=0.8)
# show plot
plt.show()
|
[
"icecream.ic",
"numpy.reshape",
"numpy.sqrt",
"numpy.max",
"numpy.stack",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.streamplot",
"numpy.linspace",
"numpy.concatenate",
"numpy.meshgrid",
"numpy.shape",
"numpy.nan_to_num",
"matplotlib.pyplot.show"
] |
[((947, 952), 'icecream.ic', 'ic', (['Y'], {}), '(Y)\n', (949, 952), False, 'from icecream import ic\n'), ((953, 958), 'icecream.ic', 'ic', (['X'], {}), '(X)\n', (955, 958), False, 'from icecream import ic\n'), ((1714, 1741), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (1724, 1741), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1781), 'matplotlib.pyplot.streamplot', 'plt.streamplot', (['X', 'Y', 'U1', 'V1'], {'density': '(1)'}), '(X, Y, U1, V1, density=1)\n', (1756, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1823), 'matplotlib.pyplot.streamplot', 'plt.streamplot', (['X', 'Y', 'U2', 'V2'], {'density': '(0.8)'}), '(X, Y, U2, V2, density=0.8)\n', (1796, 1823), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1844, 1846), True, 'import matplotlib.pyplot as plt\n'), ((307, 342), 'numpy.linspace', 'np.linspace', (['min_x', 'max_x', 'n_sample'], {}), '(min_x, max_x, n_sample)\n', (318, 342), True, 'import numpy as np\n'), ((351, 386), 'numpy.linspace', 'np.linspace', (['min_y', 'max_y', 'n_sample'], {}), '(min_y, max_y, n_sample)\n', (362, 386), True, 'import numpy as np\n'), ((397, 414), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (408, 414), True, 'import numpy as np\n'), ((423, 444), 'numpy.concatenate', 'np.concatenate', (['xy[0]'], {}), '(xy[0])\n', (437, 444), True, 'import numpy as np\n'), ((453, 474), 'numpy.concatenate', 'np.concatenate', (['xy[1]'], {}), '(xy[1])\n', (467, 474), True, 'import numpy as np\n'), ((636, 654), 'numpy.nan_to_num', 'np.nan_to_num', (['vel'], {}), '(vel)\n', (649, 654), True, 'import numpy as np\n'), ((668, 711), 'numpy.reshape', 'np.reshape', (['vel[:, 0]', '(n_sample, n_sample)'], {}), '(vel[:, 0], (n_sample, n_sample))\n', (678, 711), True, 'import numpy as np\n'), ((724, 767), 'numpy.reshape', 'np.reshape', (['vel[:, 1]', '(n_sample, n_sample)'], {}), '(vel[:, 1], (n_sample, n_sample))\n', (734, 767), True, 'import numpy as np\n'), ((780, 812), 'numpy.sqrt', 'np.sqrt', (['(vel_x ** 2 + vel_y ** 2)'], {}), '(vel_x ** 2 + vel_y ** 2)\n', (787, 812), True, 'import numpy as np\n'), ((850, 905), 'matplotlib.pyplot.streamplot', 'plt.streamplot', (['xy[0]', 'xy[1]', 'vel_x', 'vel_y'], {'color': 'speed'}), '(xy[0], xy[1], vel_x, vel_y, color=speed)\n', (864, 905), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1554), 'numpy.shape', 'np.shape', (['U1'], {}), '(U1)\n', (1550, 1554), True, 'import numpy as np\n'), ((1579, 1591), 'numpy.shape', 'np.shape', (['V1'], {}), '(V1)\n', (1587, 1591), True, 'import numpy as np\n'), ((1620, 1632), 'numpy.shape', 'np.shape', (['U1'], {}), '(U1)\n', (1628, 1632), True, 'import numpy as np\n'), ((831, 844), 'numpy.max', 'np.max', (['speed'], {}), '(speed)\n', (837, 844), True, 'import numpy as np\n'), ((497, 513), 'numpy.stack', 'np.stack', (['[h, v]'], {}), '([h, v])\n', (505, 513), True, 'import numpy as np\n')]
|
from __future__ import annotations
from copy import copy, deepcopy
from types import MappingProxyType
from typing import (
Any,
Union,
Mapping,
TypeVar,
Callable,
Iterable,
Iterator,
Sequence,
TYPE_CHECKING,
)
from pathlib import Path
from functools import partial
from itertools import chain
from typing_extensions import Literal
import re
import validators
from scanpy import logging as logg
from anndata import AnnData
from scanpy.plotting.palettes import default_102 as default_palette
from dask import delayed
import numpy as np
import xarray as xr
import dask.array as da
from matplotlib.colors import ListedColormap
import matplotlib as mpl
import matplotlib.pyplot as plt
from skimage.util import img_as_float
from skimage.transform import rescale
from squidpy._docs import d, inject_docs
from squidpy._utils import NDArrayA, singledispatchmethod
from squidpy.im._io import _lazy_load_image, _infer_dimensions, _assert_dims_present
from squidpy.gr._utils import (
_assert_in_range,
_assert_positive,
_assert_non_negative,
_assert_spatial_basis,
_assert_non_empty_sequence,
)
from squidpy.im._coords import (
CropCoords,
CropPadding,
_NULL_COORDS,
_NULL_PADDING,
TupleSerializer,
_update_attrs_scale,
_update_attrs_coords,
)
from squidpy.im._feature_mixin import FeatureMixin
from squidpy._constants._constants import InferDimensions
from squidpy._constants._pkg_constants import Key
FoI_t = Union[int, float]
Pathlike_t = Union[str, Path]
Arraylike_t = Union[NDArrayA, xr.DataArray]
InferDims_t = Union[Literal["default", "prefer_channels", "prefer_z"], Sequence[str]]
Input_t = Union[Pathlike_t, Arraylike_t, "ImageContainer"]
Interactive = TypeVar("Interactive") # cannot import because of cyclic dependencies
_ERROR_NOTIMPLEMENTED_LIBID = f"It seems there are multiple `library_id` in `adata.uns[{Key.uns.spatial!r}]`.\n \
Loading multiple images is not implemented (yet), please specify a `library_id`."
__all__ = ["ImageContainer"]
@d.dedent # trick to overcome not top-down order
@d.dedent
class ImageContainer(FeatureMixin):
"""
Container for in memory arrays or on-disk images.
Wraps :class:`xarray.Dataset` to store several image layers with the same `x`, `y` and `z` dimensions in one object.
Dimensions of stored images are ``(y, x, z, channels)``. The channel dimension may vary between image layers.
This class also allows for lazy loading and processing using :mod:`dask`, and is given to all image
processing functions, along with :class:`anndata.AnnData` instance, if necessary.
Parameters
----------
%(add_img.parameters)s
scale
Scaling factor of the image with respect to the spatial coordinates
saved in the accompanying :class:`anndata.AnnData`.
Raises
------
%(add_img.raises)s
"""
def __init__(
self,
img: Input_t | None = None,
layer: str = "image",
lazy: bool = True,
scale: float = 1.0,
**kwargs: Any,
):
self._data: xr.Dataset = xr.Dataset()
self._data.attrs[Key.img.coords] = _NULL_COORDS # can't save None to NetCDF
self._data.attrs[Key.img.padding] = _NULL_PADDING
self._data.attrs[Key.img.scale] = scale
self._data.attrs[Key.img.mask_circle] = False
if img is not None:
self.add_img(img, layer=layer, **kwargs)
if not lazy:
self.compute()
@classmethod
def concat(
cls,
imgs: Iterable[ImageContainer],
library_ids: Sequence[str | None] | None = None,
combine_attrs: str = "identical",
**kwargs: Any,
) -> ImageContainer:
"""
Concatenate ``imgs`` in Z-dimension.
All ``imgs`` need to have the same shape and the same name to be concatenated.
Parameters
----------
imgs
Images that should be concatenated in Z-dimension.
library_ids
Name for each image that will be associated to each Z-dimension. This should match the ``library_id``
in the corresponding :class:`anndata.AnnData` object.
If `None`, the existing name of the Z-dimension is used for each image.
combine_attrs
How to combine attributes of ``imgs``. By default, all ``imgs`` need to have the same scale
and crop attributes. Use ``combine_attrs = 'override'`` to relax this requirement.
This might lead to a mismatch between :class:`ImageContainer` and :class:`anndata.AnnData` coordinates.
kwargs
Keyword arguments for :func:`xarray.concat`.
Returns
-------
Concatenated :class:`squidpy.img.ImageContainer` with ``imgs`` stacks in Z-dimension.
Raises
------
ValueError
If any of the ``imgs`` have more than 1 Z-dimension or if ``library_ids`` are not unique.
"""
# check that imgs are not already 3d
imgs = list(imgs)
for img in imgs:
if img.data.dims["z"] > 1:
raise ValueError(
f"Currently, can concatenate only images with 1 Z-dimension, found `{img.data.dims['z']}`."
)
# check library_ids
if library_ids is None:
library_ids = [None] * len(imgs)
if len(library_ids) != len(imgs):
raise ValueError(f"Expected library ids to be of length `{len(imgs)}`, found `{len(library_ids)}`.")
_library_ids = np.concatenate(
[img._get_library_ids(library_id, allow_new=True) for img, library_id in zip(imgs, library_ids)]
)
if len(set(_library_ids)) != len(_library_ids):
raise ValueError(f"Found non-unique library ids `{list(_library_ids)}`.")
# add library_id to z dim
prep_imgs = []
for lid, img in zip(_library_ids, imgs):
prep_img = img.copy()
prep_img._data = prep_img.data.assign_coords(z=[lid])
prep_imgs.append(prep_img)
return cls._from_dataset(
xr.concat([img.data for img in prep_imgs], dim="z", combine_attrs=combine_attrs, **kwargs)
)
@classmethod
def load(cls, path: Pathlike_t, lazy: bool = True, chunks: int | None = None) -> ImageContainer:
"""
Load data from a *Zarr* store.
Parameters
----------
path
Path to *Zarr* store.
lazy
Whether to use :mod:`dask` to lazily load image.
chunks
Chunk size for :mod:`dask`. Only used when ``lazy = True``.
Returns
-------
The loaded container.
"""
res = cls()
res.add_img(path, layer="image", chunks=chunks, lazy=True)
return res if lazy else res.compute()
def save(self, path: Pathlike_t, **kwargs: Any) -> None:
"""
Save the container into a *Zarr* store.
Parameters
----------
path
Path to a *Zarr* store.
Returns
-------
Nothing, just saves the container.
"""
attrs = self.data.attrs
try:
self._data = self.data.load() # if we're loading lazily and immediately saving
self.data.attrs = {
k: (v.to_tuple() if isinstance(v, TupleSerializer) else v) for k, v in self.data.attrs.items()
}
            self.data.to_zarr(str(path), mode="w", **kwargs)
finally:
self.data.attrs = attrs
@d.get_sections(base="add_img", sections=["Parameters", "Raises"])
@d.dedent
@inject_docs(id=InferDimensions)
def add_img(
self,
img: Input_t,
layer: str | None = None,
dims: InferDims_t = InferDimensions.DEFAULT.s,
library_id: str | Sequence[str] | None = None,
lazy: bool = True,
chunks: str | tuple[int, ...] | None = None,
copy: bool = True,
**kwargs: Any,
) -> None:
"""
Add a new image to the container.
Parameters
----------
img
In-memory 2, 3 or 4-dimensional array, a URL to a *Zarr* store (ending in *.zarr*),
or a path to an on-disk image.
%(img_layer)s
dims
Where to save channel dimension when reading from a file or loading an array. Valid options are:
- `{id.CHANNELS_LAST.s!r}` - load the last non-spatial dimension as channels.
- `{id.Z_LAST.s!r}` - load the last non-spatial dimension as Z-dimension.
- `{id.DEFAULT.s!r}` - same as `{id.CHANNELS_LAST.s!r}`, but for 4-dimensional arrays,
tries to also load the first dimension as channels if the last non-spatial dimension is 1.
- a sequence of dimension names matching the shape of ``img``, e.g. ``('y', 'x', 'z', 'channels')``.
`'y'`, `'x'` and `'z'` must always be present.
library_id
Name for each Z-dimension of the image. This should correspond to the ``library_id``
in :attr:`anndata.AnnData.uns`.
lazy
Whether to use :mod:`dask` to lazily load image.
chunks
Chunk size for :mod:`dask`. Only used when ``lazy = True``.
copy
Whether to copy the underlying data if ``img`` is an in-memory array.
Returns
-------
Nothing, just adds a new ``layer`` to :attr:`data`.
Raises
------
ValueError
If loading from a file/store with an unknown format or if a supplied channel dimension cannot be aligned.
NotImplementedError
If loading a specific data type has not been implemented.
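Examples
--------
A short sketch; the zero-filled array stands in for real image data:
>>> import numpy as np
>>> img = ImageContainer()
>>> img.add_img(np.zeros((100, 200, 3), dtype=np.uint8), layer="image")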
"""
layer = self._get_next_image_id("image") if layer is None else layer
dims: InferDimensions | Sequence[str] = ( # type: ignore[no-redef]
InferDimensions(dims) if isinstance(dims, str) else dims
)
res: xr.DataArray | None = self._load_img(img, chunks=chunks, layer=layer, copy=copy, dims=dims, **kwargs)
if res is not None:
library_id = self._get_library_ids(library_id, res, allow_new=not len(self))
try:
res = res.assign_coords({"z": library_id})
except ValueError as e:
if "conflicting sizes for dimension 'z'" not in str(e):
raise
# at this point, we know the container is not empty
raise ValueError(
f"Expected image to have `{len(self.library_ids)}` Z-dimension(s), found `{res.sizes['z']}`."
) from None
if TYPE_CHECKING:
assert isinstance(res, xr.DataArray)
logg.info(f"{'Overwriting' if layer in self else 'Adding'} image layer `{layer}`")
try:
self.data[layer] = res
except ValueError as e:
c_dim = res.dims[-1]
if f"along dimension {str(c_dim)!r} cannot be aligned" not in str(e):
raise
channel_dim = self._get_next_channel_id(res)
logg.warning(f"Channel dimension cannot be aligned with an existing one, using `{channel_dim}`")
self.data[layer] = res.rename({res.dims[-1]: channel_dim})
if not lazy:
self.compute(layer)
@singledispatchmethod
def _load_img(self, img: Pathlike_t | Input_t | ImageContainer, layer: str, **kwargs: Any) -> xr.DataArray | None:
if isinstance(img, ImageContainer):
if layer not in img:
raise KeyError(f"Image identifier `{layer}` not found in `{img}`.")
_ = kwargs.pop("dims", None)
return self._load_img(img[layer], **kwargs)
raise NotImplementedError(f"Loading `{type(img).__name__}` is not yet implemented.")
@_load_img.register(str)
@_load_img.register(Path)
def _(
self,
img_path: Pathlike_t,
chunks: int | None = None,
dims: InferDimensions | tuple[str, ...] = InferDimensions.DEFAULT,
**_: Any,
) -> xr.DataArray | None:
def transform_metadata(data: xr.Dataset) -> xr.Dataset:
for key, img in data.items():
if len(img.dims) != 4:
data[key] = img = img.expand_dims({"z": 1}, axis=-2) # assume only channel dim is present
_assert_dims_present(img.dims, include_z=True)
data.attrs[Key.img.coords] = CropCoords.from_tuple(data.attrs.get(Key.img.coords, _NULL_COORDS.to_tuple()))
data.attrs[Key.img.padding] = CropPadding.from_tuple(
data.attrs.get(Key.img.padding, _NULL_PADDING.to_tuple())
)
data.attrs.setdefault(Key.img.mask_circle, False)
data.attrs.setdefault(Key.img.scale, 1)
return data
img_path = str(img_path)
is_url, suffix = validators.url(img_path), Path(img_path).suffix.lower()
logg.debug(f"Loading data from `{img_path}`")
if not is_url and not Path(img_path).exists():
raise OSError(f"Path `{img_path}` does not exist.")
if suffix in (".jpg", ".jpeg", ".png", ".tif", ".tiff"):
return _lazy_load_image(img_path, dims=dims, chunks=chunks)
if suffix == ".zarr" or Path(img_path).is_dir(): # can also be a URL
if len(self._data):
raise ValueError("Loading data from `Zarr` store is disallowed when the container is not empty.")
self._data = transform_metadata(xr.open_zarr(img_path, chunks=chunks))
elif suffix in (".nc", ".cdf"):
if len(self._data):
raise ValueError("Loading data from `NetCDF` is disallowed when the container is not empty.")
self._data = transform_metadata(xr.open_dataset(img_path, chunks=chunks))
else:
raise ValueError(f"Unable to handle path `{img_path}`.")
@_load_img.register(da.Array)
@_load_img.register(np.ndarray)
def _(
self,
img: NDArrayA,
copy: bool = True,
dims: InferDimensions | tuple[str, ...] = InferDimensions.DEFAULT,
**_: Any,
) -> xr.DataArray:
logg.debug(f"Loading `numpy.array` of shape `{img.shape}`")
return self._load_img(xr.DataArray(img), copy=copy, dims=dims, warn=False)
@_load_img.register(xr.DataArray)
def _(
self,
img: xr.DataArray,
copy: bool = True,
warn: bool = True,
dims: InferDimensions | tuple[str, ...] = InferDimensions.DEFAULT,
**_: Any,
) -> xr.DataArray:
logg.debug(f"Loading `xarray.DataArray` of shape `{img.shape}`")
img = img.copy() if copy else img
if not ("y" in img.dims and "x" in img.dims and "z" in img.dims):
_, dims, _, expand_axes = _infer_dimensions(img, infer_dimensions=dims)
if TYPE_CHECKING:
assert isinstance(dims, Iterable)
if warn:
logg.warning(f"Unable to find `y`, `x` or `z` dimension in `{img.dims}`. Renaming to `{dims}`")
# `axes` is always of length 0, 1 or 2
if len(expand_axes):
dimnames = ("z", "channels") if len(expand_axes) == 2 else (("channels",) if "z" in dims else ("z",))
img = img.expand_dims([d for _, d in zip(expand_axes, dimnames)], axis=expand_axes)
img = img.rename(dict(zip(img.dims, dims)))
return img.transpose("y", "x", "z", ...)
@classmethod
@d.dedent
def from_adata(
cls,
adata: AnnData,
img_key: str | None = None,
library_id: Sequence[str] | str | None = None,
spatial_key: str = Key.uns.spatial,
**kwargs: Any,
) -> ImageContainer:
"""
Load an image from :mod:`anndata` object.
Parameters
----------
%(adata)s
img_key
Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']['{library_id}']['images']``.
If `None`, the first key found is used.
library_id
Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']`` specifying which library to access.
spatial_key
Key in :attr:`anndata.AnnData.uns` where spatial metadata is stored.
kwargs
Keyword arguments for :class:`squidpy.im.ImageContainer`.
Returns
-------
The image container.
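Examples
--------
Sketch assuming ``adata`` holds 10X Visium data with a ``'hires'`` image:
>>> img = ImageContainer.from_adata(adata, img_key="hires")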
"""
library_id = Key.uns.library_id(adata, spatial_key, library_id)
if not isinstance(library_id, str):
raise NotImplementedError(_ERROR_NOTIMPLEMENTED_LIBID)
spatial_data = adata.uns[spatial_key][library_id]
if img_key is None:
try:
img_key = next(k for k in spatial_data.get("images", []))
except StopIteration:
raise KeyError(f"No images found in `adata.uns[{spatial_key!r}][{library_id!r}]['images']`") from None
img: NDArrayA | None = spatial_data.get("images", {}).get(img_key, None)
if img is None:
raise KeyError(
f"Unable to find the image in `adata.uns[{spatial_key!r}][{library_id!r}]['images'][{img_key!r}]`."
)
scale = spatial_data.get("scalefactors", {}).get(f"tissue_{img_key}_scalef", None)
if scale is None and "scale" not in kwargs:
logg.warning(
f"Unable to determine the scale factor from "
f"`adata.uns[{spatial_key!r}][{library_id!r}]['scalefactors']['tissue_{img_key}_scalef']`, "
f"using `1.0`. Consider specifying it manually as `scale=...`"
)
scale = 1.0
kwargs.setdefault("scale", scale)
return cls(img, layer=img_key, library_id=library_id, **kwargs)
@d.get_sections(base="crop_corner", sections=["Parameters", "Returns"])
@d.dedent
def crop_corner(
self,
y: FoI_t,
x: FoI_t,
size: FoI_t | tuple[FoI_t, FoI_t] | None = None,
library_id: str | None = None,
scale: float = 1.0,
cval: int | float = 0,
mask_circle: bool = False,
preserve_dtypes: bool = True,
) -> ImageContainer:
"""
Extract a crop from the upper-left corner.
Parameters
----------
%(yx)s
%(size)s
library_id
Name of the Z-dimension to be cropped. If `None`, all Z-dimensions are cropped.
scale
Rescale the crop using :func:`skimage.transform.rescale`.
cval
Fill value to use if ``mask_circle = True`` or if crop goes out of the image boundary.
mask_circle
Whether to mask out values that are not within a circle defined by this crop.
Only available if ``size`` defines a square.
preserve_dtypes
Whether to preserve the data types of the underlying :class:`xarray.DataArray`, even if ``cval``
is of a different type.
Returns
-------
The cropped image of size ``size * scale``.
Raises
------
ValueError
If the crop would completely lie outside of the image or if ``mask_circle = True`` and
``size`` does not define a square.
Notes
-----
If ``preserve_dtypes = True`` but ``cval`` cannot be safely cast, ``cval`` will be set to 0.
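Examples
--------
Sketch with arbitrary placeholder coordinates and size:
>>> crop = img.crop_corner(y=100, x=100, size=(200, 200))
>>> small = img.crop_corner(y=0, x=0, size=500, scale=0.5)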
"""
self._assert_not_empty()
y, x = self._convert_to_pixel_space((y, x))
size = self._get_size(size)
size = self._convert_to_pixel_space(size)
ys, xs = size
_assert_positive(ys, name="height")
_assert_positive(xs, name="width")
_assert_positive(scale, name="scale")
orig = CropCoords(x0=x, y0=y, x1=x + xs, y1=y + ys)
ymin, xmin = self.shape
coords = CropCoords(
x0=min(max(x, 0), xmin), y0=min(max(y, 0), ymin), x1=min(x + xs, xmin), y1=min(y + ys, ymin)
)
if not coords.dy:
raise ValueError("Height of the crop is empty.")
if not coords.dx:
raise ValueError("Width of the crop is empty.")
crop = self.data.isel(x=slice(coords.x0, coords.x1), y=slice(coords.y0, coords.y1)).copy(deep=False)
if len(crop.z) > 1:
crop = crop.sel(z=self._get_library_ids(library_id))
crop.attrs = _update_attrs_coords(crop.attrs, coords)
if orig != coords:
padding = orig - coords
# because padding does not change dtype by itself
for key, arr in crop.items():
if preserve_dtypes:
if not np.can_cast(cval, arr.dtype, casting="safe"):
cval = 0
else:
crop[key] = crop[key].astype(np.dtype(type(cval)), copy=False)
crop = crop.pad(
y=(padding.y_pre, padding.y_post),
x=(padding.x_pre, padding.x_post),
mode="constant",
constant_values=cval,
)
crop.attrs[Key.img.padding] = padding
else:
crop.attrs[Key.img.padding] = _NULL_PADDING
return self._from_dataset(
self._post_process(
data=crop, scale=scale, cval=cval, mask_circle=mask_circle, preserve_dtypes=preserve_dtypes
)
)
def _post_process(
self,
data: xr.Dataset,
scale: FoI_t = 1,
cval: FoI_t = 0,
mask_circle: bool = False,
preserve_dtypes: bool = True,
**_: Any,
) -> xr.Dataset:
def _rescale(arr: xr.DataArray) -> xr.DataArray:
scaling_fn = partial(
rescale, scale=[scale, scale, 1], preserve_range=True, order=1, channel_axis=-1, cval=cval
)
dtype = arr.dtype
if isinstance(arr.data, da.Array):
shape = np.maximum(np.round(scale * np.asarray(arr.shape)), 1)
shape[-1] = arr.shape[-1]
shape[-2] = arr.shape[-2]
return xr.DataArray(
da.from_delayed(delayed(lambda arr: scaling_fn(arr).astype(dtype))(arr), shape=shape, dtype=dtype),
dims=arr.dims,
)
return xr.DataArray(scaling_fn(arr).astype(dtype), dims=arr.dims)
if scale != 1:
attrs = data.attrs
library_ids = data.coords["z"]
data = data.map(_rescale).assign_coords({"z": library_ids})
data.attrs = _update_attrs_scale(attrs, scale)
if mask_circle:
if data.dims["y"] != data.dims["x"]:
raise ValueError(
f"Masking circle is only available for square crops, "
f"found crop of shape `{(data.dims['y'], data.dims['x'])}`."
)
c = data.x.shape[0] // 2
# manually reassign coordinates
library_ids = data.coords["z"]
data = data.where((data.x - c) ** 2 + (data.y - c) ** 2 <= c**2, other=cval).assign_coords(
{"z": library_ids}
)
data.attrs[Key.img.mask_circle] = True
if preserve_dtypes:
for key, arr in self.data.items():
data[key] = data[key].astype(arr.dtype, copy=False)
return data
@d.dedent
def crop_center(
self,
y: FoI_t,
x: FoI_t,
radius: FoI_t | tuple[FoI_t, FoI_t],
**kwargs: Any,
) -> ImageContainer:
"""
Extract a circular crop.
The extracted crop will have shape ``(radius[0] * 2 + 1, radius[1] * 2 + 1)``.
Parameters
----------
%(yx)s
radius
Radius along the ``height`` and ``width`` dimensions, respectively.
kwargs
Keyword arguments for :meth:`crop_corner`.
Returns
-------
%(crop_corner.returns)s
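Examples
--------
Sketch; the resulting crop spans ``201 x 201`` pixels:
>>> crop = img.crop_center(y=500, x=500, radius=100)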
"""
y, x = self._convert_to_pixel_space((y, x))
_assert_in_range(y, 0, self.shape[0], name="height")
_assert_in_range(x, 0, self.shape[1], name="width")
if not isinstance(radius, Iterable):
radius = (radius, radius)
(yr, xr) = self._convert_to_pixel_space(radius)
_assert_non_negative(yr, name="radius height")
_assert_non_negative(xr, name="radius width")
return self.crop_corner( # type: ignore[no-any-return]
y=y - yr, x=x - xr, size=(yr * 2 + 1, xr * 2 + 1), **kwargs
)
@d.dedent
def generate_equal_crops(
self,
size: FoI_t | tuple[FoI_t, FoI_t] | None = None,
as_array: str | bool = False,
squeeze: bool = True,
**kwargs: Any,
) -> Iterator[ImageContainer] | Iterator[dict[str, NDArrayA]]:
"""
Decompose image into equally sized crops.
Parameters
----------
%(size)s
%(as_array)s
squeeze
Remove singleton dimensions from the results if ``as_array = True``.
kwargs
Keyword arguments for :meth:`crop_corner`.
Yields
------
The crops, whose type depends on ``as_array``.
Notes
-----
Crops going outside of the image boundary are padded with ``cval``.
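Examples
--------
Sketch iterating over 500 x 500 tiles of an existing container ``img``:
>>> for crop in img.generate_equal_crops(size=500):
...     pass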
"""
self._assert_not_empty()
size = self._get_size(size)
size = self._convert_to_pixel_space(size)
y, x = self.shape
ys, xs = size
_assert_in_range(ys, 0, y, name="height")
_assert_in_range(xs, 0, x, name="width")
unique_ycoord = np.arange(start=0, stop=(y // ys + (y % ys != 0)) * ys, step=ys)
unique_xcoord = np.arange(start=0, stop=(x // xs + (x % xs != 0)) * xs, step=xs)
ycoords = np.repeat(unique_ycoord, len(unique_xcoord))
xcoords = np.tile(unique_xcoord, len(unique_ycoord))
for y, x in zip(ycoords, xcoords):
yield self.crop_corner(y=y, x=x, size=(ys, xs), **kwargs)._maybe_as_array(
as_array, squeeze=squeeze, lazy=True
)
@d.dedent
def generate_spot_crops(
self,
adata: AnnData,
spatial_key: str = Key.obsm.spatial,
library_id: Sequence[str] | str | None = None,
spot_diameter_key: str = "spot_diameter_fullres",
spot_scale: float = 1.0,
obs_names: Iterable[Any] | None = None,
as_array: str | bool = False,
squeeze: bool = True,
return_obs: bool = False,
**kwargs: Any,
) -> (
Iterator[ImageContainer] | Iterator[NDArrayA] | Iterator[tuple[NDArrayA, ...]] | Iterator[dict[str, NDArrayA]]
):
"""
Iterate over :attr:`anndata.AnnData.obs_names` and extract crops.
Implemented for 10X spatial datasets.
For Z-stacks, the specified ``library_id`` or list of ``library_id`` must match the name(s) of the Z-dimension(s).
Always extracts 2D crops from the specified Z-dimension.
Parameters
----------
%(adata)s
%(spatial_key)s
%(img_library_id)s
spot_diameter_key
Key in :attr:`anndata.AnnData.uns` ``['{spatial_key}']['{library_id}']['scalefactors']``
where the spot diameter is stored.
spot_scale
Scaling factor for the spot diameter. Larger values mean more context.
obs_names
Observations from :attr:`anndata.AnnData.obs_names` for which to generate the crops.
If `None`, all observations are used.
%(as_array)s
squeeze
Remove singleton dimensions from the results if ``as_array = True``.
return_obs
Whether to also yield names from ``obs_names``.
kwargs
Keyword arguments for :meth:`crop_center`.
Yields
------
If ``return_obs = True``, yields a :class:`tuple` ``(crop, obs_name)``. Otherwise, yields just the crops.
The type of the crops depends on ``as_array`` and the number of dimensions on ``squeeze``.
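Examples
--------
Sketch assuming ``adata`` contains Visium spot coordinates matching ``img``:
>>> for crop, obs in img.generate_spot_crops(adata, spot_scale=1.5, return_obs=True):
...     pass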
"""
self._assert_not_empty()
_assert_positive(spot_scale, name="scale")
_assert_spatial_basis(adata, spatial_key)
# limit to obs_names
if obs_names is None:
obs_names = adata.obs_names
obs_names = _assert_non_empty_sequence(obs_names, name="observations")
adata = adata[obs_names, :]
scale = self.data.attrs.get(Key.img.scale, 1)
spatial = adata.obsm[spatial_key][:, :2]
if library_id is None:
try:
library_id = Key.uns.library_id(adata, spatial_key=spatial_key, library_id=None)
if not isinstance(library_id, str):
raise NotImplementedError(_ERROR_NOTIMPLEMENTED_LIBID)
obs_library_ids = [library_id] * adata.n_obs
except ValueError as e:
if "Unable to determine which library id to use" in str(e):
raise ValueError(
str(e)
+ " Or specify a key in `adata.obs` containing a mapping from observations to library ids."
)
else:
raise e
else:
try:
obs_library_ids = adata.obs[library_id]
except KeyError:
logg.debug(
f"Unable to find library ids in `adata.obs[{library_id!r}]`. "
f"Trying in `adata.uns[{spatial_key!r}]`"
)
library_id = Key.uns.library_id(adata, spatial_key=spatial_key, library_id=library_id)
if not isinstance(library_id, str):
raise NotImplementedError(_ERROR_NOTIMPLEMENTED_LIBID)
obs_library_ids = [library_id] * adata.n_obs
lids = set(obs_library_ids)
if len(self.data.z) > 1 and len(lids) == 1:
logg.warning(
f"ImageContainer has `{len(self.data.z)}` Z-dimensions, using library id `{next(iter(lids))}` for all"
)
if adata.n_obs != len(obs_library_ids):
raise ValueError(f"Expected library ids to be of length `{adata.n_obs}`, found `{len(obs_library_ids)}`.")
for i, (obs, lid) in enumerate(zip(adata.obs_names, obs_library_ids)):
# get spot diameter of current obs (might be different library ids)
diameter = (
Key.uns.spot_diameter(
adata, spatial_key=spatial_key, library_id=lid, spot_diameter_key=spot_diameter_key
)
* scale
)
radius = int(round(diameter // 2 * spot_scale))
# get coords in image pixel space from original space
y = int(spatial[i][1] * scale)
x = int(spatial[i][0] * scale)
# if CropCoords exist, need to offset y and x
if self.data.attrs.get(Key.img.coords, _NULL_COORDS) != _NULL_COORDS:
y = int(y - self.data.attrs[Key.img.coords].y0)
x = int(x - self.data.attrs[Key.img.coords].x0)
crop = self.crop_center(y=y, x=x, radius=radius, library_id=obs_library_ids[i], **kwargs)
crop.data.attrs[Key.img.obs] = obs
crop = crop._maybe_as_array(as_array, squeeze=squeeze, lazy=False)
yield (crop, obs) if return_obs else crop
@classmethod
@d.get_sections(base="uncrop", sections=["Parameters", "Returns"])
def uncrop(
cls,
crops: list[ImageContainer],
shape: tuple[int, int] | None = None,
) -> ImageContainer:
"""
Re-assemble image from crops and their positions.
Fills remaining positions with zeros.
Parameters
----------
crops
List of image crops.
shape
Requested image shape as ``(height, width)``. If `None`, it is automatically determined from ``crops``.
Returns
-------
Re-assembled image from ``crops``.
Raises
------
ValueError
If crop metadata was not found or if the requested ``shape`` is smaller than required by ``crops``.
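Examples
--------
Sketch that stitches tiles produced by :meth:`generate_equal_crops`:
>>> crops = list(img.generate_equal_crops(size=500))
>>> stitched = ImageContainer.uncrop(crops)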
"""
if not len(crops):
raise ValueError("No crops were supplied.")
keys = set(crops[0].data.keys())
scales = set()
dy, dx = -1, -1
for crop in crops:
if set(crop.data.keys()) != keys:
raise KeyError(f"Expected to find `{sorted(keys)}` keys, found `{sorted(crop.data.keys())}`.")
coord = crop.data.attrs.get(Key.img.coords, None)
if coord is None:
raise ValueError("Crop does not have coordinate metadata.")
if coord == _NULL_COORDS:
raise ValueError(f"Null coordinates detected `{coord}`.")
scales.add(crop.data.attrs.get(Key.img.scale, None))
dy, dx = max(dy, coord.y0 + coord.dy), max(dx, coord.x0 + coord.dx)
scales.discard(None)
if len(scales) != 1:
raise ValueError(f"Unable to uncrop images of different scales `{sorted((scales))}`.")
scale, *_ = scales
if shape is None:
shape = (dy, dx)
# can be float because coords can be scaled
shape = tuple(map(int, shape)) # type: ignore[assignment]
if len(shape) != 2:
raise ValueError(f"Expected `shape` to be of length `2`, found `{len(shape)}`.")
if shape < (dy, dx):
raise ValueError(f"Requested final image shape `{shape}`, but minimal is `({dy}, {dx})`.")
# create resulting dataset
dataset = xr.Dataset()
dataset.attrs[Key.img.scale] = scale
for key in keys:
img = crop.data[key]
# get shape for this DataArray
dataset[key] = xr.DataArray(
np.zeros(shape + tuple(img.shape[2:]), dtype=img.dtype), dims=img.dims, coords=img.coords
)
# fill data with crops
for crop in crops:
coord = crop.data.attrs[Key.img.coords]
padding = crop.data.attrs.get(Key.img.padding, _NULL_PADDING) # maybe warn
dataset[key][coord.slice] = crop[key][coord.to_image_coordinates(padding=padding).slice]
return cls._from_dataset(dataset)
@d.dedent
def show(
self,
layer: str | None = None,
library_id: str | Sequence[str] | None = None,
channel: int | Sequence[int] | None = None,
channelwise: bool = False,
segmentation_layer: str | None = None,
segmentation_alpha: float = 0.75,
transpose: bool | None = None,
ax: mpl.axes.Axes | None = None,
figsize: tuple[float, float] | None = None,
dpi: int | None = None,
save: Pathlike_t | None = None,
**kwargs: Any,
) -> None:
"""
Show an image within this container.
Parameters
----------
%(img_layer)s
library_id
Name of the Z-dimension to plot. If `None`, plot all Z-dimensions as separate images.
channel
Channels to plot. If `None`, use all channels.
channelwise
Whether to plot each channel separately or not.
segmentation_layer
Segmentation layer to plot over each ax.
segmentation_alpha
Alpha value for ``segmentation_layer``.
transpose
Whether to plot Z-dimensions in columns or in rows. If `None`, it will be set to ``not channelwise``.
ax
Optional :mod:`matplotlib` axes where to plot the image.
If not `None`, ``save``, ``figsize`` and ``dpi`` have no effect.
%(plotting)s
kwargs
Keyword arguments for :meth:`matplotlib.axes.Axes.imshow`.
Returns
-------
%(plotting_returns)s
Raises
------
ValueError
If the number of supplied axes differs from the number of requested Z-dimensions or channels.
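Examples
--------
Sketch assuming the container has a layer named ``'image'``:
>>> img.show(layer="image", channelwise=True)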
"""
from squidpy.pl._utils import save_fig
layer = self._get_layer(layer)
arr: xr.DataArray = self[layer]
library_ids = self._get_library_ids(library_id)
arr = arr.sel(z=library_ids)
if channel is not None:
channel = np.asarray([channel]).ravel() # type: ignore[assignment]
if not len(channel): # type: ignore[arg-type]
raise ValueError("No channels have been selected.")
arr = arr[{arr.dims[-1]: channel}]
else:
channel = np.arange(arr.shape[-1])
if TYPE_CHECKING:
assert isinstance(channel, Sequence)
n_channels = arr.shape[-1]
if n_channels not in (1, 3, 4) and not channelwise:
logg.warning(f"Unable to plot image with `{n_channels}` channels. Setting `channelwise=True`")
channelwise = True
if transpose is None:
transpose = not channelwise
fig = None
nrows, ncols = len(library_ids), (n_channels if channelwise else 1)
if transpose:
nrows, ncols = ncols, nrows
if ax is None:
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(8, 8) if figsize is None else figsize,
dpi=dpi,
tight_layout=True,
squeeze=False,
)
elif isinstance(ax, mpl.axes.Axes):
ax = np.array([ax])
ax = np.asarray(ax)
try:
ax = ax.reshape(nrows, ncols)
except ValueError:
raise ValueError(f"Expected `ax` to be of shape `{(nrows, ncols)}`, found `{ax.shape}`.") from None
if segmentation_layer is not None:
seg_arr = self[segmentation_layer].sel(z=library_ids)
if not seg_arr.attrs.get("segmentation", False):
raise TypeError(f"Expected layer `{segmentation_layer!r}` to be marked as segmentation layer.")
if not np.issubdtype(seg_arr.dtype, np.integer):
raise TypeError(
f"Expected segmentation layer `{segmentation_layer!r}` to be of integer type, "
f"found `{seg_arr.dtype}`."
)
seg_arr = seg_arr.values
seg_cmap = np.array(default_palette, dtype=object)[np.arange(np.max(seg_arr)) % len(default_palette)]
seg_cmap[0] = "#00000000" # transparent background
seg_cmap = ListedColormap(seg_cmap)
else:
seg_arr, seg_cmap = None, None
for z, row in enumerate(ax):
for c, ax_ in enumerate(row):
if transpose:
z, c = c, z
title = layer
if channelwise:
img = arr[..., z, c]
title += f":{channel[c]}"
else:
img = arr[..., z, :]
if len(self.data.coords["z"]) > 1:
title += f", library_id:{library_ids[z]}"
ax_.imshow(img_as_float(img.values, force_copy=False), **kwargs)
if seg_arr is not None:
ax_.imshow(
seg_arr[:, :, z, ...],
cmap=seg_cmap,
interpolation="nearest", # avoid artifacts
alpha=segmentation_alpha,
**{k: v for k, v in kwargs.items() if k not in ("cmap", "interpolation")},
)
ax_.set_title(title)
ax_.set_axis_off()
if save and fig is not None:
save_fig(fig, save)
@d.get_sections(base="_interactive", sections=["Parameters"])
@d.dedent
def interactive(
self,
adata: AnnData,
spatial_key: str = Key.obsm.spatial,
library_key: str | None = None,
library_id: str | Sequence[str] | None = None,
cmap: str = "viridis",
palette: str | None = None,
blending: Literal["opaque", "translucent", "additive"] = "opaque",
symbol: Literal["disc", "square"] = "disc",
key_added: str = "shapes",
) -> Interactive:
"""
Launch :mod:`napari` viewer.
Parameters
----------
%(adata)s
%(spatial_key)s
library_key
Key in :attr:`anndata.AnnData.obs` specifying the mapping between observations and library ids.
Required if the container has more than 1 Z-dimension.
library_id
Subset of library ids to visualize. If `None`, visualize all library ids.
cmap
Colormap for continuous variables.
palette
Colormap for categorical variables in :attr:`anndata.AnnData.obs`. If `None`, use :mod:`scanpy`'s default.
blending
Method which determines how RGB and alpha values of :class:`napari.layers.Shapes` are mixed.
symbol
Symbol to use for the spots. Valid options are:
- `'disc'` - circle.
- `'square'` - square.
key_added
Key where to store :class:`napari.layers.Shapes`, which can be exported by pressing `SHIFT-E`:
- :attr:`anndata.AnnData.obs` ``['{layer_name}_{key_added}']`` - boolean mask containing the selected
cells.
- :attr:`anndata.AnnData.uns` ``['{layer_name}_{key_added}']['meshes']`` - list of :class:`numpy.array`,
defining a mesh in the spatial coordinates.
See :mod:`napari`'s `tutorial <https://napari.org/howtos/layers/shapes.html>`_ for more
information about different mesh types, such as circles, squares etc.
Returns
-------
Interactive view of this container. Screenshot of the canvas can be taken by
:meth:`squidpy.pl.Interactive.screenshot`.
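Examples
--------
Sketch; requires :mod:`napari` and an ``adata`` object matching this container:
>>> viewer = img.interactive(adata)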
"""
from squidpy.pl import Interactive # type: ignore[attr-defined]
return Interactive( # type: ignore[no-any-return]
img=self,
adata=adata,
spatial_key=spatial_key,
library_key=library_key,
library_id=library_id,
cmap=cmap,
palette=palette,
blending=blending,
key_added=key_added,
symbol=symbol,
).show()
@d.dedent
def apply(
self,
func: Callable[..., NDArrayA] | Mapping[str, Callable[..., NDArrayA]],
layer: str | None = None,
new_layer: str | None = None,
channel: int | None = None,
lazy: bool = False,
chunks: str | tuple[int, int] | None = None,
copy: bool = True,
drop: bool = True,
fn_kwargs: Mapping[str, Any] = MappingProxyType({}),
**kwargs: Any,
) -> ImageContainer | None:
"""
Apply a function to a layer within this container.
For each Z-dimension a different function can be defined, using its ``library_id`` name.
For any ``library_id`` not mentioned, the identity function is applied.
Parameters
----------
func
A function or a mapping of ``{'{library_id}': function}`` which takes a :class:`numpy.ndarray` as input
and produces an image-like output.
%(img_layer)s
new_layer
Name of the new layer. If `None` and ``copy = False``, overwrites the data in ``layer``.
channel
Apply ``func`` only over a specific ``channel``. If `None`, use all channels.
chunks
Chunk size for :mod:`dask`. If `None`, don't use :mod:`dask`.
%(copy_cont)s
drop
Whether to drop Z-dimensions that were not selected by ``func``. Only used when ``copy = True``.
fn_kwargs
Keyword arguments for ``func``.
kwargs
Keyword arguments for :func:`dask.array.map_overlap` or :func:`dask.array.map_blocks`, depending whether
``depth`` is present in ``fn_kwargs``. Only used when ``chunks != None``.
Use ``depth`` to control boundary artifacts if ``func`` requires data from neighboring chunks;
by default, ``boundary = 'reflect'`` is used.
Returns
-------
If ``copy = True``, returns a new container with ``layer``.
Raises
------
ValueError
If ``func`` returns a 0- or 1-dimensional array.
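Examples
--------
Sketch that rescales intensities of a hypothetical ``'image'`` layer:
>>> scaled = img.apply(lambda arr: arr / 255.0, layer="image", new_layer="scaled", copy=True)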
"""
def apply_func(func: Callable[..., NDArrayA], arr: xr.DataArray) -> NDArrayA | da.Array:
if chunks is None:
return func(arr.data, **fn_kwargs)
arr = da.asarray(arr.data).rechunk(chunks)
return (
da.map_overlap(func, arr, **fn_kwargs, **kwargs)
if "depth" in kwargs
else da.map_blocks(func, arr, **fn_kwargs, **kwargs, dtype=arr.dtype)
)
if "depth" in kwargs:
kwargs.setdefault("boundary", "reflect")
layer = self._get_layer(layer)
if new_layer is None:
new_layer = layer
arr = self[layer]
library_ids = list(arr.coords["z"].values)
dims, channel_dim = arr.dims, arr.dims[-1]
if channel is not None:
arr = arr[{channel_dim: channel}]
if callable(func):
res = apply_func(func, arr)
new_library_ids = library_ids
else:
res = {}
noop_library_ids = [] if copy and drop else list(set(library_ids) - set(func.keys()))
for key, fn in func.items():
res[key] = apply_func(fn, arr.sel(z=key))
for key in noop_library_ids:
res[key] = arr.sel(z=key).data
new_library_ids = [lid for lid in library_ids if lid in res]
try:
res = da.stack([res[lid] for lid in new_library_ids], axis=2)
except ValueError as e:
if not len(noop_library_ids) or "must have the same shape" not in str(e):
# processing functions returned wrong shape
raise ValueError(
"Unable to stack an array because functions returned arrays of different shapes."
) from e
# funcs might have changed channel dims, replace noops with 0
logg.warning(
f"Function changed the number of channels, cannot use identity "
f"for library ids `{noop_library_ids}`. Replacing with 0"
)
# TODO(michalk8): once (or if) Z-dim is not fixed, always drop ids
tmp = next(iter(res.values()))
for lid in noop_library_ids:
res[lid] = (np.zeros_like if chunks is None else da.zeros_like)(tmp)
res = da.stack([res[lid] for lid in new_library_ids], axis=2)
if res.ndim == 2: # assume that dims are y, x
res = res[..., np.newaxis]
if res.ndim == 3: # assume dims are y, x, z (changing of z dim is not supported)
res = res[..., np.newaxis]
if res.ndim != 4:
raise ValueError(f"Expected `2`, `3` or `4` dimensional array, found `{res.ndim}`.")
if copy:
cont = ImageContainer(
res,
layer=new_layer,
copy=True,
lazy=lazy,
dims=dims,
library_id=new_library_ids,
)
cont.data.attrs = self.data.attrs.copy()
return cont
self.add_img(
res,
layer=new_layer,
lazy=lazy,
copy=new_layer != layer,
dims=dims,
library_id=new_library_ids,
)
@d.dedent
def subset(self, adata: AnnData, spatial_key: str = Key.obsm.spatial, copy: bool = False) -> AnnData:
"""
Subset :class:`anndata.AnnData` using this container.
Useful when this container is a crop of the original image.
Parameters
----------
%(adata)s
%(spatial_key)s
copy
Whether to return a copy of ``adata``.
Returns
-------
Subset of :class:`anndata.AnnData`.
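Examples
--------
Sketch that keeps only observations falling inside a crop:
>>> crop = img.crop_corner(y=0, x=0, size=1000)
>>> adata_crop = crop.subset(adata)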
"""
c: CropCoords = self.data.attrs.get(Key.img.coords, _NULL_COORDS)
if c == _NULL_COORDS: # not a crop
return adata.copy() if copy else adata
_assert_spatial_basis(adata, spatial_key)
coordinates = adata.obsm[spatial_key]
coordinates = coordinates * self.data.attrs.get(Key.img.scale, 1)
mask = (
(coordinates[:, 0] >= c.x0)
& (coordinates[:, 0] <= c.x1)
& (coordinates[:, 1] >= c.y0)
& (coordinates[:, 1] <= c.y1)
)
return adata[mask, :].copy() if copy else adata[mask, :]
def rename(self, old: str, new: str) -> ImageContainer:
"""
Rename a layer.
Parameters
----------
old
Name of the layer to rename.
new
New name.
Returns
-------
Modifies and returns self.
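Examples
--------
Sketch renaming an existing layer:
>>> img = img.rename("image", "hires")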
"""
self._data = self.data.rename_vars({old: new})
return self
def compute(self, layer: str | None = None) -> ImageContainer:
"""
Trigger lazy computation in-place.
Parameters
----------
layer
Layer which to compute. If `None`, compute all layers.
Returns
-------
Modifies and returns self.
"""
if layer is None:
self.data.load()
else:
self[layer].load()
return self
@property
def library_ids(self) -> list[str]:
"""Library ids."""
try:
return list(map(str, self.data.coords["z"].values))
except KeyError:
return []
@library_ids.setter
def library_ids(self, library_ids: str | Sequence[str] | Mapping[str, str]) -> None:
"""Set library ids."""
if isinstance(library_ids, Mapping):
library_ids = [str(library_ids.get(lid, lid)) for lid in self.library_ids]
elif isinstance(library_ids, str):
library_ids = (library_ids,)
library_ids = list(map(str, library_ids))
if len(set(library_ids)) != len(library_ids):
raise ValueError(f"Remapped library ids must be unique, found `{library_ids}`.")
self._data = self.data.assign_coords({"z": library_ids})
@property
def data(self) -> xr.Dataset:
"""Underlying :class:`xarray.Dataset`."""
return self._data
@property
def shape(self) -> tuple[int, int]:
"""Image shape ``(y, x)``."""
if not len(self):
return 0, 0
return self.data.dims["y"], self.data.dims["x"]
def copy(self, deep: bool = False) -> ImageContainer:
"""
Return a copy of self.
Parameters
----------
deep
Whether to make a deep copy or not.
Returns
-------
Copy of self.
"""
return deepcopy(self) if deep else copy(self)
@classmethod
def _from_dataset(cls, data: xr.Dataset, deep: bool | None = None) -> ImageContainer:
"""
Utility function used for initialization.
Parameters
----------
data
The :class:`xarray.Dataset` to use.
deep
If `None`, don't copy the ``data``. If `True`, make a deep copy of the data, otherwise, make a shallow copy.
Returns
-------
The newly created container.
""" # noqa: D401
res = cls()
res._data = data if deep is None else data.copy(deep=deep)
res._data.attrs.setdefault(Key.img.coords, _NULL_COORDS) # can't save None to NetCDF
res._data.attrs.setdefault(Key.img.padding, _NULL_PADDING)
res._data.attrs.setdefault(Key.img.scale, 1.0)
res._data.attrs.setdefault(Key.img.mask_circle, False)
return res
def _maybe_as_array(
self,
as_array: str | Sequence[str] | bool = False,
squeeze: bool = True,
lazy: bool = True,
) -> ImageContainer | dict[str, NDArrayA] | NDArrayA | tuple[NDArrayA, ...]:
res = self
if as_array:
# do not trigger dask computation
res = {key: (res[key].data if lazy else res[key].values) for key in res} # type: ignore[assignment]
if squeeze:
axis = (2,) if len(self.data.z) == 1 else ()
res = {
k: v.squeeze(axis=axis + ((3,) if v.shape[-1] == 1 else ()))
for k, v in res.items() # type: ignore[assignment,attr-defined]
}
# this is just for convenience for DL iterators
if isinstance(as_array, str):
res = res[as_array]
elif isinstance(as_array, Sequence):
res = tuple(res[key] for key in as_array) # type: ignore[assignment]
if lazy:
return res
return res.compute() if isinstance(res, ImageContainer) else res
def _get_next_image_id(self, layer: str) -> str:
pat = re.compile(rf"^{layer}_(\d*)$")
iterator = chain.from_iterable(pat.finditer(k) for k in self.data.keys())
return f"{layer}_{(max(map(lambda m: int(m.groups()[0]), iterator), default=-1) + 1)}"
def _get_next_channel_id(self, channel: str | xr.DataArray) -> str:
if isinstance(channel, xr.DataArray):
channel, *_ = (str(dim) for dim in channel.dims if dim not in ("y", "x", "z"))
pat = re.compile(rf"^{channel}_(\d*)$")
iterator = chain.from_iterable(pat.finditer(v.dims[-1]) for v in self.data.values())
return f"{channel}_{(max(map(lambda m: int(m.groups()[0]), iterator), default=-1) + 1)}"
def _get_library_id(self, library_id: str | None = None) -> str:
self._assert_not_empty()
if library_id is None:
if len(self.library_ids) > 1:
raise ValueError(
f"Unable to determine which library id to use. Please supply one from `{self.library_ids}`."
)
library_id = self.library_ids[0]
if library_id not in self.library_ids:
raise KeyError(f"Library id `{library_id}` not found in `{self.library_ids}`.")
return library_id
def _get_library_ids(
self,
library_id: str | Sequence[str] | None = None,
arr: xr.DataArray | None = None,
allow_new: bool = False,
) -> list[str]:
"""
Get library ids.
Parameters
----------
library_id
Requested library ids.
arr
If the current container is empty, try getting the library ids from the ``arr``.
allow_new
If `True`, don't check if the returned library ids are present in the non-empty container.
This is set to `True` only in :meth:`concat` to allow for remapping.
Returns
-------
The library ids.
"""
if library_id is None:
if len(self):
library_id = self.library_ids
elif isinstance(arr, xr.DataArray):
try:
library_id = list(arr.coords["z"].values)
except (KeyError, AttributeError) as e:
logg.warning(f"Unable to retrieve library ids, reason `{e}`. Using default names")
# at this point, it should have Z-dim
library_id = [str(i) for i in range(arr.sizes["z"])]
else:
raise ValueError("Please specify the number of library ids if the container is empty.")
if isinstance(library_id, str):
library_id = [library_id]
if not isinstance(library_id, Iterable):
raise TypeError(f"Expected library ids to be `iterable`, found `{type(library_id).__name__!r}`.")
res = list(map(str, library_id))
if not len(res):
raise ValueError("No library ids have been selected.")
if not allow_new and len(self) and not (set(res) & set(self.library_ids)):
raise ValueError(f"Invalid library ids have been selected `{res}`. Valid options are `{self.library_ids}`.")
return res
def _get_layer(self, layer: str | None) -> str:
self._assert_not_empty()
if layer is None:
if len(self) > 1:
raise ValueError(
f"Unable to determine which layer to use. Please supply one from `{sorted(self.data.keys())}`."
)
layer = list(self)[0]
if layer not in self:
raise KeyError(f"Image layer `{layer}` not found in `{sorted(self)}`.")
return layer
def _assert_not_empty(self) -> None:
if not len(self):
raise ValueError("The container is empty.")
def _get_size(self, size: FoI_t | tuple[FoI_t | None, FoI_t | None] | None) -> tuple[FoI_t, FoI_t]:
if size is None:
size = (None, None)
if not isinstance(size, Iterable):
size = (size, size)
res = list(size)
if size[0] is None:
res[0] = self.shape[0]
if size[1] is None:
res[1] = self.shape[1]
return tuple(res) # type: ignore[return-value]
def _convert_to_pixel_space(self, size: tuple[FoI_t, FoI_t]) -> tuple[int, int]:
y, x = size
if isinstance(y, float):
_assert_in_range(y, 0, 1, name="y")
y = int(self.shape[0] * y)
if isinstance(x, float):
_assert_in_range(x, 0, 1, name="x")
x = int(self.shape[1] * x)
return y, x
def __delitem__(self, key: str) -> None:
del self.data[key]
def __iter__(self) -> Iterator[str]:
yield from self.data.keys()
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: str) -> xr.DataArray:
return self.data[key]
def __setitem__(self, key: str, value: NDArrayA | xr.DataArray | da.Array) -> None:
if not isinstance(value, (np.ndarray, xr.DataArray, da.Array)):
raise NotImplementedError(f"Adding `{type(value).__name__}` is not yet implemented.")
self.add_img(value, layer=key, copy=True)
def _ipython_key_completions_(self) -> Iterable[str]:
return sorted(map(str, self.data.keys()))
def __copy__(self) -> ImageContainer:
return type(self)._from_dataset(self.data, deep=False)
def __deepcopy__(self, memodict: Mapping[str, Any] = MappingProxyType({})) -> ImageContainer:
return type(self)._from_dataset(self.data, deep=True)
def _repr_html_(self) -> str:
import html
if not len(self):
return f"{self.__class__.__name__} object with 0 layers"
inflection = "" if len(self) <= 1 else "s"
s = f"{self.__class__.__name__} object with {len(self.data.keys())} layer{inflection}:"
style = "text-indent: 25px; margin-top: 0px; margin-bottom: 0px;"
for i, layer in enumerate(self.data.keys()):
s += f"<p style={style!r}><strong>{html.escape(str(layer))}</strong>: "
s += ", ".join(
f"<em>{html.escape(str(dim))}</em> ({shape})"
for dim, shape in zip(self.data[layer].dims, self.data[layer].shape)
)
s += "</p>"
if i == 9 and i < len(self) - 1: # show only first 10 layers
s += f"<p style={style!r}>and {len(self) - i - 1} more...</p>"
break
return s
def __repr__(self) -> str:
return f"{self.__class__.__name__}[shape={self.shape}, layers={sorted(self.data.keys())}]"
def __str__(self) -> str:
return repr(self)
|
[
"squidpy.gr._utils._assert_spatial_basis",
"skimage.util.img_as_float",
"squidpy.pl.Interactive",
"scanpy.logging.debug",
"re.compile",
"squidpy._docs.d.get_sections",
"types.MappingProxyType",
"squidpy.im._io._infer_dimensions",
"dask.array.map_blocks",
"xarray.concat",
"numpy.array",
"copy.deepcopy",
"copy.copy",
"numpy.arange",
"squidpy.gr._utils._assert_non_empty_sequence",
"pathlib.Path",
"numpy.asarray",
"matplotlib.colors.ListedColormap",
"numpy.max",
"numpy.issubdtype",
"squidpy.pl._utils.save_fig",
"squidpy._constants._pkg_constants.Key.uns.spot_diameter",
"matplotlib.pyplot.subplots",
"squidpy.im._coords.CropCoords",
"squidpy._docs.inject_docs",
"squidpy.gr._utils._assert_positive",
"squidpy.im._coords._NULL_PADDING.to_tuple",
"squidpy.im._coords._update_attrs_coords",
"squidpy.im._coords._update_attrs_scale",
"xarray.Dataset",
"scanpy.logging.info",
"validators.url",
"squidpy._constants._constants.InferDimensions",
"xarray.open_zarr",
"xarray.open_dataset",
"squidpy._constants._pkg_constants.Key.uns.library_id",
"squidpy.gr._utils._assert_non_negative",
"dask.array.stack",
"squidpy.im._io._lazy_load_image",
"scanpy.logging.warning",
"dask.array.asarray",
"numpy.can_cast",
"squidpy.im._io._assert_dims_present",
"functools.partial",
"xarray.DataArray",
"dask.array.map_overlap",
"squidpy.im._coords._NULL_COORDS.to_tuple",
"squidpy.gr._utils._assert_in_range",
"typing.TypeVar"
] |
[((1741, 1763), 'typing.TypeVar', 'TypeVar', (['"""Interactive"""'], {}), "('Interactive')\n", (1748, 1763), False, 'from typing import Any, Union, Mapping, TypeVar, Callable, Iterable, Iterator, Sequence, TYPE_CHECKING\n'), ((7609, 7674), 'squidpy._docs.d.get_sections', 'd.get_sections', ([], {'base': '"""add_img"""', 'sections': "['Parameters', 'Raises']"}), "(base='add_img', sections=['Parameters', 'Raises'])\n", (7623, 7674), False, 'from squidpy._docs import d, inject_docs\n'), ((7694, 7725), 'squidpy._docs.inject_docs', 'inject_docs', ([], {'id': 'InferDimensions'}), '(id=InferDimensions)\n', (7705, 7725), False, 'from squidpy._docs import d, inject_docs\n'), ((17930, 18000), 'squidpy._docs.d.get_sections', 'd.get_sections', ([], {'base': '"""crop_corner"""', 'sections': "['Parameters', 'Returns']"}), "(base='crop_corner', sections=['Parameters', 'Returns'])\n", (17944, 18000), False, 'from squidpy._docs import d, inject_docs\n'), ((31513, 31578), 'squidpy._docs.d.get_sections', 'd.get_sections', ([], {'base': '"""uncrop"""', 'sections': "['Parameters', 'Returns']"}), "(base='uncrop', sections=['Parameters', 'Returns'])\n", (31527, 31578), False, 'from squidpy._docs import d, inject_docs\n'), ((39806, 39866), 'squidpy._docs.d.get_sections', 'd.get_sections', ([], {'base': '"""_interactive"""', 'sections': "['Parameters']"}), "(base='_interactive', sections=['Parameters'])\n", (39820, 39866), False, 'from squidpy._docs import d, inject_docs\n'), ((3132, 3144), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (3142, 3144), True, 'import xarray as xr\n'), ((13101, 13146), 'scanpy.logging.debug', 'logg.debug', (['f"""Loading data from `{img_path}`"""'], {}), "(f'Loading data from `{img_path}`')\n", (13111, 13146), True, 'from scanpy import logging as logg\n'), ((14335, 14394), 'scanpy.logging.debug', 'logg.debug', (['f"""Loading `numpy.array` of shape `{img.shape}`"""'], {}), "(f'Loading `numpy.array` of shape `{img.shape}`')\n", (14345, 14394), True, 'from scanpy import logging as logg\n'), ((14748, 14812), 'scanpy.logging.debug', 'logg.debug', (['f"""Loading `xarray.DataArray` of shape `{img.shape}`"""'], {}), "(f'Loading `xarray.DataArray` of shape `{img.shape}`')\n", (14758, 14812), True, 'from scanpy import logging as logg\n'), ((16595, 16645), 'squidpy._constants._pkg_constants.Key.uns.library_id', 'Key.uns.library_id', (['adata', 'spatial_key', 'library_id'], {}), '(adata, spatial_key, library_id)\n', (16613, 16645), False, 'from squidpy._constants._pkg_constants import Key\n'), ((19737, 19772), 'squidpy.gr._utils._assert_positive', '_assert_positive', (['ys'], {'name': '"""height"""'}), "(ys, name='height')\n", (19753, 19772), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((19781, 19815), 'squidpy.gr._utils._assert_positive', '_assert_positive', (['xs'], {'name': '"""width"""'}), "(xs, name='width')\n", (19797, 19815), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((19824, 19861), 'squidpy.gr._utils._assert_positive', '_assert_positive', (['scale'], {'name': '"""scale"""'}), "(scale, name='scale')\n", (19840, 19861), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((19878, 19922), 'squidpy.im._coords.CropCoords', 'CropCoords', ([], {'x0': 'x', 'y0': 'y', 'x1': '(x + xs)', 
'y1': '(y + ys)'}), '(x0=x, y0=y, x1=x + xs, y1=y + ys)\n', (19888, 19922), False, 'from squidpy.im._coords import CropCoords, CropPadding, _NULL_COORDS, _NULL_PADDING, TupleSerializer, _update_attrs_scale, _update_attrs_coords\n'), ((20498, 20538), 'squidpy.im._coords._update_attrs_coords', '_update_attrs_coords', (['crop.attrs', 'coords'], {}), '(crop.attrs, coords)\n', (20518, 20538), False, 'from squidpy.im._coords import CropCoords, CropPadding, _NULL_COORDS, _NULL_PADDING, TupleSerializer, _update_attrs_scale, _update_attrs_coords\n'), ((24131, 24183), 'squidpy.gr._utils._assert_in_range', '_assert_in_range', (['y', '(0)', 'self.shape[0]'], {'name': '"""height"""'}), "(y, 0, self.shape[0], name='height')\n", (24147, 24183), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((24192, 24243), 'squidpy.gr._utils._assert_in_range', '_assert_in_range', (['x', '(0)', 'self.shape[1]'], {'name': '"""width"""'}), "(x, 0, self.shape[1], name='width')\n", (24208, 24243), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((24393, 24439), 'squidpy.gr._utils._assert_non_negative', '_assert_non_negative', (['yr'], {'name': '"""radius height"""'}), "(yr, name='radius height')\n", (24413, 24439), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((24448, 24493), 'squidpy.gr._utils._assert_non_negative', '_assert_non_negative', (['xr'], {'name': '"""radius width"""'}), "(xr, name='radius width')\n", (24468, 24493), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((25605, 25646), 'squidpy.gr._utils._assert_in_range', '_assert_in_range', (['ys', '(0)', 'y'], {'name': '"""height"""'}), "(ys, 0, y, name='height')\n", (25621, 25646), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((25655, 25695), 'squidpy.gr._utils._assert_in_range', '_assert_in_range', (['xs', '(0)', 'x'], {'name': '"""width"""'}), "(xs, 0, x, name='width')\n", (25671, 25695), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((25721, 25785), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '((y // ys + (y % ys != 0)) * ys)', 'step': 'ys'}), '(start=0, stop=(y // ys + (y % ys != 0)) * ys, step=ys)\n', (25730, 25785), True, 'import numpy as np\n'), ((25810, 25874), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '((x // xs + (x % xs != 0)) * xs)', 'step': 'xs'}), '(start=0, stop=(x // xs + (x % xs != 0)) * xs, step=xs)\n', (25819, 25874), True, 'import numpy as np\n'), ((28212, 28254), 'squidpy.gr._utils._assert_positive', '_assert_positive', (['spot_scale'], {'name': '"""scale"""'}), "(spot_scale, name='scale')\n", (28228, 28254), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((28263, 28304), 'squidpy.gr._utils._assert_spatial_basis', '_assert_spatial_basis', (['adata', 'spatial_key'], {}), '(adata, spatial_key)\n', (28284, 28304), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, 
_assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((28425, 28483), 'squidpy.gr._utils._assert_non_empty_sequence', '_assert_non_empty_sequence', (['obs_names'], {'name': '"""observations"""'}), "(obs_names, name='observations')\n", (28451, 28483), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((33750, 33762), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (33760, 33762), True, 'import xarray as xr\n'), ((37629, 37643), 'numpy.asarray', 'np.asarray', (['ax'], {}), '(ax)\n', (37639, 37643), True, 'import numpy as np\n'), ((42896, 42916), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (42912, 42916), False, 'from types import MappingProxyType\n'), ((48576, 48617), 'squidpy.gr._utils._assert_spatial_basis', '_assert_spatial_basis', (['adata', 'spatial_key'], {}), '(adata, spatial_key)\n', (48597, 48617), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((53340, 53371), 're.compile', 're.compile', (['f"""^{layer}_(\\\\d*)$"""'], {}), "(f'^{layer}_(\\\\d*)$')\n", (53350, 53371), False, 'import re\n'), ((53774, 53807), 're.compile', 're.compile', (['f"""^{channel}_(\\\\d*)$"""'], {}), "(f'^{channel}_(\\\\d*)$')\n", (53784, 53807), False, 'import re\n'), ((58789, 58809), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (58805, 58809), False, 'from types import MappingProxyType\n'), ((6160, 6255), 'xarray.concat', 'xr.concat', (['[img.data for img in prep_imgs]'], {'dim': '"""z"""', 'combine_attrs': 'combine_attrs'}), "([img.data for img in prep_imgs], dim='z', combine_attrs=\n combine_attrs, **kwargs)\n", (6169, 6255), True, 'import xarray as xr\n'), ((9987, 10008), 'squidpy._constants._constants.InferDimensions', 'InferDimensions', (['dims'], {}), '(dims)\n', (10002, 10008), False, 'from squidpy._constants._constants import InferDimensions\n'), ((10837, 10924), 'scanpy.logging.info', 'logg.info', (['f"""{\'Overwriting\' if layer in self else \'Adding\'} image layer `{layer}`"""'], {}), '(\n f"{\'Overwriting\' if layer in self else \'Adding\'} image layer `{layer}`")\n', (10846, 10924), True, 'from scanpy import logging as logg\n'), ((13037, 13061), 'validators.url', 'validators.url', (['img_path'], {}), '(img_path)\n', (13051, 13061), False, 'import validators\n'), ((13352, 13404), 'squidpy.im._io._lazy_load_image', '_lazy_load_image', (['img_path'], {'dims': 'dims', 'chunks': 'chunks'}), '(img_path, dims=dims, chunks=chunks)\n', (13368, 13404), False, 'from squidpy.im._io import _lazy_load_image, _infer_dimensions, _assert_dims_present\n'), ((14426, 14443), 'xarray.DataArray', 'xr.DataArray', (['img'], {}), '(img)\n', (14438, 14443), True, 'import xarray as xr\n'), ((14968, 15013), 'squidpy.im._io._infer_dimensions', '_infer_dimensions', (['img'], {'infer_dimensions': 'dims'}), '(img, infer_dimensions=dims)\n', (14985, 15013), False, 'from squidpy.im._io import _lazy_load_image, _infer_dimensions, _assert_dims_present\n'), ((17507, 17724), 'scanpy.logging.warning', 'logg.warning', (['f"""Unable to determine the scale factor from `adata.uns[{spatial_key!r}][{library_id!r}][\'scalefactors\'][\'tissue_{img_key}_scalef\']`, using `1.0`. 
Consider specifying it manually as `scale=...`"""'], {}), '(\n f"Unable to determine the scale factor from `adata.uns[{spatial_key!r}][{library_id!r}][\'scalefactors\'][\'tissue_{img_key}_scalef\']`, using `1.0`. Consider specifying it manually as `scale=...`"\n )\n', (17519, 17724), True, 'from scanpy import logging as logg\n'), ((21799, 21902), 'functools.partial', 'partial', (['rescale'], {'scale': '[scale, scale, 1]', 'preserve_range': '(True)', 'order': '(1)', 'channel_axis': '(-1)', 'cval': 'cval'}), '(rescale, scale=[scale, scale, 1], preserve_range=True, order=1,\n channel_axis=-1, cval=cval)\n', (21806, 21902), False, 'from functools import partial\n'), ((22654, 22687), 'squidpy.im._coords._update_attrs_scale', '_update_attrs_scale', (['attrs', 'scale'], {}), '(attrs, scale)\n', (22673, 22687), False, 'from squidpy.im._coords import CropCoords, CropPadding, _NULL_COORDS, _NULL_PADDING, TupleSerializer, _update_attrs_scale, _update_attrs_coords\n'), ((36699, 36723), 'numpy.arange', 'np.arange', (['arr.shape[-1]'], {}), '(arr.shape[-1])\n', (36708, 36723), True, 'import numpy as np\n'), ((36907, 36997), 'scanpy.logging.warning', 'logg.warning', (['f"""Unable to plot image with `{n_channels}`. Setting `channelwise=True`"""'], {}), "(\n f'Unable to plot image with `{n_channels}`. Setting `channelwise=True`')\n", (36919, 36997), True, 'from scanpy import logging as logg\n'), ((37298, 37431), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '((8, 8) if figsize is None else figsize)', 'dpi': 'dpi', 'tight_layout': '(True)', 'squeeze': '(False)'}), '(nrows=nrows, ncols=ncols, figsize=(8, 8) if figsize is None else\n figsize, dpi=dpi, tight_layout=True, squeeze=False)\n', (37310, 37431), True, 'import matplotlib.pyplot as plt\n'), ((38620, 38644), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['seg_cmap'], {}), '(seg_cmap)\n', (38634, 38644), False, 'from matplotlib.colors import ListedColormap\n'), ((51257, 51271), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (51265, 51271), False, 'from copy import copy, deepcopy\n'), ((51285, 51295), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (51289, 51295), False, 'from copy import copy, deepcopy\n'), ((57695, 57730), 'squidpy.gr._utils._assert_in_range', '_assert_in_range', (['y', '(0)', '(1)'], {'name': '"""y"""'}), "(y, 0, 1, name='y')\n", (57711, 57730), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((57815, 57850), 'squidpy.gr._utils._assert_in_range', '_assert_in_range', (['x', '(0)', '(1)'], {'name': '"""x"""'}), "(x, 0, 1, name='x')\n", (57831, 57850), False, 'from squidpy.gr._utils import _assert_in_range, _assert_positive, _assert_non_negative, _assert_spatial_basis, _assert_non_empty_sequence\n'), ((12517, 12563), 'squidpy.im._io._assert_dims_present', '_assert_dims_present', (['img.dims'], {'include_z': '(True)'}), '(img.dims, include_z=True)\n', (12537, 12563), False, 'from squidpy.im._io import _lazy_load_image, _infer_dimensions, _assert_dims_present\n'), ((13674, 13711), 'xarray.open_zarr', 'xr.open_zarr', (['img_path'], {'chunks': 'chunks'}), '(img_path, chunks=chunks)\n', (13686, 13711), True, 'import xarray as xr\n'), ((15131, 15236), 'scanpy.logging.warning', 'logg.warning', (['f"""Unable to find `y`, `x` or `z` dimension in `{img.dims}`. Renaming to `{dims}`"""'], {}), "(\n f'Unable to find `y`, `x` or `z` dimension in `{img.dims}`. 
Renaming to `{dims}`'\n )\n", (15143, 15236), True, 'from scanpy import logging as logg\n'), ((28702, 28769), 'squidpy._constants._pkg_constants.Key.uns.library_id', 'Key.uns.library_id', (['adata'], {'spatial_key': 'spatial_key', 'library_id': 'None'}), '(adata, spatial_key=spatial_key, library_id=None)\n', (28720, 28769), False, 'from squidpy._constants._pkg_constants import Key\n'), ((30542, 30652), 'squidpy._constants._pkg_constants.Key.uns.spot_diameter', 'Key.uns.spot_diameter', (['adata'], {'spatial_key': 'spatial_key', 'library_id': 'lid', 'spot_diameter_key': 'spot_diameter_key'}), '(adata, spatial_key=spatial_key, library_id=lid,\n spot_diameter_key=spot_diameter_key)\n', (30563, 30652), False, 'from squidpy._constants._pkg_constants import Key\n'), ((37600, 37614), 'numpy.array', 'np.array', (['[ax]'], {}), '([ax])\n', (37608, 37614), True, 'import numpy as np\n'), ((38140, 38180), 'numpy.issubdtype', 'np.issubdtype', (['seg_arr.dtype', 'np.integer'], {}), '(seg_arr.dtype, np.integer)\n', (38153, 38180), True, 'import numpy as np\n'), ((38442, 38481), 'numpy.array', 'np.array', (['default_palette'], {'dtype': 'object'}), '(default_palette, dtype=object)\n', (38450, 38481), True, 'import numpy as np\n'), ((39780, 39799), 'squidpy.pl._utils.save_fig', 'save_fig', (['fig', 'save'], {}), '(fig, save)\n', (39788, 39799), False, 'from squidpy.pl._utils import save_fig\n'), ((42131, 42330), 'squidpy.pl.Interactive', 'Interactive', ([], {'img': 'self', 'adata': 'adata', 'spatial_key': 'spatial_key', 'library_key': 'library_key', 'library_id': 'library_id', 'cmap': 'cmap', 'palette': 'palette', 'blending': 'blending', 'key_added': 'key_added', 'symbol': 'symbol'}), '(img=self, adata=adata, spatial_key=spatial_key, library_key=\n library_key, library_id=library_id, cmap=cmap, palette=palette,\n blending=blending, key_added=key_added, symbol=symbol)\n', (42142, 42330), False, 'from squidpy.pl import Interactive\n'), ((44855, 44903), 'dask.array.map_overlap', 'da.map_overlap', (['func', 'arr'], {}), '(func, arr, **fn_kwargs, **kwargs)\n', (44869, 44903), True, 'import dask.array as da\n'), ((44962, 45026), 'dask.array.map_blocks', 'da.map_blocks', (['func', 'arr'], {'dtype': 'arr.dtype'}), '(func, arr, **fn_kwargs, **kwargs, dtype=arr.dtype)\n', (44975, 45026), True, 'import dask.array as da\n'), ((45976, 46031), 'dask.array.stack', 'da.stack', (['[res[lid] for lid in new_library_ids]'], {'axis': '(2)'}), '([res[lid] for lid in new_library_ids], axis=2)\n', (45984, 46031), True, 'import dask.array as da\n'), ((11238, 11344), 'scanpy.logging.warning', 'logg.warning', (['f"""Channel dimension cannot be aligned with an existing one, using `{channel_dim}`"""'], {}), "(\n f'Channel dimension cannot be aligned with an existing one, using `{channel_dim}`'\n )\n", (11250, 11344), True, 'from scanpy import logging as logg\n'), ((12659, 12682), 'squidpy.im._coords._NULL_COORDS.to_tuple', '_NULL_COORDS.to_tuple', ([], {}), '()\n', (12680, 12682), False, 'from squidpy.im._coords import CropCoords, CropPadding, _NULL_COORDS, _NULL_PADDING, TupleSerializer, _update_attrs_scale, _update_attrs_coords\n'), ((12799, 12823), 'squidpy.im._coords._NULL_PADDING.to_tuple', '_NULL_PADDING.to_tuple', ([], {}), '()\n', (12821, 12823), False, 'from squidpy.im._coords import CropCoords, CropPadding, _NULL_COORDS, _NULL_PADDING, TupleSerializer, _update_attrs_scale, _update_attrs_coords\n'), ((13438, 13452), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (13442, 13452), False, 'from pathlib import 
Path\n'), ((13940, 13980), 'xarray.open_dataset', 'xr.open_dataset', (['img_path'], {'chunks': 'chunks'}), '(img_path, chunks=chunks)\n', (13955, 13980), True, 'import xarray as xr\n'), ((29459, 29581), 'scanpy.logging.debug', 'logg.debug', (['f"""Unable to find library ids in `adata.obs[{library_id!r}]`. Trying in `adata.uns[{spatial_key!r}]`"""'], {}), "(\n f'Unable to find library ids in `adata.obs[{library_id!r}]`. Trying in `adata.uns[{spatial_key!r}]`'\n )\n", (29469, 29581), True, 'from scanpy import logging as logg\n'), ((29663, 29736), 'squidpy._constants._pkg_constants.Key.uns.library_id', 'Key.uns.library_id', (['adata'], {'spatial_key': 'spatial_key', 'library_id': 'library_id'}), '(adata, spatial_key=spatial_key, library_id=library_id)\n', (29681, 29736), False, 'from squidpy._constants._pkg_constants import Key\n'), ((36431, 36452), 'numpy.asarray', 'np.asarray', (['[channel]'], {}), '([channel])\n', (36441, 36452), True, 'import numpy as np\n'), ((39198, 39240), 'skimage.util.img_as_float', 'img_as_float', (['img.values'], {'force_copy': '(False)'}), '(img.values, force_copy=False)\n', (39210, 39240), False, 'from skimage.util import img_as_float\n'), ((44781, 44801), 'dask.array.asarray', 'da.asarray', (['arr.data'], {}), '(arr.data)\n', (44791, 44801), True, 'import dask.array as da\n'), ((46490, 46632), 'scanpy.logging.warning', 'logg.warning', (['f"""Function changed the number of channels, cannot use identity for library ids `{noop_library_ids}`. Replacing with 0"""'], {}), "(\n f'Function changed the number of channels, cannot use identity for library ids `{noop_library_ids}`. Replacing with 0'\n )\n", (46502, 46632), True, 'from scanpy import logging as logg\n'), ((46972, 47027), 'dask.array.stack', 'da.stack', (['[res[lid] for lid in new_library_ids]'], {'axis': '(2)'}), '([res[lid] for lid in new_library_ids], axis=2)\n', (46980, 47027), True, 'import dask.array as da\n'), ((13063, 13077), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (13067, 13077), False, 'from pathlib import Path\n'), ((13178, 13192), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (13182, 13192), False, 'from pathlib import Path\n'), ((20771, 20815), 'numpy.can_cast', 'np.can_cast', (['cval', 'arr.dtype'], {'casting': '"""safe"""'}), "(cval, arr.dtype, casting='safe')\n", (20782, 20815), True, 'import numpy as np\n'), ((38492, 38507), 'numpy.max', 'np.max', (['seg_arr'], {}), '(seg_arr)\n', (38498, 38507), True, 'import numpy as np\n'), ((22059, 22080), 'numpy.asarray', 'np.asarray', (['arr.shape'], {}), '(arr.shape)\n', (22069, 22080), True, 'import numpy as np\n'), ((55559, 55646), 'scanpy.logging.warning', 'logg.warning', (['f"""Unable to retrieve library ids, reason `{e}`. Using default names"""'], {}), "(\n f'Unable to retrieve library ids, reason `{e}`. Using default names')\n", (55571, 55646), True, 'from scanpy import logging as logg\n')]
|
# -*- coding: utf-8 -*-
# python3 make.py -loc "data/lines/1.csv" -width 3840 -height 2160 -overwrite
# python3 make.py -loc "data/lines/1.csv" -width 3840 -height 2160 -rtl -overwrite
# python3 combine.py
# python3 make.py -data "data/lines/A_LEF.csv" -width 3840 -height 2160 -loc "data/lines/C.csv" -img "img/A.png" -sw 0.1405 -tw 0.145 -overwrite
# python3 make.py -data "data/lines/A_LEF.csv" -width 3840 -height 2160 -loc "data/lines/C.csv" -img "img/A.png" -sw 0.1405 -tw 0.145 -rtl -overwrite
# python3 combine.py -in "output/subway_line_A.mp4,output/subway_line_A_rtl.mp4" -out "output/subway_line_A_loop.mp4"
# python3 make.py -data "data/lines/7.csv" -width 3840 -height 2160 -img "img/7.png" -sw 0.11725 -tw 0.135625 -reverse -overwrite
# python3 make.py -data "data/lines/7.csv" -width 3840 -height 2160 -img "img/7.png" -sw 0.11725 -tw 0.135625 -reverse -rtl -overwrite
# python3 combine.py -in "output/subway_line_7.mp4,output/subway_line_7_rtl.mp4" -out "output/subway_line_7_loop.mp4"
import argparse
import math
import numpy as np
import os
from pprint import pprint
import sys
from lib import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-data', dest="DATA_FILE", default="data/lines/2.csv", help="Input csv file with preprocessed data")
parser.add_argument('-loc', dest="DATA_LOCAL_FILE", default="", help="Input csv file with preprocessed data of a local train that should 'fill in' stations in-between express trains")
parser.add_argument('-img', dest="IMAGE_FILE", default="img/2.png", help="Subway bullet image")
parser.add_argument('-instruments', dest="INSTRUMENTS_FILE", default="data/instruments.csv", help="Input csv file with instruments config")
parser.add_argument('-dir', dest="MEDIA_DIRECTORY", default="audio/", help="Input media directory")
parser.add_argument('-width', dest="WIDTH", default=1920, type=int, help="Output video width")
parser.add_argument('-height', dest="HEIGHT", default=1080, type=int, help="Output video height")
parser.add_argument('-pad0', dest="PAD_START", default=2000, type=int, help="Pad start in ms")
parser.add_argument('-pad1', dest="PAD_END", default=2000, type=int, help="Pad end in ms")
parser.add_argument('-fps', dest="FPS", default=30, type=int, help="Output video frames per second")
parser.add_argument('-outframe', dest="OUTPUT_FRAME", default="tmp/line_%s/frame.%s.png", help="Output frames pattern")
parser.add_argument('-aout', dest="AUDIO_OUTPUT_FILE", default="output/subway_line_%s.mp3", help="Output audio file")
parser.add_argument('-dout', dest="DATA_OUTPUT_FILE", default="output/subway_line_%s.csv", help="Output data file")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/subway_line_%s.mp4", help="Output media file")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing files?")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just view statistics?")
parser.add_argument('-reverse', dest="REVERSE", action="store_true", help="Reverse the line?")
parser.add_argument('-rtl', dest="RIGHT_TO_LEFT", action="store_true", help="Play from right to left?")
parser.add_argument('-ao', dest="AUDIO_ONLY", action="store_true", help="Only output audio?")
parser.add_argument('-vo', dest="VIDEO_ONLY", action="store_true", help="Only output video?")
parser.add_argument('-do', dest="DATA_ONLY", action="store_true", help="Only output data?")
parser.add_argument('-viz', dest="VISUALIZE_SEQUENCE", action="store_true", help="Output a visualization of the sequence")
parser.add_argument('-plot', dest="PLOT_SEQUENCE", action="store_true", help="Display a plot chart of the sequence")
parser.add_argument('-frame', dest="SINGLE_FRAME", default=-1, type=int, help="Output just a single frame")
# Music config
parser.add_argument('-db', dest="MASTER_DB", type=float, default=-2.4, help="Master +/- decibels to be applied to final audio")
parser.add_argument('-bpm', dest="BPM", type=int, default=120, help="Beats per minute, e.g. 60, 75, 100, 120, 150")
parser.add_argument('-mpb', dest="METERS_PER_BEAT", type=int, default=75, help="Meters of track per beat; higher numbers create shorter songs")
parser.add_argument('-dpb', dest="DIVISIONS_PER_BEAT", type=int, default=4, help="e.g. 4 = quarter notes, 8 = eighth notes")
parser.add_argument('-pm', dest="PRICE_MULTIPLIER", type=float, default=1.3, help="Makes instruments more expensive; higher numbers = fewer instruments playing")
parser.add_argument('-vdur', dest="VARIANCE_MS", type=int, default=20, help="+/- milliseconds an instrument note should be off by to give it a little more 'natural' feel")
# Visual design config
parser.add_argument('-sw', dest="STATION_WIDTH", type=float, default=0.125, help="Minimum station width as a percent of the screen width; adjust this to change the overall visual speed")
parser.add_argument('-tw', dest="TEXT_WIDTH", type=float, default=0.15, help="Station text width as a percent of the screen width")
parser.add_argument('-cy', dest="CENTER_Y", type=float, default=0.475, help="Center y as a percent of screen height")
parser.add_argument('-bty', dest="BOROUGH_TEXT_Y", type=float, default=0.55, help="Borough text center y as a percent of screen height")
parser.add_argument('-sty', dest="STATION_TEXT_Y", type=float, default=0.375, help="Station text center y as a percent of screen height")
parser.add_argument('-cw', dest="CIRCLE_WIDTH", type=int, default=60, help="Station bullet width (diameter) in pixels assuming 1920x1080")
parser.add_argument('-lh', dest="LINE_HEIGHT", type=int, default=24, help="Height of horizontal line in pixels assuming 1920x1080")
parser.add_argument('-bh', dest="BOUNDARY_HEIGHT", type=int, default=166, help="Height of boundary line in pixels assuming 1920x1080")
parser.add_argument('-bw', dest="BOUNDARY_WIDTH", type=int, default=3, help="Width of boundary line in pixels assuming 1920x1080")
parser.add_argument('-bm', dest="BOUNDARY_MARGIN", type=int, default=48, help="Horizontal margin of boundary line in pixels assuming 1920x1080")
parser.add_argument('-mw', dest="MARKER_WIDTH", type=int, default=8, help="Width of the vertical center marker in pixels assuming 1920x1080")
parser.add_argument('-sts', dest="STATION_TEXT_SIZE", type=int, default=30, help="Station text size in pixels assuming 1920x1080")
parser.add_argument('-stm', dest="STATION_TEXT_MARGIN", type=int, default=20, help="Station text bottom margin in pixels assuming 1920x1080")
parser.add_argument('-slm', dest="STATION_LETTER_MARGIN", type=int, default=1, help="Space after each station text letter in pixels assuming 1920x1080")
parser.add_argument('-bts', dest="BOROUGH_TEXT_SIZE", type=int, default=24, help="Borough text size in pixels assuming 1920x1080")
parser.add_argument('-blm', dest="BOROUGH_LETTER_MARGIN", type=int, default=1, help="Space after each borough text letter in pixels assuming 1920x1080")
parser.add_argument('-bthresh', dest="BOROUGH_THRESHOLD", type=float, default=0.375, help="Minimum width available for displaying borough dividers")
parser.add_argument('-dw', dest="DIVIDER_WIDTH", type=int, default=28, help="Line divider in pixels assuming 1920x1080")
parser.add_argument('-dd', dest="DIVIDER_DISTANCE", type=float, default=0.333, help="Distance between dividers as a percent of screen width")
parser.add_argument('-dc', dest="DIVIDER_COLOR", default="#666666", help="Divider color")
parser.add_argument('-bg', dest="BG_COLOR", default="#000000", help="Background color")
parser.add_argument('-tc', dest="TEXT_COLOR", default="#eeeeee", help="Text color")
parser.add_argument('-atc', dest="ALT_TEXT_COLOR", default="#aaaaaa", help="Secondary text color")
parser.add_argument('-mc', dest="MARKER_COLOR", default="#dddddd", help="Marker color")
parser.add_argument('-sfont', dest="STATION_FONT", default="fonts/OpenSans-Bold.ttf", help="Station font")
parser.add_argument('-bfont', dest="BOROUGH_FONT", default="fonts/OpenSans-SemiBold.ttf", help="Borough font")
parser.add_argument('-map', dest="MAP_IMAGE", default="img/nyc.png", help="Map image file")
parser.add_argument('-mcoord', dest="MAP_COORDS", default=" -74.1261,40.9087,-73.7066,40.5743", help="Map bounding box as 'lon0,lat0,lon1,lat1' (top-left and bottom-right points)")
parser.add_argument('-mapm', dest="MAP_MARGIN", type=int, default=30, help="Margin of map in pixels assuming 1920x1080")
parser.add_argument('-mapw', dest="MAP_W", type=int, default=260, help="Map width in pixels assuming 1920x1080")
parser.add_argument('-mlw', dest="MAP_LINE_WIDTH", type=int, default=4, help="Map line width in pixels assuming 1920x1080")
parser.add_argument('-mlc', dest="MAP_LINE_COLOR", default="#eeeeee", help="Map line color")
a = parser.parse_args()
if not a.AUDIO_ONLY:
import gizeh
from PIL import Image, ImageDraw, ImageFont
startTime = logTime()
# Calculations
BEAT_MS = roundInt(60.0 / a.BPM * 1000)
ROUND_TO_NEAREST = roundInt(1.0 * BEAT_MS / a.DIVISIONS_PER_BEAT)
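# e.g. with the defaults (120 BPM, 4 divisions per beat): BEAT_MS = 500 ms and ROUND_TO_NEAREST = 125 ms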
basename = getBasename(a.DATA_FILE)
if "_" in basename:
basename, _ = tuple(basename.split("_"))
lineName = basename
if a.RIGHT_TO_LEFT:
basename += "_rtl"
# Read data
_, stations = readCsv(a.DATA_FILE)
_, instruments = readCsv(a.INSTRUMENTS_FILE)
lstations = []
if len(a.DATA_LOCAL_FILE):
_, lstations = readCsv(a.DATA_LOCAL_FILE)
# Parse instruments
instruments = prependAll(instruments, ("file", a.MEDIA_DIRECTORY))
instruments = [i for i in instruments if i["active"] > 0]
instruments = addIndices(instruments, "index")
for i, instrument in enumerate(instruments):
instruments[i]["from_beat_ms"] = roundInt(1.0 * BEAT_MS / instrument["from_tempo"])
instruments[i]["to_beat_ms"] = roundInt(1.0 * BEAT_MS / instrument["to_tempo"])
instruments[i]["interval_ms"] = roundInt(instrument["interval_phase"] * BEAT_MS)
instruments[i]["price"] = instrument["price"] * a.PRICE_MULTIPLIER
# Buy instruments based on a specified budget
def buyInstruments(station, instrumentsShelf):
budget = station['income'] / 12.0
percentile = station['percentile']
instrumentsCart = []
for i in instrumentsShelf:
# skip if not in bracket
if percentile < i['bracket_min'] or percentile >= i['bracket_max']:
continue
# add to cart if in budget
elif i['price'] < budget:
budget -= i['price']
instrumentsCart.append(i.copy())
# out of budget, finished
else:
break
return instrumentsCart
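# Example of the budget rule above: a station with a median income of $60,000 gives a
# monthly budget of $5,000; instruments whose percentile bracket matches are bought in
# shelf order until the next matching one no longer fits the remaining budget.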
# Add local stations in-between express ones
if len(lstations) > 0:
lbasename = getBasename(a.DATA_LOCAL_FILE)
estations = {}
addStations = []
for i, s in enumerate(stations):
lines = str(s["Daytime Routes"]).split(" ")
if lbasename in lines:
estations[s["Station ID"]] = s.copy()
sortByStart = None
currentLStations = []
for i, s in enumerate(lstations):
if s["Station ID"] in estations:
if sortByStart is not None and len(currentLStations) > 0:
step = 1.0 / (len(currentLStations) + 1)
for j, ls in enumerate(currentLStations):
currentLStations[j]["sortBy"] = sortByStart + (j+1) * step
currentLStations[j]["isLocal"] = 1
addStations += currentLStations
currentLStations = []
sortByStart = estations[s["Station ID"]]["sortBy"]
elif sortByStart is not None:
currentLStations.append(s)
stations += addStations
# stations = sorted(stations, key=lambda d: d["sortBy"])
# for s in stations:
# if "isLocal" in s:
# print(" --"+s["Stop Name"])
# else:
# print(s["Stop Name"])
# sys.exit()
# Parse stations
stations = sorted(stations, key=lambda d: d["income"])
stations = addNormalizedValues(stations, "income", "nIncome")
stations = addIndices(stations, "incomeIndex")
isReverse = a.REVERSE
if a.RIGHT_TO_LEFT:
isReverse = (not isReverse)
stations = sorted(stations, key=lambda d: d["sortBy"], reverse=isReverse)
stations = addIndices(stations, "index")
stationCount = len(stations)
ms = a.PAD_START
for i, station in enumerate(stations):
stations[i]["percentile"] = 1.0 * station["incomeIndex"] / stationCount * 100
# stations[i]["percentile"] = min(99.999, 1.0 * station["nIncome"] * 100)
stations[i]["instruments"] = buyInstruments(stations[i], instruments)
# print(len(stations[i]["instruments"]))
distance = beats = duration = 0
if i < stationCount-1:
distance = earthDistance(stations[i+1]['GTFS Latitude'], stations[i+1]['GTFS Longitude'], station['GTFS Latitude'], station['GTFS Longitude'])
beats = roundInt(1.0 * distance / a.METERS_PER_BEAT)
duration = beats * BEAT_MS
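        # e.g. with the defaults (75 m per beat, 500 ms per beat), a 1500 m gap yields 20 beats = 10,000 ms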
boroughNext = stations[i+1]["Borough"]
stations[i]["distance"] = distance
stations[i]["beats"] = beats
stations[i]["duration"] = duration
stations[i]["vduration"] = duration
stations[i]["BoroughNext"] = boroughNext
stations[i]["ms"] = ms
stations[i]["lineName"] = lineName
ms += duration
if a.PROBE:
print("===========================")
for s in stations:
if "isLocal" in s:
print(formatSeconds(roundInt(s["ms"]/1000.0)) + " --- " + s["Stop Name"] + " (LOCAL) - $" + formatNumber(s["income"]))
else:
print(formatSeconds(roundInt(s["ms"]/1000.0)) + " - " + s["Stop Name"] + " - $" + formatNumber(s["income"]))
print("===========================")
else:
dataFilename = a.DATA_OUTPUT_FILE % basename
makeDirectories([dataFilename])
writeCsv(dataFilename, stations, headings=["ms", "Stop Name", "isLocal", "income", "Borough", "lineName"])
textFilename = replaceFileExtension(dataFilename, ".txt")
text = f'Subway Inequality: {basename} train ({stations[-1]["Stop Name"]} Bound)\n\n'
    text += f'The song above mimics a ride along a subway line (the {basename} train), where the quantity and power of the instruments at any given moment in the song correspond to the median household income of the neighborhood that you are passing through. The goal is to have the dramatic contrasts of the song echo the dramatic contrast of income in the city.\n\n'
for s in stations:
if "isLocal" not in s:
text += f'{formatSeconds(roundInt(s["ms"]/1000.0))} - {s["Stop Name"]} - ${formatNumber(s["income"])} household income\n'
writeTextFile(textFilename, text)
if a.DATA_ONLY:
sys.exit()
# Calculate ranges
distances = [s["distance"] for s in stations if s["distance"] > 0]
totalDistance = sum(distances)
minDistance, maxDistance = (min(distances), max(distances))
durations = [s["duration"] for s in stations if s["duration"] > 0]
totalMs = sum(durations)
minDuration, maxDuration = (min(durations), max(durations))
totalBeats = sum([s["beats"] for s in stations])
totalSeconds = roundInt(totalMs / 1000.0)
secondsPerStation = roundInt(1.0*totalSeconds/stationCount)
print('Total distance in meters: %s' % roundInt(totalDistance))
print('Distance range in meters: [%s, %s]' % (roundInt(minDistance), roundInt(maxDistance)))
print('Average beats per station: %s' % roundInt(1.0*totalBeats/stationCount))
print('Average time per station: %s' % formatSeconds(secondsPerStation))
print('Main sequence beats: %s' % totalBeats)
# Retrieve gain based on current beat
def getVolume(instrument, beat):
beats_per_phase = instrument['gain_phase']
percent_complete = float(beat % beats_per_phase) / beats_per_phase
percent = easeSin(percent_complete)
from_volume = instrument['from_volume']
to_volume = instrument['to_volume']
volume = lerp((from_volume, to_volume), percent)
return volume
# Get beat duration in ms based on current point in time
def getBeatMs(instrument, beat, round_to):
from_beat_ms = instrument['from_beat_ms']
to_beat_ms = instrument['to_beat_ms']
beats_per_phase = instrument['tempo_phase']
percent_complete = float(beat % beats_per_phase) / beats_per_phase
percent = easeSin(percent_complete)
ms = lerp((from_beat_ms, to_beat_ms), percent)
ms = roundInt(roundToNearest(ms, round_to))
return ms
# Return if the instrument should be played in the given interval
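# e.g. with interval=4 and interval_offset=0 the instrument plays only during every 4th
# window of length interval_ms; the checks near start_ms/end_ms below keep any playing
# stretch from being shorter than minIntervalDuration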
def isValidInterval(instrument, elapsed_ms, start_ms, end_ms, minIntervalDuration=3000):
interval_ms = instrument['interval_ms']
interval = instrument['interval']
interval_offset = instrument['interval_offset']
isValid = (int(math.floor(1.0*elapsed_ms/interval_ms)) % interval == interval_offset)
# return isValid
if end_ms - start_ms <= minIntervalDuration * 3:
return isValid
# check to see if we're at the start and not long enough
if isValid and elapsed_ms < (start_ms+minIntervalDuration) and not isValidInterval(instrument, start_ms+minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = False
# make start interval earlier if necessary
elif not isValid and elapsed_ms < (start_ms+minIntervalDuration) and isValidInterval(instrument, start_ms+minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = True
# check to see if we're at the end and not long enough
elif isValid and elapsed_ms > (end_ms-minIntervalDuration) and not isValidInterval(instrument, end_ms-minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = False
# make start interval earlier if necessary
elif not isValid and elapsed_ms > (end_ms-minIntervalDuration) and isValidInterval(instrument, end_ms-minIntervalDuration, start_ms, end_ms, minIntervalDuration):
isValid = True
return isValid
# Add beats to sequence
def addBeatsToSequence(sequence, instrument, duration, ms, beat_ms, round_to, pad_start):
msStart = ms
msEnd = ms + duration
offset_ms = int(instrument['tempo_offset'] * beat_ms)
ms += offset_ms
previous_ms = int(ms)
from_beat_ms = instrument['from_beat_ms']
to_beat_ms = instrument['to_beat_ms']
min_ms = min(from_beat_ms, to_beat_ms)
remaining_duration = int(duration)
elapsed_duration = offset_ms
continue_from_prev = (instrument['bracket_min'] > 0 or instrument['bracket_max'] < 100)
rn = pseudoRandom(instrument["index"]+1)
while remaining_duration >= min_ms:
elapsed_ms = int(ms)
elapsed_beat = int((elapsed_ms-previous_ms) / beat_ms)
# continue beat from previous
if continue_from_prev:
elapsed_beat = int(elapsed_ms / beat_ms)
this_beat_ms = getBeatMs(instrument, elapsed_beat, round_to)
# add to sequence if in valid interval
if isValidInterval(instrument, elapsed_ms, msStart, msEnd):
variance = roundInt(rn * a.VARIANCE_MS * 2 - a.VARIANCE_MS)
sequence.append({
'instrumentIndex': instrument["index"],
'filename': instrument["file"],
'volume': getVolume(instrument, elapsed_beat),
'ms': max([pad_start + elapsed_ms + variance, 0])
})
remaining_duration -= this_beat_ms
elapsed_duration += this_beat_ms
ms += this_beat_ms
return sequence
# Build main sequence
sequence = []
for i, instrument in enumerate(instruments):
ms = 0
stationQueueDur = 0
# Each station in stations
for station in stations:
# Check if instrument is in this station
instrumentIndex = findInList(station['instruments'], 'index', instrument['index'])
        # Instrument not here: flush any queued beats, add the station duration and move on
if instrumentIndex < 0 and stationQueueDur > 0:
sequence = addBeatsToSequence(sequence, instrument, stationQueueDur, ms, BEAT_MS, ROUND_TO_NEAREST, a.PAD_START)
ms += stationQueueDur + station['duration']
stationQueueDur = 0
elif instrumentIndex < 0:
ms += station['duration']
else:
stationQueueDur += station['duration']
if stationQueueDur > 0:
sequence = addBeatsToSequence(sequence, instrument, stationQueueDur, ms, BEAT_MS, ROUND_TO_NEAREST, a.PAD_START)
sequenceDuration = max([s["ms"] for s in sequence]) + a.PAD_END
# Now start the video frame logic
# Calculations
aa = vars(a)
aa["STATION_WIDTH"] = roundInt(1.0 * a.WIDTH * a.STATION_WIDTH)
aa["TEXT_WIDTH"] = roundInt(1.0 * a.WIDTH * a.TEXT_WIDTH)
aa["CENTER_Y"] = roundInt(1.0 * a.HEIGHT * a.CENTER_Y)
aa["BOROUGH_TEXT_Y"] = roundInt(1.0 * a.HEIGHT * a.BOROUGH_TEXT_Y)
aa["STATION_TEXT_Y"] = roundInt(1.0 * a.HEIGHT * a.STATION_TEXT_Y)
RESOLUTION = a.WIDTH / 1920.0
aa["CIRCLE_WIDTH"] = roundInt(a.CIRCLE_WIDTH * RESOLUTION)
aa["LINE_HEIGHT"] = roundInt(a.LINE_HEIGHT * RESOLUTION)
aa["BOUNDARY_MARGIN"] = roundInt(a.BOUNDARY_MARGIN * RESOLUTION)
aa["BOUNDARY_HEIGHT"] = roundInt(a.BOUNDARY_HEIGHT * RESOLUTION)
aa["BOUNDARY_WIDTH"] = roundInt(a.BOUNDARY_WIDTH * RESOLUTION)
aa["BOROUGH_THRESHOLD"] = roundInt(1.0 * a.WIDTH * a.BOROUGH_THRESHOLD)
aa["MARKER_WIDTH"] = roundInt(a.MARKER_WIDTH * RESOLUTION)
aa["STATION_TEXT_SIZE"] = roundInt(a.STATION_TEXT_SIZE * RESOLUTION)
aa["STATION_TEXT_MARGIN"] = roundInt(a.STATION_TEXT_MARGIN * RESOLUTION)
aa["STATION_LETTER_MARGIN"] = roundInt(a.STATION_LETTER_MARGIN * RESOLUTION)
aa["BOROUGH_TEXT_SIZE"] = roundInt(a.BOROUGH_TEXT_SIZE * RESOLUTION)
aa["BOROUGH_LETTER_MARGIN"] = roundInt(a.BOROUGH_LETTER_MARGIN * RESOLUTION)
aa["MAP_COORDS"] = tuple([float(c) for c in a.MAP_COORDS.strip().split(",")])
aa["MAP_MARGIN"] = roundInt(a.MAP_MARGIN * RESOLUTION)
aa["MAP_W"] = roundInt(a.MAP_W * RESOLUTION)
aa["MAP_LINE_WIDTH"] = roundInt(a.MAP_LINE_WIDTH * RESOLUTION)
aa["DIVIDER_WIDTH"] = roundInt(a.DIVIDER_WIDTH * RESOLUTION)
aa["DIVIDER_DISTANCE"] = roundInt(1.0 * a.WIDTH * a.DIVIDER_DISTANCE)
# Add borough names
boroughNames = {
"Q": "Queens",
"M": "Manhattan",
"Bk": "Brooklyn",
"Bx": "Bronx",
"SI": "Staten Island"
}
for i, station in enumerate(stations):
stations[i]["borough"] = boroughNames[station["Borough"]]
x = 0
mlon0, mlat0, mlon1, mlat1 = a.MAP_COORDS
vstations = stations[:]
# If going right to left, reverse the stations visually
if a.RIGHT_TO_LEFT:
vstations = list(reversed(vstations))
for i, station in enumerate(vstations):
if i < stationCount-1:
vstations[i]["vduration"] = vstations[i+1]["duration"]
else:
vstations[i]["vduration"] = 0
for i, station in enumerate(vstations):
boroughNext = station["borough"]
if i < stationCount-1:
boroughNext = vstations[i+1]["borough"]
vstations[i]["boroughNext"] = boroughNext
vstations[i]["width"] = roundInt(1.0 * station["vduration"] / minDuration * a.STATION_WIDTH)
vstations[i]["x"] = x
vstations[i]["x0"] = x - a.TEXT_WIDTH / 2
vstations[i]["x1"] = x + a.TEXT_WIDTH / 2
vstations[i]["mapNx"] = norm(station["GTFS Longitude"], (mlon0, mlon1))
vstations[i]["mapNy"] = norm(station["GTFS Latitude"], (mlat0, mlat1))
x += vstations[i]["width"]
totalW = x
pxPerMs = 1.0 * totalW / totalMs
pxPerS = pxPerMs * 1000.0
pxPerFrame = pxPerS / a.FPS
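# pxPerFrame is the horizontal scroll speed; station widths were scaled above so that
# the shortest station spans STATION_WIDTH pixels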
print("Total width: %s px" % totalW)
print("Pixels per second: %s" % pxPerS)
print("Pixels per frame: %s" % pxPerFrame)
totalFrames = msToFrame(sequenceDuration, a.FPS)
totalFrames = int(ceilToNearest(totalFrames, a.FPS))
print("Total frames: %s" % totalFrames)
sequenceDuration = frameToMs(totalFrames, a.FPS)
def drawFrame(filename, ms, xOffset, stations, totalW, bulletImg, mapImg, fontStation, fontBorough, a):
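    """
    Render one video frame at time `ms`: the station line shifted by `xOffset`, borough
    dividers and labels, station bullets and names, the mini map with ride progress, and
    the vertical center marker; the result is saved to `filename`.
    """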
if not a.OVERWRITE and os.path.isfile(filename):
return
im = Image.new('RGB', (a.WIDTH, a.HEIGHT), a.BG_COLOR)
draw = ImageDraw.Draw(im, 'RGBA')
cx = roundInt(a.WIDTH * 0.5)
cy = a.CENTER_Y
stationCount = len(stations)
leftX = xOffset
rightX = leftX + totalW
# draw the center line
x0 = 0 if leftX < 0 else leftX
x1 = a.WIDTH if rightX > a.WIDTH else rightX
y0 = cy - a.LINE_HEIGHT/2
y1 = y0 + a.LINE_HEIGHT
draw.rectangle([(x0, y0), (x1, y1)], fill=a.ALT_TEXT_COLOR)
for i, s in enumerate(stations):
# check to see if we should draw borough divider
if s["borough"] != s["boroughNext"]:
deltaBx = abs(stations[i+1]["x"]-s["x"])
# don't draw boundary in tight space
if deltaBx > a.BOROUGH_THRESHOLD:
bdx = roundInt(xOffset + (s["x"] + stations[i+1]["x"]) * 0.5)
bdx0 = bdx - a.WIDTH/2
bdx1 = bdx + a.WIDTH/2
if 0 <= bdx0 <= a.WIDTH or 0 <= bdx1 <= a.WIDTH:
dx0 = bdx - a.BOUNDARY_WIDTH/2
dx1 = dx0 + a.BOUNDARY_WIDTH
dy0 = cy
dy1 = dy0 + a.BOUNDARY_HEIGHT
draw.rectangle([(dx0, dy0), (dx1, dy1)], fill=a.ALT_TEXT_COLOR)
blw, blh = getLineSize(fontBorough, s["borough"], a.BOROUGH_LETTER_MARGIN)
bx = dx0 - a.BOUNDARY_MARGIN - blw/2
drawTextToImage(draw, s["borough"], fontBorough, a.BOROUGH_LETTER_MARGIN, bx, a.BOROUGH_TEXT_Y, a.ALT_TEXT_COLOR)
blw, blh = getLineSize(fontBorough, s["boroughNext"], a.BOROUGH_LETTER_MARGIN)
bx = dx1 + a.BOUNDARY_MARGIN + blw/2
drawTextToImage(draw, s["boroughNext"], fontBorough, a.BOROUGH_LETTER_MARGIN, bx, a.BOROUGH_TEXT_Y, a.ALT_TEXT_COLOR)
sx = xOffset + s["x"]
sy = a.CENTER_Y
# draw dividers
if i < stationCount-1:
dividers = 0
dividerDistance = 0
nextSx = xOffset + stations[i+1]["x"]
deltaSx = abs(nextSx - sx)
if deltaSx >= a.DIVIDER_DISTANCE * 2:
dividers = int(1.0 * deltaSx / a.DIVIDER_DISTANCE) - 1
if dividers > 0:
dividerDistance = roundInt(1.0 * deltaSx / (dividers+1))
for di in range(dividers):
divX = sx + (di+1) * dividerDistance
divX0 = divX - a.DIVIDER_WIDTH/2
divX1 = divX0 + a.DIVIDER_WIDTH
divY0 = y0
divY1 = y1
if divX1 > 0:
draw.rectangle([(divX0, divY0), (divX1, divY1)], fill=a.DIVIDER_COLOR)
# check if station is visible
sx0 = xOffset + s["x0"]
sx1 = xOffset + s["x1"]
if not (0 <= sx0 <= a.WIDTH or 0 <= sx1 <= a.WIDTH):
continue
# just draw empty bullet for local stops
if "isLocal" in s:
brad = roundInt(a.CIRCLE_WIDTH/3)
bx = sx
by = sy
# Draw line using gizeh so it will be smooth
bsurface = gizeh.Surface(width=a.WIDTH, height=a.HEIGHT)
circle = gizeh.circle(r=brad, xy=[bx, by], fill=hexToRGB(a.DIVIDER_COLOR, toFloat=True))
circle.draw(bsurface)
bpixels = bsurface.get_npimage(transparent=True) # should be shape: h, w, rgba
circleImg = Image.fromarray(bpixels, mode="RGBA")
im.paste(circleImg, (0, 0), circleImg)
continue
# draw borough text
bx = sx
by = a.BOROUGH_TEXT_Y
drawTextToImage(draw, s["borough"], fontBorough, a.BOROUGH_LETTER_MARGIN, bx, by, a.ALT_TEXT_COLOR)
# draw bullet
bx = roundInt(sx - a.CIRCLE_WIDTH/2)
by = roundInt(sy - a.CIRCLE_WIDTH/2)
im.paste(bulletImg, (bx, by), bulletImg)
# draw station text
stx = sx
sty = a.STATION_TEXT_Y
slines = getMultilines(s["Stop Name"], fontStation, a.TEXT_WIDTH, a.STATION_LETTER_MARGIN)
drawTextLinesToImage(draw, slines, fontStation, a.STATION_TEXT_MARGIN, a.STATION_LETTER_MARGIN, stx, sty, a.TEXT_COLOR)
# draw the map
mw, mh = mapImg.size
mx = a.MAP_MARGIN
my = a.HEIGHT - mh - a.MAP_MARGIN
im.paste(mapImg, (mx, my))
lineColor = "#"+str(stations[0]["color"])
points = []
allPoints = []
mstations = stations[:]
if a.RIGHT_TO_LEFT:
mstations = list(reversed(mstations))
for i, s in enumerate(mstations):
sms0 = s["ms"]
sms1 = sms0 + s["duration"]
# print("%s, %s" % (sms0, sms1))
mprogress = norm(ms, (sms0, sms1), limit=True) if s["duration"] > 0 else 1.0
lx = lerp((mx, mx+mw), s["mapNx"])
ly = lerp((my, my+mh), s["mapNy"])
if ms >= sms0:
points.append((lx, ly))
if 0.0 < mprogress < 1.0 and i < stationCount-1 and s["duration"] > 0:
lx1 = lerp((mx, mx+mw), mstations[i+1]["mapNx"])
ly1 = lerp((my, my+mh), mstations[i+1]["mapNy"])
lx2 = lerp((lx, lx1), mprogress)
ly2 = lerp((ly, ly1), mprogress)
points.append((lx2, ly2))
allPoints.append((lx, ly))
# Draw line using gizeh so it will be smooth
surface = gizeh.Surface(width=a.WIDTH, height=a.HEIGHT)
line = gizeh.polyline(points=allPoints, stroke_width=max(1, a.MAP_LINE_WIDTH-1), stroke=hexToRGB(a.MAP_LINE_COLOR, toFloat=True))
line.draw(surface)
if len(points) > 1:
sline = gizeh.polyline(points=points, stroke_width=a.MAP_LINE_WIDTH, stroke=hexToRGB(lineColor, toFloat=True))
sline.draw(surface)
spixels = surface.get_npimage(transparent=True) # should be shape: h, w, rgba
lineImage = Image.fromarray(spixels, mode="RGBA")
im.paste(lineImage, (0, 0), lineImage)
# draw the marker
x0 = cx - a.MARKER_WIDTH/2
x1 = x0 + a.MARKER_WIDTH
y0 = 0
y1 = a.HEIGHT
draw.rectangle([(x0, y0), (x1, y1)], fill=(255,255,255,100))
del draw
im.save(filename)
# print("Saved %s" % filename)
def getEasedFrames(easeFrameCount, stationFrameCount, pxPerFrame):
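    """
    Spread the pixels normally covered in half of the (shorter of padding/station)
    frames across the padding frames plus that half station, so the scroll eases in
    or out instead of jumping straight to full speed. Returns a list of per-frame
    pixel steps, each at most pxPerFrame.
    """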
fromFrameCount = int(min(easeFrameCount, stationFrameCount) / 2)
fromPx = fromFrameCount * pxPerFrame
toFrameCount = easeFrameCount + fromFrameCount # 'fromPx' will be stretched into 'toFrameCount' frames
# easedPoints = [easeIn(n) * pxPerFrame for n in np.linspace(0, 1.0, num=toFrameCount)]
easedPoints = [n * pxPerFrame for n in np.linspace(0, 1.0, num=toFrameCount)]
buckets = [0 for n in range(toFrameCount)]
pxPool = fromPx
for i in range(toFrameCount):
index = toFrameCount-1-i
bucketPx = buckets[index]
addPx = easedPoints[index]
if addPx > pxPool:
addPx = pxPool
buckets[index] = addPx
pxPool -= addPx
if pxPool <= 0:
break
if pxPool > 0:
incr = 0.01
while pxPool > 0:
for j in range(toFrameCount):
index = toFrameCount-1-j
bucketPx = buckets[index]
if (bucketPx+incr) <= pxPerFrame:
buckets[index] += incr
pxPool -= incr
# import matplotlib.pyplot as plt
# plt.plot(buckets)
# plt.show()
# sys.exit()
# print("%s ~ %s" % (fromPx, sum(buckets)))
return buckets
audioFilename = a.AUDIO_OUTPUT_FILE % basename
print("%s steps in sequence" % len(sequence))
print('Total sequence time: %s' % formatSeconds(sequenceDuration/1000.0))
if a.VISUALIZE_SEQUENCE:
instrumentsCount = len(instruments)
labelW = 200
unitH = 10
unitW = 10
marginH = 2
imgH = (unitH+marginH) * instrumentsCount
imgW = totalSeconds * unitW + labelW
dfont = ImageFont.truetype(font="fonts/OpenSans-Regular.ttf", size=10)
print("Making viz %s x %s" % (imgW, imgH))
im = Image.new('RGB', (imgW, imgH), "#000000")
draw = ImageDraw.Draw(im, 'RGB')
for i, ins in enumerate(instruments):
y = i * (unitH + marginH)
draw.text((2, y), ins["name"], fill="#FFFFFF", font=dfont)
steps = [step for step in sequence if step["instrumentIndex"]==ins["index"]]
for step in steps:
sx = roundInt((step["ms"] - a.PAD_START) / 1000.0 / totalSeconds * (imgW-labelW) + labelW)
draw.rectangle([(sx, y), (sx+3, y+unitH)], fill=(roundInt(255*step["volume"]),0,0))
if i > 0:
draw.line([(0, y-1), (imgW, y-1)], fill="#cccccc", width=1)
printProgress(i+1, instrumentsCount)
im.save("output/viz.png")
sys.exit()
if a.PLOT_SEQUENCE:
import matplotlib.pyplot as plt
xs = [s['ms']/1000.0 for s in stations]
ys = [s['income'] for s in stations]
plt.plot(xs, ys)
plt.show()
sys.exit()
if a.PROBE:
sys.exit()
makeDirectories([a.AUDIO_OUTPUT_FILE, a.OUTPUT_FILE])
if not a.AUDIO_ONLY:
bulletImg = Image.open(a.IMAGE_FILE)
bulletImg = bulletImg.resize((a.CIRCLE_WIDTH, a.CIRCLE_WIDTH), resample=Image.LANCZOS)
mapImg = Image.open(a.MAP_IMAGE)
mapH = roundInt((1.0 * mapImg.size[1] / mapImg.size[0]) * a.MAP_W)
mapImg = mapImg.resize((a.MAP_W, mapH), resample=Image.LANCZOS)
fontStation = ImageFont.truetype(font=a.STATION_FONT, size=a.STATION_TEXT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
fontBorough = ImageFont.truetype(font=a.BOROUGH_FONT, size=a.BOROUGH_TEXT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
makeDirectories([a.OUTPUT_FRAME % (basename, "*")])
if a.OVERWRITE and a.SINGLE_FRAME < 1:
removeFiles(a.OUTPUT_FRAME % (basename, "*"))
# calculations for easing in/out
padFrameInCount = msToFrame(a.PAD_START, a.FPS)
station0FrameCount = msToFrame(stations[0]["duration"], a.FPS)
easeInFrames = getEasedFrames(padFrameInCount, station0FrameCount, pxPerFrame)
easeInFrameCount = len(easeInFrames)
padFrameOutCount = msToFrame(a.PAD_END, a.FPS)
station1FrameCount = msToFrame(stations[-2]["duration"], a.FPS)
easeOutFrames = getEasedFrames(padFrameOutCount, station1FrameCount, pxPerFrame)
# easeOutFrames = list(reversed(easeOutFrames))
easeOutFrameCount = len(easeOutFrames)
easeOutPixels = roundInt(sum(easeOutFrames))
print("Making video frame sequence...")
videoFrames = []
centerX = roundInt(a.WIDTH * 0.5)
xOffset = centerX
direction = -1
if a.RIGHT_TO_LEFT:
direction = 1
xOffset -= totalW
xOffsetF = 1.0 * xOffset
target = centerX-totalW if direction < 0 else centerX
for f in range(totalFrames):
frame = f + 1
ms = frameToMs(frame, a.FPS)
frameFilename = a.OUTPUT_FRAME % (basename, zeroPad(frame, totalFrames))
if a.SINGLE_FRAME < 1 or a.SINGLE_FRAME == frame:
if a.SINGLE_FRAME > 0:
frameFilename = "output/frame.png"
drawFrame(frameFilename, ms, xOffset, vstations, totalW, bulletImg, mapImg, fontStation, fontBorough, a)
if a.SINGLE_FRAME > 0:
sys.exit()
pixelsLeft = abs(target - xOffset)
# ease in start
if frame < easeInFrameCount:
xOffsetF += (direction * easeInFrames[frame-1])
xOffset = roundInt(xOffsetF)
# print(abs(xOffset-centerX))
# # correct any discrepancy after ease in
# elif frame <= easeInFrameCount:
# xOffset = (frame - padFrameInCount) * pxPerFrame
# xOffsetF = 1.0 * xOffset
# ease out end
elif pixelsLeft <= easeOutPixels:
pxStep = easeOutFrames.pop() if len(easeOutFrames) > 0 else 1
xOffsetF += (direction * pxStep)
xOffset = roundInt(xOffsetF)
# print("%s > %s" % (xOffset, centerX-totalW))
else:
xOffset += (direction * pxPerFrame)
xOffsetF = 1.0 * xOffset
xOffset = lim(xOffset, (centerX-totalW, centerX))
printProgress(frame, totalFrames)
# break
stepTime = logTime(startTime, "Finished frames")
padZeros = len(str(totalFrames))
outfile = a.OUTPUT_FILE % basename
frameInfile = a.OUTPUT_FRAME % (basename, '%s')
if a.VIDEO_ONLY:
compileFrames(frameInfile, a.FPS, outfile, padZeros)
sys.exit()
if a.OVERWRITE or not os.path.isfile(audioFilename):
mixAudio(sequence, sequenceDuration, audioFilename, masterDb=a.MASTER_DB)
else:
print("%s already exists" % audioFilename)
stepTime = logTime(stepTime, "Finished Audio")
if not a.AUDIO_ONLY:
if a.VIDEO_ONLY:
audioFilename = None
if a.OVERWRITE or not os.path.isfile(outfile):
compileFrames(frameInfile, a.FPS, outfile, padZeros, audioFile=audioFilename)
else:
print("%s already exists" % outfile)
logTime(startTime, "Total execution time")
|
[
"PIL.Image.fromarray",
"PIL.Image.open",
"argparse.ArgumentParser",
"PIL.Image.new",
"matplotlib.pyplot.plot",
"PIL.ImageFont.truetype",
"os.path.isfile",
"PIL.ImageDraw.Draw",
"numpy.linspace",
"gizeh.Surface",
"sys.exit",
"matplotlib.pyplot.show"
] |
[((1123, 1148), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1146, 1148), False, 'import argparse\n'), ((23531, 23580), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(a.WIDTH, a.HEIGHT)', 'a.BG_COLOR'], {}), "('RGB', (a.WIDTH, a.HEIGHT), a.BG_COLOR)\n", (23540, 23580), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((23592, 23618), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im', '"""RGBA"""'], {}), "(im, 'RGBA')\n", (23606, 23618), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((28778, 28823), 'gizeh.Surface', 'gizeh.Surface', ([], {'width': 'a.WIDTH', 'height': 'a.HEIGHT'}), '(width=a.WIDTH, height=a.HEIGHT)\n', (28791, 28823), False, 'import gizeh\n'), ((29250, 29287), 'PIL.Image.fromarray', 'Image.fromarray', (['spixels'], {'mode': '"""RGBA"""'}), "(spixels, mode='RGBA')\n", (29265, 29287), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((31269, 31331), 'PIL.ImageFont.truetype', 'ImageFont.truetype', ([], {'font': '"""fonts/OpenSans-Regular.ttf"""', 'size': '(10)'}), "(font='fonts/OpenSans-Regular.ttf', size=10)\n", (31287, 31331), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((31389, 31430), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(imgW, imgH)', '"""#000000"""'], {}), "('RGB', (imgW, imgH), '#000000')\n", (31398, 31430), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((31442, 31467), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im', '"""RGB"""'], {}), "(im, 'RGB')\n", (31456, 31467), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((32091, 32101), 'sys.exit', 'sys.exit', ([], {}), '()\n', (32099, 32101), False, 'import sys\n'), ((32248, 32264), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {}), '(xs, ys)\n', (32256, 32264), True, 'import matplotlib.pyplot as plt\n'), ((32269, 32279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32277, 32279), True, 'import matplotlib.pyplot as plt\n'), ((32284, 32294), 'sys.exit', 'sys.exit', ([], {}), '()\n', (32292, 32294), False, 'import sys\n'), ((32312, 32322), 'sys.exit', 'sys.exit', ([], {}), '()\n', (32320, 32322), False, 'import sys\n'), ((32417, 32441), 'PIL.Image.open', 'Image.open', (['a.IMAGE_FILE'], {}), '(a.IMAGE_FILE)\n', (32427, 32441), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((32546, 32569), 'PIL.Image.open', 'Image.open', (['a.MAP_IMAGE'], {}), '(a.MAP_IMAGE)\n', (32556, 32569), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((32727, 32833), 'PIL.ImageFont.truetype', 'ImageFont.truetype', ([], {'font': 'a.STATION_FONT', 'size': 'a.STATION_TEXT_SIZE', 'layout_engine': 'ImageFont.LAYOUT_RAQM'}), '(font=a.STATION_FONT, size=a.STATION_TEXT_SIZE,\n layout_engine=ImageFont.LAYOUT_RAQM)\n', (32745, 32833), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((32848, 32954), 'PIL.ImageFont.truetype', 'ImageFont.truetype', ([], {'font': 'a.BOROUGH_FONT', 'size': 'a.BOROUGH_TEXT_SIZE', 'layout_engine': 'ImageFont.LAYOUT_RAQM'}), '(font=a.BOROUGH_FONT, size=a.BOROUGH_TEXT_SIZE,\n layout_engine=ImageFont.LAYOUT_RAQM)\n', (32866, 32954), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((35723, 35733), 'sys.exit', 'sys.exit', ([], {}), '()\n', (35731, 35733), False, 'import sys\n'), ((14419, 14429), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14427, 14429), False, 'import sys\n'), ((23480, 23504), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (23494, 23504), False, 'import os\n'), ((35757, 35786), 'os.path.isfile', 'os.path.isfile', (['audioFilename'], {}), 
'(audioFilename)\n', (35771, 35786), False, 'import os\n'), ((26612, 26657), 'gizeh.Surface', 'gizeh.Surface', ([], {'width': 'a.WIDTH', 'height': 'a.HEIGHT'}), '(width=a.WIDTH, height=a.HEIGHT)\n', (26625, 26657), False, 'import gizeh\n'), ((26908, 26945), 'PIL.Image.fromarray', 'Image.fromarray', (['bpixels'], {'mode': '"""RGBA"""'}), "(bpixels, mode='RGBA')\n", (26923, 26945), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((29999, 30036), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)'], {'num': 'toFrameCount'}), '(0, 1.0, num=toFrameCount)\n', (30010, 30036), True, 'import numpy as np\n'), ((36064, 36087), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (36078, 36087), False, 'import os\n'), ((34525, 34535), 'sys.exit', 'sys.exit', ([], {}), '()\n', (34533, 34535), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 14:31:17 2015
@author: <NAME>.
Description:
    This script profiles CPU and GPU matrix element time complexity.
    It applies the matrix element analysis for a given set of parameters,
    profiles the code, plots the time complexity results (with fits), and
    plots the matrix elements from each case.
"""
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from my_timer import timer
from math import log
from scipy.optimize import curve_fit
def f_MEplaceholder(neval, mode):
# Placeholder integration instead of ME calc
result, error = (sp.integrate.quad(lambda x:
sp.special.jv(2.5, x), 0, neval) if mode == 'gpu'
else sp.integrate.quadrature(lambda x:
sp.special.jv(2.5, x), 0, neval))
return result, error
def flinear(N, mode):
"""
O(n) function
"""
y = np.asarray([i for i in range(N)])
np.asarray([i for i in range(N)])
np.asarray([i for i in range(N)])
    return y, 1
def fsquare(N, mode):
"""
O(n^2) function
"""
for i in range(N):
for j in range(N):
y = i*j
    return y, 1
def algoAnalysis(fn, nMin, nMax, mode):
"""
    Time fn(i, mode) for i = 32*nMin .. 32*nMax (step 32) and collect timings, results and errors
"""
n = []
time_result = []
y_result = []
y_err = []
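    # evaluate at multiples of 32 points, from 32*nMin up to 32*nMax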
for i in [j*32 for j in range(nMin,nMax+1)]:
with timer() as t:
temp_result, temp_err = fn(i, mode)
time_result.append(t.msecs)
y_result.append(temp_result)
y_err.append(temp_err)
n.append(i)
return n, time_result, y_result, y_err
def plotAll(n, time_data, y_data, err_data):
n = np.asarray(n)
time_data = np.asarray(time_data)
y_data = np.asarray(y_data)
err_data = np.asarray(err_data)
err_data[0] = err_data[1]*0.5
# plotting helpers
nTime = n[2]
n = map(lambda x: log(x,2), n[0])
colors = ['lightblue', 'lightgreen']
edgeColors = ['#1B2ACC','#3F7F4C']
faceColors = ['#089FFF', '#7EFF99']
label_entries_for_results = ['GPU Matrix Elements', 'CPU Matrix Elements']
label_entries_for_time = ['GPU Runtime', 'CPU Runtime']
plt.figure(figsize=(15,6))
###########################################################################
# The following plots the runtime information for GPU and CPU runs.
def sqFunc(x, a, b, c):
return a*x**2 + b*x +c
def linFunc(x, a, b):
return a*x + b
funcList = [linFunc, sqFunc]
ax = plt.subplot(1,2,1)
# draw plots for timing data
for dat_mode in xrange(0,2):
params = curve_fit(funcList[dat_mode], nTime, time_data[dat_mode])
x = np.linspace(nTime[0], nTime[-1], 1000)
if dat_mode == 0:
[a,b] = params[0]
y = funcList[dat_mode](x, a, b)
s = "Fit for GPU: $%.5fx$ + $%.5f$"%(a,b)
if dat_mode == 1:
[a,b,c] = params[0]
y = funcList[dat_mode](x, a, b, c)
s = "Fit for CPU: $%.5fx^2$ + $%.5fx$ + $%.2f$"%(a,b,c)
ax.text(0.035, 0.75-dat_mode*0.1, s,
transform = ax.transAxes,
fontsize = 16)
ax.plot(x,y, color='k', linestyle="--", linewidth = 4)
ax.plot(nTime, time_data[dat_mode], color=colors[dat_mode],
marker = 'o', label=label_entries_for_time[dat_mode],
linestyle = 'None')
# setting axis limits
plt.xlim([min(nTime)-50, max(nTime)+50])
plt.ylim([min(min(time_data[0]), min(time_data[1]))*1.3,
max(max(time_data[0]), max(time_data[1]))*1.3])
# hiding axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# adding horizontal grid lines
ax.yaxis.grid(True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# labels
plt.xlabel('Maximum number of phase space points')
plt.ylabel('Runtime (msec)')
leg = plt.legend(loc='upper left', fancybox=True, numpoints=1)
leg.get_frame().set_alpha(0.5)
###########################################################################
# The following plots the Matrix Elements for the GPU and CPU respectively
# on a subplot, on top of each other with their corresponding errors.
ax = plt.subplot(1,2,2)
# draw plots for results
for dat_mode in xrange(0,2):
ax.errorbar(x=n, y=y_data[dat_mode], yerr=err_data[dat_mode],
fmt='o', color=colors[dat_mode], ecolor='black',
alpha = 0.3)
ax.plot(n, y_data[dat_mode,:], marker='o',
linestyle = 'None', color=colors[dat_mode],
label=label_entries_for_results[dat_mode])
ax.fill_between(n, y_data[dat_mode]-err_data[dat_mode],
y_data[dat_mode]+err_data[dat_mode],
alpha=0.2, edgecolor=edgeColors[dat_mode],
facecolor=faceColors[dat_mode],
linewidth=4, linestyle='-.', antialiased=True)
# setting axis limits
plt.xlim([min(n)-1*0.2, max(n)+1*0.2])
plt.ylim([min(min(y_data[0]), min(y_data[1]))*1.3,
max(max(y_data[0]), max(y_data[1]))*1.3])
# hiding axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# adding horizontal grid lines
ax.yaxis.grid(True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# labels
plt.xlabel('$\log_2$(Maximum number of phase space points)')
plt.ylabel('Matrix Element')
leg = plt.legend(loc='upper left', fancybox=True, numpoints=1)
leg.get_frame().set_alpha(0.5)
plt.tight_layout()
plt.savefig('plots.pdf')
plt.show()
# main() function
def main():
print('\nAnalyzing Algorithms...')
n_GPU, timeGPU, yResult_GPU, yErr_GPU = algoAnalysis(f_MEplaceholder, 8, 20, 'gpu')
n_CPU, time_CPU, yResult_CPU, yErr_CPU = algoAnalysis(f_MEplaceholder, 8, 20, 'cpu')
nLin, timeLin, y1, y2 = algoAnalysis(flinear, 10, 50, 'cpu')
nSq, timeSq, y1, y2 = algoAnalysis(fsquare, 10, 50, 'cpu')
nList = [n_GPU, n_CPU, nLin, nSq] ### DELETE NLIN NSQ AFTER
timeList = [timeLin, timeSq]
yResultList = [yResult_GPU, yResult_CPU]
yErrList = [yErr_GPU, yErr_CPU]
plotAll(nList, timeList, yResultList, yErrList)
# call main
if __name__ == '__main__':
# matplotlib.rcParams.update({'font.family': 'Zapf Chancery'})
main()
|
[
"scipy.optimize.curve_fit",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"numpy.asarray",
"math.log",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"my_timer.timer",
"scipy.special.jv",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((496, 517), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (510, 517), False, 'import matplotlib\n'), ((1829, 1842), 'numpy.asarray', 'np.asarray', (['n'], {}), '(n)\n', (1839, 1842), True, 'import numpy as np\n'), ((1859, 1880), 'numpy.asarray', 'np.asarray', (['time_data'], {}), '(time_data)\n', (1869, 1880), True, 'import numpy as np\n'), ((1894, 1912), 'numpy.asarray', 'np.asarray', (['y_data'], {}), '(y_data)\n', (1904, 1912), True, 'import numpy as np\n'), ((1928, 1948), 'numpy.asarray', 'np.asarray', (['err_data'], {}), '(err_data)\n', (1938, 1948), True, 'import numpy as np\n'), ((2326, 2353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (2336, 2353), True, 'from matplotlib import pyplot as plt\n'), ((2658, 2678), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2669, 2678), True, 'from matplotlib import pyplot as plt\n'), ((3774, 3904), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""on"""', 'left': '"""off"""', 'right': '"""off"""', 'labelleft': '"""on"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='on', left='off', right='off', labelleft='on')\n", (3789, 3904), True, 'from matplotlib import pyplot as plt\n'), ((4183, 4233), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Maximum number of phase space points"""'], {}), "('Maximum number of phase space points')\n", (4193, 4233), True, 'from matplotlib import pyplot as plt\n'), ((4238, 4266), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Runtime (msec)"""'], {}), "('Runtime (msec)')\n", (4248, 4266), True, 'from matplotlib import pyplot as plt\n'), ((4277, 4333), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fancybox': '(True)', 'numpoints': '(1)'}), "(loc='upper left', fancybox=True, numpoints=1)\n", (4287, 4333), True, 'from matplotlib import pyplot as plt\n'), ((4614, 4634), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4625, 4634), True, 'from matplotlib import pyplot as plt\n'), ((5567, 5697), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '"""off"""', 'top': '"""off"""', 'labelbottom': '"""on"""', 'left': '"""off"""', 'right': '"""off"""', 'labelleft': '"""on"""'}), "(axis='both', which='both', bottom='off', top='off',\n labelbottom='on', left='off', right='off', labelleft='on')\n", (5582, 5697), True, 'from matplotlib import pyplot as plt\n'), ((5976, 6037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\log_2$(Maximum number of phase space points)"""'], {}), "('$\\\\log_2$(Maximum number of phase space points)')\n", (5986, 6037), True, 'from matplotlib import pyplot as plt\n'), ((6041, 6069), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Matrix Element"""'], {}), "('Matrix Element')\n", (6051, 6069), True, 'from matplotlib import pyplot as plt\n'), ((6080, 6136), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fancybox': '(True)', 'numpoints': '(1)'}), "(loc='upper left', fancybox=True, numpoints=1)\n", (6090, 6136), True, 'from matplotlib import pyplot as plt\n'), ((6177, 6195), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6193, 6195), True, 'from matplotlib import pyplot as plt\n'), ((6201, 6225), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots.pdf"""'], 
{}), "('plots.pdf')\n", (6212, 6225), True, 'from matplotlib import pyplot as plt\n'), ((6230, 6240), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6238, 6240), True, 'from matplotlib import pyplot as plt\n'), ((2760, 2817), 'scipy.optimize.curve_fit', 'curve_fit', (['funcList[dat_mode]', 'nTime', 'time_data[dat_mode]'], {}), '(funcList[dat_mode], nTime, time_data[dat_mode])\n', (2769, 2817), False, 'from scipy.optimize import curve_fit\n'), ((2830, 2868), 'numpy.linspace', 'np.linspace', (['nTime[0]', 'nTime[-1]', '(1000)'], {}), '(nTime[0], nTime[-1], 1000)\n', (2841, 2868), True, 'import numpy as np\n'), ((1545, 1552), 'my_timer.timer', 'timer', ([], {}), '()\n', (1550, 1552), False, 'from my_timer import timer\n'), ((2046, 2055), 'math.log', 'log', (['x', '(2)'], {}), '(x, 2)\n', (2049, 2055), False, 'from math import log\n'), ((794, 815), 'scipy.special.jv', 'sp.special.jv', (['(2.5)', 'x'], {}), '(2.5, x)\n', (807, 815), True, 'import scipy as sp\n'), ((927, 948), 'scipy.special.jv', 'sp.special.jv', (['(2.5)', 'x'], {}), '(2.5, x)\n', (940, 948), True, 'import scipy as sp\n')]
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import sys
import logging
import csv
# Setup logging
logger = logging.getLogger(__name__)
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(message)s', datefmt='%m-%d %H:%M')
console_handle.setFormatter(formatter)
logger.addHandler(console_handle)
class Data:
"""Common class for a list of instances of the class Samples
Attributes:
name: name of the data as a string
samples: a list of samples as instances of class Sample
casedisgene: a list of lists [[case,gene]] containing each case in samples and the respective disease causing gene
"""
# index for each score
FM_IDX = 0
CADD_IDX = 1
GESTALT_IDX = 2
BOQA_IDX = 3
PHENO_IDX = 4
# FEATURE_IDX is for feature vector which contain the above feature score
# LABEL_IDX is for pathogenic gene label (0, 1)
# GENE_IDX is for gene symbol
FEATURE_IDX = 0
LABEL_IDX = 1
GENE_IDX = 2
GENE_NAME_IDX = 3
def __init__(self):
self.data = {}
# Filter dict
self.filter_dict = {0: "feature_score", 1: "cadd_phred_score", 2: "gestalt_score", 3: "boqa_score", 4: "pheno_score"}
def loadData(self, input_file, filter_field=None):
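        """
        Read the per-gene score CSV into self.data, grouping rows by case id.
        filter_field: optional list whose first entry is a score index from
        self.filter_dict; cases whose pathogenic gene (label == 1) has no value
        for that score are dropped.
        """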
filter_cases = []
with open(input_file) as csvfile:
reader = csv.DictReader(csvfile)
case = ""
for row in reader:
case = row["case"]
if not case in self.data:
self.data.update({case:[[], [], [], []]})
x = self.data[case][self.FEATURE_IDX]
y = self.data[case][self.LABEL_IDX]
gene = self.data[case][self.GENE_IDX]
gene_name = self.data[case][self.GENE_NAME_IDX]
x.append([row["feature_score"], row["cadd_phred_score"], row["gestalt_score"], row["boqa_score"], row["pheno_score"]])
y.append(int(row["label"]))
gene.append(row["gene_id"])
gene_name.append(row["gene_symbol"])
                # Filter out cases whose pathogenic gene has no value for the selected score
                if filter_field is not None:
if int(row["label"]) == 1:
if row[self.filter_dict[filter_field[0]]] == 'nan' or row[self.filter_dict[filter_field[0]]] == '0':
logger.debug("%s - %s has no %s score", case, row["gene_symbol"], self.filter_dict[filter_field[0]])
filter_cases.append(case)
for key in list(self.data):
if key in filter_cases:
del self.data[key]
else:
x = self.data[key][self.FEATURE_IDX]
y = self.data[key][self.LABEL_IDX]
x = np.array(x)
y = np.array(y)
self.data[key][self.FEATURE_IDX] = x
self.data[key][self.LABEL_IDX] = y
logger.info("Input %s: total %d cases", input_file, len(self.data))
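# Minimal usage sketch; the file name below is hypothetical and the CSV must provide the
# columns read above (case, label, gene_id, gene_symbol and the five score columns).
if __name__ == '__main__':
    data = Data()
    data.loadData('scores.csv', filter_field=[Data.GESTALT_IDX])
    for case_id, entry in data.data.items():
        labels = entry[Data.LABEL_IDX]
        logger.info("%s: %d candidate genes, %d labeled pathogenic",
                    case_id, len(labels), int(labels.sum()))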
|
[
"logging.getLogger",
"logging.StreamHandler",
"csv.DictReader",
"logging.Formatter",
"numpy.array"
] |
[((124, 151), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (141, 151), False, 'import logging\n'), ((170, 193), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (191, 193), False, 'import logging\n'), ((246, 314), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(message)s"""'], {'datefmt': '"""%m-%d %H:%M"""'}), "('%(asctime)s: %(message)s', datefmt='%m-%d %H:%M')\n", (263, 314), False, 'import logging\n'), ((1457, 1480), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (1471, 1480), False, 'import csv\n'), ((2951, 2962), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2959, 2962), True, 'import numpy as np\n'), ((2988, 2999), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2996, 2999), True, 'import numpy as np\n')]
|
# coding=utf-8
"""
Script to generate city object.
"""
from __future__ import division
import os
import numpy as np
import pickle
import warnings
import random
import datetime
import shapely.geometry.point as point
import pycity_base.classes.Weather as weath
import pycity_base.classes.demand.SpaceHeating as SpaceHeating
import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand
import pycity_base.classes.demand.Apartment as Apartment
import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater
import pycity_base.classes.demand.Occupancy as occup
import pycity_calc.environments.timer as time
# import pycity_calc.environments.market as price
import pycity_calc.environments.germanmarket as germanmarket
import pycity_calc.environments.environment as env
import pycity_calc.environments.co2emissions as co2
import pycity_calc.buildings.building as build_ex
import pycity_calc.cities.city as city
import pycity_calc.visualization.city_visual as citvis
import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman
import pycity_calc.toolbox.teaser_usage.teaser_use as tusage
import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc
try:
import teaser.logic.simulation.VDI_6007.weather as vdiweather
except: # pragma: no cover
msg = 'Could not import teaser.logic.simulation.VDI_6007.weather. ' \
'If you need to use it, install ' \
'it via pip "pip install TEASER". Alternatively, you might have ' \
'run into trouble with XML bindings in TEASER. This can happen ' \
'if you try to re-import TEASER within an active Python console.' \
'Please close the active Python console and open another one. Then' \
' try again. You might also be on the wrong TEASER branch ' \
'(without VDI 6007 core).'
warnings.warn(msg)
def load_data_file_with_spec_demand_data(filename):
"""
Function loads and returns data from
.../src/data/BaseData/Specific_Demand_Data/filename.
Filename should hold float (or int) values.
Other values (e.g. strings) will be loaded as 'nan'.
    Parameters
    ----------
filename : str
String with name of file, e.g. 'district_data.txt'
Returns
-------
dataset : numpy array
Numpy array with data
"""
src_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname
(
os.path.abspath(
__file__)))))
input_data_path = os.path.join(src_path, 'data', 'BaseData',
'Specific_Demand_Data', filename)
dataset = np.genfromtxt(input_data_path, delimiter='\t', skip_header=1)
return dataset
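# Usage sketch (added for illustration, not part of the original module; kept
# as a comment so that importing this file stays side-effect free). The file
# name is one of the specific demand files referenced in run_city_generator():
#
#   spec_dem_non_res = load_data_file_with_spec_demand_data(
#       'Spec_demands_non_res.txt')
#   print(spec_dem_non_res.shape)  # 2D numpy array, one row per building type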
def convert_th_slp_int_and_str(th_slp_int):
"""
Converts thermal slp type integer into string
Parameters
----------
th_slp_int : int
SLP type integer number
Returns
-------
th_slp_tag : str
SLP type string
Annotations
-----------
- `HEF` : Single family household
- `HMF` : Multi family household
- `GBA` : Bakeries
- `GBD` : Other services
    - `GBH` : Accommodations
- `GGA` : Restaurants
- `GGB` : Gardening
- `GHA` : Retailers
- `GHD` : Summed load profile business, trade and services
- `GKO` : Banks, insurances, public institutions
- `GMF` : Household similar businesses
- `GMK` : Automotive
- `GPD` : Paper and printing
- `GWA` : Laundries
"""
if th_slp_int is None:
msg = 'th_slp_int is None. Going to return None.'
warnings.warn(msg)
return None
slp_th_profile_dict_tag = {0: 'HEF',
1: 'HMF',
2: 'GMF',
3: 'GMK',
4: 'GPD',
5: 'GHA',
6: 'GBD',
7: 'GKO',
8: 'GBH',
9: 'GGA',
10: 'GBA',
11: 'GWA',
12: 'GGB',
13: 'GHD'}
th_slp_tag = slp_th_profile_dict_tag[th_slp_int]
return th_slp_tag
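# Illustrative lookups (added; results follow directly from
# slp_th_profile_dict_tag above):
#
#   convert_th_slp_int_and_str(0)     # -> 'HEF' (single family household)
#   convert_th_slp_int_and_str(13)    # -> 'GHD' (business, trade and services)
#   convert_th_slp_int_and_str(None)  # -> None (emits a warning)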
def convert_el_slp_int_and_str(el_slp_int):
"""
Converts el slp type integer into string
Parameters
----------
el_slp_int : int
SLP type integer number
Returns
-------
el_slp_tag : str
SLP type string
Annotations
-----------
# 0: H0 : Residential
# 1: G0 : Commercial
# 2: G1 : Commercial Mo-Sa 08:00 to 18:00
# 3: G2 : Commercial, mainly evening hours
# 4: G3 : Commercial 24 hours
# 5: G4 : Shop / hairdresser
    # 6: G5 : Bakery
# 7: G6 : Commercial, weekend
# 8: L0 : Farm
# 9: L1 : Farm, mainly cattle and milk
# 10: L2 : Other farming
"""
if el_slp_int is None:
msg = 'el_slp_int is None. Going to return None.'
warnings.warn(msg)
return None
slp_el_profile_dict_tag = {0: 'H0',
1: 'G0',
2: 'G1',
3: 'G2',
4: 'G3',
5: 'G4',
6: 'G5',
7: 'G6',
8: 'L0',
9: 'L1',
10: 'L2'}
el_slp_tag = slp_el_profile_dict_tag[el_slp_int]
return el_slp_tag
def convert_method_3_nb_into_str(method_3_nb):
"""
Converts method_3_nb into string
Parameters
----------
method_3_nb : int
Number of method 3
Returns
-------
method_3_str : str
String of method 3
"""
if method_3_nb is None:
msg = 'method_3_nb is None. Going to return None.'
warnings.warn(msg)
return None
dict_method_3 = {0: 'food_pro',
1: 'metal',
2: 'rest',
3: 'sports',
4: 'repair'}
method_3_str = dict_method_3[method_3_nb]
return method_3_str
def convert_method_4_nb_into_str(method_4_nb):
"""
Converts method_4_nb into string
Parameters
----------
method_4_nb : int
Number of method 4
Returns
-------
method_4_str : str
String of method 4
"""
if method_4_nb is None:
msg = 'method_4_nb is None. Going to return None.'
warnings.warn(msg)
return None
dict_method_4 = {0: 'metal_1', 1: 'metal_2', 2: 'warehouse'}
method_4_str = dict_method_4[method_4_nb]
return method_4_str
def conv_build_type_nb_to_name(build_type):
"""
Convert build_type number to name / explanation
Parameters
----------
build_type : int
Building type number, based on Spec_demands_non_res.txt
Returns
-------
build_name : str
Building name / explanation
"""
if build_type is None:
msg = 'build_type is None. Going to return None for build_name.'
warnings.warn(msg)
return None
dict_b_name = {
0: 'Residential',
1: 'Office (simulation)',
2: 'Main construction work',
3: 'Finishing trade construction work',
4: 'Bank and insurance',
5: 'Public institution',
6: 'Non profit organization',
7: 'Small office buildings',
8: 'Other services',
9: 'Metal',
10: 'Automobile',
11: 'Wood and timber',
12: 'Paper',
13: 'Small retailer for food',
14: 'Small retailer for non-food',
15: 'Large retailer for food',
16: 'Large retailer for non-food',
17: 'Primary school',
18: 'School for physically handicapped',
19: 'High school',
20: 'Trade school',
21: 'University',
22: 'Hotel',
23: 'Restaurant',
24: 'Childrens home',
        25: 'Bakery',
26: 'Butcher',
27: 'Laundry',
28: 'Farm primary agriculture ',
29: 'Farm with 10 - 49 cattle units',
30: 'Farm with 50 - 100 cattle units',
31: 'Farm with more than 100 cattle units',
32: 'Gardening',
33: 'Hospital',
34: 'Library',
35: 'Prison',
36: 'Cinema',
37: 'Theater',
38: 'Parish hall',
39: 'Sports hall',
40: 'Multi purpose hall',
41: 'Swimming hall',
42: 'Club house',
43: 'Fitness studio',
44: 'Train station smaller 5000m2',
45: 'Train station equal to or larger than 5000m2'
}
return dict_b_name[build_type]
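# Illustrative lookups (added; values taken from dict_b_name above):
#
#   conv_build_type_nb_to_name(0)     # -> 'Residential'
#   conv_build_type_nb_to_name(22)    # -> 'Hotel'
#   conv_build_type_nb_to_name(None)  # -> None (emits a warning)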
def constrained_sum_sample_pos(n, total):
"""
Return a randomly chosen list of n positive integers summing to total.
Each such list is equally likely to occur.
Parameters
----------
n : int
Number of chosen integers
total : int
Sum of all entries of result list
Returns
-------
results_list : list (of int)
List with result integers, which sum up to value 'total'
"""
dividers = sorted(random.sample(range(1, int(total)), int(n - 1)))
list_occ = [a - b for a, b in zip(dividers + [total], [0] + dividers)]
for i in range(len(list_occ)):
list_occ[i] = int(list_occ[i])
return list_occ
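# Usage sketch (added). The result is random, so the concrete list below is
# only one possible outcome:
#
#   constrained_sum_sample_pos(n=3, total=10)  # e.g. [2, 5, 3]; three positive
#                                              # integers that sum to 10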
def redistribute_occ(occ_list):
"""
Redistribute occupants in occ_list, so that each apartment is having at
least 1 person and maximal 5 persons.
Parameters
----------
occ_list
Returns
-------
occ_list_new : list
List holding number of occupants per apartment
"""
occ_list_new = occ_list[:]
if sum(occ_list_new) / len(occ_list_new) > 5: # pragma: no cover
msg = 'Average number of occupants per apartment is higher than 5.' \
' This is not valid for usage of Richardson profile generator.'
raise AssertionError(msg)
# Number of occupants to be redistributed
nb_occ_redist = 0
# Find remaining occupants
# ###############################################################
for i in range(len(occ_list_new)):
if occ_list_new[i] > 5:
# Add remaining occupants to nb_occ_redist
nb_occ_redist += occ_list_new[i] - 5
# Set occ_list_new entry to 5 persons
occ_list_new[i] = 5
if nb_occ_redist == 0:
# Return original list
return occ_list_new
# Identify empty apartments and add single occupant
# ###############################################################
for i in range(len(occ_list_new)):
if occ_list_new[i] == 0:
# Add single occupant
occ_list_new[i] = 1
# Remove occupant from nb_occ_redist
nb_occ_redist -= 1
if nb_occ_redist == 0:
# Return original list
return occ_list_new
# Redistribute remaining occupants
# ###############################################################
for i in range(len(occ_list_new)):
if occ_list_new[i] < 5:
# Fill occupants up with remaining occupants
for j in range(5 - occ_list_new[i]):
# Add single occupant
occ_list_new[i] += 1
# Remove single occupant from remaining sum
nb_occ_redist -= 1
if nb_occ_redist == 0:
# Return original list
return occ_list_new
if nb_occ_redist: # pragma: no cover
        raise AssertionError('Not all occupants could be distributed. '
'Check inputs and/or redistribute_occ() call.')
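# Worked example (added; follows the three redistribution passes above):
#
#   redistribute_occ([7, 0, 3])
#   # pass 1 caps apartment 0 at 5 occupants (2 occupants remain),
#   # pass 2 puts 1 occupant into the empty apartment,
#   # pass 3 assigns the last remaining occupant -> [5, 2, 3] (sum stays 10)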
def generate_environment(timestep=3600,
year_timer=2017,
year_co2=2017,
try_path=None,
location=(51.529086, 6.944689),
altitude=55,
new_try=False):
"""
Returns environment object. Total number of timesteps is automatically
generated for one year.
Parameters
----------
timestep : int
Timestep in seconds
year_timer : int, optional
        Chosen year of analysis (default: 2017)
(influences initial day for profile generation)
year_co2 : int, optional
        Chosen year with specific emission factors (default: 2017)
try_path : str, optional
Path to TRY weather file (default: None)
If set to None, uses default weather TRY file (2010, region 5)
location : Tuple, optional
        (latitude, longitude) of the simulated system's position
        (default: (51.529086, 6.944689) for Bottrop, Germany).
altitude : float, optional
        Altitude of location in m (default: 55 - City of Bottrop)
new_try : bool, optional
Defines, if TRY dataset have been generated after 2017 (default: False)
If False, assumes that TRY dataset has been generated before 2017.
If True, assumes that TRY dataset has been generated after 2017 and
belongs to the new TRY classes. This is important for extracting
the correct values from the TRY dataset!
Returns
-------
environment : object
Environment object
"""
# Create environment
timer = time.TimerExtended(timestep=timestep, year=year_timer)
weather = weath.Weather(timer, useTRY=True, pathTRY=try_path,
location=location, altitude=altitude,
new_try=new_try)
market = germanmarket.GermanMarket()
co2em = co2.Emissions(year=year_co2)
environment = env.EnvironmentExtended(timer=timer,
weather=weather,
prices=market,
location=location,
co2em=co2em)
return environment
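# Usage sketch (added; kept as a comment because it requires the pycity_base
# TRY weather data to be available). The values mirror the function defaults:
#
#   environment = generate_environment(timestep=3600,
#                                       year_timer=2017,
#                                       year_co2=2017,
#                                       try_path=None,
#                                       location=(51.529086, 6.944689),
#                                       altitude=55)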
def generate_res_building_single_zone(environment, net_floor_area,
spec_th_demand,
th_gen_method,
el_gen_method,
annual_el_demand=None,
el_random=False,
use_dhw=False,
dhw_method=1, number_occupants=None,
build_year=None, mod_year=None,
build_type=None, pv_use_area=None,
height_of_floors=None, nb_of_floors=None,
neighbour_buildings=None,
residential_layout=None, attic=None,
cellar=None, construction_type=None,
dormer=None, dhw_volumen=None,
do_normalization=True,
slp_manipulate=True,
curr_central_ahu=None,
dhw_random=False, prev_heat_dev=True,
season_mod=None):
"""
Function generates and returns extended residential building object
with single zone.
Parameters
----------
environment : object
Environment object
net_floor_area : float
Net floor area of building in m2
spec_th_demand : float
Specific thermal energy demand in kWh/m2*a
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
el_gen_method : int, optional
Electrical generation method (default: 1)
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building)
annual_el_demand : float, optional
Annual electrical energy demand in kWh/a (default: None)
el_random : bool, optional
Defines, if random value should be chosen from statistics
or if average value should be chosen. el_random == True means,
use random value. (default: False)
use_dhw : bool, optional
Boolean to define, if domestic hot water profile should be generated
(default: False)
True - Generate dhw profile
dhw_method : int, optional
Domestic hot water profile generation method (default: 1)
1 - Use Annex 42 profile
2 - Use stochastic profile
number_occupants : int, optional
Number of occupants (default: None)
build_year : int, optional
Building year of construction (default: None)
mod_year : int, optional
Last year of modernization of building (default: None)
build_type : int, optional
Building type (default: None)
pv_use_area : float, optional
Usable pv area in m2 (default: None)
height_of_floors : float
average height of single floor
nb_of_floors : int
Number of floors above the ground
neighbour_buildings : int
neighbour (default = 0)
0: no neighbour
1: one neighbour
2: two neighbours
residential_layout : int
type of floor plan (default = 0)
0: compact
1: elongated/complex
attic : int
type of attic (default = 0)
0: flat roof
1: non heated attic
2: partly heated attic
3: heated attic
cellar : int
type of cellar (default = 0)
0: no cellar
1: non heated cellar
2: partly heated cellar
3: heated cellar
construction_type : str
construction type (default = "heavy")
heavy: heavy construction
light: light construction
dormer : str
construction type
0: no dormer
1: dormer
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
curr_central_ahu : bool, optional
Defines, if building has air handling unit (AHU)
(default: False)
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
Returns
-------
extended_building : object
BuildingExtended object
"""
assert net_floor_area > 0
assert spec_th_demand >= 0
if annual_el_demand is not None:
assert annual_el_demand >= 0
else:
assert number_occupants is not None
assert number_occupants > 0
# Define SLP profiles for residential building with single zone
th_slp_type = 'HEF'
el_slp_type = 'H0'
if number_occupants is not None:
assert number_occupants > 0
assert number_occupants <= 5 # Max 5 occupants for stochastic profile
if el_gen_method == 2 or (dhw_method == 2 and use_dhw == True):
# Generate occupancy profile (necessary for stochastic, el. or
# dhw profile)
occupancy_object = occup.Occupancy(environment,
number_occupants=number_occupants)
else: # Generate occupancy object without profile generation
# Just used to store information about number of occupants
occupancy_object = occup.Occupancy(environment,
number_occupants=number_occupants,
do_profile=False)
else:
occupancy_object = None # Dummy object to prevent error with
# apartment usage
if el_gen_method == 2:
warnings.warn('Stochastic el. profile cannot be generated ' +
'due to missing number of occupants. ' +
'SLP is used instead.')
# Set el_gen_method to 1 (SLP)
el_gen_method = 1
elif dhw_method == 2:
            raise AssertionError('DHW profile cannot be generated ' +
                                 'for residential building without ' +
                                 'occupants (stochastic mode). ' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
if (number_occupants is None and dhw_method == 1 and use_dhw == True):
# Set number of occupants to 2 to enable dhw usage
number_occupants = 2
# Create space heating demand
if th_gen_method == 1:
# Use SLP
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=1,
profile_type=th_slp_type,
livingArea=net_floor_area,
specificDemand=spec_th_demand)
if slp_manipulate: # Do SLP manipulation
timestep = environment.timer.timeDiscretization
temp_array = environment.weather.tAmbient
mod_curve = \
slpman.slp_th_manipulator(timestep,
th_slp_curve=heat_power_curve.loadcurve,
temp_array=temp_array)
heat_power_curve.loadcurve = mod_curve
elif th_gen_method == 2:
# Use Modelica result profile
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=3,
livingArea=net_floor_area,
specificDemand=spec_th_demand)
# Calculate el. energy demand for apartment, if no el. energy
# demand is given for whole building to rescale
if annual_el_demand is None:
# Generate annual_el_demand_ap
annual_el_demand = calc_el_dem_ap(nb_occ=number_occupants,
el_random=el_random,
type='sfh')
print('Annual electrical demand in kWh: ', annual_el_demand)
if number_occupants is not None:
print('El. demand per person in kWh: ')
print(annual_el_demand / number_occupants)
print()
# Create electrical power curve
if el_gen_method == 2:
if season_mod is not None:
season_light_mod = True
else:
season_light_mod = False
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=2,
total_nb_occupants=number_occupants,
randomizeAppliances=True,
lightConfiguration=0,
annualDemand=annual_el_demand,
occupancy=occupancy_object.occupancy,
do_normalization=do_normalization,
prev_heat_dev=prev_heat_dev,
season_light_mod=season_light_mod,
light_mod_fac=season_mod)
else: # Use el. SLP
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=1,
annualDemand=annual_el_demand,
profileType=el_slp_type)
# Create domestic hot water demand
if use_dhw:
if dhw_volumen is None or dhw_random:
dhw_kwh = calc_dhw_dem_ap(nb_occ=number_occupants,
dhw_random=dhw_random,
type='sfh')
# Reconvert kWh/a to Liters per day
dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (955 * 4182 * 35 * 365)
# DHW volume per person and day
dhw_volumen = dhw_vol_ap / number_occupants
if dhw_method == 1: # Annex 42
dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
tFlow=60,
thermal=True,
method=1,
# Annex 42
dailyConsumption=dhw_volumen * number_occupants,
supplyTemperature=25)
else: # Stochastic profile
dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
tFlow=60,
thermal=True,
method=2,
supplyTemperature=25,
occupancy=occupancy_object.occupancy)
# Rescale to reference dhw volume (liters per person
# and day)
curr_dhw_vol_flow = dhw_power_curve.water
# Water volume flow in Liter/hour
curr_volume_year = sum(curr_dhw_vol_flow) * \
environment.timer.timeDiscretization / \
3600
curr_vol_day = curr_volume_year / 365
curr_vol_day_and_person = curr_vol_day / \
occupancy_object.number_occupants
print('Curr. volume per person and day: ',
curr_vol_day_and_person)
dhw_con_factor = dhw_volumen / curr_vol_day_and_person
print('Conv. factor of hot water: ', dhw_con_factor)
print('New volume per person and day: ',
curr_vol_day_and_person * dhw_con_factor)
# Normalize water flow and power load
dhw_power_curve.water *= dhw_con_factor
dhw_power_curve.loadcurve *= dhw_con_factor
# Create apartment
apartment = Apartment.Apartment(environment, occupancy=occupancy_object,
net_floor_area=net_floor_area)
# Add demands to apartment
if th_gen_method == 1 or th_gen_method == 2:
if use_dhw:
apartment.addMultipleEntities([heat_power_curve, el_power_curve,
dhw_power_curve])
else:
apartment.addMultipleEntities([heat_power_curve, el_power_curve])
else:
if use_dhw:
apartment.addMultipleEntities([el_power_curve,
dhw_power_curve])
else:
apartment.addEntity(el_power_curve)
# Create extended building object
extended_building = \
build_ex.BuildingExtended(environment,
build_year=build_year,
mod_year=mod_year,
build_type=build_type,
roof_usabl_pv_area=pv_use_area,
net_floor_area=net_floor_area,
height_of_floors=height_of_floors,
nb_of_floors=nb_of_floors,
neighbour_buildings=neighbour_buildings,
residential_layout=residential_layout,
attic=attic,
cellar=cellar,
construction_type=construction_type,
dormer=dormer,
with_ahu=
curr_central_ahu)
# Add apartment to extended building
extended_building.addEntity(entity=apartment)
return extended_building
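# Usage sketch (added). All keyword values below are illustrative assumptions
# for a single family house, not reference data:
#
#   sfh = generate_res_building_single_zone(environment=environment,
#                                           net_floor_area=150,
#                                           spec_th_demand=100,
#                                           th_gen_method=1,
#                                           el_gen_method=1,
#                                           number_occupants=3,
#                                           use_dhw=False,
#                                           build_year=1985,
#                                           mod_year=2005,
#                                           build_type=0)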
def generate_res_building_multi_zone(environment,
net_floor_area,
spec_th_demand,
th_gen_method,
el_gen_method,
nb_of_apartments,
annual_el_demand=None,
el_random=False,
use_dhw=False,
dhw_method=1,
total_number_occupants=None,
build_year=None, mod_year=None,
build_type=None, pv_use_area=None,
height_of_floors=None, nb_of_floors=None,
neighbour_buildings=None,
residential_layout=None, attic=None,
cellar=None, construction_type=None,
dormer=None, dhw_volumen=None,
do_normalization=True,
slp_manipulate=True,
curr_central_ahu=False,
dhw_random=False, prev_heat_dev=True,
season_mod=None):
"""
Function generates and returns extended residential building object
with multiple apartments. Occupants are randomly distributed over
number of apartments.
Parameters
----------
environment : object
Environment object
net_floor_area : float
Net floor area of building in m2
spec_th_demand : float
Specific thermal energy demand in kWh/m2*a
annual_el_demand : float, optional
Annual electrical energy demand in kWh/a (default: None)
el_random : bool, optional
Defines, if random value should be chosen from statistics
or if average value should be chosen. el_random == True means,
use random value. (default: False)
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
el_gen_method : int, optional
Electrical generation method (default: 1)
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building)
nb_of_apartments : int
Number of apartments within building
use_dhw : bool, optional
Boolean to define, if domestic hot water profile should be generated
(default: False)
True - Generate dhw profile
dhw_method : int, optional
Domestic hot water profile generation method (default: 1)
1 - Use Annex 42 profile
2 - Use stochastic profile
total_number_occupants : int, optional
Total number of occupants in all apartments (default: None)
build_year : int, optional
Building year of construction (default: None)
mod_year : int, optional
Last year of modernization of building (default: None)
build_type : int, optional
Building type (default: None)
pv_use_area : float, optional
Usable pv area in m2 (default: None)
height_of_floors : float
average height of the floors
nb_of_floors : int
Number of floors above the ground
neighbour_buildings : int
neighbour (default = 0)
0: no neighbour
1: one neighbour
2: two neighbours
residential_layout : int
type of floor plan (default = 0)
0: compact
1: elongated/complex
attic : int
type of attic (default = 0)
0: flat roof
1: non heated attic
2: partly heated attic
3: heated attic
cellar : int
type of cellar (default = 0)
0: no cellar
1: non heated cellar
2: partly heated cellar
3: heated cellar
construction_type : str
construction type (default = "heavy")
heavy: heavy construction
light: light construction
dormer : str
construction type
0: no dormer
1: dormer
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
curr_central_ahu : bool, optional
Defines, if building has air handling unit (AHU)
(default: False)
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
Returns
-------
extended_building : object
BuildingExtended object
Annotation
----------
    Raises an assertion error when the number of occupants per apartment is
    higher than 5 (necessary for stochastic el. profile generation)
"""
assert net_floor_area > 0
assert spec_th_demand >= 0
if annual_el_demand is not None:
assert annual_el_demand >= 0
if total_number_occupants is not None:
assert total_number_occupants > 0
assert total_number_occupants / nb_of_apartments <= 5, (
'Number of occupants per apartment is ' +
'at least once higher than 5.')
# Distribute occupants to different apartments
occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
total=total_number_occupants)
        # While not all values are smaller than or equal to 5, rerun sampling.
# This while loop might lead to large runtimes for buildings with a
# large number of apartments (not finding a valid solution, see
# issue #147). Thus, we add a counter to exit the loop
count = 0
while all(i <= 5 for i in occupancy_list) is not True:
occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
total=total_number_occupants)
if count == 100000:
# Take current occupancy_list and redistribute occupants
# manually until valid distribution is found
occupancy_list = redistribute_occ(occ_list=occupancy_list)
# Exit while loop
break
count += 1
print('Current list of occupants per apartment: ', occupancy_list)
else:
msg = 'Number of occupants is None for current building!'
warnings.warn(msg)
# Define SLP profiles for residential building with multiple zone
th_slp_type = 'HMF'
el_slp_type = 'H0'
# Create extended building object
extended_building = \
build_ex.BuildingExtended(environment,
build_year=build_year,
mod_year=mod_year,
build_type=build_type,
roof_usabl_pv_area=pv_use_area,
net_floor_area=net_floor_area,
height_of_floors=height_of_floors,
nb_of_floors=nb_of_floors,
neighbour_buildings=
neighbour_buildings,
residential_layout=
residential_layout,
attic=attic,
cellar=cellar,
construction_type=
construction_type,
dormer=dormer,
with_ahu=curr_central_ahu)
if annual_el_demand is not None:
# Distribute el. demand equally to apartments
annual_el_demand_ap = annual_el_demand / nb_of_apartments
else:
annual_el_demand_ap = None
# Loop over apartments
# #---------------------------------------------------------------------
for i in range(int(nb_of_apartments)):
# Dummy init of number of occupants
curr_number_occupants = None
# Check number of occupants
if total_number_occupants is not None:
# Get number of occupants
curr_number_occupants = occupancy_list[i]
# Generate occupancy profiles for stochastic el. and/or dhw
if el_gen_method == 2 or (dhw_method == 2 and use_dhw):
# Generate occupancy profile (necessary for stochastic, el. or
# dhw profile)
occupancy_object = occup.Occupancy(environment,
number_occupants=
curr_number_occupants)
else: # Generate occupancy object without profile
occupancy_object = occup.Occupancy(environment,
number_occupants=
curr_number_occupants,
do_profile=False)
else:
if el_gen_method == 2:
warnings.warn('Stochastic el. profile cannot be generated ' +
'due to missing number of occupants. ' +
'SLP is used instead.')
# Set el_gen_method to 1 (SLP)
el_gen_method = 1
elif dhw_method == 2:
                raise AssertionError('DHW profile cannot be generated ' +
                                     'for residential building without ' +
                                     'occupants (stochastic mode). ' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
if (curr_number_occupants is None and dhw_method == 1 and
use_dhw == True):
# If dhw profile should be generated, but current number of
            # occupants is None, number of occupants is sampled from
# occupancy distribution for apartment
curr_number_occupants = usunc.calc_sampling_occ_per_app(
nb_samples=1)
# Assumes equal area share for all apartments
apartment_area = net_floor_area / nb_of_apartments
# Create space heating demand (for apartment)
if th_gen_method == 1:
# Use SLP
heat_power_curve = \
SpaceHeating.SpaceHeating(environment,
method=1,
profile_type=th_slp_type,
livingArea=apartment_area,
specificDemand=spec_th_demand)
if slp_manipulate: # Do SLP manipulation
timestep = environment.timer.timeDiscretization
temp_array = environment.weather.tAmbient
mod_curve = \
slpman.slp_th_manipulator(timestep,
th_slp_curve=heat_power_curve.loadcurve,
temp_array=temp_array)
heat_power_curve.loadcurve = mod_curve
elif th_gen_method == 2:
# Use Modelica result profile
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=3,
livingArea=apartment_area,
specificDemand=spec_th_demand)
# Calculate el. energy demand for apartment, if no el. energy
# demand is given for whole building to rescale
if annual_el_demand_ap is None:
# Generate annual_el_demand_ap
annual_el_demand_ap = calc_el_dem_ap(nb_occ=curr_number_occupants,
el_random=el_random,
type='mfh')
print('Annual el. demand (apartment) in kWh: ', annual_el_demand_ap)
if curr_number_occupants is not None:
print('El. demand per person in kWh: ')
print(annual_el_demand_ap / curr_number_occupants)
print()
# Create electrical power curve
if el_gen_method == 2:
if season_mod is not None:
season_light_mod = True
else:
season_light_mod = False
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=2,
total_nb_occupants=curr_number_occupants,
randomizeAppliances=True,
lightConfiguration=0,
annualDemand=annual_el_demand_ap,
occupancy=occupancy_object.occupancy,
do_normalization=do_normalization,
prev_heat_dev=prev_heat_dev,
season_light_mod=season_light_mod,
light_mod_fac=season_mod)
else: # Use el. SLP
el_power_curve = ElectricalDemand.ElectricalDemand(environment,
method=1,
annualDemand=annual_el_demand_ap,
profileType=el_slp_type)
# Create domestic hot water demand
if use_dhw:
if dhw_volumen is None or dhw_random:
dhw_kwh = calc_dhw_dem_ap(nb_occ=curr_number_occupants,
dhw_random=dhw_random,
type='mfh')
# Reconvert kWh/a to Liters per day
dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (
955 * 4182 * 35 * 365)
# DHW volume per person and day
dhw_volumen = dhw_vol_ap / curr_number_occupants
if dhw_method == 1: # Annex 42
dhw_power_curve = DomesticHotWater.DomesticHotWater(
environment,
tFlow=60,
thermal=True,
method=1,
# Annex 42
dailyConsumption=dhw_volumen * curr_number_occupants,
supplyTemperature=25)
else: # Stochastic profile
dhw_power_curve = DomesticHotWater.DomesticHotWater(
environment,
tFlow=60,
thermal=True,
method=2,
supplyTemperature=25,
occupancy=occupancy_object.occupancy)
# Rescale to reference dhw volume (liters per person
# and day)
curr_dhw_vol_flow = dhw_power_curve.water
# Water volume flow in Liter/hour
curr_volume_year = sum(curr_dhw_vol_flow) * \
environment.timer.timeDiscretization / \
3600
curr_vol_day = curr_volume_year / 365
curr_vol_day_and_person = curr_vol_day / \
occupancy_object.number_occupants
print('Curr. volume per person and day: ',
curr_vol_day_and_person)
dhw_con_factor = dhw_volumen / curr_vol_day_and_person
print('Conv. factor of hot water: ', dhw_con_factor)
print('New volume per person and day: ',
curr_vol_day_and_person * dhw_con_factor)
# Normalize water flow and power load
dhw_power_curve.water *= dhw_con_factor
dhw_power_curve.loadcurve *= dhw_con_factor
# Create apartment
apartment = Apartment.Apartment(environment,
occupancy=occupancy_object,
net_floor_area=apartment_area)
# Add demands to apartment
if th_gen_method == 1 or th_gen_method == 2:
if use_dhw:
apartment.addMultipleEntities([heat_power_curve,
el_power_curve,
dhw_power_curve])
else:
apartment.addMultipleEntities([heat_power_curve,
el_power_curve])
else:
if use_dhw:
apartment.addMultipleEntities([el_power_curve,
dhw_power_curve])
else:
apartment.addEntity(el_power_curve)
# Add apartment to extended building
extended_building.addEntity(entity=apartment)
return extended_building
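# Usage sketch (added; illustrative values only). Occupants are distributed
# randomly over the apartments via constrained_sum_sample_pos():
#
#   mfh = generate_res_building_multi_zone(environment=environment,
#                                          net_floor_area=600,
#                                          spec_th_demand=110,
#                                          th_gen_method=1,
#                                          el_gen_method=1,
#                                          nb_of_apartments=6,
#                                          total_number_occupants=14)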
def generate_nonres_building_single_zone(environment,
net_floor_area, spec_th_demand,
annual_el_demand, th_slp_type,
el_slp_type=None,
build_year=None, mod_year=None,
build_type=None, pv_use_area=None,
method_3_type=None,
method_4_type=None,
height_of_floors=None,
nb_of_floors=None):
"""
Function generates and returns extended nonresidential building object
with single zone.
Parameters
----------
environment : object
Environment object
net_floor_area : float
Net floor area of building in m2
spec_th_demand : float
Specific thermal energy demand in kWh/m2*a
annual_el_demand : float
Annual electrical energy demand in kWh/a
th_slp_type : str
Thermal SLP type (for non-residential buildings)
- `GBA` : Bakeries
- `GBD` : Other services
        - `GBH` : Accommodations
- `GGA` : Restaurants
- `GGB` : Gardening
- `GHA` : Retailers
- `GHD` : Summed load profile business, trade and services
- `GKO` : Banks, insurances, public institutions
- `GMF` : Household similar businesses
- `GMK` : Automotive
- `GPD` : Paper and printing
- `GWA` : Laundries
el_slp_type : str, optional (default: None)
Electrical SLP type
- H0 : Household
- L0 : Farms
- L1 : Farms with breeding / cattle
- L2 : Farms without cattle
- G0 : Business (general)
- G1 : Business (workingdays 8:00 AM - 6:00 PM)
- G2 : Business with high loads in the evening
- G3 : Business (24 hours)
- G4 : Shops / Barbers
- G5 : Bakery
- G6 : Weekend operation
number_occupants : int, optional
Number of occupants (default: None)
build_year : int, optional
Building year of construction (default: None)
mod_year : int, optional
Last year of modernization of building (default: None)
build_type : int, optional
Building type (default: None)
pv_use_area : float, optional
Usable pv area in m2 (default: None)
method_3_type : str, optional
Defines type of profile for method=3 (default: None)
Options:
- 'food_pro': Food production
- 'metal': Metal company
- 'rest': Restaurant (with large cooling load)
- 'sports': Sports hall
- 'repair': Repair / metal shop
method_4_type : str, optional
Defines type of profile for method=4 (default: None)
- 'metal_1' : Metal company with smooth profile
- 'metal_2' : Metal company with fluctuation in profile
- 'warehouse' : Warehouse
height_of_floors : float
average height of the floors
nb_of_floors : int
Number of floors above the ground
Returns
-------
extended_building : object
BuildingExtended object
"""
assert net_floor_area > 0
assert spec_th_demand >= 0
assert annual_el_demand >= 0
assert th_slp_type != 'HEF', ('HEF thermal slp profile only valid for ' +
'residential buildings.')
assert th_slp_type != 'HMF', ('HMF thermal slp profile only valid for ' +
'residential buildings.')
    assert el_slp_type != 'H0', ('H0 electrical slp profile only valid for ' +
'residential buildings.')
# Create space heating demand
heat_power_curve = SpaceHeating.SpaceHeating(environment,
method=1,
profile_type=th_slp_type,
livingArea=net_floor_area,
specificDemand=spec_th_demand)
if method_3_type is not None:
el_power_curve = \
ElectricalDemand.ElectricalDemand(environment,
method=3,
annualDemand=annual_el_demand,
do_normalization=True,
method_3_type=method_3_type)
elif method_4_type is not None:
el_power_curve = \
ElectricalDemand.ElectricalDemand(environment,
method=4,
annualDemand=annual_el_demand,
do_normalization=True,
method_4_type=method_4_type)
else:
# Use el. SLP for el. power load generation
assert el_slp_type is not None, 'el_slp_type is required!'
el_power_curve = \
ElectricalDemand.ElectricalDemand(environment,
method=1,
annualDemand=annual_el_demand,
profileType=el_slp_type)
# Create apartment
apartment = Apartment.Apartment(environment)
# Add demands to apartment
apartment.addMultipleEntities([heat_power_curve, el_power_curve])
# Create extended building object
extended_building = build_ex.BuildingExtended(environment,
net_floor_area=net_floor_area,
build_year=build_year,
mod_year=mod_year,
build_type=build_type,
roof_usabl_pv_area=pv_use_area,
height_of_floors=height_of_floors,
nb_of_floors=nb_of_floors,
)
# Add apartment to extended building
extended_building.addEntity(entity=apartment)
return extended_building
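# Usage sketch (added; illustrative values). The SLP tags correspond to the
# converter functions above, e.g. 'GHD' (thermal) and 'G0' (electrical):
#
#   office = generate_nonres_building_single_zone(environment=environment,
#                                                 net_floor_area=400,
#                                                 spec_th_demand=90,
#                                                 annual_el_demand=20000,
#                                                 th_slp_type='GHD',
#                                                 el_slp_type='G0')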
def get_district_data_from_txt(path, delimiter='\t'):
"""
Load city district data from txt file (see annotations below for further
information of required inputs).
    NaN values are going to be replaced with Python None.
Parameters
----------
path : str
Path to txt file
delimiter : str, optional
Defines delimiter for txt file (default: '\t')
Returns
-------
district_data : ndarray
Numpy 2d-array with city district data (each column represents
different parameter, see annotations)
Annotations
-----------
File structure
Columns:
1: id (int)
2: x in m (float)
3: y in m (float)
4: building_type (int, e.g. 0 for residential building)
5: net floor area in m2 (float)
6: Year of construction (int, optional)
7: Year of modernization (int, optional)
8: Annual (final) thermal energy demand in kWh (float, optional)
9: Annual electrical energy demand in kWh (float, optional)
10: Usable pv roof area in m2 (float, optional)
11: Number of apartments (int, optional)
12: Total number of occupants (int, optional)
13: Number of floors above the ground (int, optional)
14: Average Height of floors (float, optional)
15: If building has a central AHU or not (boolean, optional)
16: Residential layout (int, optional, e.g. 0 for compact)
17: Neighbour Buildings (int, optional) (0 - free standing)
(1 - double house) (2 - row house)
18: Type of attic (int, optional, e.g. 0 for flat roof) (1 - regular roof;
unheated) (2 - regular roof; partially heated) (3 - regular roof; fully
heated)
19: Type of cellar (int, optional, e.g. 1 for non heated cellar)
(0 - no basement) (1 - non heated) (2 - partially heated) (3 - fully heated)
20: Dormer (int, optional, 0: no dormer/ 1: dormer)
21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
22: Method_3_nb (for usage of measured, weekly non-res. el. profile
(optional)
23: Method_4_nb (for usage of measured, annual non-res. el. profile
(optional)
"""
district_data = np.genfromtxt(path, delimiter=delimiter, skip_header=1)
# Replace nan with None values of Python
district_data = np.where(np.isnan(district_data), None, district_data)
return district_data
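# Usage sketch (added; 'district_data.txt' is only an example file name with
# the 23-column layout described in the docstring above):
#
#   district_data = get_district_data_from_txt('district_data.txt')
#   print(district_data.shape)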
def calc_el_dem_ap(nb_occ, el_random, type):
"""
Calculate electric energy demand per apartment per year
in kWh/a (residential buildings, only)
Parameters
----------
nb_occ : int
Number of occupants
el_random : bool
Defines, if random value should be chosen from statistics
or if average value should be chosen. el_random == True means,
use random value.
type : str
Define residential building type (single family or multi-
family)
Options:
- 'sfh' : Single family house
- 'mfh' : Multi family house
Returns
-------
el_dem : float
Electric energy demand per apartment in kWh/a
"""
assert nb_occ > 0
assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
assert type in ['sfh', 'mfh']
if el_random:
# Choose first entry of random sample list
el_dem = usunc.calc_sampling_el_demand_per_apartment(
nb_samples=1,
nb_persons=nb_occ,
type=type)[0]
else:
# Choose average value depending on nb_occ
# Class D without hot water (Stromspiegel 2017)
dict_sfh = {1: 2500,
2: 3200,
3: 3900,
4: 4200,
5: 5400}
dict_mfh = {1: 1500,
2: 2200,
3: 2800,
4: 3200,
5: 4000}
if type == 'sfh':
el_dem = dict_sfh[nb_occ]
elif type == 'mfh':
el_dem = dict_mfh[nb_occ]
return el_dem
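# Illustrative calls (added). The deterministic branch returns the table
# values defined above:
#
#   calc_el_dem_ap(nb_occ=3, el_random=False, type='sfh')  # -> 3900 kWh/a
#   calc_el_dem_ap(nb_occ=2, el_random=False, type='mfh')  # -> 2200 kWh/a
#   calc_el_dem_ap(nb_occ=3, el_random=True, type='sfh')   # random sample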
def calc_dhw_dem_ap(nb_occ, dhw_random, type, delta_t=35, c_p_water=4182,
rho_water=995):
"""
Calculate hot water energy demand per apartment per year
in kWh/a (residential buildings, only)
Parameters
----------
nb_occ : int
Number of occupants
dhw_random : bool
Defines, if random value should be chosen from statistics
or if average value should be chosen. dhw_random == True means,
use random value.
type : str
Define residential building type (single family or multi-
family)
Options:
- 'sfh' : Single family house
- 'mfh' : Multi family house
delta_t : float, optional
Temperature split of heated up water in Kelvin (default: 35)
c_p_water : float, optional
Specific heat capacity of water in J/kgK (default: 4182)
rho_water : float, optional
Density of water in kg/m3 (default: 995)
Returns
-------
dhw_dem : float
        Hot water energy demand per apartment in kWh/a
"""
assert nb_occ > 0
assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
assert type in ['sfh', 'mfh']
if dhw_random:
# Choose first entry of random sample list
# DHW volume in liters per apartment and day
dhw_volume = usunc.calc_sampling_dhw_per_apartment(
nb_samples=1,
nb_persons=nb_occ,
b_type=type)[0]
dhw_dem = dhw_volume * 365 * rho_water * c_p_water * delta_t / \
(1000 * 3600 * 1000)
else:
# Choose average value depending on nb_occ
        # Average hot water energy demand values (Stromspiegel 2017)
dict_sfh = {1: 500,
2: 800,
3: 1000,
4: 1300,
5: 1600}
dict_mfh = {1: 500,
2: 900,
3: 1300,
4: 1400,
5: 2000}
if type == 'sfh':
dhw_dem = dict_sfh[nb_occ]
elif type == 'mfh':
dhw_dem = dict_mfh[nb_occ]
return dhw_dem
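# Illustrative calls (added). In the deterministic branch the table values
# above are returned directly; delta_t, c_p_water and rho_water only matter
# when dhw_random is True:
#
#   calc_dhw_dem_ap(nb_occ=3, dhw_random=False, type='mfh')  # -> 1300 kWh/a
#   calc_dhw_dem_ap(nb_occ=5, dhw_random=False, type='sfh')  # -> 1600 kWh/a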
def run_city_generator(generation_mode, timestep,
year_timer, year_co2,
location,
th_gen_method,
el_gen_method, district_data, use_dhw=False,
dhw_method=1, try_path=None,
pickle_city_filename=None, do_save=True,
path_save_city=None, eff_factor=0.85,
show_city=False, altitude=55, dhw_volumen=None,
do_normalization=True, slp_manipulate=True,
call_teaser=False, teaser_proj_name='pycity',
do_log=True, log_path=None,
project_name='teaser_project',
air_vent_mode=1, vent_factor=0.5,
t_set_heat=20,
t_set_cool=70,
t_night=16,
vdi_sh_manipulate=False, city_osm=None,
el_random=False, dhw_random=False, prev_heat_dev=True,
season_mod=None, merge_windows=False, new_try=False):
"""
Function generates city district for user defined input. Generated
buildings consist of only one single zone!
Parameters
----------
generation_mode : int
Integer to define method to generate city district
(so far, only csv/txt file import has been implemented)
        generation_mode = 0: Load data from csv/txt file (tab separated)
timestep : int
Timestep in seconds
year_timer : int
Chosen year of analysis
(influences initial day for profile generation)
    year_co2 : int
        Chosen year with specific emission factors
location : Tuple
(latitude, longitude) of the simulated system's position.
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
        3 - Use TEASER VDI 6007 core to simulate thermal loads
el_gen_method : int
Electrical generation method
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building). Requires number of occupants.
district_data : ndarray
Numpy 2d-array with city district data (each column represents
different parameter, see annotations)
use_dhw : bool, optional
Defines if domestic hot water profiles should be generated.
(default: False)
dhw_method : int, optional
Defines method for dhw profile generation (default: 1)
Only relevant if use_dhw=True. Options:
- 1: Generate profiles via Annex 42
- 2: Generate stochastic dhw profiles
try_path : str, optional
Path to TRY weather file (default: None)
If set to None, uses default weather TRY file (2010, region 5)
pickle_city_filename : str, optional
Name for file, which should be pickled and saved, if no path is
        handed over to save the object to (default: None)
do_save : bool, optional
Defines, if city object instance should be saved as pickle file
(default: True)
path_save_city : str, optional
Path to save (pickle and dump) city object instance to (default: None)
If None is used, saves file to .../output/...
eff_factor : float, optional
Efficiency factor of thermal boiler system (default: 0.85)
show_city : bool, optional
Boolean to define if city district should be printed by matplotlib
after generation (default: False)
True: Print results
False: Do not print results
altitude : float, optional
Altitude of location in m (default: 55 - City of Bottrop)
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 °C. Rescales profile to
original demand value.
call_teaser : bool, optional
Defines, if teaser should be called to generate typeBuildings
(currently, residential typeBuildings only).
(default: False)
If set to True, generates typeBuildings and add them to building node
as attribute 'type_building'
teaser_proj_name : str, optional
TEASER project name (default: 'pycity'). Only relevant, if call_teaser
is set to True
do_log : bool, optional
Defines, if log file of inputs should be generated (default: True)
log_path : str, optional
Path to log file (default: None). If set to None, saves log to
.../output
air_vent_mode : int
Defines method to generation air exchange rate for VDI 6007 simulation
Options:
0 : Use constant value (vent_factor in 1/h)
1 : Use deterministic, temperature-dependent profile
2 : Use stochastic, user-dependent profile
vent_factor : float, optional
Ventilation rate factor in 1/h (default: 0.5). Only used, if
array_vent_rate is None (otherwise, array_vent_rate array is used)
t_set_heat : float, optional
Heating set temperature in degree Celsius. If temperature drops below
t_set_heat, model is going to be heated up. (default: 20)
(Related to constraints for res. buildings in DIN V 18599)
t_set_cool : float, optional
Cooling set temperature in degree Celsius. If temperature rises above
t_set_cool, model is going to be cooled down. (default: 70)
t_night : float, optional
Night set back temperature in degree Celsius (default: 16)
(Related to constraints for res. buildings in DIN V 18599)
project_name : str, optional
TEASER project name (default: 'teaser_project')
vdi_sh_manipulate : bool, optional
Defines, if VDI 6007 thermal space heating load curve should be
normalized to match given annual space heating demand in kWh
(default: False)
el_random : bool, optional
        Defines, if annual electrical demand value for normalization of
        el. load profile should randomly diverge from reference value
        within specific boundaries (default: False).
        If False: Use reference value for normalization
        If True: Allow generating a value that differs from the reference value
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
merge_windows : bool, optional
Defines TEASER project setting for merge_windows_calc
(default: False). If set to False, merge_windows_calc is set to False.
If True, Windows are merged into wall resistances.
new_try : bool, optional
Defines, if TRY dataset have been generated after 2017 (default: False)
If False, assumes that TRY dataset has been generated before 2017.
If True, assumes that TRY dataset has been generated after 2017 and
belongs to the new TRY classes. This is important for extracting
the correct values from the TRY dataset!
Returns
-------
city_object : object
City object of pycity_calc
Annotations
-----------
Non-residential building loads are automatically generated via SLP
(even if el_gen_method is set to 2). Furthermore, dhw profile generation
is automatically neglected (only valid for residential buildings)
Electrical load profiles of residential buildings without occupants
are automatically generated via SLP (even if el_gen_method is set to 2)
File structure (district_data np.array)
Columns:
1: id (int)
2: x in m (float)
3: y in m (float)
4: building_type (int, e.g. 0 for residential building)
5: net floor area in m2 (float)
6: Year of construction (int, optional)
7: Year of modernization (int, optional)
8: Annual (final) thermal energy demand in kWh (float, optional)
For residential: space heating, only!
For non-residential: Space heating AND hot water! (SLP usage)
9: Annual electrical energy demand in kWh (float, optional)
10: Usable pv roof area in m2 (float, optional)
11: Number of apartments (int, optional)
12: Total number of occupants (int, optional)
13: Number of floors above the ground (int, optional)
14: Average Height of floors (float, optional)
15: If building has a central AHU or not (boolean, optional)
16: Residential layout (int, optional, e.g. 0 for compact)
17: Neighbour Buildings (int, optional); 0 - free standing; 1 - Double house; 2 - Row house;
18: Type of attic (int, optional, e.g. 0 for flat roof); 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
19: Type of basement (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
20: Dormer (int, optional, 0: no dormer/ 1: dormer)
21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
22: Method_3_nb (for usage of measured, weekly non-res. el. profile
(optional) (0 to 4)
23: Method_4_nb (for usage of measured, annual non-res. el. profile
(optional) (0 - 2)
method_3_type : str, optional
Defines type of profile for method=3 (default: None)
Options:
0 - 'food_pro': Food production
1 - 'metal': Metal company
2 - 'rest': Restaurant (with large cooling load)
3 - 'sports': Sports hall
4 - 'repair': Repair / metal shop
method_4_type : str, optional
Defines type of profile for method=4 (default: None)
0 - 'metal_1' : Metal company with smooth profile
1 - 'metal_2' : Metal company with fluctuation in profile
2 - 'warehouse' : Warehouse
"""
assert eff_factor > 0, 'Efficiency factor has to be larger than zero.'
    assert eff_factor <= 1, 'Efficiency factor cannot exceed 1.'
if dhw_volumen is not None: # pragma: no cover
assert dhw_volumen >= 0, 'Hot water volume cannot be below zero.'
if generation_mode == 1: # pragma: no cover
assert city_osm is not None, 'Generation mode 1 requires city object!'
if vdi_sh_manipulate is True and th_gen_method == 3: # pragma: no cover
msg = 'Simulated profiles of VDI 6007 call (TEASER --> ' \
              'space heating) are going to be normalized with annual thermal' \
' space heating demand values given by user!'
warnings.warn(msg)
if do_log: # pragma: no cover
# Write log file
# ################################################################
# Log file path
if log_path is None:
# If not existing, use default path
this_path = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(this_path, 'output', 'city_gen_log.txt')
log_file = open(log_path, mode='w')
log_file.write('PyCity_Calc city_generator.py log file')
log_file.write('\n############## Time and location ##############\n')
log_file.write('Date: ' + str(datetime.datetime.now()) + '\n')
log_file.write('generation_mode: ' + str(generation_mode) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('Year for timer: ' + str(year_timer) + '\n')
log_file.write('Year for CO2 emission factors: '
+ str(year_co2) + '\n')
log_file.write('Location: ' + str(location) + '\n')
log_file.write('altitude: ' + str(altitude) + '\n')
if generation_mode == 0:
log_file.write('Generation mode: csv/txt input, only.\n')
elif generation_mode == 1:
log_file.write('Generation mode: csv/txt plus city osm object.\n')
log_file.write('\n############## Generation methods ##############\n')
log_file.write('th_gen_method: ' + str(th_gen_method) + '\n')
if th_gen_method == 1:
log_file.write('Manipulate SLP: ' + str(slp_manipulate) + '\n')
elif th_gen_method == 3:
log_file.write('t_set_heat: ' + str(t_set_heat) + '\n')
log_file.write('t_set_night: ' + str(t_night) + '\n')
log_file.write('t_set_cool: ' + str(t_set_cool) + '\n')
log_file.write('air_vent_mode: ' + str(air_vent_mode) + '\n')
log_file.write('vent_factor: ' + str(vent_factor) + '\n')
log_file.write('el_gen_method: ' + str(el_gen_method) + '\n')
log_file.write(
'Normalize el. profile: ' + str(do_normalization) + '\n')
log_file.write(
'Do random el. normalization: ' + str(el_random) + '\n')
log_file.write(
'Prevent el. heating devices for el load generation: '
'' + str(prev_heat_dev) + '\n')
log_file.write(
'Rescaling factor lighting power curve to implement seasonal '
'influence: ' + str(season_mod) + '\n')
log_file.write('use_dhw: ' + str(use_dhw) + '\n')
log_file.write('dhw_method: ' + str(dhw_method) + '\n')
log_file.write('dhw_volumen: ' + str(dhw_volumen) + '\n')
log_file.write(
'Do random dhw. normalization: ' + str(dhw_random) + '\n')
log_file.write('\n############## Others ##############\n')
log_file.write('try_path: ' + str(try_path) + '\n')
log_file.write('eff_factor: ' + str(eff_factor) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('call_teaser: ' + str(call_teaser) + '\n')
log_file.write('teaser_proj_name: ' + str(teaser_proj_name) + '\n')
# Log file is closed, after pickle filename has been generated
# (see code below)
if generation_mode == 0 or generation_mode == 1:
# ##################################################################
# Load specific demand files
# Load specific thermal demand input data
spec_th_dem_res_building = load_data_file_with_spec_demand_data(
'RWI_res_building_spec_th_demand.txt')
start_year_column = (spec_th_dem_res_building[:, [0]])
# Reverse
start_year_column = start_year_column[::-1]
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. thermal energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
spec_el_dem_res_building = load_data_file_with_spec_demand_data(
'AGEB_res_building_spec_e_demand.txt')
"""
Columns:
1. Start year (int)
2. Final year (int)
        3. Spec. electrical energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
# (depending on number of occupants)
spec_el_dem_res_building_per_person = \
load_data_file_with_spec_demand_data(
'Stromspiegel2017_spec_el_energy_demand.txt')
"""
Columns:
1. Number of persons (int) ( 1 - 5 SFH and 1 - 5 MFH)
2. Annual electrical demand in kWh/a (float)
3. Specific electrical demand per person in kWh/person*a (float)
"""
# ###################################################################
# Load specific demand data and slp types for
# non residential buildings
spec_dem_and_slp_non_res = load_data_file_with_spec_demand_data(
'Spec_demands_non_res.txt')
"""
Columns:
1. type_id (int)
2. type_name (string) # Currently 'nan', due to expected float
3. Spec. thermal energy demand in kWh/m2*a (float)
4. Spec. electrical energy demand in kWh/m2*a (float)
5. Thermal SLP type (int)
6. Electrical SLP type (int)
"""
# ###################################################################
# Generate city district
# Generate extended environment of pycity_calc
environment = generate_environment(timestep=timestep,
year_timer=year_timer,
year_co2=year_co2,
location=location,
try_path=try_path,
altitude=altitude,
new_try=new_try)
print('Generated environment object.\n')
if generation_mode == 0:
# Generate city object
# ############################################################
city_object = city.City(environment=environment)
print('Generated city object.\n')
else:
# Overwrite city_osm environment
print('Overwrite city_osm.environment with new environment')
city_osm.environment = environment
city_object = city_osm
# Check if district_data only holds one entry for single building
# In this case, has to be processed differently
if district_data.ndim > 1:
multi_data = True
else: # Only one entry (single building)
multi_data = False
# If multi_data is false, loop below is going to be exited with
# a break statement at the end.
# Generate dummy node id and thermal space heating demand dict
dict_id_vdi_sh = {}
# Loop over district_data
# ############################################################
for i in range(len(district_data)):
if multi_data:
# Extract data out of input file
curr_id = int(
district_data[i][0]) # id / primary key of building
curr_x = district_data[i][1] # x-coordinate in m
curr_y = district_data[i][2] # y-coordinate in m
curr_build_type = int(
district_data[i][3]) # building type nb (int)
curr_nfa = district_data[i][4] # Net floor area in m2
curr_build_year = district_data[i][5] # Year of construction
curr_mod_year = district_data[i][
6] # optional (last year of modernization)
curr_th_e_demand = district_data[i][
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[i][
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[i][
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[i][
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[i][
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[i][
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[i][
13] # optional (Average Height of floors)
curr_central_ahu = district_data[i][
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[i][
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[i][
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[i][
17] # optional Type of attic (int, optional, e.g. 0 for flat roof);
# 1 - Roof, non heated; 2 - Roof, partially heated; 3 - Roof, fully heated
curr_type_cellar = district_data[i][
18] # optional Type of basement
# (int, optional, e.g. 1 for non heated basement; 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3 - basement, fully heated)
curr_dormer = district_data[i][
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[i][
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[i][
21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile
curr_method_4_nb = district_data[i][
22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile
else: # Single entry
# Extract data out of input file
curr_id = int(district_data[0]) # id / primary key of building
curr_x = district_data[1] # x-coordinate in m
curr_y = district_data[2] # y-coordinate in m
curr_build_type = int(
district_data[3]) # building type nb (int)
curr_nfa = district_data[4] # Net floor area in m2
curr_build_year = district_data[5] # Year of construction
curr_mod_year = district_data[
6] # optional (last year of modernization)
curr_th_e_demand = district_data[
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[
13] # optional (Average Height of floors)
curr_central_ahu = district_data[
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[
17] # optional Type of attic (int, optional, e.g. 0 for flat roof);
# 1 - Roof, non heated; 2 - Roof, partially heated; 3 - Roof, fully heated
curr_type_cellar = district_data[
18] # optional Type of basement
# (int, optional, e.g. 1 for non heated basement; 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3 - basement, fully heated)
curr_dormer = district_data[
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[
21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile
curr_method_4_nb = district_data[
22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile
print('Process building', curr_id)
print('########################################################')
# Assert functions
# ############################################################
assert curr_build_type >= 0
assert curr_nfa > 0
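# Columns 5 - 8 (build year, mod year, thermal and electrical energy
# demand) must be positive, if they are defined at all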
for m in range(5, 9):
if multi_data:
if district_data[i][m] is not None:
assert district_data[i][m] > 0
else:
if district_data[m] is not None:
assert district_data[m] > 0
if curr_nb_of_apartments is not None:
assert curr_nb_of_apartments > 0
# Convert to int
curr_nb_of_apartments = int(curr_nb_of_apartments)
if curr_nb_of_occupants is not None:
assert curr_nb_of_occupants > 0
# Convert curr_nb_of_occupants from float to int
curr_nb_of_occupants = int(curr_nb_of_occupants)
if (curr_nb_of_occupants is not None
and curr_nb_of_apartments is not None):
assert curr_nb_of_occupants / curr_nb_of_apartments <= 5, (
'Average share of occupants per apartment should ' +
'not exceed 5 persons! (Necessary for stochastic el. ' +
'profile generation.)')
if curr_method_3_nb is not None:
assert curr_method_3_nb >= 0
if curr_method_4_nb is not None:
assert curr_method_4_nb >= 0
if curr_build_type == 0 and curr_nb_of_apartments is None: # pragma: no cover
# Define single apartment, if nb of apartments is unknown
msg = 'Building ' + str(curr_id) + ' is residential, but' \
' does not have a number' \
' of apartments. Going' \
' to set nb. to 1.'
warnings.warn(msg)
curr_nb_of_apartments = 1
if (curr_build_type == 0 and curr_nb_of_occupants is None
and use_dhw and dhw_method == 2):
raise AssertionError('DHW profile cannot be generated ' +
'for residential building without ' +
'occupants (stochastic mode). ' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
# Check if TEASER inputs are defined
if call_teaser or th_gen_method == 3:
if curr_build_type == 0: # Residential
assert curr_nb_of_floors is not None
assert curr_avg_height_of_floors is not None
assert curr_central_ahu is not None
assert curr_res_layout is not None
assert curr_nb_of_neighbour_bld is not None
assert curr_type_attic is not None
assert curr_type_cellar is not None
assert curr_dormer is not None
assert curr_construction_type is not None
if curr_nb_of_floors is not None:
assert curr_nb_of_floors > 0
if curr_avg_height_of_floors is not None:
assert curr_avg_height_of_floors > 0
if curr_central_ahu is not None:
assert 0 <= curr_central_ahu <= 1
if curr_res_layout is not None:
assert 0 <= curr_res_layout <= 1
if curr_nb_of_neighbour_bld is not None:
assert 0 <= curr_nb_of_neighbour_bld <= 2
if curr_type_attic is not None:
assert 0 <= curr_type_attic <= 3
if curr_type_cellar is not None:
assert 0 <= curr_type_cellar <= 3
if curr_dormer is not None:
assert 0 <= curr_dormer <= 1
if curr_construction_type is not None:
assert 0 <= curr_construction_type <= 1
# Check building type (residential or non residential)
# #-------------------------------------------------------------
if curr_build_type == 0: # Is residential
print('Residential building')
# Get spec. net therm. demand value according to last year
# of modernization or build_year
# If year of modernization is defined, use curr_mod_year
if curr_mod_year is not None:
use_year = int(curr_mod_year)
else: # Use year of construction
use_year = int(curr_build_year)
# Get specific, thermal energy demand (based on use_year)
for j in range(len(start_year_column)):
if use_year >= start_year_column[j]:
curr_spec_th_demand = spec_th_dem_res_building[len(
spec_th_dem_res_building) - 1 - j][2]
break
# # Get spec. electr. demand
# if curr_nb_of_occupants is None:
# # USE AGEB values, if no number of occupants is given
# # Set specific demand value in kWh/m2*a
# curr_spec_el_demand = spec_el_dem_res_building[1]
# # Only valid for array like [2012 38.7]
# else:
# # Use Stromspiegel 2017 values
# # Calculate specific electric demand values depending
# # on number of occupants
#
# if curr_nb_of_apartments == 1:
# btype = 'sfh'
# elif curr_nb_of_apartments > 1:
# btype = 'mfh'
#
# # Average occupancy number per apartment
# curr_av_occ_per_app = \
# curr_nb_of_occupants / curr_nb_of_apartments
# print('Average number of occupants per apartment')
# print(round(curr_av_occ_per_app, ndigits=2))
#
# if curr_av_occ_per_app <= 5 and curr_av_occ_per_app > 0:
# # Correctur factor for non-int. av. number of
# # occupants (#19)
#
# # Divide annual el. energy demand with net floor area
# if btype == 'sfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1
# row_idx_high = math.floor(curr_av_occ_per_app) - 1
# elif btype == 'mfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1 \
# + 5
# row_idx_high = math.floor(curr_av_occ_per_app) - 1 \
# + 5
#
# cur_spec_el_dem_per_occ_high = \
# spec_el_dem_res_building_per_person[row_idx_high][2]
# cur_spec_el_dem_per_occ_low = \
# spec_el_dem_res_building_per_person[row_idx_low][2]
#
# print('Chosen reference spec. el. demands per person '
# 'in kWh/a (high and low value):')
# print(cur_spec_el_dem_per_occ_high)
# print(cur_spec_el_dem_per_occ_low)
#
# delta = round(curr_av_occ_per_app, 0) - \
# curr_av_occ_per_app
#
# if delta < 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# elif delta > 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_low + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# else:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high
#
# # print('Calculated spec. el. demand per person in '
# # 'kWh/a:')
# # print(round(curr_spec_el_dem_occ, ndigits=2))
#
# # Specific el. demand per person (dependend on av.
# # number of occupants in each apartment)
# # --> Multiplied with number of occupants
# # --> Total el. energy demand in kWh
# # --> Divided with net floor area
# # --> Spec. el. energy demand in kWh/a
#
# curr_spec_el_demand = \
# curr_spec_el_dem_occ * curr_nb_of_occupants \
# / curr_nfa
#
# # print('Spec. el. energy demand in kWh/m2:')
# # print(curr_spec_el_demand)
#
# else:
# raise AssertionError('Invalid number of occupants')
# if el_random:
# if curr_nb_of_occupants is None:
# # Randomize curr_spec_el_demand with normal distribution
# # with curr_spec_el_demand as mean and 10 % standard dev.
# curr_spec_el_demand = \
# np.random.normal(loc=curr_spec_el_demand,
# scale=0.10 * curr_spec_el_demand)
# else:
# # Randomize rounding up and down of curr_av_occ_per_ap
# if round(curr_av_occ_per_app) > curr_av_occ_per_app:
# # Round up
# delta = round(curr_av_occ_per_app) - \
# curr_av_occ_per_app
# prob_r_up = 1 - delta
# rnb = random.random()
# if rnb < prob_r_up:
# use_occ = math.ceil(curr_av_occ_per_app)
# else:
# use_occ = math.floor(curr_av_occ_per_app)
#
# else:
# # Round down
# delta = curr_av_occ_per_app - \
# round(curr_av_occ_per_app)
# prob_r_down = 1 - delta
# rnb = random.random()
# if rnb < prob_r_down:
# use_occ = math.floor(curr_av_occ_per_app)
# else:
# use_occ = math.ceil(curr_av_occ_per_app)
#
# sample_el_per_app = \
# usunc.calc_sampling_el_demand_per_apartment(nb_samples=1,
# nb_persons=use_occ,
# type=btype)[0]
#
# # Divide sampled el. demand per apartment through
# # number of persons of apartment (according to
# # Stromspiegel 2017) and multiply this value with
# # actual number of persons in building to get
# # new total el. energy demand. Divide this value with
# # net floor area to get specific el. energy demand
# curr_spec_el_demand = \
# (sample_el_per_app / curr_av_occ_per_app) * \
# curr_nb_of_occupants / curr_nfa
# conversion of the construction_type from int to str
if curr_construction_type == 0:
new_curr_construction_type = 'heavy'
elif curr_construction_type == 1:
new_curr_construction_type = 'light'
else:
new_curr_construction_type = 'heavy'
# #-------------------------------------------------------------
else: # Non-residential
print('Non residential')
# Get spec. demands and slp types according to building_type
curr_spec_th_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][2]
curr_spec_el_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][3]
curr_th_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][4]
curr_el_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][5]
# Convert slp type integers into strings
curr_th_slp_type = convert_th_slp_int_and_str(curr_th_slp_type)
curr_el_slp_type = convert_el_slp_int_and_str(curr_el_slp_type)
# If curr_el_e_demand is not known, calculate it via spec.
# demand
if curr_el_e_demand is None:
curr_el_e_demand = curr_spec_el_demand * curr_nfa
# #-------------------------------------------------------------
# If curr_th_e_demand is known, recalc spec e. demand
if curr_th_e_demand is not None:
# Calc. spec. net thermal energy demand with efficiency factor
curr_spec_th_demand = eff_factor * curr_th_e_demand / curr_nfa
else:
# Spec. final energy demand is given, recalculate it to
# net thermal energy demand with efficiency factor
curr_spec_th_demand *= eff_factor
# # If curr_el_e_demand is not known, calculate it via spec. demand
# if curr_el_e_demand is None:
# curr_el_e_demand = curr_spec_el_demand * curr_nfa
if th_gen_method == 1 or th_gen_method == 2 or curr_build_type != 0:
print('Used specific thermal demand value in kWh/m2*a:')
print(curr_spec_th_demand)
# #-------------------------------------------------------------
# Generate BuildingExtended object
if curr_build_type == 0: # Residential
if curr_nb_of_apartments > 1: # Multi-family house
building = generate_res_building_multi_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
nb_of_apartments=curr_nb_of_apartments,
use_dhw=use_dhw,
dhw_method=dhw_method,
total_number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
elif curr_nb_of_apartments == 1: # Single-family house
building = generate_res_building_single_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
use_dhw=use_dhw,
dhw_method=dhw_method,
number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
else:
raise AssertionError('Wrong number of apartments')
else: # Non-residential
method_3_str = None
method_4_str = None
# Convert curr_method numbers, if not None
if curr_method_3_nb is not None:
method_3_str = \
convert_method_3_nb_into_str(int(curr_method_3_nb))
if curr_method_4_nb is not None:
method_4_str = \
convert_method_4_nb_into_str(int(curr_method_4_nb))
building = generate_nonres_building_single_zone(environment,
th_slp_type=curr_th_slp_type,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
el_slp_type=curr_el_slp_type,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
method_3_type=method_3_str,
method_4_type=method_4_str,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors
)
# Generate position shapely point
position = point.Point(curr_x, curr_y)
if generation_mode == 0:
# Add building to city object
id = city_object.add_extended_building(
extended_building=building,
position=position, name=curr_id)
elif generation_mode == 1:
# Add building as entity to corresponding building node
# Positions should be (nearly) equal
assert position.x - city_object.nodes[int(curr_id)][
'position'].x <= 0.1
assert position.y - city_object.nodes[int(curr_id)][
'position'].y <= 0.1
city_object.nodes[int(curr_id)]['entity'] = building
id = curr_id
# Save annual thermal net heat energy demand for space heating
# to dict (used for normalization with VDI 6007 core)
dict_id_vdi_sh[id] = curr_spec_th_demand * curr_nfa
print('Finished processing of building', curr_id)
print('#######################################################')
print()
# If only single building should be processed, break loop
if multi_data is False:
break
# #-------------------------------------------------------------
print('Added all buildings with data to city object.')
# VDI 6007 simulation to generate space heating load curves
# Overwrites existing heat load curves (and annual heat demands)
if th_gen_method == 3:
print('Perform VDI 6007 space heating load simulation for every'
' building')
if el_gen_method == 1:
# Skip usage of occupancy and electrical load profiles
# as internal loads within VDI 6007 core
requ_profiles = False
else:
requ_profiles = True
tusage.calc_and_add_vdi_6007_loads_to_city(city=city_object,
air_vent_mode=air_vent_mode,
vent_factor=vent_factor,
t_set_heat=t_set_heat,
t_set_cool=t_set_cool,
t_night=t_night,
alpha_rad=None,
project_name=project_name,
requ_profiles=requ_profiles)
# Set call_teaser to False, as it is already included
# in calc_and_add_vdi_6007_loads_to_city
call_teaser = False
if vdi_sh_manipulate:
# Normalize VDI 6007 load curves to match given annual
# thermal space heating energy demand
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
# If node_type is building
if city_object.nodes[n]['node_type'] == 'building':
# If entity is kind building
if city_object.nodes[n][
'entity']._kind == 'building':
# Given value (user input)
ann_sh = dict_id_vdi_sh[n]
# Building pointer
curr_b = city_object.nodes[n]['entity']
# Current value on object
curr_sh = curr_b.get_annual_space_heat_demand()
norm_factor = ann_sh / curr_sh
# Do normalization
# Loop over apartments
for apart in curr_b.apartments:
# Normalize apartment space heating load
apart.demandSpaceheating.loadcurve \
*= norm_factor
print('Generation results:')
print('###########################################')
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
if city_object.nodes[n]['node_type'] == 'building':
if 'entity' in city_object.nodes[n]:
if city_object.nodes[n]['entity']._kind == 'building':
print('Results of building: ', n)
print('################################')
print()
curr_b = city_object.nodes[n]['entity']
sh_demand = curr_b.get_annual_space_heat_demand()
el_demand = curr_b.get_annual_el_demand()
dhw_demand = curr_b.get_annual_dhw_demand()
nfa = curr_b.net_floor_area
print('Annual space heating demand in kWh:')
print(sh_demand)
if nfa is not None and nfa != 0:
print(
'Specific space heating demand in kWh/m2:')
print(sh_demand / nfa)
print()
print('Annual electric demand in kWh:')
print(el_demand)
if nfa is not None and nfa != 0:
print('Specific electric demand in kWh/m2:')
print(el_demand / nfa)
nb_occ = curr_b.get_number_of_occupants()
if nb_occ is not None and nb_occ != 0:
print('Specific electric demand in kWh'
' per person and year:')
print(el_demand / nb_occ)
print()
print('Annual hot water demand in kWh:')
print(dhw_demand)
if nfa is not None and nfa != 0:
print('Specific hot water demand in kWh/m2:')
print(dhw_demand / nfa)
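# Rough conversion of annual hot water energy demand (kWh) into
# water volume in liters: kWh --> J (* 1000 * 3600), divided by
# c_p * delta_T (approx. 4200 J/(kg*K) * 35 K), assuming ~1 kg per liter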
volume_year = dhw_demand * 1000 * 3600 / (
4200 * 35)
volume_day = volume_year / 365
if nb_occ is not None and nb_occ != 0:
v_person_day = \
volume_day / nb_occ
print('Hot water volume per person and day:')
print(v_person_day)
print()
# Create and add TEASER type_buildings to every building node
if call_teaser:
# Create TEASER project
project = tusage.create_teaser_project(name=teaser_proj_name,
merge_windows=merge_windows)
# Generate typeBuildings and add to city
tusage.create_teaser_typecity(project=project,
city=city_object,
generate_Output=False)
if do_save: # pragma: no cover
if path_save_city is None:
if pickle_city_filename is None:
msg = 'If path_save_city is None, pickle_city_filename ' \
'cannot be None! Instead, filename has to be ' \
'defined to be able to save city object.'
raise AssertionError(msg)
this_path = os.path.dirname(os.path.abspath(__file__))
path_save_city = os.path.join(this_path, 'output',
pickle_city_filename)
try:
# Pickle and dump city objects
pickle.dump(city_object, open(path_save_city, 'wb'))
print('Pickled and dumped city object to: ')
print(path_save_city)
except:
warnings.warn('Could not pickle and save city object')
if do_log: # pragma: no cover
if pickle_city_filename is not None:
log_file.write('pickle_city_filename: ' +
str(pickle_city_filename)
+ '\n')
print('Wrote log file to: ' + str(log_path))
# Close log file
log_file.close()
# Visualize city
if show_city: # pragma: no cover
# Plot city district
try:
citvis.plot_city_district(city=city_object,
plot_street=False)
except:
warnings.warn('Could not plot city district.')
return city_object
if __name__ == '__main__':
this_path = os.path.dirname(os.path.abspath(__file__))
# User inputs #########################################################
# Choose generation mode
# ######################################################
# 0 - Use csv/txt input to generate city district
# 1 - Use csv/txt input file to enrich existing city object, based on
# osm call (city object should hold nodes, but no entities. City
# generator is going to add building, apartment and load entities to
# building nodes
generation_mode = 0
# Generate environment
# ######################################################
year_timer = 2017
year_co2 = 2017
timestep = 3600 # Timestep in seconds
# location = (51.529086, 6.944689) # (latitude, longitude) of Bottrop
location = (50.775346, 6.083887) # (latitude, longitude) of Aachen
altitude = 266 # Altitude of location in m (Aachen)
# Weather path
try_path = None
# If None, used default TRY (region 5, 2010)
new_try = False
# new_try has to be set to True, if you want to use TRY data of 2017
# or newer! Else: new_try = False
# Space heating load generation
# ######################################################
# Thermal generation method
# 1 - SLP (standardized load profile)
# 2 - Load and rescale Modelica simulation profile
# (generated with TRY region 12, 2010)
# 3 - VDI 6007 calculation (requires el_gen_method = 2)
th_gen_method = 3
# For non-residential buildings, SLPs are generated automatically.
# Manipulate thermal slp to fit to space heating demand?
slp_manipulate = False
# True - Do manipulation
# False - Use original profile
# Only relevant, if th_gen_method == 1
# Sets thermal power to zero in time spans where the average daily outdoor
# temperature is equal to or larger than 12 °C. Rescales profile to
# original demand value.
# Manipulate vdi space heating load to be normalized to given annual net
# space heating demand in kWh
vdi_sh_manipulate = False
# Electrical load generation
# ######################################################
# Choose electric load profile generation method (1 - SLP; 2 - Stochastic)
# Stochastic profile is only generated for residential buildings,
# which have a defined number of occupants (otherwise, SLP is used)
el_gen_method = 2
# If user defines method_3_nb or method_4_nb within input file
# (only valid for non-residential buildings), SLP will not be used.
# Instead, corresponding profile will be loaded (based on measurement
# data, see ElectricalDemand.py within pycity)
# Do normalization of el. load profile
# (only relevant for el_gen_method=2).
# Rescales el. load profile to expected annual el. demand value in kWh
do_normalization = True
# Randomize electrical demand value (residential buildings, only)
el_random = True
# Prevent usage of electrical heating and hot water devices in
# electrical load generation (only relevant if el_gen_method == 2)
prev_heat_dev = True
# True: Prevent electrical heating device usage for profile generation
# False: Include electrical heating devices in electrical load generation
# Use cosine function to increase winter lighting usage and reduce
# summer lighting usage in richadson el. load profiles
# season_mod is factor, which is used to rescale cosine wave with
# lighting power reference (max. lighting power)
season_mod = 0.3
# If None, do not use cosine wave to estimate seasonal influence
# Else: Define float
# (only relevant if el_gen_method == 2)
# Hot water profile generation
# ######################################################
# Generate DHW profiles? (True/False)
use_dhw = True # Only relevant for residential buildings
# DHW generation method? (1 - Annex 42; 2 - Stochastic profiles)
# Choice of Annex 42 profiles NOT recommended for multiple buildings,
# as profile stays the same and only changes scaling.
# Stochastic profiles require defined nb of occupants per residential
# building
dhw_method = 2 # Only relevant for residential buildings
# Define dhw volume per person and day (use_dhw=True)
dhw_volumen = None # Only relevant for residential buildings
# Randomize chosen dhw_volume reference value by selecting new value
dhw_random = True
# Input file names and pathes
# ######################################################
# Define input data filename
filename = 'city_3_buildings.txt'
# filename = 'city_clust_simple.txt'
# filename = 'aachen_forsterlinde_mod_6.txt'
# filename = 'aachen_frankenberg_mod_6.txt'
# filename = 'aachen_huenefeld_mod_6.txt'
# filename = 'aachen_kronenberg_mod_8.txt'
# filename = 'aachen_preusweg_mod_8.txt'
# filename = 'aachen_tuerme_mod_6.txt'
# Output filename
pickle_city_filename = filename[:-4] + '.pkl'
# For generation_mode == 1:
# city_osm_input = None
# city_osm_input = 'aachen_forsterlinde_mod_7.pkl'
city_osm_input = 'aachen_frankenberg_mod_7.pkl'
# city_osm_input = 'aachen_huenefeld_mod_7.pkl'
# city_osm_input = 'aachen_kronenberg_mod_7.pkl'
# city_osm_input = 'aachen_preusweg_mod_7.pkl'
# city_osm_input = 'aachen_tuerme_mod_7.pkl'
# Pickle and dump city object instance?
do_save = True
# Path to save city object instance to
path_save_city = None
# If None, uses .../output/...
# Efficiency factor of thermal energy systems
# Used to convert input values (final energy demand) to net energy demand
eff_factor = 1
# For VDI 6007 simulation (th_gen_method == 3)
# #####################################
t_set_heat = 20 # Heating set temperature in degree Celsius
t_set_night = 16 # Night set back temperature in degree Celsius
t_set_cool = 70 # Cooling set temperature in degree Celsius
# Air exchange rate (required for th_gen_method = 3 (VDI 6007 sim.))
air_vent_mode = 2
# int; Define mode for air ventilation rate generation
# 0 : Use constant value (vent_factor in 1/h)
# 1 : Use deterministic, temperature-dependent profile
# 2 : Use stochastic, user-dependent profile
# (mode 0 corresponds to using a static ventilation rate value)
vent_factor = 0.3 # Constant ventilation rate in 1/h
# (only used, if air_vent_mode is 0. Otherwise, estimate vent_factor
# based on last year of modernization)
# TEASER typebuilding generation
# ######################################################
# Use TEASER to generate typebuildings?
call_teaser = False
teaser_proj_name = filename[:-4]
# Requires additional attributes (such as nb_of_floors, net_floor_area..)
merge_windows = False
# merge_windows : bool, optional
# Defines TEASER project setting for merge_windows_calc
# (default: False). If set to False, merge_windows_calc is set to False.
# If True, Windows are merged into wall resistances.
txt_path = os.path.join(this_path, 'input', filename)
if generation_mode == 1:
path_city_osm_in = os.path.join(this_path, 'input', city_osm_input)
# Path for log file
log_f_name = log_file_name = str('log_' + filename)
log_f_path = os.path.join(this_path, 'output', log_file_name)
# End of user inputs ################################################
print('Run city generator for ', filename)
assert generation_mode in [0, 1]
if generation_mode == 1:
assert city_osm_input is not None
if air_vent_mode == 1 or air_vent_mode == 2:
assert el_gen_method == 2, 'air_vent_mode 1 and 2 require occupancy' \
' profiles!'
# Load district_data file
district_data = get_district_data_from_txt(txt_path)
if generation_mode == 1:
# Load city input file
city_osm = pickle.load(open(path_city_osm_in, mode='rb'))
else:
# Dummy value
city_osm = None
# Generate city district
city = run_city_generator(generation_mode=generation_mode,
timestep=timestep,
year_timer=year_timer,
year_co2=year_co2,
location=location,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method, use_dhw=use_dhw,
dhw_method=dhw_method,
district_data=district_data,
pickle_city_filename=pickle_city_filename,
eff_factor=eff_factor, show_city=True,
try_path=try_path, altitude=altitude,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
call_teaser=call_teaser,
teaser_proj_name=teaser_proj_name,
air_vent_mode=air_vent_mode,
vent_factor=vent_factor,
t_set_heat=t_set_heat,
t_set_cool=t_set_cool,
t_night=t_set_night,
vdi_sh_manipulate=vdi_sh_manipulate,
city_osm=city_osm, el_random=el_random,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
log_path=log_f_path,
season_mod=season_mod,
merge_windows=merge_windows,
new_try=new_try,
path_save_city=path_save_city,
do_save=do_save)
|
[
"pycity_calc.environments.environment.EnvironmentExtended",
"pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_typecity",
"pycity_base.classes.demand.Occupancy.Occupancy",
"pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_dhw_per_apartment",
"pycity_calc.toolbox.modifiers.slp_th_manipulator.slp_th_manipulator",
"pycity_calc.cities.city.City",
"pycity_calc.environments.co2emissions.Emissions",
"pycity_calc.visualization.city_visual.plot_city_district",
"pycity_calc.environments.timer.TimerExtended",
"numpy.genfromtxt",
"pycity_base.classes.Weather.Weather",
"pycity_base.classes.demand.SpaceHeating.SpaceHeating",
"warnings.warn",
"pycity_calc.toolbox.teaser_usage.teaser_use.calc_and_add_vdi_6007_loads_to_city",
"pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_project",
"pycity_calc.buildings.building.BuildingExtended",
"pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_occ_per_app",
"pycity_base.classes.demand.Apartment.Apartment",
"numpy.isnan",
"pycity_calc.environments.germanmarket.GermanMarket",
"shapely.geometry.point.Point",
"pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_el_demand_per_apartment",
"pycity_base.classes.demand.DomesticHotWater.DomesticHotWater",
"os.path.join",
"datetime.datetime.now",
"pycity_base.classes.demand.ElectricalDemand.ElectricalDemand",
"os.path.abspath"
] |
[((2465, 2541), 'os.path.join', 'os.path.join', (['src_path', '"""data"""', '"""BaseData"""', '"""Specific_Demand_Data"""', 'filename'], {}), "(src_path, 'data', 'BaseData', 'Specific_Demand_Data', filename)\n", (2477, 2541), False, 'import os\n'), ((2592, 2653), 'numpy.genfromtxt', 'np.genfromtxt', (['input_data_path'], {'delimiter': '"""\t"""', 'skip_header': '(1)'}), "(input_data_path, delimiter='\\t', skip_header=1)\n", (2605, 2653), True, 'import numpy as np\n'), ((13423, 13477), 'pycity_calc.environments.timer.TimerExtended', 'time.TimerExtended', ([], {'timestep': 'timestep', 'year': 'year_timer'}), '(timestep=timestep, year=year_timer)\n', (13441, 13477), True, 'import pycity_calc.environments.timer as time\n'), ((13493, 13603), 'pycity_base.classes.Weather.Weather', 'weath.Weather', (['timer'], {'useTRY': '(True)', 'pathTRY': 'try_path', 'location': 'location', 'altitude': 'altitude', 'new_try': 'new_try'}), '(timer, useTRY=True, pathTRY=try_path, location=location,\n altitude=altitude, new_try=new_try)\n', (13506, 13603), True, 'import pycity_base.classes.Weather as weath\n'), ((13670, 13697), 'pycity_calc.environments.germanmarket.GermanMarket', 'germanmarket.GermanMarket', ([], {}), '()\n', (13695, 13697), True, 'import pycity_calc.environments.germanmarket as germanmarket\n'), ((13710, 13738), 'pycity_calc.environments.co2emissions.Emissions', 'co2.Emissions', ([], {'year': 'year_co2'}), '(year=year_co2)\n', (13723, 13738), True, 'import pycity_calc.environments.co2emissions as co2\n'), ((13758, 13862), 'pycity_calc.environments.environment.EnvironmentExtended', 'env.EnvironmentExtended', ([], {'timer': 'timer', 'weather': 'weather', 'prices': 'market', 'location': 'location', 'co2em': 'co2em'}), '(timer=timer, weather=weather, prices=market,\n location=location, co2em=co2em)\n', (13781, 13862), True, 'import pycity_calc.environments.environment as env\n'), ((28493, 28589), 'pycity_base.classes.demand.Apartment.Apartment', 'Apartment.Apartment', (['environment'], {'occupancy': 'occupancy_object', 'net_floor_area': 'net_floor_area'}), '(environment, occupancy=occupancy_object, net_floor_area\n =net_floor_area)\n', (28512, 28589), True, 'import pycity_base.classes.demand.Apartment as Apartment\n'), ((29238, 29679), 'pycity_calc.buildings.building.BuildingExtended', 'build_ex.BuildingExtended', (['environment'], {'build_year': 'build_year', 'mod_year': 'mod_year', 'build_type': 'build_type', 'roof_usabl_pv_area': 'pv_use_area', 'net_floor_area': 'net_floor_area', 'height_of_floors': 'height_of_floors', 'nb_of_floors': 'nb_of_floors', 'neighbour_buildings': 'neighbour_buildings', 'residential_layout': 'residential_layout', 'attic': 'attic', 'cellar': 'cellar', 'construction_type': 'construction_type', 'dormer': 'dormer', 'with_ahu': 'curr_central_ahu'}), '(environment, build_year=build_year, mod_year=\n mod_year, build_type=build_type, roof_usabl_pv_area=pv_use_area,\n net_floor_area=net_floor_area, height_of_floors=height_of_floors,\n nb_of_floors=nb_of_floors, neighbour_buildings=neighbour_buildings,\n residential_layout=residential_layout, attic=attic, cellar=cellar,\n construction_type=construction_type, dormer=dormer, with_ahu=\n curr_central_ahu)\n', (29263, 29679), True, 'import pycity_calc.buildings.building as build_ex\n'), ((38773, 39214), 'pycity_calc.buildings.building.BuildingExtended', 'build_ex.BuildingExtended', (['environment'], {'build_year': 'build_year', 'mod_year': 'mod_year', 'build_type': 'build_type', 'roof_usabl_pv_area': 'pv_use_area', 'net_floor_area': 
'net_floor_area', 'height_of_floors': 'height_of_floors', 'nb_of_floors': 'nb_of_floors', 'neighbour_buildings': 'neighbour_buildings', 'residential_layout': 'residential_layout', 'attic': 'attic', 'cellar': 'cellar', 'construction_type': 'construction_type', 'dormer': 'dormer', 'with_ahu': 'curr_central_ahu'}), '(environment, build_year=build_year, mod_year=\n mod_year, build_type=build_type, roof_usabl_pv_area=pv_use_area,\n net_floor_area=net_floor_area, height_of_floors=height_of_floors,\n nb_of_floors=nb_of_floors, neighbour_buildings=neighbour_buildings,\n residential_layout=residential_layout, attic=attic, cellar=cellar,\n construction_type=construction_type, dormer=dormer, with_ahu=\n curr_central_ahu)\n', (38798, 39214), True, 'import pycity_calc.buildings.building as build_ex\n'), ((53349, 53485), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(1)', 'profile_type': 'th_slp_type', 'livingArea': 'net_floor_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=1, profile_type=th_slp_type,\n livingArea=net_floor_area, specificDemand=spec_th_demand)\n', (53374, 53485), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((54937, 54969), 'pycity_base.classes.demand.Apartment.Apartment', 'Apartment.Apartment', (['environment'], {}), '(environment)\n', (54956, 54969), True, 'import pycity_base.classes.demand.Apartment as Apartment\n'), ((55136, 55376), 'pycity_calc.buildings.building.BuildingExtended', 'build_ex.BuildingExtended', (['environment'], {'net_floor_area': 'net_floor_area', 'build_year': 'build_year', 'mod_year': 'mod_year', 'build_type': 'build_type', 'roof_usabl_pv_area': 'pv_use_area', 'height_of_floors': 'height_of_floors', 'nb_of_floors': 'nb_of_floors'}), '(environment, net_floor_area=net_floor_area,\n build_year=build_year, mod_year=mod_year, build_type=build_type,\n roof_usabl_pv_area=pv_use_area, height_of_floors=height_of_floors,\n nb_of_floors=nb_of_floors)\n', (55161, 55376), True, 'import pycity_calc.buildings.building as build_ex\n'), ((58033, 58088), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': 'delimiter', 'skip_header': '(1)'}), '(path, delimiter=delimiter, skip_header=1)\n', (58046, 58088), True, 'import numpy as np\n'), ((127597, 127639), 'os.path.join', 'os.path.join', (['this_path', '"""input"""', 'filename'], {}), "(this_path, 'input', filename)\n", (127609, 127639), False, 'import os\n'), ((127843, 127891), 'os.path.join', 'os.path.join', (['this_path', '"""output"""', 'log_file_name'], {}), "(this_path, 'output', log_file_name)\n", (127855, 127891), False, 'import os\n'), ((1823, 1841), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (1836, 1841), False, 'import warnings\n'), ((3535, 3553), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (3548, 3553), False, 'import warnings\n'), ((5029, 5047), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (5042, 5047), False, 'import warnings\n'), ((5938, 5956), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (5951, 5956), False, 'import warnings\n'), ((6571, 6589), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (6584, 6589), False, 'import warnings\n'), ((7168, 7186), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (7181, 7186), False, 'import warnings\n'), ((22535, 22671), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(1)', 'profile_type': 'th_slp_type', 
'livingArea': 'net_floor_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=1, profile_type=th_slp_type,\n livingArea=net_floor_area, specificDemand=spec_th_demand)\n', (22560, 22671), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((24491, 24843), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(2)', 'total_nb_occupants': 'number_occupants', 'randomizeAppliances': '(True)', 'lightConfiguration': '(0)', 'annualDemand': 'annual_el_demand', 'occupancy': 'occupancy_object.occupancy', 'do_normalization': 'do_normalization', 'prev_heat_dev': 'prev_heat_dev', 'season_light_mod': 'season_light_mod', 'light_mod_fac': 'season_mod'}), '(environment, method=2, total_nb_occupants\n =number_occupants, randomizeAppliances=True, lightConfiguration=0,\n annualDemand=annual_el_demand, occupancy=occupancy_object.occupancy,\n do_normalization=do_normalization, prev_heat_dev=prev_heat_dev,\n season_light_mod=season_light_mod, light_mod_fac=season_mod)\n', (24524, 24843), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((25468, 25585), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(1)', 'annualDemand': 'annual_el_demand', 'profileType': 'el_slp_type'}), '(environment, method=1, annualDemand=\n annual_el_demand, profileType=el_slp_type)\n', (25501, 25585), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((38563, 38581), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (38576, 38581), False, 'import warnings\n'), ((48543, 48639), 'pycity_base.classes.demand.Apartment.Apartment', 'Apartment.Apartment', (['environment'], {'occupancy': 'occupancy_object', 'net_floor_area': 'apartment_area'}), '(environment, occupancy=occupancy_object, net_floor_area\n =apartment_area)\n', (48562, 48639), True, 'import pycity_base.classes.demand.Apartment as Apartment\n'), ((53752, 53896), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(3)', 'annualDemand': 'annual_el_demand', 'do_normalization': '(True)', 'method_3_type': 'method_3_type'}), '(environment, method=3, annualDemand=\n annual_el_demand, do_normalization=True, method_3_type=method_3_type)\n', (53785, 53896), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((58165, 58188), 'numpy.isnan', 'np.isnan', (['district_data'], {}), '(district_data)\n', (58173, 58188), True, 'import numpy as np\n'), ((74097, 74115), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (74110, 74115), False, 'import warnings\n'), ((120436, 120461), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (120451, 120461), False, 'import os\n'), ((127696, 127744), 'os.path.join', 'os.path.join', (['this_path', '"""input"""', 'city_osm_input'], {}), "(this_path, 'input', city_osm_input)\n", (127708, 127744), False, 'import os\n'), ((20962, 21025), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'number_occupants'}), '(environment, number_occupants=number_occupants)\n', (20977, 21025), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((21247, 21333), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'number_occupants', 'do_profile': 
'(False)'}), '(environment, number_occupants=number_occupants, do_profile=\n False)\n', (21262, 21333), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((21575, 21705), 'warnings.warn', 'warnings.warn', (["('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')"], {}), "('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')\n", (21588, 21705), False, 'import warnings\n'), ((23089, 23192), 'pycity_calc.toolbox.modifiers.slp_th_manipulator.slp_th_manipulator', 'slpman.slp_th_manipulator', (['timestep'], {'th_slp_curve': 'heat_power_curve.loadcurve', 'temp_array': 'temp_array'}), '(timestep, th_slp_curve=heat_power_curve.loadcurve,\n temp_array=temp_array)\n', (23114, 23192), True, 'import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman\n'), ((23421, 23531), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(3)', 'livingArea': 'net_floor_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=3, livingArea=net_floor_area,\n specificDemand=spec_th_demand)\n', (23446, 23531), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((26339, 26498), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(1)', 'dailyConsumption': '(dhw_volumen * number_occupants)', 'supplyTemperature': '(25)'}), '(environment, tFlow=60, thermal=True,\n method=1, dailyConsumption=dhw_volumen * number_occupants,\n supplyTemperature=25)\n', (26372, 26498), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((26953, 27097), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(2)', 'supplyTemperature': '(25)', 'occupancy': 'occupancy_object.occupancy'}), '(environment, tFlow=60, thermal=True,\n method=2, supplyTemperature=25, occupancy=occupancy_object.occupancy)\n', (26986, 27097), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((42319, 42364), 'pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_occ_per_app', 'usunc.calc_sampling_occ_per_app', ([], {'nb_samples': '(1)'}), '(nb_samples=1)\n', (42350, 42364), True, 'import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc\n'), ((42655, 42791), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(1)', 'profile_type': 'th_slp_type', 'livingArea': 'apartment_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=1, profile_type=th_slp_type,\n livingArea=apartment_area, specificDemand=spec_th_demand)\n', (42680, 42791), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((44737, 45097), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(2)', 'total_nb_occupants': 'curr_number_occupants', 'randomizeAppliances': '(True)', 'lightConfiguration': '(0)', 'annualDemand': 'annual_el_demand_ap', 'occupancy': 'occupancy_object.occupancy', 'do_normalization': 'do_normalization', 'prev_heat_dev': 'prev_heat_dev', 'season_light_mod': 'season_light_mod', 'light_mod_fac': 'season_mod'}), '(environment, method=2, total_nb_occupants\n 
=curr_number_occupants, randomizeAppliances=True, lightConfiguration=0,\n annualDemand=annual_el_demand_ap, occupancy=occupancy_object.occupancy,\n do_normalization=do_normalization, prev_heat_dev=prev_heat_dev,\n season_light_mod=season_light_mod, light_mod_fac=season_mod)\n', (44770, 45097), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((45769, 45889), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(1)', 'annualDemand': 'annual_el_demand_ap', 'profileType': 'el_slp_type'}), '(environment, method=1, annualDemand=\n annual_el_demand_ap, profileType=el_slp_type)\n', (45802, 45889), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((54152, 54296), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(4)', 'annualDemand': 'annual_el_demand', 'do_normalization': '(True)', 'method_4_type': 'method_4_type'}), '(environment, method=4, annualDemand=\n annual_el_demand, do_normalization=True, method_4_type=method_4_type)\n', (54185, 54296), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((54646, 54763), 'pycity_base.classes.demand.ElectricalDemand.ElectricalDemand', 'ElectricalDemand.ElectricalDemand', (['environment'], {'method': '(1)', 'annualDemand': 'annual_el_demand', 'profileType': 'el_slp_type'}), '(environment, method=1, annualDemand=\n annual_el_demand, profileType=el_slp_type)\n', (54679, 54763), True, 'import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand\n'), ((59165, 59256), 'pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_el_demand_per_apartment', 'usunc.calc_sampling_el_demand_per_apartment', ([], {'nb_samples': '(1)', 'nb_persons': 'nb_occ', 'type': 'type'}), '(nb_samples=1, nb_persons=nb_occ,\n type=type)\n', (59208, 59256), True, 'import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc\n'), ((61186, 61273), 'pycity_calc.toolbox.mc_helpers.user.user_unc_sampling.calc_sampling_dhw_per_apartment', 'usunc.calc_sampling_dhw_per_apartment', ([], {'nb_samples': '(1)', 'nb_persons': 'nb_occ', 'b_type': 'type'}), '(nb_samples=1, nb_persons=nb_occ,\n b_type=type)\n', (61223, 61273), True, 'import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc\n'), ((74447, 74500), 'os.path.join', 'os.path.join', (['this_path', '"""output"""', '"""city_gen_log.txt"""'], {}), "(this_path, 'output', 'city_gen_log.txt')\n", (74459, 74500), False, 'import os\n'), ((80450, 80484), 'pycity_calc.cities.city.City', 'city.City', ([], {'environment': 'environment'}), '(environment=environment)\n', (80459, 80484), True, 'import pycity_calc.cities.city as city\n'), ((111236, 111263), 'shapely.geometry.point.Point', 'point.Point', (['curr_x', 'curr_y'], {}), '(curr_x, curr_y)\n', (111247, 111263), True, 'import shapely.geometry.point as point\n'), ((113182, 113445), 'pycity_calc.toolbox.teaser_usage.teaser_use.calc_and_add_vdi_6007_loads_to_city', 'tusage.calc_and_add_vdi_6007_loads_to_city', ([], {'city': 'city_object', 'air_vent_mode': 'air_vent_mode', 'vent_factor': 'vent_factor', 't_set_heat': 't_set_heat', 't_set_cool': 't_set_cool', 't_night': 't_night', 'alpha_rad': 'None', 'project_name': 'project_name', 'requ_profiles': 'requ_profiles'}), '(city=city_object, air_vent_mode=\n air_vent_mode, vent_factor=vent_factor, t_set_heat=t_set_heat,\n t_set_cool=t_set_cool, t_night=t_night, 
alpha_rad=None, project_name=\n project_name, requ_profiles=requ_profiles)\n', (113224, 113445), True, 'import pycity_calc.toolbox.teaser_usage.teaser_use as tusage\n'), ((118374, 118459), 'pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_project', 'tusage.create_teaser_project', ([], {'name': 'teaser_proj_name', 'merge_windows': 'merge_windows'}), '(name=teaser_proj_name, merge_windows=merge_windows\n )\n', (118402, 118459), True, 'import pycity_calc.toolbox.teaser_usage.teaser_use as tusage\n'), ((118573, 118664), 'pycity_calc.toolbox.teaser_usage.teaser_use.create_teaser_typecity', 'tusage.create_teaser_typecity', ([], {'project': 'project', 'city': 'city_object', 'generate_Output': '(False)'}), '(project=project, city=city_object,\n generate_Output=False)\n', (118602, 118664), True, 'import pycity_calc.toolbox.teaser_usage.teaser_use as tusage\n'), ((40675, 40743), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'curr_number_occupants'}), '(environment, number_occupants=curr_number_occupants)\n', (40690, 40743), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((40946, 41036), 'pycity_base.classes.demand.Occupancy.Occupancy', 'occup.Occupancy', (['environment'], {'number_occupants': 'curr_number_occupants', 'do_profile': '(False)'}), '(environment, number_occupants=curr_number_occupants,\n do_profile=False)\n', (40961, 41036), True, 'import pycity_base.classes.demand.Occupancy as occup\n'), ((41252, 41382), 'warnings.warn', 'warnings.warn', (["('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')"], {}), "('Stochastic el. profile cannot be generated ' +\n 'due to missing number of occupants. ' + 'SLP is used instead.')\n", (41265, 41382), False, 'import warnings\n'), ((43185, 43288), 'pycity_calc.toolbox.modifiers.slp_th_manipulator.slp_th_manipulator', 'slpman.slp_th_manipulator', (['timestep'], {'th_slp_curve': 'heat_power_curve.loadcurve', 'temp_array': 'temp_array'}), '(timestep, th_slp_curve=heat_power_curve.loadcurve,\n temp_array=temp_array)\n', (43210, 43288), True, 'import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman\n'), ((43541, 43651), 'pycity_base.classes.demand.SpaceHeating.SpaceHeating', 'SpaceHeating.SpaceHeating', (['environment'], {'method': '(3)', 'livingArea': 'apartment_area', 'specificDemand': 'spec_th_demand'}), '(environment, method=3, livingArea=apartment_area,\n specificDemand=spec_th_demand)\n', (43566, 43651), True, 'import pycity_base.classes.demand.SpaceHeating as SpaceHeating\n'), ((46734, 46898), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(1)', 'dailyConsumption': '(dhw_volumen * curr_number_occupants)', 'supplyTemperature': '(25)'}), '(environment, tFlow=60, thermal=True,\n method=1, dailyConsumption=dhw_volumen * curr_number_occupants,\n supplyTemperature=25)\n', (46767, 46898), True, 'import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((47118, 47262), 'pycity_base.classes.demand.DomesticHotWater.DomesticHotWater', 'DomesticHotWater.DomesticHotWater', (['environment'], {'tFlow': '(60)', 'thermal': '(True)', 'method': '(2)', 'supplyTemperature': '(25)', 'occupancy': 'occupancy_object.occupancy'}), '(environment, tFlow=60, thermal=True,\n method=2, supplyTemperature=25, occupancy=occupancy_object.occupancy)\n', (47151, 47262), True, 'import 
pycity_base.classes.demand.DomesticHotWater as DomesticHotWater\n'), ((74397, 74422), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (74412, 74422), False, 'import os\n'), ((89838, 89856), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (89851, 89856), False, 'import warnings\n'), ((119241, 119296), 'os.path.join', 'os.path.join', (['this_path', '"""output"""', 'pickle_city_filename'], {}), "(this_path, 'output', pickle_city_filename)\n", (119253, 119296), False, 'import os\n'), ((120163, 120225), 'pycity_calc.visualization.city_visual.plot_city_district', 'citvis.plot_city_district', ([], {'city': 'city_object', 'plot_street': '(False)'}), '(city=city_object, plot_street=False)\n', (120188, 120225), True, 'import pycity_calc.visualization.city_visual as citvis\n'), ((2400, 2425), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2415, 2425), False, 'import os\n'), ((119181, 119206), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (119196, 119206), False, 'import os\n'), ((119613, 119667), 'warnings.warn', 'warnings.warn', (['"""Could not pickle and save city object"""'], {}), "('Could not pickle and save city object')\n", (119626, 119667), False, 'import warnings\n'), ((120304, 120350), 'warnings.warn', 'warnings.warn', (['"""Could not plot city district."""'], {}), "('Could not plot city district.')\n", (120317, 120350), False, 'import warnings\n'), ((74728, 74751), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (74749, 74751), False, 'import datetime\n')]
|
# <NAME>, March 2020
# Common code for PyTorch implementation of Copy-Pasting GAN
import copy
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os, platform, time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from torch.utils.data import Dataset
from tqdm import tqdm
def read_image_robust(img_path, monochromatic=False):
''' Returns an image that meets conditions along with a success flag, in order to avoid crashing. '''
try:
# image = plt.imread(img_path).copy()
image = np.array(Image.open(img_path)).copy() # always uint8
success = True
if np.any(np.array(image.strides) < 0):
success = False # still negative stride
elif not(monochromatic) and (image.ndim != 3 or image.shape[2] != 3):
success = False # not RGB
elif monochromatic:
# width, height = image.shape[1], image.shape[0]
# image = np.broadcast_to(x[:, :, np.newaxis], (height, width, 3))
image = image[:, :, np.newaxis] # one channel <=> only one ground truth
except IOError:
# Probably corrupt file
image = None
success = False
return image, success
def paint_squares(image, noisy=False, channels=10):
'''
Paints one or more squares at random locations to create an artificial foreground image.
Generates multiple associated ground truth masks; one per object.
'''
width, height = image.shape[1], image.shape[0]
image = image.copy() # do not overwrite background
object_count = np.random.randint(1, 5) # [1, 4] inclusive
masks = np.zeros((height, width, channels), dtype=np.uint8)
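# One ground-truth mask channel per potential object; channels beyond
# object_count remain all-zero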
for i in range(object_count):
sq_w, sq_h = 9, 9
x1 = np.random.randint(0, width - sq_w + 1)
y1 = np.random.randint(0, height - sq_h + 1)
x2 = x1 + sq_w
y2 = y1 + sq_h
masks[y1:y2, x1:x2, i] = 255
if not(noisy):
# Pick one fixed (not necessarily saturated) color for the whole square
clr = np.random.randint(0, 256, 3)
image[y1:y2, x1:x2] = clr
else:
# Pick a random fully saturated (extremal) color for every pixel
image[y1:y2, x1:x2] = np.random.choice([0, 255], (sq_h, sq_w, 3))
return image, masks, object_count
def create_random_gfake_mask(width, height):
''' See Appendix D. '''
x0, y0 = np.random.rand(2) * 0.8 + 0.1
num_verts = np.random.randint(4, 7)
# TODO possible improvement: allow up to more vertices?
# TODO possible improvement: encourage convex (currently many "sharp" objects)
radii = np.random.rand(num_verts) * 0.4 + 0.1
# radii = np.random.rand(num_verts) * 0.8 + 0.2 # TODO: not very clear from paper
angles = np.sort(np.random.rand(num_verts)) * 2.0 * np.pi
poly_polar = list(zip(radii, angles))
poly_cart = [(int(width * (x0 + r * np.cos(a)) / 1),
int(height * (y0 + r * np.sin(a)) / 1)) for (r, a) in poly_polar]
# poly_cart = [(x1, y1), (x2, y2), ...]
img = Image.new('L', (width, height), 0)
ImageDraw.Draw(img).polygon(poly_cart, outline=1, fill=255)
mask = np.array(img, dtype='uint8')
assert(mask.shape == (height, width))
return mask
def apply_border_zero(masks):
ndim = len(masks.shape)
if ndim == 2:
masks[0, :] = 0
masks[-1, :] = 0
masks[:, 0] = 0
masks[:, -1] = 0
elif ndim == 3:
masks[:, 0, :] = 0
masks[:, -1, :] = 0
masks[:, :, 0] = 0
masks[:, :, -1] = 0
elif ndim == 4:
masks[:, :, 0, :] = 0
masks[:, :, -1, :] = 0
masks[:, :, :, 0] = 0
masks[:, :, :, -1] = 0
else:
raise Exception('Mask has too many dimensions')
return masks
def copy_paste(fores, masks, backs, border_zero=True):
# TODO possible improvement: poisson blending
# if hard_thres > 0:
# used_masks = (masks > hard_thres).float() # force binary
# else:
used_masks = masks.clone()
# Border zeroing implemented in April 2020
if border_zero:
used_masks = apply_border_zero(used_masks)
return used_masks * fores + (1.0 - used_masks) * backs
class MyCopyPasteDataset(Dataset):
'''
Custom dataset class with foreground, background, and optional mask folders as image sources.
    Only one object may appear per image, since the object count is not tracked.
Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
'''
def __init__(self, fore_dir, back_dir, mask_dir=None, rand_horz_flip=True, post_resize=-1, center_crop=False):
self.fore_dir = fore_dir
self.back_dir = back_dir
self.rand_horz_flip = rand_horz_flip
if post_resize <= 0:
self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
elif center_crop:
# Resize + square center crop
self.post_tf = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(post_resize),
transforms.CenterCrop(post_resize),
transforms.ToTensor()
])
else:
# Resize both dimensions, possibly distorting the images
self.post_tf = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((post_resize, post_resize)),
transforms.ToTensor()
])
self.has_masks = (mask_dir is not None)
# Load all file paths; file names must be the same across all 2 or 3 given directories
# self.all_fore_files = []
# self.all_mask_files = []
# self.all_back_files = []
# for fn in os.listdir(fore_dir):
# fore_fp = os.path.join(fore_dir, fn)
# if os.path.isfile(fore_fp):
# back_fp = os.path.join(back_dir, fn)
# assert(os.path.isfile(back_fp))
# self.all_fore_files.append(fore_fp)
# self.all_back_files.append(back_fp)
# if self.has_masks:
# mask_fp = os.path.join(mask_dir, fn)
# assert(os.path.isfile(mask_fp))
# self.all_mask_files.append(mask_fp)
# Load all file paths; file names must be the same across foreground and segmentation masks
self.all_fore_files = []
self.all_mask_files = []
self.all_back_files = []
for fn in os.listdir(fore_dir):
fore_fp = os.path.join(fore_dir, fn)
self.all_fore_files.append(fore_fp)
if self.has_masks:
mask_fp_jpg = os.path.join(mask_dir, fn[:-4] + '.jpg')
mask_fp_png = os.path.join(mask_dir, fn[:-4] + '.png')
if os.path.isfile(mask_fp_jpg):
self.all_mask_files.append(mask_fp_jpg)
elif os.path.isfile(mask_fp_png):
self.all_mask_files.append(mask_fp_png)
else:
raise Exception('No matching mask file found for ' + fore_fp)
for fn in os.listdir(back_dir):
back_fp = os.path.join(back_dir, fn)
self.all_back_files.append(back_fp)
self.fore_count = len(self.all_fore_files)
self.back_count = len(self.all_back_files)
print('Image file count: ' + str(self.fore_count) + ' foreground, ' + str(self.back_count) + ' background, has masks: ' + str(self.has_masks))
def __len__(self):
return self.fore_count
def __getitem__(self, idx):
# Force randomness (especially if num_workers > 0)
np.random.seed(idx + int((time.time() * 654321) % 123456))
# Read random pair of images from file system
success = False
while not(success):
file_idx = np.random.choice(self.fore_count)
fp = self.all_fore_files[file_idx]
fore, success = read_image_robust(fp)
if not(success):
continue
if self.has_masks:
fp = self.all_mask_files[file_idx]
mask, success = read_image_robust(fp, monochromatic=True)
assert(success) # must match fore
# mask = ((mask > 0) * 255.0).astype('uint8') # convert soft masks to hard
else:
mask = None
# Read random background image
success = False
while not(success):
file_idx2 = np.random.choice(self.back_count)
fp = self.all_back_files[file_idx2]
back, success = read_image_robust(fp)
# Read irrelevant foreground image
success = False
while not(success):
file_idx3 = np.random.choice(self.fore_count)
if file_idx3 == file_idx:
continue # try again, cannot pick same image
fp = self.all_fore_files[file_idx3]
irrel, success = read_image_robust(fp)
# Transform foregrounds (+ masks) and backgrounds
# NOTE: identical random choices must be made for some images
if self.rand_horz_flip:
if np.random.rand() < 0.5:
fore = fore[:, ::-1, :].copy()
if self.has_masks:
mask = mask[:, ::-1, :].copy()
if np.random.rand() < 0.5:
irrel = irrel[:, ::-1, :].copy()
if np.random.rand() < 0.5:
back = back[:, ::-1, :].copy()
fore = self.post_tf(fore)
irrel = self.post_tf(irrel)
back = self.post_tf(back)
if self.has_masks:
mask = self.post_tf(mask)
# Verify sizes
assert(fore.shape[1:] == irrel.shape[1:])
assert(fore.shape[1:] == back.shape[1:])
if self.has_masks:
assert(fore.shape[1:] == mask.shape[1:])
# Create grounded fake mask and composite
width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
comp_gfake = copy_paste(fore, gfake_mask, back)
# Construct dictionary; object count is unknown
result = {'fore': fore, 'back': back, 'irrel': irrel, 'object_cnt': 1, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
if self.has_masks:
result['mask'] = mask # don't set None, otherwise crash
return result
class MySquaresDataset(Dataset):
'''
Custom dataset class with just a collection of background images as source.
One or more artificial objects are painted to create a foreground, keeping track of object count.
Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
'''
def __init__(self, back_dir, rand_horz_flip=True, noisy=False, max_objects=10):
self.back_dir = back_dir
self.rand_horz_flip = rand_horz_flip
self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
self.noisy = noisy
self.max_objects = max_objects
# Load all file paths; file names must be the same across all 2 or 3 given directories
self.all_back_files = []
for fn in os.listdir(back_dir):
back_fp = os.path.join(back_dir, fn)
self.all_back_files.append(back_fp)
self.file_count = len(self.all_back_files)
print('Image file count: ' + str(self.file_count) + ', noisy: ' + str(self.noisy) + ', max objects: ' + str(self.max_objects))
def __len__(self):
return self.file_count
def __getitem__(self, idx):
# Read a random triplet (relevant + background + irrelevant) of non-overlapping backgrounds from file system
success = False
while not(success):
file_idx = np.random.choice(self.file_count)
fp = self.all_back_files[file_idx]
fore, success = read_image_robust(fp)
success = False
while not(success):
file_idx2 = np.random.choice(self.file_count)
if file_idx2 == file_idx:
continue # try again, cannot pick same image
fp = self.all_back_files[file_idx2]
back, success = read_image_robust(fp)
success = False
while not(success):
file_idx3 = np.random.choice(self.file_count)
if file_idx3 == file_idx or file_idx3 == file_idx2:
continue # try again, cannot pick same image
fp = self.all_back_files[file_idx3]
irrel, success = read_image_robust(fp)
# Create corresponding foregrounds and masks; leave actual background unchanged
fore, masks, object_cnt = paint_squares(fore, noisy=self.noisy, channels=self.max_objects)
irrel, _, _ = paint_squares(irrel, noisy=self.noisy, channels=self.max_objects)
# Transform foregrounds (+ masks) and backgrounds
# NOTE: identical random choices must be made for some images
if self.rand_horz_flip:
if np.random.rand() < 0.5:
fore = fore[:, ::-1, :].copy()
masks = masks[:, ::-1, :].copy()
if np.random.rand() < 0.5:
irrel = irrel[:, ::-1, :].copy()
if np.random.rand() < 0.5:
back = back[:, ::-1, :].copy()
fore = self.post_tf(fore)
masks = self.post_tf(masks)
irrel = self.post_tf(irrel)
back = self.post_tf(back)
# Create grounded fake mask and composite
width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
comp_gfake = copy_paste(fore, gfake_mask, back)
# Construct dictionary
result = {'fore': fore, 'back': back, 'irrel': irrel, 'mask': masks, 'object_cnt': object_cnt, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
return result
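# A minimal usage sketch (not part of the original file): the directory path below is a
# hypothetical placeholder and the DataLoader settings are assumptions.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    dataset = MySquaresDataset('path/to/background_images', rand_horz_flip=True, noisy=False)
    loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0)
    batch = next(iter(loader))
    print(batch['fore'].shape, batch['mask'].shape, batch['object_cnt'])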
|
[
"torchvision.transforms.CenterCrop",
"os.listdir",
"PIL.Image.open",
"numpy.random.rand",
"torchvision.transforms.ToPILImage",
"numpy.random.choice",
"PIL.Image.new",
"numpy.sin",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"PIL.ImageDraw.Draw",
"numpy.cos",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"time.time"
] |
[((1671, 1694), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (1688, 1694), True, 'import numpy as np\n'), ((1726, 1777), 'numpy.zeros', 'np.zeros', (['(height, width, channels)'], {'dtype': 'np.uint8'}), '((height, width, channels), dtype=np.uint8)\n', (1734, 1777), True, 'import numpy as np\n'), ((2562, 2585), 'numpy.random.randint', 'np.random.randint', (['(4)', '(7)'], {}), '(4, 7)\n', (2579, 2585), True, 'import numpy as np\n'), ((3164, 3198), 'PIL.Image.new', 'Image.new', (['"""L"""', '(width, height)', '(0)'], {}), "('L', (width, height), 0)\n", (3173, 3198), False, 'from PIL import Image, ImageDraw\n'), ((3274, 3302), 'numpy.array', 'np.array', (['img'], {'dtype': '"""uint8"""'}), "(img, dtype='uint8')\n", (3282, 3302), True, 'import numpy as np\n'), ((1851, 1889), 'numpy.random.randint', 'np.random.randint', (['(0)', '(width - sq_w + 1)'], {}), '(0, width - sq_w + 1)\n', (1868, 1889), True, 'import numpy as np\n'), ((1903, 1942), 'numpy.random.randint', 'np.random.randint', (['(0)', '(height - sq_h + 1)'], {}), '(0, height - sq_h + 1)\n', (1920, 1942), True, 'import numpy as np\n'), ((6599, 6619), 'os.listdir', 'os.listdir', (['fore_dir'], {}), '(fore_dir)\n', (6609, 6619), False, 'import os, platform, time\n'), ((7232, 7252), 'os.listdir', 'os.listdir', (['back_dir'], {}), '(back_dir)\n', (7242, 7252), False, 'import os, platform, time\n'), ((11077, 11098), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11096, 11098), True, 'import torchvision.transforms as transforms\n'), ((11346, 11366), 'os.listdir', 'os.listdir', (['back_dir'], {}), '(back_dir)\n', (11356, 11366), False, 'import os, platform, time\n'), ((2152, 2180), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(3)'], {}), '(0, 256, 3)\n', (2169, 2180), True, 'import numpy as np\n'), ((2345, 2388), 'numpy.random.choice', 'np.random.choice', (['[0, 255]', '(sq_h, sq_w, 3)'], {}), '([0, 255], (sq_h, sq_w, 3))\n', (2361, 2388), True, 'import numpy as np\n'), ((2516, 2533), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (2530, 2533), True, 'import numpy as np\n'), ((2741, 2766), 'numpy.random.rand', 'np.random.rand', (['num_verts'], {}), '(num_verts)\n', (2755, 2766), True, 'import numpy as np\n'), ((3203, 3222), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (3217, 3222), False, 'from PIL import Image, ImageDraw\n'), ((4921, 4942), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4940, 4942), True, 'import torchvision.transforms as transforms\n'), ((6643, 6669), 'os.path.join', 'os.path.join', (['fore_dir', 'fn'], {}), '(fore_dir, fn)\n', (6655, 6669), False, 'import os, platform, time\n'), ((7276, 7302), 'os.path.join', 'os.path.join', (['back_dir', 'fn'], {}), '(back_dir, fn)\n', (7288, 7302), False, 'import os, platform, time\n'), ((7971, 8004), 'numpy.random.choice', 'np.random.choice', (['self.fore_count'], {}), '(self.fore_count)\n', (7987, 8004), True, 'import numpy as np\n'), ((8635, 8668), 'numpy.random.choice', 'np.random.choice', (['self.back_count'], {}), '(self.back_count)\n', (8651, 8668), True, 'import numpy as np\n'), ((8887, 8920), 'numpy.random.choice', 'np.random.choice', (['self.fore_count'], {}), '(self.fore_count)\n', (8903, 8920), True, 'import numpy as np\n'), ((11390, 11416), 'os.path.join', 'os.path.join', (['back_dir', 'fn'], {}), '(back_dir, fn)\n', (11402, 11416), False, 'import os, platform, time\n'), ((11946, 11979), 'numpy.random.choice', 
'np.random.choice', (['self.file_count'], {}), '(self.file_count)\n', (11962, 11979), True, 'import numpy as np\n'), ((12153, 12186), 'numpy.random.choice', 'np.random.choice', (['self.file_count'], {}), '(self.file_count)\n', (12169, 12186), True, 'import numpy as np\n'), ((12460, 12493), 'numpy.random.choice', 'np.random.choice', (['self.file_count'], {}), '(self.file_count)\n', (12476, 12493), True, 'import numpy as np\n'), ((738, 761), 'numpy.array', 'np.array', (['image.strides'], {}), '(image.strides)\n', (746, 761), True, 'import numpy as np\n'), ((2886, 2911), 'numpy.random.rand', 'np.random.rand', (['num_verts'], {}), '(num_verts)\n', (2900, 2911), True, 'import numpy as np\n'), ((6779, 6819), 'os.path.join', 'os.path.join', (['mask_dir', "(fn[:-4] + '.jpg')"], {}), "(mask_dir, fn[:-4] + '.jpg')\n", (6791, 6819), False, 'import os, platform, time\n'), ((6850, 6890), 'os.path.join', 'os.path.join', (['mask_dir', "(fn[:-4] + '.png')"], {}), "(mask_dir, fn[:-4] + '.png')\n", (6862, 6890), False, 'import os, platform, time\n'), ((6910, 6937), 'os.path.isfile', 'os.path.isfile', (['mask_fp_jpg'], {}), '(mask_fp_jpg)\n', (6924, 6937), False, 'import os, platform, time\n'), ((9307, 9323), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9321, 9323), True, 'import numpy as np\n'), ((9479, 9495), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9493, 9495), True, 'import numpy as np\n'), ((9567, 9583), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9581, 9583), True, 'import numpy as np\n'), ((13182, 13198), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13196, 13198), True, 'import numpy as np\n'), ((13317, 13333), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13331, 13333), True, 'import numpy as np\n'), ((13405, 13421), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13419, 13421), True, 'import numpy as np\n'), ((653, 673), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (663, 673), False, 'from PIL import Image, ImageDraw\n'), ((7020, 7047), 'os.path.isfile', 'os.path.isfile', (['mask_fp_png'], {}), '(mask_fp_png)\n', (7034, 7047), False, 'import os, platform, time\n'), ((5109, 5132), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (5130, 5132), True, 'import torchvision.transforms as transforms\n'), ((5150, 5180), 'torchvision.transforms.Resize', 'transforms.Resize', (['post_resize'], {}), '(post_resize)\n', (5167, 5180), True, 'import torchvision.transforms as transforms\n'), ((5198, 5232), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['post_resize'], {}), '(post_resize)\n', (5219, 5232), True, 'import torchvision.transforms as transforms\n'), ((5250, 5271), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5269, 5271), True, 'import torchvision.transforms as transforms\n'), ((5434, 5457), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (5455, 5457), True, 'import torchvision.transforms as transforms\n'), ((5475, 5520), 'torchvision.transforms.Resize', 'transforms.Resize', (['(post_resize, post_resize)'], {}), '((post_resize, post_resize))\n', (5492, 5520), True, 'import torchvision.transforms as transforms\n'), ((5538, 5559), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5557, 5559), True, 'import torchvision.transforms as transforms\n'), ((7808, 7819), 'time.time', 'time.time', ([], {}), '()\n', (7817, 7819), False, 'import os, platform, time\n'), ((3009, 3018), 
'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (3015, 3018), True, 'import numpy as np\n'), ((3067, 3076), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (3073, 3076), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Richardson-Extrapolation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oNlSL2Vztk9Fc7tMBgPcL82WGaUuCY-A
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
"""
NAME = "<NAME>"
COLLABORATORS = ""
"""---
## CSE330 Lab: Richardson Extrapolation
---
## Instructions
Today's assignment is to:
1. Implement Richardson Extrapolation method using Python
## Richardson Extrapolation:
We used the central difference method to calculate derivatives of functions in the last task. In this task we will use Richardson extrapolation to get a more accurate result.
Let,
$$ D_h = \frac{f(x_1+h) -f(x_1-h)}{2h}\tag{5.1}$$
General Taylor Series formula:
$$ f(x) = f(x_1) + f'(x_1)(x - x_1) + \frac{f''(x_1)}{2}(x - x_1)^2+... $$
Using Taylor's theorem to expand we get,
\begin{align}
f(x_1+h) &= f(x_1) + f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 + \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 + \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.2} \\
f(x_1-h) &= f(x_1) - f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 - \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 - \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.3}
\end{align}
Subtracting $5.3$ from $5.2$ we get,
$$ f(x_1+h) - f(x_1-h) = 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7)\tag{5.4}$$
So,
\begin{align}
D_h &= \frac{f(x_1+h) - f(x_1-h)}{2h} \\
&= \frac{1}{2h} \left( 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7) \right) \\
&= f^{\prime}(x_1) + \frac{f^{\prime \prime \prime}(x_1)}{6}h^2 + \frac{f^{(5)}(x_1)}{120}h^4 + O(h^6) \tag{5.5}
\end{align}
We get our derivative $f'(x)$ plus some error terms of order $\geq 2$. Now, we want to bring the error order down to 4.
If we use $h$ and $\frac{h}{2}$ as step sizes in $5.5$, we get,
\begin{align}
D_h &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{6} + f^{(5)}(x_1) \frac{h^4}{120} + O(h^6) \tag{5.6} \\
D_{h/2} &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{2^2 . 6} + f^{(5)}(x_1) \frac{h^4}{2^4 . 120} + O(h^6) \tag{5.7}
\end{align}
Multiplying $5.7$ by $4$ and subtracting from $5.6$ we get,
\begin{align}
D_h - 4D_{h/2} &= -3f^{\prime}(x_1) + f^{(5)}(x_1) \frac{h^4}{160} + O(h^6)\\
\Longrightarrow D^{(1)}_h = \frac{4D_{h/2} - D_h}{3} &= f^{\prime}(x_1) - f^{(5)}(x_1) \frac{h^4}{480} + O(h^6) \tag{5.8}
\end{align}
Let's calculate the derivative using $5.8$
### 1. Let's import the necessary headers
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.polynomial import Polynomial
"""### 2. Let's create a function named `dh(f, h, x)`
function `dh(f, h, x)` takes three parameters as input: a function `f`, a value `h`, and a set of values `x`. It returns the derivatives of the function at each element of the array `x` using the Central Difference method. This calculates equation $(5.1)$.
"""
def dh(f, h, x):
'''
Input:
        f: np.polynomial.Polynomial type data.
h: floating point data.
x: np.array type data.
Output:
return np.array type data of slope at each point x.
'''
# --------------------------------------------
return (f(x+h) - f(x-h)) / (2*h)
    # --------------------------------------------
"""### 3. Let's create another function `dh1(f, h, x)`.
`dh1(f, h, x)` takes the same type of values as `dh(f, h, x)` as input. It calculates the derivative using previously defined `dh(f, h, x)` function and using equation $5.8$ and returns the values.
"""
def dh1(f, h, x):
'''
Input:
        f: np.polynomial.Polynomial type data.
h: floating point data.
x: np.array type data.
Output:
return np.array type data of slope at each point x.
'''
# --------------------------------------------
# YOUR CODE HERE
return (4 * dh(f, h/2, x) - dh(f, h, x)) / 3
# --------------------------------------------
"""### 4. Now let's create the `error(f, hs, x_i)` function
The `error(f, hs, x_i)` function takes a function `f` as input. It also takes a list of different values of h as `hs` and a specific value as `x_i` as input. It calculates the derivatives as point `x_i` using both functions described in **B** and **C**, i.e. `dh` and `dh1`
"""
def error(f, hs, x_i):  # using dh() (central difference) and dh1() (Richardson-extrapolated central difference), compute the error at x_i for each step size by comparing against the true derivative f'(x_i)
'''
Input:
f : np.polynomial.Polynonimial type data.
hs : np.array type data. list of h.
x_i: floating point data. single value of x.
Output:
return two np.array type data of errors by two methods..
'''
f_prime = f.deriv(1) #first order derivitive f^1(x)
Y_actual = f_prime(x_i)
diff_error = []
diff2_error = []
for h in hs: #where h is my loop counter iterating through hs
        # for each value of hs, calculate the error using both methods
        # and append those values to the diff_error and diff2_error lists.
# --------------------------------------------
# YOUR CODE HERE
        e1 = Y_actual - dh(f, h, x_i)
        diff_error.append(e1)
        e2 = Y_actual - dh1(f, h, x_i)
        diff2_error.append(e2)
# --------------------------------------------
print(pd.DataFrame({"h": hs, "Diff": diff_error, "Diff2": diff2_error}))
return diff_error, diff2_error
"""### 5. Finally let's run some tests
function to draw the actual function
"""
def draw_graph(f, ax, domain=[-10, 10], label=None):
    data = f.linspace(domain=domain)
    ax.plot(data[0], data[1], label=label)
"""### Draw the polynomial and it's actual derivative function"""
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
p = Polynomial([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])
p_prime = p.deriv(1)
draw_graph(p, ax, [-2.4, 1.5], 'Function')
draw_graph(p_prime, ax, [-2.4, 1.5], 'Derivative')
ax.legend()
"""### Draw the actual derivative and richardson derivative using `h=1` and `h=0.1` as step size."""
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
draw_graph(p_prime, ax, [-2.4, 1.5], 'actual')
h = 1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)
ax.plot(x, y, label='Richardson; h=1')
h = 0.1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)
ax.plot(x, y, label='Richardson; h=0.1')
ax.legend()
"""### Draw error-vs-h curve"""
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')
hs = np.array([1., 0.55, 0.3, .17, 0.1, 0.055, 0.03, 0.017, 0.01])
e1, e2 = error(p, hs, 2.0)
ax.plot(hs, e1, label='e1')
ax.plot(hs, e2, label='e2')
ax.legend()
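# From equations (5.5) and (5.8), the plain central-difference error e1 should shrink as O(h^2),
# while the Richardson-extrapolated error e2 should shrink as O(h^4), so e2 is expected to
# approach zero much faster than e1 as h decreases.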
|
[
"numpy.array",
"numpy.linspace",
"numpy.polynomial.Polynomial",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] |
[((6212, 6226), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6224, 6226), True, 'import matplotlib.pyplot as plt\n'), ((6259, 6303), 'numpy.polynomial.Polynomial', 'Polynomial', (['[2.0, 1.0, -6.0, -2.0, 2.5, 1.0]'], {}), '([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])\n', (6269, 6303), False, 'from numpy.polynomial import Polynomial\n'), ((6545, 6559), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6557, 6559), True, 'import matplotlib.pyplot as plt\n'), ((6646, 6687), 'numpy.linspace', 'np.linspace', (['(-2.4)', '(1.5)', '(50)'], {'endpoint': '(True)'}), '(-2.4, 1.5, 50, endpoint=True)\n', (6657, 6687), True, 'import numpy as np\n'), ((6757, 6798), 'numpy.linspace', 'np.linspace', (['(-2.4)', '(1.5)', '(50)'], {'endpoint': '(True)'}), '(-2.4, 1.5, 50, endpoint=True)\n', (6768, 6798), True, 'import numpy as np\n'), ((6913, 6927), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6925, 6927), True, 'import matplotlib.pyplot as plt\n'), ((6960, 7023), 'numpy.array', 'np.array', (['[1.0, 0.55, 0.3, 0.17, 0.1, 0.055, 0.03, 0.017, 0.01]'], {}), '([1.0, 0.55, 0.3, 0.17, 0.1, 0.055, 0.03, 0.017, 0.01])\n', (6968, 7023), True, 'import numpy as np\n'), ((5811, 5876), 'pandas.DataFrame', 'pd.DataFrame', (["{'h': hs, 'Diff': diff_error, 'Diff2': diff2_error}"], {}), "({'h': hs, 'Diff': diff_error, 'Diff2': diff2_error})\n", (5823, 5876), True, 'import pandas as pd\n')]
|
"""The :mod:`mlshell.pipeline.steps` contains unified pipeline steps."""
import inspect
import mlshell
import numpy as np
import pandas as pd
import sklearn
import sklearn.impute
import sklearn.compose
__all__ = ['Steps']
class Steps(object):
"""Unified pipeline steps.
Parameters
----------
estimator : :mod:`sklearn` estimator
Estimator to use in the last step.
If ``estimator_type=regressor``:
``sklearn.compose.TransformedTargetRegressor(regressor=`estimator`)``
If ``estimator_type=classifier`` and ``th_step=True``:
``sklearn.pipeline.Pipeline(steps=[
('predict_proba',
mlshell.model_selection.PredictionTransformer(`estimator`)),
('apply_threshold',
mlshell.model_selection.ThresholdClassifier(threshold=0.5,
kwargs='auto')),
])``
If ``estimator_type=classifier`` and ``th_step=False``:
``sklearn.pipeline.Pipeline(steps=[('classifier', `estimator`)])``
estimator_type : str {'classifier`, 'regressor'}, optional (default=None)
Either regression or classification task. If None, get from
:func:`sklearn.base.is_classifier` on ``estimator``.
th_step : bool
If True and ``estimator_type=classifier``: ``mlshell.model_selection.
ThresholdClassifier`` sub-step added, otherwise ignored.
Notes
-----
    Steps are assembled in this class for convenience. Use the ``steps``
    property to access them after initialization. Only the OneHot encoder and
    imputer steps are initially activated.

    By default, 4 parameters await resolution ('auto'):
        'process_parallel__pipeline_categoric__select_columns__kw_args'
        'process_parallel__pipeline_numeric__select_columns__kw_args'
        'estimate__apply_threshold__threshold'
        'estimate__apply_threshold__params'

    Set the corresponding parameters with ``set_params()`` to overwrite the
    defaults in the created pipeline, or use
    :class:`mlshell.model_selection.Resolver` .

    The 'pass_custom' step allows brute-forcing arbitrary parameters in a
    uniform style with pipeline hyper-parameters (as if the scorer contained
    additional nested loops). The step name is hard-coded and cannot be
    changed.

    The 'apply_threshold' step allows grid searching classification thresholds
    as a pipeline hyper-parameter.

    The 'estimate' step should be the last one.
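
    Examples
    --------
    A minimal usage sketch (the concrete estimator below is an assumption, not
    part of this module)::

        import sklearn.linear_model
        import sklearn.pipeline

        steps = Steps(sklearn.linear_model.LogisticRegression(),
                      estimator_type='classifier', th_step=False)
        pipeline = sklearn.pipeline.Pipeline(steps=steps.steps)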
"""
_required_parameters = ['estimator', 'estimator_type']
def __init__(self, estimator, estimator_type=None, th_step=False):
if estimator_type is None:
estimator_type = 'classifier' if sklearn.base.is_classifier(estimator)\
else 'regressor'
self._steps = [
('pass_custom', mlshell.preprocessing.FunctionTransformer(func=self.scorer_kwargs, validate=False, skip=True, kw_args={})),
('select_rows', mlshell.preprocessing.FunctionTransformer(func=self.subrows, validate=False, skip=True)),
('process_parallel', sklearn.pipeline.FeatureUnion(transformer_list=[
('pipeline_categoric', sklearn.pipeline.Pipeline(steps=[
('select_columns', mlshell.preprocessing.FunctionTransformer(self.subcolumns, validate=False, skip=False, kw_args='auto')), # {'indices': dataset.meta['categoric_ind_name']}
('encode_onehot', mlshell.preprocessing.OneHotEncoder(handle_unknown='ignore', categories='auto', sparse=False, drop=None, skip=False)), # x could be [].
])),
('pipeline_numeric', sklearn.pipeline.Pipeline(steps=[
('select_columns', mlshell.preprocessing.FunctionTransformer(self.subcolumns, validate=False, skip=False, kw_args='auto')), # {'indices': dataset.meta['numeric_ind_name']}
('impute', sklearn.pipeline.FeatureUnion([
('indicators', sklearn.impute.MissingIndicator(missing_values=np.nan, error_on_new=False)),
('gaps', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0, copy=True)),
])),
('transform_normal', mlshell.preprocessing.PowerTransformer(method='yeo-johnson', standardize=False, copy=False, skip=True)),
('scale_row_wise', mlshell.preprocessing.FunctionTransformer(func=None, validate=False, skip=True)),
('scale_column_wise', sklearn.preprocessing.RobustScaler(quantile_range=(0, 100), copy=False)),
('add_polynomial', sklearn.preprocessing.PolynomialFeatures(degree=1, include_bias=False)), # x => degree=1 => x, x => degree=0 => []
('compose_columns', sklearn.compose.ColumnTransformer([
("discretize", sklearn.preprocessing.KBinsDiscretizer(n_bins=5, encode='onehot-dense', strategy='quantile'), self.bining_mask)], sparse_threshold=0, remainder='passthrough'))
])),
])),
('select_columns', sklearn.feature_selection.SelectFromModel(estimator=CustomSelector(estimator_type=estimator_type, verbose=False, skip=True), prefit=False)),
('reduce_dimensions', mlshell.decomposition.PCA(random_state=42, skip=True)),
('estimate', self.last_step(estimator, estimator_type, th_step=th_step)),
]
def last_step(self, estimator, estimator_type, th_step):
"""Prepare estimator step."""
if estimator_type == 'regressor':
last_step =\
sklearn.compose.TransformedTargetRegressor(regressor=estimator)
elif estimator_type == 'classifier' and th_step:
last_step = sklearn.pipeline.Pipeline(steps=[
('predict_proba',
mlshell.model_selection.PredictionTransformer(
estimator)),
('apply_threshold',
mlshell.model_selection.ThresholdClassifier(
params='auto', threshold=None)),
])
elif estimator_type == 'classifier' and not th_step:
last_step = sklearn.pipeline.Pipeline(steps=[('classifier',
estimator)])
else:
raise ValueError(f"Unknown estimator type `{estimator_type}`.")
if sklearn.base.is_classifier(estimator=last_step)\
^ (estimator_type == "classifier"):
raise TypeError(f"{self.__class__.__name__}:"
f"{inspect.stack()[0][3]}:"
f" wrong estimator type: {last_step}")
return last_step
@property
def steps(self):
"""list : access steps to pass in `sklearn.pipeline.Pipeline` ."""
return self._steps
def scorer_kwargs(self, x, **kw_args):
"""Mock function to custom kwargs setting.
Parameters
----------
x : :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Features of shape [n_samples, n_features].
**kw_args : dict
Step parameters. Could be extracted from pipeline in scorer if
needed.
Returns
-------
result: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Unchanged ``x``.
"""
return x
def subcolumns(self, x, **kw_args):
"""Get sub-columns from x.
Parameters
----------
x : :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Features of shape [n_samples, n_features].
**kw_args : dict
Columns indices to extract: {'indices': array-like}.
Returns
-------
result: :class:`numpy.ndarray` or :class:`pandas.DataFrame`
Extracted sub-columns of ``x``.
"""
indices = kw_args['indices']
if isinstance(x, pd.DataFrame):
return x.iloc[:, indices]
else:
return x[:, indices]
def subrows(self, x):
"""Get rows from x."""
# For example to delete outlier/anomalies.
return x
    def bining_mask(self, x):
        """Get feature indices which need binning."""
# Use slice(0, None) to get all.
return []
class CustomSelector(sklearn.base.BaseEstimator):
"""Custom feature selector template."""
def __init__(self, estimator_type='classifier', verbose=True,
skip=False):
self.skip = skip
self.verbose = verbose
self.feature_importances_ = None
self.estimator_type = estimator_type
super().__init__()
if not self.skip:
raise NotImplementedError
def fit(self, x, y):
if self.skip:
self.feature_importances_ = np.full(x.shape[1], fill_value=1)
return self
# TODO: some logic
self.feature_importances_ = np.full(x.shape[1], fill_value=1)
return self
if __name__ == '__main__':
pass
|
[
"sklearn.preprocessing.PolynomialFeatures",
"mlshell.decomposition.PCA",
"mlshell.preprocessing.FunctionTransformer",
"sklearn.base.is_classifier",
"inspect.stack",
"mlshell.preprocessing.OneHotEncoder",
"sklearn.preprocessing.KBinsDiscretizer",
"mlshell.model_selection.ThresholdClassifier",
"mlshell.preprocessing.PowerTransformer",
"sklearn.impute.MissingIndicator",
"sklearn.preprocessing.RobustScaler",
"sklearn.impute.SimpleImputer",
"sklearn.compose.TransformedTargetRegressor",
"sklearn.pipeline.Pipeline",
"numpy.full",
"mlshell.model_selection.PredictionTransformer"
] |
[((8958, 8991), 'numpy.full', 'np.full', (['x.shape[1]'], {'fill_value': '(1)'}), '(x.shape[1], fill_value=1)\n', (8965, 8991), True, 'import numpy as np\n'), ((5627, 5690), 'sklearn.compose.TransformedTargetRegressor', 'sklearn.compose.TransformedTargetRegressor', ([], {'regressor': 'estimator'}), '(regressor=estimator)\n', (5669, 5690), False, 'import sklearn\n'), ((6431, 6478), 'sklearn.base.is_classifier', 'sklearn.base.is_classifier', ([], {'estimator': 'last_step'}), '(estimator=last_step)\n', (6457, 6478), False, 'import sklearn\n'), ((8837, 8870), 'numpy.full', 'np.full', (['x.shape[1]'], {'fill_value': '(1)'}), '(x.shape[1], fill_value=1)\n', (8844, 8870), True, 'import numpy as np\n'), ((2650, 2687), 'sklearn.base.is_classifier', 'sklearn.base.is_classifier', (['estimator'], {}), '(estimator)\n', (2676, 2687), False, 'import sklearn\n'), ((2780, 2890), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', ([], {'func': 'self.scorer_kwargs', 'validate': '(False)', 'skip': '(True)', 'kw_args': '{}'}), '(func=self.scorer_kwargs, validate\n =False, skip=True, kw_args={})\n', (2821, 2890), False, 'import mlshell\n'), ((2921, 3012), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', ([], {'func': 'self.subrows', 'validate': '(False)', 'skip': '(True)'}), '(func=self.subrows, validate=False,\n skip=True)\n', (2962, 3012), False, 'import mlshell\n'), ((5292, 5345), 'mlshell.decomposition.PCA', 'mlshell.decomposition.PCA', ([], {'random_state': '(42)', 'skip': '(True)'}), '(random_state=42, skip=True)\n', (5317, 5345), False, 'import mlshell\n'), ((6210, 6270), 'sklearn.pipeline.Pipeline', 'sklearn.pipeline.Pipeline', ([], {'steps': "[('classifier', estimator)]"}), "(steps=[('classifier', estimator)])\n", (6235, 6270), False, 'import sklearn\n'), ((5860, 5916), 'mlshell.model_selection.PredictionTransformer', 'mlshell.model_selection.PredictionTransformer', (['estimator'], {}), '(estimator)\n', (5905, 5916), False, 'import mlshell\n'), ((6000, 6074), 'mlshell.model_selection.ThresholdClassifier', 'mlshell.model_selection.ThresholdClassifier', ([], {'params': '"""auto"""', 'threshold': 'None'}), "(params='auto', threshold=None)\n", (6043, 6074), False, 'import mlshell\n'), ((6621, 6636), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (6634, 6636), False, 'import inspect\n'), ((3209, 3315), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', (['self.subcolumns'], {'validate': '(False)', 'skip': '(False)', 'kw_args': '"""auto"""'}), "(self.subcolumns, validate=False,\n skip=False, kw_args='auto')\n", (3250, 3315), False, 'import mlshell\n'), ((3408, 3529), 'mlshell.preprocessing.OneHotEncoder', 'mlshell.preprocessing.OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'categories': '"""auto"""', 'sparse': '(False)', 'drop': 'None', 'skip': '(False)'}), "(handle_unknown='ignore', categories=\n 'auto', sparse=False, drop=None, skip=False)\n", (3443, 3529), False, 'import mlshell\n'), ((3682, 3788), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', (['self.subcolumns'], {'validate': '(False)', 'skip': '(False)', 'kw_args': '"""auto"""'}), "(self.subcolumns, validate=False,\n skip=False, kw_args='auto')\n", (3723, 3788), False, 'import mlshell\n'), ((4254, 4361), 'mlshell.preprocessing.PowerTransformer', 'mlshell.preprocessing.PowerTransformer', ([], {'method': '"""yeo-johnson"""', 'standardize': '(False)', 'copy': '(False)', 'skip': 
'(True)'}), "(method='yeo-johnson', standardize=\n False, copy=False, skip=True)\n", (4292, 4361), False, 'import mlshell\n'), ((4402, 4481), 'mlshell.preprocessing.FunctionTransformer', 'mlshell.preprocessing.FunctionTransformer', ([], {'func': 'None', 'validate': '(False)', 'skip': '(True)'}), '(func=None, validate=False, skip=True)\n', (4443, 4481), False, 'import mlshell\n'), ((4527, 4598), 'sklearn.preprocessing.RobustScaler', 'sklearn.preprocessing.RobustScaler', ([], {'quantile_range': '(0, 100)', 'copy': '(False)'}), '(quantile_range=(0, 100), copy=False)\n', (4561, 4598), False, 'import sklearn\n'), ((4644, 4714), 'sklearn.preprocessing.PolynomialFeatures', 'sklearn.preprocessing.PolynomialFeatures', ([], {'degree': '(1)', 'include_bias': '(False)'}), '(degree=1, include_bias=False)\n', (4684, 4714), False, 'import sklearn\n'), ((3958, 4032), 'sklearn.impute.MissingIndicator', 'sklearn.impute.MissingIndicator', ([], {'missing_values': 'np.nan', 'error_on_new': '(False)'}), '(missing_values=np.nan, error_on_new=False)\n', (3989, 4032), False, 'import sklearn\n'), ((4082, 4183), 'sklearn.impute.SimpleImputer', 'sklearn.impute.SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""constant"""', 'fill_value': '(0)', 'copy': '(True)'}), "(missing_values=np.nan, strategy='constant',\n fill_value=0, copy=True)\n", (4110, 4183), False, 'import sklearn\n'), ((4886, 4982), 'sklearn.preprocessing.KBinsDiscretizer', 'sklearn.preprocessing.KBinsDiscretizer', ([], {'n_bins': '(5)', 'encode': '"""onehot-dense"""', 'strategy': '"""quantile"""'}), "(n_bins=5, encode='onehot-dense',\n strategy='quantile')\n", (4924, 4982), False, 'import sklearn\n')]
|
"""
Sliding Window Matching
=======================
Find recurring patterns in neural signals using Sliding Window Matching.
This tutorial primarily covers the :func:`~.sliding_window_matching` function.
"""
###################################################################################################
# Overview
# --------
#
# Non-periodic or non-sinusoidal properties can be difficult to assess in frequency domain
# methods. To try and address this, the sliding window matching (SWM) algorithm has been
# proposed for detecting and measuring recurring, but unknown, patterns in time series data.
# Patterns of interest may be transient events, and/or the waveform shape of neural oscillations.
#
# In this example, we will explore applying the SWM algorithm to some LFP data.
#
# The SWM approach tries to find recurring patterns (or motifs) in the data, using sliding
# windows. An iterative process samples windows randomly, and compares each to the average
# window. The goal is to find a selection of windows that look maximally like the average
# window, at which point the occurrences of the window have been detected, and the average
# window pattern can be examined.
#
# The sliding window matching algorithm is described in
# `Gips et al, 2017 <https://doi.org/10.1016/j.jneumeth.2016.11.001>`_
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np
# Import the sliding window matching function
from neurodsp.rhythm import sliding_window_matching
# Import utilities for loading and plotting data
from neurodsp.utils.download import load_ndsp_data
from neurodsp.plts.rhythm import plot_swm_pattern
from neurodsp.plts.time_series import plot_time_series
from neurodsp.utils import set_random_seed, create_times
from neurodsp.utils.norm import normalize_sig
###################################################################################################
# Set random seed, for reproducibility
set_random_seed(0)
###################################################################################################
# Load neural signal
# ------------------
#
# First, we will load a segment of ECoG data, as an example time series.
#
###################################################################################################
# Download, if needed, and load example data files
sig = load_ndsp_data('sample_data_1.npy', folder='data')
sig = normalize_sig(sig, mean=0, variance=1)
# Set sampling rate, and create a times vector for plotting
fs = 1000
times = create_times(len(sig)/fs, fs)
###################################################################################################
#
# Next, we can visualize this data segment. As we can see, this segment of data has
# some prominent bursts of oscillations, in this case in the beta frequency range.
#
###################################################################################################
# Plot example signal
plot_time_series(times, sig)
###################################################################################################
# Apply sliding window matching
# -----------------------------
#
# The beta oscillation in our data segment looks like it might have some non-sinusoidal
# properties. We can investigate this with sliding window matching.
#
# Sliding window matching can be applied with the
# :func:`~.sliding_window_matching` function.
#
###################################################################################################
# Data Preprocessing
# ~~~~~~~~~~~~~~~~~~
#
# Typically, the input signal does not have to be filtered into a band of interest to use SWM.
#
# If the goal is to characterize non-sinusoidal rhythms, you typically won't want to
# apply a filter that will smooth out the features of interest.
#
# However, if the goal is to characterize higher frequency activity, it can be useful to
# apply a highpass filter, so that the method does not converge on a lower frequency motif.
#
# In our case, the beta rhythm of interest is the most prominent, low frequency, feature of the
# data, so we won't apply a filter.
#
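# As a hedged sketch only (not applied in this example), a highpass filter could be added
# before running SWM; the exact call below is an assumption based on neurodsp's filt module:
#
# from neurodsp.filt import filter_signal
# sig = filter_signal(sig, fs, 'highpass', f_range=2)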
###################################################################################################
# Algorithm Settings
# ~~~~~~~~~~~~~~~~~~
#
# The SWM algorithm has some algorithm specific settings that need to be applied, including:
#
# - `win_len` : the length of the window, defined in seconds
# - `win_spacing` : the minimum distance between windows, also defined in seconds
#
# The length of the window influences the patterns that are extracted from the data.
# Typically, you want to set the window length to match the expected timescale of the
# patterns under study.
#
# For our purposes, we will define the window length to be about 1 cycle of a beta oscillation,
# which should help the algorithm to find the waveform shape of the neural oscillation.
#
###################################################################################################
# Define window length & minimum window spacing, both in seconds
win_len = .055
win_spacing = .055
###################################################################################################
# Apply the sliding window matching algorithm to the time series
windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)
###################################################################################################
# Examine the Results
# ~~~~~~~~~~~~~~~~~~~
#
# What we got back from the SWM function are the extracted windows and the list of
# starting indices of those windows in the data.
#
# In order to visualize the resulting pattern, we can use
# :func:`~.plot_swm_pattern`.
#
###################################################################################################
# Compute the average window
avg_window = np.mean(windows, 0)
# Plot the discovered pattern
plot_swm_pattern(avg_window)
###################################################################################################
#
# The average pattern above appears to capture a beta rhythm, and we can see some of the
# waveform shape of the extracted rhythm.
#
###################################################################################################
# Concluding Notes
# ~~~~~~~~~~~~~~~~
#
# One thing to keep in mind is that the SWM algorithm includes a random element of sampling
# and comparing the windows - meaning it is not deterministic. Because of this, results
# can change with different random seeds.
#
# To explore this, go back and change the random seed, and see how the output changes.
#
# You can also set the number of iterations that the algorithm sweeps through. Increasing
# the number of iterations, and using longer data segments, can help improve the robustness
# of the algorithm results.
#
|
[
"numpy.mean",
"neurodsp.utils.set_random_seed",
"neurodsp.rhythm.sliding_window_matching",
"neurodsp.plts.time_series.plot_time_series",
"neurodsp.plts.rhythm.plot_swm_pattern",
"neurodsp.utils.download.load_ndsp_data",
"neurodsp.utils.norm.normalize_sig"
] |
[((2026, 2044), 'neurodsp.utils.set_random_seed', 'set_random_seed', (['(0)'], {}), '(0)\n', (2041, 2044), False, 'from neurodsp.utils import set_random_seed, create_times\n'), ((2424, 2474), 'neurodsp.utils.download.load_ndsp_data', 'load_ndsp_data', (['"""sample_data_1.npy"""'], {'folder': '"""data"""'}), "('sample_data_1.npy', folder='data')\n", (2438, 2474), False, 'from neurodsp.utils.download import load_ndsp_data\n'), ((2481, 2519), 'neurodsp.utils.norm.normalize_sig', 'normalize_sig', (['sig'], {'mean': '(0)', 'variance': '(1)'}), '(sig, mean=0, variance=1)\n', (2494, 2519), False, 'from neurodsp.utils.norm import normalize_sig\n'), ((3019, 3047), 'neurodsp.plts.time_series.plot_time_series', 'plot_time_series', (['times', 'sig'], {}), '(times, sig)\n', (3035, 3047), False, 'from neurodsp.plts.time_series import plot_time_series\n'), ((5342, 5412), 'neurodsp.rhythm.sliding_window_matching', 'sliding_window_matching', (['sig', 'fs', 'win_len', 'win_spacing'], {'var_thresh': '(0.5)'}), '(sig, fs, win_len, win_spacing, var_thresh=0.5)\n', (5365, 5412), False, 'from neurodsp.rhythm import sliding_window_matching\n'), ((5988, 6007), 'numpy.mean', 'np.mean', (['windows', '(0)'], {}), '(windows, 0)\n', (5995, 6007), True, 'import numpy as np\n'), ((6039, 6067), 'neurodsp.plts.rhythm.plot_swm_pattern', 'plot_swm_pattern', (['avg_window'], {}), '(avg_window)\n', (6055, 6067), False, 'from neurodsp.plts.rhythm import plot_swm_pattern\n')]
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
EPS = 1e-10
def get_required_argument(dotmap, key, message, default=None):
val = dotmap.get(key, default)
if val is default:
raise ValueError(message)
return val
def gaussian_kl_np(mu0, log_std0, mu1, log_std1):
    """Interprets each entry in mu_i and log_std_i as an independent
    univariate Gaussian; preserves the input shape.
    Output is clipped to [0, 1e10] for stability.
"""
var0, var1 = np.exp(2 * log_std0), np.exp(2 * log_std1)
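    # Element-wise closed form: KL(N(mu0, var0) || N(mu1, var1))
    #   = log(std1/std0) + (var0 + (mu1 - mu0)**2) / (2 * var1) - 1/2,
    # with EPS added to the denominator for numerical stability.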
pre_sum = 0.5*(((mu1- mu0)**2 + var0)/(var1+EPS) - 1) + log_std1 - log_std0
all_kls = pre_sum
#all_kls = np.mean(all_kls)
all_kls = np.clip(all_kls, 0, 1/EPS) ### for stability
return all_kls
def gaussian_jsd_np(mu0, log_std0, mu1, log_std1):
pass
def average_dkl(mu, std):
"""
    Calculates the average Kullback-Leibler divergence of multiple univariate Gaussian distributions:
    K(P1, …, Pk) = 1/(k(k−1)) ∑_{i,j=1}^{k} DKL(Pi || Pj)
(<NAME>, Informational divergence and the dissimilarity of probability distributions.)
expects the distributions along axis 0, and samples along axis 1.
Output is reduced by axis 0
Args:
mu: array-like means
std: array-like stds
"""
## clip log
log_std = np.log(std)
log_std = np.clip(log_std, -100, 1e8)
assert len(mu.shape)>=2 and len(log_std.shape)>=2
num_models = len(mu)
d_kl = None
for i in range(num_models):
for j in range(num_models):
if d_kl is None:
d_kl = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
else: d_kl+= gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
d_kl = d_kl/(num_models*(num_models-1)+EPS)
return d_kl
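# A minimal usage sketch (shapes are illustrative assumptions, not from the original code):
# stack per-model predictions along axis 0, e.g. an ensemble of 5 models over 100 samples.
#
#   mu = np.random.randn(5, 100, 1)
#   std = np.abs(np.random.randn(5, 100, 1)) + 0.1
#   disagreement = average_dkl(mu, std)  # shape (100, 1); the model axis is reduced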
def median_dkl(mu, std):
"""
    Calculates the element-wise median of the pairwise Kullback-Leibler divergences
    DKL(Pi || Pj), i ≠ j, of multiple univariate Gaussian distributions.
(<NAME>, Informational divergence and the dissimilarity of probability distributions.)
expects the distributions along axis 0, and samples along axis 1.
Output is reduced by axis 0
Args:
mu: array-like means
std: array-like stds
"""
## clip log
log_std = np.log(std)
log_std = np.clip(log_std, -100, 1e8)
assert len(mu.shape)>=2 and len(log_std.shape)>=2
num_models = len(mu)
d_kl = np.zeros(shape=(num_models*(num_models-1),) + mu.shape[1:])
n = 0
for i in range(num_models):
for j in range(num_models):
if i != j:
d_kl[n] = gaussian_kl_np(mu[i], log_std[i], mu[j], log_std[j])
n += 1
d_kl_med = np.median(d_kl, axis=0)
return d_kl_med
class TensorStandardScaler:
"""Helper class for automatically normalizing inputs into the network.
"""
def __init__(self, x_dim, sc_factor=1, name='Scaler'):
"""Initializes a scaler.
Arguments:
x_dim (int): The dimensionality of the inputs into the scaler.
Returns: None.
"""
self.fitted = False
with tf.variable_scope(name):
self.count = tf.get_variable(
name=name+'_count', shape=(), initializer=tf.constant_initializer(0),
trainable=False
)
self.mu = tf.get_variable(
name=name+'_mu', shape=[1, x_dim], initializer=tf.constant_initializer(0.0),
trainable=False
)
self.var = tf.get_variable(
name=name+'_std', shape=[1, x_dim], initializer=tf.constant_initializer(1.0),
trainable=False
)
self.cached_count, self.cached_mu, self.cached_var = 0, np.zeros([1, x_dim]), np.ones([1, x_dim])
self.sc_factor = sc_factor
    def fit(self, data):
        """Updates the internal running mean, variance, and count using the
        statistics of the given batch of data.
This function must be called within a 'with <session>.as_default()' block.
Arguments:
data (np.ndarray): A numpy array containing the input
Returns: None.
"""
batch_count = data.shape[0]
batch_mu = np.mean(data, axis=0, keepdims=True)
batch_var = np.var(data, axis=0, keepdims=True)
new_mean, new_var, new_count = self.running_mean_var_from_batch(batch_mu, batch_var, batch_count)
#sigma[sigma < 1e-8] = 1.0
self.mu.load(new_mean)
self.var.load(new_var)
self.count.load(new_count)
self.fitted = True
self.cache()
    def transform(self, data):
        """Transforms the input matrix data using the parameters of this scaler.
        The transform can be adjusted with a scaling factor to control sensitivity to
        out-of-distribution (ood) data. A (commented-out) interpolating variant follows from
        d = (d-mu)/sigma = d + (d-mu)/sigma - d = d + (d(1-sigma)-mu)/sigma,
        so the version with a scaling factor becomes
        d = d + sc_factor*(d(1-sigma)-mu)/sigma.
        The active implementation computes (data - mu) / max(sqrt(var) * sc_factor, 1e-2).
Arguments:
data (np.array): A numpy array containing the points to be transformed.
            sc_factor: Factor controlling to what degree the data is standardized (taken from self.sc_factor, not passed as an argument).
Returns: (np.array) The transformed dataset.
"""
#scaled_transform = data + self.sc_factor * (data* (1-self.sigma) - self.mu) / self.sigma
# scaling = 1+self.sc_factor*(self.sigma-1)
# scaling = tf.clip_by_value(scaling, 1.0e-8, 1.0e8)
scaled_transform = (data-self.mu)/(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2))
return scaled_transform
def inverse_transform(self, data):
"""Undoes the transformation performed by this scaler.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return (tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data + self.mu
def inverse_transform_var(self, data):
"""Undoes the transformation performed by this scaler for variances.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return tf.square(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data
def inverse_transform_logvar(self, data):
"""Undoes the transformation performed by this scaler for variances.
Arguments:
data (np.array): A numpy array containing the points to be transformed.
Returns: (np.array) The transformed dataset.
"""
return 2*tf.log(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) + data
def get_vars(self):
"""Returns a list of variables managed by this object.
Returns: (list<tf.Variable>) The list of variables.
"""
return [self.mu, self.var]
def get_mu(self):
return self.mu
def get_var(self):
return self.var
def cache(self):
"""Caches current values of this scaler.
Returns: None.
"""
self.cached_mu = self.mu.eval()
self.cached_var = self.var.eval()
self.cached_count = self.count.eval()
def load_cache(self):
"""Loads values from the cache
Returns: None.
"""
self.mu.load(self.cached_mu)
self.var.load(self.cached_var)
self.count.load(self.cached_count)
def decay_count(self, decay_rate=0.99):
self.count.load(self.cached_count*decay_rate)
def running_mean_var_from_batch(self, batch_mean, batch_var, batch_count):
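        # Combine the cached running statistics with the batch statistics using the
        # parallel ("Chan et al.") update: M2 = m_a + m_b + delta**2 * n_a * n_b / n,
        # new_var = M2 / n, where n = n_a + n_b.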
delta = batch_mean - self.cached_mu
tot_count = self.cached_count + batch_count
new_mean = self.cached_mu + delta * batch_count / tot_count
m_a = self.cached_var * self.cached_count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.cached_count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
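# A minimal usage sketch (kept as a comment; TF1-style default-session handling is assumed,
# matching the .load()/.eval() calls above, and the shapes are illustrative):
#
#   scaler = TensorStandardScaler(x_dim=3)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       scaler.fit(np.random.randn(100, 3))
#       x_ph = tf.placeholder(tf.float32, [None, 3])
#       x_std = scaler.transform(x_ph)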
|
[
"numpy.clip",
"numpy.mean",
"numpy.median",
"tensorflow.variable_scope",
"numpy.ones",
"numpy.log",
"numpy.square",
"numpy.exp",
"numpy.zeros",
"tensorflow.sqrt",
"tensorflow.constant_initializer",
"numpy.var"
] |
[((724, 752), 'numpy.clip', 'np.clip', (['all_kls', '(0)', '(1 / EPS)'], {}), '(all_kls, 0, 1 / EPS)\n', (731, 752), True, 'import numpy as np\n'), ((1366, 1377), 'numpy.log', 'np.log', (['std'], {}), '(std)\n', (1372, 1377), True, 'import numpy as np\n'), ((1392, 1427), 'numpy.clip', 'np.clip', (['log_std', '(-100)', '(100000000.0)'], {}), '(log_std, -100, 100000000.0)\n', (1399, 1427), True, 'import numpy as np\n'), ((2338, 2349), 'numpy.log', 'np.log', (['std'], {}), '(std)\n', (2344, 2349), True, 'import numpy as np\n'), ((2364, 2399), 'numpy.clip', 'np.clip', (['log_std', '(-100)', '(100000000.0)'], {}), '(log_std, -100, 100000000.0)\n', (2371, 2399), True, 'import numpy as np\n'), ((2482, 2545), 'numpy.zeros', 'np.zeros', ([], {'shape': '((num_models * (num_models - 1),) + mu.shape[1:])'}), '(shape=(num_models * (num_models - 1),) + mu.shape[1:])\n', (2490, 2545), True, 'import numpy as np\n'), ((2761, 2784), 'numpy.median', 'np.median', (['d_kl'], {'axis': '(0)'}), '(d_kl, axis=0)\n', (2770, 2784), True, 'import numpy as np\n'), ((532, 552), 'numpy.exp', 'np.exp', (['(2 * log_std0)'], {}), '(2 * log_std0)\n', (538, 552), True, 'import numpy as np\n'), ((554, 574), 'numpy.exp', 'np.exp', (['(2 * log_std1)'], {}), '(2 * log_std1)\n', (560, 574), True, 'import numpy as np\n'), ((4351, 4387), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (4358, 4387), True, 'import numpy as np\n'), ((4408, 4443), 'numpy.var', 'np.var', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (4414, 4443), True, 'import numpy as np\n'), ((3178, 3201), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (3195, 3201), True, 'import tensorflow as tf\n'), ((3801, 3821), 'numpy.zeros', 'np.zeros', (['[1, x_dim]'], {}), '([1, x_dim])\n', (3809, 3821), True, 'import numpy as np\n'), ((3823, 3842), 'numpy.ones', 'np.ones', (['[1, x_dim]'], {}), '([1, x_dim])\n', (3830, 3842), True, 'import numpy as np\n'), ((3303, 3329), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (3326, 3329), True, 'import tensorflow as tf\n'), ((3480, 3508), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3503, 3508), True, 'import tensorflow as tf\n'), ((3660, 3688), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (3683, 3688), True, 'import tensorflow as tf\n'), ((5629, 5646), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (5636, 5646), True, 'import tensorflow as tf\n'), ((5998, 6015), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (6005, 6015), True, 'import tensorflow as tf\n'), ((6379, 6396), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (6386, 6396), True, 'import tensorflow as tf\n'), ((8003, 8019), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (8012, 8019), True, 'import numpy as np\n'), ((6752, 6769), 'tensorflow.sqrt', 'tf.sqrt', (['self.var'], {}), '(self.var)\n', (6759, 6769), True, 'import tensorflow as tf\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy import array
from numpy import isnan
from numpy import isinf
from numpy import ones
from numpy import zeros
from scipy.linalg import norm
from scipy.sparse import diags
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
__all__ = ['dr_numpy']
K = [
[0.0],
[0.5, 0.5],
[0.5, 0.0, 0.5],
[1.0, 0.0, 0.0, 1.0],
]
class Coeff():
def __init__(self, c):
self.c = c
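        # a damps the previous velocity and b scales the residual-based
        # acceleration term in the explicit integration loop of dr_numpy below.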
self.a = (1 - c * 0.5) / (1 + c * 0.5)
self.b = 0.5 * (1 + self.a)
def dr_numpy(vertices, edges, fixed, loads, qpre, fpre, lpre, linit, E, radius,
callback=None, callback_args=None, **kwargs):
"""Implementation of the dynamic relaxation method for form findong and analysis
of articulated networks of axial-force members.
Parameters
----------
vertices : list
XYZ coordinates of the vertices.
edges : list
Connectivity of the vertices.
fixed : list
Indices of the fixed vertices.
loads : list
XYZ components of the loads on the vertices.
qpre : list
Prescribed force densities in the edges.
fpre : list
Prescribed forces in the edges.
lpre : list
Prescribed lengths of the edges.
linit : list
Initial length of the edges.
E : list
Stiffness of the edges.
radius : list
Radius of the edges.
callback : callable, optional
User-defined function that is called at every iteration.
callback_args : tuple, optional
Additional arguments passed to the callback.
Returns
-------
xyz : array
XYZ coordinates of the equilibrium geometry.
q : array
Force densities in the edges.
f : array
Forces in the edges.
l : array
        Lengths of the edges.
r : array
Residual forces.
Notes
-----
For more info, see [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>.,
*Bending incorporated: designing tension structures by integrating bending-active elements*,
           Proceedings of Tensinet Symposium 2013, Istanbul, Turkey, 2013.
Examples
--------
>>>
"""
# --------------------------------------------------------------------------
# callback
# --------------------------------------------------------------------------
if callback:
assert callable(callback), 'The provided callback is not callable.'
# --------------------------------------------------------------------------
# configuration
# --------------------------------------------------------------------------
kmax = kwargs.get('kmax', 10000)
dt = kwargs.get('dt', 1.0)
tol1 = kwargs.get('tol1', 1e-3)
tol2 = kwargs.get('tol2', 1e-6)
coeff = Coeff(kwargs.get('c', 0.1))
ca = coeff.a
cb = coeff.b
# --------------------------------------------------------------------------
# attribute lists
# --------------------------------------------------------------------------
num_v = len(vertices)
num_e = len(edges)
free = list(set(range(num_v)) - set(fixed))
# --------------------------------------------------------------------------
# attribute arrays
# --------------------------------------------------------------------------
x = array(vertices, dtype=float).reshape((-1, 3)) # m
p = array(loads, dtype=float).reshape((-1, 3)) # kN
qpre = array(qpre, dtype=float).reshape((-1, 1))
fpre = array(fpre, dtype=float).reshape((-1, 1)) # kN
lpre = array(lpre, dtype=float).reshape((-1, 1)) # m
linit = array(linit, dtype=float).reshape((-1, 1)) # m
E = array(E, dtype=float).reshape((-1, 1)) # kN/mm2 => GPa
radius = array(radius, dtype=float).reshape((-1, 1)) # mm
# --------------------------------------------------------------------------
# sectional properties
# --------------------------------------------------------------------------
A = 3.14159 * radius ** 2 # mm2
EA = E * A # kN
# --------------------------------------------------------------------------
# create the connectivity matrices
# after spline edges have been aligned
# --------------------------------------------------------------------------
C = connectivity_matrix(edges, 'csr')
Ct = C.transpose()
Ci = C[:, free]
Cit = Ci.transpose()
Ct2 = Ct.copy()
Ct2.data **= 2
# --------------------------------------------------------------------------
# if none of the initial lengths are set,
# set the initial lengths to the current lengths
# --------------------------------------------------------------------------
if all(linit == 0):
linit = normrow(C.dot(x))
# --------------------------------------------------------------------------
# initial values
# --------------------------------------------------------------------------
q = ones((num_e, 1), dtype=float)
l = normrow(C.dot(x)) # noqa: E741
f = q * l
v = zeros((num_v, 3), dtype=float)
r = zeros((num_v, 3), dtype=float)
# --------------------------------------------------------------------------
# helpers
# --------------------------------------------------------------------------
def rk(x0, v0, steps=2):
def a(t, v):
dx = v * t
x[free] = x0[free] + dx[free]
# update residual forces
r[free] = p[free] - D.dot(x)
return cb * r / mass
if steps == 1:
return a(dt, v0)
if steps == 2:
B = [0.0, 1.0]
K0 = dt * a(K[0][0] * dt, v0)
K1 = dt * a(K[1][0] * dt, v0 + K[1][1] * K0)
dv = B[0] * K0 + B[1] * K1
return dv
if steps == 4:
B = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
K0 = dt * a(K[0][0] * dt, v0)
K1 = dt * a(K[1][0] * dt, v0 + K[1][1] * K0)
K2 = dt * a(K[2][0] * dt, v0 + K[2][1] * K0 + K[2][2] * K1)
K3 = dt * a(K[3][0] * dt, v0 + K[3][1] * K0 + K[3][2] * K1 + K[3][3] * K2)
dv = B[0] * K0 + B[1] * K1 + B[2] * K2 + B[3] * K3
return dv
raise NotImplementedError
# --------------------------------------------------------------------------
# start iterating
# --------------------------------------------------------------------------
for k in range(kmax):
# print(k)
q_fpre = fpre / l
q_lpre = f / lpre
q_EA = EA * (l - linit) / (linit * l)
q_lpre[isinf(q_lpre)] = 0
q_lpre[isnan(q_lpre)] = 0
q_EA[isinf(q_EA)] = 0
q_EA[isnan(q_EA)] = 0
q = qpre + q_fpre + q_lpre + q_EA
Q = diags([q[:, 0]], [0])
D = Cit.dot(Q).dot(C)
mass = 0.5 * dt ** 2 * Ct2.dot(qpre + q_fpre + q_lpre + EA / linit)
# RK
x0 = x.copy()
v0 = ca * v.copy()
dv = rk(x0, v0, steps=4)
v[free] = v0[free] + dv[free]
dx = v * dt
x[free] = x0[free] + dx[free]
# update
u = C.dot(x)
l = normrow(u) # noqa: E741
f = q * l
r = p - Ct.dot(Q).dot(u)
# crits
crit1 = norm(r[free])
crit2 = norm(dx[free])
# callback
if callback:
callback(k, x, [crit1, crit2], callback_args)
# convergence
if crit1 < tol1:
break
if crit2 < tol2:
break
return x, q, f, l, r
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
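    # Hedged usage sketch (values are illustrative, not from the original
    # module): a three-vertex, two-edge cable with only the middle vertex free.
    vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]]
    edges = [[0, 1], [1, 2]]
    fixed = [0, 2]
    loads = [[0.0, 0.0, -1.0], [0.0, 0.0, -1.0], [0.0, 0.0, -1.0]]
    qpre = [1.0, 1.0]
    fpre = [0.0, 0.0]
    lpre = [0.0, 0.0]
    linit = [0.0, 0.0]
    E = [0.0, 0.0]
    radius = [0.0, 0.0]
    xyz, q, f, l, r = dr_numpy(vertices, edges, fixed, loads, qpre, fpre,
                               lpre, linit, E, radius)
    print(xyz[1])  # the free middle vertex settles below the supports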
|
[
"numpy.ones",
"numpy.array",
"numpy.zeros",
"compas.numerical.connectivity_matrix",
"scipy.linalg.norm",
"numpy.isnan",
"scipy.sparse.diags",
"numpy.isinf",
"compas.numerical.normrow"
] |
[((4682, 4715), 'compas.numerical.connectivity_matrix', 'connectivity_matrix', (['edges', '"""csr"""'], {}), "(edges, 'csr')\n", (4701, 4715), False, 'from compas.numerical import connectivity_matrix\n'), ((5333, 5362), 'numpy.ones', 'ones', (['(num_e, 1)'], {'dtype': 'float'}), '((num_e, 1), dtype=float)\n', (5337, 5362), False, 'from numpy import ones\n'), ((5425, 5455), 'numpy.zeros', 'zeros', (['(num_v, 3)'], {'dtype': 'float'}), '((num_v, 3), dtype=float)\n', (5430, 5455), False, 'from numpy import zeros\n'), ((5464, 5494), 'numpy.zeros', 'zeros', (['(num_v, 3)'], {'dtype': 'float'}), '((num_v, 3), dtype=float)\n', (5469, 5494), False, 'from numpy import zeros\n'), ((7129, 7150), 'scipy.sparse.diags', 'diags', (['[q[:, 0]]', '[0]'], {}), '([q[:, 0]], [0])\n', (7134, 7150), False, 'from scipy.sparse import diags\n'), ((7498, 7508), 'compas.numerical.normrow', 'normrow', (['u'], {}), '(u)\n', (7505, 7508), False, 'from compas.numerical import normrow\n'), ((7606, 7619), 'scipy.linalg.norm', 'norm', (['r[free]'], {}), '(r[free])\n', (7610, 7619), False, 'from scipy.linalg import norm\n'), ((7636, 7650), 'scipy.linalg.norm', 'norm', (['dx[free]'], {}), '(dx[free])\n', (7640, 7650), False, 'from scipy.linalg import norm\n'), ((3467, 3495), 'numpy.array', 'array', (['vertices'], {'dtype': 'float'}), '(vertices, dtype=float)\n', (3472, 3495), False, 'from numpy import array\n'), ((3546, 3571), 'numpy.array', 'array', (['loads'], {'dtype': 'float'}), '(loads, dtype=float)\n', (3551, 3571), False, 'from numpy import array\n'), ((3629, 3653), 'numpy.array', 'array', (['qpre'], {'dtype': 'float'}), '(qpre, dtype=float)\n', (3634, 3653), False, 'from numpy import array\n'), ((3682, 3706), 'numpy.array', 'array', (['fpre'], {'dtype': 'float'}), '(fpre, dtype=float)\n', (3687, 3706), False, 'from numpy import array\n'), ((3762, 3786), 'numpy.array', 'array', (['lpre'], {'dtype': 'float'}), '(lpre, dtype=float)\n', (3767, 3786), False, 'from numpy import array\n'), ((3842, 3867), 'numpy.array', 'array', (['linit'], {'dtype': 'float'}), '(linit, dtype=float)\n', (3847, 3867), False, 'from numpy import array\n'), ((3917, 3938), 'numpy.array', 'array', (['E'], {'dtype': 'float'}), '(E, dtype=float)\n', (3922, 3938), False, 'from numpy import array\n'), ((4013, 4039), 'numpy.array', 'array', (['radius'], {'dtype': 'float'}), '(radius, dtype=float)\n', (4018, 4039), False, 'from numpy import array\n'), ((6961, 6974), 'numpy.isinf', 'isinf', (['q_lpre'], {}), '(q_lpre)\n', (6966, 6974), False, 'from numpy import isinf\n'), ((6995, 7008), 'numpy.isnan', 'isnan', (['q_lpre'], {}), '(q_lpre)\n', (7000, 7008), False, 'from numpy import isnan\n'), ((7027, 7038), 'numpy.isinf', 'isinf', (['q_EA'], {}), '(q_EA)\n', (7032, 7038), False, 'from numpy import isinf\n'), ((7057, 7068), 'numpy.isnan', 'isnan', (['q_EA'], {}), '(q_EA)\n', (7062, 7068), False, 'from numpy import isnan\n')]
|
# Built-in
import os
from glob import glob
# Libs
import numpy as np
from tqdm import tqdm
from natsort import natsorted
# Own modules
from data import data_utils
from mrs_utils import misc_utils, process_block
# Settings
DS_NAME = 'spca'
def get_images(data_dir, valid_percent=0.5, split=False):
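    # Collect (RGB, ground-truth) tile pairs per city and split each city's
    # tiles into training and validation sets according to valid_percent.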
rgb_files = natsorted(glob(os.path.join(data_dir, '*RGB.jpg')))
lbl_files = natsorted(glob(os.path.join(data_dir, '*GT.png')))
'''ind = np.arange(len(rgb_files))
np.random.shuffle(ind)
rgb_files = [rgb_files[a] for a in ind]
lbl_files = [lbl_files[a] for a in ind]'''
assert len(rgb_files) == len(lbl_files)
city_names = ['Fresno', 'Modesto', 'Stockton', 'aus']
city_files = {city_name: [(rgb_file, lbl_file) for (rgb_file, lbl_file) in zip(rgb_files, lbl_files)
if city_name in rgb_file] for city_name in city_names}
train_files, valid_files = [], []
for city_name, file_pairs in city_files.items():
valid_size = int(valid_percent * len(file_pairs))
train_files.extend(file_pairs[valid_size:])
valid_files.extend(file_pairs[:valid_size])
if split:
return train_files, valid_files
else:
return [a[0] for a in valid_files], [a[1] for a in valid_files]
def create_dataset(data_dir, save_dir, patch_size, pad, overlap, valid_percent=0.1, visualize=False):
# create folders and files
patch_dir = os.path.join(save_dir, 'patches')
misc_utils.make_dir_if_not_exist(patch_dir)
record_file_train = open(os.path.join(save_dir, 'file_list_train_{}.txt').format(
misc_utils.float2str(valid_percent)), 'w+')
record_file_valid = open(os.path.join(save_dir, 'file_list_valid_{}.txt').format(
misc_utils.float2str(valid_percent)), 'w+')
train_files, valid_files = get_images(data_dir, valid_percent, split=True)
for img_file, lbl_file in tqdm(train_files):
city_name = os.path.splitext(os.path.basename(img_file))[0].split('_')[0]
for rgb_patch, gt_patch, y, x in data_utils.patch_tile(img_file, lbl_file, patch_size, pad, overlap):
if visualize:
from mrs_utils import vis_utils
vis_utils.compare_figures([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))
img_patchname = '{}_y{}x{}.jpg'.format(city_name, int(y), int(x))
lbl_patchname = '{}_y{}x{}.png'.format(city_name, int(y), int(x))
# misc_utils.save_file(os.path.join(patch_dir, img_patchname), rgb_patch.astype(np.uint8))
# misc_utils.save_file(os.path.join(patch_dir, lbl_patchname), gt_patch.astype(np.uint8))
record_file_train.write('{} {}\n'.format(img_patchname, lbl_patchname))
for img_file, lbl_file in tqdm(valid_files):
city_name = os.path.splitext(os.path.basename(img_file))[0].split('_')[0]
for rgb_patch, gt_patch, y, x in data_utils.patch_tile(img_file, lbl_file, patch_size, pad, overlap):
if visualize:
from mrs_utils import vis_utils
vis_utils.compare_figures([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))
img_patchname = '{}_y{}x{}.jpg'.format(city_name, int(y), int(x))
lbl_patchname = '{}_y{}x{}.png'.format(city_name, int(y), int(x))
# misc_utils.save_file(os.path.join(patch_dir, img_patchname), rgb_patch.astype(np.uint8))
# misc_utils.save_file(os.path.join(patch_dir, lbl_patchname), gt_patch.astype(np.uint8))
record_file_valid.write('{} {}\n'.format(img_patchname, lbl_patchname))
def get_stats(img_dir):
from data import data_utils
from glob import glob
rgb_imgs = glob(os.path.join(img_dir, '*RGB.jpg'))
ds_mean, ds_std = data_utils.get_ds_stats(rgb_imgs)
return np.stack([ds_mean, ds_std], axis=0)
def get_stats_pb(img_dir):
val = process_block.ValueComputeProcess(DS_NAME, os.path.join(os.path.dirname(__file__), '../stats/builtin'),
os.path.join(os.path.dirname(__file__), '../stats/builtin/{}.npy'.format(DS_NAME)), func=get_stats).\
run(img_dir=img_dir).val
val_test = val
return val, val_test
if __name__ == '__main__':
img_files = natsorted(glob(os.path.join(r'/home/wh145/data/caemo', '*RGB.jpg')))
np.random.seed(931004)
ps = 512
ol = 0
pd = 0
create_dataset(data_dir=r'/home/wh145/data/caemo',
save_dir=r'/home/wh145/data/caemo/ps_512_ol_0', patch_size=(ps, ps), pad=pd, overlap=ol, visualize=False, valid_percent=0.1)
# val = get_stats_pb(r'/media/ei-edl01/data/uab_datasets/spca/data/Original_Tiles')[0]
# data_utils.patches_to_hdf5(r'/hdd/mrs/spca', r'/hdd/mrs/spca/ps512_pd0_ol0_hdf5')
|
[
"mrs_utils.misc_utils.float2str",
"mrs_utils.misc_utils.make_dir_if_not_exist",
"data.data_utils.patch_tile",
"tqdm.tqdm",
"os.path.join",
"mrs_utils.vis_utils.compare_figures",
"numpy.stack",
"os.path.dirname",
"numpy.random.seed",
"os.path.basename",
"data.data_utils.get_ds_stats"
] |
[((1428, 1461), 'os.path.join', 'os.path.join', (['save_dir', '"""patches"""'], {}), "(save_dir, 'patches')\n", (1440, 1461), False, 'import os\n'), ((1466, 1509), 'mrs_utils.misc_utils.make_dir_if_not_exist', 'misc_utils.make_dir_if_not_exist', (['patch_dir'], {}), '(patch_dir)\n', (1498, 1509), False, 'from mrs_utils import misc_utils, process_block\n'), ((1896, 1913), 'tqdm.tqdm', 'tqdm', (['train_files'], {}), '(train_files)\n', (1900, 1913), False, 'from tqdm import tqdm\n'), ((2748, 2765), 'tqdm.tqdm', 'tqdm', (['valid_files'], {}), '(valid_files)\n', (2752, 2765), False, 'from tqdm import tqdm\n'), ((3730, 3763), 'data.data_utils.get_ds_stats', 'data_utils.get_ds_stats', (['rgb_imgs'], {}), '(rgb_imgs)\n', (3753, 3763), False, 'from data import data_utils\n'), ((3775, 3810), 'numpy.stack', 'np.stack', (['[ds_mean, ds_std]'], {'axis': '(0)'}), '([ds_mean, ds_std], axis=0)\n', (3783, 3810), True, 'import numpy as np\n'), ((4296, 4318), 'numpy.random.seed', 'np.random.seed', (['(931004)'], {}), '(931004)\n', (4310, 4318), True, 'import numpy as np\n'), ((2038, 2105), 'data.data_utils.patch_tile', 'data_utils.patch_tile', (['img_file', 'lbl_file', 'patch_size', 'pad', 'overlap'], {}), '(img_file, lbl_file, patch_size, pad, overlap)\n', (2059, 2105), False, 'from data import data_utils\n'), ((2890, 2957), 'data.data_utils.patch_tile', 'data_utils.patch_tile', (['img_file', 'lbl_file', 'patch_size', 'pad', 'overlap'], {}), '(img_file, lbl_file, patch_size, pad, overlap)\n', (2911, 2957), False, 'from data import data_utils\n'), ((3673, 3706), 'os.path.join', 'os.path.join', (['img_dir', '"""*RGB.jpg"""'], {}), "(img_dir, '*RGB.jpg')\n", (3685, 3706), False, 'import os\n'), ((333, 367), 'os.path.join', 'os.path.join', (['data_dir', '"""*RGB.jpg"""'], {}), "(data_dir, '*RGB.jpg')\n", (345, 367), False, 'import os\n'), ((401, 434), 'os.path.join', 'os.path.join', (['data_dir', '"""*GT.png"""'], {}), "(data_dir, '*GT.png')\n", (413, 434), False, 'import os\n'), ((1604, 1639), 'mrs_utils.misc_utils.float2str', 'misc_utils.float2str', (['valid_percent'], {}), '(valid_percent)\n', (1624, 1639), False, 'from mrs_utils import misc_utils, process_block\n'), ((1742, 1777), 'mrs_utils.misc_utils.float2str', 'misc_utils.float2str', (['valid_percent'], {}), '(valid_percent)\n', (1762, 1777), False, 'from mrs_utils import misc_utils, process_block\n'), ((4237, 4287), 'os.path.join', 'os.path.join', (['"""/home/wh145/data/caemo"""', '"""*RGB.jpg"""'], {}), "('/home/wh145/data/caemo', '*RGB.jpg')\n", (4249, 4287), False, 'import os\n'), ((1539, 1587), 'os.path.join', 'os.path.join', (['save_dir', '"""file_list_train_{}.txt"""'], {}), "(save_dir, 'file_list_train_{}.txt')\n", (1551, 1587), False, 'import os\n'), ((1677, 1725), 'os.path.join', 'os.path.join', (['save_dir', '"""file_list_valid_{}.txt"""'], {}), "(save_dir, 'file_list_valid_{}.txt')\n", (1689, 1725), False, 'import os\n'), ((2197, 2271), 'mrs_utils.vis_utils.compare_figures', 'vis_utils.compare_figures', (['[rgb_patch, gt_patch]', '(1, 2)'], {'fig_size': '(12, 5)'}), '([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))\n', (2222, 2271), False, 'from mrs_utils import vis_utils\n'), ((3049, 3123), 'mrs_utils.vis_utils.compare_figures', 'vis_utils.compare_figures', (['[rgb_patch, gt_patch]', '(1, 2)'], {'fig_size': '(12, 5)'}), '([rgb_patch, gt_patch], (1, 2), fig_size=(12, 5))\n', (3074, 3123), False, 'from mrs_utils import vis_utils\n'), ((3906, 3931), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3921, 3931), False, 
'import os\n'), ((4011, 4036), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4026, 4036), False, 'import os\n'), ((1952, 1978), 'os.path.basename', 'os.path.basename', (['img_file'], {}), '(img_file)\n', (1968, 1978), False, 'import os\n'), ((2804, 2830), 'os.path.basename', 'os.path.basename', (['img_file'], {}), '(img_file)\n', (2820, 2830), False, 'import os\n')]
|
"""Solution of the exercises of Optimization of compute bound Python code"""
import math
import cmath
import numpy as np
import numexpr as ne
import numba as nb
# Needed here since it is used as global variables
# Maximum strain at surface
e0 = 0.01
# Width of the strain profile below the surface
w = 5.0
# Python: Circular crystal ###
def circ_python_1(N, h, k):
x = (np.arange(N) - N / 2).reshape(-1, 1)
y = (np.arange(N) - N / 2).reshape(1, -1)
omega = x * x + y * y <= (N / 2) ** 2
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
# One should discard bad values
tmp = 0.0
for n in range(N): # loop and sum over unit-cells
for m in range(N):
if omega[n, m]:
tmp += cmath.exp(2j * np.pi * (v_h * n + v_k * m))
result[i_h][i_k] = abs(tmp) ** 2
return result
# Alternative using Python `sum`
def circ_python_1_alt(N, h, k):
# Filter-out position outside crystal once for all
inside_pos = [
(n, m)
for n in range(N)
for m in range(N)
if ((n - N / 2) ** 2 + (m - N / 2) ** 2) <= (N / 2) ** 2
]
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
result[i_h][i_k] = (
abs(
sum( # Sum over positions inside the crystal
cmath.exp(2j * np.pi * (v_h * n + v_k * m))
for n, m in inside_pos
)
)
** 2
)
return result
# Python: Circular strained crystal ###
def circ_python(N, h, k):
N_2 = N / 2
positions = {}
for i in range(N):
x = i - N_2
for j in range(N):
y = j - N_2
r = (x * x + y * y) ** 0.5
if r <= N_2:
strain = e0 * (1 + math.tanh((r - N_2) / w))
positions[(i, j)] = (i + strain * x, j + strain * y)
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
# One should discard bad values
tmp = 0.0
for i_n in range(N): # loop and sum over unit-cells
for i_m in range(N):
pos = positions.get((i_n, i_m))
if pos:
n_s, m_s = pos
tmp += cmath.exp(2j * np.pi * (v_h * n_s + v_k * m_s))
result[i_h, i_k] = abs(tmp) ** 2
return result
# Alternative computing list of strained position
def circ_python_alt(N, h, k):
# Compute strained position inside the crystal once for all
strained_pos = []
crystal_radius = N / 2
for n in range(N):
for m in range(N):
# Center is at (N/2, N/2)
x = n - crystal_radius
y = m - crystal_radius
radius = (x ** 2 + y ** 2) ** 0.5
if radius <= crystal_radius:
delta = e0 * (1 + math.tanh((radius - crystal_radius) / w))
strained_pos.append((n + delta * x, m + delta * y))
result = np.zeros((h.size, k.size))
for i_h, v_h in enumerate(h): # loop over the reciprocal space coordinates
for i_k, v_k in enumerate(k):
result[i_h][i_k] = (
abs(
sum(
cmath.exp(2j * np.pi * (v_h * n_s + v_k * m_s))
for n_s, m_s in strained_pos
)
)
** 2
)
return result
# numpy ###
def circ_numpy(N, h, k):
N_2 = N / 2
h = h.reshape(-1, 1, 1, 1)
k = k.reshape(1, -1, 1, 1)
n = np.arange(N).reshape(1, 1, -1, 1)
m = np.arange(N).reshape(1, 1, 1, -1)
radius = np.sqrt((n - N_2) ** 2 + (m - N_2) ** 2)
strain = e0 * (1.0 + np.tanh((radius - N_2) / w))
p_n = n + strain * (n - N_2)
p_m = m + strain * (m - N_2)
omega = radius <= N_2
tmp = omega * np.exp(2j * np.pi * (h * p_n + k * p_m))
return np.abs(tmp.sum(axis=(2, 3))) ** 2
# numexpr ###
def circ_numexpr(N, h, k):
N_2 = N / 2
h = h.reshape(-1, 1, 1, 1)
k = k.reshape(1, -1, 1, 1)
n = np.arange(N).reshape(1, 1, -1, 1)
m = np.arange(N).reshape(1, 1, 1, -1)
radius = ne.evaluate("sqrt((n - N_2)**2 + (m - N_2)**2)")
strain = ne.evaluate("e0 * (1 + tanh((radius-N_2) / w))")
j2pi = np.pi * 2j
tmp = ne.evaluate(
"where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))"
)
result = abs(tmp.sum(axis=(2, 3))) ** 2
return result
# numba ###
@nb.jit(parallel=True)
def circ_numba(N, h, k):
result = np.zeros((h.size, k.size), dtype=np.float64)
N_2 = N / 2
for h_i in nb.prange(h.size): # loop over the reciprocal space coordinates
for k_i in range(k.size):
tmp = 0j
for n in range(N): # loop and sum over unit-cells
for m in range(N):
radius = math.sqrt((n - N_2) ** 2 + (m - N_2) ** 2)
if radius > (N_2):
value = 0j
                        # continue  # Numba was not working with a plain `continue` here, so a zero value is added instead
else:
strain = e0 * (1 + math.tanh((radius - N_2) / w))
p_n = n + strain * (n - N_2)
p_m = m + strain * (m - N_2)
value = np.exp(2j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))
tmp += value
result[h_i, k_i] = abs(tmp) ** 2
return result
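# Hedged usage sketch (not part of the original exercise file): every circ_*
# variant shares the same (N, h, k) signature, where h and k are 1-D NumPy
# arrays of reciprocal-space coordinates.
if __name__ == "__main__":
    h = k = np.linspace(1.9, 2.1, 21)
    reference = circ_numpy(32, h, k)
    fast = circ_numba(32, h, k)
    print("max abs difference:", np.max(np.abs(reference - fast)))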
|
[
"numpy.sqrt",
"math.sqrt",
"numpy.tanh",
"numpy.exp",
"numpy.zeros",
"numba.jit",
"cmath.exp",
"math.tanh",
"numexpr.evaluate",
"numba.prange",
"numpy.arange"
] |
[((4845, 4866), 'numba.jit', 'nb.jit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (4851, 4866), True, 'import numba as nb\n'), ((517, 543), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (525, 543), True, 'import numpy as np\n'), ((1293, 1319), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (1301, 1319), True, 'import numpy as np\n'), ((2172, 2198), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (2180, 2198), True, 'import numpy as np\n'), ((3345, 3371), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {}), '((h.size, k.size))\n', (3353, 3371), True, 'import numpy as np\n'), ((4003, 4043), 'numpy.sqrt', 'np.sqrt', (['((n - N_2) ** 2 + (m - N_2) ** 2)'], {}), '((n - N_2) ** 2 + (m - N_2) ** 2)\n', (4010, 4043), True, 'import numpy as np\n'), ((4514, 4562), 'numexpr.evaluate', 'ne.evaluate', (['"""sqrt((n - N_2)**2 + (m - N_2)**2)"""'], {}), "('sqrt((n - N_2)**2 + (m - N_2)**2)')\n", (4525, 4562), True, 'import numexpr as ne\n'), ((4576, 4624), 'numexpr.evaluate', 'ne.evaluate', (['"""e0 * (1 + tanh((radius-N_2) / w))"""'], {}), "('e0 * (1 + tanh((radius-N_2) / w))')\n", (4587, 4624), True, 'import numexpr as ne\n'), ((4657, 4761), 'numexpr.evaluate', 'ne.evaluate', (['"""where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))"""'], {}), "(\n 'where(radius > N_2, 0, exp(j2pi*(h*(n+strain*(n-N_2)) + k*(m+strain*(m-N_2)))))'\n )\n", (4668, 4761), True, 'import numexpr as ne\n'), ((4905, 4949), 'numpy.zeros', 'np.zeros', (['(h.size, k.size)'], {'dtype': 'np.float64'}), '((h.size, k.size), dtype=np.float64)\n', (4913, 4949), True, 'import numpy as np\n'), ((4981, 4998), 'numba.prange', 'nb.prange', (['h.size'], {}), '(h.size)\n', (4990, 4998), True, 'import numba as nb\n'), ((4208, 4250), 'numpy.exp', 'np.exp', (['(2.0j * np.pi * (h * p_n + k * p_m))'], {}), '(2.0j * np.pi * (h * p_n + k * p_m))\n', (4214, 4250), True, 'import numpy as np\n'), ((3914, 3926), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3923, 3926), True, 'import numpy as np\n'), ((3956, 3968), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3965, 3968), True, 'import numpy as np\n'), ((4069, 4096), 'numpy.tanh', 'np.tanh', (['((radius - N_2) / w)'], {}), '((radius - N_2) / w)\n', (4076, 4096), True, 'import numpy as np\n'), ((4425, 4437), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4434, 4437), True, 'import numpy as np\n'), ((4467, 4479), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4476, 4479), True, 'import numpy as np\n'), ((379, 391), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (388, 391), True, 'import numpy as np\n'), ((425, 437), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (434, 437), True, 'import numpy as np\n'), ((5228, 5270), 'math.sqrt', 'math.sqrt', (['((n - N_2) ** 2 + (m - N_2) ** 2)'], {}), '((n - N_2) ** 2 + (m - N_2) ** 2)\n', (5237, 5270), False, 'import math\n'), ((893, 938), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n + v_k * m))'], {}), '(2.0j * np.pi * (v_h * n + v_k * m))\n', (902, 938), False, 'import cmath\n'), ((2064, 2088), 'math.tanh', 'math.tanh', (['((r - N_2) / w)'], {}), '((r - N_2) / w)\n', (2073, 2088), False, 'import math\n'), ((2636, 2685), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n_s + v_k * m_s))'], {}), '(2.0j * np.pi * (v_h * n_s + v_k * m_s))\n', (2645, 2685), False, 'import cmath\n'), ((3221, 3261), 'math.tanh', 'math.tanh', (['((radius - crystal_radius) / w)'], {}), '((radius - crystal_radius) / 
w)\n', (3230, 3261), False, 'import math\n'), ((5682, 5737), 'numpy.exp', 'np.exp', (['(2.0j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))'], {}), '(2.0j * cmath.pi * (h[h_i] * p_n + k[k_i] * p_m))\n', (5688, 5737), True, 'import numpy as np\n'), ((1582, 1627), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n + v_k * m))'], {}), '(2.0j * np.pi * (v_h * n + v_k * m))\n', (1591, 1627), False, 'import cmath\n'), ((3593, 3642), 'cmath.exp', 'cmath.exp', (['(2.0j * np.pi * (v_h * n_s + v_k * m_s))'], {}), '(2.0j * np.pi * (v_h * n_s + v_k * m_s))\n', (3602, 3642), False, 'import cmath\n'), ((5513, 5542), 'math.tanh', 'math.tanh', (['((radius - N_2) / w)'], {}), '((radius - N_2) / w)\n', (5522, 5542), False, 'import math\n')]
|
#!/usr/bin/env python
# @author <NAME> <<EMAIL>>, Interactive Robotics Lab, Arizona State University
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import rclpy
from policy_translation.srv import NetworkPT, TuneNetwork
from model_src.model import PolicyTranslationModel
from utils.network import Network
from utils.tf_util import trainOnCPU, limitGPUMemory
from utils.intprim.gaussian_model import GaussianModel
import tensorflow as tf
import numpy as np
import re
from cv_bridge import CvBridge, CvBridgeError
import cv2
import matplotlib.pyplot as plt
from utils.intprim.gaussian_model import GaussianModel
import glob
import json
import pickle
import copy
# Force TensorFlow to use the CPU
FORCE_CPU = True
# Use dropout at run-time for stochastic forward passes
USE_DROPOUT = True
# Where can we find the trained model?
MODEL_PATH = "../GDrive/model/policy_translation"
# Where is a pre-trained faster-rcnn?
FRCNN_PATH = "../GDrive/rcnn"
# Where are the GloVe word embeddings?
GLOVE_PATH = "../GDrive/glove.6B.50d.txt"
# Where is the normalization of the dataset?
NORM_PATH = "../GDrive/normalization_v2.pkl"
if FORCE_CPU:
trainOnCPU()
else:
limitGPUMemory()
print("Running Policy Translation Model")
model = PolicyTranslationModel(
od_path=FRCNN_PATH,
glove_path=GLOVE_PATH,
special=None
)
bs = 2
model((
np.ones((bs, 15), dtype=np.int64),
np.ones((bs, 6, 5), dtype=np.float32),
np.ones((bs, 500, 7), dtype=np.float32)
))
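# The dummy forward pass above builds the model's variables so that the
# pre-trained weights can be loaded below.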
model.load_weights(MODEL_PATH)
model.summary()
class NetworkService():
def __init__(self):
self.dictionary = self._loadDictionary(GLOVE_PATH)
self.regex = re.compile('[^a-z ]')
self.bridge = CvBridge()
self.history = []
rclpy.init(args=None)
self.node = rclpy.create_node("neural_network")
self.service_nn = self.node.create_service(NetworkPT, "/network", self.cbk_network_dmp_ros2)
self.normalization = pickle.load(open(NORM_PATH, mode="rb"), encoding="latin1")
print("Ready")
def runNode(self):
while rclpy.ok():
rclpy.spin_once(self.node)
        # only the NetworkPT service is created in __init__, so it is the only
        # one to clean up here (the original also destroyed an undefined
        # self.service_tn)
        self.node.destroy_service(self.service_nn)
rclpy.shutdown()
def _loadDictionary(self, file):
__dictionary = {}
__dictionary[""] = 0 # Empty string
fh = open(file, "r", encoding="utf-8")
for line in fh:
if len(__dictionary) >= 300000:
break
tokens = line.strip().split(" ")
__dictionary[tokens[0]] = len(__dictionary)
fh.close()
return __dictionary
def tokenize(self, language):
voice = self.regex.sub("", language.strip().lower())
tokens = []
for w in voice.split(" "):
idx = 0
try:
idx = self.dictionary[w]
except:
print("Unknown word: " + w)
tokens.append(idx)
return tokens
def normalize(self, value, v_min, v_max):
if (value.shape[1] != v_min.shape[0] or v_min.shape[0] != v_max.shape[0] or
len(value.shape) != 2 or len(v_min.shape) != 1 or len(v_max.shape) != 1):
            # ArrayDimensionMismatch is not defined in this module; raise a
            # standard exception instead
            raise ValueError("value, v_min and v_max have incompatible shapes")
value = np.copy(value)
v_min = np.tile(np.expand_dims(v_min, 0), [value.shape[0], 1])
v_max = np.tile(np.expand_dims(v_max, 0), [value.shape[0], 1])
value = (value - v_min) / (v_max - v_min)
return value
def interpolateTrajectory(self, trj, target):
current_length = trj.shape[0]
dimensions = trj.shape[1]
result = np.zeros((target, trj.shape[1]), dtype=np.float32)
for i in range(dimensions):
result[:,i] = np.interp(np.linspace(0.0, 1.0, num=target), np.linspace(0.0, 1.0, num=current_length), trj[:,i])
return result
def cbk_network_dmp_ros2(self, req, res):
res.trajectory, res.confidence, res.timesteps, res.weights, res.phase = self.cbk_network_dmp(req)
return res
def imgmsg_to_cv2(self, img_msg, desired_encoding="passthrough"):
if img_msg.encoding != "8UC3":
            self.node.get_logger().info("Unrecognized image type: " + img_msg.encoding)
exit(0)
dtype = "uint8"
n_channels = 3
dtype = np.dtype(dtype)
dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
img_buf = np.asarray(img_msg.data, dtype=dtype) if isinstance(img_msg.data, list) else img_msg.data
if n_channels == 1:
im = np.ndarray(shape=(img_msg.height, img_msg.width),
dtype=dtype, buffer=img_buf)
else:
im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
dtype=dtype, buffer=img_buf)
if img_msg.is_bigendian == (sys.byteorder == 'little'):
im = im.byteswap().newbyteorder()
if desired_encoding == 'passthrough':
return im
from cv_bridge.boost.cv_bridge_boost import cvtColor2
try:
res = cvtColor2(im, img_msg.encoding, desired_encoding)
except RuntimeError as e:
raise CvBridgeError(e)
return res
def cbk_network_dmp(self, req):
if req.reset:
self.req_step = 0
self.sfp_history = []
try:
image = self.imgmsg_to_cv2(req.image)
except CvBridgeError as e:
print(e)
language = self.tokenize(req.language)
self.language = language + [0] * (15-len(language))
image_features = model.frcnn(tf.convert_to_tensor([image], dtype=tf.uint8))
scores = image_features["detection_scores"][0, :6].numpy().astype(dtype=np.float32)
scores = [0.0 if v < 0.5 else 1.0 for v in scores.tolist()]
classes = image_features["detection_classes"][0, :6].numpy().astype(dtype=np.int32)
classes = [v * scores[k] for k, v in enumerate(classes.tolist())]
boxes = image_features["detection_boxes"][0, :6, :].numpy().astype(dtype=np.float32)
self.features = np.concatenate((np.expand_dims(classes,1), boxes), axis=1)
self.history = []
self.history.append(list(req.robot))
robot = np.asarray(self.history, dtype=np.float32)
self.input_data = (
tf.convert_to_tensor(np.tile([self.language],[250, 1]), dtype=tf.int64),
tf.convert_to_tensor(np.tile([self.features],[250, 1, 1]), dtype=tf.float32),
tf.convert_to_tensor(np.tile([robot],[250, 1, 1]), dtype=tf.float32)
)
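        # The inputs are tiled 250 times so that a single forward pass with
        # dropout enabled yields a distribution of trajectories; the mean and
        # standard deviation are taken over that batch below.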
generated, (atn, dmp_dt, phase, weights) = model(self.input_data, training=tf.constant(False), use_dropout=tf.constant(True))
self.trj_gen = tf.math.reduce_mean(generated, axis=0).numpy()
self.trj_std = tf.math.reduce_std(generated, axis=0).numpy()
self.timesteps = int(tf.math.reduce_mean(dmp_dt).numpy() * 500)
self.b_weights = tf.math.reduce_mean(weights, axis=0).numpy()
phase_value = tf.math.reduce_mean(phase, axis=0).numpy()
phase_value = phase_value[-1,0]
self.sfp_history.append(self.b_weights[-1,:,:])
if phase_value > 0.95 and len(self.sfp_history) > 100:
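            # Near the end of the motion (predicted phase > 0.95), decode every
            # stored set of basis weights through the Gaussian basis model to
            # reconstruct the 7-DoF trajectories and save them for analysis.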
trj_len = len(self.sfp_history)
basismodel = GaussianModel(degree=11, scale=0.012, observed_dof_names=("Base","Shoulder","Ellbow","Wrist1","Wrist2","Wrist3","Gripper"))
domain = np.linspace(0, 1, trj_len, dtype=np.float64)
trajectories = []
for i in range(trj_len):
trajectories.append(np.asarray(basismodel.apply_coefficients(domain, self.sfp_history[i].flatten())))
trajectories = np.asarray(trajectories)
np.save("trajectories", trajectories)
np.save("history", self.history)
gen_trajectory = []
var_trj = np.zeros((trj_len, trj_len, 7), dtype=np.float32)
for w in range(trj_len):
gen_trajectory.append(trajectories[w,w,:])
gen_trajectory = np.asarray(gen_trajectory)
np.save("gen_trajectory", gen_trajectory)
self.sfp_history = []
self.req_step += 1
return (self.trj_gen.flatten().tolist(), self.trj_std.flatten().tolist(), self.timesteps, self.b_weights.flatten().tolist(), float(phase_value))
def idToText(self, id):
names = ["", "Yellow Small Round", "Red Small Round", "Green Small Round", "Blue Small Round", "Pink Small Round",
"Yellow Large Round", "Red Large Round", "Green Large Round", "Blue Large Round", "Pink Large Round",
"Yellow Small Square", "Red Small Square", "Green Small Square", "Blue Small Square", "Pink Small Square",
"Yellow Large Square", "Red Large Square", "Green Large Square", "Blue Large Square", "Pink Large Square",
"Cup Red", "Cup Green", "Cup Blue"]
return names[id]
def plotTrajectory(self, trj, error, image):
fig, ax = plt.subplots(3,3)
fig.set_size_inches(9, 9)
for sp in range(7):
idx = sp // 3
idy = sp % 3
ax[idx,idy].clear()
ax[idx,idy].plot(range(trj.shape[0]), trj[:,sp], alpha=0.5, color='mediumslateblue')
ax[idx,idy].errorbar(range(trj.shape[0]), trj[:,sp], xerr=None, yerr=error[:,sp], alpha=0.1, fmt='none', color='mediumslateblue')
ax[idx,idy].set_ylim([-0.1, 1.1])
ax[2,1].imshow(image)
def plotImageRegions(self, image_np, image_dict, atn):
# Visualization of the results of a detection.
tgt_object = np.argmax(atn)
num_detected = len([v for v in image_dict["detection_scores"][0] if v > 0.5])
num_detected = min(num_detected, len(atn))
for i in range(num_detected):
ymin, xmin, ymax, xmax = image_dict['detection_boxes'][0][i,:]
pt1 = (int(xmin*image_np.shape[1]), int(ymin*image_np.shape[0]))
pt2 = (int(xmax*image_np.shape[1]), int(ymax*image_np.shape[0]))
image_np = cv2.rectangle(image_np, pt1, pt2, (156, 2, 2), 1)
if i == tgt_object:
image_np = cv2.rectangle(image_np, pt1, pt2, (30, 156, 2), 2)
image_np = cv2.putText(image_np, "{:.1f}%".format(atn[i] * 100), (pt1[0]-10, pt1[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (30, 156, 2), 2, cv2.LINE_AA)
fig = plt.figure()
plt.imshow(image_np)
if __name__ == "__main__":
ot = NetworkService()
ot.runNode()
|
[
"cv2.rectangle",
"re.compile",
"rclpy.spin_once",
"rclpy.init",
"rclpy.create_node",
"numpy.save",
"matplotlib.pyplot.imshow",
"rclpy.ok",
"cv_bridge.CvBridgeError",
"model_src.model.PolicyTranslationModel",
"numpy.asarray",
"cv_bridge.boost.cv_bridge_boost.cvtColor2",
"cv_bridge.CvBridge",
"numpy.linspace",
"tensorflow.math.reduce_mean",
"tensorflow.convert_to_tensor",
"numpy.dtype",
"rclpy.shutdown",
"numpy.tile",
"numpy.ones",
"numpy.argmax",
"utils.tf_util.trainOnCPU",
"tensorflow.math.reduce_std",
"utils.intprim.gaussian_model.GaussianModel",
"numpy.copy",
"utils.tf_util.limitGPUMemory",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.ndarray",
"tensorflow.constant",
"numpy.expand_dims",
"matplotlib.pyplot.subplots"
] |
[((1287, 1366), 'model_src.model.PolicyTranslationModel', 'PolicyTranslationModel', ([], {'od_path': 'FRCNN_PATH', 'glove_path': 'GLOVE_PATH', 'special': 'None'}), '(od_path=FRCNN_PATH, glove_path=GLOVE_PATH, special=None)\n', (1309, 1366), False, 'from model_src.model import PolicyTranslationModel\n'), ((1196, 1208), 'utils.tf_util.trainOnCPU', 'trainOnCPU', ([], {}), '()\n', (1206, 1208), False, 'from utils.tf_util import trainOnCPU, limitGPUMemory\n'), ((1219, 1235), 'utils.tf_util.limitGPUMemory', 'limitGPUMemory', ([], {}), '()\n', (1233, 1235), False, 'from utils.tf_util import trainOnCPU, limitGPUMemory\n'), ((1402, 1435), 'numpy.ones', 'np.ones', (['(bs, 15)'], {'dtype': 'np.int64'}), '((bs, 15), dtype=np.int64)\n', (1409, 1435), True, 'import numpy as np\n'), ((1441, 1478), 'numpy.ones', 'np.ones', (['(bs, 6, 5)'], {'dtype': 'np.float32'}), '((bs, 6, 5), dtype=np.float32)\n', (1448, 1478), True, 'import numpy as np\n'), ((1484, 1523), 'numpy.ones', 'np.ones', (['(bs, 500, 7)'], {'dtype': 'np.float32'}), '((bs, 500, 7), dtype=np.float32)\n', (1491, 1523), True, 'import numpy as np\n'), ((1714, 1735), 're.compile', 're.compile', (['"""[^a-z ]"""'], {}), "('[^a-z ]')\n", (1724, 1735), False, 'import re\n'), ((1765, 1775), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1773, 1775), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1816, 1837), 'rclpy.init', 'rclpy.init', ([], {'args': 'None'}), '(args=None)\n', (1826, 1837), False, 'import rclpy\n'), ((1858, 1893), 'rclpy.create_node', 'rclpy.create_node', (['"""neural_network"""'], {}), "('neural_network')\n", (1875, 1893), False, 'import rclpy\n'), ((2151, 2161), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (2159, 2161), False, 'import rclpy\n'), ((2312, 2328), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2326, 2328), False, 'import rclpy\n'), ((3350, 3364), 'numpy.copy', 'np.copy', (['value'], {}), '(value)\n', (3357, 3364), True, 'import numpy as np\n'), ((3730, 3780), 'numpy.zeros', 'np.zeros', (['(target, trj.shape[1])'], {'dtype': 'np.float32'}), '((target, trj.shape[1]), dtype=np.float32)\n', (3738, 3780), True, 'import numpy as np\n'), ((4450, 4465), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4458, 4465), True, 'import numpy as np\n'), ((6491, 6533), 'numpy.asarray', 'np.asarray', (['self.history'], {'dtype': 'np.float32'}), '(self.history, dtype=np.float32)\n', (6501, 6533), True, 'import numpy as np\n'), ((9329, 9347), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), '(3, 3)\n', (9341, 9347), True, 'import matplotlib.pyplot as plt\n'), ((9948, 9962), 'numpy.argmax', 'np.argmax', (['atn'], {}), '(atn)\n', (9957, 9962), True, 'import numpy as np\n'), ((10744, 10756), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10754, 10756), True, 'import matplotlib.pyplot as plt\n'), ((10765, 10785), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_np'], {}), '(image_np)\n', (10775, 10785), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2201), 'rclpy.spin_once', 'rclpy.spin_once', (['self.node'], {}), '(self.node)\n', (2190, 2201), False, 'import rclpy\n'), ((3389, 3413), 'numpy.expand_dims', 'np.expand_dims', (['v_min', '(0)'], {}), '(v_min, 0)\n', (3403, 3413), True, 'import numpy as np\n'), ((3460, 3484), 'numpy.expand_dims', 'np.expand_dims', (['v_max', '(0)'], {}), '(v_max, 0)\n', (3474, 3484), True, 'import numpy as np\n'), ((4558, 4595), 'numpy.asarray', 'np.asarray', (['img_msg.data'], {'dtype': 'dtype'}), '(img_msg.data, dtype=dtype)\n', (4568, 4595), 
True, 'import numpy as np\n'), ((4694, 4772), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(img_msg.height, img_msg.width)', 'dtype': 'dtype', 'buffer': 'img_buf'}), '(shape=(img_msg.height, img_msg.width), dtype=dtype, buffer=img_buf)\n', (4704, 4772), True, 'import numpy as np\n'), ((4832, 4926), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(img_msg.height, img_msg.width, n_channels)', 'dtype': 'dtype', 'buffer': 'img_buf'}), '(shape=(img_msg.height, img_msg.width, n_channels), dtype=dtype,\n buffer=img_buf)\n', (4842, 4926), True, 'import numpy as np\n'), ((5225, 5274), 'cv_bridge.boost.cv_bridge_boost.cvtColor2', 'cvtColor2', (['im', 'img_msg.encoding', 'desired_encoding'], {}), '(im, img_msg.encoding, desired_encoding)\n', (5234, 5274), False, 'from cv_bridge.boost.cv_bridge_boost import cvtColor2\n'), ((7559, 7692), 'utils.intprim.gaussian_model.GaussianModel', 'GaussianModel', ([], {'degree': '(11)', 'scale': '(0.012)', 'observed_dof_names': "('Base', 'Shoulder', 'Ellbow', 'Wrist1', 'Wrist2', 'Wrist3', 'Gripper')"}), "(degree=11, scale=0.012, observed_dof_names=('Base',\n 'Shoulder', 'Ellbow', 'Wrist1', 'Wrist2', 'Wrist3', 'Gripper'))\n", (7572, 7692), False, 'from utils.intprim.gaussian_model import GaussianModel\n'), ((7708, 7752), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'trj_len'], {'dtype': 'np.float64'}), '(0, 1, trj_len, dtype=np.float64)\n', (7719, 7752), True, 'import numpy as np\n'), ((7965, 7989), 'numpy.asarray', 'np.asarray', (['trajectories'], {}), '(trajectories)\n', (7975, 7989), True, 'import numpy as np\n'), ((8002, 8039), 'numpy.save', 'np.save', (['"""trajectories"""', 'trajectories'], {}), "('trajectories', trajectories)\n", (8009, 8039), True, 'import numpy as np\n'), ((8052, 8084), 'numpy.save', 'np.save', (['"""history"""', 'self.history'], {}), "('history', self.history)\n", (8059, 8084), True, 'import numpy as np\n'), ((8147, 8196), 'numpy.zeros', 'np.zeros', (['(trj_len, trj_len, 7)'], {'dtype': 'np.float32'}), '((trj_len, trj_len, 7), dtype=np.float32)\n', (8155, 8196), True, 'import numpy as np\n'), ((8322, 8348), 'numpy.asarray', 'np.asarray', (['gen_trajectory'], {}), '(gen_trajectory)\n', (8332, 8348), True, 'import numpy as np\n'), ((8361, 8402), 'numpy.save', 'np.save', (['"""gen_trajectory"""', 'gen_trajectory'], {}), "('gen_trajectory', gen_trajectory)\n", (8368, 8402), True, 'import numpy as np\n'), ((10391, 10440), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'pt1', 'pt2', '(156, 2, 2)', '(1)'], {}), '(image_np, pt1, pt2, (156, 2, 2), 1)\n', (10404, 10440), False, 'import cv2\n'), ((3868, 3901), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'target'}), '(0.0, 1.0, num=target)\n', (3879, 3901), True, 'import numpy as np\n'), ((3903, 3944), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'current_length'}), '(0.0, 1.0, num=current_length)\n', (3914, 3944), True, 'import numpy as np\n'), ((5327, 5343), 'cv_bridge.CvBridgeError', 'CvBridgeError', (['e'], {}), '(e)\n', (5340, 5343), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((5779, 5824), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[image]'], {'dtype': 'tf.uint8'}), '([image], dtype=tf.uint8)\n', (5799, 5824), True, 'import tensorflow as tf\n'), ((6595, 6629), 'numpy.tile', 'np.tile', (['[self.language]', '[250, 1]'], {}), '([self.language], [250, 1])\n', (6602, 6629), True, 'import numpy as np\n'), ((6681, 6718), 'numpy.tile', 'np.tile', (['[self.features]', '[250, 1, 1]'], {}), '([self.features], [250, 1, 1])\n', (6688, 
6718), True, 'import numpy as np\n'), ((6771, 6800), 'numpy.tile', 'np.tile', (['[robot]', '[250, 1, 1]'], {}), '([robot], [250, 1, 1])\n', (6778, 6800), True, 'import numpy as np\n'), ((6913, 6931), 'tensorflow.constant', 'tf.constant', (['(False)'], {}), '(False)\n', (6924, 6931), True, 'import tensorflow as tf\n'), ((6945, 6962), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (6956, 6962), True, 'import tensorflow as tf\n'), ((6990, 7028), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['generated'], {'axis': '(0)'}), '(generated, axis=0)\n', (7009, 7028), True, 'import tensorflow as tf\n'), ((7063, 7100), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['generated'], {'axis': '(0)'}), '(generated, axis=0)\n', (7081, 7100), True, 'import tensorflow as tf\n'), ((7208, 7244), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (7227, 7244), True, 'import tensorflow as tf\n'), ((7280, 7314), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['phase'], {'axis': '(0)'}), '(phase, axis=0)\n', (7299, 7314), True, 'import tensorflow as tf\n'), ((10500, 10550), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'pt1', 'pt2', '(30, 156, 2)', '(2)'], {}), '(image_np, pt1, pt2, (30, 156, 2), 2)\n', (10513, 10550), False, 'import cv2\n'), ((6334, 6360), 'numpy.expand_dims', 'np.expand_dims', (['classes', '(1)'], {}), '(classes, 1)\n', (6348, 6360), True, 'import numpy as np\n'), ((7139, 7166), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['dmp_dt'], {}), '(dmp_dt)\n', (7158, 7166), True, 'import tensorflow as tf\n')]
|
import time
import queue
import sys
import numpy as np
from scipy import optimize as sci_opt
from .node import Node
from .utilities import branch, is_integral
class BNBTree:
def __init__(self, x, y, inttol=1e-4, reltol=1e-4):
"""
Initiate a BnB Tree to solve the least squares regression problem with
l0l2 regularization
Parameters
----------
x: np.array
n x p numpy array
y: np.array
1 dimensional numpy array of size n
inttol: float
The integral tolerance of a variable.
reltol: float
primal-dual relative tolerance
"""
self.x = x
self.y = y
self.inttol = inttol
self.reltol = reltol
self.xi_xi = np.sum(x * x, axis=0)
# The number of features
self.p = x.shape[1]
self.n = x.shape[0]
self.node_bfs_queue = queue.Queue()
self.node_dfs_queue = queue.LifoQueue()
self.levels = {}
# self.leaves = []
self.number_of_nodes = 0
self.root = None
def solve(self, l0, l2, m, gaptol=1e-2, warm_start=None, mu=0.95,
branching='maxfrac', l1solver='l1cd', number_of_dfs_levels=0,
verbose=False):
"""
Solve the least squares problem with l0l2 regularization
Parameters
----------
l0: float
The zeroth norm coefficient
l2: float
The second norm coefficient
m: float
features bound (big M)
gaptol: float
the relative gap between the upper and lower bound after which the
algorithm will be terminated
warm_start: np.array
(p x 1) array representing a warm start
branching: str
'maxfrac' or 'strong'
l1solver: str
'l1cd', 'gurobi' or 'mosek'
mu: float
Used with strong branching
number_of_dfs_levels: int
number of levels to solve as dfs
verbose: int
print progress
Returns
-------
tuple
uppersol, upperbound, lower_bound, best_gap, sol_time
"""
st = time.time()
if warm_start is None:
upperbound = sys.maxsize
uppersol = None
else:
if verbose:
print("using a warm start")
support = np.nonzero(warm_start)[0]
x_support = self.x[:, support]
x_ridge = np.sqrt(2 * l2) * np.identity(len(support))
x_upper = np.concatenate((x_support, x_ridge), axis=0)
y_upper = np.concatenate((self.y, np.zeros(len(support))), axis=0)
res = sci_opt.lsq_linear(x_upper, y_upper, (-m, m))
upperbound = res.cost + l0 * len(support)
uppersol = warm_start
uppersol[support] = res.x
if verbose:
print(f"initializing using a warm start took {time.time() - st}")
# upper and lower bounds
zlb = np.zeros(self.p)
zub = np.ones(self.p)
# root node
self.root = Node(None, zlb, zub, x=self.x, y=self.y, l0=l0, l2=l2, m=m,
xi_xi=self.xi_xi)
self.node_bfs_queue.put(self.root)
# lower and upper bounds initialization
lower_bound = {}
dual_bound = {}
self.levels = {0: 1}
min_open_level = 0
if verbose:
print(f'solving using {number_of_dfs_levels} dfs levels')
while self.node_bfs_queue.qsize() > 0 or self.node_dfs_queue.qsize() > 0:
# get node
if self.node_dfs_queue.qsize() > 0:
current_node = self.node_dfs_queue.get()
else:
current_node = self.node_bfs_queue.get()
# print(current_node.level, upperbound, self.levels)
# prune?
if current_node.parent_cost and upperbound <= \
current_node.parent_cost:
self.levels[current_node.level] -= 1
# self.leaves.append(current_node)
continue
# calculate lower bound and update
self.number_of_nodes += 1
current_lower_bound, current_dual_cost = current_node.\
lower_solve(l1solver, self.reltol, self.inttol)
lower_bound[current_node.level] = \
min(current_lower_bound,
lower_bound.get(current_node.level, sys.maxsize))
dual_bound[current_node.level] = \
min(current_dual_cost,
dual_bound.get(current_node.level, sys.maxsize))
self.levels[current_node.level] -= 1
# update gap?
if self.levels[min_open_level] == 0:
del self.levels[min_open_level]
min_value = max([j for i, j in dual_bound.items()
if i <= min_open_level])
best_gap = (upperbound - min_value)/abs(upperbound)
if verbose:
print(f'l: {min_open_level}, (d: {min_value}, '
f'p: {lower_bound[min_open_level]}), u: {upperbound},'
f' g: {best_gap}, t: {time.time() - st} s')
# arrived at a solution?
if best_gap <= gaptol:
# self.leaves += [current_node] + \
# list(self.node_bfs_queue.queue) + \
# list(self.node_dfs_queue.queue)
return uppersol, upperbound, lower_bound, best_gap, \
time.time() - st
min_open_level += 1
# integral solution?
if is_integral(current_node.lower_bound_z, self.inttol):
current_upper_bound = current_lower_bound
if current_upper_bound < upperbound:
upperbound = current_upper_bound
uppersol = current_node.lower_bound_solution
# self.leaves.append(current_node)
if verbose:
                    print('integral:', current_node)
# branch?
elif current_dual_cost < upperbound:
current_upper_bound = current_node.upper_solve()
if current_upper_bound < upperbound:
upperbound = current_upper_bound
uppersol = current_node.upper_bound_solution
left_node, right_node = branch(current_node, self.x, l0, l2, m,
self.xi_xi, self.inttol,
branching, mu)
self.levels[current_node.level + 1] = \
self.levels.get(current_node.level + 1, 0) + 2
if current_node.level < min_open_level + number_of_dfs_levels:
self.node_dfs_queue.put(right_node)
self.node_dfs_queue.put(left_node)
else:
self.node_bfs_queue.put(right_node)
self.node_bfs_queue.put(left_node)
# prune?
else:
pass
# self.leaves.append(current_node)
min_value = max([j for i, j in dual_bound.items()
if i <= min_open_level])
best_gap = (upperbound - min_value)/abs(upperbound)
return uppersol, upperbound, lower_bound, best_gap, time.time() - st
# def get_lower_optimal_node(self):
# self.leaves = sorted(self.leaves)
# if self.leaves[-1].lower_bound_value:
# return self.leaves[-1]
# else:
# return self.leaves[-1].parent
#
# @staticmethod
# def support_list(current_node):
# list_ = []
# while current_node:
# list_.append(current_node.support)
# current_node = current_node.parent
# return list_
#
# def optimal_support_list(self):
# list_ = []
# current_node = self.get_lower_optimal_node()
# while current_node:
# list_.append(current_node.support)
# current_node = current_node.parent
# return list_
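# Hedged usage sketch (not part of the original module; the regularization
# values are illustrative only):
#   tree = BNBTree(x, y)
#   beta, upper_bound, lower_bounds, gap, elapsed = tree.solve(
#       l0=0.1, l2=0.01, m=5.0, verbose=True)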
|
[
"numpy.sqrt",
"numpy.ones",
"numpy.sum",
"queue.LifoQueue",
"numpy.zeros",
"scipy.optimize.lsq_linear",
"numpy.concatenate",
"numpy.nonzero",
"queue.Queue",
"time.time"
] |
[((776, 797), 'numpy.sum', 'np.sum', (['(x * x)'], {'axis': '(0)'}), '(x * x, axis=0)\n', (782, 797), True, 'import numpy as np\n'), ((919, 932), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (930, 932), False, 'import queue\n'), ((963, 980), 'queue.LifoQueue', 'queue.LifoQueue', ([], {}), '()\n', (978, 980), False, 'import queue\n'), ((2217, 2228), 'time.time', 'time.time', ([], {}), '()\n', (2226, 2228), False, 'import time\n'), ((3045, 3061), 'numpy.zeros', 'np.zeros', (['self.p'], {}), '(self.p)\n', (3053, 3061), True, 'import numpy as np\n'), ((3076, 3091), 'numpy.ones', 'np.ones', (['self.p'], {}), '(self.p)\n', (3083, 3091), True, 'import numpy as np\n'), ((2586, 2630), 'numpy.concatenate', 'np.concatenate', (['(x_support, x_ridge)'], {'axis': '(0)'}), '((x_support, x_ridge), axis=0)\n', (2600, 2630), True, 'import numpy as np\n'), ((2728, 2773), 'scipy.optimize.lsq_linear', 'sci_opt.lsq_linear', (['x_upper', 'y_upper', '(-m, m)'], {}), '(x_upper, y_upper, (-m, m))\n', (2746, 2773), True, 'from scipy import optimize as sci_opt\n'), ((2429, 2451), 'numpy.nonzero', 'np.nonzero', (['warm_start'], {}), '(warm_start)\n', (2439, 2451), True, 'import numpy as np\n'), ((2520, 2535), 'numpy.sqrt', 'np.sqrt', (['(2 * l2)'], {}), '(2 * l2)\n', (2527, 2535), True, 'import numpy as np\n'), ((7490, 7501), 'time.time', 'time.time', ([], {}), '()\n', (7499, 7501), False, 'import time\n'), ((2978, 2989), 'time.time', 'time.time', ([], {}), '()\n', (2987, 2989), False, 'import time\n'), ((5654, 5665), 'time.time', 'time.time', ([], {}), '()\n', (5663, 5665), False, 'import time\n'), ((5253, 5264), 'time.time', 'time.time', ([], {}), '()\n', (5262, 5264), False, 'import time\n')]
|
import argparse
import cv2
import time
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
"""
Wraps the skeleton-estimation interface provided by the tf-openpose project.
"""
class TFPOSE:
def __init__(self):
        # 0. parameters
        self.fps_time = 0
        self.frame_count = 0
        # 1. parse arguments
        self.parseArgs()
        # 2. print arguments
        self.printArgs()
        # 3. create the tf-pose estimator instance
self.w, self.h = model_wh(self.args.resize)
self.e = TfPoseEstimator(get_graph_path(self.args.model), target_size=(self.w, self.h))
def parseArgs(self):
"""解析参数"""
parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
parser.add_argument('--video', type=str, default=0,
help='if provided, set the video path')
parser.add_argument('--isoutput', type=bool, default=False,
help='whether write to file')
parser.add_argument('--output', type=str, default='test.avi',
help='if provided, set the output video path')
parser.add_argument('--isorigin', type=bool, default=False,
help='whether output origin img')
parser.add_argument('--resize', type=str, default='432x368',
help='if provided, resize images before they are processed. default=256x256, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser.add_argument('--model', type=str, default='mobilenet_v2_large',
help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
        # command-line argument parsing
self.args = parser.parse_args()
def printArgs(self):
"""输出参数"""
print('获取的参数如下:')
print('video-视频: %s' % (self.args.video))
print('resize-重写图片大小: %s' % (self.args.resize))
print('resize-out-ratio-重写关键点热图大小: %s' % (self.args.resize_out_ratio))
print('show-process-是否展示过程: %s' % (self.args.show_process))
print('model-模型: %s, 模型路径: %s' % (self.args.model, get_graph_path(self.args.model)))
def setArgsVideo(self, video):
"""设置video参数"""
self.args.__setattr__('video', video)
def setArgsIsOrigin(self, isorigin):
"""设置isorigin参数"""
self.args.__setattr__('isorigin', isorigin)
def setArgsIsOutput(self, isoutput):
"""设置isorigin参数"""
self.args.__setattr__('isoutput', isoutput)
def initVideo(self):
"""
        Initialize the video capture and, optionally, the output video writer
        """
        print('Opening video')
        self.cam = cv2.VideoCapture(self.args.video)
        self.ret_val, self.image = self.cam.read() # read the first frame; ret_val is a bool
        self.frame_count = 0 # reset the frame counter because the video source may change
        # write the result to a file?
        if self.args.isoutput :
            fps = self.cam.get(cv2.CAP_PROP_FPS) # frame rate of the source video
            fourcc = cv2.VideoWriter_fourcc(*'XVID') # save the video with MPEG-4 (XVID) encoding
            frame_size = (int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            self.videoWriter = cv2.VideoWriter(self.args.output, fourcc, fps, frame_size)
            print('Source video: frame shape %s, fps %s, frame size %s' % (self.image.shape, fps, frame_size))
def getHumans(self):
humans = self.e.inference(self.image, resize_to_default=(self.w > 0 and self.h > 0), upsample_size=self.args.resize_out_ratio)
return humans
def getNextFrame(self):
"""获取下一帧的图片"""
self.ret_val, self.image = self.cam.read()
self.frame_count += 1
return self.ret_val
def hasNextFrame(self):
"""是否还有下一帧"""
return self.ret_val
def getFrameCount(self):
"""获取帧数"""
return self.frame_count
def runOnce(self):
"""
        Run inference once, i.e. estimate poses for one frame, and return the rendered cv2 image
"""
fps_time = time.time()
        # run pose estimation on the current frame
        print('Processing frame...')
        humans = self.getHumans()
        # draw the detected keypoints
        print('Drawing...')
        if self.args.isorigin :
            # draw on top of the original image
            pose_img = TfPoseEstimator.draw_humans(np.array(self.image), humans, imgcopy=False)
        else:
            # draw on a black image instead of the original
emptyImage = np.zeros(self.image.shape, np.uint8)
emptyImage[...] = 0
pose_img = TfPoseEstimator.draw_humans(emptyImage, humans, imgcopy=False)
# cv2.putText(pose_img, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # optionally write the rendered frame to the output file
if self.args.isoutput :
self.videoWriter.write(pose_img)
return pose_img, humans
if __name__ == '__main__':
TFPOSE()
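    # Hedged usage sketch (assumption; mirrors the methods defined above):
    #   pose = TFPOSE()
    #   pose.initVideo()
    #   while pose.hasNextFrame():
    #       img, humans = pose.runOnce()
    #       pose.getNextFrame()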
|
[
"argparse.ArgumentParser",
"tf_pose.networks.get_graph_path",
"tf_pose.estimator.TfPoseEstimator.draw_humans",
"cv2.VideoWriter",
"numpy.array",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"tf_pose.networks.model_wh",
"time.time"
] |
[((443, 469), 'tf_pose.networks.model_wh', 'model_wh', (['self.args.resize'], {}), '(self.args.resize)\n', (451, 469), False, 'from tf_pose.networks import get_graph_path, model_wh\n'), ((628, 701), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""tf-pose-estimation realtime webcam"""'}), "(description='tf-pose-estimation realtime webcam')\n", (651, 701), False, 'import argparse\n'), ((2915, 2948), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.args.video'], {}), '(self.args.video)\n', (2931, 2948), False, 'import cv2\n'), ((4149, 4160), 'time.time', 'time.time', ([], {}), '()\n', (4158, 4160), False, 'import time\n'), ((503, 534), 'tf_pose.networks.get_graph_path', 'get_graph_path', (['self.args.model'], {}), '(self.args.model)\n', (517, 534), False, 'from tf_pose.networks import get_graph_path, model_wh\n'), ((3200, 3231), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (3222, 3231), False, 'import cv2\n'), ((3397, 3455), 'cv2.VideoWriter', 'cv2.VideoWriter', (['self.args.output', 'fourcc', 'fps', 'frame_size'], {}), '(self.args.output, fourcc, fps, frame_size)\n', (3412, 3455), False, 'import cv2\n'), ((4483, 4519), 'numpy.zeros', 'np.zeros', (['self.image.shape', 'np.uint8'], {}), '(self.image.shape, np.uint8)\n', (4491, 4519), True, 'import numpy as np\n'), ((4575, 4637), 'tf_pose.estimator.TfPoseEstimator.draw_humans', 'TfPoseEstimator.draw_humans', (['emptyImage', 'humans'], {'imgcopy': '(False)'}), '(emptyImage, humans, imgcopy=False)\n', (4602, 4637), False, 'from tf_pose.estimator import TfPoseEstimator\n'), ((4379, 4399), 'numpy.array', 'np.array', (['self.image'], {}), '(self.image)\n', (4387, 4399), True, 'import numpy as np\n'), ((2438, 2469), 'tf_pose.networks.get_graph_path', 'get_graph_path', (['self.args.model'], {}), '(self.args.model)\n', (2452, 2469), False, 'from tf_pose.networks import get_graph_path, model_wh\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description: Choose a set of data points as weights and calculate RBF nodes for the
first layer. Those are then used as inputs for a one-layer perceptron, which gives the
output
"""
import numpy as np
import pcn
class rbf:
""" radial basic function """
def __init__(self,inputs,targets,nRBF,sigma=0,normalise=0,eta=0.25,functype='sigmoid',traintype='batch'):
""" constructor """
self.inputs = inputs
self.targets = targets
self.nRBF = nRBF #number of RBF nodes
self.normalise = normalise
self.eta = eta #learning rate
self.functype = functype
self.traintype = traintype
#set width of gaussian
if sigma==0:
d = (self.inputs.max(axis=0)-self.inputs.min(axis=0)).max()
self.sigma = d/np.sqrt(2*nRBF)
else:
self.sigma = sigma
#input array of RBF nodes
self.hidden = np.zeros((np.shape(self.inputs)[0],self.nRBF))
#set RBF weights to be random datapoints
self.weights = np.zeros((np.shape(inputs)[1],self.nRBF))
indices = np.arange(np.shape(self.inputs)[0])
np.random.shuffle(indices)
for i in range(self.nRBF):
self.weights[:,i] = self.inputs[indices[i],:]
#calculate the hidden rbf nodes (first layer)
self.hidden = self.rbffwd(self.inputs,1)
        #initialise the perceptron for the second layer
self.perceptron = pcn.pcn(self.hidden,self.targets,self.eta,self.functype,self.traintype)
def errfunc(self,outputs,targets):
""" error function """
E = 1/2*np.trace(np.dot(np.transpose(targets-outputs),targets-outputs))
return E
def rbftrain(self,nIt=100):
""" training the network """
#train perceptron
self.perceptron.pcntrain(nIt)
def rbftrain_automatic(self,valid,validt,itSteps):
""" train the perceptron until the error on the validation data increases """
#calculate the hidden rbf nodes (first layer)
rbfvalid = self.rbffwd(valid,1)
trainerror = np.array([])
validerror = np.array([])
(trainerror,validerror) = self.perceptron.pcntrain_automatic(rbfvalid,validt,itSteps)
return trainerror,validerror
def rbffwd(self,inputs,layer):
""" run the network forward """
#rbf nodes
hidden = np.zeros((np.shape(inputs)[0],self.nRBF))
#calculate gaussian overlap of input with weights
for i in range(self.nRBF):
hidden[:,i] = np.exp(-np.sum((inputs - np.ones((1,np.shape(inputs)[1]))*self.weights[:,i])**2,axis=1)/(2*self.sigma**2))
#normalise RBF layer
if self.normalise:
hidden[:,:] /= np.transpose(np.ones((1,np.shape(hidden)[0]))*hidden[:,:].sum(axis=1))
#output of hidden (rbf) layer
outputs = hidden
#output of perceptron layer
if layer == 2:
outputs = self.perceptron.pcnfwd(hidden,True)
return outputs
def confmat(self,inputs,targets):
""" confusion matrix to evaluate the performance of the network """
#calculate hidden nodes
hidden = self.rbffwd(inputs,1)
#confusion matrix of perceptron
self.perceptron.confmat(hidden,targets)
return 0
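# A minimal usage sketch (added example, not part of the original module): fit the
# network on a toy two-class problem. The shapes assumed here, inputs of shape (N, d)
# and one-hot targets of shape (N, k), follow how `inputs` and `targets` are indexed
# above; the companion `pcn` module must be importable for this to run.
if __name__ == '__main__':
    np.random.seed(0)
    class0 = np.random.randn(50, 2) * 0.3 - 1.0
    class1 = np.random.randn(50, 2) * 0.3 + 1.0
    inputs = np.concatenate([class0, class1], axis=0)
    targets = np.concatenate([np.tile([1, 0], (50, 1)), np.tile([0, 1], (50, 1))], axis=0)
    net = rbf(inputs, targets, nRBF=5)
    net.rbftrain(nIt=200)
    net.confmat(inputs, targets)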
|
[
"numpy.sqrt",
"pcn.pcn",
"numpy.array",
"numpy.shape",
"numpy.transpose",
"numpy.random.shuffle"
] |
[((1237, 1263), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1254, 1263), True, 'import numpy as np\n'), ((1552, 1627), 'pcn.pcn', 'pcn.pcn', (['self.hidden', 'self.targets', 'self.eta', 'self.functype', 'self.traintype'], {}), '(self.hidden, self.targets, self.eta, self.functype, self.traintype)\n', (1559, 1627), False, 'import pcn\n'), ((2213, 2225), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2221, 2225), True, 'import numpy as np\n'), ((2247, 2259), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2255, 2259), True, 'import numpy as np\n'), ((869, 886), 'numpy.sqrt', 'np.sqrt', (['(2 * nRBF)'], {}), '(2 * nRBF)\n', (876, 886), True, 'import numpy as np\n'), ((1203, 1224), 'numpy.shape', 'np.shape', (['self.inputs'], {}), '(self.inputs)\n', (1211, 1224), True, 'import numpy as np\n'), ((1015, 1036), 'numpy.shape', 'np.shape', (['self.inputs'], {}), '(self.inputs)\n', (1023, 1036), True, 'import numpy as np\n'), ((1143, 1159), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (1151, 1159), True, 'import numpy as np\n'), ((1727, 1758), 'numpy.transpose', 'np.transpose', (['(targets - outputs)'], {}), '(targets - outputs)\n', (1739, 1758), True, 'import numpy as np\n'), ((2540, 2556), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (2548, 2556), True, 'import numpy as np\n'), ((2907, 2923), 'numpy.shape', 'np.shape', (['hidden'], {}), '(hidden)\n', (2915, 2923), True, 'import numpy as np\n'), ((2728, 2744), 'numpy.shape', 'np.shape', (['inputs'], {}), '(inputs)\n', (2736, 2744), True, 'import numpy as np\n')]
|
"""
Convert data and then visualize
Data Manipulation
1. Save metrics for validation and test data
Save figures
1. Loss curve
2. plume dispersion and errors
3. metrics
"""
import pathlib
import numpy as np
import xarray as xr
from numpy import ma
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.style
from matplotlib.colors import LogNorm
from ._base_postscript import _BasePostscripts
from .metrics import get_metric
class CityTransformerPostscripts(_BasePostscripts):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_name = 'CityTransformer'
self.modes = ['val', 'test']
self.threshold = 0.5
self.clip = 1.e-8
self.alpha = 0.9
self.vmin = self.clip
self.vmax = 1.0
self.nb_bins = 100
self.fig_names = ['loss', 'contour', 'metrics']
self.extent = [-1024,1024,-1024,1024]
self.metrics = {'FAC2',
'FAC5',
'MG',
'VG',
'NAD',
'FB',
}
# Matplotlib settings
mpl.style.use('classic')
fontsize = 28
self.fontsize = fontsize
fontname = 'Times New Roman'
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
plt.rc('font', family=fontname)
self.title_font = {'fontname':fontname, 'size':fontsize, 'color':'black',
'verticalalignment':'bottom'}
self.axis_font = {'fontname':fontname, 'size':fontsize}
def __preprocess(self, epoch):
for mode in self.modes:
all_metrics = {metric_name: [] for metric_name in self.metrics}
nb_shots = self.nb_shots_dict[mode]
for i in range(nb_shots):
filename = pathlib.Path(self.inference_dir) / mode / f'{mode}{i:06}_epoch{epoch:04}.nc'
ds = xr.open_dataset(filename)
levelset = ds['levelset'].values
# Target metrics
metric_dict = {'FAC2': {'factor': 2, 'levelset': levelset},
'FAC5': {'factor': 5, 'levelset': levelset},
'MG': {'levelset': levelset},
'VG': {'levelset': levelset},
'NAD': {'levelset': levelset},
'FB': {'levelset': levelset},
}
evaluated_metrics = self.__evaluate_metrics(ds, metric_dict=metric_dict)
for metric_name in metric_dict.keys():
all_metrics[metric_name].append(evaluated_metrics[metric_name])
# Saving dataset
data_vars = {}
for metric_name, evaluated_values in all_metrics.items():
data_vars[metric_name] = (['shot_idx'], np.asarray(evaluated_values))
coords = {'shot_idx': np.arange(nb_shots)}
filename = self.data_dir / f'{mode}_epoch{epoch:04}.nc'
ds = xr.Dataset(data_vars=data_vars, coords=coords)
ds.to_netcdf(filename)
def __evaluate_metrics(self, ds, metric_dict):
evaluated_metrics = {}
pred, pred_binary = ds['pred_plume'].values.squeeze(), ds['pred_zeros_map'].values
ref, ref_binary = ds['ref_plume'].values.squeeze(), ds['ref_zeros_map'].values
levelset = ds['levelset'].values
pred = self.__mask_img(img=pred, binary=pred_binary, levelset=levelset, threshold=self.threshold, clip=self.clip)
ref = self.__mask_img(img=ref, binary=ref_binary, levelset=levelset, threshold=self.threshold, clip=self.clip)
for metric_name, kwargs in metric_dict.items():
metric = get_metric(metric_name)(**kwargs)
evaluated_metrics[metric_name] = metric.evaluate(pred, ref)
return evaluated_metrics
def __mask_img(self, img, binary, levelset, threshold, clip, apply_mask=False):
img, binary = np.squeeze(img), np.squeeze(binary)
mask = np.logical_or(binary<threshold, levelset >= 0.)
img = 10**img
img = np.where(mask, -1., img) * clip
if apply_mask:
return ma.masked_where(img <= 0, img)
else:
return img
def __classification_by_factor(self, pred, ref, levelset, threshold, clip):
"""
factor2 == 0
factor5 == 0.5
factor5++ == 1.0
"""
if type(pred) is tuple:
pred, pred_binary = pred
ref, ref_binary = ref
# Create mask based on zeros map and levelset
def mask_on_img(img, binary):
mask = np.logical_or(binary < threshold, levelset >= 0.)
img = 10**img
img = np.where(mask, -1, img) * clip
return img
pred = mask_on_img(pred, pred_binary)
ref = mask_on_img(ref, ref_binary)
factor = np.ones_like(ref) # Default 1.0
target_area = np.logical_and(ref > 0., levelset < 0)
fraction = np.where(target_area, pred/ref, 0)
fac2_area = np.logical_and( fraction >= 1/2., fraction <= 2. )
fac5_area = np.logical_and( fraction >= 1/5., fraction <= 5. )
fac2_area = np.logical_and(target_area, fac2_area)
fac5_area = np.logical_and(target_area, fac5_area)
factor[fac5_area] = np.ones_like(ref)[fac5_area] * 0.5
factor[fac2_area] = np.zeros_like(ref)[fac2_area]
correct_zeros = np.logical_and(pred_binary < 0.5, ref_binary < 0.5)
masked_fraction = ma.masked_where(np.logical_or(correct_zeros, levelset >= 0.), factor)
return masked_fraction
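    # Worked illustration of the encoding above (added comment): with ref = 1.0, a
    # prediction of 0.6 gives fraction 0.6, which lies in [1/2, 2] and is encoded as 0;
    # a prediction of 0.3 lies in [1/5, 5] only and is encoded as 0.5; a prediction of
    # 0.05 falls outside both bands and keeps the default encoding of 1.0.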
def _visualize(self, epoch):
self.data_dir = self.img_dir / 'metrics/data'
if not self.data_dir.exists():
self.data_dir.mkdir(parents=True)
super()._visualize_loss()
self.__preprocess(epoch)
self.__visualize_plume_dispersion(epoch)
self.__visualize_metrics(epoch)
def __visualize_plume_dispersion(self, epoch):
figsize = (8, 8)
for mode in self.modes:
nb_shots = self.nb_shots_dict[mode]
for i in range(nb_shots):
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=figsize,
subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.05))
axes[1, 0].set_visible(False)
filename = pathlib.Path(self.inference_dir) / mode / f'{mode}{i:06}_epoch{epoch:04}.nc'
ds = xr.open_dataset(filename)
levelset = ds['levelset'].values
x, y = ds.attrs['release_x'], ds.attrs['release_y']
# apply masks
pred, pred_binary = ds['pred_plume'].values.squeeze(), ds['pred_zeros_map'].values
ref, ref_binary = ds['ref_plume'].values.squeeze(), ds['ref_zeros_map'].values
levelset = ds['levelset'].values
factor = self.__classification_by_factor((pred, pred_binary), (ref, ref_binary), levelset=levelset, threshold=self.threshold, clip=self.clip)
masked_pred = self.__mask_img(img=pred, binary=pred_binary, levelset=levelset, threshold=self.threshold, clip=self.clip, apply_mask=True)
masked_ref = self.__mask_img(img=ref, binary=ref_binary, levelset=levelset, threshold=self.threshold, clip=self.clip, apply_mask=True)
# Plotting the ground truth and prediction
im = axes[0, 0].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
im = axes[0, 0].imshow(masked_ref, cmap='coolwarm', origin='lower', extent=self.extent, norm=LogNorm(vmin=self.vmin, vmax=self.vmax), alpha=self.alpha, interpolation='none')
axes[0, 0].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
im = axes[0, 1].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
im = axes[0, 1].imshow(masked_pred, cmap='coolwarm', origin='lower', extent=self.extent, norm=LogNorm(vmin=self.vmin, vmax=self.vmax), alpha=self.alpha, interpolation='none')
axes[0, 1].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
# Plotting the factor map
im2 = axes[1, 1].imshow(levelset < 0., cmap='gray', origin='lower', extent=self.extent, interpolation='none')
im2 = axes[1, 1].imshow(factor, cmap='jet', origin='lower', extent=self.extent, vmin=0, vmax=1, alpha=self.alpha, interpolation='none')
axes[1, 1].plot(x, y, color='none', marker='*', markeredgecolor='g', markeredgewidth=2, markersize=12)
axes[0, 0].set_title('Ground Truth', **self.title_font)
axes[0, 1].set_title(f'{self.arch_name}', **self.title_font)
cbar = fig.colorbar(im, ax=axes[0, :])
cbar2 = fig.colorbar(im2, ax=axes[1, :])
cbar2.remove()
figname = self.img_dir / 'contour' / f'log_{mode}{i:06}_epoch{epoch:04}.png'
plt.savefig(figname, bbox_inches='tight')
plt.close('all')
def __visualize_metrics(self, epoch):
figsize = (20, 12)
plot_dict = {}
# key: metric_name, value: xmin, xmax, ymin, ymax, label
# xmin, xmax are also used to make histogram
plot_dict['FAC2'] = (0, 1, 0, 0.05, 'FAC_2')
plot_dict['FAC5'] = (0, 1, 0, 0.1, 'FAC_5')
plot_dict['FB'] = (-2, 2, 0, 0.05, 'FB')
plot_dict['NAD'] = (0, 0.15, 0, 0.15, 'NAD')
plot_dict['MG'] = (0, 2, 0, 0.1, 'MG')
plot_dict['VG'] = (1, 1.15, 0, 0.5, 'VG')
metric_names = plot_dict.keys()
for mode in self.modes:
filename = self.data_dir / f'{mode}_epoch{epoch:04}.nc'
ds = xr.open_dataset(filename)
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=figsize)
for metric_name, ax in zip(metric_names, axes.flatten()):
xmin, xmax, ymin, ymax, label = plot_dict[metric_name]
bins = np.linspace(xmin, xmax, self.nb_bins)
metric = ds[metric_name].values
weights = np.ones_like(metric) / len(metric)
_hist, _bins, _patches = ax.hist(metric, bins=bins, alpha=0.5, weights=weights, label=self.arch_name)
average = np.mean( np.abs(metric) )
std = np.std( np.abs(metric) )
print(f'model: {self.arch_name}, metric_name: {metric_name}, average: {average}, std: {std}')
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.set_title(metric_name, **self.title_font)
ax.legend(loc='upper right', prop={'size': self.fontsize*0.6})
ax.grid(ls='dashed', lw=1)
figname = self.img_dir / 'metrics' / f'metric_{self.arch_name}.png'
plt.savefig(figname, bbox_inches='tight')
plt.close('all')
|
[
"matplotlib.style.use",
"numpy.arange",
"matplotlib.colors.LogNorm",
"pathlib.Path",
"numpy.where",
"numpy.asarray",
"numpy.ma.masked_where",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.abs",
"matplotlib.pyplot.savefig",
"xarray.Dataset",
"numpy.squeeze",
"xarray.open_dataset",
"matplotlib.pyplot.rc",
"numpy.ones_like",
"numpy.logical_and",
"numpy.logical_or",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((1179, 1203), 'matplotlib.style.use', 'mpl.style.use', (['"""classic"""'], {}), "('classic')\n", (1192, 1203), True, 'import matplotlib as mpl\n'), ((1304, 1339), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'fontsize'}), "('xtick', labelsize=fontsize)\n", (1310, 1339), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1383), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'fontsize'}), "('ytick', labelsize=fontsize)\n", (1354, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1423), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': 'fontname'}), "('font', family=fontname)\n", (1398, 1423), True, 'import matplotlib.pyplot as plt\n'), ((4132, 4182), 'numpy.logical_or', 'np.logical_or', (['(binary < threshold)', '(levelset >= 0.0)'], {}), '(binary < threshold, levelset >= 0.0)\n', (4145, 4182), True, 'import numpy as np\n'), ((5019, 5036), 'numpy.ones_like', 'np.ones_like', (['ref'], {}), '(ref)\n', (5031, 5036), True, 'import numpy as np\n'), ((5074, 5113), 'numpy.logical_and', 'np.logical_and', (['(ref > 0.0)', '(levelset < 0)'], {}), '(ref > 0.0, levelset < 0)\n', (5088, 5113), True, 'import numpy as np\n'), ((5132, 5168), 'numpy.where', 'np.where', (['target_area', '(pred / ref)', '(0)'], {}), '(target_area, pred / ref, 0)\n', (5140, 5168), True, 'import numpy as np\n'), ((5188, 5240), 'numpy.logical_and', 'np.logical_and', (['(fraction >= 1 / 2.0)', '(fraction <= 2.0)'], {}), '(fraction >= 1 / 2.0, fraction <= 2.0)\n', (5202, 5240), True, 'import numpy as np\n'), ((5259, 5311), 'numpy.logical_and', 'np.logical_and', (['(fraction >= 1 / 5.0)', '(fraction <= 5.0)'], {}), '(fraction >= 1 / 5.0, fraction <= 5.0)\n', (5273, 5311), True, 'import numpy as np\n'), ((5340, 5378), 'numpy.logical_and', 'np.logical_and', (['target_area', 'fac2_area'], {}), '(target_area, fac2_area)\n', (5354, 5378), True, 'import numpy as np\n'), ((5399, 5437), 'numpy.logical_and', 'np.logical_and', (['target_area', 'fac5_area'], {}), '(target_area, fac5_area)\n', (5413, 5437), True, 'import numpy as np\n'), ((5609, 5660), 'numpy.logical_and', 'np.logical_and', (['(pred_binary < 0.5)', '(ref_binary < 0.5)'], {}), '(pred_binary < 0.5, ref_binary < 0.5)\n', (5623, 5660), True, 'import numpy as np\n'), ((3113, 3159), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': 'data_vars', 'coords': 'coords'}), '(data_vars=data_vars, coords=coords)\n', (3123, 3159), True, 'import xarray as xr\n'), ((4080, 4095), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (4090, 4095), True, 'import numpy as np\n'), ((4097, 4115), 'numpy.squeeze', 'np.squeeze', (['binary'], {}), '(binary)\n', (4107, 4115), True, 'import numpy as np\n'), ((4217, 4242), 'numpy.where', 'np.where', (['mask', '(-1.0)', 'img'], {}), '(mask, -1.0, img)\n', (4225, 4242), True, 'import numpy as np\n'), ((4292, 4322), 'numpy.ma.masked_where', 'ma.masked_where', (['(img <= 0)', 'img'], {}), '(img <= 0, img)\n', (4307, 4322), False, 'from numpy import ma\n'), ((4762, 4812), 'numpy.logical_or', 'np.logical_or', (['(binary < threshold)', '(levelset >= 0.0)'], {}), '(binary < threshold, levelset >= 0.0)\n', (4775, 4812), True, 'import numpy as np\n'), ((5546, 5564), 'numpy.zeros_like', 'np.zeros_like', (['ref'], {}), '(ref)\n', (5559, 5564), True, 'import numpy as np\n'), ((5703, 5748), 'numpy.logical_or', 'np.logical_or', (['correct_zeros', '(levelset >= 0.0)'], {}), '(correct_zeros, levelset >= 0.0)\n', (5716, 5748), True, 'import numpy as np\n'), ((10125, 10150), 'xarray.open_dataset', 
'xr.open_dataset', (['filename'], {}), '(filename)\n', (10140, 10150), True, 'import xarray as xr\n'), ((10176, 10223), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': 'figsize'}), '(nrows=2, ncols=3, figsize=figsize)\n', (10188, 10223), True, 'import matplotlib.pyplot as plt\n'), ((11223, 11264), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""'}), "(figname, bbox_inches='tight')\n", (11234, 11264), True, 'import matplotlib.pyplot as plt\n'), ((11277, 11293), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11286, 11293), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2016), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (2006, 2016), True, 'import xarray as xr\n'), ((3007, 3026), 'numpy.arange', 'np.arange', (['nb_shots'], {}), '(nb_shots)\n', (3016, 3026), True, 'import numpy as np\n'), ((4856, 4879), 'numpy.where', 'np.where', (['mask', '(-1)', 'img'], {}), '(mask, -1, img)\n', (4864, 4879), True, 'import numpy as np\n'), ((5483, 5500), 'numpy.ones_like', 'np.ones_like', (['ref'], {}), '(ref)\n', (5495, 5500), True, 'import numpy as np\n'), ((6693, 6718), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (6708, 6718), True, 'import xarray as xr\n'), ((9369, 9410), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'bbox_inches': '"""tight"""'}), "(figname, bbox_inches='tight')\n", (9380, 9410), True, 'import matplotlib.pyplot as plt\n'), ((9427, 9443), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9436, 9443), True, 'import matplotlib.pyplot as plt\n'), ((10388, 10425), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'self.nb_bins'], {}), '(xmin, xmax, self.nb_bins)\n', (10399, 10425), True, 'import numpy as np\n'), ((2942, 2970), 'numpy.asarray', 'np.asarray', (['evaluated_values'], {}), '(evaluated_values)\n', (2952, 2970), True, 'import numpy as np\n'), ((10500, 10520), 'numpy.ones_like', 'np.ones_like', (['metric'], {}), '(metric)\n', (10512, 10520), True, 'import numpy as np\n'), ((10688, 10702), 'numpy.abs', 'np.abs', (['metric'], {}), '(metric)\n', (10694, 10702), True, 'import numpy as np\n'), ((10735, 10749), 'numpy.abs', 'np.abs', (['metric'], {}), '(metric)\n', (10741, 10749), True, 'import numpy as np\n'), ((1893, 1925), 'pathlib.Path', 'pathlib.Path', (['self.inference_dir'], {}), '(self.inference_dir)\n', (1905, 1925), False, 'import pathlib\n'), ((6595, 6627), 'pathlib.Path', 'pathlib.Path', (['self.inference_dir'], {}), '(self.inference_dir)\n', (6607, 6627), False, 'import pathlib\n'), ((7870, 7909), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': 'self.vmin', 'vmax': 'self.vmax'}), '(vmin=self.vmin, vmax=self.vmax)\n', (7877, 7909), False, 'from matplotlib.colors import LogNorm\n'), ((8306, 8345), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {'vmin': 'self.vmin', 'vmax': 'self.vmax'}), '(vmin=self.vmin, vmax=self.vmax)\n', (8313, 8345), False, 'from matplotlib.colors import LogNorm\n')]
|
"""
Copyright (c) 2018, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the <project name> project.
"""
import numpy as np
import pickle
import time
from cflib.crazyflie import Crazyflie
from cflib.positioning.motion_commander import MotionCommander
import rospy
import actionlib
from std_msgs.msg import UInt16
from geometry_msgs.msg import Vector3
from rospy_crazyflie.msg import *
from rospy_crazyflie.srv import *
from rospy_crazyflie.motion_commands import *
class CrazyflieControl:
def __init__(self, name, crazyflie):
# Instantiate motion commander
self._cf = crazyflie
self._name = name
self._mc = MotionCommander(self._cf)
# Topic Publishers
self._velocity_setpoint_pub = rospy.Publisher(
self._name + '/velocity_setpoint',
Vector3,
queue_size = 10
)
"""
Services hosted for this crazyflie controller
"""
self._reset_position_estimator_srv = rospy.Service(
self._name + '/reset_position_estimator',
ResetPositionEstimator,
self._reset_position_estimator_cb
)
self._send_hover_setpoint_srv = rospy.Service(
self._name + '/send_hover_setpoint',
SendHoverSetpoint,
self._send_hover_setpoint_cb
)
self._set_param_srv = rospy.Service(
self._name + '/set_param',
SetParam,
self._set_param_cb
)
self._velocity_control_srv = rospy.Service(
self._name + '/velocity_control',
VelocityControl,
self._velocity_control_cb
)
"""
Action servers for this crazyflie controller
"""
self._position_control_as = actionlib.SimpleActionServer(
self._name + '/position_control',
PositionControlAction,
self._position_control_cb,
False
)
self._position_control_as.start()
"""
Service Callbacks
"""
def _reset_position_estimator_cb(self, req):
pass
def _send_hover_setpoint_cb(self, req):
vx = req.vx
vy = req.vy
z = req.z
yaw_rate = req.yaw_rate
self._cf.commander.send_hover_setpoint(vx, vy, yaw_rate, z)
return []
def _set_param_cb(self, req):
self._cf.param.set_value(req.param, req.value)
print("set %s to %s" % (req.param, req.value))
return SetParamResponse()
def _velocity_control_cb(self, req):
try:
obj = pickle.loads(req.pickle)
print(self._mc)
if isinstance(obj, SetVelSetpoint):
self._mc._set_vel_setpoint(obj.vx, obj.vy, obj.vz, obj.rate_yaw)
elif isinstance(obj, StartBack):
self._mc.start_back(velocity = obj.velocity)
elif isinstance(obj, StartCircleLeft):
self._mc.start_circle_left(obj.radius_m, velocity = obj.velocity)
elif isinstance(obj, StartCircleRight):
                self._mc.start_circle_right(obj.radius_m, velocity = obj.velocity)
elif isinstance(obj, StartDown):
self._mc.start_down(velocity = obj.velocity)
elif isinstance(obj, StartForward):
self._mc.start_forward(velocity = obj.velocity)
elif isinstance(obj, StartLeft):
self._mc.start_left(velocity = obj.velocity)
elif isinstance(obj, StartLinearMotion):
self._mc.start_linear_motion(obj.vx, obj.vy, obj.vz)
elif isinstance(obj, StartRight):
self._mc.start_right(velocity = obj.velocity)
elif isinstance(obj, StartTurnLeft):
self._mc.start_turn_left(rate = obj.rate)
elif isinstance(obj, StartTurnRight):
self._mc.start_turn_right(rate = obj.rate)
elif isinstance(obj, StartUp):
self._mc.start_up(velocity = obj.velocity)
elif isinstance(obj, Stop):
self._mc.stop()
else:
return 'Object is not a valid velocity command'
except Exception as e:
print(str(e))
raise e
return 'ok'
"""
Action Implementations
"""
def _position_control_cb(self, goal):
try:
obj = pickle.loads(goal.pickle)
if isinstance(obj, Back):
self._mc.back(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, CircleLeft):
self._mc.circle_left(obj.radius_m,
velocity = obj.velocity,
angle_degrees = obj.angle_degrees
)
elif isinstance(obj, CircleRight):
self._mc.circle_right(obj.radius_m,
velocity = obj.velocity,
angle_degrees = obj.angle_degrees
)
elif isinstance(obj, Down):
self._mc.down(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, Forward):
self._mc.forward(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, Land):
self._mc.land(velocity=obj.velocity)
elif isinstance(obj, Left):
self._mc.left(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, MoveDistance):
self._mc.move_distance(obj.x, obj.y, obj.z, velocity=obj.velocity)
elif isinstance(obj, Right):
self._mc.right(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, TakeOff):
self._mc.take_off(height=obj.height, velocity = obj.velocity)
elif isinstance(obj, TurnLeft):
self._mc.turn_left(obj.angle_degrees, rate=obj.rate)
elif isinstance(obj, TurnRight):
self._mc.turn_right(obj.angle_degrees, rate=obj.rate)
elif isinstance(obj, Up):
self._mc.up(obj.distance_m, velocity=obj.velocity)
except Exception as e:
print('Exception in action server %s' % self._name + '/position_control')
print(str(e))
print('Action aborted')
self._position_control_as.set_aborted()
return
self._position_control_as.set_succeeded()
def _takeoff(self, goal):
try:
self._mc.take_off(height = goal.height)
time.sleep(5)
except BaseException as e:
self._takeoff_as.set_aborted()
print(e)
return
self._takeoff_as.set_succeeded(TakeOffResult(True))
def _land(self, goal):
try:
self._mc.land(velocity=goal.velocity)
except BaseException as e:
self._land_as.set_aborted()
print(e)
return
self._land_as.set_succeeded(LandResult(True))
def _move_distance(self, goal):
try:
x = goal.x
y = goal.y
z = goal.z
velocity = goal.velocity
dist = np.sqrt(x**2 + y**2 + z**2)
vx = x / dist * velocity
vy = y / dist * velocity
vz = z / dist * velocity
# self._velocity_setpoint_pub.publish(Vector3(vx, vy, vz))
self._mc.move_distance(x, y, z, velocity = velocity)
# self._velocity_setpoint_pub.publish(Vector3(vx, vy, vz))
except BaseException as e:
self._move_distance_as.set_aborted()
print(e)
return
self._move_distance_as.set_succeeded()
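# Wiring sketch (added comment, an assumption rather than part of this package): the
# controller is normally constructed from a ROS node once a Crazyflie radio link has
# been opened. The node name and radio URI below are illustrative only.
#
#   import rospy
#   from cflib.crazyflie import Crazyflie
#
#   rospy.init_node('rospy_crazyflie')
#   cf = Crazyflie()
#   cf.open_link('radio://0/80/2M')  # hypothetical URI
#   controller = CrazyflieControl('crazyflie1', cf)
#   rospy.spin()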
|
[
"numpy.sqrt",
"actionlib.SimpleActionServer",
"rospy.Service",
"time.sleep",
"cflib.positioning.motion_commander.MotionCommander",
"pickle.loads",
"rospy.Publisher"
] |
[((2066, 2091), 'cflib.positioning.motion_commander.MotionCommander', 'MotionCommander', (['self._cf'], {}), '(self._cf)\n', (2081, 2091), False, 'from cflib.positioning.motion_commander import MotionCommander\n'), ((2158, 2232), 'rospy.Publisher', 'rospy.Publisher', (["(self._name + '/velocity_setpoint')", 'Vector3'], {'queue_size': '(10)'}), "(self._name + '/velocity_setpoint', Vector3, queue_size=10)\n", (2173, 2232), False, 'import rospy\n'), ((2405, 2523), 'rospy.Service', 'rospy.Service', (["(self._name + '/reset_position_estimator')", 'ResetPositionEstimator', 'self._reset_position_estimator_cb'], {}), "(self._name + '/reset_position_estimator',\n ResetPositionEstimator, self._reset_position_estimator_cb)\n", (2418, 2523), False, 'import rospy\n'), ((2607, 2711), 'rospy.Service', 'rospy.Service', (["(self._name + '/send_hover_setpoint')", 'SendHoverSetpoint', 'self._send_hover_setpoint_cb'], {}), "(self._name + '/send_hover_setpoint', SendHoverSetpoint, self.\n _send_hover_setpoint_cb)\n", (2620, 2711), False, 'import rospy\n'), ((2784, 2854), 'rospy.Service', 'rospy.Service', (["(self._name + '/set_param')", 'SetParam', 'self._set_param_cb'], {}), "(self._name + '/set_param', SetParam, self._set_param_cb)\n", (2797, 2854), False, 'import rospy\n'), ((2939, 3035), 'rospy.Service', 'rospy.Service', (["(self._name + '/velocity_control')", 'VelocityControl', 'self._velocity_control_cb'], {}), "(self._name + '/velocity_control', VelocityControl, self.\n _velocity_control_cb)\n", (2952, 3035), False, 'import rospy\n'), ((3191, 3314), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (["(self._name + '/position_control')", 'PositionControlAction', 'self._position_control_cb', '(False)'], {}), "(self._name + '/position_control',\n PositionControlAction, self._position_control_cb, False)\n", (3219, 3314), False, 'import actionlib\n'), ((3984, 4008), 'pickle.loads', 'pickle.loads', (['req.pickle'], {}), '(req.pickle)\n', (3996, 4008), False, 'import pickle\n'), ((5780, 5805), 'pickle.loads', 'pickle.loads', (['goal.pickle'], {}), '(goal.pickle)\n', (5792, 5805), False, 'import pickle\n'), ((7873, 7886), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (7883, 7886), False, 'import time\n'), ((8501, 8534), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (8508, 8534), True, 'import numpy as np\n')]
|
import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
from .integral import *
def get_velocity_field(panels, freestream, X, Y):
"""
Computes the velocity field on a given 2D mesh.
Parameters
---------
panels: 1D array of Panel objects
The source panels.
freestream: Freestream object
The freestream conditions.
X: 2D Numpy array of floats
x-coordinates of the mesh points.
Y: 2D Numpy array of floats
y-coordinate of the mesh points.
Returns
-------
u: 2D Numpy array of floats
x-component of the velocity vector field.
v: 2D Numpy array of floats
y-component of the velocity vector field.
"""
# freestream contribution
u = freestream.u_inf * math.cos(freestream.alpha) * numpy.ones_like(X, dtype=float)
v = freestream.u_inf * math.sin(freestream.alpha) * numpy.ones_like(X, dtype=float)
# add the contribution from each source (superposition powers!!!)
    vec_integral = numpy.vectorize(integral)
    for panel in panels:
        u += panel.sigma / (2.0 * math.pi) * vec_integral(X, Y, panel, 1, 0)
        v += panel.sigma / (2.0 * math.pi) * vec_integral(X, Y, panel, 0, 1)
return u, v
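# Usage sketch (added comment, not from the original module): `panels` and `freestream`
# would come from the panel-method setup elsewhere in this package; only the mesh
# construction and the call are shown here.
#
#   x_mesh = numpy.linspace(-1.0, 2.0, 100)
#   y_mesh = numpy.linspace(-0.5, 0.5, 100)
#   X, Y = numpy.meshgrid(x_mesh, y_mesh)
#   u, v = get_velocity_field(panels, freestream, X, Y)
#   pyplot.streamplot(X, Y, u, v, density=2, linewidth=1, arrowsize=1)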
|
[
"math.cos",
"numpy.ones_like",
"math.sin",
"numpy.vectorize"
] |
[((1089, 1114), 'numpy.vectorize', 'numpy.vectorize', (['integral'], {}), '(integral)\n', (1104, 1114), False, 'import numpy\n'), ((879, 910), 'numpy.ones_like', 'numpy.ones_like', (['X'], {'dtype': 'float'}), '(X, dtype=float)\n', (894, 910), False, 'import numpy\n'), ((967, 998), 'numpy.ones_like', 'numpy.ones_like', (['X'], {'dtype': 'float'}), '(X, dtype=float)\n', (982, 998), False, 'import numpy\n'), ((850, 876), 'math.cos', 'math.cos', (['freestream.alpha'], {}), '(freestream.alpha)\n', (858, 876), False, 'import math\n'), ((938, 964), 'math.sin', 'math.sin', (['freestream.alpha'], {}), '(freestream.alpha)\n', (946, 964), False, 'import math\n')]
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
import seaborn # NOQA
from spherecluster import sample_vMF
plt.ion()
n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]
num_points_per_class = 250
Xs = []
for nn in range(n_clusters):
new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
Xs.append(new_X.T)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(
1, 1, 1, aspect='equal', projection='3d',
adjustable='box-forced', xlim=[-1.1, 1.1], ylim=[-1.1, 1.1],
zlim=[-1.1, 1.1]
)
colors = ['b', 'r', 'g']
for nn in range(n_clusters):
ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])
ax.set_aspect('equal')
plt.axis('off')
plt.show()
def r_input(val=None):
val = val or ''
if sys.version_info[0] >= 3:
return eval(input(val))
return raw_input(val)
r_input()
|
[
"numpy.linalg.qr",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.axis",
"numpy.random.randn",
"spherecluster.sample_vMF",
"matplotlib.pyplot.show"
] |
[((177, 186), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (184, 186), True, 'from matplotlib import pyplot as plt\n'), ((209, 239), 'numpy.random.randn', 'np.random.randn', (['(3)', 'n_clusters'], {}), '(3, n_clusters)\n', (224, 239), True, 'import numpy as np\n'), ((249, 282), 'numpy.linalg.qr', 'np.linalg.qr', (['mus'], {'mode': '"""reduced"""'}), "(mus, mode='reduced')\n", (261, 282), True, 'import numpy as np\n'), ((468, 494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (478, 494), True, 'from matplotlib import pyplot as plt\n'), ((801, 816), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (809, 816), True, 'from matplotlib import pyplot as plt\n'), ((817, 827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (825, 827), True, 'from matplotlib import pyplot as plt\n'), ((383, 436), 'spherecluster.sample_vMF', 'sample_vMF', (['mus[nn]', 'kappas[nn]', 'num_points_per_class'], {}), '(mus[nn], kappas[nn], num_points_per_class)\n', (393, 436), False, 'from spherecluster import sample_vMF\n')]
|
from typing import Optional
import napari
import napari.layers
import numpy as np
from napari.utils.geometry import project_point_onto_plane
def point_in_bounding_box(point: np.ndarray, bounding_box: np.ndarray) -> bool:
"""Determine whether an nD point is inside an nD bounding box.
Parameters
----------
point : np.ndarray
(n,) array containing nD point coordinates to check.
bounding_box : np.ndarray
(2, n) array containing the min and max of the nD bounding box.
As returned by `Layer._extent_data`.
"""
if np.all(point > bounding_box[0]) and np.all(point < bounding_box[1]):
return True
return False
def drag_data_to_projected_distance(
start_position, end_position, view_direction, vector
):
"""Calculate the projected distance between two mouse events.
Project the drag vector between two mouse events onto a 3D vector
specified in data coordinates.
The general strategy is to
1) find mouse drag start and end positions, project them onto a
pseudo-canvas (a plane aligned with the canvas) in data coordinates.
2) project the mouse drag vector onto the (normalised) vector in data
coordinates
Parameters
----------
start_position : np.ndarray
Starting point of the drag vector in data coordinates
end_position : np.ndarray
End point of the drag vector in data coordinates
view_direction : np.ndarray
Vector defining the plane normal of the plane onto which the drag
vector is projected.
vector : np.ndarray
(3,) unit vector or (n, 3) array thereof on which to project the drag
vector from start_event to end_event. This argument is defined in data
coordinates.
Returns
-------
projected_distance : (1, ) or (n, ) np.ndarray of float
"""
# enforce at least 2d input
vector = np.atleast_2d(vector)
# Store the start and end positions in world coordinates
start_position = np.array(start_position)
end_position = np.array(end_position)
# Project the start and end positions onto a pseudo-canvas, a plane
# parallel to the rendered canvas in data coordinates.
start_position_canvas = start_position
end_position_canvas = project_point_onto_plane(
end_position, start_position_canvas, view_direction
)
# Calculate the drag vector on the pseudo-canvas.
drag_vector_canvas = np.squeeze(
end_position_canvas - start_position_canvas
)
# Project the drag vector onto the specified vector(s), return the distance
return np.einsum('j, ij -> i', drag_vector_canvas, vector).squeeze()
def point_in_layer_bounding_box(point, layer):
bbox = layer._display_bounding_box(layer._dims_displayed).T
if np.any(point < bbox[0]) or np.any(point > bbox[1]):
return False
else:
return True
def rotation_matrices_to_align_vectors(a: np.ndarray, b: np.ndarray):
"""
Find rotation matrices r such that r @ a = b
Implementation designed to avoid trig calls, a and b must be normalised.
based on https://iquilezles.org/www/articles/noacos/noacos.htm
Parameters
----------
a : np.ndarray
(1 or n, 3) normalised vector(s) of length 3.
b : np.ndarray
(1 or n, 3) normalised vector(s) of length 3.
Returns
-------
r : np.ndarray
(3, 3) rotation matrix or (n, 3, 3) array thereof.
"""
# setup
a = a.reshape(-1, 3)
b = b.reshape(-1, 3)
n_vectors = a.shape[0]
# cross product to find axis about which rotation should occur
axis = np.cross(a, b, axis=1)
# dot product equals cosine of angle between normalised vectors
cos_angle = np.einsum('ij, ij -> i', a, b)
# k is a constant which appears as a factor in the rotation matrix
k = 1 / (1 + cos_angle)
# construct rotation matrix
r = np.empty((n_vectors, 3, 3))
r[:, 0, 0] = (axis[:, 0] * axis[:, 0] * k) + cos_angle
r[:, 0, 1] = (axis[:, 1] * axis[:, 0] * k) - axis[:, 2]
r[:, 0, 2] = (axis[:, 2] * axis[:, 0] * k) + axis[:, 1]
r[:, 1, 0] = (axis[:, 0] * axis[:, 1] * k) + axis[:, 2]
r[:, 1, 1] = (axis[:, 1] * axis[:, 1] * k) + cos_angle
r[:, 1, 2] = (axis[:, 2] * axis[:, 1] * k) - axis[:, 0]
r[:, 2, 0] = (axis[:, 0] * axis[:, 2] * k) - axis[:, 1]
r[:, 2, 1] = (axis[:, 1] * axis[:, 2] * k) + axis[:, 0]
r[:, 2, 2] = (axis[:, 2] * axis[:, 2] * k) + cos_angle
return r.squeeze()
def rotation_matrix_from_z_vector(z_vector: np.ndarray):
return rotation_matrices_to_align_vectors(np.array([0, 0, 1]), z_vector)
def theta2rotz(theta: np.ndarray) -> np.ndarray:
"""
Rz = [[c(t), -s(t), 0],
[s(t), c(t), 0],
[ 0, 0, 1]]
"""
theta = np.deg2rad(np.asarray(theta).reshape(-1))
rotation_matrices = np.zeros((theta.shape[0], 3, 3), dtype=float)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rotation_matrices[:, 2, 2] = 1
rotation_matrices[:, (0, 1), (0, 1)] = cos_theta[:, np.newaxis]
rotation_matrices[:, 0, 1] = -sin_theta
rotation_matrices[:, 1, 0] = sin_theta
return rotation_matrices.squeeze()
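# A minimal self-check (added example, not part of napari): confirm that the helpers
# above behave as documented, for the drag projection and the vector-alignment
# rotation. Inputs to the rotation helpers must already be normalised.
if __name__ == "__main__":
    # dragging from the origin to (1, 1, 0), viewed along z, projects to distance 1 on x
    d = drag_data_to_projected_distance(
        start_position=np.array([0.0, 0.0, 0.0]),
        end_position=np.array([1.0, 1.0, 0.0]),
        view_direction=np.array([0.0, 0.0, 1.0]),
        vector=np.array([1.0, 0.0, 0.0]),
    )
    assert np.allclose(d, 1.0)
    # rotate the x unit vector onto the y unit vector
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([0.0, 1.0, 0.0])
    r = rotation_matrices_to_align_vectors(a, b)
    assert np.allclose(r @ a, b)
    # a 90 degree rotation about z does the same
    assert np.allclose(theta2rotz(90.0) @ a, b)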
|
[
"numpy.atleast_2d",
"numpy.cross",
"numpy.asarray",
"numpy.any",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numpy.all",
"napari.utils.geometry.project_point_onto_plane"
] |
[((1901, 1922), 'numpy.atleast_2d', 'np.atleast_2d', (['vector'], {}), '(vector)\n', (1914, 1922), True, 'import numpy as np\n'), ((2006, 2030), 'numpy.array', 'np.array', (['start_position'], {}), '(start_position)\n', (2014, 2030), True, 'import numpy as np\n'), ((2050, 2072), 'numpy.array', 'np.array', (['end_position'], {}), '(end_position)\n', (2058, 2072), True, 'import numpy as np\n'), ((2274, 2351), 'napari.utils.geometry.project_point_onto_plane', 'project_point_onto_plane', (['end_position', 'start_position_canvas', 'view_direction'], {}), '(end_position, start_position_canvas, view_direction)\n', (2298, 2351), False, 'from napari.utils.geometry import project_point_onto_plane\n'), ((2445, 2500), 'numpy.squeeze', 'np.squeeze', (['(end_position_canvas - start_position_canvas)'], {}), '(end_position_canvas - start_position_canvas)\n', (2455, 2500), True, 'import numpy as np\n'), ((3622, 3644), 'numpy.cross', 'np.cross', (['a', 'b'], {'axis': '(1)'}), '(a, b, axis=1)\n', (3630, 3644), True, 'import numpy as np\n'), ((3729, 3759), 'numpy.einsum', 'np.einsum', (['"""ij, ij -> i"""', 'a', 'b'], {}), "('ij, ij -> i', a, b)\n", (3738, 3759), True, 'import numpy as np\n'), ((3900, 3927), 'numpy.empty', 'np.empty', (['(n_vectors, 3, 3)'], {}), '((n_vectors, 3, 3))\n', (3908, 3927), True, 'import numpy as np\n'), ((4854, 4899), 'numpy.zeros', 'np.zeros', (['(theta.shape[0], 3, 3)'], {'dtype': 'float'}), '((theta.shape[0], 3, 3), dtype=float)\n', (4862, 4899), True, 'import numpy as np\n'), ((4916, 4929), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4922, 4929), True, 'import numpy as np\n'), ((4946, 4959), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4952, 4959), True, 'import numpy as np\n'), ((568, 599), 'numpy.all', 'np.all', (['(point > bounding_box[0])'], {}), '(point > bounding_box[0])\n', (574, 599), True, 'import numpy as np\n'), ((604, 635), 'numpy.all', 'np.all', (['(point < bounding_box[1])'], {}), '(point < bounding_box[1])\n', (610, 635), True, 'import numpy as np\n'), ((2789, 2812), 'numpy.any', 'np.any', (['(point < bbox[0])'], {}), '(point < bbox[0])\n', (2795, 2812), True, 'import numpy as np\n'), ((2816, 2839), 'numpy.any', 'np.any', (['(point > bbox[1])'], {}), '(point > bbox[1])\n', (2822, 2839), True, 'import numpy as np\n'), ((4594, 4613), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4602, 4613), True, 'import numpy as np\n'), ((2607, 2658), 'numpy.einsum', 'np.einsum', (['"""j, ij -> i"""', 'drag_vector_canvas', 'vector'], {}), "('j, ij -> i', drag_vector_canvas, vector)\n", (2616, 2658), True, 'import numpy as np\n'), ((4799, 4816), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (4809, 4816), True, 'import numpy as np\n')]
|
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def plot_1d(X_train, Y_train, X_test, Y_test, mean=None, std=None, str_figure=None, show_fig=True):
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(8, 6))
ax = fig.gca()
ax.plot(X_test, Y_test, linewidth=4)
if mean is not None:
line, = ax.plot(X_test, mean, linewidth=4)
if mean is not None and std is not None:
ax.fill_between(X_test.flatten(), mean - 1.96 * std, mean + 1.96 * std, alpha=0.25, color=line.get_color())
ax.plot(X_train, Y_train, 'x', linestyle='none', markersize=10, mew=4)
ax.set_xlabel('$x$', fontsize=32)
ax.set_ylabel('$y$', fontsize=32)
ax.tick_params(labelsize=24)
ax.set_xlim([np.min(X_test), np.max(X_test)])
ax.grid()
plt.tight_layout()
if str_figure is not None:
path_figures = '../figures'
if not os.path.exists(path_figures):
os.mkdir(path_figures)
plt.savefig(
os.path.join(path_figures, str_figure + '.pdf'),
format='pdf',
transparent=True
)
if show_fig:
plt.show()
plt.close('all')
def get_parser():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-f', '--function', type=str)
args = parser.parse_args()
return parser, args
def compute_nll(preds_mu, preds_sigma, X_test, Y_test, X_train):
assert len(preds_mu.shape) == len(preds_sigma.shape) == len(X_test.shape) == len(Y_test.shape) == len(X_train.shape) == 1
assert preds_mu.shape[0] == preds_sigma.shape[0] == X_test.shape[0] == Y_test.shape[0]
nll = 0.0
for mu, sigma, x, y in zip(preds_mu, preds_sigma, X_test, Y_test):
if np.any(np.abs(X_train - x) < 0.025):
continue
log_pdf = norm.logpdf(y, loc=mu, scale=sigma)
nll -= log_pdf
nll /= preds_mu.shape[0]
return nll
def compute_kl(preds_mu, preds_sigma, mean_gp, std_gp):
assert len(preds_mu.shape) == len(preds_sigma.shape) == len(mean_gp.shape) == len(std_gp.shape) == 1
assert preds_mu.shape[0] == preds_sigma.shape[0] == mean_gp.shape[0] == std_gp.shape[0]
kl = 0.0
for mu, sigma, mu_gp, sigma_gp in zip(preds_mu, preds_sigma, mean_gp, std_gp):
cur_kl = np.log(sigma_gp / (sigma + 1e-7)) + (sigma**2 + (mu - mu_gp)**2) / (2 * sigma_gp**2) - 1 / 2
        kl += cur_kl
kl /= preds_mu.shape[0]
return kl
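# Note (added): each term accumulated above is the closed-form KL divergence between
# the predicted Gaussian N(mu, sigma^2) and the GP marginal N(mu_gp, sigma_gp^2):
#   KL = log(sigma_gp / sigma) + (sigma^2 + (mu - mu_gp)^2) / (2 * sigma_gp^2) - 1/2,
# which is zero when the two distributions coincide (mu = mu_gp, sigma = sigma_gp).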
if __name__ == '__main__':
pass
|
[
"os.path.exists",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.log",
"os.path.join",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"scipy.stats.norm.logpdf",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"os.mkdir",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] |
[((212, 239), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (218, 239), True, 'import matplotlib.pyplot as plt\n'), ((251, 277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (261, 277), True, 'import matplotlib.pyplot as plt\n'), ((833, 851), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (849, 851), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1207), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1200, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1279), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (1263, 1279), False, 'import argparse\n'), ((1175, 1185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1183, 1185), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1883), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['y'], {'loc': 'mu', 'scale': 'sigma'}), '(y, loc=mu, scale=sigma)\n', (1859, 1883), False, 'from scipy.stats import norm\n'), ((781, 795), 'numpy.min', 'np.min', (['X_test'], {}), '(X_test)\n', (787, 795), True, 'import numpy as np\n'), ((797, 811), 'numpy.max', 'np.max', (['X_test'], {}), '(X_test)\n', (803, 811), True, 'import numpy as np\n'), ((936, 964), 'os.path.exists', 'os.path.exists', (['path_figures'], {}), '(path_figures)\n', (950, 964), False, 'import os\n'), ((978, 1000), 'os.mkdir', 'os.mkdir', (['path_figures'], {}), '(path_figures)\n', (986, 1000), False, 'import os\n'), ((1035, 1082), 'os.path.join', 'os.path.join', (['path_figures', "(str_figure + '.pdf')"], {}), "(path_figures, str_figure + '.pdf')\n", (1047, 1082), False, 'import os\n'), ((1779, 1798), 'numpy.abs', 'np.abs', (['(X_train - x)'], {}), '(X_train - x)\n', (1785, 1798), True, 'import numpy as np\n'), ((2322, 2356), 'numpy.log', 'np.log', (['(sigma_gp / (sigma + 1e-07))'], {}), '(sigma_gp / (sigma + 1e-07))\n', (2328, 2356), True, 'import numpy as np\n')]
|
import collections
import functools
import json
import logging
import multiprocessing
import os
import time
from collections import OrderedDict
from queue import PriorityQueue, Empty
from typing import List, Tuple, Any
from itertools import cycle, islice
import minerl.herobraine.env_spec
from minerl.herobraine.hero import spaces
import cv2
import os
import numpy as np
import gym
logger = logging.getLogger(__name__)
from minerl.data.version import assert_version, assert_prefix
import copy
import tqdm
import queue
import minerl.data.util
from minerl.data.util import forever, minibatch_gen
import concurrent
from IPython import embed
if os.name != "nt":
class WindowsError(OSError):
pass
def tree_slice(tree, slc):
if isinstance(tree, OrderedDict):
return OrderedDict(
[(k, tree_slice(v, slc)) for k, v in tree.items()]
)
else:
return tree[slc]
class DataPipeline:
"""
    Creates a data pipeline object used to iterate through the MineRL-v0 dataset
"""
def __init__(self,
data_directory: os.path,
environment: str,
num_workers: int,
worker_batch_size: int,
min_size_to_dequeue: int,
random_seed=42):
"""
        Sets up a data pipeline that loads trajectories (video frames plus numpy state) from a given data directory.
:param data_directory:
:type data_directory:
:param num_workers:
:type num_workers:
:param worker_batch_size:
:type worker_batch_size:
:param min_size_to_dequeue:
:type min_size_to_dequeue:
:param random_seed:
"""
self.seed = random_seed
self.data_dir = data_directory
self.environment = environment
self.number_of_workers = num_workers
self.worker_batch_size = worker_batch_size
self.size_to_dequeue = min_size_to_dequeue
self.processing_pool = multiprocessing.Pool(self.number_of_workers)
self._env_spec = gym.envs.registration.spec(self.environment)._kwargs['env_spec']
self._action_space = gym.envs.registration.spec(self.environment)._kwargs['action_space']
self._observation_space = gym.envs.registration.spec(self.environment)._kwargs['observation_space']
@property
def spec(self) -> minerl.herobraine.env_spec.EnvSpec:
return self._env_spec
@property
def action_space(self):
"""
Returns: action space of current MineRL environment
"""
return self._action_space
@property
def observation_space(self):
"""
        Returns: observation space of current MineRL environment
"""
return self._observation_space
# return result
def load_data(self, stream_name: str, skip_interval=0, include_metadata=False, video_name='recording.mp4'):
"""Iterates over an individual trajectory named stream_name.
Args:
stream_name (str): The stream name desired to be iterated through.
            skip_interval (int, optional): How many slices should be skipped. Defaults to 0.
            include_metadata (bool, optional): Whether or not metadata about the loaded trajectory should be included. Defaults to False.
Yields:
            A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal).
            These tuples are yielded in the order in which they occur in the episode.
"""
if '/' in stream_name:
file_dir = stream_name
else:
file_dir = os.path.join(self.data_dir, stream_name)
if DataPipeline._is_blacklisted(stream_name):
raise RuntimeError("This stream is corrupted (and will be removed in the next version of the data!)")
seq = DataPipeline._load_data_pyfunc(file_dir, -1, None, self.environment, skip_interval=skip_interval,
include_metadata=include_metadata, video_name=video_name)
if include_metadata:
observation_seq, action_seq, reward_seq, next_observation_seq, done_seq, meta = seq
else:
observation_seq, action_seq, reward_seq, next_observation_seq, done_seq = seq
        # make a copy
gym_spec = gym.envs.registration.spec(self.environment)
target_space = copy.deepcopy(gym_spec._kwargs['observation_space'])
x = list(target_space.spaces.items())
target_space.spaces = collections.OrderedDict(
sorted(x, key=lambda x:
                   x[0] if x[0] != 'pov' else 'z')
)
# Now we just need to slice the dict.
for idx in tqdm.tqdm(range(len(reward_seq))):
# Wrap in dict
action_dict = tree_slice(action_seq, idx)
observation_dict = tree_slice(observation_seq, idx)
next_observation_dict = tree_slice(next_observation_seq, idx)
yield_list = [observation_dict, action_dict, reward_seq[idx], next_observation_dict, done_seq[idx]]
yield yield_list + [meta] if include_metadata else yield_list
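    # Usage sketch (added comment; the environment name and data path are illustrative):
    #
    #   data = minerl.data.make('MineRLObtainDiamond-v0', data_dir='/path/to/MineRL/data')
    #   stream = data.get_trajectory_names()[0]
    #   for obs, action, reward, next_obs, done in data.load_data(stream):
    #       ...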
def get_trajectory_names(self):
"""Gets all the trajectory names
Returns:
A list of experiment names: [description]
"""
return [os.path.basename(x) for x in self._get_all_valid_recordings(self.data_dir)]
############################
# PRIVATE METHODS #
############################
@staticmethod
def read_frame(cap):
try:
ret, frame = cap.read()
if ret:
cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB, dst=frame)
frame = np.asarray(np.clip(frame, 0, 255), dtype=np.uint8)
return ret, frame
except Exception as err:
logger.error("error reading capture device:", err)
raise err
@staticmethod
def _roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to <NAME>
pending = len(iterables)
        nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
# Todo: Make data pipeline split files per push.
@staticmethod
def _load_data_pyfunc(file_dir: str, max_seq_len: int, data_queue, env_str="", skip_interval=0,
include_metadata=False, video_name='recording.mp4'):
"""
Enqueueing mechanism for loading a trajectory from a file onto the data_queue
:param file_dir: file path to data directory
:param skip_interval: Number of time steps to skip between each sample
:param max_seq_len: Number of time steps in each enqueued batch
:param data_queue: multiprocessing data queue, or None to return streams directly
:param include_metadata: whether or not to return an additional tuple containing metadata
:return:
"""
logger.debug("Loading from file {}".format(file_dir))
video_path = str(os.path.join(file_dir, video_name))
numpy_path = str(os.path.join(file_dir, 'rendered.npz'))
meta_path = str(os.path.join(file_dir, 'metadata.json'))
try:
# Start video decompression
cap = cv2.VideoCapture(video_path)
# Load numpy file
state = np.load(numpy_path, allow_pickle=True)
# Load metadata file
with open(meta_path) as file:
meta = json.load(file)
if 'stream_name' not in meta:
meta['stream_name'] = file_dir
action_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('action$')])
reward_vec = state['reward']
info_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('observation$')])
# Recursively sorts nested dicts
def recursive_sort(dct):
for key in list(dct.keys()):
if isinstance(dct[key], OrderedDict):
dct[key] = recursive_sort(dct[key])
dct[key] = OrderedDict(sorted(dct[key].items()))
return dct
def unflatten(dct, sep='$'):
out_dict = OrderedDict({})
for k, v in dct.items():
keys = k.split(sep)
cur_dict = out_dict
for key in keys[:-1]:
if key not in cur_dict:
cur_dict[key] = OrderedDict({})
cur_dict = cur_dict[key]
cur_dict[keys[-1]] = v
# Sort dict recursively
recursive_sort(out_dict)
return out_dict
# There is no action or reward for the terminal state of an episode.
# Hence in Publish.py we shorten the action and reward vector to reflect this.
# We know FOR SURE that the last video frame corresponds to the last state (from Universal.json).
num_states = len(reward_vec) + 1
max_frame_num = meta['true_video_frame_count']
frames = []
frame_num, stop_idx = 0, 0
# Advance video capture past first i-frame to start of experiment
cap = cv2.VideoCapture(video_path)
# for _ in range(max_frame_num - num_states):
# ret, _ = DataPipeline.read_frame(cap)
# frame_num += 1
# if not ret:
# raise RuntimeError()
# Rendered Frames
# Loop through the video and construct frames
# of observations to be sent via the multiprocessing queue
# in chunks of worker_batch_size to the batch_iter loop.
while True:
ret = True
start_idx = stop_idx
# Collect up to worker_batch_size number of frames
try:
# Go until max_seq_len +1 for S_t, A_t, -> R_t, S_{t+1}, D_{t+1}
while ret and frame_num < max_frame_num and (len(frames) < max_seq_len + 1 or max_seq_len == -1):
ret, frame = DataPipeline.read_frame(cap)
frames.append(frame)
frame_num += 1
except Exception as err:
logger.error("error reading capture device:", err)
raise err
if len(frames) <= 1:
break
if frame_num == max_frame_num:
frames[-1] = frames[-2]
# Next sarsd pair index
stop_idx = start_idx + len(frames) - 1
# print('Num frames in batch:', stop_idx - start_idx)
# Load non-image data from npz
current_observation_data = OrderedDict()
action_data = OrderedDict()
next_observation_data = OrderedDict()
try:
for key in list(info_dict.keys()) + ['observation$pov']:
if 'pov' in key:
current_observation_data[key] = np.asanyarray(frames[:-1])
next_observation_data[key] = np.asanyarray(frames[1:])
else:
current_observation_data[key] = np.asanyarray(info_dict[key][start_idx:stop_idx])
next_observation_data[key] = np.asanyarray(info_dict[key][start_idx + 1:stop_idx + 1])
# We are getting (S_t, A_t -> R_t), S_{t+1}, D_{t+1} so there are less actions and rewards
for key in action_dict:
action_data[key] = np.asanyarray(action_dict[key][start_idx: stop_idx])
reward_data = np.asanyarray(reward_vec[start_idx:stop_idx], dtype=np.float32)
done_data = [False for _ in range(len(reward_data))]
if frame_num == max_frame_num:
done_data[-1] = True
except Exception as err:
logger.error("error drawing batch from npz file:", err)
raise err
                # unflatten these dictionaries.
current_observation_data = unflatten(current_observation_data)['observation']
action_data = unflatten(action_data)['action']
next_observation_data = unflatten(next_observation_data)['observation']
batches = [current_observation_data, action_data, reward_data, next_observation_data,
np.array(done_data, dtype=np.bool)]
if include_metadata:
batches += [meta]
if data_queue is None:
return batches
else:
data_queue.put(batches)
logger.debug("Enqueued from file {}".format(file_dir))
if not ret:
break
else:
frames = [frames[-1]]
logger.error("Finished")
return None
except WindowsError as e:
logger.debug("Caught windows error {} - this is expected when closing the data pool".format(e))
return None
except FileNotFoundError as e:
print("File not found!")
raise e
except Exception as e:
logger.error("Exception caught on file \"{}\" by a worker of the data pipeline.".format(file_dir))
logger.error(repr(e))
return None
def batch_iter(self,
batch_size: int,
seq_len: int,
num_epochs: int = -1,
preload_buffer_size: int = 2,
seed: int = None,
include_metadata: bool = False):
"""Returns batches of sequences length SEQ_LEN of the data of size BATCH_SIZE.
The iterator produces batches sequentially. If an element of a batch reaches the
end of its
Args:
batch_size (int): The batch size.
seq_len (int): The size of sequences to produce.
num_epochs (int, optional): The number of epochs to iterate over the data. Defaults to -1.
preload_buffer_size (int, optional): Increase to improve performance. The data iterator uses a queue to prevent blocking; the queue size is the number of trajectories held in the buffer. Adjust based on memory constraints. Defaults to 2.
seed (int, optional): Seed for shuffling. NOT IMPLEMENTED. Defaults to None.
include_metadata (bool, optional): Include metadata on the source trajectory. Defaults to False.
Returns:
Generator: A generator that yields (sarsd) batches
"""
# TODO: seed is not implemented.
for epoch in (range(num_epochs) if num_epochs > 0 else forever()):
trajectory_queue = queue.Queue(maxsize=preload_buffer_size)
def traj_iter():
for _ in jobs:
s, a, r, sp1, d = trajectory_queue.get()
yield dict(
obs=s,
act=a,
reward=r,
next_obs=sp1,
done=d
)
jobs = [(f, -1, None) for f in self._get_all_valid_recordings(self.data_dir)]
np.random.shuffle(jobs)
trajectory_loader = minerl.data.util.OrderedJobStreamer(
job,
jobs,
trajectory_queue,
# executor=concurrent.futures.ThreadPoolExecutor,
max_workers=preload_buffer_size
)
trajectory_loader.start()
for seg_batch in minibatch_gen(traj_iter(), batch_size=batch_size, nsteps=seq_len):
yield seg_batch['obs'], seg_batch['act'], seg_batch['reward'], seg_batch['next_obs'], seg_batch['done']
trajectory_loader.shutdown()
@staticmethod
def _is_blacklisted(path):
for p in [
'tempting_capers_shapeshifter-14'
]:
if p in path:
return True
return False
@staticmethod
def _get_all_valid_recordings(path):
directoryList = []
# return nothing if path is a file
if os.path.isfile(path):
return []
# Skip this file.
if DataPipeline._is_blacklisted(path):
return []
# add dir to directory list if it contains .txt files
if len([f for f in os.listdir(path) if f.endswith('.mp4')]) > 0:
if len([f for f in os.listdir(path) if f.endswith('.npz')]) > 0:
assert_prefix(path)
directoryList.append(path)
for d in os.listdir(path):
new_path = os.path.join(path, d)
if os.path.isdir(new_path):
directoryList += DataPipeline._get_all_valid_recordings(new_path)
directoryList = np.array(directoryList)
np.random.shuffle(directoryList)
return directoryList.tolist()
###
# DEPRECATED API
###
def seq_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
"""DEPRECATED METHOD FOR SAMPLING DATA FROM THE MINERL DATASET.
This function is now :code:`DataPipeline.batch_iter()`
"""
raise DeprecationWarning(
"The `DataPipeline.seq_iter` method is deprecated! Please use DataPipeline.batch_iter()."
"\nNOTE: The new method `DataPipeline.batch_iter` has a different return signature! "
"\n\t Please see how to use it @ http://www.minerl.io/docs/tutorials/data_sampling.html")
def sarsd_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
"""
Returns a generator for iterating through (state, action, reward, next_state, is_terminal)
tuples in the dataset.
Loads num_workers files at once as defined in minerl.data.make() and return up to
max_sequence_len consecutive samples wrapped in a dict observation space
Args:
num_epochs (int, optional): number of epochs to iterate over or -1
to loop forever. Defaults to -1
max_sequence_len (int, optional): maximum number of consecutive samples - may be less. Defaults to 32
seed (int, optional): seed for random directory walk - note, specifying seed as well as a finite num_epochs
will cause the ordering of examples to be the same after every call to seq_iter
queue_size (int, optional): maximum number of elements to buffer at a time, each worker may hold an
additional item while waiting to enqueue. Defaults to 16*self.number_of_workers or 2*
self.number_of_workers if max_sequence_len == -1
include_metadata (bool, optional): adds an additional member to the tuple containing metadata about the
stream the data was loaded from. Defaults to False
Yields:
A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal, (metadata)).
Each element is in the format of the environment action/state/reward space and contains as many
samples are requested.
"""
raise DeprecationWarning(
"The `DataPipeline.sarsd_iter` method is deprecated! Please use DataPipeline.batch_iter().")
def job(arg):
return DataPipeline._load_data_pyfunc(*arg)
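# Usage sketch (illustrative only): one way to consume batch_iter, assuming the
# dataset has already been downloaded and the pipeline is created through the
# standard minerl.data.make entry point. The environment name and path below are
# placeholders, not values taken from this module.
def _example_batch_iter_usage():
    import minerl
    data = minerl.data.make('MineRLObtainDiamond-v0', data_dir='/path/to/data')
    # Each yielded tuple is (obs, act, reward, next_obs, done); obs and act are
    # dicts of arrays carrying batch and sequence dimensions.
    for obs, act, rew, next_obs, done in data.batch_iter(batch_size=4, seq_len=32, num_epochs=1):
        print(obs['pov'].shape, rew.shape, done.shape)
        break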
|
[
"logging.getLogger",
"numpy.clip",
"numpy.asanyarray",
"numpy.array",
"copy.deepcopy",
"minerl.data.version.assert_prefix",
"os.listdir",
"os.path.isdir",
"minerl.data.util.forever",
"collections.OrderedDict",
"os.path.isfile",
"cv2.cvtColor",
"itertools.islice",
"gym.envs.registration.spec",
"os.path.join",
"multiprocessing.Pool",
"os.path.basename",
"cv2.VideoCapture",
"json.load",
"queue.Queue",
"numpy.load",
"numpy.random.shuffle"
] |
[((393, 420), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (410, 420), False, 'import logging\n'), ((1962, 2006), 'multiprocessing.Pool', 'multiprocessing.Pool', (['self.number_of_workers'], {}), '(self.number_of_workers)\n', (1982, 2006), False, 'import multiprocessing\n'), ((4271, 4315), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (4297, 4315), False, 'import gym\n'), ((4339, 4391), 'copy.deepcopy', 'copy.deepcopy', (["gym_spec._kwargs['observation_space']"], {}), "(gym_spec._kwargs['observation_space'])\n", (4352, 4391), False, 'import copy\n'), ((16574, 16594), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (16588, 16594), False, 'import os\n'), ((17024, 17040), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (17034, 17040), False, 'import os\n'), ((17234, 17257), 'numpy.array', 'np.array', (['directoryList'], {}), '(directoryList)\n', (17242, 17257), True, 'import numpy as np\n'), ((17266, 17298), 'numpy.random.shuffle', 'np.random.shuffle', (['directoryList'], {}), '(directoryList)\n', (17283, 17298), True, 'import numpy as np\n'), ((3574, 3614), 'os.path.join', 'os.path.join', (['self.data_dir', 'stream_name'], {}), '(self.data_dir, stream_name)\n', (3586, 3614), False, 'import os\n'), ((5273, 5292), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (5289, 5292), False, 'import os\n'), ((7174, 7208), 'os.path.join', 'os.path.join', (['file_dir', 'video_name'], {}), '(file_dir, video_name)\n', (7186, 7208), False, 'import os\n'), ((7235, 7273), 'os.path.join', 'os.path.join', (['file_dir', '"""rendered.npz"""'], {}), "(file_dir, 'rendered.npz')\n", (7247, 7273), False, 'import os\n'), ((7299, 7338), 'os.path.join', 'os.path.join', (['file_dir', '"""metadata.json"""'], {}), "(file_dir, 'metadata.json')\n", (7311, 7338), False, 'import os\n'), ((7412, 7440), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (7428, 7440), False, 'import cv2\n'), ((7492, 7530), 'numpy.load', 'np.load', (['numpy_path'], {'allow_pickle': '(True)'}), '(numpy_path, allow_pickle=True)\n', (7499, 7530), True, 'import numpy as np\n'), ((9478, 9506), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9494, 9506), False, 'import cv2\n'), ((15107, 15116), 'minerl.data.util.forever', 'forever', ([], {}), '()\n', (15114, 15116), False, 'from minerl.data.util import forever, minibatch_gen\n'), ((15150, 15190), 'queue.Queue', 'queue.Queue', ([], {'maxsize': 'preload_buffer_size'}), '(maxsize=preload_buffer_size)\n', (15161, 15190), False, 'import queue\n'), ((15635, 15658), 'numpy.random.shuffle', 'np.random.shuffle', (['jobs'], {}), '(jobs)\n', (15652, 15658), True, 'import numpy as np\n'), ((17065, 17086), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (17077, 17086), False, 'import os\n'), ((17102, 17125), 'os.path.isdir', 'os.path.isdir', (['new_path'], {}), '(new_path)\n', (17115, 17125), False, 'import os\n'), ((2033, 2077), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (2059, 2077), False, 'import gym\n'), ((2127, 2171), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (2153, 2171), False, 'import gym\n'), ((2230, 2274), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (2256, 2274), False, 'import gym\n'), 
((5579, 5633), 'cv2.cvtColor', 'cv2.cvtColor', (['frame'], {'code': 'cv2.COLOR_BGR2RGB', 'dst': 'frame'}), '(frame, code=cv2.COLOR_BGR2RGB, dst=frame)\n', (5591, 5633), False, 'import cv2\n'), ((7630, 7645), 'json.load', 'json.load', (['file'], {}), '(file)\n', (7639, 7645), False, 'import json\n'), ((8437, 8452), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (8448, 8452), False, 'from collections import OrderedDict\n'), ((11044, 11057), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11055, 11057), False, 'from collections import OrderedDict\n'), ((11088, 11101), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11099, 11101), False, 'from collections import OrderedDict\n'), ((11142, 11155), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11153, 11155), False, 'from collections import OrderedDict\n'), ((16943, 16962), 'minerl.data.version.assert_prefix', 'assert_prefix', (['path'], {}), '(path)\n', (16956, 16962), False, 'from minerl.data.version import assert_version, assert_prefix\n'), ((5669, 5691), 'numpy.clip', 'np.clip', (['frame', '(0)', '(255)'], {}), '(frame, 0, 255)\n', (5676, 5691), True, 'import numpy as np\n'), ((12009, 12072), 'numpy.asanyarray', 'np.asanyarray', (['reward_vec[start_idx:stop_idx]'], {'dtype': 'np.float32'}), '(reward_vec[start_idx:stop_idx], dtype=np.float32)\n', (12022, 12072), True, 'import numpy as np\n'), ((12814, 12848), 'numpy.array', 'np.array', (['done_data'], {'dtype': 'np.bool'}), '(done_data, dtype=np.bool)\n', (12822, 12848), True, 'import numpy as np\n'), ((6292, 6314), 'itertools.islice', 'islice', (['nexts', 'pending'], {}), '(nexts, pending)\n', (6298, 6314), False, 'from itertools import cycle, islice\n'), ((11921, 11972), 'numpy.asanyarray', 'np.asanyarray', (['action_dict[key][start_idx:stop_idx]'], {}), '(action_dict[key][start_idx:stop_idx])\n', (11934, 11972), True, 'import numpy as np\n'), ((16804, 16820), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (16814, 16820), False, 'import os\n'), ((8708, 8723), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (8719, 8723), False, 'from collections import OrderedDict\n'), ((11355, 11381), 'numpy.asanyarray', 'np.asanyarray', (['frames[:-1]'], {}), '(frames[:-1])\n', (11368, 11381), True, 'import numpy as np\n'), ((11439, 11464), 'numpy.asanyarray', 'np.asanyarray', (['frames[1:]'], {}), '(frames[1:])\n', (11452, 11464), True, 'import numpy as np\n'), ((11555, 11604), 'numpy.asanyarray', 'np.asanyarray', (['info_dict[key][start_idx:stop_idx]'], {}), '(info_dict[key][start_idx:stop_idx])\n', (11568, 11604), True, 'import numpy as np\n'), ((11662, 11719), 'numpy.asanyarray', 'np.asanyarray', (['info_dict[key][start_idx + 1:stop_idx + 1]'], {}), '(info_dict[key][start_idx + 1:stop_idx + 1])\n', (11675, 11719), True, 'import numpy as np\n'), ((16881, 16897), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (16891, 16897), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import sys
import numpy as np
import torch
from torch.autograd import Variable
from pytorch2keras.converter import pytorch_to_keras
import torchvision
import os.path as osp
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras import backend as K
K.clear_session()
K.set_image_dim_ordering('tf')
import test
import tensorflow as tf
import torch
from torch import nn
from torchsummary import summary
from torch.autograd import Variable
import tensorflow
from tensorflow.python.keras.backend import get_session
from tensorflow.python.keras.models import load_model
from tensorflow.python.framework import graph_util, graph_io
from keras.utils import plot_model
# K.set_image_data_format('channels_first')
import cv2
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def softmax(x):
exp_x = np.exp(x)
softmax_x = exp_x / np.sum(exp_x)
return softmax_x
def check_error(output, k_model, input_np, epsilon=1e-3):
pytorch_output = output[0].data.cpu().numpy()
# pytorch_output = np.max(pytorch_output)
# print('torch:',pytorch_output)
# print('=====================')
# print('torch:',pytorch_output)
keras_output = k_model.predict(input_np)
keras_output = keras_output[0]
# keras_output = np.max(keras_output)
# print('=====================')
# print('keras pre:',keras_output)
error = np.max(pytorch_output - keras_output)
print('Error:', error)
assert error < epsilon
return error
import numpy as np
def normalization0_1(data):
_range = np.max(data) - np.min(data)
data = (data - np.min(data)) / _range
mean = [0.485, 0.456, 0.406]
std_ad = [0.229, 0.224, 0.225]
return np.divide(np.subtract(data, mean), std_ad)
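# Worked check for normalization0_1: the per-image rescale maps the image minimum
# to 0.0 and the maximum to 1.0; a rescaled red-channel value of 1.0 then becomes
# (1.0 - 0.485) / 0.229 ≈ 2.25 after the ImageNet-style mean/std step above.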
def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_", ):
if not osp.exists(output_dir):
os.mkdir(output_dir)
out_nodes = ["output_0_1"] ##get from init_graph
# out_nodes.append(out_prefix + str(0))
tf.identity(h5_model.output[0], out_prefix + str(0))
sess = get_session()
init_graph = sess.graph.as_graph_def() ##get out_nodes
main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
if __name__ == '__main__':
##step1: load pytorch model
# model = test.main()
model = torch.load("/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth")
model = model.cuda() ##cuda
summary(model, (3, 304, 304)) ##summary(model, (channels, pic_h, pic_w))
model.eval()
##step2: pytorch .pth to keras .h5 and test .h5
input_np = np.random.uniform(0, 1, (1, 3, 304, 304))
input_var = Variable(torch.FloatTensor(input_np)).cuda() ##cuda
# input_var = Variable(torch.FloatTensor(input_np))
k_model = pytorch_to_keras(model, input_var, (3, 304, 304,), verbose=True, name_policy='short')
k_model.summary()
k_model.save('my_model.h5')
output = model(input_var)
check_error(output, k_model, input_np) ## check the error between .pth and .h5
##step3: load .h5 and .h5 to .pb
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(0)  ## indispensable
my_model = load_model('my_model.h5')
h5_to_pb(my_model, output_dir='./model/', model_name='model.pb')
##step4: load .pb and test .pb
pb_path = './model/model.pb'
with tf.Session() as sess:
tf.global_variables_initializer().run()
graph_def = tf.GraphDef()
with tf.gfile.GFile(pb_path, 'rb') as f:
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name="")
pic_file = './datasets/data'
pic_list = os.listdir(pic_file)
for name in pic_list:
img_path = '{}/{}'.format(pic_file, name)
im = cv2.imread(img_path)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
img = cv2.resize(im, (304, 304))
img = np.asarray(img, dtype=np.float32)
img = normalization0_1(img)
img_data = np.transpose(img, (2, 0, 1))
img_input = np.asarray(img_data, dtype=np.float32)[np.newaxis, :, :, :]
input = sess.graph.get_tensor_by_name("input_0:0")
output = sess.graph.get_tensor_by_name("output_0_1:0")
pre_label = sess.run([output], feed_dict={input: img_input})
pre_label = pre_label[0][0]
# print(pre_label)
pre_label = np.argmax(softmax(pre_label))
print('------------------------')
print('{} prelabel is {}'.format(name, pre_label))
|
[
"tensorflow.gfile.GFile",
"os.path.exists",
"os.listdir",
"tensorflow.Session",
"numpy.asarray",
"numpy.subtract",
"numpy.max",
"numpy.exp",
"tensorflow.python.keras.backend.get_session",
"tensorflow.GraphDef",
"keras.backend.clear_session",
"tensorflow.python.keras.models.load_model",
"numpy.min",
"os.mkdir",
"torchsummary.summary",
"tensorflow.python.framework.graph_io.write_graph",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.import_graph_def",
"cv2.cvtColor",
"cv2.resize",
"numpy.transpose",
"cv2.imread",
"keras.backend.set_image_dim_ordering",
"torch.load",
"tensorflow.global_variables_initializer",
"numpy.sum",
"numpy.random.uniform",
"pytorch2keras.converter.pytorch_to_keras",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.backend.set_learning_phase",
"torch.FloatTensor"
] |
[((284, 301), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (299, 301), True, 'from keras import backend as K\n'), ((302, 332), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (326, 332), True, 'from keras import backend as K\n'), ((828, 837), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (834, 837), True, 'import numpy as np\n'), ((1375, 1412), 'numpy.max', 'np.max', (['(pytorch_output - keras_output)'], {}), '(pytorch_output - keras_output)\n', (1381, 1412), True, 'import numpy as np\n'), ((2050, 2063), 'tensorflow.python.keras.backend.get_session', 'get_session', ([], {}), '()\n', (2061, 2063), False, 'from tensorflow.python.keras.backend import get_session\n'), ((2141, 2211), 'tensorflow.python.framework.graph_util.convert_variables_to_constants', 'graph_util.convert_variables_to_constants', (['sess', 'init_graph', 'out_nodes'], {}), '(sess, init_graph, out_nodes)\n', (2182, 2211), False, 'from tensorflow.python.framework import graph_util, graph_io\n'), ((2216, 2292), 'tensorflow.python.framework.graph_io.write_graph', 'graph_io.write_graph', (['main_graph', 'output_dir'], {'name': 'model_name', 'as_text': '(False)'}), '(main_graph, output_dir, name=model_name, as_text=False)\n', (2236, 2292), False, 'from tensorflow.python.framework import graph_util, graph_io\n'), ((2392, 2496), 'torch.load', 'torch.load', (['"""/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth"""'], {}), "(\n '/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth'\n )\n", (2402, 2496), False, 'import torch\n'), ((2524, 2553), 'torchsummary.summary', 'summary', (['model', '(3, 304, 304)'], {}), '(model, (3, 304, 304))\n', (2531, 2553), False, 'from torchsummary import summary\n'), ((2684, 2725), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1, 3, 304, 304)'], {}), '(0, 1, (1, 3, 304, 304))\n', (2701, 2725), True, 'import numpy as np\n'), ((2865, 2954), 'pytorch2keras.converter.pytorch_to_keras', 'pytorch_to_keras', (['model', 'input_var', '(3, 304, 304)'], {'verbose': '(True)', 'name_policy': '"""short"""'}), "(model, input_var, (3, 304, 304), verbose=True, name_policy\n ='short')\n", (2881, 2954), False, 'from pytorch2keras.converter import pytorch_to_keras\n'), ((3162, 3194), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (3192, 3194), True, 'import tensorflow as tf\n'), ((3199, 3237), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (3234, 3237), True, 'import tensorflow as tf\n'), ((3261, 3286), 'tensorflow.python.keras.models.load_model', 'load_model', (['"""my_model.h5"""'], {}), "('my_model.h5')\n", (3271, 3286), False, 'from tensorflow.python.keras.models import load_model\n'), ((862, 875), 'numpy.sum', 'np.sum', (['exp_x'], {}), '(exp_x)\n', (868, 875), True, 'import numpy as np\n'), ((1549, 1561), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (1555, 1561), True, 'import numpy as np\n'), ((1564, 1576), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1570, 1576), True, 'import numpy as np\n'), ((1708, 1731), 'numpy.subtract', 'np.subtract', (['data', 'mean'], {}), '(data, mean)\n', (1719, 1731), True, 'import numpy as np\n'), ((1822, 1844), 'os.path.exists', 'osp.exists', (['output_dir'], {}), '(output_dir)\n', (1832, 1844), True, 'import os.path as osp\n'), ((1863, 1883), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (1871, 
1883), False, 'import os\n'), ((3435, 3447), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3445, 3447), True, 'import tensorflow as tf\n'), ((3525, 3538), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (3536, 3538), True, 'import tensorflow as tf\n'), ((3750, 3770), 'os.listdir', 'os.listdir', (['pic_file'], {}), '(pic_file)\n', (3760, 3770), False, 'import os\n'), ((1596, 1608), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1602, 1608), True, 'import numpy as np\n'), ((3553, 3582), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_path', '"""rb"""'], {}), "(pb_path, 'rb')\n", (3567, 3582), True, 'import tensorflow as tf\n'), ((3653, 3692), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (3672, 3692), True, 'import tensorflow as tf\n'), ((3872, 3892), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3882, 3892), False, 'import cv2\n'), ((3910, 3945), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (3922, 3945), False, 'import cv2\n'), ((3964, 3990), 'cv2.resize', 'cv2.resize', (['im', '(304, 304)'], {}), '(im, (304, 304))\n', (3974, 3990), False, 'import cv2\n'), ((4009, 4042), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (4019, 4042), True, 'import numpy as np\n'), ((4106, 4134), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (4118, 4134), True, 'import numpy as np\n'), ((2751, 2778), 'torch.FloatTensor', 'torch.FloatTensor', (['input_np'], {}), '(input_np)\n', (2768, 2778), False, 'import torch\n'), ((3465, 3498), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3496, 3498), True, 'import tensorflow as tf\n'), ((4159, 4197), 'numpy.asarray', 'np.asarray', (['img_data'], {'dtype': 'np.float32'}), '(img_data, dtype=np.float32)\n', (4169, 4197), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
""".. moduleauthor:: <NAME>"""
import abc
from copy import copy
from dataclasses import dataclass
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
from typing import Tuple, List, Optional, final, TypeVar, Generic
from torch.utils.data import Dataset
import numpy as np # type: ignore
from bann.b_data_functions.errors.custom_erors import KnownErrorBannData
@final
@dataclass
class TypeShapeCon:
type: np.dtype = np.dtype('float')
shape: Tuple[int, ...] = (4,)
data: Optional[np.ndarray] = None
shared_data: Optional[SharedMemory] = None
@final
class SmmConManger:
def __init__(self) -> None:
self.__smm: SharedMemoryManager = SharedMemoryManager()
self.__started: bool = False
self.__stopped: bool = False
@property
def smm(self) -> SharedMemoryManager:
return self.__smm
def smm_shutdown(self) -> None:
if self.__started and not self.__stopped:
self.__smm.shutdown()
self.__stopped = True
def smm_start(self) -> None:
if not (self.__started or self.__stopped):
self.__smm.start()
self.__started = True
_TypD = TypeVar('_TypD')
class DataSetSharedMemoryA(abc.ABC, Dataset, Generic[_TypD]):
def __init__(self, data_len: int, /) -> None:
super().__init__()
self.__subset: List[int] = []
self.__subsets_locked: bool = False
self.__smm: Optional[SharedMemoryManager] = None
self.__data_len = data_len
@final
def __len__(self) -> int:
return self.__data_len
@final
@property
def subset(self) -> List[int]:
return self.__subset
@final
def _set_subset(self, indices: List[int], /) -> None:
self.__subset = indices
if indices:
self.__data_len = len(indices)
@final
def _lock_subsets(self) -> None:
self.__subsets_locked = True
@final
def create_subsets(self, indices: List[int], /) -> 'DataSetSharedMemoryA':
if self.__subsets_locked:
raise KnownErrorBannData("subset of subset is prohibited")
shallow_copy = copy(self)
shallow_copy._set_subset(indices)
shallow_copy._lock_subsets()
shallow_copy._trim_shallow_copy(indices)
return shallow_copy
@abc.abstractmethod
def _getitem(self, item: int, /) -> _TypD:
raise NotImplementedError("Abstract method!")
@final
def __getitem__(self, item: int) -> _TypD:
self.remap_shared_memory()
return self._getitem(item)
@final
@property
def used_smm(self) -> Optional[SharedMemoryManager]:
return self.__smm
@abc.abstractmethod
def _trim_shallow_copy(self, indices: List[int], /) -> None:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def remap_shared_memory(self) -> None:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def _pre_send_empty(self) -> None:
raise NotImplementedError("Abstract method!")
@final
def pre_send_empty(self) -> None:
self.__smm = None
self._pre_send_empty()
@abc.abstractmethod
def _move_data_to_shared_memory(self) -> None:
raise NotImplementedError("Abstract method!")
@final
def move_data_to_shared_memory(self, smm: SharedMemoryManager, /) -> None:
if self.__smm is not None:
raise KnownErrorBannData("SharedMemoryManager already set")
self.__smm = smm
self._move_data_to_shared_memory()
def _generate_shared_mem_it(np_array: np.ndarray, cont: TypeShapeCon,
smm: SharedMemoryManager, /) -> SharedMemory:
cont.shape = np_array.shape
cont.type = np_array.dtype
shm = smm.SharedMemory(size=np_array.nbytes)
np_buffered = np.ndarray(np_array.shape, dtype=np_array.dtype, buffer=shm.buf)
np_buffered[:] = np_array[:]
return shm
def remap_shared_mem(data: TypeShapeCon, indices: List[int], /) -> None:
# TODO (remove copy) at this point DataLoader doesn't work without copy
if not (data.shared_data is None or data.shape is None or data.type is None):
data_point = data.shared_data
np_buffered_data = np.ndarray(data.shape, dtype=data.type, buffer=data_point.buf)
if indices:
data.data = np.array(list(np_buffered_data[index_i] for index_i in indices))
else:
data.data = copy(np_buffered_data)
data.shared_data = None
def generate_shared_mem(data_type_shape: TypeShapeCon, smm: SharedMemoryManager, /) -> None:
data_l = data_type_shape.data
if data_type_shape.shared_data is None and data_l is None:
raise KnownErrorBannData("Both data types are empty!")
if data_l is not None:
data_type_shape.shared_data = _generate_shared_mem_it(data_l, data_type_shape, smm)
data_type_shape.data = None
def trim_shallow_copy(data_type_shape: TypeShapeCon, indices: List[int], /) -> TypeShapeCon:
if data_type_shape.shared_data is None and data_type_shape.data is None:
raise KnownErrorBannData("Both data types are empty!")
new_con = TypeShapeCon(type=data_type_shape.type, shape=data_type_shape.shape)
if indices:
new_data = data_type_shape.data
if new_data is not None:
new_con.data = np.array(list(new_data[data_index] for data_index in indices))
new_con.shared_data = data_type_shape.shared_data
return new_con
new_con.shared_data = data_type_shape.shared_data
new_con.data = data_type_shape.data
return new_con
def data_get_item(data: TypeShapeCon, index: int, /) -> np.ndarray:
if data.data is not None:
return np.array(data.data[index])
raise KnownErrorBannData("Should never happen")
def data_shallow_copy_shared_mem(data: TypeShapeCon, /) -> TypeShapeCon:
if data.shared_data is None:
raise KnownErrorBannData("Shared data is empty!")
new_con = TypeShapeCon(type=data.type, shape=data.shape)
new_con.shared_data = data.shared_data
return new_con
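# Sketch (illustrative only): a minimal concrete subclass showing how the abstract
# hooks above are expected to delegate to the helper functions in this module.
# The class itself and its single-array layout are assumptions for illustration,
# not part of the original interface.
class _ExampleArrayDataSet(DataSetSharedMemoryA[np.ndarray]):
    def __init__(self, values: np.ndarray, /) -> None:
        super().__init__(values.shape[0])
        self.__con = TypeShapeCon(type=values.dtype, shape=values.shape, data=values)

    def _getitem(self, item: int, /) -> np.ndarray:
        return data_get_item(self.__con, item)

    def _trim_shallow_copy(self, indices: List[int], /) -> None:
        self.__con = trim_shallow_copy(self.__con, indices)

    def remap_shared_memory(self) -> None:
        # copies the shared buffer back into local memory before indexing
        remap_shared_mem(self.__con, self.subset)

    def _pre_send_empty(self) -> None:
        # assumes move_data_to_shared_memory() was called first, so only the
        # SharedMemory handle is shipped to a worker process
        self.__con = data_shallow_copy_shared_mem(self.__con)

    def _move_data_to_shared_memory(self) -> None:
        smm = self.used_smm
        if smm is not None:
            generate_shared_mem(self.__con, smm)
# Typical flow: dataset.move_data_to_shared_memory(smm_con_manager.smm), then
# dataset.pre_send_empty() before handing the object to a worker process.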
|
[
"bann.b_data_functions.errors.custom_erors.KnownErrorBannData",
"copy.copy",
"numpy.array",
"multiprocessing.managers.SharedMemoryManager",
"numpy.ndarray",
"numpy.dtype",
"typing.TypeVar"
] |
[((1243, 1259), 'typing.TypeVar', 'TypeVar', (['"""_TypD"""'], {}), "('_TypD')\n", (1250, 1259), False, 'from typing import Tuple, List, Optional, final, TypeVar, Generic\n'), ((509, 526), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (517, 526), True, 'import numpy as np\n'), ((3903, 3967), 'numpy.ndarray', 'np.ndarray', (['np_array.shape'], {'dtype': 'np_array.dtype', 'buffer': 'shm.buf'}), '(np_array.shape, dtype=np_array.dtype, buffer=shm.buf)\n', (3913, 3967), True, 'import numpy as np\n'), ((5832, 5873), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Should never happen"""'], {}), "('Should never happen')\n", (5850, 5873), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((749, 770), 'multiprocessing.managers.SharedMemoryManager', 'SharedMemoryManager', ([], {}), '()\n', (768, 770), False, 'from multiprocessing.managers import SharedMemoryManager\n'), ((2209, 2219), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (2213, 2219), False, 'from copy import copy\n'), ((4314, 4376), 'numpy.ndarray', 'np.ndarray', (['data.shape'], {'dtype': 'data.type', 'buffer': 'data_point.buf'}), '(data.shape, dtype=data.type, buffer=data_point.buf)\n', (4324, 4376), True, 'import numpy as np\n'), ((4785, 4833), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Both data types are empty!"""'], {}), "('Both data types are empty!')\n", (4803, 4833), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((5175, 5223), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Both data types are empty!"""'], {}), "('Both data types are empty!')\n", (5193, 5223), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((5795, 5821), 'numpy.array', 'np.array', (['data.data[index]'], {}), '(data.data[index])\n', (5803, 5821), True, 'import numpy as np\n'), ((5996, 6039), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Shared data is empty!"""'], {}), "('Shared data is empty!')\n", (6014, 6039), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((2133, 2185), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""subset of subset is prohibited"""'], {}), "('subset of subset is prohibited')\n", (2151, 2185), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((3505, 3558), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""SharedMemoryManager already set"""'], {}), "('SharedMemoryManager already set')\n", (3523, 3558), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((4524, 4546), 'copy.copy', 'copy', (['np_buffered_data'], {}), '(np_buffered_data)\n', (4528, 4546), False, 'from copy import copy\n')]
|
import os
import tensorflow as tf
import numpy as np
import mcubes
from ops import *
class ZGenerator:
def __init__(self, sess, z_dim=128, ef_dim=32, gf_dim=128, dataset_name=None):
self.sess = sess
self.input_size = 64
self.z_dim = z_dim
self.ef_dim = ef_dim
self.gf_dim = gf_dim
self.dataset_name = dataset_name
self.real_size = 64
self.test_size = 32
self.batch_size = self.test_size*self.test_size*self.test_size
self.build_model()
def build_model(self):
self.z_vector = tf.placeholder(shape=[1,self.z_dim], dtype=tf.float32)
self.point_coord = tf.placeholder(shape=[self.batch_size,3], dtype=tf.float32)
self.point_value = tf.placeholder(shape=[self.batch_size,1], dtype=tf.float32)
self.zG = self.generator(self.point_coord, self.z_vector, phase_train=True, reuse=False)
self.loss = tf.reduce_mean(tf.square(self.point_value - self.zG))
self.saver = tf.train.Saver(max_to_keep=10)
def generator(self, points, z, phase_train=True, reuse=False):
with tf.variable_scope('simple_net') as scope:
if reuse:
scope.reuse_variables()
zs = tf.tile(z, [self.batch_size,1])
pointz = tf.concat([points,zs],1)
h1 = lrelu(linear(pointz, self.gf_dim*16, 'h1_lin'))
h1 = tf.concat([h1,pointz],1)
h2 = lrelu(linear(h1, self.gf_dim*8, 'h4_lin'))
h2 = tf.concat([h2,pointz],1)
h3 = lrelu(linear(h2, self.gf_dim*4, 'h5_lin'))
h3 = tf.concat([h3,pointz],1)
h4 = lrelu(linear(h3, self.gf_dim*2, 'h6_lin'))
h4 = tf.concat([h4,pointz],1)
h5 = lrelu(linear(h4, self.gf_dim, 'h7_lin'))
h6 = tf.nn.sigmoid(linear(h5, 1, 'h8_lin'))
return tf.reshape(h6, [self.batch_size,1])
def test(self, checkpoint_dir, batch_z, dim=64):
could_load, checkpoint_counter = self.load(checkpoint_dir)
if could_load:
print(' [*] Load SUCCESS')
else:
print(' [!] Load failed...')
return
dima = self.test_size
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
aux_x = np.zeros([dima,dima,dima],np.int32)
aux_y = np.zeros([dima,dima,dima],np.int32)
aux_z = np.zeros([dima,dima,dima],np.int32)
for i in range(dima):
for j in range(dima):
for k in range(dima):
aux_x[i,j,k] = i*multiplier
aux_y[i,j,k] = j*multiplier
aux_z[i,j,k] = k*multiplier
coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
coords = (coords+0.5)/dim*2.0-1.0
coords = np.reshape(coords,[multiplier3,self.batch_size,3])
for t in range(batch_z.shape[0]):
model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
minib = i*multiplier2+j*multiplier+k
model_out = self.sess.run(self.zG,
feed_dict={
self.z_vector: batch_z[t:t+1],
self.point_coord: coords[minib],
})
model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
thres = 0.2
vertices, triangles = mcubes.marching_cubes(model_float, thres)
return vertices, triangles
def load(self, checkpoint_dir):
import re
print(' [*] Reading checkpoints...')
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer('(\d+)(?!.*\d)',ckpt_name)).group(0))
print(' [*] Success to read {}'.format(ckpt_name))
return True, counter
else:
print(' [*] Failed to find a checkpoint')
return False, 0
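# Usage sketch (illustrative only): decode one latent code into a mesh. The
# checkpoint directory and latent vector are placeholders, and mcubes.export_mesh
# (from the PyMCubes package) is assumed here for writing the marching-cubes output.
def _example_decode(checkpoint_dir='./checkpoint/'):
    with tf.Session() as sess:
        gen = ZGenerator(sess, z_dim=128)
        sess.run(tf.global_variables_initializer())
        batch_z = np.random.normal(0, 1, size=(1, 128)).astype(np.float32)
        vertices, triangles = gen.test(checkpoint_dir, batch_z, dim=64)
        mcubes.export_mesh(vertices, triangles, 'sample.dae', 'sample')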
|
[
"tensorflow.tile",
"numpy.reshape",
"tensorflow.variable_scope",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"os.path.join",
"mcubes.marching_cubes",
"tensorflow.train.get_checkpoint_state",
"tensorflow.concat",
"numpy.zeros",
"os.path.basename",
"re.finditer",
"tensorflow.square"
] |
[((587, 642), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, self.z_dim]', 'dtype': 'tf.float32'}), '(shape=[1, self.z_dim], dtype=tf.float32)\n', (601, 642), True, 'import tensorflow as tf\n'), ((669, 729), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.batch_size, 3]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, 3], dtype=tf.float32)\n', (683, 729), True, 'import tensorflow as tf\n'), ((756, 816), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.batch_size, 1]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, 1], dtype=tf.float32)\n', (770, 816), True, 'import tensorflow as tf\n'), ((1035, 1065), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (1049, 1065), True, 'import tensorflow as tf\n'), ((2374, 2412), 'numpy.zeros', 'np.zeros', (['[dima, dima, dima]', 'np.int32'], {}), '([dima, dima, dima], np.int32)\n', (2382, 2412), True, 'import numpy as np\n'), ((2426, 2464), 'numpy.zeros', 'np.zeros', (['[dima, dima, dima]', 'np.int32'], {}), '([dima, dima, dima], np.int32)\n', (2434, 2464), True, 'import numpy as np\n'), ((2478, 2516), 'numpy.zeros', 'np.zeros', (['[dima, dima, dima]', 'np.int32'], {}), '([dima, dima, dima], np.int32)\n', (2486, 2516), True, 'import numpy as np\n'), ((2777, 2833), 'numpy.zeros', 'np.zeros', (['[multiplier3, dima, dima, dima, 3]', 'np.float32'], {}), '([multiplier3, dima, dima, dima, 3], np.float32)\n', (2785, 2833), True, 'import numpy as np\n'), ((3233, 3286), 'numpy.reshape', 'np.reshape', (['coords', '[multiplier3, self.batch_size, 3]'], {}), '(coords, [multiplier3, self.batch_size, 3])\n', (3243, 3286), True, 'import numpy as np\n'), ((4211, 4256), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (4240, 4256), True, 'import tensorflow as tf\n'), ((966, 1003), 'tensorflow.square', 'tf.square', (['(self.point_value - self.zG)'], {}), '(self.point_value - self.zG)\n', (975, 1003), True, 'import tensorflow as tf\n'), ((1147, 1178), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""simple_net"""'], {}), "('simple_net')\n", (1164, 1178), True, 'import tensorflow as tf\n'), ((1269, 1301), 'tensorflow.tile', 'tf.tile', (['z', '[self.batch_size, 1]'], {}), '(z, [self.batch_size, 1])\n', (1276, 1301), True, 'import tensorflow as tf\n'), ((1322, 1348), 'tensorflow.concat', 'tf.concat', (['[points, zs]', '(1)'], {}), '([points, zs], 1)\n', (1331, 1348), True, 'import tensorflow as tf\n'), ((1430, 1456), 'tensorflow.concat', 'tf.concat', (['[h1, pointz]', '(1)'], {}), '([h1, pointz], 1)\n', (1439, 1456), True, 'import tensorflow as tf\n'), ((1533, 1559), 'tensorflow.concat', 'tf.concat', (['[h2, pointz]', '(1)'], {}), '([h2, pointz], 1)\n', (1542, 1559), True, 'import tensorflow as tf\n'), ((1636, 1662), 'tensorflow.concat', 'tf.concat', (['[h3, pointz]', '(1)'], {}), '([h3, pointz], 1)\n', (1645, 1662), True, 'import tensorflow as tf\n'), ((1739, 1765), 'tensorflow.concat', 'tf.concat', (['[h4, pointz]', '(1)'], {}), '([h4, pointz], 1)\n', (1748, 1765), True, 'import tensorflow as tf\n'), ((1899, 1935), 'tensorflow.reshape', 'tf.reshape', (['h6', '[self.batch_size, 1]'], {}), '(h6, [self.batch_size, 1])\n', (1909, 1935), True, 'import tensorflow as tf\n'), ((3353, 3402), 'numpy.zeros', 'np.zeros', (['[dim + 2, dim + 2, dim + 2]', 'np.float32'], {}), '([dim + 2, dim + 2, dim + 2], np.float32)\n', (3361, 3402), True, 'import numpy as np\n'), ((4013, 4054), 'mcubes.marching_cubes', 
'mcubes.marching_cubes', (['model_float', 'thres'], {}), '(model_float, thres)\n', (4034, 4054), False, 'import mcubes\n'), ((4329, 4373), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (4345, 4373), False, 'import os\n'), ((4416, 4455), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (4428, 4455), False, 'import os\n'), ((3914, 3955), 'numpy.reshape', 'np.reshape', (['model_out', '[dima, dima, dima]'], {}), '(model_out, [dima, dima, dima])\n', (3924, 3955), True, 'import numpy as np\n'), ((4488, 4529), 're.finditer', 're.finditer', (['"""(\\\\d+)(?!.*\\\\d)"""', 'ckpt_name'], {}), "('(\\\\d+)(?!.*\\\\d)', ckpt_name)\n", (4499, 4529), False, 'import re\n')]
|
#/bin/python3
import numpy as np
from scipy import signal as sig
class pySparSDRCompress():
'''
Implementation of the SparSDR Compressor based on
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2019, June. Sparsdr: Sparsity-proportional backhaul and compute for sdrs. In Proceedings of the 17th Annual International Conference on Mobile Systems, Applications, and Services (pp. 391-403).
'''
def __init__(self,nfft=1024,thresholdVec=None):
'''
Initialize SparSDR Compressor
:input: nfft :shouldBeEven: Number of bins in fft
'''
assert not nfft%2
self.nfft = nfft
self.nover = int(self.nfft/2)
self.windowVec = sig.windows.hann(self.nfft, sym=False)
self.windowVec = np.expand_dims(self.windowVec,axis=1)
if thresholdVec is None:
self.setThreshold(np.zeros((1,self.nfft)))
else:
self.setThreshold(thresholdVec)
self.bufferState = np.zeros((self.nover,))
self.numWinProcessed = 0
def reset(self):
'''
Resets internal memory if the compressor needs to be re-started
(soft-reset)
'''
self.bufferState = 0*self.bufferState
self.numWinProcessed = 0
def setThreshold(self, thresholdVec):
'''
Sets internal threshold vector
:input: thresholdVec :shape==(1,nfft): real-valued thresholds as numpy array
'''
assert thresholdVec.shape == (1,self.nfft)
self.thresholdVec = thresholdVec
def work(self, xIn):
'''
Perform compression on input vector
:input: xIn :numElements==k*nfft: input signal as a numpy array
:output: (windowIdx, binIdx, binValue)
:output: windowIdx : Index of window over all-time
:output: binIdx : Index of bin in a particular window
:output: binValue : Value of the binIdx at the windowIdx
This function remembers past input and stores overlap in the bufferState
variable
'''
assert not xIn.size%self.nfft
# concatenate filter state
xIn = np.concatenate((self.bufferState, xIn))
# Half-Overlapped windowing
evenWindows = self.windowVec*xIn[:-self.nover].reshape((self.nfft,-1))
oddWindows = self.windowVec*xIn[self.nover:].reshape((self.nfft,-1))
# Fourier Transform
evenWindows = np.fft.fft(evenWindows,axis=0)
oddWindows = np.fft.fft(oddWindows,axis=0)
# Interleave overlapped windows
output = np.empty((self.nfft, 2*evenWindows.shape[1]) , dtype=evenWindows.dtype)
output[:,0::2] = evenWindows
output[:,1::2] = oddWindows
output = output.transpose()
# Threshold to find areas of activity
thresholdFlag = np.abs(output) > self.thresholdVec
thresholdFlag = np.transpose(thresholdFlag.nonzero())
# Select only active bins
output = output[thresholdFlag[:,0],thresholdFlag[:,1]]
thresholdFlag[:,0] = self.numWinProcessed + thresholdFlag[:,0]
# Update internal states
self.bufferState = xIn[-self.nover:]
self.numWinProcessed = self.numWinProcessed + 2*evenWindows.shape[1]
return thresholdFlag[:,0], thresholdFlag[:,1], output
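# Usage sketch (illustrative only): compress two FFT-lengths worth of a noisy tone,
# keeping only bins whose magnitude exceeds the per-bin threshold. The threshold
# level 0.5 is an arbitrary illustrative choice.
def _example_compress():
    nfft = 1024
    compressor = pySparSDRCompress(nfft=nfft, thresholdVec=0.5 * np.ones((1, nfft)))
    t = np.arange(2 * nfft)
    x = np.exp(2j * np.pi * 0.1 * t) + 0.01 * np.random.randn(2 * nfft)
    win_idx, bin_idx, bin_val = compressor.work(x)
    # each (win_idx[i], bin_idx[i], bin_val[i]) triple is one retained FFT bin
    print(win_idx.shape, bin_idx.shape, bin_val.shape)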
|
[
"numpy.abs",
"numpy.fft.fft",
"scipy.signal.windows.hann",
"numpy.zeros",
"numpy.empty",
"numpy.concatenate",
"numpy.expand_dims"
] |
[((718, 756), 'scipy.signal.windows.hann', 'sig.windows.hann', (['self.nfft'], {'sym': '(False)'}), '(self.nfft, sym=False)\n', (734, 756), True, 'from scipy import signal as sig\n'), ((782, 820), 'numpy.expand_dims', 'np.expand_dims', (['self.windowVec'], {'axis': '(1)'}), '(self.windowVec, axis=1)\n', (796, 820), True, 'import numpy as np\n'), ((995, 1018), 'numpy.zeros', 'np.zeros', (['(self.nover,)'], {}), '((self.nover,))\n', (1003, 1018), True, 'import numpy as np\n'), ((2144, 2183), 'numpy.concatenate', 'np.concatenate', (['(self.bufferState, xIn)'], {}), '((self.bufferState, xIn))\n', (2158, 2183), True, 'import numpy as np\n'), ((2437, 2468), 'numpy.fft.fft', 'np.fft.fft', (['evenWindows'], {'axis': '(0)'}), '(evenWindows, axis=0)\n', (2447, 2468), True, 'import numpy as np\n'), ((2489, 2519), 'numpy.fft.fft', 'np.fft.fft', (['oddWindows'], {'axis': '(0)'}), '(oddWindows, axis=0)\n', (2499, 2519), True, 'import numpy as np\n'), ((2577, 2649), 'numpy.empty', 'np.empty', (['(self.nfft, 2 * evenWindows.shape[1])'], {'dtype': 'evenWindows.dtype'}), '((self.nfft, 2 * evenWindows.shape[1]), dtype=evenWindows.dtype)\n', (2585, 2649), True, 'import numpy as np\n'), ((2829, 2843), 'numpy.abs', 'np.abs', (['output'], {}), '(output)\n', (2835, 2843), True, 'import numpy as np\n'), ((883, 907), 'numpy.zeros', 'np.zeros', (['(1, self.nfft)'], {}), '((1, self.nfft))\n', (891, 907), True, 'import numpy as np\n')]
|
from typing import Dict
from numba import njit
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'binary'
def read_parameters(filename: str) -> Dict[str, float]:
"""Read parameters from a file to a dictionary and return it."""
parameters = {}
with open(filename, "r") as file:
for line in file.readlines():
if line != '\n':
line_split = line.split()
try:
parameters[line_split[0]] = int(line_split[2])
except ValueError:
parameters[line_split[0]] = float(line_split[2])
if len(parameters) != 6:
raise RuntimeError("Incorrect list of parameters in " + filename)
return parameters
def random_population(population_size: int, board_size: int) -> np.ndarray:
"""Return a random population of solutions."""
return np.array([np.random.permutation(board_size)
for _ in range(population_size)], dtype=np.int32)
@njit
def fitness(population: np.ndarray) -> np.ndarray:
"""Return an array of fitnesses of a given population"""
fitness_arr = np.empty(population.shape[0], dtype=np.float32)
for i, genome in enumerate(population):
diags_1 = np.array([0 for n in range(2 * genome.size - 1)])
diags_2 = np.array([0 for n in range(2 * genome.size - 1)])
for j in range(genome.size):
diags_1[j - genome[j] + genome.size - 1] += 1
diags_2[j + genome[j]] += 1
colls_1 = diags_1 > 1
colls_2 = diags_2 > 1
diags_1[colls_1] = diags_1[colls_1] * (diags_1[colls_1] - 1) // 2
diags_1[~colls_1] = 0
diags_2[colls_2] = diags_2[colls_2] * (diags_2[colls_2] - 1) // 2
diags_2[~colls_2] = 0
fitness_arr[i] = 1 / (1 + np.sum(diags_1) + np.sum(diags_2))
return fitness_arr
@njit
def selection(population: np.ndarray, n_best: int) -> np.ndarray:
"""Return an array of indices of individuals selected to mate.
n_best is the number of best individuals who will always be selected.
"""
fitnesses = fitness(population)
winners = np.empty((population.shape[0] // 2,), dtype=np.int32)
winners[0:n_best] = np.argsort(fitnesses)[-n_best:]
for i in range(n_best, fitnesses.shape[0] // 2):
pair = np.random.randint(0, fitnesses.shape[0], size=(2,))
if fitnesses[pair[0]] > fitnesses[pair[1]]:
winners[i] = pair[0]
else:
winners[i] = pair[1]
return winners
@njit
def crossover(population: np.ndarray, selected: np.ndarray):
"""Return a new population that results from crossover."""
N = population.shape[1]
new_population = np.empty_like(population)
for k in range(0, selected.shape[0]):
parents_ids = np.random.choice(selected, replace=False, size=2)
child_1 = np.empty_like(population[parents_ids[0]])
child_2 = np.empty_like(population[parents_ids[1]])
points = np.random.randint(0, N + 1, 2)
if points[0] != points[1]:
points = (np.min(points), np.max(points))
else:
if points[0] == N:
points = (points[0] - 1, points[0])
else:
points = (points[0], points[0] + 1)
cut_out = population[parents_ids[0]][points[0]:points[1]]
child_1[points[0]:points[1]] = cut_out
j = 0
for i in range(N):
if j == points[0]:
j = points[1]
if not np.any(cut_out == population[parents_ids[1]][i]):
child_1[j] = population[parents_ids[1]][i]
j += 1
cut_out = population[parents_ids[1]][points[0]:points[1]]
child_2[points[0]:points[1]] = cut_out
j = 0
for i in range(N):
if j == points[0]:
j = points[1]
if not np.any(cut_out == population[parents_ids[0]][i]):
child_2[j] = population[parents_ids[0]][i]
j += 1
new_population[2 * k, :] = child_1
new_population[2 * k + 1, :] = child_2
return new_population
@njit
def mutation(population: np.ndarray):
"""Perform mutation on a population."""
for i in range(population.shape[0]):
if np.random.random() > 0.7:
for _ in range(3):
points = np.random.randint(0, population.shape[1], 2)
tmp = population[i, points[0]]
population[i, points[0]] = population[i, points[1]]
population[i, points[1]] = tmp
def plot_genome_expression(genome: np.ndarray) -> None:
"""Plot a solution represented by the given genome."""
points = np.zeros((genome.shape[0], genome.shape[0]))
for i, g in enumerate(genome):
points[i, g] = 1
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(points, cmap='Purples')
ax.grid(True)
ax.set_xlim(-0.5, genome.shape[0] - 0.5)
ax.set_ylim(-0.5, genome.shape[0] - 0.5)
ax.set_xticks([i + 0.5 for i in range(genome.shape[0])])
ax.set_yticks([i + 0.5 for i in range(genome.shape[0])])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.tick_params(axis='both', which='both', bottom=False, left=False)
plt.title("$N = {}$".format(genome.shape[0]), size=15)
plt.show()
def main() -> None:
parameters = read_parameters('parameters.txt')
population = random_population(parameters['pop_size'], parameters['N'])
generation_data = []
best_member_id = 0
winner_gen = parameters['generations']
for i in range(1, parameters['generations'] + 1):
selected = selection(population, parameters['n_best'])
population = crossover(population, selected)
mutation(population)
gen_fit = fitness(population)
best_member_id = np.argmax(gen_fit)
generation_data.append([i, gen_fit.mean(), gen_fit[best_member_id]])
if gen_fit[best_member_id] == 1.0:
print("\nWinner (gen. {}):\n{}".format(
i, str(population[best_member_id])))
winner_gen = i
break
if i % 50 == 0:
print("Gen", i)
if parameters['plot_winner_genome']:
plot_genome_expression(population[best_member_id])
if __name__ == "__main__":
main()
|
[
"numpy.random.choice",
"numpy.random.permutation",
"numpy.random.random",
"matplotlib.pyplot.tick_params",
"numpy.argmax",
"numpy.any",
"numpy.max",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.empty_like",
"numpy.empty",
"numpy.min",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((1140, 1187), 'numpy.empty', 'np.empty', (['population.shape[0]'], {'dtype': 'np.float32'}), '(population.shape[0], dtype=np.float32)\n', (1148, 1187), True, 'import numpy as np\n'), ((2136, 2189), 'numpy.empty', 'np.empty', (['(population.shape[0] // 2,)'], {'dtype': 'np.int32'}), '((population.shape[0] // 2,), dtype=np.int32)\n', (2144, 2189), True, 'import numpy as np\n'), ((2698, 2723), 'numpy.empty_like', 'np.empty_like', (['population'], {}), '(population)\n', (2711, 2723), True, 'import numpy as np\n'), ((4671, 4715), 'numpy.zeros', 'np.zeros', (['(genome.shape[0], genome.shape[0])'], {}), '((genome.shape[0], genome.shape[0]))\n', (4679, 4715), True, 'import numpy as np\n'), ((4788, 4818), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4800, 4818), True, 'import matplotlib.pyplot as plt\n'), ((5145, 5213), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'left': '(False)'}), "(axis='both', which='both', bottom=False, left=False)\n", (5160, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5285, 5287), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2235), 'numpy.argsort', 'np.argsort', (['fitnesses'], {}), '(fitnesses)\n', (2224, 2235), True, 'import numpy as np\n'), ((2314, 2365), 'numpy.random.randint', 'np.random.randint', (['(0)', 'fitnesses.shape[0]'], {'size': '(2,)'}), '(0, fitnesses.shape[0], size=(2,))\n', (2331, 2365), True, 'import numpy as np\n'), ((2788, 2837), 'numpy.random.choice', 'np.random.choice', (['selected'], {'replace': '(False)', 'size': '(2)'}), '(selected, replace=False, size=2)\n', (2804, 2837), True, 'import numpy as np\n'), ((2856, 2897), 'numpy.empty_like', 'np.empty_like', (['population[parents_ids[0]]'], {}), '(population[parents_ids[0]])\n', (2869, 2897), True, 'import numpy as np\n'), ((2916, 2957), 'numpy.empty_like', 'np.empty_like', (['population[parents_ids[1]]'], {}), '(population[parents_ids[1]])\n', (2929, 2957), True, 'import numpy as np\n'), ((2975, 3005), 'numpy.random.randint', 'np.random.randint', (['(0)', '(N + 1)', '(2)'], {}), '(0, N + 1, 2)\n', (2992, 3005), True, 'import numpy as np\n'), ((5790, 5808), 'numpy.argmax', 'np.argmax', (['gen_fit'], {}), '(gen_fit)\n', (5799, 5808), True, 'import numpy as np\n'), ((897, 930), 'numpy.random.permutation', 'np.random.permutation', (['board_size'], {}), '(board_size)\n', (918, 930), True, 'import numpy as np\n'), ((4252, 4270), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4268, 4270), True, 'import numpy as np\n'), ((1823, 1838), 'numpy.sum', 'np.sum', (['diags_2'], {}), '(diags_2)\n', (1829, 1838), True, 'import numpy as np\n'), ((3063, 3077), 'numpy.min', 'np.min', (['points'], {}), '(points)\n', (3069, 3077), True, 'import numpy as np\n'), ((3079, 3093), 'numpy.max', 'np.max', (['points'], {}), '(points)\n', (3085, 3093), True, 'import numpy as np\n'), ((3496, 3544), 'numpy.any', 'np.any', (['(cut_out == population[parents_ids[1]][i])'], {}), '(cut_out == population[parents_ids[1]][i])\n', (3502, 3544), True, 'import numpy as np\n'), ((3862, 3910), 'numpy.any', 'np.any', (['(cut_out == population[parents_ids[0]][i])'], {}), '(cut_out == population[parents_ids[0]][i])\n', (3868, 3910), True, 'import numpy as np\n'), ((4334, 4378), 'numpy.random.randint', 'np.random.randint', (['(0)', 'population.shape[1]', '(2)'], {}), '(0, population.shape[1], 2)\n', (4351, 4378), 
True, 'import numpy as np\n'), ((1805, 1820), 'numpy.sum', 'np.sum', (['diags_1'], {}), '(diags_1)\n', (1811, 1820), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
u"""
radial_basis.py
Written by <NAME> (01/2022)
Interpolates data using radial basis functions
CALLING SEQUENCE:
ZI = radial_basis(xs, ys, zs, XI, YI, polynomial=0,
smooth=smooth, epsilon=epsilon, method='inverse')
INPUTS:
xs: scaled input X data
ys: scaled input Y data
zs: input data
XI: scaled grid X for output ZI
YI: scaled grid Y for output ZI
OUTPUTS:
ZI: interpolated data grid
OPTIONS:
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
default is mean Euclidean distance
polynomial: polynomial order if augmenting radial basis functions
default None: no polynomials
method: radial basis function
multiquadric
inverse_multiquadric or inverse (default)
inverse_quadratic
gaussian
linear (first-order polyharmonic spline)
cubic (third-order polyharmonic spline)
quintic (fifth-order polyharmonic spline)
thin_plate: thin-plate spline
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
REFERENCES:
<NAME>, Multiquadric equations of topography and other irregular
surfaces, J. Geophys. Res., 76(8), 1905-1915, 1971.
<NAME>, "Radial Basis Functions", Cambridge Monographs on Applied and
Computational Mathematics, 2003.
UPDATE HISTORY:
Updated 01/2022: added function docstrings
Updated 07/2021: using scipy spatial distance routines
Updated 09/2017: using rcond=-1 in numpy least-squares algorithms
Updated 01/2017: epsilon in polyharmonic splines (linear, cubic, quintic)
Updated 08/2016: using format text within ValueError, edit constant vector
added low-order polynomial option (previously used default constant)
Updated 01/2016: new hierarchical_radial_basis function
that first reduces to points within distance. added cutoff option
Updated 10/2014: added third dimension (spherical)
Written 08/2014
"""
from __future__ import print_function, division
import numpy as np
import scipy.spatial
def radial_basis(xs, ys, zs, XI, YI, smooth=0.0, metric='euclidean',
epsilon=None, method='inverse', polynomial=None):
"""
Interpolates data using radial basis functions
Arguments
---------
xs: scaled input x-coordinates
ys: scaled input y-coordinates
zs: input data
XI: scaled output x-coordinates for data grid
YI: scaled output y-coordinates for data grid
Keyword arguments
-----------------
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
method: radial basis function
- multiquadric
- inverse_multiquadric or inverse (default)
- inverse_quadratic
- gaussian
- linear (first-order polyharmonic spline)
- cubic (third-order polyharmonic spline)
- quintic (fifth-order polyharmonic spline)
- thin_plate: thin-plate spline
polynomial: polynomial order if augmenting radial basis functions
Returns
-------
ZI: interpolated data grid
"""
#-- remove singleton dimensions
xs = np.squeeze(xs)
ys = np.squeeze(ys)
zs = np.squeeze(zs)
XI = np.squeeze(XI)
YI = np.squeeze(YI)
#-- size of new matrix
if (np.ndim(XI) == 1):
nx = len(XI)
else:
nx,ny = np.shape(XI)
#-- Check to make sure sizes of input arguments are correct and consistent
if (len(zs) != len(xs)) | (len(zs) != len(ys)):
raise Exception('Length of X, Y, and Z must be equal')
if (np.shape(XI) != np.shape(YI)):
raise Exception('Size of XI and YI must be equal')
#-- create python dictionary of radial basis function formulas
radial_basis_functions = {}
radial_basis_functions['multiquadric'] = multiquadric
radial_basis_functions['inverse_multiquadric'] = inverse_multiquadric
radial_basis_functions['inverse'] = inverse_multiquadric
radial_basis_functions['inverse_quadratic'] = inverse_quadratic
radial_basis_functions['gaussian'] = gaussian
radial_basis_functions['linear'] = poly_spline1
radial_basis_functions['cubic'] = poly_spline3
radial_basis_functions['quintic'] = poly_spline5
radial_basis_functions['thin_plate'] = thin_plate
#-- check if formula name is listed
if method in radial_basis_functions.keys():
RBF = radial_basis_functions[method]
else:
raise ValueError("Method {0} not implemented".format(method))
#-- Creation of data distance matrix
#-- Data to Data
if (metric == 'brute'):
#-- use linear algebra to compute euclidean distances
Rd = distance_matrix(
np.array([xs, ys]),
np.array([xs, ys])
)
else:
#-- use scipy spatial distance routines
Rd = scipy.spatial.distance.cdist(
np.array([xs, ys]).T,
np.array([xs, ys]).T,
metric=metric)
#-- shape of distance matrix
N,M = np.shape(Rd)
#-- if epsilon is not specified
if epsilon is None:
#-- calculate norm with mean euclidean distance
uix,uiy = np.nonzero(np.tri(N,M=M,k=-1))
epsilon = np.mean(Rd[uix,uiy])
#-- possible augmentation of the PHI Matrix with polynomial Vectors
if polynomial is None:
#-- calculate radial basis function for data-to-data with smoothing
PHI = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
DMAT = zs.copy()
else:
#-- number of polynomial coefficients
nt = (polynomial**2 + 3*polynomial)//2 + 1
#-- calculate radial basis function for data-to-data with smoothing
PHI = np.zeros((N+nt,M+nt))
PHI[:N,:M] = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
#-- augmentation of PHI matrix with polynomials
POLY = polynomial_matrix(xs,ys,polynomial)
DMAT = np.concatenate(([zs,np.zeros((nt))]),axis=0)
#-- augment PHI matrix
for t in range(nt):
PHI[:N,M+t] = POLY[:,t]
PHI[N+t,:M] = POLY[:,t]
#-- Computation of the Weights
w = np.linalg.lstsq(PHI,DMAT[:,np.newaxis],rcond=-1)[0]
#-- Computation of distance Matrix
#-- Computation of distance Matrix (data to mesh points)
if (metric == 'brute'):
#-- use linear algebra to compute euclidean distances
Re = distance_matrix(
np.array([XI.flatten(),YI.flatten()]),
np.array([xs,ys])
)
else:
#-- use scipy spatial distance routines
Re = scipy.spatial.distance.cdist(
np.array([XI.flatten(),YI.flatten()]).T,
np.array([xs, ys]).T,
metric=metric)
#-- calculate radial basis function for data-to-mesh matrix
E = RBF(epsilon,Re)
#-- possible augmentation of the Evaluation Matrix with polynomial vectors
if polynomial is not None:
P = polynomial_matrix(XI.flatten(),YI.flatten(),polynomial)
E = np.concatenate(([E, P]),axis=1)
#-- calculate output interpolated array (or matrix)
if (np.ndim(XI) == 1):
ZI = np.squeeze(np.dot(E,w))
else:
ZI = np.zeros((nx,ny))
ZI[:,:] = np.dot(E,w).reshape(nx,ny)
#-- return the interpolated array (or matrix)
return ZI
#-- define radial basis function formulas
def multiquadric(epsilon, r):
    #-- multiquadric
f = np.sqrt((epsilon*r)**2 + 1.0)
return f
def inverse_multiquadric(epsilon, r):
    #-- inverse multiquadric
f = 1.0/np.sqrt((epsilon*r)**2 + 1.0)
return f
def inverse_quadratic(epsilon, r):
#-- inverse quadratic
f = 1.0/(1.0+(epsilon*r)**2)
return f
def gaussian(epsilon, r):
#-- gaussian
f = np.exp(-(epsilon*r)**2)
return f
def poly_spline1(epsilon, r):
#-- First-order polyharmonic spline
f = (epsilon*r)
return f
def poly_spline3(epsilon, r):
#-- Third-order polyharmonic spline
f = (epsilon*r)**3
return f
def poly_spline5(epsilon, r):
#-- Fifth-order polyharmonic spline
f = (epsilon*r)**5
return f
def thin_plate(epsilon, r):
#-- thin plate spline
f = r**2 * np.log(r)
#-- the spline is zero at zero
f[r == 0] = 0.0
return f
#-- calculate Euclidean distances between points as matrices
def distance_matrix(x,cntrs):
s,M = np.shape(x)
s,N = np.shape(cntrs)
D = np.zeros((M,N))
for d in range(s):
        ii, = np.dot(d,np.ones((1,N))).astype(int)
        jj, = np.dot(d,np.ones((1,M))).astype(int)
dx = x[ii,:].transpose() - cntrs[jj,:]
D += dx**2
D = np.sqrt(D)
return D
#-- calculate polynomial matrix to augment radial basis functions
def polynomial_matrix(x,y,order):
c = 0
M = len(x)
N = (order**2 + 3*order)//2 + 1
POLY = np.zeros((M,N))
for ii in range(order + 1):
for jj in range(ii + 1):
POLY[:,c] = (x**jj)*(y**(ii-jj))
c += 1
return POLY
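#-- illustrative usage sketch (not part of the original module): interpolate
#-- scattered samples of a smooth test surface onto a regular grid. The sample
#-- values and grid bounds below are arbitrary demonstration data.
if __name__ == '__main__':
    #-- scattered input coordinates and data values
    np.random.seed(0)
    xs = np.random.uniform(-3.0, 3.0, 200)
    ys = np.random.uniform(-3.0, 3.0, 200)
    zs = np.exp(-(xs**2 + ys**2)/2.0)
    #-- regular output grid
    XI, YI = np.meshgrid(np.linspace(-3.0, 3.0, 61), np.linspace(-3.0, 3.0, 61))
    #-- interpolate with a first-order polyharmonic spline augmented by a
    #-- first-order polynomial and light smoothing
    ZI = radial_basis(xs, ys, zs, XI, YI, smooth=1e-3,
        method='linear', polynomial=1)
    print(ZI.shape)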
|
[
"numpy.mean",
"numpy.eye",
"numpy.sqrt",
"numpy.ones",
"numpy.log",
"numpy.ndim",
"numpy.squeeze",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.lstsq",
"numpy.concatenate",
"numpy.shape",
"numpy.tri"
] |
[((3345, 3359), 'numpy.squeeze', 'np.squeeze', (['xs'], {}), '(xs)\n', (3355, 3359), True, 'import numpy as np\n'), ((3369, 3383), 'numpy.squeeze', 'np.squeeze', (['ys'], {}), '(ys)\n', (3379, 3383), True, 'import numpy as np\n'), ((3393, 3407), 'numpy.squeeze', 'np.squeeze', (['zs'], {}), '(zs)\n', (3403, 3407), True, 'import numpy as np\n'), ((3417, 3431), 'numpy.squeeze', 'np.squeeze', (['XI'], {}), '(XI)\n', (3427, 3431), True, 'import numpy as np\n'), ((3441, 3455), 'numpy.squeeze', 'np.squeeze', (['YI'], {}), '(YI)\n', (3451, 3455), True, 'import numpy as np\n'), ((5196, 5208), 'numpy.shape', 'np.shape', (['Rd'], {}), '(Rd)\n', (5204, 5208), True, 'import numpy as np\n'), ((7558, 7591), 'numpy.sqrt', 'np.sqrt', (['((epsilon * r) ** 2 + 1.0)'], {}), '((epsilon * r) ** 2 + 1.0)\n', (7565, 7591), True, 'import numpy as np\n'), ((7886, 7913), 'numpy.exp', 'np.exp', (['(-(epsilon * r) ** 2)'], {}), '(-(epsilon * r) ** 2)\n', (7892, 7913), True, 'import numpy as np\n'), ((8491, 8502), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (8499, 8502), True, 'import numpy as np\n'), ((8513, 8528), 'numpy.shape', 'np.shape', (['cntrs'], {}), '(cntrs)\n', (8521, 8528), True, 'import numpy as np\n'), ((8537, 8553), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (8545, 8553), True, 'import numpy as np\n'), ((8758, 8768), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (8765, 8768), True, 'import numpy as np\n'), ((8955, 8971), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (8963, 8971), True, 'import numpy as np\n'), ((3491, 3502), 'numpy.ndim', 'np.ndim', (['XI'], {}), '(XI)\n', (3498, 3502), True, 'import numpy as np\n'), ((3557, 3569), 'numpy.shape', 'np.shape', (['XI'], {}), '(XI)\n', (3565, 3569), True, 'import numpy as np\n'), ((3773, 3785), 'numpy.shape', 'np.shape', (['XI'], {}), '(XI)\n', (3781, 3785), True, 'import numpy as np\n'), ((3789, 3801), 'numpy.shape', 'np.shape', (['YI'], {}), '(YI)\n', (3797, 3801), True, 'import numpy as np\n'), ((5392, 5413), 'numpy.mean', 'np.mean', (['Rd[uix, uiy]'], {}), '(Rd[uix, uiy])\n', (5399, 5413), True, 'import numpy as np\n'), ((5865, 5891), 'numpy.zeros', 'np.zeros', (['(N + nt, M + nt)'], {}), '((N + nt, M + nt))\n', (5873, 5891), True, 'import numpy as np\n'), ((6290, 6341), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['PHI', 'DMAT[:, np.newaxis]'], {'rcond': '(-1)'}), '(PHI, DMAT[:, np.newaxis], rcond=-1)\n', (6305, 6341), True, 'import numpy as np\n'), ((7152, 7182), 'numpy.concatenate', 'np.concatenate', (['[E, P]'], {'axis': '(1)'}), '([E, P], axis=1)\n', (7166, 7182), True, 'import numpy as np\n'), ((7248, 7259), 'numpy.ndim', 'np.ndim', (['XI'], {}), '(XI)\n', (7255, 7259), True, 'import numpy as np\n'), ((7327, 7345), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (7335, 7345), True, 'import numpy as np\n'), ((7683, 7716), 'numpy.sqrt', 'np.sqrt', (['((epsilon * r) ** 2 + 1.0)'], {}), '((epsilon * r) ** 2 + 1.0)\n', (7690, 7716), True, 'import numpy as np\n'), ((8311, 8320), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (8317, 8320), True, 'import numpy as np\n'), ((4892, 4910), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (4900, 4910), True, 'import numpy as np\n'), ((4924, 4942), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (4932, 4942), True, 'import numpy as np\n'), ((5354, 5374), 'numpy.tri', 'np.tri', (['N'], {'M': 'M', 'k': '(-1)'}), '(N, M=M, k=-1)\n', (5360, 5374), True, 'import numpy as np\n'), ((6626, 6644), 'numpy.array', 'np.array', (['[xs, ys]'], {}), 
'([xs, ys])\n', (6634, 6644), True, 'import numpy as np\n'), ((7291, 7303), 'numpy.dot', 'np.dot', (['E', 'w'], {}), '(E, w)\n', (7297, 7303), True, 'import numpy as np\n'), ((5070, 5088), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (5078, 5088), True, 'import numpy as np\n'), ((5104, 5122), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (5112, 5122), True, 'import numpy as np\n'), ((5622, 5636), 'numpy.eye', 'np.eye', (['N'], {'M': 'M'}), '(N, M=M)\n', (5628, 5636), True, 'import numpy as np\n'), ((5927, 5941), 'numpy.eye', 'np.eye', (['N'], {'M': 'M'}), '(N, M=M)\n', (5933, 5941), True, 'import numpy as np\n'), ((6090, 6102), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (6098, 6102), True, 'import numpy as np\n'), ((6824, 6842), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (6832, 6842), True, 'import numpy as np\n'), ((7363, 7375), 'numpy.dot', 'np.dot', (['E', 'w'], {}), '(E, w)\n', (7369, 7375), True, 'import numpy as np\n'), ((8599, 8614), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (8606, 8614), True, 'import numpy as np\n'), ((8653, 8668), 'numpy.ones', 'np.ones', (['(1, M)'], {}), '((1, M))\n', (8660, 8668), True, 'import numpy as np\n')]
|
import os
import sys
import glob
import time
import copy
import random
import numpy as np
import utils
import logging
import argparse
import tensorflow as tf
import tensorflow.keras as keras
from model import NASNetworkCIFAR
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Basic model parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
choices=['train', 'test'])
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'])
parser.add_argument('--model_dir', type=str, default='models')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--eval_batch_size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=600)
parser.add_argument('--cells', type=int, default=6)
parser.add_argument('--nodes', type=int, default=5)
parser.add_argument('--channels', type=int, default=36)
parser.add_argument('--cutout_size', type=int, default=8)
parser.add_argument('--grad_bound', type=float, default=10.0)
parser.add_argument('--initial_lr', type=float, default=0.025)
parser.add_argument('--keep_prob', type=float, default=0.6)
parser.add_argument('--drop_path_keep_prob', type=float, default=0.8)
parser.add_argument('--l2_reg', type=float, default=3e-4)
parser.add_argument('--arch', type=str, default=None)
parser.add_argument('--use_aux_head', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=9)
parser.add_argument('--train_from_scratch', action='store_true', default=False)
args = parser.parse_args()
utils.create_exp_dir(args.model_dir)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
def train(train_ds, model, optimizer, global_step, criterion, classes=10):
objs = utils.AvgMeter()
top1 = utils.AvgMeter()
top5 = utils.AvgMeter()
for step, (input, labels) in enumerate(train_ds):
global_step.assign_add(1)
with tf.GradientTape() as tape:
logits, aux_logits = model(input, global_step, training=True)
loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), logits)
if aux_logits is not None:
aux_loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), aux_logits)
loss += 0.4 * aux_loss
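            # note: this term is l2_reg times the global L2 norm of all trainable weights
            # (square root of the summed squares), rather than the usual sum of squared
            # weights; it is added to the loss before gradients are taken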
reg_loss = args.l2_reg * tf.sqrt(
tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in model.trainable_variables]))
loss += reg_loss
gradients = tape.gradient(loss, model.trainable_variables)
if args.grad_bound != 0.0:
            gradients, _ = tf.clip_by_global_norm(gradients, args.grad_bound)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
################################################################################################################
acc1, acc5 = utils.accuracy(tf.nn.softmax(logits, axis=-1), tf.squeeze(labels), topk=(1, 5))
batch_size = input.shape[0]
objs.update(loss.numpy(), batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
if (step + 1) % 100 == 0:
print('train step {} loss {} top1 {} top5 {}'.format(step + 1, objs.avg, top1.avg, top5.avg))
logging.info('train step %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg, global_step
def valid(valid_ds, model, criterion, classes=10):
objs = utils.AvgMeter()
top1 = utils.AvgMeter()
top5 = utils.AvgMeter()
for step, (input, labels) in enumerate(valid_ds):
logits, _ = model(input, training=False)
loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), logits)
acc1, acc5 = utils.accuracy(tf.nn.softmax(logits, axis=-1), tf.squeeze(labels), topk=(1, 5))
batch_size = input.shape[0]
objs.update(loss.numpy(), batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
if (step + 1) % 100 == 0:
print('valid step {} loss {} top1 {} top5 {}'.format(step + 1, objs.avg, top1.avg, top5.avg))
logging.info('valid step %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
def train_cifar10():
logging.info("Args = %s", args)
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
global_step = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
epoch = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
best_acc_top1 = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32)
################################################ model setup #######################################################
train_ds, test_ds = utils.load_cifar10(args.batch_size, args.cutout_size)
total_steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
model = NASNetworkCIFAR(classes=10,
reduce_distance=args.cells,
num_nodes=args.nodes,
channels=args.channels,
keep_prob=args.keep_prob,
drop_path_keep_prob=args.drop_path_keep_prob,
use_aux_head=args.use_aux_head,
steps=total_steps,
arch=args.arch)
temp_ = tf.random.uniform((64,32,32,3), minval=0, maxval=1, dtype=tf.float32)
temp_ = model(temp_, step=1, training=True)
model.summary()
model_size = utils.count_parameters_in_MB(model)
print("param size = {} MB".format(model_size))
logging.info("param size = %fMB", model_size)
criterion = keras.losses.CategoricalCrossentropy(from_logits=True)
learning_rate = keras.experimental.CosineDecay(initial_learning_rate=args.initial_lr,
decay_steps=total_steps, alpha=0.0001)
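    # cosine schedule: the learning rate decays from initial_lr along half a cosine
    # period down to roughly alpha * initial_lr over total_steps optimizer updates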
# learning_rate = keras.optimizers.schedules.ExponentialDecay(
# initial_learning_rate=args.initial_lr, decay_steps=total_steps, decay_rate=0.99, staircase=False, name=None
# )
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
########################################## restore checkpoint ######################################################
if args.train_from_scratch:
utils.clean_dir(args.model_dir)
checkpoint_path = os.path.join(args.model_dir, 'checkpoints')
ckpt = tf.train.Checkpoint(model=model,
optimizer=optimizer,
global_step=global_step,
epoch=epoch,
best_acc_top1=best_acc_top1)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=3)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
############################################# training process #####################################################
acc_train_result = []
loss_train_result = []
acc_test_result = []
loss_test_result = []
while epoch.numpy() < args.epochs:
print('epoch {} lr {}'.format(epoch.numpy(), optimizer._decayed_lr(tf.float32)))
train_acc, train_loss, step = train(train_ds, model, optimizer, global_step, criterion, classes=10)
test_acc, test_loss = valid(test_ds, model, criterion, classes=10)
acc_train_result.append(train_acc)
loss_train_result.append(train_loss)
acc_test_result.append(test_acc)
loss_test_result.append(test_loss)
logging.info('epoch %d lr %e', epoch.numpy(), optimizer._decayed_lr(tf.float32))
logging.info(acc_train_result)
logging.info(loss_train_result)
logging.info(acc_test_result)
logging.info(loss_test_result)
is_best = False
        if test_acc > best_acc_top1.numpy():
            best_acc_top1.assign(test_acc)
is_best = True
epoch.assign_add(1)
if (epoch.numpy() + 1) % 1 == 0:
ckpt_save_path = ckpt_manager.save()
print('Saving checkpoint for epoch {} at {}'.format(epoch.numpy() + 1, ckpt_save_path))
if is_best:
pass
utils.plot_single_list(acc_train_result, x_label='epochs', y_label='acc', file_name='acc_train')
utils.plot_single_list(loss_train_result, x_label='epochs', y_label='loss', file_name='loss_train')
utils.plot_single_list(acc_test_result, x_label='epochs', y_label='acc', file_name='acc_test')
utils.plot_single_list(loss_test_result, x_label='epochs', y_label='loss', file_name='loss_test')
if __name__ == '__main__':
import time
start_time = time.time()
train_cifar10()
print("--- %s seconds ---" % (time.time() - start_time))
|
[
"tensorflow.train.Checkpoint",
"model.NASNetworkCIFAR",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"tensorflow.keras.losses.CategoricalCrossentropy",
"utils.AvgMeter",
"utils.count_parameters_in_MB",
"logging.info",
"tensorflow.clip_by_global_norm",
"argparse.ArgumentParser",
"tensorflow.keras.optimizers.SGD",
"utils.clean_dir",
"numpy.random.seed",
"tensorflow.square",
"tensorflow.train.CheckpointManager",
"utils.create_exp_dir",
"tensorflow.random.uniform",
"numpy.ceil",
"tensorflow.Variable",
"tensorflow.keras.experimental.CosineDecay",
"utils.plot_single_list",
"time.time",
"logging.basicConfig",
"tensorflow.random.set_seed",
"utils.load_cifar10",
"os.path.join",
"tensorflow.squeeze"
] |
[((346, 371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (369, 371), False, 'import argparse\n'), ((1616, 1652), 'utils.create_exp_dir', 'utils.create_exp_dir', (['args.model_dir'], {}), '(args.model_dir)\n', (1636, 1652), False, 'import utils\n'), ((1692, 1803), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (1711, 1803), False, 'import logging\n'), ((1886, 1902), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (1900, 1902), False, 'import utils\n'), ((1914, 1930), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (1928, 1930), False, 'import utils\n'), ((1942, 1958), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (1956, 1958), False, 'import utils\n'), ((3574, 3590), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (3588, 3590), False, 'import utils\n'), ((3602, 3618), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (3616, 3618), False, 'import utils\n'), ((3630, 3646), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (3644, 3646), False, 'import utils\n'), ((4384, 4415), 'logging.info', 'logging.info', (['"""Args = %s"""', 'args'], {}), "('Args = %s', args)\n", (4396, 4415), False, 'import logging\n'), ((4420, 4445), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4434, 4445), True, 'import numpy as np\n'), ((4450, 4479), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['args.seed'], {}), '(args.seed)\n', (4468, 4479), True, 'import tensorflow as tf\n'), ((4499, 4560), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'trainable': '(False)', 'dtype': 'tf.int32'}), '(initial_value=0, trainable=False, dtype=tf.int32)\n', (4510, 4560), True, 'import tensorflow as tf\n'), ((4573, 4634), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'trainable': '(False)', 'dtype': 'tf.int32'}), '(initial_value=0, trainable=False, dtype=tf.int32)\n', (4584, 4634), True, 'import tensorflow as tf\n'), ((4655, 4720), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0.0)', 'trainable': '(False)', 'dtype': 'tf.float32'}), '(initial_value=0.0, trainable=False, dtype=tf.float32)\n', (4666, 4720), True, 'import tensorflow as tf\n'), ((4867, 4920), 'utils.load_cifar10', 'utils.load_cifar10', (['args.batch_size', 'args.cutout_size'], {}), '(args.batch_size, args.cutout_size)\n', (4885, 4920), False, 'import utils\n'), ((5004, 5258), 'model.NASNetworkCIFAR', 'NASNetworkCIFAR', ([], {'classes': '(10)', 'reduce_distance': 'args.cells', 'num_nodes': 'args.nodes', 'channels': 'args.channels', 'keep_prob': 'args.keep_prob', 'drop_path_keep_prob': 'args.drop_path_keep_prob', 'use_aux_head': 'args.use_aux_head', 'steps': 'total_steps', 'arch': 'args.arch'}), '(classes=10, reduce_distance=args.cells, num_nodes=args.\n nodes, channels=args.channels, keep_prob=args.keep_prob,\n drop_path_keep_prob=args.drop_path_keep_prob, use_aux_head=args.\n use_aux_head, steps=total_steps, arch=args.arch)\n', (5019, 5258), False, 'from model import NASNetworkCIFAR\n'), ((5482, 5554), 'tensorflow.random.uniform', 'tf.random.uniform', (['(64, 32, 32, 3)'], {'minval': '(0)', 'maxval': '(1)', 'dtype': 'tf.float32'}), '((64, 32, 32, 3), minval=0, maxval=1, dtype=tf.float32)\n', (5499, 5554), True, 'import tensorflow as tf\n'), ((5637, 5672), 'utils.count_parameters_in_MB', 
'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (5665, 5672), False, 'import utils\n'), ((5728, 5773), 'logging.info', 'logging.info', (['"""param size = %fMB"""', 'model_size'], {}), "('param size = %fMB', model_size)\n", (5740, 5773), False, 'import logging\n'), ((5791, 5845), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5827, 5845), True, 'import tensorflow.keras as keras\n'), ((5866, 5978), 'tensorflow.keras.experimental.CosineDecay', 'keras.experimental.CosineDecay', ([], {'initial_learning_rate': 'args.initial_lr', 'decay_steps': 'total_steps', 'alpha': '(0.0001)'}), '(initial_learning_rate=args.initial_lr,\n decay_steps=total_steps, alpha=0.0001)\n', (5896, 5978), True, 'import tensorflow.keras as keras\n'), ((6235, 6287), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6258, 6287), True, 'import tensorflow as tf\n'), ((6506, 6549), 'os.path.join', 'os.path.join', (['args.model_dir', '"""checkpoints"""'], {}), "(args.model_dir, 'checkpoints')\n", (6518, 6549), False, 'import os\n'), ((6561, 6686), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model', 'optimizer': 'optimizer', 'global_step': 'global_step', 'epoch': 'epoch', 'best_acc_top1': 'best_acc_top1'}), '(model=model, optimizer=optimizer, global_step=\n global_step, epoch=epoch, best_acc_top1=best_acc_top1)\n', (6580, 6686), True, 'import tensorflow as tf\n'), ((6825, 6889), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'checkpoint_path'], {'max_to_keep': '(3)'}), '(ckpt, checkpoint_path, max_to_keep=3)\n', (6851, 6889), True, 'import tensorflow as tf\n'), ((8433, 8533), 'utils.plot_single_list', 'utils.plot_single_list', (['acc_train_result'], {'x_label': '"""epochs"""', 'y_label': '"""acc"""', 'file_name': '"""acc_train"""'}), "(acc_train_result, x_label='epochs', y_label='acc',\n file_name='acc_train')\n", (8455, 8533), False, 'import utils\n'), ((8534, 8637), 'utils.plot_single_list', 'utils.plot_single_list', (['loss_train_result'], {'x_label': '"""epochs"""', 'y_label': '"""loss"""', 'file_name': '"""loss_train"""'}), "(loss_train_result, x_label='epochs', y_label='loss',\n file_name='loss_train')\n", (8556, 8637), False, 'import utils\n'), ((8638, 8736), 'utils.plot_single_list', 'utils.plot_single_list', (['acc_test_result'], {'x_label': '"""epochs"""', 'y_label': '"""acc"""', 'file_name': '"""acc_test"""'}), "(acc_test_result, x_label='epochs', y_label='acc',\n file_name='acc_test')\n", (8660, 8736), False, 'import utils\n'), ((8737, 8838), 'utils.plot_single_list', 'utils.plot_single_list', (['loss_test_result'], {'x_label': '"""epochs"""', 'y_label': '"""loss"""', 'file_name': '"""loss_test"""'}), "(loss_test_result, x_label='epochs', y_label='loss',\n file_name='loss_test')\n", (8759, 8838), False, 'import utils\n'), ((8896, 8907), 'time.time', 'time.time', ([], {}), '()\n', (8905, 8907), False, 'import time\n'), ((6451, 6482), 'utils.clean_dir', 'utils.clean_dir', (['args.model_dir'], {}), '(args.model_dir)\n', (6466, 6482), False, 'import utils\n'), ((7899, 7929), 'logging.info', 'logging.info', (['acc_train_result'], {}), '(acc_train_result)\n', (7911, 7929), False, 'import logging\n'), ((7938, 7969), 'logging.info', 'logging.info', (['loss_train_result'], {}), '(loss_train_result)\n', (7950, 7969), False, 'import logging\n'), ((7978, 8007), 
'logging.info', 'logging.info', (['acc_test_result'], {}), '(acc_test_result)\n', (7990, 8007), False, 'import logging\n'), ((8016, 8046), 'logging.info', 'logging.info', (['loss_test_result'], {}), '(loss_test_result)\n', (8028, 8046), False, 'import logging\n'), ((2061, 2078), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2076, 2078), True, 'import tensorflow as tf\n'), ((2721, 2758), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', '(15)'], {}), '(gradients, 15)\n', (2743, 2758), True, 'import tensorflow as tf\n'), ((2994, 3024), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3007, 3024), True, 'import tensorflow as tf\n'), ((3026, 3044), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (3036, 3044), True, 'import tensorflow as tf\n'), ((3370, 3469), 'logging.info', 'logging.info', (['"""train step %03d loss %e top1 %f top5 %f"""', '(step + 1)', 'objs.avg', 'top1.avg', 'top5.avg'], {}), "('train step %03d loss %e top1 %f top5 %f', step + 1, objs.avg,\n top1.avg, top5.avg)\n", (3382, 3469), False, 'import logging\n'), ((3868, 3898), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3881, 3898), True, 'import tensorflow as tf\n'), ((3900, 3918), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (3910, 3918), True, 'import tensorflow as tf\n'), ((4244, 4329), 'logging.info', 'logging.info', (['"""valid step %03d %e %f %f"""', '(step + 1)', 'objs.avg', 'top1.avg', 'top5.avg'], {}), "('valid step %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg\n )\n", (4256, 4329), False, 'import logging\n'), ((4943, 4975), 'numpy.ceil', 'np.ceil', (['(50000 / args.batch_size)'], {}), '(50000 / args.batch_size)\n', (4950, 4975), True, 'import numpy as np\n'), ((3787, 3805), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (3797, 3805), True, 'import tensorflow as tf\n'), ((8962, 8973), 'time.time', 'time.time', ([], {}), '()\n', (8971, 8973), False, 'import time\n'), ((2202, 2220), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (2212, 2220), True, 'import tensorflow as tf\n'), ((2333, 2351), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (2343, 2351), True, 'import tensorflow as tf\n'), ((2511, 2523), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (2520, 2523), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : live_visualisation.py
# Author : <NAME> <<EMAIL>>
# Date : 10.04.2020
# Last Modified By: <NAME> <<EMAIL>>
from djitellopy.realtime_plot.RealtimePlotter import *
import redis
import numpy as np
import traceback
import matplotlib
# define data to get from db
# sensorMeshList = ['baro', 'h', 'tof', 'runtime']
# row = len(sensorMeshList)
data_len = 300
plot_update_interval = 0.005
datasource = redis.StrictRedis(host='localhost', port=6379, db=0)
plt.figure()
baro_axes = plt.subplot(3, 1, 1)
plt.title('tello_edu sensors')
baro_data_list = ['baro', 'runtime']
baro_ylim = [-47, -57]
baro_option = DataplotOption.TIMESTAMP_CUSTOM
baro_dataplot = DataPlot(2, data_len, option=baro_option)
baro_plot = RealtimePlotter(baro_dataplot)
baro_plot.config_plots(baro_axes, y_labels=baro_data_list, ylim=baro_ylim)
baro_plot.axes.set_xlabel('time in ms')
baro_plot.axes.set_ylabel('barometer in cmHg')
tof_axes = plt.subplot(3, 1, 2)
tof_data_list = ['tof', 'runtime']
tof_ylim = [-10, 500]
tof_option = DataplotOption.TIMESTAMP_CUSTOM
tof_dataplot = DataPlot(2, data_len, option=tof_option)
tof_plot = RealtimePlotter(tof_dataplot)
tof_plot.config_plots(tof_axes, y_labels=tof_data_list, ylim=tof_ylim)
tof_plot.axes.set_xlabel('time in ms')
tof_plot.axes.set_ylabel('vertical distance in cm')
h_axes = plt.subplot(3, 1, 3)
h_ylim = [-50, 300]
h_data_list = ['h', 'runtime']
h_option = DataplotOption.TIMESTAMP_CUSTOM
h_dataplot = DataPlot(2, data_len, option=h_option)
h_plot = RealtimePlotter(h_dataplot)
h_plot.config_plots(h_axes, y_labels=h_data_list, ylim=h_ylim)
h_plot.axes.set_xlabel('time in ms')
h_plot.axes.set_ylabel('height in cm')
if __name__ == "__main__":
while True:
# get new data from database and plot
# baro
baro_plot.dataplot.clear_data_regs()
new_data = []
for sensor in baro_data_list:
new_sensor_data = datasource.lrange(sensor, 0, data_len)
# reverse, bc first element is the newest (not the oldest like deque)
new_sensor_data.reverse()
new_data.append(new_sensor_data)
try:
            baro_y = np.array(new_data[:-1], dtype=float)
baro_x = np.array(new_data[-1], dtype=np.int64)
baro_plot.dataplot.append(
y=baro_y, x=baro_x, single=False)
baro_plot.plot_data()
except Exception as e:
print(e)
# tof
tof_plot.dataplot.clear_data_regs()
new_data = []
for sensor in tof_data_list:
new_sensor_data = datasource.lrange(sensor, 0, data_len)
# reverse, bc first element is the newest (not the oldest like deque)
new_sensor_data.reverse()
new_data.append(new_sensor_data)
try:
            tof_y = np.array(new_data[:-1], dtype=float)
tof_x = np.array(new_data[-1], dtype=np.int64)
tof_plot.dataplot.append(
y=tof_y, x=tof_x, single=False)
tof_plot.plot_data()
except Exception as e:
print(e)
# height
h_plot.dataplot.clear_data_regs()
new_data = []
for sensor in h_data_list:
new_sensor_data = datasource.lrange(sensor, 0, data_len)
# reverse, bc first element is the newest (not the oldest like deque)
new_sensor_data.reverse()
new_data.append(new_sensor_data)
try:
            h_y = np.array(new_data[:-1], dtype=float)
h_x = np.array(new_data[-1], dtype=np.int64)
h_plot.dataplot.append(
y=h_y, x=h_x, single=False)
h_plot.plot_data()
except Exception as e:
print(e)
plt.pause(plot_update_interval)
input("Exit(press any key)?")
|
[
"numpy.array",
"redis.StrictRedis"
] |
[((484, 536), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""localhost"""', 'port': '(6379)', 'db': '(0)'}), "(host='localhost', port=6379, db=0)\n", (501, 536), False, 'import redis\n'), ((2214, 2253), 'numpy.array', 'np.array', (['new_data[:-1]'], {'dtype': 'np.float'}), '(new_data[:-1], dtype=np.float)\n', (2222, 2253), True, 'import numpy as np\n'), ((2275, 2313), 'numpy.array', 'np.array', (['new_data[-1]'], {'dtype': 'np.int64'}), '(new_data[-1], dtype=np.int64)\n', (2283, 2313), True, 'import numpy as np\n'), ((2873, 2912), 'numpy.array', 'np.array', (['new_data[:-1]'], {'dtype': 'np.float'}), '(new_data[:-1], dtype=np.float)\n', (2881, 2912), True, 'import numpy as np\n'), ((2933, 2971), 'numpy.array', 'np.array', (['new_data[-1]'], {'dtype': 'np.int64'}), '(new_data[-1], dtype=np.int64)\n', (2941, 2971), True, 'import numpy as np\n'), ((3524, 3563), 'numpy.array', 'np.array', (['new_data[:-1]'], {'dtype': 'np.float'}), '(new_data[:-1], dtype=np.float)\n', (3532, 3563), True, 'import numpy as np\n'), ((3582, 3620), 'numpy.array', 'np.array', (['new_data[-1]'], {'dtype': 'np.int64'}), '(new_data[-1], dtype=np.int64)\n', (3590, 3620), True, 'import numpy as np\n')]
|
import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
from scipy import signal
import matplotlib.image as mpimg
# matplotlib.use('Agg')
# define normalized 2D gaussian
def gaus2d(x, y, mx, my, sx, sy):
return 1. / (2. * np.pi * sx * sy) * np.exp(-((x - mx)**2. / (2. * sx**2.) + (y - my)**2. / (2. * sy**2.)))
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
ellipse = Ellipse(xy=(0,0), width=3.6, height=1.8, edgecolor='r', lw=2, facecolor='none')
x = np.linspace(0, 10, 101)
y = np.linspace(0, 10, 101)
x1, y1 = np.meshgrid(x, y) # get 2D variables instead of 1D
z1 = gaus2d(x1, y1, 5, 5, 2.7, 1.35)
z1_copy = z1.copy()
z1 = z1/z1.max()
x2, y2 = np.meshgrid(x, y) # get 2D variables instead of 1D
z2 = gaus2d(x2, y2, 5, 5, 0.9, 0.45)
z2_copy = z2.copy()
z2 = z2/z2.max()
dog_not_norm = z1 - z2
dog = (z1 - z2)/np.max(z1-z2)
dog[dog<0] = 0
# path
# path1 = 'image_puck.png'
# img1 = mpimg.imread(path1)
# gray1 = rgb2gray(img1)
# img1 = (np.array(gray1))[0:84, 0:84]
# path2 = 'circle.png'
# img2 = mpimg.imread(path2)
# gray2 = rgb2gray(img2)
# img2 = (np.array(gray1))[0:84, 0:84]
# img_conv = signal.convolve2d(img1, z1)
# # img_product = img1 * img2
#
# # Displaying the image
# fig1 = plt.figure()
#
# plt.imshow(img_conv)
# plt.show()
# fig2 = plt.figure()
# plt.imshow(img)
# plt.show()
fig = plt.figure()
ax1 = fig.add_subplot(3,2,5)
ax1.add_artist(ellipse)
im = ax1.imshow(dog, cmap="viridis", extent=(-5, 5, -5, 5))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.title.set_text('dog 2D')
cbar = fig.colorbar(im, ax=ax1)
ax2 = fig.add_subplot(3,2,6,projection='3d')
ax2.contour3D(x, y, dog, 100, cmap=cm.viridis)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_zlabel('z')
ax2.title.set_text('dog 3D')
ax3 = fig.add_subplot(3,2,1)
im1 = ax3.imshow(z1, cmap="viridis", extent=(-5, 5, -5, 5))
ax3.set_xlabel('x')
ax3.set_ylabel('y')
ax3.title.set_text('g1 2D')
ax4 = fig.add_subplot(3,2,2,projection='3d')
ax4.contour3D(x, y, z1, 50, cmap=cm.viridis)
ax4.set_xlabel('x')
ax4.set_ylabel('y')
ax4.set_zlabel('z')
ax4.title.set_text('g1 3D')
ax5 = fig.add_subplot(3,2,3)
im2 = ax5.imshow(z2, cmap="viridis", extent=(-5, 5, -5, 5))
ax5.set_xlabel('x')
ax5.set_ylabel('y')
ax5.title.set_text('g2 2D')
ax6 = fig.add_subplot(3,2,4,projection='3d')
ax6.contour3D(x, y, z2, 50, cmap=cm.viridis)
ax6.set_xlabel('x')
ax6.set_ylabel('y')
ax6.set_zlabel('z')
ax6.title.set_text('g2 3D')
plt.show()
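# illustrative sketch (not part of the original script): apply the DoG kernel as a
# band-pass filter by convolving it with a synthetic test image (a bright square on a
# dark background, chosen only to make the edge response visible).
test_img = np.zeros((101, 101))
test_img[35:65, 35:65] = 1.0
filtered = signal.convolve2d(test_img, dog_not_norm, mode='same', boundary='symm')
fig2, (axa, axb) = plt.subplots(1, 2)
axa.imshow(test_img, cmap='gray')
axa.title.set_text('input')
axb.imshow(filtered, cmap='viridis')
axb.title.set_text('DoG response')
plt.show()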
|
[
"numpy.max",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.meshgrid",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.show"
] |
[((487, 572), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': '(0, 0)', 'width': '(3.6)', 'height': '(1.8)', 'edgecolor': '"""r"""', 'lw': '(2)', 'facecolor': '"""none"""'}), "(xy=(0, 0), width=3.6, height=1.8, edgecolor='r', lw=2, facecolor='none'\n )\n", (494, 572), False, 'from matplotlib.patches import Ellipse\n'), ((571, 594), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (582, 594), True, 'import numpy as np\n'), ((599, 622), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (610, 622), True, 'import numpy as np\n'), ((632, 649), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (643, 649), True, 'import numpy as np\n'), ((767, 784), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (778, 784), True, 'import numpy as np\n'), ((1424, 1436), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1434, 1436), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2514), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2512, 2514), True, 'import matplotlib.pyplot as plt\n'), ((429, 473), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.2989, 0.587, 0.114])\n', (435, 473), True, 'import numpy as np\n'), ((932, 947), 'numpy.max', 'np.max', (['(z1 - z2)'], {}), '(z1 - z2)\n', (938, 947), True, 'import numpy as np\n'), ((327, 415), 'numpy.exp', 'np.exp', (['(-((x - mx) ** 2.0 / (2.0 * sx ** 2.0) + (y - my) ** 2.0 / (2.0 * sy ** 2.0)))'], {}), '(-((x - mx) ** 2.0 / (2.0 * sx ** 2.0) + (y - my) ** 2.0 / (2.0 * sy **\n 2.0)))\n', (333, 415), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*
"""
:py:class:`GenerateLabelFieldReader`
"""
import numpy as np
from senta.common.register import RegisterSet
from senta.common.rule import DataShape, FieldLength, InstanceName
from senta.data.field_reader.base_field_reader import BaseFieldReader
from senta.data.util_helper import generate_pad_batch_data
from senta.modules.token_embedding.custom_fluid_embedding import CustomFluidTokenEmbedding
@RegisterSet.field_reader.register
class GenerateLabelFieldReader(BaseFieldReader):
"""seq2seq label的专用field_reader
"""
def __init__(self, field_config):
"""
:param field_config:
"""
BaseFieldReader.__init__(self, field_config=field_config)
self.paddle_version_code = 1.6
if self.field_config.tokenizer_info:
tokenizer_class = RegisterSet.tokenizer.__getitem__(self.field_config.tokenizer_info["type"])
params = None
if self.field_config.tokenizer_info.__contains__("params"):
params = self.field_config.tokenizer_info["params"]
self.tokenizer = tokenizer_class(vocab_file=self.field_config.vocab_path,
split_char=self.field_config.tokenizer_info["split_char"],
unk_token=self.field_config.tokenizer_info["unk_token"],
params=params)
if self.field_config.embedding_info and self.field_config.embedding_info["use_reader_emb"]:
self.token_embedding = CustomFluidTokenEmbedding(emb_dim=self.field_config.embedding_info["emb_dim"],
vocab_size=self.tokenizer.vocabulary.get_vocab_size())
def init_reader(self):
""" 初始化reader格式
:return: reader的shape[]、type[]、level[]
"""
shape = []
types = []
levels = []
"""train_tar_ids"""
if self.field_config.data_type == DataShape.STRING:
"""src_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('int64')
else:
raise TypeError("GenerateLabelFieldReader's data_type must be string")
"""mask_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('float32')
"""seq_lens"""
shape.append([-1])
levels.append(0)
types.append('int64')
"""infer_tar_ids"""
shape.append([-1, self.field_config.max_seq_len, 1])
levels.append(0)
types.append('int64')
"""mask_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('float32')
"""seq_lens"""
shape.append([-1])
levels.append(0)
types.append('int64')
return shape, types, levels
def convert_texts_to_ids(self, batch_text):
"""将一个batch的明文text转成id
:param batch_text:
:return:
"""
train_src_ids = []
infer_src_ids = []
for text in batch_text:
if self.field_config.need_convert:
tokens = self.tokenizer.tokenize(text)
src_id = self.tokenizer.convert_tokens_to_ids(tokens)
else:
src_id = text.split(" ")
            # apply the truncation strategy
if len(src_id) > self.field_config.max_seq_len - 1:
src_id = src_id[0:self.field_config.max_seq_len - 1]
train_src_id = [self.field_config.label_start_id] + src_id
infer_src_id = src_id + [self.field_config.label_end_id]
train_src_ids.append(train_src_id)
infer_src_ids.append(infer_src_id)
return_list = []
train_label_ids, train_label_mask, label_lens = generate_pad_batch_data(train_src_ids,
pad_idx=self.field_config.padding_id,
return_input_mask=True,
return_seq_lens=True,
paddle_version_code=self.paddle_version_code)
infer_label_ids, infer_label_mask, label_lens = generate_pad_batch_data(infer_src_ids,
pad_idx=self.field_config.padding_id,
return_input_mask=True,
return_seq_lens=True,
paddle_version_code=self.paddle_version_code)
infer_label_ids = np.reshape(infer_label_ids, (infer_label_ids.shape[0], infer_label_ids.shape[1], 1))
return_list.append(train_label_ids)
return_list.append(train_label_mask)
return_list.append(label_lens)
return_list.append(infer_label_ids)
return_list.append(infer_label_mask)
return_list.append(label_lens)
return return_list
def structure_fields_dict(self, slots_id, start_index, need_emb=True):
"""静态图调用的方法,生成一个dict, dict有两个key:id , emb. id对应的是pyreader读出来的各个field产出的id,emb对应的是各个
field对应的embedding
:param slots_id: pyreader输出的完整的id序列
:param start_index:当前需要处理的field在slot_id_list中的起始位置
:param need_emb:是否需要embedding(预测过程中是不需要embedding的)
:return:
"""
record_id_dict = {}
record_id_dict[InstanceName.TRAIN_LABEL_SRC_IDS] = slots_id[start_index]
record_id_dict[InstanceName.TRAIN_LABEL_MASK_IDS] = slots_id[start_index + 1]
record_id_dict[InstanceName.TRAIN_LABEL_SEQ_LENS] = slots_id[start_index + 2]
record_id_dict[InstanceName.INFER_LABEL_SRC_IDS] = slots_id[start_index + 3]
record_id_dict[InstanceName.INFER_LABEL_MASK_IDS] = slots_id[start_index + 4]
record_id_dict[InstanceName.INFER_LABEL_SEQ_LENS] = slots_id[start_index + 5]
record_emb_dict = None
if need_emb and self.token_embedding:
record_emb_dict = self.token_embedding.get_token_embedding(record_id_dict)
record_dict = {}
record_dict[InstanceName.RECORD_ID] = record_id_dict
record_dict[InstanceName.RECORD_EMB] = record_emb_dict
return record_dict
def get_field_length(self):
"""获取当前这个field在进行了序列化之后,在slot_id_list中占多少长度
:return:
"""
return FieldLength.GENERATE_LABEL_FIELD
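# illustrative note (not part of the original module): for a tokenized sample
# [t1, t2, t3] with max_seq_len large enough, convert_texts_to_ids above builds
#   train_src_id = [label_start_id, t1, t2, t3]
#   infer_src_id = [t1, t2, t3, label_end_id]
# both are padded to max_seq_len by generate_pad_batch_data, and the infer ids are
# reshaped to [batch_size, max_seq_len, 1] before being returned.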
|
[
"senta.data.field_reader.base_field_reader.BaseFieldReader.__init__",
"senta.data.util_helper.generate_pad_batch_data",
"numpy.reshape",
"senta.common.register.RegisterSet.tokenizer.__getitem__"
] |
[((652, 709), 'senta.data.field_reader.base_field_reader.BaseFieldReader.__init__', 'BaseFieldReader.__init__', (['self'], {'field_config': 'field_config'}), '(self, field_config=field_config)\n', (676, 709), False, 'from senta.data.field_reader.base_field_reader import BaseFieldReader\n'), ((3848, 4025), 'senta.data.util_helper.generate_pad_batch_data', 'generate_pad_batch_data', (['train_src_ids'], {'pad_idx': 'self.field_config.padding_id', 'return_input_mask': '(True)', 'return_seq_lens': '(True)', 'paddle_version_code': 'self.paddle_version_code'}), '(train_src_ids, pad_idx=self.field_config.padding_id,\n return_input_mask=True, return_seq_lens=True, paddle_version_code=self.\n paddle_version_code)\n', (3871, 4025), False, 'from senta.data.util_helper import generate_pad_batch_data\n'), ((4394, 4571), 'senta.data.util_helper.generate_pad_batch_data', 'generate_pad_batch_data', (['infer_src_ids'], {'pad_idx': 'self.field_config.padding_id', 'return_input_mask': '(True)', 'return_seq_lens': '(True)', 'paddle_version_code': 'self.paddle_version_code'}), '(infer_src_ids, pad_idx=self.field_config.padding_id,\n return_input_mask=True, return_seq_lens=True, paddle_version_code=self.\n paddle_version_code)\n', (4417, 4571), False, 'from senta.data.util_helper import generate_pad_batch_data\n'), ((4910, 4999), 'numpy.reshape', 'np.reshape', (['infer_label_ids', '(infer_label_ids.shape[0], infer_label_ids.shape[1], 1)'], {}), '(infer_label_ids, (infer_label_ids.shape[0], infer_label_ids.\n shape[1], 1))\n', (4920, 4999), True, 'import numpy as np\n'), ((825, 900), 'senta.common.register.RegisterSet.tokenizer.__getitem__', 'RegisterSet.tokenizer.__getitem__', (["self.field_config.tokenizer_info['type']"], {}), "(self.field_config.tokenizer_info['type'])\n", (858, 900), False, 'from senta.common.register import RegisterSet\n')]
|
import argparse,time,os,pickle
import matplotlib.pyplot as plt
import numpy as np
from player import *
plt.switch_backend('agg')
np.set_printoptions(precision=2)
class lemon:
def __init__(self, std, num_sellers, num_actions, unit, minx):
self.std = std
        self.unit = unit
        self.minx = minx
self.num_sellers = num_sellers
self.num_players = num_sellers + 1
self.quality = self.transform(np.arange(num_sellers) )
self.num_actions = num_actions
self.welfare_factor = 1.5
self.listing_cost = 3
def __str__(self):
return f"Lemon({self.num_sellers}) with noise std. {self.std},\nquality: {self.quality}\n"
def transform(self, x):
        return x*self.unit + self.minx
def feedback(self, actions):
rewards = np.zeros(self.num_players)
seller_actions = actions[1:]
price = self.transform( actions[0] ) - 1
sold = seller_actions * (self.quality < price) ### quality below price and is selling
supply = np.sum(sold)
if supply > 0:
avg_quality = np.sum(sold * self.quality) / supply
q_noise = np.random.randn(self.num_sellers) * 5
rewards[1:] = seller_actions * [ (self.quality + q_noise < price) * (price - self.quality) - self.listing_cost ]
rewards[0] = ( self.welfare_factor * avg_quality - price )
noise = np.random.randn(self.num_players) * self.std
rewards += noise
else:
avg_quality = 0
rewards = np.zeros(self.num_players)
rewards[1:] = - seller_actions * self.listing_cost
rewards /= self.num_players
return rewards, supply, price, avg_quality
class logger:
def __init__(self, log_dir, env, iterations, samples=None):
self.log_dir = log_dir
self.env = env
self.supply_history = []
self.demand_history = []
self.price_history = []
self.avg_quality_history = []
self.iterations = iterations
self.samples = self.iterations if not samples else samples
self.step_size = self.iterations // self.samples
self.sampled_actions = []
def write(self, text):
with open(self.log_dir+ '.log', 'a') as f:
f.write(text)
def record_round(self, t, supply, price, avg_quality, actions):
if t % self.step_size == 0:
self.supply_history.append(supply)
self.price_history.append(price)
self.avg_quality_history.append(avg_quality)
self.sampled_actions.append(actions[1:].copy())
def plot(self):
time_axis = np.arange(0, self.iterations, step=self.step_size)
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(time_axis, self.supply_history, label=f"supply")
ax1.set_ylabel('#units')
ax1.legend(loc="upper left")
ax2.plot(time_axis, self.price_history, label=f"price")
ax2.plot(time_axis, self.avg_quality_history, label=f"avg. quality")
ax2.set_ylabel('$')
ax2.set_xlabel('#round')
ax2.legend(loc="upper left")
fig.suptitle( f"Lemon({self.env.num_sellers}) with noise std. {self.env.std}")
plt.savefig(self.log_dir+ '_price' '.png')
plt.clf()
fig, ax3 = plt.subplots(1, 1)
im = ax3.imshow(np.asarray( self.sampled_actions).T, aspect="auto")
cbar = ax3.figure.colorbar(im, ax=ax3)
cbar.ax.set_ylabel("prob. to sell", rotation=-90, va="bottom")
ax3.set_yticks(np.arange(0, self.env.num_sellers, step=5))
ax3.set_ylabel('#player')
ax3.set_xlabel('#round')
fig.suptitle( f"Lemon({self.env.num_sellers}) with noise std. {self.env.std}")
plt.savefig(self.log_dir+ '_trend' '.png')
plt.clf()
with open(self.log_dir+'_history.pickle', 'wb') as f:
pickle.dump(self.sampled_actions, f)
def find_latest(prefix, suffix):
i = 0
while os.path.exists(f'{prefix}{i}{suffix}'):
i += 1
return i
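# illustrative sketch (not part of the original simulation): one feedback round on a
# tiny market, showing how the buyer's price action and the sellers' 0/1 listing
# decisions map to rewards, supply, price and average quality. All values are arbitrary.
def example_round():
    env = lemon(std=0.0, num_sellers=4, num_actions=11, unit=1, minx=0)
    # actions[0] is the buyer's price index; actions[1:] are the sellers' decisions to list
    actions = np.array([6, 1, 1, 0, 1])
    rewards, supply, price, avg_quality = env.feedback(actions)
    print(f'rewards={rewards}, supply={supply}, price={price}, avg_quality={avg_quality}')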
if __name__ == '__main__':
parser = argparse.ArgumentParser()
describe = lambda names : ''.join( [', {}: {}'.format(i, n) for i,n in enumerate(names)] )
parser.add_argument('--std', type=float, default=0, help='noise std. in feedback')
parser.add_argument('--iterations', type=int, default=100, help='number of rounds to play')
parser.add_argument('--strategy', type=int, help='player strategy' + describe(strategy_choice_names))
parser.add_argument('--num_sellers', type=int, help='number of sellers ' )
parser.add_argument('--num_actions', type=int, help='number of buyers ')
parser.add_argument('--unit', type=float, default=1, help='discretized unit')
parser.add_argument('--minx', type=float, default=0, help='min action')
parser.add_argument('--samples', type=int, default=100, help='number of samples to save' )
parser.add_argument('--new', default=False, action='store_true', help='whether to generate a new env instance')
parser.add_argument('--num_repeat', type=int, default=1, help='number of repeated simulation')
parser.add_argument('--force_env', default=False, action='store_true', help='whether to use a specified env instance')
args = parser.parse_args()
std = args.std
iterations = args.iterations
strategy = args.strategy
num_sellers = args.num_sellers
num_buyers = 1
num_actions = args.num_actions
num_players = num_sellers+num_buyers
unit = args.unit
minx = args.minx
samples = args.samples
env_name = "lemon3"
strategy_name = strategy_choice_names[strategy]
j = 0
while j < args.num_repeat:
log_dir = f'results/{env_name}/{strategy_name}'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
print("created directory")
else:
print("existing directory")
prefix = f'results/{env_name}/{num_sellers}_{num_buyers}|{std}|{unit}|{minx}#'
if not args.force_env:
i = find_latest(prefix, '.pickle')
if not args.new and i > 0:
env_dir = prefix + str(i-1) + '.pickle'
f = open(env_dir, 'rb')
env = pickle.load(f)
print("load env at " + env_dir)
f.close()
else:
env = lemon(std, num_sellers, num_actions, unit, minx)
env_dir = prefix + str(i) + '.pickle'
f = open(env_dir, 'wb')
pickle.dump(env, f )
print("save env at "+ env_dir)
f.close()
else:
i = specified_env[j]
env_dir = prefix + str(i) + '.pickle'
if not os.path.exists(log_dir):
print("env path not found ", log_dir)
exit()
f = open(env_dir, 'rb')
env = pickle.load(f)
print("load env at " + env_dir)
f.close()
player_module = __import__('player')
if strategy != 4:
players = [getattr(player_module, strategy_name)(num_actions, iterations) ]
players.extend( [getattr(player_module, strategy_name)(2, iterations) for i in range(num_sellers) ] )
else:
a0 = 50
b0 = 0.5
a1 = 50
b1 = 0.5
players = [getattr(player_module, strategy_name)(num_actions, iterations, a0, b0) ]
players.extend( [getattr(player_module, strategy_name)(2, iterations, a1, b1) for i in range(num_sellers) ] )
print(f'beta = {players[0].beta}, b = {players[0].b}, beta = {players[1].beta}, b = {players[1].b}' )
i = find_latest(f'{log_dir}/', '.log')
log_dir = f'{log_dir}/{i}'
L = logger(log_dir, env, iterations, samples=samples)
start = time.time()
L.write("iterations: "+str(iterations) + "\n")
L.write('Environment:\n\t'+str(env)+'\n')
actions = np.zeros(num_players, dtype=int)
action_probs = np.zeros(num_players, dtype=float)
for t in range(1, iterations+1):
for i, p in enumerate(players):
actions[i] = p.act()
action_probs[i] = p.action_prob[1]
rewards, supply, price, avg_quality = env.feedback( actions )
for a, p, r in zip(actions, players, rewards ):
p.feedback(a, r)
L.record_round(t, supply, price, avg_quality, action_probs)
for i, p in enumerate(players):
L.write(f'Player{i}:\n\t{p}\n')
L.plot()
end = time.time()
print(log_dir, end-start)
j += 1
|
[
"os.path.exists",
"matplotlib.pyplot.savefig",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.clf",
"pickle.load",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"numpy.random.randn",
"matplotlib.pyplot.switch_backend",
"time.time",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.set_printoptions"
] |
[((103, 128), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (121, 128), True, 'import matplotlib.pyplot as plt\n'), ((131, 163), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (150, 163), True, 'import numpy as np\n'), ((3454, 3492), 'os.path.exists', 'os.path.exists', (['f"""{prefix}{i}{suffix}"""'], {}), "(f'{prefix}{i}{suffix}')\n", (3468, 3492), False, 'import argparse, time, os, pickle\n'), ((3553, 3578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3576, 3578), False, 'import argparse, time, os, pickle\n'), ((697, 723), 'numpy.zeros', 'np.zeros', (['self.num_players'], {}), '(self.num_players)\n', (705, 723), True, 'import numpy as np\n'), ((900, 912), 'numpy.sum', 'np.sum', (['sold'], {}), '(sold)\n', (906, 912), True, 'import numpy as np\n'), ((2285, 2335), 'numpy.arange', 'np.arange', (['(0)', 'self.iterations'], {'step': 'self.step_size'}), '(0, self.iterations, step=self.step_size)\n', (2294, 2335), True, 'import numpy as np\n'), ((2358, 2376), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (2370, 2376), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2829), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.log_dir + '_price.png')"], {}), "(self.log_dir + '_price.png')\n", (2800, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2843), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2876), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2870, 2876), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3294), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.log_dir + '_trend.png')"], {}), "(self.log_dir + '_trend.png')\n", (3265, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3308), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3306, 3308), True, 'import matplotlib.pyplot as plt\n'), ((6807, 6818), 'time.time', 'time.time', ([], {}), '()\n', (6816, 6818), False, 'import argparse, time, os, pickle\n'), ((6925, 6957), 'numpy.zeros', 'np.zeros', (['num_players'], {'dtype': 'int'}), '(num_players, dtype=int)\n', (6933, 6957), True, 'import numpy as np\n'), ((6975, 7009), 'numpy.zeros', 'np.zeros', (['num_players'], {'dtype': 'float'}), '(num_players, dtype=float)\n', (6983, 7009), True, 'import numpy as np\n'), ((7443, 7454), 'time.time', 'time.time', ([], {}), '()\n', (7452, 7454), False, 'import argparse, time, os, pickle\n'), ((380, 402), 'numpy.arange', 'np.arange', (['num_sellers'], {}), '(num_sellers)\n', (389, 402), True, 'import numpy as np\n'), ((1334, 1360), 'numpy.zeros', 'np.zeros', (['self.num_players'], {}), '(self.num_players)\n', (1342, 1360), True, 'import numpy as np\n'), ((3071, 3113), 'numpy.arange', 'np.arange', (['(0)', 'self.env.num_sellers'], {'step': '(5)'}), '(0, self.env.num_sellers, step=5)\n', (3080, 3113), True, 'import numpy as np\n'), ((3369, 3405), 'pickle.dump', 'pickle.dump', (['self.sampled_actions', 'f'], {}), '(self.sampled_actions, f)\n', (3380, 3405), False, 'import argparse, time, os, pickle\n'), ((5126, 5149), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (5140, 5149), False, 'import argparse, time, os, pickle\n'), ((5157, 5177), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (5168, 5177), False, 'import argparse, time, os, pickle\n'), ((5991, 6005), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', 
(6002, 6005), False, 'import argparse, time, os, pickle\n'), ((948, 975), 'numpy.sum', 'np.sum', (['(sold * self.quality)'], {}), '(sold * self.quality)\n', (954, 975), True, 'import numpy as np\n'), ((998, 1031), 'numpy.random.randn', 'np.random.randn', (['self.num_sellers'], {}), '(self.num_sellers)\n', (1013, 1031), True, 'import numpy as np\n'), ((1227, 1260), 'numpy.random.randn', 'np.random.randn', (['self.num_players'], {}), '(self.num_players)\n', (1242, 1260), True, 'import numpy as np\n'), ((2895, 2927), 'numpy.asarray', 'np.asarray', (['self.sampled_actions'], {}), '(self.sampled_actions)\n', (2905, 2927), True, 'import numpy as np\n'), ((5508, 5522), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5519, 5522), False, 'import argparse, time, os, pickle\n'), ((5717, 5736), 'pickle.dump', 'pickle.dump', (['env', 'f'], {}), '(env, f)\n', (5728, 5736), False, 'import argparse, time, os, pickle\n'), ((5871, 5894), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (5885, 5894), False, 'import argparse, time, os, pickle\n')]
|
import pytest
import gpmap
from epistasis import models
import numpy as np
import pandas as pd
import os
def test__genotypes_to_X(test_data):
# Make sure function catches bad genotype passes
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# Duplicated
g = list(gpm.genotype)
g.extend(g)
# not in gpmap
b = list(gpm.genotype)
b.append("stupid")
bad_genotypes = [g,b]
for bad in bad_genotypes:
with pytest.raises(ValueError):
models.base._genotypes_to_X(bad,gpm,order=1,model_type="local")
# Sample through various model comobos
allowed = {"local":set([0,1]),
"global":set([-1,1])}
for d in test_data:
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
for i in range(1,gpm.length+1,1):
for model_type in ["local","global"]:
X = models.base._genotypes_to_X(gpm.genotype,
gpm,
order=i,
model_type=model_type)
assert X.shape[0] == len(gpm.genotype)
assert set(np.unique(X)).issubset(allowed[model_type])
def test_arghandler_decorator():
class Yo:
def _a(self,data=5,method=None):
return data
def _b(self,data=None,method=None):
return 6
@models.base.arghandler
def test_method(self,a=None,b=None,**kwargs):
return a, b
@models.base.arghandler
def bad_method(self,c=None,d=None,**kwargs):
return c, d
yo = Yo()
assert yo.test_method() == (None,6)
assert yo.test_method(a=5) == (5,6)
assert yo.test_method(a=10) == (10,6)
assert yo.test_method(b=10) == (None,6)
with pytest.raises(AttributeError):
yo.bad_method()
### Tests for AbstractModel:
# AbstractModel cannot be instantiated on its own, as it is designed to be a
# mixin with sklearn classes. Many methods have to be defined in subclass
# (.fit, .predict, etc.) These will not be tested here, but instead in the
# subclass tests. For methods defined here that are never redefined in subclass
# (._X, .add_gpm, etc.) we test using the simplest mixed/subclass
# (EpistasisLinearRegression).
def test_abstractmodel_predict_to_df(test_data):
"""
Test basic functionality. Real test of values will be done on .predict
for subclasses.
"""
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
# This should fail -- no fit run
with pytest.raises(Exception):
df = m.predict_to_df()
m.fit()
# This should work
df = m.predict_to_df()
assert type(df) is type(pd.DataFrame())
assert len(df) == len(d["genotype"])
# Create and fit a new model.
m = models.linear.EpistasisLinearRegression()
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# No gpm added -- should fail
with pytest.raises(RuntimeError):
m.predict_to_df()
m.add_gpm(gpm)
m.fit()
df = m.predict_to_df(genotypes=d["genotype"][0])
assert len(df) == 1
bad_stuff = [1,{},[1,2],"STUPID",["STUPID","IS","REAL"]]
for b in bad_stuff:
with pytest.raises(ValueError):
print(f"Trying bad genotypes {b}")
m.predict_to_df(genotypes=b)
df = m.predict_to_df(genotypes=d["genotype"][:3])
assert len(df) == 3
def test_abstractmodel_predict_to_csv(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
csv_file = os.path.join(tmp_path,"tmp.csv")
m.predict_to_csv(filename=csv_file)
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == len(d["genotype"])
# Make sure genotypes pass works
m.predict_to_csv(filename=csv_file,genotypes=d["genotype"][0])
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == 1
def test_abstractmodel_predict_to_excel(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
excel_file = os.path.join(tmp_path,"tmp.xlsx")
m.predict_to_excel(filename=excel_file)
assert os.path.exists(excel_file)
df = pd.read_excel(excel_file)
assert len(df) == len(d["genotype"])
# Make sure genotypes pass works
m.predict_to_excel(filename=excel_file,genotypes=d["genotype"][0])
assert os.path.exists(excel_file)
df = pd.read_excel(excel_file)
assert len(df) == 1
def test_abstractmodel_add_gpm(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
bad_gpm = [1,None,"test",[],{}]
for b in bad_gpm:
with pytest.raises(TypeError):
m.add_gpm(b)
m.add_gpm(gpm)
# Test genotype_column arg
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
bad_genotype_column = [1,None,[],{},(1,)]
for b in bad_genotype_column:
with pytest.raises(TypeError):
print(f"trying {b}")
m.add_gpm(gpm,genotype_column=b)
with pytest.raises(KeyError):
m.add_gpm(gpm,genotype_column="not_a_column")
m.add_gpm(gpm,genotype_column="genotype")
assert m.genotype_column == "genotype"
# Test phenotype_column arg
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"])
m = models.linear.EpistasisLinearRegression()
# Shouldn't work b/c no float column
with pytest.raises(ValueError):
m.add_gpm(gpm)
# Shouldn't work because there is no column with that name
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
with pytest.raises(KeyError):
m.add_gpm(gpm,phenotype_column="not_real")
# Shouldn't work because column is not numeric
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["genotype"])
with pytest.raises(ValueError):
m.add_gpm(gpm,phenotype_column="phenotype")
# Make sure it gets right column (first float that is not reserved)
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
something_else=d["phenotype"])
m.add_gpm(gpm)
assert m.phenotype_column == "coolness"
# Test uncertainty_column arg.
# Do default = None
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
assert m.uncertainty_column == "epi_zero_uncertainty"
unc = np.array(m.gpm.data.loc[:,"epi_zero_uncertainty"])
assert len(np.unique(unc)) == 1
assert np.isclose(unc[0],np.min(gpm.data.loc[:,m.phenotype_column])*1e-6)
# pass missing column
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
coolness=d["phenotype"],
not_float=d["genotype"])
# Send in same as phenotype
with pytest.raises(ValueError):
m.add_gpm(gpm,uncertainty_column="phenotype")
# send in not there
with pytest.raises(KeyError):
m.add_gpm(gpm,uncertainty_column="not_there")
# send in not float
with pytest.raises(ValueError):
m.add_gpm(gpm,uncertainty_column="not_float")
    # Should work
m.add_gpm(gpm,uncertainty_column="coolness")
assert m.uncertainty_column == "coolness"
# Check final output
assert m.gpm is gpm
assert m.Xcolumns is not None
assert m.epistasis is not None
assert m._previous_X is None
def test_gpm_getter(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
assert m.gpm is None
m.add_gpm(gpm)
assert m.gpm is gpm
def test_results_getter(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
m.add_gpm(gpm)
assert m.results is None
m.fit()
assert isinstance(m.results,pd.DataFrame)
def test_column_getters(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
uncertainty=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
assert m.genotype_column is None
assert m.phenotype_column is None
assert m.uncertainty_column is None
m.add_gpm(gpm,uncertainty_column="uncertainty")
assert m.genotype_column == "genotype"
assert m.phenotype_column == "phenotype"
assert m.uncertainty_column == "uncertainty"
def test__X_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._X()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm)
# Make sure calling _X() naked-ly populates previous_X
assert m._previous_X is None
X = m._X()
assert m._previous_X is X
# If we access after having run, make sure X is the same object
assert X is m._X()
# Should wipe out previous_X and force recalculation.
m.add_gpm(gpm)
assert X is not m._X()
# Get x for single genotype. should work. should not update _previous_X
X = m._X(d["genotype"][0])
assert len(X) == 1
assert X is not m._previous_X
# Get x for two genotypes. should work and not update _previous_X
X = m._X(d["genotype"][0:2])
assert len(X) == 2
assert X is not m._previous_X
# Get x for two genotypes. should work and not update _previous_X
X = m._X(np.array(d["genotype"][0:2]))
assert len(X) == 2
assert X is not m._previous_X
# Just keep the array, do not update previous_X
hack = np.ones((1,1))
X = m._X(data=hack)
assert X is hack
assert X is not m._previous_X
# pass in bad genotypes
with pytest.raises(ValueError):
X = m._X("NOT_A_GENOTYPE")
with pytest.raises(ValueError):
X = m._X([d["genotype"][0],"NOT_A_GENOTYPE"])
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(ValueError):
m._X(b)
def test__y_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._y()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness")
assert np.array_equal(m._y(),d["phenotype"])
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(TypeError):
print(f"trying {b}")
m._y(b)
y = m._y([1.0])
assert np.array_equal(y,[1.0])
def test__yerr_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._yerr()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness",uncertainty_column="uncertainty")
assert np.array_equal(m._yerr(),d["phenotype"])
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(TypeError):
print(f"trying {b}")
m._yerr(b)
y = m._yerr([1.0])
assert np.array_equal(y,[1.0])
def test__thetas_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness",uncertainty_column="uncertainty")
    # No thetas calculated yet
with pytest.raises(RuntimeError):
m._thetas()
m.fit()
    # Get thetas, calculated
t = m._thetas()
assert len(t) == 4
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(TypeError):
print(f"trying {b}")
m._thetas(b)
y = m._thetas([1.0])
assert np.array_equal(y,[1.0])
def test__lnprior(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._lnprior()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness",uncertainty_column="uncertainty")
x = m._lnprior()
assert np.array_equal(x,np.zeros(len(d["genotype"])))
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(TypeError):
print(f"trying {b}")
m._lnprior(b)
y = m._lnprior([1.0])
assert np.array_equal(y,[1.0])
|
[
"os.path.exists",
"gpmap.GenotypePhenotypeMap",
"numpy.ones",
"pandas.read_csv",
"numpy.unique",
"os.path.join",
"numpy.min",
"numpy.array",
"numpy.array_equal",
"pytest.raises",
"epistasis.models.base._genotypes_to_X",
"pandas.read_excel",
"pandas.DataFrame",
"epistasis.models.linear.EpistasisLinearRegression"
] |
[((231, 307), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (257, 307), False, 'import gpmap\n'), ((2639, 2680), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (2678, 2680), False, 'from epistasis import models\n'), ((2712, 2788), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (2738, 2788), False, 'import gpmap\n'), ((3141, 3182), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (3180, 3182), False, 'from epistasis import models\n'), ((3193, 3269), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (3219, 3269), False, 'import gpmap\n'), ((3878, 3919), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (3917, 3919), False, 'from epistasis import models\n'), ((3951, 4027), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (3977, 4027), False, 'import gpmap\n'), ((4112, 4145), 'os.path.join', 'os.path.join', (['tmp_path', '"""tmp.csv"""'], {}), "(tmp_path, 'tmp.csv')\n", (4124, 4145), False, 'import os\n'), ((4197, 4221), 'os.path.exists', 'os.path.exists', (['csv_file'], {}), '(csv_file)\n', (4211, 4221), False, 'import os\n'), ((4231, 4252), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (4242, 4252), True, 'import pandas as pd\n'), ((4410, 4434), 'os.path.exists', 'os.path.exists', (['csv_file'], {}), '(csv_file)\n', (4424, 4434), False, 'import os\n'), ((4444, 4465), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (4455, 4465), True, 'import pandas as pd\n'), ((4561, 4602), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (4600, 4602), False, 'from epistasis import models\n'), ((4634, 4710), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (4660, 4710), False, 'import gpmap\n'), ((4797, 4831), 'os.path.join', 'os.path.join', (['tmp_path', '"""tmp.xlsx"""'], {}), "(tmp_path, 'tmp.xlsx')\n", (4809, 4831), False, 'import os\n'), ((4887, 4913), 'os.path.exists', 'os.path.exists', (['excel_file'], {}), '(excel_file)\n', (4901, 4913), False, 'import os\n'), ((4923, 4948), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (4936, 4948), True, 'import pandas as pd\n'), ((5110, 5136), 'os.path.exists', 'os.path.exists', (['excel_file'], {}), '(excel_file)\n', (5124, 5136), False, 'import os\n'), ((5146, 5171), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (5159, 5171), True, 'import pandas as pd\n'), ((5272, 5348), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (5298, 5348), False, 'import gpmap\n'), ((5395, 5436), 
'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (5434, 5436), False, 'from epistasis import models\n'), ((5643, 5719), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (5669, 5719), False, 'import gpmap\n'), ((5766, 5807), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (5805, 5807), False, 'from epistasis import models\n'), ((6249, 6299), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']"}), "(genotype=d['genotype'])\n", (6275, 6299), False, 'import gpmap\n'), ((6309, 6350), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (6348, 6350), False, 'from epistasis import models\n'), ((6526, 6602), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (6552, 6602), False, 'import gpmap\n'), ((6787, 6862), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['genotype']"}), "(genotype=d['genotype'], phenotype=d['genotype'])\n", (6813, 6862), False, 'import gpmap\n'), ((7071, 7181), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'something_else': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n something_else=d['phenotype'])\n", (7097, 7181), False, 'import gpmap\n'), ((7386, 7462), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (7412, 7462), False, 'import gpmap\n'), ((7587, 7638), 'numpy.array', 'np.array', (["m.gpm.data.loc[:, 'epi_zero_uncertainty']"], {}), "(m.gpm.data.loc[:, 'epi_zero_uncertainty'])\n", (7595, 7638), True, 'import numpy as np\n'), ((7789, 7919), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']", 'coolness': "d['phenotype']", 'not_float': "d['genotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'],\n coolness=d['phenotype'], not_float=d['genotype'])\n", (7815, 7919), False, 'import gpmap\n'), ((8708, 8784), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (8734, 8784), False, 'import gpmap\n'), ((8831, 8872), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (8870, 8872), False, 'from epistasis import models\n'), ((9010, 9086), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (9036, 9086), False, 'import gpmap\n'), ((9133, 9174), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (9172, 9174), False, 'from epistasis import models\n'), ((9351, 9459), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']", 'uncertainty': "d['phenotype']"}), 
"(genotype=d['genotype'], phenotype=d['phenotype'],\n uncertainty=d['phenotype'])\n", (9377, 9459), False, 'import gpmap\n'), ((9539, 9580), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (9578, 9580), False, 'from epistasis import models\n'), ((9933, 9974), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (9972, 9974), False, 'from epistasis import models\n'), ((10058, 10166), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'phenotype': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'],\n uncertainty=d['phenotype'])\n", (10084, 10166), False, 'import gpmap\n'), ((11152, 11167), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (11159, 11167), True, 'import numpy as np\n'), ((11654, 11695), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (11693, 11695), False, 'from epistasis import models\n'), ((11779, 11886), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (11805, 11886), False, 'import gpmap\n'), ((12290, 12314), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (12304, 12314), True, 'import numpy as np\n'), ((12363, 12404), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (12402, 12404), False, 'from epistasis import models\n'), ((12491, 12598), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (12517, 12598), False, 'import gpmap\n'), ((13044, 13068), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (13058, 13068), True, 'import numpy as np\n'), ((13118, 13159), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (13157, 13159), False, 'from epistasis import models\n'), ((13192, 13299), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (13218, 13299), False, 'import gpmap\n'), ((13872, 13896), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (13886, 13896), True, 'import numpy as np\n'), ((13936, 13977), 'epistasis.models.linear.EpistasisLinearRegression', 'models.linear.EpistasisLinearRegression', ([], {}), '()\n', (13975, 13977), False, 'from epistasis import models\n'), ((14067, 14174), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': "d['genotype']", 'coolness': "d['phenotype']", 'uncertainty': "d['phenotype']"}), "(genotype=d['genotype'], coolness=d['phenotype'],\n uncertainty=d['phenotype'])\n", (14093, 14174), False, 'import gpmap\n'), ((14654, 14678), 'numpy.array_equal', 'np.array_equal', (['y', '[1.0]'], {}), '(y, [1.0])\n', (14668, 14678), True, 'import numpy as np\n'), ((804, 880), 'gpmap.GenotypePhenotypeMap', 'gpmap.GenotypePhenotypeMap', ([], {'genotype': 
"d['genotype']", 'phenotype': "d['phenotype']"}), "(genotype=d['genotype'], phenotype=d['phenotype'])\n", (830, 880), False, 'import gpmap\n'), ((1980, 2009), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1993, 2009), False, 'import pytest\n'), ((2892, 2916), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2905, 2916), False, 'import pytest\n'), ((3351, 3378), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3364, 3378), False, 'import pytest\n'), ((6015, 6038), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6028, 6038), False, 'import pytest\n'), ((6402, 6427), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6415, 6427), False, 'import pytest\n'), ((6649, 6672), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6662, 6672), False, 'import pytest\n'), ((6909, 6934), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6922, 6934), False, 'import pytest\n'), ((8069, 8094), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8082, 8094), False, 'import pytest\n'), ((8184, 8207), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (8197, 8207), False, 'import pytest\n'), ((8297, 8322), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8310, 8322), False, 'import pytest\n'), ((9984, 10009), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9997, 10009), False, 'import pytest\n'), ((11001, 11029), 'numpy.array', 'np.array', (["d['genotype'][0:2]"], {}), "(d['genotype'][0:2])\n", (11009, 11029), True, 'import numpy as np\n'), ((11284, 11309), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11297, 11309), False, 'import pytest\n'), ((11355, 11380), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11368, 11380), False, 'import pytest\n'), ((11485, 11503), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (11492, 11503), True, 'import numpy as np\n'), ((11705, 11730), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11718, 11730), False, 'import pytest\n'), ((12102, 12120), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (12109, 12120), True, 'import numpy as np\n'), ((12414, 12439), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12427, 12439), False, 'import pytest\n'), ((12850, 12868), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (12857, 12868), True, 'import numpy as np\n'), ((13491, 13518), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13504, 13518), False, 'import pytest\n'), ((13674, 13692), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (13681, 13692), True, 'import numpy as np\n'), ((13987, 14012), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14000, 14012), False, 'import pytest\n'), ((14454, 14472), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (14461, 14472), True, 'import numpy as np\n'), ((545, 570), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (558, 570), False, 'import pytest\n'), ((584, 650), 'epistasis.models.base._genotypes_to_X', 'models.base._genotypes_to_X', (['bad', 'gpm'], {'order': '(1)', 'model_type': '"""local"""'}), "(bad, gpm, order=1, model_type='local')\n", (611, 650), False, 'from epistasis import models\n'), ((3041, 3055), 
'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3053, 3055), True, 'import pandas as pd\n'), ((3615, 3640), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3628, 3640), False, 'import pytest\n'), ((5509, 5533), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5522, 5533), False, 'import pytest\n'), ((5901, 5925), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5914, 5925), False, 'import pytest\n'), ((7653, 7667), 'numpy.unique', 'np.unique', (['unc'], {}), '(unc)\n', (7662, 7667), True, 'import numpy as np\n'), ((7703, 7746), 'numpy.min', 'np.min', (['gpm.data.loc[:, m.phenotype_column]'], {}), '(gpm.data.loc[:, m.phenotype_column])\n', (7709, 7746), True, 'import numpy as np\n'), ((11562, 11587), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11575, 11587), False, 'import pytest\n'), ((12179, 12203), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12192, 12203), False, 'import pytest\n'), ((12927, 12951), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12940, 12951), False, 'import pytest\n'), ((13751, 13775), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (13764, 13775), False, 'import pytest\n'), ((14531, 14555), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (14544, 14555), False, 'import pytest\n'), ((1035, 1113), 'epistasis.models.base._genotypes_to_X', 'models.base._genotypes_to_X', (['gpm.genotype', 'gpm'], {'order': 'i', 'model_type': 'model_type'}), '(gpm.genotype, gpm, order=i, model_type=model_type)\n', (1062, 1113), False, 'from epistasis import models\n'), ((1340, 1352), 'numpy.unique', 'np.unique', (['X'], {}), '(X)\n', (1349, 1352), True, 'import numpy as np\n')]
|
# File name: spyview.py
#
# This example should be run with "execfile('spyview.py')"
from numpy import pi, linspace, sinc, sqrt
from lib.file_support.spyview import SpyView
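# Note: the "qt" object used below is not imported in this file; it is assumed to
# be provided by the QTLab measurement environment in which this example is run.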
x_vec = linspace(-2 * pi, 2 * pi, 100)
y_vec = linspace(-2 * pi, 2 * pi, 100)
qt.mstart()
data = qt.Data(name='testmeasurement')
# to make the spyview meta.txt file dimension info is required:
data.add_coordinate('X',
size=len(x_vec),
start=x_vec[0],
end=x_vec[-1])
data.add_coordinate('Y',
size=len(y_vec),
start=y_vec[0],
end=y_vec[-1])
data.add_value('Z')
data.create_file()
for y in y_vec:
for x in x_vec:
result = sinc(sqrt(x**2 + y**2))
data.add_data_point(x, y, result)
qt.msleep(0.001)
data.new_block()
data.close_file()
qt.mend()
# create the spyview meta.txt file:
SpyView(data).write_meta_file()
|
[
"numpy.sqrt",
"numpy.linspace",
"lib.file_support.spyview.SpyView"
] |
[((183, 213), 'numpy.linspace', 'linspace', (['(-2 * pi)', '(2 * pi)', '(100)'], {}), '(-2 * pi, 2 * pi, 100)\n', (191, 213), False, 'from numpy import pi, linspace, sinc, sqrt\n'), ((222, 252), 'numpy.linspace', 'linspace', (['(-2 * pi)', '(2 * pi)', '(100)'], {}), '(-2 * pi, 2 * pi, 100)\n', (230, 252), False, 'from numpy import pi, linspace, sinc, sqrt\n'), ((839, 852), 'lib.file_support.spyview.SpyView', 'SpyView', (['data'], {}), '(data)\n', (846, 852), False, 'from lib.file_support.spyview import SpyView\n'), ((665, 686), 'numpy.sqrt', 'sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (669, 686), False, 'from numpy import pi, linspace, sinc, sqrt\n')]
|
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import h5py
import copy
import time
import os
from whacc import utils
def isnotebook():
try:
c = str(get_ipython().__class__)
shell = get_ipython().__class__.__name__
if 'colab' in c:
return True
elif shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
if isnotebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
def stack_imgs_lag(imgs, frames_1=None, buffer=2, shift_to_the_right_by=0):
if frames_1 is None:
frames_1 = [imgs.shape[0]]
array_group = []
for k1, k2 in utils.loop_segments(frames_1):
x = (np.random.random(imgs[0].shape) * 255).astype(np.uint8)
tile_axes = [1] * len(x.shape) + [buffer]
x = np.tile(x[:, :, None], tile_axes)
tmp1 = x.copy()
for ii, stack_i in enumerate(range(k1, k2)):
x = np.concatenate((x, imgs[stack_i][:, :, None]), axis=2)
x = np.concatenate((x, tmp1), axis=2)
for k3 in range(k2 - k1):
array_group.append(x[:, :, k3 + shift_to_the_right_by: k3 + 1 + buffer + shift_to_the_right_by])
return np.asarray(array_group)
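# A minimal usage sketch with synthetic data (shapes and values are illustrative):
# each output frame holds itself plus the `buffer` preceding frames, padded with
# random noise at the start of each segment in `frames_1`.
# >>> imgs = (np.random.random((10, 61, 61)) * 255).astype(np.uint8)
# >>> lagged = stack_imgs_lag(imgs, frames_1=[5, 5], buffer=2)
# >>> lagged.shape  # (10, 61, 61, 3)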
def get_h5_key_and_concatenate(h5_list, key_name='labels'):
"""
    Simply extract and concatenate all of one key "key_name" from many H5 files. I use it to balance the touch
    and non-touch frames when training a model with a list of different H5 files.
Parameters
----------
h5_list : list
list of full paths to H5 file(s).
key_name : str
default 'labels', the key to get the data from the H5 file
"""
h5_list = utils.make_list(h5_list, suppress_warning=True)
for i, k in enumerate(h5_list):
with h5py.File(k, 'r') as h:
if i == 0:
out = np.asarray(h[key_name][:])
else:
out = np.concatenate((out, h[key_name][:]))
return out
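# A minimal usage sketch (file paths are hypothetical): pull the 'labels' key out
# of several H5 files at once to check the overall touch / non-touch balance.
# >>> all_labels = get_h5_key_and_concatenate(['/path/a.h5', '/path/b.h5'], key_name='labels')
# >>> np.mean(all_labels)  # fraction of touch frames across both files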
def get_h5_key_and_dont_concatenate(h5_list, key_name='labels'):
"""
    Extract all of one key "key_name" from many H5 files WITHOUT concatenating: returns a list containing one list of
    values per H5 file, e.g. useful for getting each file's frame numbers separately.
Parameters
----------
h5_list : list
list of full paths to H5 file(s).
key_name : str
default 'labels', the key to get the data from the H5 file
"""
out = []
for i, k in enumerate(h5_list):
with h5py.File(k, 'r') as h:
out.append(list(h[key_name][:]))
return out
def clone_h5_basic_info(H5_list, fold_name=None, file_end='_QUICK_SAVE.h5'):
"""
    Copies all the info from an H5 into another H5 file, NOT INCLUDING the labels or images, so it has all the file info,
    like names, pole locations and the pole-match max value stack. Anything with 'images', 'MODEL__' or 'labels' in the key
    is not copied over to the new file.
Parameters
----------
H5_list : list
list of H5 files to clone
fold_name : str
default None, where to place the cloned H5 files. if left blank it will place in the same folder as the original file
file_end : str
default '_QUICK_SAVE.h5', how to change the name of the H5 file to be cloned to differentiate it from the original
Returns
-------
all_new_h5s: list
list of new H5 full file names
"""
if fold_name is not None:
try:
os.mkdir(fold_name)
except:
pass
all_new_h5s = []
for h5 in H5_list:
if fold_name is not None:
new_fn = fold_name + os.path.sep + os.path.basename(h5)[:-3] + file_end
else: #
new_fn = os.path.dirname(h5) + os.path.sep + os.path.basename(h5)[:-3] + file_end
all_new_h5s.append(new_fn)
try:
os.remove(new_fn)
except:
pass
with h5py.File(new_fn, 'w') as f1:
with h5py.File(h5, 'r') as f2:
for i, k in enumerate(f2.keys()):
if 'images' != k and 'MODEL__' not in k and 'labels' not in k:
f1.create_dataset(k, data=f2[k][:])
f2.close()
f1.close()
return all_new_h5s
def del_h5_with_term(h5_list, str_2_cmp):
"""
Parameters
----------
h5_list : list
list of H5 strings (full path)
str_2_cmp : str
will delete keys with this in their title ... e.g. '__RETRAIN'
"""
for k2 in h5_list:
with h5py.File(k2, 'a') as h5_source:
for k in h5_source.keys():
if str_2_cmp in k:
print('del--> ' + k)
del h5_source[k]
print('_______')
def split_h5_loop_segments(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000,
add_numbers_to_name=True,
disable_TQDM=False, set_seed=None, color_channel=True):
"""Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.
Parameters
----------
h5_to_split_list : list
list of strings with full file names to the H5 file(s) to be split
split_percentages : list
list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage
temp_base_name : str or list
full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
'split_percentages' and each file will be named based on that list
    chunk_size : int
default 10000, max amount of frames to hold in memory at a time before storing in H5 file. Should almost never
be an issue but just in case you can set to a lower value if you experience memory issues.
    add_numbers_to_name : bool
default true, just in case you don't want the numbers on the end of your h5 file.
Returns
Examples
--------
from whacc import image_tools, utils
h5_to_split_list = "/Users/phil/Downloads/untitled folder 2/AH0000x000000_small_tester.h5"
h5_to_split_list = [h5_to_split_list]
utils.print_h5_keys(h5_to_split_list[0])
bd = '/Users/phil/Downloads/untitled folder 2/'
image_tools.split_h5_loop_segments(h5_to_split_list, [1, 3], [bd+'TRASH', bd+'TRASH2'], chunk_size=10000, add_numbers_to_name=False,
disable_TQDM=False, set_seed = None)
-------
"""
if isinstance(temp_base_name, str):
temp_base_name = [temp_base_name] * len(split_percentages)
else:
assert len(temp_base_name) == len(
split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
for i, k in enumerate(temp_base_name):
if k[-3:] == '.h5':
temp_base_name[i] = temp_base_name[i][:-3]
frame_num_array_list = get_h5_key_and_dont_concatenate(h5_to_split_list, 'frame_nums')
total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
cnt1 = 0
h5_creators = dict()
split_percentages = split_percentages / np.sum(split_percentages)
# assert(sum(split_percentages)==1)
final_names = []
for iii, h5_to_split in enumerate(h5_to_split_list):
with h5py.File(h5_to_split, 'r') as h:
tmp_frame_list = frame_num_array_list[iii]
L = len(tmp_frame_list)
if set_seed is not None:
np.random.seed(set_seed)
mixed_inds = np.random.choice(L, L, replace=False)
random_segment_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
random_segment_inds = [sorted(tmpk) for tmpk in random_segment_inds]
random_frame_inds = [[None]] * len(random_segment_inds)
list_of_new_frame_nums = [[None]] * len(random_segment_inds)
loop_seg_list = list(utils.loop_segments(tmp_frame_list))
for pi, p in enumerate(random_segment_inds):
tmp1 = []
tmp2 = []
for pp in p:
x = list(loop_seg_list[pp])
tmp1 += list(range(x[0], x[1]))
tmp2.append(tmp_frame_list[pp])
random_frame_inds[pi] = tmp1
list_of_new_frame_nums[pi] = tmp2
for i, k in enumerate(split_percentages): # for each new h5 created
if iii == 0: # create the H5 creators
if add_numbers_to_name:
final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
else:
final_names.append(temp_base_name[i] + '.h5')
h5_creators[i] = h5_iterative_creator(final_names[-1],
overwrite_if_file_exists=True,
close_and_open_on_each_iteration=True,
color_channel=color_channel)
ims = []
labels = []
for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
cnt1 += 1
ims.append(h['images'][ii])
labels.append(h['labels'][ii])
if ii > 0 and ii % chunk_size == 0:
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
ims = []
labels = []
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
with h5py.File(h5_creators[i].h5_full_file_name,
'r+') as h2: # wanted to do this to allow NONE as input and still have frame nums, but I need to have an append after creating and its a pain
frame_nums = np.asarray(list_of_new_frame_nums[i])
if 'frame_nums' not in h2.keys():
h2.create_dataset('frame_nums', shape=np.shape(frame_nums), maxshape=(None,), chunks=True,
data=frame_nums)
else:
h2['frame_nums'].resize(h2['frame_nums'].shape[0] + frame_nums.shape[0], axis=0)
h2['frame_nums'][-frame_nums.shape[0]:] = frame_nums
# # add the frame info to each
# for i, frame_nums in enumerate(list_of_new_frame_nums):
# with h5py.File(h5_creators[i].h5_full_file_name, 'r+') as h:
# h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)
return final_names
def make_sure_frame_nums_exist(h5file):
with h5py.File(h5file, 'r+') as h:
key_list = list(h.keys())
if 'frame_nums' in key_list:
print("""'frame_nums' already in the key list""")
return None
if 'trial_nums_and_frame_nums' not in key_list:
print(
"""key 'trial_nums_and_frame_nums' must be in the provided h5 this is the only reason program exists""")
return None
frame_nums = h['trial_nums_and_frame_nums'][1, :]
h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)
def split_h5(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000, add_numbers_to_name=True,
disable_TQDM=False, skip_if_label_is_neg_1=False, set_seed=None, color_channel=True):
"""Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.
Parameters
----------
h5_to_split_list : list
list of strings with full file names to the H5 file(s) to be split
split_percentages : list
list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage
temp_base_name : str or list
full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
'split_percentages' and each file will be named based on that list
    chunk_size : int
default 10000, max amount of frames to hold in memory at a time before storing in H5 file. Should almost never
be an issue but just in case you can set to a lower value if you experience memory issues.
    add_numbers_to_name : bool
default true, just in case you don't want the numbers on the end of your h5 file.
Returns
-------
"""
if isinstance(temp_base_name, str):
temp_base_name = [temp_base_name] * len(split_percentages)
else:
assert len(temp_base_name) == len(
split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
cnt1 = 0
h5_creators = dict()
split_percentages = split_percentages / np.sum(split_percentages)
# assert(sum(split_percentages)==1)
final_names = []
for iii, h5_to_split in enumerate(h5_to_split_list):
with h5py.File(h5_to_split, 'r') as h:
L = len(h['labels'][:])
if set_seed is not None:
np.random.seed(set_seed)
mixed_inds = np.random.choice(L, L, replace=False)
if skip_if_label_is_neg_1: # remove -1s
mixed_inds = mixed_inds[mixed_inds != -1]
random_frame_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
for i, k in enumerate(split_percentages):
if iii == 0: # create the H5 creators
if add_numbers_to_name:
final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
else:
final_names.append(temp_base_name[i] + '.h5')
h5_creators[i] = h5_iterative_creator(final_names[-1],
overwrite_if_file_exists=True,
close_and_open_on_each_iteration=True,
color_channel=color_channel)
ims = []
labels = []
# print('starting ' + str(iii*i + 1) + ' of ' + str(len(split_percentages)*len(h5_to_split_list)))
for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
cnt1 += 1
ims.append(h['images'][ii])
labels.append(h['labels'][ii])
if ii > 0 and ii % chunk_size == 0:
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
ims = []
labels = []
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
return final_names
class h5_iterative_creator():
"""Create an H5 file using a for loop easily. used to create the augmented H5 file for training
Attributes:
Parameters
----------
h5_new_full_file_name : string
full path name to your H5 file to be created
overwrite_if_file_exists : bool
overwrites the h5 file if it already exists
max_img_height : int
default 61, only the max size, can be larger in case you are going to have larger images
max_img_width : int
default 61, only the max size, can be larger in case you are going to have larger images
close_and_open_on_each_iteration : bool
default True, this prevents the user from forgetting to close H5 which
can lead to corruption.
Example
_______
h5creator = h5_iterative_creator(new_H5_file)
h5creator.add_to_h5(img_stack1, labels_stack1)
h5creator.add_to_h5(img_stack2, labels_stack2)
h5creator.add_to_h5(img_stack3, labels_stack3)
"""
def __init__(self, h5_new_full_file_name,
overwrite_if_file_exists=False,
max_img_height=61,
max_img_width=61,
close_and_open_on_each_iteration=True,
color_channel=True,
add_to_existing_H5=False):
if not close_and_open_on_each_iteration:
print('**remember to CLOSE the H5 file when you are done!!!**')
if overwrite_if_file_exists and os.path.isfile(h5_new_full_file_name):
os.remove(h5_new_full_file_name)
self.h5_full_file_name = h5_new_full_file_name
if add_to_existing_H5:
self.hf_file = h5py.File(h5_new_full_file_name, "r+")
else:
self.hf_file = h5py.File(h5_new_full_file_name, "w")
self.color_channel = color_channel
self.max_img_height = max_img_height
self.max_img_width = max_img_width
self._went_through_create_h5 = False
self.close_it = close_and_open_on_each_iteration
if self.close_it:
self.hf_file.close()
def add_to_h5(self, images, labels):
"""
Parameters
----------
images : numpy tensor
chunk of images
labels : numpy array
            array of labels
"""
if self.close_it:
self.open_or_close_h5('r+')
if self._went_through_create_h5: # already initialized with the correct size
self._add_next_chunk_to_h5(images, labels)
else:
self._create_h5(images, labels)
if self.close_it:
self.open_or_close_h5('close')
def _create_h5(self, images, labels):
"""
Parameters
----------
images :
labels :
"""
# if set_multiplier:
self.hf_file.create_dataset("multiplier", [1], h5py.h5t.STD_I32LE, data=images.shape[0])
if self.color_channel:
self.hf_file.create_dataset('images',
np.shape(images),
h5py.h5t.STD_U8BE,
maxshape=(None, self.max_img_height, self.max_img_width, 3),
chunks=True,
data=images)
else:
self.hf_file.create_dataset('images',
np.shape(images),
h5py.h5t.STD_U8BE,
maxshape=(None, self.max_img_height, self.max_img_width),
chunks=True,
data=images)
self.hf_file.create_dataset('labels',
np.shape(labels),
h5py.h5t.STD_I32LE,
maxshape=(None,),
chunks=True,
data=labels)
self._went_through_create_h5 = True
def _add_next_chunk_to_h5(self, images, labels):
"""
Parameters
----------
images :
labels :
Returns
-------
"""
self.hf_file['images'].resize(self.hf_file['images'].shape[0] + images.shape[0], axis=0)
self.hf_file['labels'].resize(self.hf_file['labels'].shape[0] + labels.shape[0], axis=0)
self.hf_file['images'][-images.shape[0]:] = images
self.hf_file['labels'][-labels.shape[0]:] = labels
def read_h5(self):
""" """
self.open_or_close_h5('r')
print('''**remember to CLOSE the H5 file when you are done!!!** with ".close_h5()" method''')
def close_h5(self):
""" """
self.open_or_close_h5('close')
print('H5 file was closed')
def open_or_close_h5(self, mode_='r'):
"""
Parameters
----------
mode_ : str
mode can be H5py modes 'r', 'r+' 'w' (w overwrites file!) etc OR 'close' to
# ensure it is closed. separate function to prevent a bunch of try statements (Default value = 'r')
Returns
-------
"""
try:
self.hf_file.close()
finally:
if mode_.lower() != 'close':
self.hf_file = h5py.File(self.h5_full_file_name, mode_)
#
def augment_helper(keras_datagen, num_aug_ims, num_reg_ims, in_img, in_label):
"""
Parameters
----------
    keras_datagen : keras.preprocessing.image.ImageDataGenerator
from keras.preprocessing.image import ImageDataGenerator-- keras_datagen = ImageDataGenerator(...)
num_aug_ims : int
number of augmented images to generate from single input image
num_reg_ims : int
        number of un-augmented copies of in_img to produce; these are stacked at the beginning of the all_augment variable.
        Useful for comparing against the augmented images when testing, and when splitting into many H5s if you want an original in each.
in_img : numpy array
numpy array either 3D with color channel for the last dim ot 2D
in_label : int
        the label associated with in_img. It is simply repeated to create 'out_labels', matching the size of 'all_augment'.
Returns
-------
"""
if len(in_img.shape) == 2: # or not np.any(np.asarray(in_img.shape)==3)
in_img = np.repeat(in_img[..., np.newaxis], 3, -1) # for 2D arrays without color channels
set_zoom = keras_datagen.zoom_range
in_img = np.expand_dims(in_img, 0)
it = keras_datagen.flow(in_img, batch_size=1)
all_augment = np.tile(in_img, [num_reg_ims, 1, 1, 1])
for i in range(num_aug_ims): ##
if set_zoom != [0, 0]: # if zoom is being used...
# keras 'zoom' is annoying. it zooms x and y differently randomly
# in order to get an equal zoom I use the following workaround.
z_val = np.random.uniform(low=set_zoom[0], high=set_zoom[1])
keras_datagen.zoom_range = [z_val, z_val]
it = keras_datagen.flow(in_img, batch_size=1)
batch = it.next()
image = batch[0].astype('uint8')
all_augment = np.append(all_augment, np.expand_dims(image, 0), 0)
out_labels = np.repeat(in_label, sum([num_aug_ims, num_reg_ims]))
keras_datagen.zoom_range = set_zoom
return all_augment, out_labels
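# A minimal usage sketch (augmentation settings are illustrative): make 5 augmented
# copies plus 1 unmodified copy of a single grayscale frame with label 1.
# >>> from keras.preprocessing.image import ImageDataGenerator
# >>> datagen = ImageDataGenerator(rotation_range=20, zoom_range=[0.9, 1.1])
# >>> img = (np.random.random((61, 61)) * 255).astype(np.uint8)
# >>> aug_imgs, aug_labels = augment_helper(datagen, 5, 1, img, 1)
# >>> aug_imgs.shape, aug_labels.shape  # (6, 61, 61, 3), (6,)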
def img_unstacker(img_array, num_frames_wide=8, color_channel=True):
"""unstacks image stack and combines them into one large image for easy display. reads left to right and then top to bottom.
Parameters
----------
img_array : numpy array
stacked image array
num_frames_wide : int
width of destacked image. if = 8 with input 20 images it will be 8 wide 3 long and 4 blank images (Default value = 8)
Returns
-------
"""
im_stack = None
for i, k in enumerate(img_array):
if i % num_frames_wide == 0:
if i != 0: # stack it
if im_stack is None:
im_stack = im_stack_tmp
else:
im_stack = np.vstack((im_stack, im_stack_tmp))
im_stack_tmp = k # must be at the end
else:
im_stack_tmp = np.hstack((im_stack_tmp, k))
x = num_frames_wide - len(img_array) % num_frames_wide
if x != 0:
if x != num_frames_wide:
for i in range(x):
im_stack_tmp = np.hstack((im_stack_tmp, np.ones_like(k)))
if im_stack is None:
return im_stack_tmp
else:
im_stack = np.vstack((im_stack, im_stack_tmp))
return im_stack
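# A minimal usage sketch with synthetic data: tile a 20-frame stack into a single
# 8-wide mosaic for quick inspection.
# >>> stack = (np.random.random((20, 61, 61, 3)) * 255).astype(np.uint8)
# >>> mosaic = img_unstacker(stack, num_frames_wide=8)
# >>> plt.imshow(mosaic); plt.show()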
def original_image(x):
"""This is used to transform batch generated images [-1 1] to the original image [0,255] for plotting
Parameters
----------
x :
Returns
-------
"""
image = tf.cast((x + 1) * 127.5, tf.uint8)
return image
def predict_multiple_H5_files(H5_file_list, model_2_load, append_model_and_labels_to_name_string=False,
batch_size=1000, model_2_load_is_model=False, save_on=False,
label_save_name=None, disable_TQDM=False,
save_labels_to_this_h5_file_instead=None) -> object:
"""
Parameters
----------
    H5_file_list : list
list of string(s) of H5 file full paths
    model_2_load : str or loaded keras model
        either the full path to a model folder ending with ".ckpt" OR the loaded model itself. If the latter,
        the user MUST set "model_2_load_is_model" to True and "label_save_name" must be explicitly defined (when using a
        model path the model name is used to name the labels).
append_model_and_labels_to_name_string : bool
        if True, label_save_name = 'MODEL__' + label_save_name + '__labels'; it is a simple way to keep track of labels
        from many models in a single H5 file. Also makes it easier to find those labels for later processing. (Default value = False)
batch_size : int
        number of images to process per batch -- slower prediction speeds << ideal prediction speed <<
        memory issues and crashes -- 1000 is normally pretty good on Google Colab (Default value = 1000)
model_2_load_is_model : bool
lets the program know if you are directly inserting a model (instead of a path to model folder) (Default value = False)
save_on : bool
saves to H5 file. either the original H5 (image source) or new H5 if a path to "save_labels_to_this_h5_file_instead"
is given (Default value = False)
label_save_name : string
h5 file key used to save the labels to, default is 'MODEL__' + **model_name** + '__labels'
disable_TQDM : bool
if True, turns off loading progress bar. (Default value = False)
save_labels_to_this_h5_file_instead : string
full path to H5 file to insert labels into instead of the H5 used as the image source (Default value = None)
Returns
-------
"""
for i, H5_file in enumerate(H5_file_list):
        # save_what_is_left_of_your_h5_file(H5_file, do_del_and_rename = 1) # only matters if file is corrupt, otherwise it doesn't touch it
gen = ImageBatchGenerator(batch_size, [H5_file])
if model_2_load_is_model:
if label_save_name is None and save_on == True:
                assert 1 == 0, 'label_save_name must be assigned if you are loading a model in directly and save_on == True.'
model = model_2_load
else:
if label_save_name is None:
label_save_name = model_2_load.split(os.path.sep)[-1].split('.')[0]
label_save_name = 'MODEL__' + label_save_name + '__labels'
            append_model_and_labels_to_name_string = False  # turn off because it defaults to this naming scheme if the user doesn't put in a name
model = tf.keras.models.load_model(model_2_load)
if append_model_and_labels_to_name_string:
label_save_name = 'MODEL__' + label_save_name + '__labels'
start = time.time()
labels_2_save = np.asarray([])
for k in tqdm(range(gen.__len__()), disable=disable_TQDM):
TMP_X, tmp_y = gen.getXandY(k)
outY = model.predict(TMP_X)
labels_2_save = np.append(labels_2_save, outY)
total_seconds = time.time() - start
time_per_mil = np.round(1000000 * total_seconds / len(labels_2_save))
print(str(time_per_mil) + ' seconds per 1 million images predicted')
if save_on:
if save_labels_to_this_h5_file_instead is not None: # add to differnt H5 file
H5_file = save_labels_to_this_h5_file_instead # otherwise it will add to the current H5 file
# based on the loop through "H5_file_list" above
try:
hf.close()
except:
pass
with h5py.File(H5_file, 'r+') as hf:
try:
del hf[label_save_name]
time.sleep(10) # give time to process the deleted file... maybe???
hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
except:
hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
hf.close()
return labels_2_save
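# A minimal usage sketch (all paths are hypothetical): predict labels for one H5
# file with a saved model and write them back into that same file.
# >>> predict_multiple_H5_files(['/path/to/session.h5'], '/path/to/model.ckpt',
# ...                           batch_size=1000, save_on=True)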
def get_total_frame_count(h5_file_list):
"""
Parameters
----------
h5_file_list :
Returns
-------
"""
total_frame_count = []
for H5_file in h5_file_list:
H5 = h5py.File(H5_file, 'r')
images = H5['images']
total_frame_count.append(images.shape[0])
return total_frame_count
def batch_size_file_ind_selector(num_in_each, batch_size):
"""batch_size_file_ind_selector - needed for ImageBatchGenerator to know which H5 file index
to use depending on the iteration number used in __getitem__ in the generator.
this all depends on the variable batch size.
Example: the output of the following...
batch_size_file_ind_selector([4000, 4001, 3999], [2000])
would be [0, 0, 1, 1, 1, 2, 2] which means that there are 2 chunks in the first
H5 file, 3 in the second and 2 in the third based on chunk size of 2000
Parameters
----------
    num_in_each : list
        number of frames in each H5 file (e.g. the output of get_total_frame_count)
    batch_size : int
        number of frames per batch
Returns
-------
"""
break_into = np.ceil(np.array(num_in_each) / batch_size)
extract_inds = np.array([])
for k, elem in enumerate(break_into):
tmp1 = np.array(np.ones(np.int(elem)) * k)
extract_inds = np.concatenate((extract_inds, tmp1), axis=0)
return extract_inds
# file_inds_for_H5_extraction is the same as extract_inds output from the above function
def reset_to_first_frame_for_each_file_ind(file_inds_for_H5_extraction):
"""reset_to_first_frame_for_each_file_ind - uses the output of batch_size_file_ind_selector
to determine when to reset the index for each individual H5 file. using the above example
the out put would be [0, 0, 2, 2, 2, 5, 5], each would be subtracted from the indexing to
set the position of the index to 0 for each new H5 file.
Parameters
----------
file_inds_for_H5_extraction :
Returns
-------
"""
subtract_for_index = []
for k, elem in enumerate(file_inds_for_H5_extraction):
tmp1 = np.diff(file_inds_for_H5_extraction)
tmp1 = np.where(tmp1 != 0)
tmp1 = np.append(-1, tmp1[0]) + 1
subtract_for_index.append(tmp1[np.int(file_inds_for_H5_extraction[k])])
return subtract_for_index
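# A minimal usage sketch: with three H5 files of 4000, 4001 and 3999 frames and a
# batch size of 2000, the two helpers above give the per-batch file index and the
# offset used to reset the index inside ImageBatchGenerator.__getitem__.
# >>> inds = batch_size_file_ind_selector([4000, 4001, 3999], 2000)  # [0, 0, 1, 1, 1, 2, 2]
# >>> reset_to_first_frame_for_each_file_ind(inds)                   # [0, 0, 2, 2, 2, 5, 5]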
class ImageBatchGenerator(keras.utils.Sequence):
""" """
def __init__(self, batch_size, h5_file_list, label_key = 'labels'):
h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)
num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)
file_inds_for_H5_extraction = batch_size_file_ind_selector(
num_frames_in_all_H5_files, batch_size)
subtract_for_index = reset_to_first_frame_for_each_file_ind(
file_inds_for_H5_extraction)
# self.to_fit = to_fit #set to True to return XY and False to return X
self.label_key = label_key
self.batch_size = batch_size
self.H5_file_list = h5_file_list
self.num_frames_in_all_H5_files = num_frames_in_all_H5_files
self.file_inds_for_H5_extraction = file_inds_for_H5_extraction
self.subtract_for_index = subtract_for_index
self.IMG_SIZE = 96
def __len__(self):
return len(self.file_inds_for_H5_extraction)
def __getitem__(self, num_2_extract):
b = self.batch_size
h = self.H5_file_list
i = self.file_inds_for_H5_extraction
H5_file = h[np.int(i[num_2_extract])]
with h5py.File(H5_file, 'r') as H5:
# H5 = h5py.File(H5_file, 'r')
images = H5['images']
num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
rgb_tensor = self.image_transform(raw_X)
labels_tmp = H5[self.label_key]
raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
H5.close()
return rgb_tensor, raw_Y
# def __getitem__(self, num_2_extract):
# b = self.batch_size
# h = self.H5_file_list
# i = self.file_inds_for_H5_extraction
# H5_file = h[np.int(i[num_2_extract])]
# H5 = h5py.File(H5_file, 'r')
# # list(H5.keys())
#
# images = H5['images']
# num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
# raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
# rgb_tensor = self.image_transform(raw_X)
#
# # if self.to_fit:
# # labels_tmp = H5[self.label_key]
# # raw_Y = labels_tmp[b*num_2_extract_mod:b*(num_2_extract_mod+1)]
# # return rgb_tensor, raw_Y
# # else:
# return rgb_tensor
def getXandY(self, num_2_extract):
"""
Parameters
----------
num_2_extract :
Returns
-------
"""
b = self.batch_size
h = self.H5_file_list
i = self.file_inds_for_H5_extraction
H5_file = h[np.int(i[num_2_extract])]
H5 = h5py.File(H5_file, 'r')
# list(H5.keys())
images = H5['images']
num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]
raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
rgb_tensor = self.image_transform(raw_X)
labels_tmp = H5[self.label_key]
raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]
return rgb_tensor, raw_Y
def image_transform(self, raw_X):
"""input num_of_images x H x W, image input must be grayscale
MobileNetV2 requires certain image dimensions
        We use N x 61 x 61 formatted images
self.IMG_SIZE is a single number to change the images into, images must be square
Parameters
----------
raw_X :
Returns
-------
"""
# rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
# rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
# rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
# rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing
# self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)
# return rgb_tensor
if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:
rgb_batch = copy.deepcopy(raw_X)
else:
rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing
self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)
return rgb_tensor
def plot_batch_distribution(self):
""" """
# randomly select a batch and generate images and labels
batch_num = np.random.choice(np.arange(0, self.__len__()))
samp_x, samp_y = self.getXandY(batch_num)
# look at the distribution of classes
plt.pie([1 - np.mean(samp_y), np.mean(samp_y)],
labels=['non-touch frames', 'touch frames'], autopct='%1.1f%%', )
plt.title('class distribution from batch ' + str(batch_num))
plt.show()
# generate indices for positive and negative classes
images_to_sample = 20
neg_class = [i for i, val in enumerate(samp_y) if val == 0]
pos_class = [i for i, val in enumerate(samp_y) if val == 1]
neg_index = np.random.choice(neg_class, images_to_sample)
pos_index = np.random.choice(pos_class, images_to_sample)
# plot sample positive and negative class images
plt.figure(figsize=(10, 10))
samp_x = (samp_x + 1) / 2
for i in range(images_to_sample):
plt.subplot(5, 10, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
_ = plt.imshow(samp_x[neg_index[i]])
plt.xlabel('0')
plt.subplot(5, 10, images_to_sample + i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(samp_x[pos_index[i]])
plt.xlabel('1')
plt.suptitle('sample images from batch ' + str(batch_num))
plt.show()
def image_transform_(IMG_SIZE, raw_X):
"""
input num_of_images x H x W, image input must be grayscale
MobileNetV2 requires certain image dimensions
    We use N x 61 x 61 formatted images
self.IMG_SIZE is a single number to change the images into, images must be square
Parameters
----------
raw_X :
Returns
-------
"""
if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:
rgb_batch = copy.deepcopy(raw_X)
else:
rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
rgb_tensor = tf.image.resize(rgb_tensor, (IMG_SIZE, IMG_SIZE)) # resizing
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
return rgb_tensor
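    # Hedged usage sketch (illustrative, not part of the original module). A hypothetical
    # grayscale batch of N x 61 x 61 frames becomes an (N, IMG_SIZE, IMG_SIZE, 3) float32
    # tensor scaled to [-1, 1], as MobileNetV2 expects; IMG_SIZE = 96 is an assumed value
    # and the function is accessed through its owning class since it takes no self argument.
    #   batch = np.random.rand(8, 61, 61)   # hypothetical grayscale stack
    #   rgb = image_transform_(96, batch)
    #   rgb.shape                           # -> (8, 96, 96, 3)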
|
[
"whacc.utils.make_list",
"matplotlib.pyplot.grid",
"numpy.hstack",
"time.sleep",
"numpy.array",
"whacc.utils.loop_segments",
"tensorflow.keras.models.load_model",
"copy.deepcopy",
"tensorflow.cast",
"os.remove",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.repeat",
"numpy.where",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"numpy.float64",
"numpy.asarray",
"numpy.diff",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.concatenate",
"os.mkdir",
"numpy.random.seed",
"numpy.tile",
"matplotlib.pyplot.xticks",
"numpy.random.choice",
"h5py.File",
"os.path.isfile",
"os.path.dirname",
"numpy.shape",
"time.time",
"numpy.int",
"matplotlib.pyplot.show",
"numpy.ones_like",
"tensorflow.image.resize",
"numpy.append",
"numpy.sum",
"matplotlib.pyplot.figure",
"os.path.basename",
"numpy.expand_dims",
"numpy.random.uniform",
"numpy.cumsum",
"matplotlib.pyplot.subplot"
] |
[((950, 979), 'whacc.utils.loop_segments', 'utils.loop_segments', (['frames_1'], {}), '(frames_1)\n', (969, 979), False, 'from whacc import utils\n'), ((1494, 1517), 'numpy.asarray', 'np.asarray', (['array_group'], {}), '(array_group)\n', (1504, 1517), True, 'import numpy as np\n'), ((1989, 2036), 'whacc.utils.make_list', 'utils.make_list', (['h5_list'], {'suppress_warning': '(True)'}), '(h5_list, suppress_warning=True)\n', (2004, 2036), False, 'from whacc import utils\n'), ((22064, 22089), 'numpy.expand_dims', 'np.expand_dims', (['in_img', '(0)'], {}), '(in_img, 0)\n', (22078, 22089), True, 'import numpy as np\n'), ((22159, 22198), 'numpy.tile', 'np.tile', (['in_img', '[num_reg_ims, 1, 1, 1]'], {}), '(in_img, [num_reg_ims, 1, 1, 1])\n', (22166, 22198), True, 'import numpy as np\n'), ((24403, 24437), 'tensorflow.cast', 'tf.cast', (['((x + 1) * 127.5)', 'tf.uint8'], {}), '((x + 1) * 127.5, tf.uint8)\n', (24410, 24437), True, 'import tensorflow as tf\n'), ((30296, 30308), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (30304, 30308), True, 'import numpy as np\n'), ((38221, 38251), 'tensorflow.cast', 'tf.cast', (['rgb_batch', 'tf.float32'], {}), '(rgb_batch, tf.float32)\n', (38228, 38251), True, 'import tensorflow as tf\n'), ((38410, 38459), 'tensorflow.image.resize', 'tf.image.resize', (['rgb_tensor', '(IMG_SIZE, IMG_SIZE)'], {}), '(rgb_tensor, (IMG_SIZE, IMG_SIZE))\n', (38425, 38459), True, 'import tensorflow as tf\n'), ((1112, 1145), 'numpy.tile', 'np.tile', (['x[:, :, None]', 'tile_axes'], {}), '(x[:, :, None], tile_axes)\n', (1119, 1145), True, 'import numpy as np\n'), ((1306, 1339), 'numpy.concatenate', 'np.concatenate', (['(x, tmp1)'], {'axis': '(2)'}), '((x, tmp1), axis=2)\n', (1320, 1339), True, 'import numpy as np\n'), ((7569, 7594), 'numpy.sum', 'np.sum', (['split_percentages'], {}), '(split_percentages)\n', (7575, 7594), True, 'import numpy as np\n'), ((11165, 11188), 'h5py.File', 'h5py.File', (['h5file', '"""r+"""'], {}), "(h5file, 'r+')\n", (11174, 11188), False, 'import h5py\n'), ((13496, 13521), 'numpy.sum', 'np.sum', (['split_percentages'], {}), '(split_percentages)\n', (13502, 13521), True, 'import numpy as np\n'), ((21929, 21970), 'numpy.repeat', 'np.repeat', (['in_img[..., np.newaxis]', '(3)', '(-1)'], {}), '(in_img[..., np.newaxis], 3, -1)\n', (21938, 21970), True, 'import numpy as np\n'), ((24113, 24148), 'numpy.vstack', 'np.vstack', (['(im_stack, im_stack_tmp)'], {}), '((im_stack, im_stack_tmp))\n', (24122, 24148), True, 'import numpy as np\n'), ((27876, 27887), 'time.time', 'time.time', ([], {}), '()\n', (27885, 27887), False, 'import time\n'), ((27912, 27926), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (27922, 27926), True, 'import numpy as np\n'), ((29382, 29405), 'h5py.File', 'h5py.File', (['H5_file', '"""r"""'], {}), "(H5_file, 'r')\n", (29391, 29405), False, 'import h5py\n'), ((30425, 30469), 'numpy.concatenate', 'np.concatenate', (['(extract_inds, tmp1)'], {'axis': '(0)'}), '((extract_inds, tmp1), axis=0)\n', (30439, 30469), True, 'import numpy as np\n'), ((31218, 31254), 'numpy.diff', 'np.diff', (['file_inds_for_H5_extraction'], {}), '(file_inds_for_H5_extraction)\n', (31225, 31254), True, 'import numpy as np\n'), ((31270, 31289), 'numpy.where', 'np.where', (['(tmp1 != 0)'], {}), '(tmp1 != 0)\n', (31278, 31289), True, 'import numpy as np\n'), ((31601, 31653), 'whacc.utils.make_list', 'utils.make_list', (['h5_file_list'], {'suppress_warning': '(True)'}), '(h5_file_list, suppress_warning=True)\n', (31616, 31653), False, 'from whacc import 
utils\n'), ((34269, 34292), 'h5py.File', 'h5py.File', (['H5_file', '"""r"""'], {}), "(H5_file, 'r')\n", (34278, 34292), False, 'import h5py\n'), ((35778, 35808), 'tensorflow.cast', 'tf.cast', (['rgb_batch', 'tf.float32'], {}), '(rgb_batch, tf.float32)\n', (35785, 35808), True, 'import tensorflow as tf\n'), ((35975, 36034), 'tensorflow.image.resize', 'tf.image.resize', (['rgb_tensor', '(self.IMG_SIZE, self.IMG_SIZE)'], {}), '(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE))\n', (35990, 36034), True, 'import tensorflow as tf\n'), ((36632, 36642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36640, 36642), True, 'import matplotlib.pyplot as plt\n'), ((36891, 36936), 'numpy.random.choice', 'np.random.choice', (['neg_class', 'images_to_sample'], {}), '(neg_class, images_to_sample)\n', (36907, 36936), True, 'import numpy as np\n'), ((36957, 37002), 'numpy.random.choice', 'np.random.choice', (['pos_class', 'images_to_sample'], {}), '(pos_class, images_to_sample)\n', (36973, 37002), True, 'import numpy as np\n'), ((37069, 37097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (37079, 37097), True, 'import matplotlib.pyplot as plt\n'), ((37660, 37670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37668, 37670), True, 'import matplotlib.pyplot as plt\n'), ((38112, 38132), 'copy.deepcopy', 'copy.deepcopy', (['raw_X'], {}), '(raw_X)\n', (38125, 38132), False, 'import copy\n'), ((38163, 38203), 'numpy.repeat', 'np.repeat', (['raw_X[..., np.newaxis]', '(3)', '(-1)'], {}), '(raw_X[..., np.newaxis], 3, -1)\n', (38172, 38203), True, 'import numpy as np\n'), ((1239, 1293), 'numpy.concatenate', 'np.concatenate', (['(x, imgs[stack_i][:, :, None])'], {'axis': '(2)'}), '((x, imgs[stack_i][:, :, None]), axis=2)\n', (1253, 1293), True, 'import numpy as np\n'), ((2086, 2103), 'h5py.File', 'h5py.File', (['k', '"""r"""'], {}), "(k, 'r')\n", (2095, 2103), False, 'import h5py\n'), ((2799, 2816), 'h5py.File', 'h5py.File', (['k', '"""r"""'], {}), "(k, 'r')\n", (2808, 2816), False, 'import h5py\n'), ((3763, 3782), 'os.mkdir', 'os.mkdir', (['fold_name'], {}), '(fold_name)\n', (3771, 3782), False, 'import os\n'), ((4154, 4171), 'os.remove', 'os.remove', (['new_fn'], {}), '(new_fn)\n', (4163, 4171), False, 'import os\n'), ((4218, 4240), 'h5py.File', 'h5py.File', (['new_fn', '"""w"""'], {}), "(new_fn, 'w')\n", (4227, 4240), False, 'import h5py\n'), ((4836, 4854), 'h5py.File', 'h5py.File', (['k2', '"""a"""'], {}), "(k2, 'a')\n", (4845, 4854), False, 'import h5py\n'), ((7726, 7753), 'h5py.File', 'h5py.File', (['h5_to_split', '"""r"""'], {}), "(h5_to_split, 'r')\n", (7735, 7753), False, 'import h5py\n'), ((7955, 7992), 'numpy.random.choice', 'np.random.choice', (['L', 'L'], {'replace': '(False)'}), '(L, L, replace=False)\n', (7971, 7992), True, 'import numpy as np\n'), ((13653, 13680), 'h5py.File', 'h5py.File', (['h5_to_split', '"""r"""'], {}), "(h5_to_split, 'r')\n", (13662, 13680), False, 'import h5py\n'), ((13826, 13863), 'numpy.random.choice', 'np.random.choice', (['L', 'L'], {'replace': '(False)'}), '(L, L, replace=False)\n', (13842, 13863), True, 'import numpy as np\n'), ((16964, 17001), 'os.path.isfile', 'os.path.isfile', (['h5_new_full_file_name'], {}), '(h5_new_full_file_name)\n', (16978, 17001), False, 'import os\n'), ((17015, 17047), 'os.remove', 'os.remove', (['h5_new_full_file_name'], {}), '(h5_new_full_file_name)\n', (17024, 17047), False, 'import os\n'), ((17161, 17199), 'h5py.File', 'h5py.File', (['h5_new_full_file_name', '"""r+"""'], {}), 
"(h5_new_full_file_name, 'r+')\n", (17170, 17199), False, 'import h5py\n'), ((17241, 17278), 'h5py.File', 'h5py.File', (['h5_new_full_file_name', '"""w"""'], {}), "(h5_new_full_file_name, 'w')\n", (17250, 17278), False, 'import h5py\n'), ((19272, 19288), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (19280, 19288), True, 'import numpy as np\n'), ((22469, 22521), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'set_zoom[0]', 'high': 'set_zoom[1]'}), '(low=set_zoom[0], high=set_zoom[1])\n', (22486, 22521), True, 'import numpy as np\n'), ((22746, 22770), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (22760, 22770), True, 'import numpy as np\n'), ((23790, 23818), 'numpy.hstack', 'np.hstack', (['(im_stack_tmp, k)'], {}), '((im_stack_tmp, k))\n', (23799, 23818), True, 'import numpy as np\n'), ((27695, 27735), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_2_load'], {}), '(model_2_load)\n', (27721, 27735), True, 'import tensorflow as tf\n'), ((28106, 28136), 'numpy.append', 'np.append', (['labels_2_save', 'outY'], {}), '(labels_2_save, outY)\n', (28115, 28136), True, 'import numpy as np\n'), ((28161, 28172), 'time.time', 'time.time', ([], {}), '()\n', (28170, 28172), False, 'import time\n'), ((30241, 30262), 'numpy.array', 'np.array', (['num_in_each'], {}), '(num_in_each)\n', (30249, 30262), True, 'import numpy as np\n'), ((31305, 31327), 'numpy.append', 'np.append', (['(-1)', 'tmp1[0]'], {}), '(-1, tmp1[0])\n', (31314, 31327), True, 'import numpy as np\n'), ((32612, 32636), 'numpy.int', 'np.int', (['i[num_2_extract]'], {}), '(i[num_2_extract])\n', (32618, 32636), True, 'import numpy as np\n'), ((32651, 32674), 'h5py.File', 'h5py.File', (['H5_file', '"""r"""'], {}), "(H5_file, 'r')\n", (32660, 32674), False, 'import h5py\n'), ((34230, 34254), 'numpy.int', 'np.int', (['i[num_2_extract]'], {}), '(i[num_2_extract])\n', (34236, 34254), True, 'import numpy as np\n'), ((35657, 35677), 'copy.deepcopy', 'copy.deepcopy', (['raw_X'], {}), '(raw_X)\n', (35670, 35677), False, 'import copy\n'), ((35716, 35756), 'numpy.repeat', 'np.repeat', (['raw_X[..., np.newaxis]', '(3)', '(-1)'], {}), '(raw_X[..., np.newaxis], 3, -1)\n', (35725, 35756), True, 'import numpy as np\n'), ((37186, 37211), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(10)', '(i + 1)'], {}), '(5, 10, i + 1)\n', (37197, 37211), True, 'import matplotlib.pyplot as plt\n'), ((37224, 37238), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (37234, 37238), True, 'import matplotlib.pyplot as plt\n'), ((37251, 37265), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (37261, 37265), True, 'import matplotlib.pyplot as plt\n'), ((37278, 37293), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (37286, 37293), True, 'import matplotlib.pyplot as plt\n'), ((37310, 37342), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samp_x[neg_index[i]]'], {}), '(samp_x[neg_index[i]])\n', (37320, 37342), True, 'import matplotlib.pyplot as plt\n'), ((37355, 37370), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""0"""'], {}), "('0')\n", (37365, 37370), True, 'import matplotlib.pyplot as plt\n'), ((37384, 37428), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(10)', '(images_to_sample + i + 1)'], {}), '(5, 10, images_to_sample + i + 1)\n', (37395, 37428), True, 'import matplotlib.pyplot as plt\n'), ((37441, 37455), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (37451, 37455), True, 'import matplotlib.pyplot 
as plt\n'), ((37468, 37482), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (37478, 37482), True, 'import matplotlib.pyplot as plt\n'), ((37495, 37510), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (37503, 37510), True, 'import matplotlib.pyplot as plt\n'), ((37523, 37555), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samp_x[pos_index[i]]'], {}), '(samp_x[pos_index[i]])\n', (37533, 37555), True, 'import matplotlib.pyplot as plt\n'), ((37568, 37583), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""1"""'], {}), "('1')\n", (37578, 37583), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2181), 'numpy.asarray', 'np.asarray', (['h[key_name][:]'], {}), '(h[key_name][:])\n', (2165, 2181), True, 'import numpy as np\n'), ((2222, 2259), 'numpy.concatenate', 'np.concatenate', (['(out, h[key_name][:])'], {}), '((out, h[key_name][:]))\n', (2236, 2259), True, 'import numpy as np\n'), ((4265, 4283), 'h5py.File', 'h5py.File', (['h5', '"""r"""'], {}), "(h5, 'r')\n", (4274, 4283), False, 'import h5py\n'), ((7905, 7929), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (7919, 7929), True, 'import numpy as np\n'), ((8366, 8401), 'whacc.utils.loop_segments', 'utils.loop_segments', (['tmp_frame_list'], {}), '(tmp_frame_list)\n', (8385, 8401), False, 'from whacc import utils\n'), ((11675, 11695), 'numpy.shape', 'np.shape', (['frame_nums'], {}), '(frame_nums)\n', (11683, 11695), True, 'import numpy as np\n'), ((13776, 13800), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (13790, 13800), True, 'import numpy as np\n'), ((18521, 18537), 'numpy.shape', 'np.shape', (['images'], {}), '(images)\n', (18529, 18537), True, 'import numpy as np\n'), ((18909, 18925), 'numpy.shape', 'np.shape', (['images'], {}), '(images)\n', (18917, 18925), True, 'import numpy as np\n'), ((20861, 20901), 'h5py.File', 'h5py.File', (['self.h5_full_file_name', 'mode_'], {}), '(self.h5_full_file_name, mode_)\n', (20870, 20901), False, 'import h5py\n'), ((28725, 28749), 'h5py.File', 'h5py.File', (['H5_file', '"""r+"""'], {}), "(H5_file, 'r+')\n", (28734, 28749), False, 'import h5py\n'), ((31371, 31409), 'numpy.int', 'np.int', (['file_inds_for_H5_extraction[k]'], {}), '(file_inds_for_H5_extraction[k])\n', (31377, 31409), True, 'import numpy as np\n'), ((36455, 36470), 'numpy.mean', 'np.mean', (['samp_y'], {}), '(samp_y)\n', (36462, 36470), True, 'import numpy as np\n'), ((994, 1025), 'numpy.random.random', 'np.random.random', (['imgs[0].shape'], {}), '(imgs[0].shape)\n', (1010, 1025), True, 'import numpy as np\n'), ((10050, 10065), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (10060, 10065), True, 'import numpy as np\n'), ((10067, 10085), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (10077, 10085), True, 'import numpy as np\n'), ((10108, 10157), 'h5py.File', 'h5py.File', (['h5_creators[i].h5_full_file_name', '"""r+"""'], {}), "(h5_creators[i].h5_full_file_name, 'r+')\n", (10117, 10157), False, 'import h5py\n'), ((10359, 10396), 'numpy.asarray', 'np.asarray', (['list_of_new_frame_nums[i]'], {}), '(list_of_new_frame_nums[i])\n', (10369, 10396), True, 'import numpy as np\n'), ((15439, 15454), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (15449, 15454), True, 'import numpy as np\n'), ((15456, 15474), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (15466, 15474), True, 'import numpy as np\n'), ((23662, 23697), 'numpy.vstack', 'np.vstack', (['(im_stack, im_stack_tmp)'], {}), '((im_stack, 
im_stack_tmp))\n', (23671, 23697), True, 'import numpy as np\n'), ((28842, 28856), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (28852, 28856), False, 'import time\n'), ((30383, 30395), 'numpy.int', 'np.int', (['elem'], {}), '(elem)\n', (30389, 30395), True, 'import numpy as np\n'), ((36438, 36453), 'numpy.mean', 'np.mean', (['samp_y'], {}), '(samp_y)\n', (36445, 36453), True, 'import numpy as np\n'), ((3946, 3966), 'os.path.basename', 'os.path.basename', (['h5'], {}), '(h5)\n', (3962, 3966), False, 'import os\n'), ((4021, 4040), 'os.path.dirname', 'os.path.dirname', (['h5'], {}), '(h5)\n', (4036, 4040), False, 'import os\n'), ((4057, 4077), 'os.path.basename', 'os.path.basename', (['h5'], {}), '(h5)\n', (4073, 4077), False, 'import os\n'), ((24013, 24028), 'numpy.ones_like', 'np.ones_like', (['k'], {}), '(k)\n', (24025, 24028), True, 'import numpy as np\n'), ((9903, 9918), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (9913, 9918), True, 'import numpy as np\n'), ((9920, 9938), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (9930, 9938), True, 'import numpy as np\n'), ((15292, 15307), 'numpy.asarray', 'np.asarray', (['ims'], {}), '(ims)\n', (15302, 15307), True, 'import numpy as np\n'), ((15309, 15327), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (15319, 15327), True, 'import numpy as np\n'), ((28970, 28995), 'numpy.float64', 'np.float64', (['labels_2_save'], {}), '(labels_2_save)\n', (28980, 28995), True, 'import numpy as np\n'), ((8061, 8094), 'numpy.cumsum', 'np.cumsum', (['split_percentages[:-1]'], {}), '(split_percentages[:-1])\n', (8070, 8094), True, 'import numpy as np\n'), ((10513, 10533), 'numpy.shape', 'np.shape', (['frame_nums'], {}), '(frame_nums)\n', (10521, 10533), True, 'import numpy as np\n'), ((14040, 14073), 'numpy.cumsum', 'np.cumsum', (['split_percentages[:-1]'], {}), '(split_percentages[:-1])\n', (14049, 14073), True, 'import numpy as np\n'), ((29081, 29106), 'numpy.float64', 'np.float64', (['labels_2_save'], {}), '(labels_2_save)\n', (29091, 29106), True, 'import numpy as np\n')]
|
import numpy as np
import apm_id as arx
######################################################
# Configuration
######################################################
# number of terms
ny = 2 # output coefficients
nu = 1 # input coefficients
# number of inputs
ni = 1
# number of outputs
no = 1
# load data and parse into columns
data = np.loadtxt('data_step_test.csv',delimiter=',')
######################################################
# generate time-series model
arx.apm_id(data,ni,nu,ny)
|
[
"numpy.loadtxt",
"apm_id.apm_id"
] |
[((351, 398), 'numpy.loadtxt', 'np.loadtxt', (['"""data_step_test.csv"""'], {'delimiter': '""","""'}), "('data_step_test.csv', delimiter=',')\n", (361, 398), True, 'import numpy as np\n'), ((487, 515), 'apm_id.apm_id', 'arx.apm_id', (['data', 'ni', 'nu', 'ny'], {}), '(data, ni, nu, ny)\n', (497, 515), True, 'import apm_id as arx\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
plt.ion()
bands = [2,3]
single_channel_readout = 2
nsamp = 2**25
new_chans = False
def etaPhaseModDegree(etaPhase):
return (etaPhase+180)%360-180
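# A quick illustrative check of the wrapping (values computed by hand, not from the source):
#   etaPhaseModDegree(190)  -> -170
#   etaPhaseModDegree(-200) ->  160
#   etaPhaseModDegree(45)   ->   45
# i.e. any input angle in degrees is mapped into the interval [-180, 180).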
# For on-resonance I/Q high-rate data, use the eta_mag and eta_phase found in the eta scans
# for "Q", and that phase +/- 90 deg for "I". For off-resonance data (e.g. to look at the HEMT),
# set eta_mag = 1 and eta_phase = 0 & 90, or use the eta_phase of the closest resonator for "Q"
# and that +/- 90 for "I".
# In single_channel_readout mode 2 the data are taken at 2.4 MHz and decimation / filter_alpha
# can be ignored; for single_channel_readout = 1 (600 kHz data) they matter. See the confluence
# page https://confluence.slac.stanford.edu/display/SMuRF/SMuRF+firmware#SMuRFfirmware-Datamodes
if new_chans == True:
chans = {}
freqs = {}
sbs = {}
eta_mags_scaled = {}
eta_phases = {}
for band in bands:
chans[band] = S.which_on(band)
freqs[band] = []
sbs[band] = []
eta_mags_scaled[band] = []
eta_phases[band] = []
for chan in chans[band]:
freqs[band].append(S.channel_to_freq(band,chan))
sbs[band].append(S.freq_to_subband(band,S.channel_to_freq(band,chan))[0])
eta_mags_scaled[band].append(S.get_eta_mag_scaled_channel(band,chan))
eta_phases[band].append(S.get_eta_phase_degree_channel(band,chan))
S.channel_off(band,chan)
freqs[band] = np.asarray(freqs[band])
sbs[band] = np.asarray(sbs[band])
eta_mags_scaled[band] = np.asarray(eta_mags_scaled[band])
eta_phases[band] = np.asarray(eta_phases[band])
for band in bands:
for i,chan in enumerate(chans[band]):
plt.figure()
S.set_fixed_tone(freqs[band][i],12)
S.set_feedback_enable(band,0)
#S.run_serial_gradient_descent(band)
#S.run_serial_eta_scan(band)
S.flux_ramp_off()
#qEtaPhaseDegree = eta_phases[band][i]
qEtaPhaseDegree = 0
#EtaMag = eta_mags_scaled[band][i]
EtaMag = 1
channel = S.which_on(band)[0]
S.set_eta_mag_scaled_channel(band,channel,EtaMag)
alpha = 1.0
for IorQ in ['Q0','Q+','I+','I-']:
            if IorQ == 'Q0':
                S.set_eta_phase_degree_channel(band,channel,qEtaPhaseDegree)
            if IorQ == 'Q+':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+180))
            if IorQ == 'I+':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+90))
            if IorQ == 'I-':
                S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree-90))
ctime1=int(S.get_timestamp())
filename='%d.dat'%ctime1
# take ~56 sec of data (18750 Hz)^-1 * (2^20) ~ 55.9sec. Have to set kludge_sec=60.
f, df, sync = S.take_debug_data(band, channel=channel, IQstream=False, single_channel_readout=single_channel_readout, nsamp=nsamp,filename=str(ctime1));
f,Pxx = signal.welch(df,nperseg = 2**16,fs=2.4e6)
Pxx = np.sqrt(Pxx)
plt.loglog(f,Pxx,alpha=alpha,label = IorQ+': '+str(ctime1))
alpha = alpha*0.8
#dfs.append(df)
#data=fmt.format([str(ctime1),'%0.6f'%(S.channel_to_freq(band,channel)),filename,IorQ])
#of.write(data)
#of.flush()
plt.xlabel('Frequency [Hz]',fontsize = 16)
plt.ylabel('I/Q Noise',fontsize = 16)
plt.title('Resonator at '+str(np.round(freqs[band][i],1))+ 'MHz')
plt.legend()
plt.show()
plt.savefig(S.plot_dir+'/'+str(ctime1)+'_band_'+str(band)+'_chan_'+str(chan)+'.png')
plt.close()
S.channel_off(band,channel)
S.flux_ramp_on()
|
[
"scipy.signal.welch",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.round",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((81, 90), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (88, 90), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1488), 'numpy.asarray', 'np.asarray', (['freqs[band]'], {}), '(freqs[band])\n', (1475, 1488), True, 'import numpy as np\n'), ((1509, 1530), 'numpy.asarray', 'np.asarray', (['sbs[band]'], {}), '(sbs[band])\n', (1519, 1530), True, 'import numpy as np\n'), ((1563, 1596), 'numpy.asarray', 'np.asarray', (['eta_mags_scaled[band]'], {}), '(eta_mags_scaled[band])\n', (1573, 1596), True, 'import numpy as np\n'), ((1624, 1652), 'numpy.asarray', 'np.asarray', (['eta_phases[band]'], {}), '(eta_phases[band])\n', (1634, 1652), True, 'import numpy as np\n'), ((1723, 1735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1733, 1735), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {'fontsize': '(16)'}), "('Frequency [Hz]', fontsize=16)\n", (3447, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3488, 3524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""I/Q Noise"""'], {'fontsize': '(16)'}), "('I/Q Noise', fontsize=16)\n", (3498, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3620), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3618, 3620), True, 'import matplotlib.pyplot as plt\n'), ((3629, 3639), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3637, 3639), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3752), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3750, 3752), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3121), 'scipy.signal.welch', 'signal.welch', (['df'], {'nperseg': '(2 ** 16)', 'fs': '(2400000.0)'}), '(df, nperseg=2 ** 16, fs=2400000.0)\n', (3086, 3121), True, 'import scipy.signal as signal\n'), ((3134, 3146), 'numpy.sqrt', 'np.sqrt', (['Pxx'], {}), '(Pxx)\n', (3141, 3146), True, 'import numpy as np\n'), ((3564, 3591), 'numpy.round', 'np.round', (['freqs[band][i]', '(1)'], {}), '(freqs[band][i], 1)\n', (3572, 3591), True, 'import numpy as np\n')]
|
from typing import List, Tuple
import numpy as np
import pymeshfix
import trimesh.voxel.creation
from skimage.measure import marching_cubes
from trimesh import Trimesh
from trimesh.smoothing import filter_taubin
from ..types import BinaryImage, LabelImage
def _round_to_pitch(coordinate: np.ndarray, pitch: float) -> np.ndarray:
"""Round a point to the nearest point on a grid that starts at the origin
with a specified pitch.
Parameters
----------
coordinate : np.ndarray
The coordinate to round
pitch : float
The pitch of the grid. Assumed to the be same in all directions.
Returns
-------
rounded_point : np.ndarray
The point after rounding to the nearest grid point.
"""
return pitch * np.round(coordinate / pitch, decimals=0)
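# Illustrative example (hand-computed, not from the original source): each coordinate is
# snapped to the nearest integer multiple of the pitch.
#   _round_to_pitch(np.array([1.3, 2.7, -0.4]), pitch=0.5) -> array([ 1.5,  2.5, -0.5])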
def repair_mesh(mesh: Trimesh) -> Trimesh:
"""Repair a mesh using pymeshfix.
Parameters
----------
mesh : Trimesh
The mesh to be repaired
"""
vertices = np.asarray(mesh.vertices)
faces = np.asarray(mesh.faces)
vertices_clean, faces_clean = pymeshfix.clean_from_arrays(vertices, faces)
# create the mesh object
repaired_mesh = Trimesh(vertices=vertices_clean, faces=faces_clean)
assert repaired_mesh.is_watertight, "Mesh was unable to be repaired"
return repaired_mesh
def binary_mask_to_surface(
object_mask: BinaryImage, n_mesh_smoothing_interations: int = 50
) -> Trimesh:
"""Convert surface of a 3D binary mask (segmented object) into a watertight mesh.
Parameters
----------
    object_mask : BinaryImage
A 3D binary image corresponding to the object you want to mesh.
n_mesh_smoothing_interations : int
        The number of iterations of smoothing to perform. Smoothing is
done by the trimesh taubin filter:
https://trimsh.org/trimesh.smoothing.html#trimesh.smoothing.filter_taubin
Default value is 50.
Returns
-------
mesh : trimesh.Trimesh
The resulting mesh as a trimesh.Trimesh object.
https://trimsh.org/trimesh.base.html#github-com-mikedh-trimesh
"""
vertices, faces, _, _ = marching_cubes(object_mask, 0)
vertices_clean, faces_clean = pymeshfix.clean_from_arrays(vertices, faces)
# create the mesh object
mesh = Trimesh(vertices=vertices_clean, faces=faces_clean)
# optionally clean up the mesh
if n_mesh_smoothing_interations > 0:
filter_taubin(mesh, iterations=n_mesh_smoothing_interations)
return mesh
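# Hedged usage sketch (illustrative, not from the original source): mesh a synthetic
# spherical mask. The array values below are assumptions for demonstration only.
#   zz, yy, xx = np.mgrid[-20:21, -20:21, -20:21]
#   sphere_mask = (xx ** 2 + yy ** 2 + zz ** 2) < 15 ** 2
#   surface = binary_mask_to_surface(sphere_mask, n_mesh_smoothing_interations=25)
#   surface.is_watertight  # expected to be True after pymeshfix cleaning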
def voxelize_closed_surface(
mesh: Trimesh, pitch: float, repair_mesh: bool = True
) -> Tuple[BinaryImage, np.ndarray]:
"""Voxelize a closed surface mesh.
Parameters
----------
mesh : Trimesh
The surface to voxelize
pitch : float
The voxel width in mesh units. Voxels have the
same width in each dimension (i.e., are cubes).
repair_mesh : bool
        Flag to attempt to repair the mesh if set to True.
Default value is True.
Returns
-------
image : BinaryImage
        The binary mask created from the voxelized mesh.
image_origin : np.ndarray
The upper left hand corner of the voxelized image in mesh units
        (i.e., minimum of the axis aligned bounding box)
"""
bounding_box = mesh.bounds
centroid = np.mean(bounding_box, axis=0)
# convert the centroid to the nearest integer multiple of the pitch
rounded_centroid = _round_to_pitch(coordinate=centroid, pitch=pitch)
    # find the minimum cube half-width that encompasses the full mesh
cube_half_width = np.max(bounding_box - rounded_centroid)
# get the number of voxels for the cube half-width
n_voxels_cube_half_width = int(np.ceil(cube_half_width / pitch))
# pad with one voxel on each side to make sure the full mesh is in range
n_voxels_cube_half_width += 1
# get the upper left hand (i.e., minimum) corner of the voxelized image in mesh coordinates
image_origin = rounded_centroid - (n_voxels_cube_half_width * pitch)
    # if repair_mesh and (not mesh.is_watertight):
    #     mesh = repair_mesh(mesh)
voxel_grid = trimesh.voxel.creation.local_voxelize(
mesh=mesh,
point=rounded_centroid,
pitch=pitch,
radius=n_voxels_cube_half_width,
fill=True,
)
return voxel_grid.matrix.astype(bool), image_origin
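# Hedged usage sketch (illustrative, not from the original source):
#   import trimesh
#   sphere = trimesh.creation.icosphere(radius=5.0)        # closed, watertight mesh
#   mask, origin = voxelize_closed_surface(sphere, pitch=0.5)
# mask is a cubic boolean volume and origin is the mesh-space coordinate of voxel (0, 0, 0).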
def closed_surfaces_to_label_image(
meshes: List[Trimesh],
pitch: float,
crop_around_mesh: bool = False,
repair_mesh: bool = False,
) -> Tuple[LabelImage, np.ndarray]:
"""Create a label image from a set of meshes with closed surfaces.
Notes:
    - meshes must be watertight for accurate voxelization.
- Labels are assigned in the order the meshes appear in the list.
- all meshes must be in the same coordinate system and scale.
Parameters
----------
meshes : List[Trimesh]
The meshes to convert to a label image.
pitch : float
The width of a voxel in mesh units. Voxels are assumed to be cubes.
crop_around_mesh : bool
When set to True, the image is cropped around the axis aligned bounding box
of the set of meshes with a one voxel pad in each direction.
The default value is False
repair_mesh : bool
When set to True, will attempt to repair meshes with PyMeshFix.
Default value is False.
Returns
-------
label_image : LabelImage
The label image generated from the meshes.
image_origin : np.ndarray
The coordinate of the upper left hand corner (i.e., minimum) of the
label_image in mesh coordinates.
"""
# get the bounding box around the meshes
bounding_boxes = [mesh.bounds for mesh in meshes]
# get the bounding box around all of them
all_corners = np.concatenate(bounding_boxes, axis=0)
min_corner = np.min(all_corners, axis=0)
max_corner = np.max(all_corners, axis=0)
# round the corners to the nearest voxel (in mesh coordinates)
min_corner_rounded = _round_to_pitch(coordinate=min_corner, pitch=pitch)
max_corner_rounded = _round_to_pitch(coordinate=max_corner, pitch=pitch)
# pad the bounding box to make sure everything is accounted for
min_corner_rounded -= pitch
max_corner_rounded += pitch
if crop_around_mesh is True:
image_origin = min_corner_rounded
else:
image_origin = np.array([0, 0, 0])
# determine the size of the image in pixels
image_shape_mesh_units = max_corner_rounded - image_origin
image_shape_voxels = np.round(image_shape_mesh_units / pitch, decimals=0).astype(
int
)
# create the blank label image
label_image = np.zeros(image_shape_voxels, dtype=np.uint16)
for i, mesh in enumerate(meshes):
voxelized, origin = voxelize_closed_surface(
mesh, pitch=pitch, repair_mesh=repair_mesh
)
# get the coordinates of the voxels inside of the mesh
filled_voxel_coordinates = np.argwhere(voxelized)
# get the offset between the label image indices and the voxelized mesh indices
mesh_offset = np.round((origin - image_origin) / pitch, decimals=0)
# offset the voxel coordinates
filled_voxel_indices = np.round(
filled_voxel_coordinates + mesh_offset, decimals=0
).astype(int)
# set the label value
label_value = i + 1
label_image[
filled_voxel_indices[:, 0],
filled_voxel_indices[:, 1],
filled_voxel_indices[:, 2],
] = label_value
return label_image, image_origin
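# Hedged usage sketch (illustrative, not from the original source): two hypothetical
# closed meshes become labels 1 and 2, in list order, on a zero background.
#   import trimesh
#   mesh_a = trimesh.creation.icosphere(radius=3.0)
#   mesh_b = trimesh.creation.icosphere(radius=3.0)
#   mesh_b.apply_translation([10.0, 0.0, 0.0])
#   labels, origin = closed_surfaces_to_label_image([mesh_a, mesh_b], pitch=0.5, crop_around_mesh=True)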
|
[
"numpy.mean",
"numpy.ceil",
"numpy.asarray",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.zeros",
"trimesh.smoothing.filter_taubin",
"numpy.argwhere",
"trimesh.Trimesh",
"skimage.measure.marching_cubes",
"numpy.concatenate",
"pymeshfix.clean_from_arrays",
"numpy.round"
] |
[((994, 1019), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (1004, 1019), True, 'import numpy as np\n'), ((1032, 1054), 'numpy.asarray', 'np.asarray', (['mesh.faces'], {}), '(mesh.faces)\n', (1042, 1054), True, 'import numpy as np\n'), ((1090, 1134), 'pymeshfix.clean_from_arrays', 'pymeshfix.clean_from_arrays', (['vertices', 'faces'], {}), '(vertices, faces)\n', (1117, 1134), False, 'import pymeshfix\n'), ((1185, 1236), 'trimesh.Trimesh', 'Trimesh', ([], {'vertices': 'vertices_clean', 'faces': 'faces_clean'}), '(vertices=vertices_clean, faces=faces_clean)\n', (1192, 1236), False, 'from trimesh import Trimesh\n'), ((2148, 2178), 'skimage.measure.marching_cubes', 'marching_cubes', (['object_mask', '(0)'], {}), '(object_mask, 0)\n', (2162, 2178), False, 'from skimage.measure import marching_cubes\n'), ((2214, 2258), 'pymeshfix.clean_from_arrays', 'pymeshfix.clean_from_arrays', (['vertices', 'faces'], {}), '(vertices, faces)\n', (2241, 2258), False, 'import pymeshfix\n'), ((2300, 2351), 'trimesh.Trimesh', 'Trimesh', ([], {'vertices': 'vertices_clean', 'faces': 'faces_clean'}), '(vertices=vertices_clean, faces=faces_clean)\n', (2307, 2351), False, 'from trimesh import Trimesh\n'), ((3306, 3335), 'numpy.mean', 'np.mean', (['bounding_box'], {'axis': '(0)'}), '(bounding_box, axis=0)\n', (3313, 3335), True, 'import numpy as np\n'), ((3574, 3613), 'numpy.max', 'np.max', (['(bounding_box - rounded_centroid)'], {}), '(bounding_box - rounded_centroid)\n', (3580, 3613), True, 'import numpy as np\n'), ((5793, 5831), 'numpy.concatenate', 'np.concatenate', (['bounding_boxes'], {'axis': '(0)'}), '(bounding_boxes, axis=0)\n', (5807, 5831), True, 'import numpy as np\n'), ((5849, 5876), 'numpy.min', 'np.min', (['all_corners'], {'axis': '(0)'}), '(all_corners, axis=0)\n', (5855, 5876), True, 'import numpy as np\n'), ((5894, 5921), 'numpy.max', 'np.max', (['all_corners'], {'axis': '(0)'}), '(all_corners, axis=0)\n', (5900, 5921), True, 'import numpy as np\n'), ((6676, 6721), 'numpy.zeros', 'np.zeros', (['image_shape_voxels'], {'dtype': 'np.uint16'}), '(image_shape_voxels, dtype=np.uint16)\n', (6684, 6721), True, 'import numpy as np\n'), ((765, 805), 'numpy.round', 'np.round', (['(coordinate / pitch)'], {'decimals': '(0)'}), '(coordinate / pitch, decimals=0)\n', (773, 805), True, 'import numpy as np\n'), ((2437, 2497), 'trimesh.smoothing.filter_taubin', 'filter_taubin', (['mesh'], {'iterations': 'n_mesh_smoothing_interations'}), '(mesh, iterations=n_mesh_smoothing_interations)\n', (2450, 2497), False, 'from trimesh.smoothing import filter_taubin\n'), ((3705, 3737), 'numpy.ceil', 'np.ceil', (['(cube_half_width / pitch)'], {}), '(cube_half_width / pitch)\n', (3712, 3737), True, 'import numpy as np\n'), ((6386, 6405), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6394, 6405), True, 'import numpy as np\n'), ((6978, 7000), 'numpy.argwhere', 'np.argwhere', (['voxelized'], {}), '(voxelized)\n', (6989, 7000), True, 'import numpy as np\n'), ((7112, 7165), 'numpy.round', 'np.round', (['((origin - image_origin) / pitch)'], {'decimals': '(0)'}), '((origin - image_origin) / pitch, decimals=0)\n', (7120, 7165), True, 'import numpy as np\n'), ((6543, 6595), 'numpy.round', 'np.round', (['(image_shape_mesh_units / pitch)'], {'decimals': '(0)'}), '(image_shape_mesh_units / pitch, decimals=0)\n', (6551, 6595), True, 'import numpy as np\n'), ((7237, 7297), 'numpy.round', 'np.round', (['(filled_voxel_coordinates + mesh_offset)'], {'decimals': '(0)'}), '(filled_voxel_coordinates + 
mesh_offset, decimals=0)\n', (7245, 7297), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import numpy as np
from lmnet.datasets.cifar100 import Cifar100
from lmnet.datasets.base import DistributionInterface
from lmnet.utils.random import shuffle
class Cifar100Distribution(Cifar100, DistributionInterface):
def __init__(
self,
subset="train",
batch_size=100,
*args,
**kwargs
):
super().__init__(
subset=subset,
batch_size=batch_size,
*args,
**kwargs,
)
self._init_images_and_labels()
@functools.lru_cache(maxsize=None)
def _images_and_labels(self):
if self.subset == "train":
files = ["train"]
else:
files = ["test"]
data = [self._load_data(filename) for filename in files]
images = [images for images, labels in data]
images = np.concatenate(images, axis=0)
labels = [labels for images, labels in data]
labels = np.concatenate(labels, axis=0)
return images, labels
def update_dataset(self, indices):
"""Update own dataset by indices."""
# Re Initialize dataset
self._init_images_and_labels()
# Update dataset by given indices
self.images = self.images[indices, :]
self.labels = self.labels[indices]
self.current_element_index = 0
def get_shuffle_index(self):
"""Return list of shuffled index."""
images, _ = self._images_and_labels()
random_indices = shuffle(range(len(images)), seed=self.seed)
print("Shuffle {} train dataset with random state {}.".format(self.__class__.__name__, self.seed))
self.seed += 1
return random_indices
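# Hedged usage sketch (illustrative, not from the original source), assuming the lmnet
# CIFAR-100 data files are available locally:
#   dataset = Cifar100Distribution(subset="train", batch_size=100)
#   order = dataset.get_shuffle_index()   # shuffled indices for the next pass
#   dataset.update_dataset(order)         # reorder images/labels in place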
|
[
"functools.lru_cache",
"numpy.concatenate"
] |
[((1271, 1304), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (1290, 1304), False, 'import functools\n'), ((1585, 1615), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (1599, 1615), True, 'import numpy as np\n'), ((1687, 1717), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (1701, 1717), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
image = cv2.imread('champaigneditedcompressed.png')
kernel = np.ones((20, 20), np.float32) / 25
img = cv2.filter2D(image, -1, kernel)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
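# detect up to 10 Shi-Tomasi corners with quality level 0.01 and a minimum spacing of 10 px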
corners = cv2.goodFeaturesToTrack(gray,10,0.01,10)
corners = np.int0(corners)
print(corners)
for i in corners:
x,y = i.ravel()
cv2.circle(img,(x,y),3,255,-1)
plt.imshow(img)
plt.show()
|
[
"matplotlib.pyplot.imshow",
"numpy.ones",
"cv2.goodFeaturesToTrack",
"numpy.int0",
"cv2.filter2D",
"cv2.circle",
"cv2.cvtColor",
"cv2.imread",
"matplotlib.pyplot.show"
] |
[((77, 120), 'cv2.imread', 'cv2.imread', (['"""champaigneditedcompressed.png"""'], {}), "('champaigneditedcompressed.png')\n", (87, 120), False, 'import cv2\n'), ((171, 202), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel'], {}), '(image, -1, kernel)\n', (183, 202), False, 'import cv2\n'), ((210, 247), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (222, 247), False, 'import cv2\n'), ((258, 301), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['gray', '(10)', '(0.01)', '(10)'], {}), '(gray, 10, 0.01, 10)\n', (281, 301), False, 'import cv2\n'), ((309, 325), 'numpy.int0', 'np.int0', (['corners'], {}), '(corners)\n', (316, 325), True, 'import numpy as np\n'), ((130, 159), 'numpy.ones', 'np.ones', (['(20, 20)', 'np.float32'], {}), '((20, 20), np.float32)\n', (137, 159), True, 'import numpy as np\n'), ((383, 418), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(3)', '(255)', '(-1)'], {}), '(img, (x, y), 3, 255, -1)\n', (393, 418), False, 'import cv2\n'), ((415, 430), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (425, 430), True, 'from matplotlib import pyplot as plt\n'), ((431, 441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (439, 441), True, 'from matplotlib import pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
    Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)
# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')
# Regression, with mean square error
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
loss='mean_square', metric=None)
# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, X, n_epoch=20, validation_set=(testX, testX),
run_id="auto_encoder", batch_size=256)
# Encoding X[0] for test
print("\nTest encoding of X[0]:")
# New model, re-using the same session, for weights sharing
encoding_model = tflearn.DNN(encoder, session=model.session)
print(encoding_model.predict([X[0]]))
# Testing the image reconstruction on new data (test set)
print("\nVisualizing results after being encoded and decoded:")
testX = tflearn.data_utils.shuffle(testX)[0]
# Applying encode and decode over test set
encode_decode = model.predict(testX)
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
temp = [[ii, ii, ii] for ii in list(testX[i])]
a[0][i].imshow(np.reshape(temp, (28, 28, 3)))
temp = [[ii, ii, ii] for ii in list(encode_decode[i])]
a[1][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress()
|
[
"matplotlib.pyplot.draw",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.reshape",
"tflearn.data_utils.shuffle",
"tflearn.datasets.mnist.load_data",
"tflearn.DNN",
"matplotlib.pyplot.subplots",
"tflearn.regression",
"tflearn.fully_connected",
"tflearn.input_data"
] |
[((574, 603), 'tflearn.datasets.mnist.load_data', 'mnist.load_data', ([], {'one_hot': '(True)'}), '(one_hot=True)\n', (589, 603), True, 'import tflearn.datasets.mnist as mnist\n'), ((638, 675), 'tflearn.input_data', 'tflearn.input_data', ([], {'shape': '[None, 784]'}), '(shape=[None, 784])\n', (656, 675), False, 'import tflearn\n'), ((686, 723), 'tflearn.fully_connected', 'tflearn.fully_connected', (['encoder', '(256)'], {}), '(encoder, 256)\n', (709, 723), False, 'import tflearn\n'), ((734, 770), 'tflearn.fully_connected', 'tflearn.fully_connected', (['encoder', '(64)'], {}), '(encoder, 64)\n', (757, 770), False, 'import tflearn\n'), ((805, 842), 'tflearn.fully_connected', 'tflearn.fully_connected', (['encoder', '(256)'], {}), '(encoder, 256)\n', (828, 842), False, 'import tflearn\n'), ((853, 912), 'tflearn.fully_connected', 'tflearn.fully_connected', (['decoder', '(784)'], {'activation': '"""sigmoid"""'}), "(decoder, 784, activation='sigmoid')\n", (876, 912), False, 'import tflearn\n'), ((957, 1061), 'tflearn.regression', 'tflearn.regression', (['decoder'], {'optimizer': '"""adam"""', 'learning_rate': '(0.001)', 'loss': '"""mean_square"""', 'metric': 'None'}), "(decoder, optimizer='adam', learning_rate=0.001, loss=\n 'mean_square', metric=None)\n", (975, 1061), False, 'import tflearn\n'), ((1119, 1158), 'tflearn.DNN', 'tflearn.DNN', (['net'], {'tensorboard_verbose': '(0)'}), '(net, tensorboard_verbose=0)\n', (1130, 1158), False, 'import tflearn\n'), ((1404, 1447), 'tflearn.DNN', 'tflearn.DNN', (['encoder'], {'session': 'model.session'}), '(encoder, session=model.session)\n', (1415, 1447), False, 'import tflearn\n'), ((1794, 1830), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(10)'], {'figsize': '(10, 2)'}), '(2, 10, figsize=(10, 2))\n', (1806, 1830), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2080), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2078, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2105), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (2103, 2105), True, 'import matplotlib.pyplot as plt\n'), ((1617, 1650), 'tflearn.data_utils.shuffle', 'tflearn.data_utils.shuffle', (['testX'], {}), '(testX)\n', (1643, 1650), False, 'import tflearn\n'), ((1921, 1950), 'numpy.reshape', 'np.reshape', (['temp', '(28, 28, 3)'], {}), '(temp, (28, 28, 3))\n', (1931, 1950), True, 'import numpy as np\n'), ((2030, 2059), 'numpy.reshape', 'np.reshape', (['temp', '(28, 28, 3)'], {}), '(temp, (28, 28, 3))\n', (2040, 2059), True, 'import numpy as np\n')]
|