code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M)
---|---|---|
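Each row below pairs a raw code sample (code) with the fully-qualified APIs it calls (apis) and a list of per-call records (extract_api) holding character offsets, the qualified and local call names, the argument literals, and the originating import statement. A minimal sketch of how such a cell could be inspected, assuming the cell is a Python literal and with the field order inferred from the sample rows (iter_api_calls is a hypothetical helper, not part of the dataset tooling):
import ast

def iter_api_calls(extract_api_cell: str):
    """Yield one dict per recorded call; field meanings are inferred from the sample rows."""
    for record in ast.literal_eval(extract_api_cell):
        span, api, call_name, (args, kwargs), arg_text, _, _, import_stmt = record
        yield {
            "char_span": span,             # offsets of the call inside the `code` cell
            "api": api,                    # fully qualified name, e.g. 'numpy.zeros'
            "call": call_name,             # name as written in the code, e.g. 'np.zeros'
            "args": args,                  # positional argument source strings
            "kwargs": kwargs,              # keyword argument source strings
            "import": import_stmt.strip(),
        }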
import json
import os
from os.path import join
from random import shuffle
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler, normalize
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score
from transformers import BertTokenizer, BertConfig, BartTokenizer
def make_vector(text, tokenizer):
token_ids = tokenizer.encode(text)[1:-1]
count_vector = np.zeros(tokenizer.vocab_size, dtype=np.int16)
for ID in token_ids:
count_vector[ID] += 1
return count_vector
def dataloader(data_dir, batch_size=5000):
names = [x[:-6] for x in os.listdir(data_dir) if x[-5:] == '3.txt']
index = 0
while index < len(names):
cur_names = names[index:index+batch_size]
tuples = []
for name in cur_names:
hard = open(join(data_dir, f'{name}.0.txt')).read()
simple = open(join(data_dir, f'{name}.3.txt')).read()
tuples.append((hard, simple))
yield tuples
index += batch_size
def construct_dataset(tuples, tokenizer):
X = np.empty((2*len(tuples), tokenizer.vocab_size), dtype=np.int16)
y = np.empty(2*len(tuples), dtype=np.int16)
index = 0
for s,t in tuples:
X[index] = make_vector(s, tokenizer)
X[index+1] = make_vector(t, tokenizer)
y[index] = 0
y[index+1] = 1
index += 2
return X, y
def get_vocab(tokenizer):
tokens = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(tokenizer.vocab_size)]
return tokens
def simple_term_counts(data_dir='data/newsela/articles'):
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-xsum')
model = LogisticRegression(max_iter=100)
for batch in dataloader(data_dir):
X, y = construct_dataset(batch, tokenizer)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#apply feature scaling
#X_train = normalize(X_train)
#X_test = normalize(X_test)
#model.fit(X_train, y_train)
#predictions = model.predict(X_test)
#print(accuracy_score(y_test, predictions))
X = normalize(X)
model.fit(X, y)
vocab = get_vocab(tokenizer)
weights = np.squeeze(model.coef_, axis=0).tolist()
sorted_weights = filter(lambda x: len(x[1].strip()) > 0, zip(range(tokenizer.vocab_size), vocab, weights))
sorted_weights = list(sorted(sorted_weights, key=lambda x: x[2]))
with open('data/logr_weights/bart_freq_newsela_ids.txt', 'w') as f:
for ID, word, weight in sorted_weights:
f.write(f'{ID} {weight}\n')
with open('data/logr_weights/bart_freq_newsela_tokens.txt', 'w') as f:
for ID, word, weight in sorted_weights:
f.write(f'{word} {weight}\n')
print(simple_term_counts())
|
[
"os.listdir",
"os.path.join",
"sklearn.linear_model.LogisticRegression",
"numpy.squeeze",
"numpy.zeros",
"transformers.BartTokenizer.from_pretrained",
"sklearn.preprocessing.normalize"
] |
[((576, 622), 'numpy.zeros', 'np.zeros', (['tokenizer.vocab_size'], {'dtype': 'np.int16'}), '(tokenizer.vocab_size, dtype=np.int16)\n', (584, 622), True, 'import numpy as np\n'), ((1803, 1860), 'transformers.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', (['"""facebook/bart-large-xsum"""'], {}), "('facebook/bart-large-xsum')\n", (1832, 1860), False, 'from transformers import BertTokenizer, BertConfig, BartTokenizer\n'), ((1873, 1905), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(100)'}), '(max_iter=100)\n', (1891, 1905), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2334, 2346), 'sklearn.preprocessing.normalize', 'normalize', (['X'], {}), '(X)\n', (2343, 2346), False, 'from sklearn.preprocessing import MinMaxScaler, normalize\n'), ((777, 797), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (787, 797), False, 'import os\n'), ((2419, 2450), 'numpy.squeeze', 'np.squeeze', (['model.coef_'], {'axis': '(0)'}), '(model.coef_, axis=0)\n', (2429, 2450), True, 'import numpy as np\n'), ((999, 1030), 'os.path.join', 'join', (['data_dir', 'f"""{name}.0.txt"""'], {}), "(data_dir, f'{name}.0.txt')\n", (1003, 1030), False, 'from os.path import join\n'), ((1065, 1096), 'os.path.join', 'join', (['data_dir', 'f"""{name}.3.txt"""'], {}), "(data_dir, f'{name}.3.txt')\n", (1069, 1096), False, 'from os.path import join\n')]
|
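The row above learns per-token logistic-regression weights separating complex (level 0) from simplified (level 3) Newsela articles and writes them out sorted by weight. A hedged follow-on sketch of how those files could be consumed; score_simplicity is a hypothetical helper and assumes the "ID weight" line format written above:
import numpy as np
from transformers import BartTokenizer

def score_simplicity(text, weights_path='data/logr_weights/bart_freq_newsela_ids.txt'):
    tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-xsum')
    weights = {}
    with open(weights_path) as f:
        for line in f:
            token_id, weight = line.split()
            weights[int(token_id)] = float(weight)
    ids = tokenizer.encode(text)[1:-1]          # drop <s> and </s>, as make_vector does
    scores = [weights[i] for i in ids if i in weights]   # some ids were filtered out above
    return float(np.mean(scores)) if scores else 0.0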
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 17:03:01 2017
@author: misakawa
"""
from pattern_matching import Match, when, var, T, t, _, overwrite
from numpy.random import randint
@overwrite(var[(t == int) | (t == float)], var[(t == int) | (t == float)])
def add(a, b):
return a + b
@when(var[t == str], var[t == str])
def add(a, b):
return a + b
class Bound1:
pass
class Bound2:
pass
class Bound3(Bound1, Bound2):
def __repr__(self):
return "bound3"
class Bound4(Bound3):
pass
@when(_[(t != Bound3) & (t < Bound4)])
def add():
return 2
@when(_)
def add():
return 3
assert add(1, 1) == 2
assert add(Bound2()) == 2
assert add(Bound3()) == 3
@when(_[int], _[Bound1], var)
def add(u):
return u
assert add(1, Bound1(), 'last') == 'last'
def is_type(x):
return isinstance(x, type)
m = Match(1, 2, (3, int))
[a, b, c] = m.case(var[int], var, *var[tuple]).get
assert a == 1 and b == 2 and c == ((3, int), )
[c2] = m.case((_, _, (_, var.when(is_type)))).get
assert c2 == int
@overwrite(_ == None)
def summary():
return 0
@when([var[int], *(_ == [])], var)
def summary(head, res):
return head + res
@when([var[int], *var[list]], var)
def summary(head, tail, res):
return summary(tail, res + head)
@when(var[list])
def summary(lst):
return summary(lst, 0)
assert summary(list(range(100))) == 4950
@overwrite([var, *var])
def qsort(head, tail):
lowers = [i for i in tail if i < head]
highers = [i for i in tail if i >= head]
return qsort(lowers) + [head] + qsort(highers)
@when(var)
def qsort(lst):
return lst
qsort(randint(0, 500, size=(1200, )))
@when(_[t.when(lambda _: _ == int)])
def trait_test():
return 1
assert trait_test(1) == 1
class Population:
num: int = 1000
@when(var[t.when(lambda _: hasattr(_, 'num'))])
def trait_test(x):
return x.num
assert trait_test(Population()) == 1000
|
[
"pattern_matching.t.when",
"pattern_matching.overwrite",
"pattern_matching.when",
"numpy.random.randint",
"pattern_matching.var.when",
"pattern_matching.Match"
] |
[((190, 263), 'pattern_matching.overwrite', 'overwrite', (['var[(t == int) | (t == float)]', 'var[(t == int) | (t == float)]'], {}), '(var[(t == int) | (t == float)], var[(t == int) | (t == float)])\n', (199, 263), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((299, 333), 'pattern_matching.when', 'when', (['var[t == str]', 'var[t == str]'], {}), '(var[t == str], var[t == str])\n', (303, 333), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((533, 570), 'pattern_matching.when', 'when', (['_[(t != Bound3) & (t < Bound4)]'], {}), '(_[(t != Bound3) & (t < Bound4)])\n', (537, 570), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((598, 605), 'pattern_matching.when', 'when', (['_'], {}), '(_)\n', (602, 605), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((709, 737), 'pattern_matching.when', 'when', (['_[int]', '_[Bound1]', 'var'], {}), '(_[int], _[Bound1], var)\n', (713, 737), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((862, 883), 'pattern_matching.Match', 'Match', (['(1)', '(2)', '(3, int)'], {}), '(1, 2, (3, int))\n', (867, 883), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1053, 1073), 'pattern_matching.overwrite', 'overwrite', (['(_ == None)'], {}), '(_ == None)\n', (1062, 1073), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1105, 1138), 'pattern_matching.when', 'when', (['[var[int], *(_ == [])]', 'var'], {}), '([var[int], *(_ == [])], var)\n', (1109, 1138), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1188, 1221), 'pattern_matching.when', 'when', (['[var[int], *var[list]]', 'var'], {}), '([var[int], *var[list]], var)\n', (1192, 1221), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1292, 1307), 'pattern_matching.when', 'when', (['var[list]'], {}), '(var[list])\n', (1296, 1307), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1399, 1421), 'pattern_matching.overwrite', 'overwrite', (['[var, *var]'], {}), '([var, *var])\n', (1408, 1421), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1587, 1596), 'pattern_matching.when', 'when', (['var'], {}), '(var)\n', (1591, 1596), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1636, 1665), 'numpy.random.randint', 'randint', (['(0)', '(500)'], {'size': '(1200,)'}), '(0, 500, size=(1200,))\n', (1643, 1665), False, 'from numpy.random import randint\n'), ((1678, 1704), 'pattern_matching.t.when', 't.when', (['(lambda _: _ == int)'], {}), '(lambda _: _ == int)\n', (1684, 1704), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1008, 1025), 'pattern_matching.var.when', 'var.when', (['is_type'], {}), '(is_type)\n', (1016, 1025), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n')]
|
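The row above exercises a third-party pattern_matching library (type-guarded overloads, structural matching on lists and tuples). For comparison only, and explicitly not the pattern_matching API, the simple int/float vs. str overloads of add can be approximated with the standard library's functools.singledispatch, which dispatches on the runtime type of the first argument:
from functools import singledispatch

@singledispatch
def add2(a, b):
    raise TypeError(f"unsupported type: {type(a).__name__}")

@add2.register(int)
@add2.register(float)
def _(a, b):
    return a + b

@add2.register(str)
def _(a, b):
    return a + b

assert add2(1, 1) == 2
assert add2("a", "b") == "ab"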
"""
Environment for basic obstacle avoidance while controlling a UR robotic arm.
In this environment the obstacle only moves up and down along a vertical line in front of the robot.
The goal is for the robot to keep at least a predefined minimum distance from the moving obstacle.
When feasible, the robot should return to its original configuration;
otherwise it should wait for the obstacle to move away before proceeding.
"""
import numpy as np
from typing import Tuple
from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2
from robo_gym.envs.simulation_wrapper import Simulation
from robo_gym.envs.ur.ur_base_avoidance_env import URBaseAvoidanceEnv
# base, shoulder, elbow, wrist_1, wrist_2, wrist_3
JOINT_POSITIONS = [-1.57, -1.31, -1.31, -2.18, 1.57, 0.0]
DEBUG = True
MINIMUM_DISTANCE = 0.3 # the minimum distance [m] the robot should keep from the obstacle
class BasicAvoidanceUR(URBaseAvoidanceEnv):
"""Universal Robots UR basic obstacle avoidance environment.
Args:
rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None.
fix_base (bool): Whether the base joint stays fixed or is movable. Defaults to False.
fix_shoulder (bool): Whether the shoulder joint stays fixed or is movable. Defaults to False.
fix_elbow (bool): Whether the elbow joint stays fixed or is movable. Defaults to False.
fix_wrist_1 (bool): Whether the wrist 1 joint stays fixed or is movable. Defaults to False.
fix_wrist_2 (bool): Whether the wrist 2 joint stays fixed or is movable. Defaults to False.
fix_wrist_3 (bool): Whether the wrist 3 joint stays fixed or is movable. Defaults to True.
ur_model (str): determines which UR model will be used in the environment. Defaults to 'ur5'.
include_polar_to_elbow (bool): determines whether or not the polar coordinates to the elbow joint are included in the state. Defaults to False.
Attributes:
ur (:obj:): Robot utilities object.
client (:obj:str): Robot Server client.
real_robot (bool): True if the environment is controlling a real robot.
"""
max_episode_steps = 1000
def _set_initial_robot_server_state(self, rs_state, fixed_object_position = None) -> robot_server_pb2.State:
if fixed_object_position:
state_msg = super()._set_initial_robot_server_state(rs_state=rs_state, fixed_object_position=fixed_object_position)
return state_msg
z_amplitude = np.random.default_rng().uniform(low=0.09, high=0.35)
z_frequency = 0.125
z_offset = np.random.default_rng().uniform(low=0.2, high=0.6)
string_params = {"object_0_function": "triangle_wave"}
float_params = {"object_0_x": 0.12,
"object_0_y": 0.34,
"object_0_z_amplitude": z_amplitude,
"object_0_z_frequency": z_frequency,
"object_0_z_offset": z_offset}
state = {}
state_msg = robot_server_pb2.State(state = state, float_params = float_params,
string_params = string_params, state_dict = rs_state)
return state_msg
def reset(self, joint_positions = JOINT_POSITIONS, fixed_object_position = None) -> np.array:
"""Environment reset.
Args:
joint_positions (list[6] or np.array[6]): robot joint positions in radians.
fixed_object_position (list[3]): x,y,z fixed position of object
"""
self.prev_action = np.zeros(6)
state = super().reset(joint_positions = joint_positions, fixed_object_position = fixed_object_position)
return state
def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
env_state = self._robot_server_state_to_env_state(rs_state)
reward = 0
done = False
info = {}
# Reward weights
close_distance_weight = -2
delta_joint_weight = 1
action_usage_weight = 1
rapid_action_weight = -0.2
# Difference in joint position current vs. starting position
delta_joint_pos = env_state[9:15]
# Calculate distance to the obstacle
obstacle_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
forearm_coord = np.array([rs_state['forearm_to_ref_translation_x'], rs_state['forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']])
distance_to_ee = np.linalg.norm(obstacle_coord - ee_coord)
distance_to_forearm = np.linalg.norm(obstacle_coord - forearm_coord)
distance_to_target = np.min([distance_to_ee, distance_to_forearm])
# Reward staying close to the predefined joint position
if abs(env_state[-6:]).sum() < 0.1 * action.size:
reward += delta_joint_weight * (1 - (abs(delta_joint_pos).sum()/(0.1 * action.size))) * (1/1000)
# Reward for not acting
if abs(action).sum() <= action.size:
reward += action_usage_weight * (1 - (np.square(action).sum()/action.size)) * (1/1000)
# Negative reward if actions change too rapidly between steps
for i in range(len(action)):
if abs(action[i] - self.prev_action[i]) > 0.5:
reward += rapid_action_weight * (1/1000)
# Negative reward if the obstacle is closer than the predefined minimum distance
if distance_to_target < MINIMUM_DISTANCE:
reward += close_distance_weight * (1/self.max_episode_steps)
# Check if there is a collision
collision = True if rs_state['in_collision'] == 1 else False
if collision:
done = True
info['final_status'] = 'collision'
info['target_coord'] = obstacle_coord
self.last_position_on_success = []
if self.elapsed_steps >= self.max_episode_steps:
done = True
info['final_status'] = 'success'
info['target_coord'] = obstacle_coord
self.last_position_on_success = []
return reward, done, info
def step(self, action) -> Tuple[np.array, float, bool, dict]:
if type(action) == list: action = np.array(action)
state, reward, done, info = super().step(action)
self.prev_action = self.add_fixed_joints(action)
return state, reward, done, info
class BasicAvoidanceURSim(BasicAvoidanceUR, Simulation):
cmd = "roslaunch ur_robot_server ur_robot_server.launch \
world_name:=tabletop_sphere50.world \
reference_frame:=base_link \
max_velocity_scale_factor:=0.2 \
action_cycle_rate:=20 \
rviz_gui:=false \
gazebo_gui:=true \
objects_controller:=true \
rs_mode:=1moving2points \
n_objects:=1.0 \
object_0_model_name:=sphere50 \
object_0_frame:=target"
def __init__(self, ip=None, lower_bound_port=None, upper_bound_port=None, gui=False, ur_model='ur5', **kwargs):
self.cmd = self.cmd + ' ' + 'ur_model:=' + ur_model
Simulation.__init__(self, self.cmd, ip, lower_bound_port, upper_bound_port, gui, **kwargs)
BasicAvoidanceUR.__init__(self, rs_address=self.robot_server_ip, ur_model=ur_model, **kwargs)
class BasicAvoidanceURRob(BasicAvoidanceUR):
real_robot = True
# roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving
|
[
"numpy.random.default_rng",
"numpy.min",
"robo_gym_server_modules.robot_server.grpc_msgs.python.robot_server_pb2.State",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm",
"robo_gym.envs.simulation_wrapper.Simulation.__init__"
] |
[((3080, 3196), 'robo_gym_server_modules.robot_server.grpc_msgs.python.robot_server_pb2.State', 'robot_server_pb2.State', ([], {'state': 'state', 'float_params': 'float_params', 'string_params': 'string_params', 'state_dict': 'rs_state'}), '(state=state, float_params=float_params,\n string_params=string_params, state_dict=rs_state)\n', (3102, 3196), False, 'from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2\n'), ((3619, 3630), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3627, 3630), True, 'import numpy as np\n'), ((4315, 4464), 'numpy.array', 'np.array', (["[rs_state['object_0_to_ref_translation_x'], rs_state[\n 'object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']\n ]"], {}), "([rs_state['object_0_to_ref_translation_x'], rs_state[\n 'object_0_to_ref_translation_y'], rs_state[\n 'object_0_to_ref_translation_z']])\n", (4323, 4464), True, 'import numpy as np\n'), ((4474, 4600), 'numpy.array', 'np.array', (["[rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'],\n rs_state['ee_to_ref_translation_z']]"], {}), "([rs_state['ee_to_ref_translation_x'], rs_state[\n 'ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])\n", (4482, 4600), True, 'import numpy as np\n'), ((4620, 4761), 'numpy.array', 'np.array', (["[rs_state['forearm_to_ref_translation_x'], rs_state[\n 'forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']]"], {}), "([rs_state['forearm_to_ref_translation_x'], rs_state[\n 'forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']])\n", (4628, 4761), True, 'import numpy as np\n'), ((4782, 4823), 'numpy.linalg.norm', 'np.linalg.norm', (['(obstacle_coord - ee_coord)'], {}), '(obstacle_coord - ee_coord)\n', (4796, 4823), True, 'import numpy as np\n'), ((4855, 4901), 'numpy.linalg.norm', 'np.linalg.norm', (['(obstacle_coord - forearm_coord)'], {}), '(obstacle_coord - forearm_coord)\n', (4869, 4901), True, 'import numpy as np\n'), ((4932, 4977), 'numpy.min', 'np.min', (['[distance_to_ee, distance_to_forearm]'], {}), '([distance_to_ee, distance_to_forearm])\n', (4938, 4977), True, 'import numpy as np\n'), ((7402, 7496), 'robo_gym.envs.simulation_wrapper.Simulation.__init__', 'Simulation.__init__', (['self', 'self.cmd', 'ip', 'lower_bound_port', 'upper_bound_port', 'gui'], {}), '(self, self.cmd, ip, lower_bound_port, upper_bound_port,\n gui, **kwargs)\n', (7421, 7496), False, 'from robo_gym.envs.simulation_wrapper import Simulation\n'), ((6536, 6552), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (6544, 6552), True, 'import numpy as np\n'), ((2549, 2572), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2570, 2572), True, 'import numpy as np\n'), ((2649, 2672), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2670, 2672), True, 'import numpy as np\n'), ((5362, 5379), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (5371, 5379), True, 'import numpy as np\n')]
|
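The reward method in the row above combines four shaping terms. A standalone sketch with made-up state values (weights copied from the class; the joint-offset gate and shaping term are collapsed into one vector here, so this is illustrative rather than a replacement for the environment code):
import numpy as np

MINIMUM_DISTANCE = 0.3
MAX_EPISODE_STEPS = 1000
delta_joint_pos = np.array([0.02, -0.01, 0.03, 0.0, 0.01, 0.0])  # offsets from the start pose
action = np.array([0.1, -0.2, 0.0, 0.1, 0.0, 0.0])
prev_action = np.zeros(6)
distance_to_target = 0.25                                      # obstacle is too close

reward = 0.0
if abs(delta_joint_pos).sum() < 0.1 * action.size:            # close to the start pose
    reward += 1 * (1 - abs(delta_joint_pos).sum() / (0.1 * action.size)) * (1 / 1000)
if abs(action).sum() <= action.size:                           # small action magnitude
    reward += 1 * (1 - np.square(action).sum() / action.size) * (1 / 1000)
reward += -0.2 * (1 / 1000) * (np.abs(action - prev_action) > 0.5).sum()  # rapid changes
if distance_to_target < MINIMUM_DISTANCE:                      # proximity penalty dominates
    reward += -2 * (1 / MAX_EPISODE_STEPS)
print(round(reward, 6))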
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pyqtgraph as pg
import numpy as np
class CustomWidget(pg.GraphicsWindow):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
def __init__(self, parent=None, **kargs):
pg.GraphicsWindow.__init__(self, **kargs)
self.setParent(parent)
self.setWindowTitle('pyqtgraph example: Scrolling Plots')
self.p = self.addPlot(labels = {'left':'Position', 'bottom':'Time'})
self.data = np.zeros(10)
self.curve = self.p.plot(self.data, pen='b')
if __name__ == '__main__':
w = CustomWidget()
w.show()
|
[
"pyqtgraph.setConfigOption",
"numpy.zeros",
"pyqtgraph.GraphicsWindow.__init__"
] |
[((133, 170), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""w"""'], {}), "('background', 'w')\n", (151, 170), True, 'import pyqtgraph as pg\n'), ((175, 212), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""foreground"""', '"""k"""'], {}), "('foreground', 'k')\n", (193, 212), True, 'import pyqtgraph as pg\n'), ((267, 308), 'pyqtgraph.GraphicsWindow.__init__', 'pg.GraphicsWindow.__init__', (['self'], {}), '(self, **kargs)\n', (293, 308), True, 'import pyqtgraph as pg\n'), ((504, 516), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (512, 516), True, 'import numpy as np\n')]
|
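The pyqtgraph sample above sets up the plot but never updates it. A hedged sketch of the scrolling behaviour its window title refers to, reusing the CustomWidget defined above: shift the buffer, append a fresh sample, and redraw on a QTimer (the timer wiring follows the standard pyqtgraph scrolling-plot example pattern; ScrollingWidget is an illustrative subclass, not part of the original sample):
import numpy as np
from pyqtgraph.Qt import QtCore

class ScrollingWidget(CustomWidget):
    def __init__(self, parent=None, **kargs):
        super().__init__(parent, **kargs)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update_plot)
        self.timer.start(50)                     # refresh every 50 ms

    def update_plot(self):
        self.data[:-1] = self.data[1:]           # shift the buffer left by one sample
        self.data[-1] = np.random.normal()       # append a new random sample
        self.curve.setData(self.data)            # redraw the curve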
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from network import NN
from evaluate import accuracy
def read_data(fpath):
iris = pd.read_csv(fpath)
iris.loc[iris['species'] == 'virginica', 'species'] = 0
iris.loc[iris['species'] == 'versicolor', 'species'] = 1
iris.loc[iris['species'] == 'setosa', 'species'] = 2
iris = iris[iris['species'] != 2]
return iris[['petal_length', 'petal_width']].values, iris[['species']].values.astype('uint8')
def plot_data(X, y):
plt.scatter(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)
plt.title("IRIS DATA | Blue - Versicolor, Red - Virginica ")
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.show()
def train_test_split(X, y, ratio=0.8):
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
train_len = int(X.shape[0] * ratio)
return X[indices[:train_len]], y[indices[:train_len]], X[indices[train_len:]], y[indices[train_len:]]
if __name__ == '__main__':
X, y = read_data('iris.csv')
# comment the following line if you don't need the plot anymore
plot_data(X, y)
X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7)
nn = NN(len(X[0]), 5, 1)
output = nn.feedforward(X_train)
print(output)
print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}')
nn.backward(X_train, y_train, output)
print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}')
nn.train(X_train, y_train)
print("Accuracy:")
print(accuracy(nn, X_test, y_test))
|
[
"numpy.random.shuffle",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"evaluate.accuracy",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((159, 177), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (170, 177), True, 'import pandas as pd\n'), ((519, 587), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y[:, 0]', 's': '(40)', 'cmap': 'plt.cm.Spectral'}), '(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)\n', (530, 587), True, 'import matplotlib.pyplot as plt\n'), ((592, 652), 'matplotlib.pyplot.title', 'plt.title', (['"""IRIS DATA | Blue - Versicolor, Red - Virginica """'], {}), "('IRIS DATA | Blue - Versicolor, Red - Virginica ')\n", (601, 652), True, 'import matplotlib.pyplot as plt\n'), ((657, 683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal Length"""'], {}), "('Petal Length')\n", (667, 683), True, 'import matplotlib.pyplot as plt\n'), ((688, 713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal Width"""'], {}), "('Petal Width')\n", (698, 713), True, 'import matplotlib.pyplot as plt\n'), ((718, 728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (726, 728), True, 'import matplotlib.pyplot as plt\n'), ((784, 805), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (793, 805), True, 'import numpy as np\n'), ((810, 836), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (827, 836), True, 'import numpy as np\n'), ((1586, 1614), 'evaluate.accuracy', 'accuracy', (['nn', 'X_test', 'y_test'], {}), '(nn, X_test, y_test)\n', (1594, 1614), False, 'from evaluate import accuracy\n')]
|
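For comparison with the manual splitter in the row above (and not part of the original script), scikit-learn ships an equivalent helper. Note the different return order (X_train, X_test, y_train, y_test) and that stratify keeps the class balance in both splits; this assumes X, y come from read_data('iris.csv') as in the main block above:
from sklearn.model_selection import train_test_split as sk_split

X_tr, X_te, y_tr, y_te = sk_split(X, y, train_size=0.7, stratify=y.ravel(), random_state=0)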
# -*- coding: utf-8 -*-
"""Linear module for dqn algorithms
- Author: <NAME>
- Contact: <EMAIL>
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rl_algorithms.common.helper_functions import numpy2floattensor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class NoisyLinear(nn.Module):
"""Noisy linear module for NoisyNet.
References:
https://github.com/higgsfield/RL-Adventure/blob/master/5.noisy%20dqn.ipynb
https://github.com/Kaixhin/Rainbow/blob/master/model.py
Attributes:
in_features (int): input size of linear module
out_features (int): output size of linear module
std_init (float): initial std value
weight_mu (nn.Parameter): mean value weight parameter
weight_sigma (nn.Parameter): std value weight parameter
bias_mu (nn.Parameter): mean value bias parameter
bias_sigma (nn.Parameter): std value bias parameter
"""
def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):
"""Initialize."""
super(NoisyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.Tensor(out_features, in_features))
self.register_buffer("weight_epsilon", torch.Tensor(out_features, in_features))
self.bias_mu = nn.Parameter(torch.Tensor(out_features))
self.bias_sigma = nn.Parameter(torch.Tensor(out_features))
self.register_buffer("bias_epsilon", torch.Tensor(out_features))
self.reset_parameters()
self.reset_noise()
def reset_parameters(self):
"""Reset trainable network parameters (factorized gaussian noise)."""
mu_range = 1 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
@staticmethod
def scale_noise(size: int) -> torch.Tensor:
"""Set scale to make noise (factorized gaussian noise)."""
x = numpy2floattensor(np.random.normal(loc=0.0, scale=1.0, size=size), device)
return x.sign().mul(x.abs().sqrt())
def reset_noise(self):
"""Make new noise."""
epsilon_in = self.scale_noise(self.in_features)
epsilon_out = self.scale_noise(self.out_features)
# outer product
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(epsilon_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward method implementation.
We don't use separate code paths for train / eval mode,
as it doesn't make a remarkable difference in performance.
"""
return F.linear(
x,
self.weight_mu + self.weight_sigma * self.weight_epsilon,
self.bias_mu + self.bias_sigma * self.bias_epsilon,
)
class NoisyLinearConstructor:
"""Constructor class for changing hyper parameters of NoisyLinear.
Attributes:
std_init (float): initial std value
"""
def __init__(self, std_init: float = 0.5):
"""Initialize."""
self.std_init = std_init
def __call__(self, in_features: int, out_features: int) -> NoisyLinear:
"""Return NoisyLinear instance set hyper parameters"""
return NoisyLinear(in_features, out_features, self.std_init)
class NoisyMLPHandler:
"""Includes methods to handle noisy linear."""
def reset_noise(self):
"""Re-sample noise"""
for _, module in self.named_children():
module.reset_noise()
|
[
"torch.nn.functional.linear",
"numpy.random.normal",
"math.sqrt",
"torch.Tensor",
"torch.cuda.is_available"
] |
[((305, 330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (328, 330), False, 'import torch\n'), ((3051, 3177), 'torch.nn.functional.linear', 'F.linear', (['x', '(self.weight_mu + self.weight_sigma * self.weight_epsilon)', '(self.bias_mu + self.bias_sigma * self.bias_epsilon)'], {}), '(x, self.weight_mu + self.weight_sigma * self.weight_epsilon, self.\n bias_mu + self.bias_sigma * self.bias_epsilon)\n', (3059, 3177), True, 'import torch.nn.functional as F\n'), ((1313, 1352), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (1325, 1352), False, 'import torch\n'), ((1395, 1434), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (1407, 1434), False, 'import torch\n'), ((1483, 1522), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (1495, 1522), False, 'import torch\n'), ((1561, 1587), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), '(out_features)\n', (1573, 1587), False, 'import torch\n'), ((1628, 1654), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), '(out_features)\n', (1640, 1654), False, 'import torch\n'), ((1701, 1727), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), '(out_features)\n', (1713, 1727), False, 'import torch\n'), ((1923, 1950), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (1932, 1950), False, 'import math\n'), ((2392, 2439), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': 'size'}), '(loc=0.0, scale=1.0, size=size)\n', (2408, 2439), True, 'import numpy as np\n'), ((2062, 2089), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (2071, 2089), False, 'import math\n'), ((2198, 2226), 'math.sqrt', 'math.sqrt', (['self.out_features'], {}), '(self.out_features)\n', (2207, 2226), False, 'import math\n')]
|
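The NoisyLinear layer above draws factorized Gaussian noise: one vector per input dimension and one per output dimension, each transformed by f(x) = sign(x) * sqrt(|x|) and combined with an outer product, so only (in + out) samples are needed per layer. A small numpy sketch of the same construction (shapes and the sigma initialisation mirror the module; this is illustrative, not the module itself):
import numpy as np

def scale_noise(size):
    x = np.random.normal(0.0, 1.0, size)
    return np.sign(x) * np.sqrt(np.abs(x))

in_features, out_features = 4, 3
eps_in = scale_noise(in_features)
eps_out = scale_noise(out_features)
weight_epsilon = np.outer(eps_out, eps_in)   # shape (out_features, in_features), matches .ger() above
bias_epsilon = eps_out

# Effective parameters for one forward pass: w = mu + sigma * eps (elementwise)
weight_mu = np.zeros((out_features, in_features))
weight_sigma = np.full((out_features, in_features), 0.5 / np.sqrt(in_features))
weight = weight_mu + weight_sigma * weight_epsilon
print(weight.shape)  # (3, 4)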
from numpy import reshape
def vec(x):
return reshape(x, (-1,) + x.shape[2:], order="F")
def unvec(x, shape):
return reshape(x, shape, order="F")
|
[
"numpy.reshape"
] |
[((51, 93), 'numpy.reshape', 'reshape', (['x', '((-1,) + x.shape[2:])'], {'order': '"""F"""'}), "(x, (-1,) + x.shape[2:], order='F')\n", (58, 93), False, 'from numpy import reshape\n'), ((128, 156), 'numpy.reshape', 'reshape', (['x', 'shape'], {'order': '"""F"""'}), "(x, shape, order='F')\n", (135, 156), False, 'from numpy import reshape\n')]
|
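A quick usage check for the two helpers above, on a synthetic array: vec collapses the first two axes in Fortran (column-major) order, and unvec with the original shape round-trips it.
import numpy as np

x = np.arange(24).reshape(2, 3, 4)
v = vec(x)
assert v.shape == (6, 4)
assert np.array_equal(unvec(v, x.shape), x)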
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 16:12:56 2020
@author: dylanroyston
"""
# import/configure packages
import numpy as np
import pandas as pd
#import pyarrow as pa
import librosa
import librosa.display
from pathlib import Path
#import Ipython.display as ipd
#import matplotlib.pyplot as plt
from pyspark.sql import *
import pyspark.sql.functions as f
from pyspark import SparkConf, SparkContext, SQLContext
import boto3
from tinytag import TinyTag as tt
import soundfile as sf
import audioread
from pydub import AudioSegment
from io import BytesIO
#from io import BytesIO
import os
import sys
import time
import struct
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/lib")
#import config
time_seq = []
#####
# create local Spark instance (for non-cluster dev)
sc = SparkContext('local')
spark = SparkSession (sc)
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# define Spark config
def spark_conf():
conf = SparkConf().setAppName("decompress_audio_files")
sc = SparkContext(conf=conf)
spark = SparkSession.builder.getOrCreate()
return spark
spark = spark_conf()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
#####
# Function to write spark-dataframe to mySQL
def write_df_to_psql(df, tablename):
psql_user = os.environ.get('PSQL_USR')
psql_pwd = os.environ.get('PSQL_PWD')
df.write.format('jdbc').options(
url='jdbc:postgresql://10.0.0.6:5432/spectralize',
dbtable=tablename,
user=psql_user,
#password=<PASSWORD>).mode('append').save()
password=psql_pwd).save()
#####
# function to read audio files from S3 bucket and extract tags
def read_audio_files():
# basic initialization
time_seq.append(['start-read-audio', time.time()])
# DataFrame schema
File_Tags = Row("s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year")
spec_labels = []
for sn in range(0,128):
spec_labels.append('spec' + str(sn+1))
spec_df_labels = ['song_id','timeseries'] + spec_labels
Spec_Tags = Row(spec_df_labels)
# configure S3 access
s3_bucket = 'mdp-spectralize-pal'
number_of_files = 0
s3 = boto3.resource('s3')
boto_client = boto3.client('s3')
bucket = s3.Bucket(s3_bucket)
number_of_files=0
file_limit=100
#local_path = './local_file.'
known_ext = [".mp3", ".wav", ".m4a"]
#read each file from S3 bucket
for obj in bucket.objects.all():
s3_key = obj.key
audio_obj_stream = boto_client.get_object(Bucket=s3_bucket, Key=s3_key)
audio_obj = BytesIO(audio_obj_stream['Body'].read())
song = bytes(audio_obj)
song = sf.SoundFile(audio_obj)
song = open(audio_obj, 'rb').read()
song = audioread.audio_open(audio_obj)
# extract tags from mp3 files
#if "mp3" in s3_key:
#if any(ext in s3_key for ext in known_ext):
#print(number_of_files)
#ext = s3_key[-4:]
#local_path = './localfile' + ext
number_of_files+=1
#bucket.download_file(s3_key, local_path)
local_path = '/home/dylanroyston/Music/spectralize_data/01 Konoha Densetsu.mp3'
song = open(local_path, 'rb').read()
##### tags
tags = tt.get(local_path)
tags = tt.get(audio_obj)
# extract tags from tinytag object
indiv_tags = (s3_key, number_of_files, tags.album, tags.albumartist, tags.artist,
tags.audio_offset, tags.bitrate, tags.channels,
tags.comment, tags.composer, tags.disc,
tags.disc_total, tags.duration, tags.filesize,
tags.genre, tags.samplerate, tags.title, tags.track,
tags.track_total, tags.year)
# convert tuple object to list
indiv_tag_list = list(indiv_tags)
indiv_tag_list = [str(i) for i in indiv_tag_list]
tag_seq=[]
tag_seq.append(indiv_tag_list)
tags_pdf = pd.DataFrame(data=tag_seq)
tag_df = spark.createDataFrame(tags_pdf, schema=File_Tags)
##### audio
# load audio file with Librosa
#y, sr = librosa.load(str(Path(local_path)), sr=None)
y, sr = librosa.load(local_path, sr=None)
# create indexing variables (song_id, timestamp)
# song_id defined as "repeat(number_of_files)"
song_num = pd.Series([number_of_files])
num_points = len(y)
song_id = song_num.repeat(num_points)
song_id = song_id.to_numpy()
# timeseries defined as "1 : length(audio_data)"
timeseries = np.arange(num_points)
timeseries = timeseries.transpose()
full_audio = {'song_id': song_id, 'timeseries': timeseries,
'intensity': y}
# create combined dataframe
audio_pdf = pd.DataFrame(data = full_audio)
audio_df = spark.createDataFrame(audio_pdf)
##### spectral
S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128, fmax=10000)
log_S = librosa.power_to_db(S, ref=np.max)
log_S = log_S.transpose()
# song_id defined as "repeat(number_of_files)"
song_num = pd.Series([number_of_files])
num_points = len(S.transpose())
song_id = song_num.repeat(num_points)
song_id = song_id.to_numpy()
# timeseries defined as "1 : length(audio_data)"
timeseries = np.arange(num_points)
timeseries = timeseries.transpose()
full_index = {'song_id': song_id, 'timeseries': timeseries}
index_pdf = pd.DataFrame(full_index)
spec_pdf = pd.DataFrame(data=log_S, columns=spec_labels)
full_spec = pd.concat([index_pdf, spec_pdf], axis=1)
spec_df = spark.createDataFrame(full_spec)
##### write dataframes to psql
write_df_to_psql(tag_df, 'clean_metadata')
write_df_to_psql(audio_df, 'clean_audio')
write_df_to_psql(spec_df, 'clean_spec')
# stop process when file_limit is crossed (small batches)
if (number_of_files >= file_limit):
break
#####
time_seq.append(['end read-file', time.time()])
#df_tags = spark.createDataFrame(tag_seq, schema=File_Tags)
#df_audio = spark.createDataFrame(audio_seq)
#df_spec = spark.createDataFrame(audio_seq, schema=Spec_Tags)
# Additional run to
#df_audio_data = spark.createDataFrame(file_audio_data)
#process_df(df_audio_data)
#####
if __name__ == '__main__':
time_seq.append(['start', time.time()])
read_audio_files()
|
[
"pandas.Series",
"librosa.feature.melspectrogram",
"boto3.client",
"audioread.audio_open",
"os.environ.get",
"os.path.abspath",
"pyspark.SparkConf",
"librosa.power_to_db",
"boto3.resource",
"tinytag.TinyTag.get",
"pandas.concat",
"time.time",
"pandas.DataFrame",
"pyspark.SparkContext",
"soundfile.SoundFile",
"numpy.arange",
"librosa.load"
] |
[((833, 854), 'pyspark.SparkContext', 'SparkContext', (['"""local"""'], {}), "('local')\n", (845, 854), False, 'from pyspark import SparkConf, SparkContext, SQLContext\n'), ((1048, 1071), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (1060, 1071), False, 'from pyspark import SparkConf, SparkContext, SQLContext\n'), ((1331, 1357), 'os.environ.get', 'os.environ.get', (['"""PSQL_USR"""'], {}), "('PSQL_USR')\n", (1345, 1357), False, 'import os\n'), ((1377, 1403), 'os.environ.get', 'os.environ.get', (['"""PSQL_PWD"""'], {}), "('PSQL_PWD')\n", (1391, 1403), False, 'import os\n'), ((2473, 2493), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (2487, 2493), False, 'import boto3\n'), ((2517, 2535), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (2529, 2535), False, 'import boto3\n'), ((3060, 3083), 'soundfile.SoundFile', 'sf.SoundFile', (['audio_obj'], {}), '(audio_obj)\n', (3072, 3083), True, 'import soundfile as sf\n'), ((3162, 3193), 'audioread.audio_open', 'audioread.audio_open', (['audio_obj'], {}), '(audio_obj)\n', (3182, 3193), False, 'import audioread\n'), ((3770, 3788), 'tinytag.TinyTag.get', 'tt.get', (['local_path'], {}), '(local_path)\n', (3776, 3788), True, 'from tinytag import TinyTag as tt\n'), ((3804, 3821), 'tinytag.TinyTag.get', 'tt.get', (['audio_obj'], {}), '(audio_obj)\n', (3810, 3821), True, 'from tinytag import TinyTag as tt\n'), ((4539, 4565), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'tag_seq'}), '(data=tag_seq)\n', (4551, 4565), True, 'import pandas as pd\n'), ((4818, 4851), 'librosa.load', 'librosa.load', (['local_path'], {'sr': 'None'}), '(local_path, sr=None)\n', (4830, 4851), False, 'import librosa\n'), ((5002, 5030), 'pandas.Series', 'pd.Series', (['[number_of_files]'], {}), '([number_of_files])\n', (5011, 5030), True, 'import pandas as pd\n'), ((5229, 5250), 'numpy.arange', 'np.arange', (['num_points'], {}), '(num_points)\n', (5238, 5250), True, 'import numpy as np\n'), ((5501, 5530), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'full_audio'}), '(data=full_audio)\n', (5513, 5530), True, 'import pandas as pd\n'), ((5686, 5750), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {'sr': 'sr', 'n_mels': '(128)', 'fmax': '(10000)'}), '(y, sr=sr, n_mels=128, fmax=10000)\n', (5716, 5750), False, 'import librosa\n'), ((5767, 5801), 'librosa.power_to_db', 'librosa.power_to_db', (['S'], {'ref': 'np.max'}), '(S, ref=np.max)\n', (5786, 5801), False, 'import librosa\n'), ((5919, 5947), 'pandas.Series', 'pd.Series', (['[number_of_files]'], {}), '([number_of_files])\n', (5928, 5947), True, 'import pandas as pd\n'), ((6158, 6179), 'numpy.arange', 'np.arange', (['num_points'], {}), '(num_points)\n', (6167, 6179), True, 'import numpy as np\n'), ((6313, 6337), 'pandas.DataFrame', 'pd.DataFrame', (['full_index'], {}), '(full_index)\n', (6325, 6337), True, 'import pandas as pd\n'), ((6366, 6411), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'log_S', 'columns': 'spec_labels'}), '(data=log_S, columns=spec_labels)\n', (6378, 6411), True, 'import pandas as pd\n'), ((6441, 6481), 'pandas.concat', 'pd.concat', (['[index_pdf, spec_pdf]'], {'axis': '(1)'}), '([index_pdf, spec_pdf], axis=1)\n', (6450, 6481), True, 'import pandas as pd\n'), ((702, 727), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (717, 727), False, 'import os\n'), ((992, 1003), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (1001, 1003), False, 'from pyspark import SparkConf, SparkContext, SQLContext\n'), ((1816, 1827), 'time.time', 'time.time', ([], {}), '()\n', (1825, 1827), False, 'import time\n'), ((6953, 6964), 'time.time', 'time.time', ([], {}), '()\n', (6962, 6964), False, 'import time\n'), ((7337, 7348), 'time.time', 'time.time', ([], {}), '()\n', (7346, 7348), False, 'import time\n')]
|
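A minimal shape check, on synthetic audio, for the spectrogram layout assumed in the row above: melspectrogram returns (n_mels, n_frames), and transposing gives one row per frame, matching the 128 'spec1'..'spec128' columns plus the song_id/timeseries index columns. This is a standalone sketch, not part of the pipeline.
import numpy as np
import librosa

sr = 22050
y = np.random.uniform(-1.0, 1.0, sr * 2).astype(np.float32)  # two seconds of noise
S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128, fmax=10000)
log_S = librosa.power_to_db(S, ref=np.max).transpose()
print(S.shape, log_S.shape)  # (128, n_frames) and (n_frames, 128)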
from typing import Dict
import numpy as np
import tensorflow as tf
import verres as V
class ConstantSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, learning_rate: float):
super().__init__()
self.learning_rate = float(learning_rate)
def __call__(self, step):
return self.learning_rate
def get_config(self):
return dict(learning_rate=self.learning_rate)
class LinearLRSchedule(tf.keras.callbacks.Callback):
def __init__(self,
cycle_length: int,
steps_per_epoch: int,
lr_map: Dict[int, float],
initial_lr: float = None):
super().__init__()
self.schedule = None
self.pointer = 0
self.cycle_length = None
self.make_schedule(cycle_length, steps_per_epoch, lr_map, initial_lr)
def make_schedule(self,
cycle_length: int,
steps_per_epoch: int,
lr_map: Dict[int, float],
initial_lr: float = None):
self.cycle_length = cycle_length
schedule = np.empty(self.cycle_length * steps_per_epoch, dtype="float32")
if 0 not in lr_map:
if initial_lr is None:
raise RuntimeError("Either pass the initial learning rate in the lr_map or as a dedicated parameter!")
else:
lr_map = lr_map.copy()
initial_lr = lr_map.pop(0)
start_step = 0
current_lr = initial_lr
for end_epoch, next_lr in sorted(lr_map.items(), key=lambda it: it[0]):
steps = end_epoch * steps_per_epoch - start_step
schedule[start_step:start_step+steps] = np.linspace(
current_lr, next_lr, num=steps, endpoint=False, dtype="float32")
start_step += steps
current_lr = next_lr
schedule[start_step:] = current_lr
self.schedule = schedule
def on_batch_end(self, batch, logs=None):
self.model.optimizer.lr = self.schedule[self.pointer]
self.pointer += 1
self.pointer %= len(self.schedule)  # wrap over the full schedule, not just one epoch's worth of steps
def on_epoch_end(self, epoch, logs=None):
logs["lr"] = self.schedule[self.pointer]
def factory(spec: dict) -> tf.optimizers.schedules.LearningRateSchedule:
name = spec.pop("name", "default")
if name.lower() in {"default", "constant"}:
scheduler = ConstantSchedule(float(spec["learning_rate"]))
else:
scheduler_type = getattr(tf.optimizers.schedules, name, None)
if scheduler_type is None:
raise KeyError(f"No such scheduler: {name}")
scheduler = scheduler_type(**spec)
print(f" [Verres.schedule] - Factory built: {name}")
return scheduler
|
[
"numpy.linspace",
"numpy.empty"
] |
[((1143, 1205), 'numpy.empty', 'np.empty', (['(self.cycle_length * steps_per_epoch)'], {'dtype': '"""float32"""'}), "(self.cycle_length * steps_per_epoch, dtype='float32')\n", (1151, 1205), True, 'import numpy as np\n'), ((1725, 1801), 'numpy.linspace', 'np.linspace', (['current_lr', 'next_lr'], {'num': 'steps', 'endpoint': '(False)', 'dtype': '"""float32"""'}), "(current_lr, next_lr, num=steps, endpoint=False, dtype='float32')\n", (1736, 1801), True, 'import numpy as np\n')]
|
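A tiny standalone illustration of the piecewise-linear ramp that make_schedule builds in the row above, using made-up numbers: 2 steps per epoch, learning rate 0.1 at epoch 0 falling to 0.01 by epoch 2, then held constant for the rest of the cycle.
import numpy as np

steps_per_epoch, cycle_length = 2, 4
initial_lr, lr_map = 0.1, {2: 0.01}

schedule = np.empty(cycle_length * steps_per_epoch, dtype="float32")
start_step, current_lr = 0, initial_lr
for end_epoch, next_lr in sorted(lr_map.items()):
    steps = end_epoch * steps_per_epoch - start_step
    schedule[start_step:start_step + steps] = np.linspace(
        current_lr, next_lr, num=steps, endpoint=False, dtype="float32")
    start_step, current_lr = start_step + steps, next_lr
schedule[start_step:] = current_lr
print(schedule)  # 0.1, 0.0775, 0.055, 0.0325, then 0.01 held for the remaining steps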
import json
import string
from datetime import datetime
import deap
import numpy as np
import hmm
from discriminator import Discriminator
from ea import EA
import random_search
DEFAULT_PARAMS = {
# Discriminator CNN model
"model": "CNNModel3",
# Algorithm Parameters
"states": 5,
"symbols": 5,
"epochs": 10,
"epoch_size": 500,
"batch_size": 200,
"seq_len": 20,
"pop_size": 25,
"gens": 50,
"offspring_prop": 1.0,
"cx_prob": 0.0,
"mut_fn": "uniform",
"mut_prob": 1.0,
"mut_rate": None, # None - default to 1/N where N is number of genes
# Implementation Parameters
"_pool_size": 4,
"_random_search": True, # Also run an elitist random search over #gens to compare performance
}
def param_assert(params):
assert params["states"] > 0
assert 0 < params["symbols"] <= 26
assert 0.0 <= params["offspring_prop"] <= 1.0
assert 0.0 <= params["cx_prob"] <= 1.0
assert 0.0 <= params["mut_prob"] <= 1.0
assert (params["mut_rate"] is None) or (0.0 <= params["mut_rate"] <= 1.0)
def run(param_subset):
# Overwrite the default values of the provided parameters
params = {**DEFAULT_PARAMS, **param_subset}
print(params)
param_assert(params)
x = params["states"]
y = string.ascii_lowercase[: params["symbols"]]
s = [1.0] + [0.0] * (x - 1)
# Random HMM that will act as the 'true' underlying distribution
real_hmm = hmm.random_hmm(x, y, s)
# Different random HMM that will be used to benchmark the best solution we find
rand_hmm = hmm.random_hmm(x, y, s)
d = Discriminator(
real_hmm,
params["epoch_size"],
params["batch_size"],
params["seq_len"],
model=params["model"],
pool_size=params["_pool_size"],
)
print("Pre-training discriminator...")
accs, losses = d.initial_train(params["epochs"])
acc = accs[-1]
loss = losses[-1]
print(f"Pre-trained discriminiator accuracy: {acc}, loss: {loss}")
g = EA(
discriminator=d,
pop_size=params["pop_size"],
states=x,
symbols=len(y),
offpr=params["offspring_prop"],
cxpb=params["cx_prob"],
mut_fn=params["mut_fn"],
mutpb=params["mut_prob"],
mut_rate=params["mut_rate"],
)
print("Running generator...")
final_pop, _, logbook = g.run(params["gens"])
best_ind = deap.tools.selBest(final_pop, 1)[0]
best_hmm = hmm.HMM(x, np.array(list(y)), best_ind[0], best_ind[1], np.array(s))
if params["_random_search"]:
print("Running random search benchmark...")
rs_best_hmm, rs_best_acc = random_search.run(
d, params["states"], params["symbols"], params["gens"]
)
else:
rs_best_hmm, rs_best_acc = None, None
return real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook
def experiment(params, runs):
all_params = {**DEFAULT_PARAMS, **params}
do_rand_search = all_params["_random_search"]
mean_fitnesses = []
best_l2s = []
rand_l2s = []
if do_rand_search:
rs_l2s = []
for i in range(runs):
print(f"Run {i+1}")
real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook = run(params)
best_l2 = hmm.total_l2_diff(real_hmm, best_hmm)
rand_l2 = hmm.total_l2_diff(real_hmm, rand_hmm)
if do_rand_search:
rs_l2 = hmm.total_l2_diff(real_hmm, rs_best_hmm)
mean_fitnesses.append(logbook.select("mean"))
best_l2s.append(best_l2)
rand_l2s.append(rand_l2)
extra_msg = ""
if do_rand_search:
rs_l2s.append(rs_l2)
extra_msg = f", RandSearch L2: {rs_l2}"
print(f"Best L2: {best_l2}, Rand L2: {rand_l2}{extra_msg}")
exp_data = {
"params": all_params,
"mean_fitnesses": mean_fitnesses,
"best_l2s": best_l2s,
"rand_l2s": rand_l2s,
}
if do_rand_search:
exp_data["rs_l2s"] = rs_l2s
exp_file = f'experiments/exp_{datetime.now().strftime("%y%m%d-%H%M%S%f")}.json'
with open(exp_file, "w") as f:
json.dump(exp_data, f, indent=4)
return exp_data
def main():
# run() returns five values; compute the best L2 separately for the report below
real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook = run(DEFAULT_PARAMS)
best_l2 = hmm.total_l2_diff(real_hmm, best_hmm)
print(
f"""
Real HMM: {real_hmm}
Best HMM: {best_hmm}
Best L2: {best_l2}
"""
)
if __name__ == "__main__":
main()
|
[
"discriminator.Discriminator",
"random_search.run",
"hmm.total_l2_diff",
"numpy.array",
"datetime.datetime.now",
"deap.tools.selBest",
"hmm.random_hmm",
"json.dump"
] |
[((1444, 1467), 'hmm.random_hmm', 'hmm.random_hmm', (['x', 'y', 's'], {}), '(x, y, s)\n', (1458, 1467), False, 'import hmm\n'), ((1567, 1590), 'hmm.random_hmm', 'hmm.random_hmm', (['x', 'y', 's'], {}), '(x, y, s)\n', (1581, 1590), False, 'import hmm\n'), ((1600, 1746), 'discriminator.Discriminator', 'Discriminator', (['real_hmm', "params['epoch_size']", "params['batch_size']", "params['seq_len']"], {'model': "params['model']", 'pool_size': "params['_pool_size']"}), "(real_hmm, params['epoch_size'], params['batch_size'], params[\n 'seq_len'], model=params['model'], pool_size=params['_pool_size'])\n", (1613, 1746), False, 'from discriminator import Discriminator\n'), ((2406, 2438), 'deap.tools.selBest', 'deap.tools.selBest', (['final_pop', '(1)'], {}), '(final_pop, 1)\n', (2424, 2438), False, 'import deap\n'), ((2513, 2524), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2521, 2524), True, 'import numpy as np\n'), ((2647, 2720), 'random_search.run', 'random_search.run', (['d', "params['states']", "params['symbols']", "params['gens']"], {}), "(d, params['states'], params['symbols'], params['gens'])\n", (2664, 2720), False, 'import random_search\n'), ((3241, 3278), 'hmm.total_l2_diff', 'hmm.total_l2_diff', (['real_hmm', 'best_hmm'], {}), '(real_hmm, best_hmm)\n', (3258, 3278), False, 'import hmm\n'), ((3297, 3334), 'hmm.total_l2_diff', 'hmm.total_l2_diff', (['real_hmm', 'rand_hmm'], {}), '(real_hmm, rand_hmm)\n', (3314, 3334), False, 'import hmm\n'), ((4091, 4123), 'json.dump', 'json.dump', (['exp_data', 'f'], {'indent': '(4)'}), '(exp_data, f, indent=4)\n', (4100, 4123), False, 'import json\n'), ((3382, 3422), 'hmm.total_l2_diff', 'hmm.total_l2_diff', (['real_hmm', 'rs_best_hmm'], {}), '(real_hmm, rs_best_hmm)\n', (3399, 3422), False, 'import hmm\n'), ((3998, 4012), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4010, 4012), False, 'from datetime import datetime\n')]
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Neon backend wrapper for the NervanaGPU library. Most functions are thin
wrappers around functions from the NervanaGPU class; the GPUTensor is taken
directly from NervanaGPU as well.
NervanaGPU is available at `<https://github.com/NervanaSystems/nervanagpu>`
"""
import logging
from neon.backends.backend import Backend
from nervanagpu import NervanaGPU
from neon.diagnostics.timing_decorators import FlopsDecorator
import pycuda.driver as drv
import numpy as np
logger = logging.getLogger(__name__)
class GPU(Backend):
"""
Sets up a NervanaGPU based backend for matrix operations.
Note that some functions defined in the generic Backend class, such as
cross-map pooling and normalization, are not implemented for
this backend.
"""
default_dtype = np.float32
def __init__(self, rng_seed, stochastic_round=False, device_id=0):
import pycuda.driver as drv
drv.init()
global ctx
ctx = drv.Device(device_id).make_context()
import atexit
atexit.register(ctx.pop)
self.ng = NervanaGPU(stochastic_round=stochastic_round)
logger.info("Initialized NervanaGPU with stochastic_round=%s",
stochastic_round)
self.rng_seed = rng_seed
self.rng_init()
self.device_id = device_id if device_id is not None else 0
def __getstate__(self):
"""
Defines what and how we go about serializing an instance of this class.
Returns:
self.__dict__: The full contents of the backend class instance,
except for the mem_pool which is on device and
cannot be serialized.
"""
if hasattr(self, 'mem_pool') and self.mem_pool is not None:
self.mem_pool_pickle = {'shape': self.mem_pool.shape,
'dtype': np.float32}
self.mem_pool = None
return self.__dict__
def __setstate__(self, state):
"""
Defines how we go about deserializing into an instance of this class.
Arguments:
self.__dict__: The full contents of the backend class instance,
except for the mem_pool which is on device and
cannot be serialized.
"""
self.__dict__.update(state)
self.mem_pool = self.ng.empty(self.mem_pool_pickle['shape'],
dtype=self.mem_pool_pickle['dtype'])
def init_mempool(self, shape, dtype=default_dtype):
"""
Allocates a memory pool for temporary storage
"""
self.mem_pool = self.ng.empty(shape, dtype=dtype)
def alloc_host_mem(self, shape, dtype=default_dtype):
return drv.pagelocked_empty(shape, dtype, order="C", mem_flags=0)
def create_stream(self):
return drv.Stream()
def synchronize(self):
pass
def async_copy(self, dest, src, stream=None):
drv.memcpy_htod_async(dest.gpudata, src, stream)
def rng_init(self):
"""
Initialize and seed the pseudo-random number generator. Random numbers
are generated on the host using numpy, then transferred to the device.
"""
seed = None
if 'rng_seed' in self.__dict__:
seed = self.rng_seed
logger.info("Seeding random number generator with: %s", str(seed))
np.random.seed(seed)
def flop_timing_init(self, decorate_fc, decorate_conv, decorate_ew):
"""
Initialize FLOP timing. Wraps the specified MOP calls via a decorator
to record elapsed time and number of operations.
Arguments:
decorate_fc (list): string giving the function names of fully
connected layer forward/backward/update calls
to time.
decorate_conv (list): string giving the function names of
convolutional layer forward/backward/update
calls to time.
decorate_ew (list): string giving the function names of element-wise
calls to time.
Notes:
Must be called prior to first flop_timing_start call
"""
self.start = drv.Event()
self.end = drv.Event()
self.flop_timer = FlopsDecorator(self)
self.flop_timer.decorate(decorate_fc=decorate_fc,
decorate_conv=decorate_conv,
decorate_ew=decorate_ew)
def flop_timinig_start(self):
"""
Start a new FLOP timer.
Returns:
None: dummy value (not used)
"""
return self.start.record()
def flop_timing_finish(self, start_time):
"""
Complete current FLOP timing.
Arguments:
start_time (unused): ignored.
Returns:
float: elapsed time in seconds since prior flop_timing_start call.
"""
self.end.record()
self.end.synchronize()
return self.end.time_since(self.start)
def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype,
persist_values=True, name=None):
"""
Generate uniform random numbers with numpy and convert to a GPUTensor.
If called with dtype=None it will probably explode.
"""
ary = np.random.uniform(low, high, size)
return self.ng.array(ary, dtype=dtype, name=name)
def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype,
persist_values=True, name=None):
"""
Gaussian/Normal random number sample generation
"""
ary = np.random.normal(loc, scale, size)
return self.ng.array(ary, dtype=dtype, name=name)
def fprop_fc(self, out, inputs, weights, layer=None):
"""
Forward propagate the inputs of a fully connected network layer to
produce output pre-activations (ready for transformation by an
activation function).
Arguments:
out (GPUTensor): Where to store the forward propagated results.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
weights (GPUTensor): The weight coefficient values for this layer.
layer (Layer): The layer object.
"""
self.ng.dot(weights, inputs, out)
def bprop_fc(self, out, weights, deltas, layer=None):
"""
Backward propagate the error through a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
self.ng.dot(weights.T, deltas, out)
def update_fc(self, out, inputs, deltas, layer=None):
"""
Compute the updated gradient for a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
self.ng.dot(deltas, inputs.T, out)
def update_fc_bias(self, err, out):
"""
Compute the updated bias gradient for a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
err (GPUTensor): backpropagated error
"""
self.ng.sum(err, axis=1, out=out)
def add_fc_bias(self, inputs, bias):
"""
Add the bias for a fully connected network layer.
Arguments:
inputs (GPUTensor): the input to update.
bias (GPUTensor): the amount to increment
"""
self.ng.add(inputs, bias, out=inputs)
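# Shape summary for the fully connected wrappers above (a reader's note derived from the
# docstrings, not part of the original backend): with inputs (nin, batch),
# weights (nout, nin) and deltas (nout, batch):
#   fprop_fc:        out = weights . inputs     -> (nout, batch)
#   bprop_fc:        out = weights.T . deltas   -> (nin, batch)
#   update_fc:       out = deltas . inputs.T    -> (nout, nin)
#   update_fc_bias:  out = sum(deltas, axis=1)  -> (nout, 1)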
def fprop_conv(self, out, inputs, weights, ofmshape, ofmsize, ofmlocs,
ifmshape, links, nifm, padding, stride, ngroups, fpropbuf,
local=False):
"""
Forward propagate the inputs of a convolutional network layer to
produce output pre-activations (ready for transformation by an
activation function).
Arguments:
out (GPUTensor): Where to store the forward propagated results.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
weights (GPUTensor): The weight coefficient values for this layer.
ofmshape (tuple): Dimensions of each output feature map (typically
number of height and width neurons).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element
in each output feature map stored in out.
ifmshape (tuple): Dimensions of each input feature map (typically
number of height and width neurons). For this
backend we expect these values to be square.
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
convolution operation.
stride (int): Number of neurons to shift the filter at each step.
ngroups (int): Number of groups.
fpropbuf (GPUTensor): Temporary storage buffer used to hold the
convolved outputs for a single receptive
field. Not used for this backend.
local (bool, optional): Whether to do local filtering (True) or
convolution (False, the default)
"""
'''
N: Number of images in mini-batch
C: Number of input feature maps
K: Number of output feature maps
D: Depth of input image
H: Height of input image
W: Width of input image
T: Depth of filter kernel
R: Height of filter kernel
S: Width of filter kernel
'''
self.ng.fprop_conv(layer=fpropbuf, I=inputs, F=weights, O=out,
alpha=1.0, repeat=1)
def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs,
ifmshape, links, padding, stride, nifm, ngroups, bpropbuf,
local=False):
"""
Backward propagate the error through a convolutional network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
ofmshape (tuple): Dimensions of each output feature map (typically
height and width).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
ifmshape (tuple): Dimensions of each input feature map (typically
height and width).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
convolution operation.
stride (int): Number of neurons to shift the filter at each step.
ngroups (int): Number of groups.
bpropbuf (GPUTensor): Temporary storage buffer used to hold the
backpropagated error for a single receptive
field
local (bool, optional): Whether to do local filtering (True) or
convolution (False, the default)
"""
self.ng.bprop_conv(layer=bpropbuf, F=weights, E=deltas, grad_I=out,
alpha=1.0, repeat=1)
def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize,
ofmlocs, ifmshape, links, nifm, padding, stride, ngroups,
fwidth, updatebuf, local=False, layer=None):
"""
Compute the updated gradient for a convolutional network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
ofmshape (tuple): Dimensions of each output feature map (typically
height and width).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
ifmshape (tuple): Dimensions of each input feature map (typically
height and width).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
convolution operation.
stride (int): Number of neurons to shift the filter at each step.
ngroups (int): Number of groups.
fwidth (int): Filter width.
updatebuf (GPUTensor): Temporary storage buffer used to hold the
updated gradient for a single receptive
field
local (bool, optional): Whether to do local filtering (True) or
convolution (False, the default)
layer (Layer): The layer object.
"""
self.ng.update_conv(layer=updatebuf, I=inputs, E=deltas, grad_F=out,
alpha=1.0, repeat=1)
def fprop_pool(self, out, inputs, op, ofmshape, ofmsize, ofmlocs, fshape,
ifmshape, links, nifm, padding, stride, fpropbuf):
"""
Forward propagate the inputs of a Pooling network layer to
produce output pre-activations (ready for transformation by an
activation function).
Arguments:
out (GPUTensor): Where to store the forward propagated results.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
op (string): The type of pooling operation to apply. We support
"max", "avg", "l2" currently.
ofmshape (tuple): Dimensions of each output feature map (typically
number of height and width neurons).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
fshape (tuple): Dimensions of each filter (typically height and
width).
ifmshape (tuple): Dimensions of each input feature map (typically
number of height and width neurons).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
pooling operation.
stride (int): Number of neurons to shift the filter at each step.
fpropbuf (GPUTensor): Temporary storage buffer used to hold the
pooled outputs for a single receptive field.
"""
op = op.lower()
if op == "max":
self.ng.fprop_pool(layer=fpropbuf, I=inputs, O=out, repeat=1)
else:
raise AttributeError("unexpected pooling op type: %s", op)
def bprop_pool(self, out, fouts, inputs, deltas, op, ofmshape, ofmsize,
ofmlocs, fshape, fpsize, ifmshape, links, nifm, padding,
stride, bpropbuf):
"""
Backward propagate the error through a pooling network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
fouts (GPUTensor): Forward propagated outputs from the previous
layer.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
deltas (GPUTensor): The error values for this layer
op (string): The type of pooling operation to apply. We support
"max", "avg", "l2" currently.
ofmshape (tuple): Dimensions of each output feature map (typically
height and width).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
fshape (tuple): Dimensions of each filter (typically height and
width).
fpsize (int): The size of each filter.
ifmshape (tuple): Dimensions of each input feature map (typically
height and width).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
pooling operation.
stride (int): Number of neurons to shift the filter at each step.
bpropbuf (GPUTensor): Temporary storage buffer used to hold the
backpropagated error for a single receptive
field
"""
op = op.lower()
if op == "max":
self.ng.bprop_pool(layer=bpropbuf, I=inputs, E=deltas, grad_I=out,
repeat=1)
else:
raise AttributeError("unexpected pooling op type: %s", op)
def logistic(self, x, out):
"""
Logistic sigmoid nonlinearity, 1/(1+exp(-x))
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.sig(x, out=out)
return out
def transpose(self, untransposed, transposed):
transposed[:] = untransposed.T
def crossent(self, y, t, partial, out, epsilon, doscale, ismulti=False):
"""
Computes cross entropy cost.
Arguments:
y (GPUTensor): Model outputs
t (GPUTensor): Targets
partial (GPUTensor): temporary buffer used for 2D reduction
out (GPUTensor): Storage for the cross entropy output
epsilon (float): constant for numerical stability
doscale (boolean): If True, cross_entropy is scaled by batch size
ismulti (boolean): If True, compute multi class cross_entropy
"""
sumbuf = partial.reshape((partial.size, 1))[:partial.shape[0]]
if ismulti:
self.ng.sum(-t * self.ng.log(y + epsilon),
axis=None, partial=sumbuf, out=out)
else:
self.ng.sum((t - 1) * self.ng.log(1 - y + epsilon) -
t * self.ng.log(y + epsilon),
axis=None, partial=sumbuf, out=out)
if doscale:
out[:] = out / y.shape[1]
return out
def logistic_compound(self, inputs, outputs):
"""
Applies logistic function and its derivative to the dataset passed.
Arguments:
inputs (GPUTensor): Input data to be transformed. This also
acts as storage for the output of the
derivative function.
outputs (GPUTensor): Storage for the transformed output.
"""
# Apply the logistic function.
outputs[:] = self.ng.sig(inputs)
        inputs[:] = (1.0 - outputs) * outputs  # logistic derivative: sig(x) * (1 - sig(x))
def rectlin(self, x, out):
"""
Rectified Linear nonlinearity
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.maximum(x, 0., out=out)
return out
def rectlin_derivative(self, x, out):
"""
Rectified linear nonlinearity derivative
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.greater(x, 0, out=out)
return out
def rectleaky(self, x, slope, out):
"""
Leaky rectified linear nonlinearity
Arguments:
x (GPUTensor): Input tensor
slope (float): amount of gradient to apply when unit is not active
out (GPUTensor): Output tensor
"""
out[:] = self.ng.maximum(x, x*slope)
def rectleaky_derivative(self, x, slope, out):
"""
Leaky rectified linear nonlinearity derivative
Arguments:
x (GPUTensor): Input tensor
slope (float): amount of gradient to apply when unit is not active
out (GPUTensor): Output tensor
"""
out[:] = self.ng.greater(x, 0) * (1.0 - slope) + slope
def sum(self, tsr, axes, out):
"""
Sum
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.sum(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.sum(tsr, axis=axes, out=out)
return out
def norm(self, tsr, order=None, axis=None, out=None):
"""
Calculates and returns the vector p-norms of the GPUTensor along the
specified axis. The p-norm is defined on a vector A as
        :math:`||A||_p = (\sum_i |A_i|^p)^{1/p}`.
Arguments:
tsr (GPUTensor): the GPUTensor on which to find the norms
order (int): The order or p upon which the norm is calculated.
Valid values include:
None, inf, -inf, 0, 1, -1, 2, -2, ...
axis (int): The axis along which to compute vector norms.
out (GPUTensor): where to write the results to. Must be
of the expected result shape.
Returns:
GPUTensor: p-norm of tsr along the specified axis.
Raises:
IndexError if invalid axis specified
AttributeError if invalid order specified
See Also:
`numpy.linalg.norm`
"""
if not isinstance(axis, int) or axis < 0 or axis >= len(tsr.shape):
raise IndexError("invalid axis value: %s", axis)
if not isinstance(order, (int, float)):
raise AttributeError("invalid order value: %s", order)
if out is None:
raise AttributeError("No output tensor speficied", order)
if order == float('Inf'):
self.ng.max(self.fabs(tsr), axis, out)
elif order == float('-Inf'):
self.ng.min(self.fabs(tsr), axis, out)
elif order == 0:
tmp = self.zeros(tsr.shape)
self.ng.not_equal(tsr, tmp, tmp)
self.ng.sum(tmp, axis, out)
else:
tmp = self.empty(tsr.shape)
self.ng.power(self.fabs(tsr), order, tmp)
self.ng.sum(tmp, axis, out)
self.ng.power(out, (1.0 / order), out)
return out
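    # Illustrative usage sketch (not part of the original source): assuming be is a
    # backend instance and t a 2D GPUTensor with a preallocated result tensor o,
    #     be.norm(t, order=2, axis=0, out=o)
    # would compute the column-wise L2 norms of t.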
def mean(self, tsr, axes, out):
"""
Calculates the arithmetic mean of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.mean(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.mean(tsr, axis=axes, out=out)
return out
def min(self, tsr, axes, out):
"""
Calculates the minimum of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.min(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.min(tsr, axis=axes, out=out)
return out
def max(self, tsr, axes, out):
"""
Calculates the maximum of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.max(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.max(tsr, axis=axes, out=out)
return out
def variance(self, tsr, axes, out, mean=None):
"""
Calculates the variance of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): the tensor on which to compute the variance
axes (int, list, optional): the dimension(s) along which to
variance. If set to None, we will
variance over all dimensions.
out (GPUTensor): where the result will be stored.
mean (GPUTensor): the tensor containing mean of tsr
Returns:
GPUTensor: reference to out
"""
if mean is None:
logger.error("GPUTensor requires mean to be specified.")
raise ValueError("mean not specified")
self.ng.mean(self.ng.square(tsr-mean), axis=axes, out=out)
return out
def fabs(self, x, out):
"""
Calculates absolute value of the elements in a tensor
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
Returns:
GPUTensor: reference to out
"""
self.ng.fabs(x, out=out)
return out
def sqrt(self, x, out):
"""
Calculates square root of the elements in a tensor
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
Returns:
GPUTensor: reference to out
"""
self.ng.sqrt(x, out=out)
return out
def zeros(self, shape, dtype=default_dtype, persist_values=True):
"""
Allocate a new GPUTensor and fill it with zeros.
Arguments:
            shape (tuple): Shape of the desired GPUTensor
dtype (dtype): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
GPUTensor: output
"""
return self.ng.zeros(shape, dtype=dtype)
def ones(self, shape, dtype=default_dtype, persist_values=True):
"""
Allocate a new GPUTensor and fill it with ones.
Arguments:
            shape (tuple): Shape of the desired GPUTensor
dtype (dtype): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
GPUTensor: output
"""
return self.ng.ones(shape, dtype=dtype)
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary and populating each element with a value of 0.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
return self.zeros(ary.shape, dtype=dtype,
persist_values=persist_values)
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
return self.empty(ary.shape, dtype=dtype,
persist_values=persist_values, name=name)
def empty(self, shape, dtype=default_dtype, persist_values=True,
name=None):
"""
Allocate a new GPUTensor.
Arguments:
            shape (tuple): Shape of the desired GPUTensor
dtype (dtype): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
GPUTensor: output
"""
return self.ng.empty(shape, dtype=dtype)
def copy(self, ary):
"""
returns a copy of ary
"""
res = self.empty_like(ary)
res.copy(ary)
return res
def array(self, ary, dtype=default_dtype, persist_values=True, name=None,
allocator=drv.mem_alloc):
"""
Allocate a new GPUTensor and fill it with supplied numpy array.
Arguments:
ary (ndarray): Numpy array with source data
dtype (dtype, optional): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
name (string): Name for the GPUTensor
allocator (pycuda): Pycuda memory allocator
Returns:
GPUTensor: output
"""
return self.ng.array(ary, dtype=dtype, name=name)
def add(self, left, right, out):
"""
Elementwise addition
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.add(left, right, out=out)
return out
def subtract(self, left, right, out):
"""
Elementwise subtraction
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.subtract(left, right, out=out)
return out
def multiply(self, left, right, out):
"""
Elementwise multiplication
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.multiply(left, right, out=out)
return out
def divide(self, left, right, out):
"""
Elementwise division
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.divide(left, right, out=out)
return out
def greater(self, left, right, out):
"""
Elementwise greater than testing
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.greater(left, right, out=out)
return out
def equal(self, left, right, out):
"""
Performs element-wise equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.equal(left, right, out=out)
return out
def not_equal(self, left, right, out):
"""
Elementwise not equal testing
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.not_equal(left, right, out=out)
return out
def clip(self, a, a_min, a_max, out):
"""
Elementwise clipping between a range of specified values
Arguments:
a (GPUTensor): input tensor.
a_min (float): floor value.
a_max (float): ceiling value.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.clip(a, a_min, a_max, out=out)
return out
def log(self, a, out):
"""
Elementwise base-e logarithm
Arguments:
a (GPUTensor): input tensor.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.log(a, out=out)
return out
def tanh(self, a, out):
"""
Elementwise tanh
Arguments:
a (GPUTensor): input tensor.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.tanh(a, out=out)
return out
def argmax(self, a, out, axis=0):
"""
Calculates the indices of the maximal element value along the specified
        axis. If multiple elements contain the maximum, only the index of the
        first occurrence is returned.
Arguments:
tsr (GPUTensor): The GPUTensor on which to find the maximum indices
axis (int): The dimension along which to find the maximum. If set
to None, find the overall maximum index of a flattened
representation of tsr.
out (GPUTensor): Where to store the result. Should be of the
appropriate type and expected shape
Returns:
GPUTensor: reference to out
"""
self.ng.argmax(a, out=out, axis=axis)
return out
def softmax(self, x, out):
"""
Softmax nonlinearity. Computes exp(x-max(x)) / sum_i exp(x_i-max(x_i))
Arguments:
x (GPUTensor): input tensor.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
out[:] = (self.ng.reciprocal(self.ng.sum(
self.ng.exp(x - self.ng.max(x, axis=0)), axis=0)) *
self.ng.exp(x - self.ng.max(x, axis=0)))
return out
def softmax_gradient(self, y, err, out):
"""
Gradient of the softmax nonlinearity.
Arguments:
y (GPUTensor): input tensor.
err (GPUTensor): backpropagated error.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
raise NotImplementedError("Softmax gradient should use shortcut")
return out
def make_binary_mask(self, tsr, keepthresh=0.5, dtype=default_dtype):
"""
Create a binary mask for dropout layers.
Arguments:
tsr (GPUTensor): Output tensor
keepthresh (float): fraction of ones
"""
self.ng.dropout(keep=keepthresh, out=tsr)
def gdm_compound(self, ps_item, us_item, vs_item, momentum_coef,
learning_rate, epoch):
"""
Perform gradient descent update with momentum.
Arguments:
ps_item (GPUTensor): parameter tensor (e.g. a weight matrix)
us_item (GPUTensor): update tensor, contains gradient wrt. weights
vs_item (GPUTensor): velocity tensor.
momentum_coef (float): momentum coefficient.
learning_rate (float): learning rate.
epoch (int): epoch (used in conjunction with diagnostics).
Outputs are written to vs_item (updated velocity)
and ps_item (updated weights)
"""
vs_item[:] = vs_item * momentum_coef - us_item * learning_rate
ps_item[:] = ps_item + vs_item
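    # Worked example (illustrative, not from the original source): with momentum_coef=0.9,
    # learning_rate=0.1, vs_item=0 and us_item=1, the update above gives
    # vs_item = 0 * 0.9 - 1 * 0.1 = -0.1, so ps_item is decreased by 0.1 and the very
    # first step matches plain SGD.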
def gdmwd_compound(self, ps_item, us_item, vs_item, momentum_coef,
learning_rate, wd, epoch):
"""
Perform gradient descent update with momentum and weight decay.
Arguments:
ps_item (GPUTensor): parameter tensor (e.g. a weight matrix)
us_item (GPUTensor): update tensor, contains gradient wrt. weights
vs_item (GPUTensor): velocity tensor.
momentum_coef (float): momentum coefficient.
learning_rate (float): learning rate.
wd (float): weight decay parameter.
epoch (int): epoch (used in conjunction with diagnostics).
Outputs:
ps_item, the updated weights.
vs_item, the updated velocity.
us_item, used as a temp buffer.
"""
vs_item[:] = (vs_item * momentum_coef -
us_item * learning_rate -
ps_item * learning_rate * wd)
ps_item[:] = ps_item + vs_item
def exp_mavg(self, mavg, newval, rho):
"""
Calculate the exponential moving average
Arguments:
mavg: The running value of the moving average
newval: New sample to be added to the moving average
rho: Interpolation value
"""
mavg[:] = rho * mavg + (1.0 - rho) * newval
def ada_update(self, ps_item, us_item, gs_item, ds_item, ls_item, ss_item,
rho, epsilon):
"""
Update rule for AdaDelta (Zeiler, http://arxiv.org/abs/1212.5701)
Arguments:
ps_item: weight / parameter (will be updated)
us_item: update
gs_item: expected value of Gradient Squared (will be updated)
ds_item: expected value of Delta Squared (will be updated)
ls_item: learning rate (will be updated)
ss_item: Scratch Space
rho: decay constant (determines window size)
epsilon: small positive constant for numerical stability
"""
# Accumulate E[Grad^2]
gs_item[:] = gs_item * rho + (1.0 - rho) * us_item * us_item
# Calculate Updates
ls_item[:] = self.ng.sqrt((ds_item + epsilon) /
(gs_item + epsilon)) * (-1.0) * us_item
# Accumulate E[Delt^2]
ds_item[:] = ds_item * rho + (1.0 - rho) * ls_item * ls_item
# Final update to the params
ps_item[:] = ps_item + ls_item
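    # In update-rule form (illustrative restatement of the code above, following Zeiler):
    #     E[g^2]  <- rho * E[g^2] + (1 - rho) * g^2
    #     dx      = -sqrt((E[dx^2] + eps) / (E[g^2] + eps)) * g
    #     E[dx^2] <- rho * E[dx^2] + (1 - rho) * dx^2
    #     x       <- x + dx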
def rms_update(self, params, updates, run_squares, velocity, scratch_space,
gamma, epsilon, learning_rate, momentum_coef):
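        """
        Compound RMSProp-style update, optionally with momentum.
        Arguments:
            params (GPUTensor): parameter tensor to be updated
            updates (GPUTensor): gradient wrt. the parameters
            run_squares (GPUTensor): running average of squared gradients (updated)
            velocity (GPUTensor): velocity tensor, used when momentum_coef != 0
            scratch_space (GPUTensor): temporary storage buffer
            gamma (float): decay constant for the running squared-gradient average
            epsilon (float): small positive constant for numerical stability
            learning_rate (float): learning rate
            momentum_coef (float): momentum coefficient
        """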
# Update running squares
run_squares[:] = gamma * run_squares + (1. - gamma) * updates * updates
# Now scale the gradient by lr / rms(grad) (with a epsilon term for
# stability) and use it to update the params
if momentum_coef == 0:
params[:] = params - learning_rate * updates * self.ng.reciprocal(
self.ng.sqrt(run_squares) + epsilon)
else:
velocity[:] = velocity * momentum_coef - \
learning_rate * updates * \
self.ng.reciprocal(self.ng.sqrt(run_squares) + epsilon)
params[:] = params + velocity
def fprop_bn_compound(self, inputs, beta, gamma, eps, xhat,
xmean, xvar, gmean, gvar, rho, out):
"""
Batch normalization forward pass, compounded to run in 3 kernel calls.
        Arguments:
            inputs: input data to be normalized
            beta: location parameter
            gamma: scale parameter
            eps: small constant for numerical stability
            xhat: normalized input (updated)
            xmean: batch mean (updated)
            xvar: batch variance (updated)
            gmean: running global mean (updated)
            gvar: running global variance (updated)
            rho: exponential decay rate for the running mean and variance
            out: normalized and rescaled input (updated)
"""
xvar[:] = self.ng.var(inputs, axis=1)
xmean[:] = self.ng.mean(inputs, axis=1)
gmean[:] = gmean * rho + (1.0 - rho) * xmean
gvar[:] = gvar * rho + (1.0 - rho) * xvar
xvar[:] = self.ng.reciprocal(self.ng.sqrt(xvar + eps))
xhat[:] = xvar * (inputs - xmean)
out[:] = xhat * gamma + beta
return out
def bprop_bn_compound(self, xhat, error, xvar, gamma,
beta_updates, gamma_updates):
"""
Batch normalization backward pass, compounded to run with 4 kernel
calls.
Arguments:
xhat: normalized input data (updated)
error: backpropagated deltas (updated)
xvar: precomputed variance
gamma: scale parameter
beta_updates: gradient update for beta (updated)
gamma_updates: gradient update for gamma (updated)
"""
gamma_updates[:] = self.ng.sum(xhat * error, axis=1)
beta_updates[:] = self.ng.sum(error, axis=1)
xhat[:] = (xhat * gamma_updates + beta_updates) / float(xhat.shape[1])
error[:] = xvar * gamma * (error - xhat)
|
[
"logging.getLogger",
"nervanagpu.NervanaGPU",
"numpy.random.normal",
"pycuda.driver.pagelocked_empty",
"pycuda.driver.Stream",
"pycuda.driver.Device",
"pycuda.driver.memcpy_htod_async",
"pycuda.driver.init",
"neon.diagnostics.timing_decorators.FlopsDecorator",
"numpy.random.seed",
"numpy.random.uniform",
"pycuda.driver.Event",
"atexit.register"
] |
[((1219, 1246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1236, 1246), False, 'import logging\n'), ((1654, 1664), 'pycuda.driver.init', 'drv.init', ([], {}), '()\n', (1662, 1664), True, 'import pycuda.driver as drv\n'), ((1765, 1789), 'atexit.register', 'atexit.register', (['ctx.pop'], {}), '(ctx.pop)\n', (1780, 1789), False, 'import atexit\n'), ((1808, 1853), 'nervanagpu.NervanaGPU', 'NervanaGPU', ([], {'stochastic_round': 'stochastic_round'}), '(stochastic_round=stochastic_round)\n', (1818, 1853), False, 'from nervanagpu import NervanaGPU\n'), ((3495, 3553), 'pycuda.driver.pagelocked_empty', 'drv.pagelocked_empty', (['shape', 'dtype'], {'order': '"""C"""', 'mem_flags': '(0)'}), "(shape, dtype, order='C', mem_flags=0)\n", (3515, 3553), True, 'import pycuda.driver as drv\n'), ((3599, 3611), 'pycuda.driver.Stream', 'drv.Stream', ([], {}), '()\n', (3609, 3611), True, 'import pycuda.driver as drv\n'), ((3712, 3760), 'pycuda.driver.memcpy_htod_async', 'drv.memcpy_htod_async', (['dest.gpudata', 'src', 'stream'], {}), '(dest.gpudata, src, stream)\n', (3733, 3760), True, 'import pycuda.driver as drv\n'), ((4142, 4162), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4156, 4162), True, 'import numpy as np\n'), ((5029, 5040), 'pycuda.driver.Event', 'drv.Event', ([], {}), '()\n', (5038, 5040), True, 'import pycuda.driver as drv\n'), ((5060, 5071), 'pycuda.driver.Event', 'drv.Event', ([], {}), '()\n', (5069, 5071), True, 'import pycuda.driver as drv\n'), ((5098, 5118), 'neon.diagnostics.timing_decorators.FlopsDecorator', 'FlopsDecorator', (['self'], {}), '(self)\n', (5112, 5118), False, 'from neon.diagnostics.timing_decorators import FlopsDecorator\n'), ((6134, 6168), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high', 'size'], {}), '(low, high, size)\n', (6151, 6168), True, 'import numpy as np\n'), ((6440, 6474), 'numpy.random.normal', 'np.random.normal', (['loc', 'scale', 'size'], {}), '(loc, scale, size)\n', (6456, 6474), True, 'import numpy as np\n'), ((1698, 1719), 'pycuda.driver.Device', 'drv.Device', (['device_id'], {}), '(device_id)\n', (1708, 1719), True, 'import pycuda.driver as drv\n')]
|
# -*- encoding: utf-8 -*-
import os
import pickle
import sys
import time
import glob
import unittest
import unittest.mock
import numpy as np
import pandas as pd
import sklearn.datasets
from smac.scenario.scenario import Scenario
from smac.facade.roar_facade import ROAR
from autosklearn.util.backend import Backend
from autosklearn.automl import AutoML
import autosklearn.automl
from autosklearn.data.xy_data_manager import XYDataManager
from autosklearn.metrics import accuracy, log_loss, balanced_accuracy
import autosklearn.pipeline.util as putil
from autosklearn.util.logging_ import setup_logger, get_logger
from autosklearn.constants import MULTICLASS_CLASSIFICATION, BINARY_CLASSIFICATION, REGRESSION
from smac.tae.execute_ta_run import StatusType
sys.path.append(os.path.dirname(__file__))
from base import Base # noqa (E402: module level import not at top of file)
class AutoMLStub(AutoML):
def __init__(self):
self.__class__ = AutoML
self._task = None
class AutoMLTest(Base, unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
super().setUp()
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = unittest.mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_refit_shuffle_on_fail(self):
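        """Models whose fit fails are retried by refit; here fit succeeds on the third
        attempt, so refit must not raise and fit must have been called three times."""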
backend_api = self._create_backend('test_refit_shuffle_on_fail')
failing_model = unittest.mock.Mock()
failing_model.fit.side_effect = [ValueError(), ValueError(), None]
failing_model.fit_transformer.side_effect = [
ValueError(), ValueError(), (None, {})]
failing_model.get_max_iter.return_value = 100
auto = AutoML(backend_api, 20, 5)
ensemble_mock = unittest.mock.Mock()
ensemble_mock.get_selected_model_identifiers.return_value = [(1, 1, 50.0)]
auto.ensemble_ = ensemble_mock
for budget_type in [None, 'iterations']:
auto._budget_type = budget_type
auto.models_ = {(1, 1, 50.0): failing_model}
# Make sure a valid 2D array is given to automl
X = np.array([1, 2, 3]).reshape(-1, 1)
y = np.array([1, 2, 3])
auto.refit(X, y)
self.assertEqual(failing_model.fit.call_count, 3)
self.assertEqual(failing_model.fit_transformer.call_count, 3)
del auto
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_only_loads_ensemble_models(self):
def side_effect(ids, *args, **kwargs):
return models if ids is identifiers else {}
# Add a resampling strategy as this is required by load_models
self.automl._resampling_strategy = 'holdout'
identifiers = [(1, 2), (3, 4)]
models = [42]
load_ensemble_mock = unittest.mock.Mock()
load_ensemble_mock.get_selected_model_identifiers.return_value = identifiers
self.automl._backend.load_ensemble.return_value = load_ensemble_mock
self.automl._backend.load_models_by_identifiers.side_effect = side_effect
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
self.assertIsNone(self.automl.cv_models_)
self.automl._resampling_strategy = 'cv'
models = [42]
self.automl._backend.load_cv_models_by_identifiers.side_effect = side_effect
self.automl._load_models()
self.assertEqual(models, self.automl.cv_models_)
def test_check_for_models_if_no_ensemble(self):
models = [42]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.list_all_models.return_value = models
self.automl._disable_evaluator_output = False
self.automl._load_models()
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.list_all_models.return_value = []
self.automl._resampling_strategy = 'holdout'
self.automl._disable_evaluator_output = False
self.assertRaises(ValueError, self.automl._load_models)
self.automl._disable_evaluator_output = True
self.automl._load_models()
def test_fit(self):
backend_api = self._create_backend('test_fit')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
automl.fit(
X_train, Y_train, task=MULTICLASS_CLASSIFICATION,
)
score = automl.score(X_test, Y_test)
self.assertGreaterEqual(score, 0.8)
self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_delete_non_candidate_models(self):
backend_api = self._create_backend(
'test_delete', delete_tmp_folder_after_terminate=False)
seed = 555
X, Y, _, _ = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend_api,
time_left_for_this_task=30,
per_run_time_limit=5,
ensemble_nbest=3,
seed=seed,
initial_configurations_via_metalearning=0,
resampling_strategy='holdout',
include_estimators=['sgd'],
include_preprocessors=['no_preprocessing'],
metric=accuracy,
)
automl.fit(X, Y, task=MULTICLASS_CLASSIFICATION,
X_test=X, y_test=Y)
# Assert at least one model file has been deleted and that there were no
# deletion errors
log_file_path = glob.glob(os.path.join(
backend_api.temporary_directory, 'AutoML(' + str(seed) + '):*.log'))
with open(log_file_path[0]) as log_file:
log_content = log_file.read()
self.assertIn('Deleted files of non-candidate model', log_content)
self.assertNotIn('Failed to delete files of non-candidate model', log_content)
self.assertNotIn('Failed to lock model', log_content)
# Assert that the files of the models used by the ensemble weren't deleted
model_files = backend_api.list_all_models(seed=seed)
model_files_idx = set()
for m_file in model_files:
# Extract the model identifiers from the filename
m_file = os.path.split(m_file)[1].replace('.model', '').split('.', 2)
model_files_idx.add((int(m_file[0]), int(m_file[1]), float(m_file[2])))
ensemble_members_idx = set(automl.ensemble_.identifiers_)
self.assertTrue(ensemble_members_idx.issubset(model_files_idx))
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fit_roar(self):
def get_roar_object_callback(
scenario_dict,
seed,
ta,
ta_kwargs,
**kwargs
):
"""Random online adaptive racing.
http://ml.informatik.uni-freiburg.de/papers/11-LION5-SMAC.pdf"""
scenario = Scenario(scenario_dict)
return ROAR(
scenario=scenario,
rng=seed,
tae_runner=ta,
tae_runner_kwargs=ta_kwargs,
)
backend_api = self._create_backend('test_fit_roar')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
initial_configurations_via_metalearning=0,
get_smac_object_callback=get_roar_object_callback,
metric=accuracy,
)
setup_logger()
automl._logger = get_logger('test_fit_roar')
automl.fit(
X_train, Y_train, task=MULTICLASS_CLASSIFICATION,
)
score = automl.score(X_test, Y_test)
self.assertGreaterEqual(score, 0.8)
self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_binary_score_and_include(self):
"""
        Test the fix for binary classification prediction, which takes
        index 1 of the second dimension of the prediction matrix
"""
backend_api = self._create_backend('test_binary_score_and_include')
data = sklearn.datasets.make_classification(
n_samples=400, n_features=10, n_redundant=1, n_informative=3,
n_repeated=1, n_clusters_per_class=2, random_state=1)
X_train = data[0][:200]
Y_train = data[1][:200]
X_test = data[0][200:]
Y_test = data[1][200:]
automl = autosklearn.automl.AutoML(
backend_api, 20, 5,
include_estimators=['sgd'],
include_preprocessors=['no_preprocessing'],
metric=accuracy,
)
automl.fit(X_train, Y_train, task=BINARY_CLASSIFICATION)
self.assertEqual(automl._task, BINARY_CLASSIFICATION)
# TODO, the assumption from above is not really tested here
# Also, the score method should be removed, it only makes little sense
score = automl.score(X_test, Y_test)
self.assertGreaterEqual(score, 0.4)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_automl_outputs(self):
backend_api = self._create_backend('test_automl_outputs')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
name = 'iris'
data_manager_file = os.path.join(
backend_api.temporary_directory,
'.auto-sklearn',
'datamanager.pkl'
)
auto = autosklearn.automl.AutoML(
backend_api, 20, 5,
initial_configurations_via_metalearning=0,
seed=100,
metric=accuracy,
)
setup_logger()
auto._logger = get_logger('test_automl_outputs')
auto.fit(
X=X_train,
y=Y_train,
X_test=X_test,
y_test=Y_test,
dataset_name=name,
task=MULTICLASS_CLASSIFICATION,
)
# pickled data manager (without one hot encoding!)
with open(data_manager_file, 'rb') as fh:
D = pickle.load(fh)
self.assertTrue(np.allclose(D.data['X_train'], X_train))
# Check that all directories are there
fixture = ['cv_models', 'true_targets_ensemble.npy',
'start_time_100', 'datamanager.pkl',
'predictions_ensemble',
'ensembles', 'predictions_test', 'models']
self.assertEqual(sorted(os.listdir(os.path.join(backend_api.temporary_directory,
'.auto-sklearn'))),
sorted(fixture))
# At least one ensemble, one validation, one test prediction and one
# model and one ensemble
fixture = os.listdir(os.path.join(backend_api.temporary_directory,
'.auto-sklearn', 'predictions_ensemble'))
self.assertGreater(len(fixture), 0)
fixture = glob.glob(os.path.join(backend_api.temporary_directory, '.auto-sklearn',
'models', '100.*.model'))
self.assertGreater(len(fixture), 0)
fixture = os.listdir(os.path.join(backend_api.temporary_directory,
'.auto-sklearn', 'ensembles'))
self.assertIn('100.0000000001.ensemble', fixture)
# Start time
start_time_file_path = os.path.join(backend_api.temporary_directory,
'.auto-sklearn', "start_time_100")
with open(start_time_file_path, 'r') as fh:
start_time = float(fh.read())
self.assertGreaterEqual(time.time() - start_time, 10)
del auto
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_do_dummy_prediction(self):
datasets = {
'breast_cancer': BINARY_CLASSIFICATION,
'wine': MULTICLASS_CLASSIFICATION,
'diabetes': REGRESSION,
}
for name, task in datasets.items():
backend_api = self._create_backend('test_do_dummy_prediction')
X_train, Y_train, X_test, Y_test = putil.get_dataset(name)
datamanager = XYDataManager(
X_train, Y_train,
X_test, Y_test,
task=task,
dataset_name=name,
feat_type=None,
)
auto = autosklearn.automl.AutoML(
backend_api, 20, 5,
initial_configurations_via_metalearning=25,
metric=accuracy,
)
setup_logger()
auto._logger = get_logger('test_do_dummy_predictions')
auto._backend.save_datamanager(datamanager)
D = backend_api.load_datamanager()
            # Check if data manager is correctly loaded
self.assertEqual(D.info['task'], datamanager.info['task'])
auto._do_dummy_prediction(D, 1)
# Ensure that the dummy predictions are not in the current working
# directory, but in the temporary directory.
self.assertFalse(os.path.exists(os.path.join(os.getcwd(),
'.auto-sklearn')))
self.assertTrue(os.path.exists(os.path.join(
backend_api.temporary_directory, '.auto-sklearn', 'predictions_ensemble',
'predictions_ensemble_1_1_0.0.npy')))
del auto
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
@unittest.mock.patch('autosklearn.evaluation.ExecuteTaFuncWithQueue.run')
def test_fail_if_dummy_prediction_fails(self, ta_run_mock):
backend_api = self._create_backend('test_fail_if_dummy_prediction_fails')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
datamanager = XYDataManager(
X_train, Y_train,
X_test, Y_test,
task=2,
feat_type=['Numerical' for i in range(X_train.shape[1])],
dataset_name='iris',
)
time_for_this_task = 30
per_run_time = 10
auto = autosklearn.automl.AutoML(backend_api,
time_for_this_task,
per_run_time,
initial_configurations_via_metalearning=25,
metric=accuracy,
)
setup_logger()
auto._logger = get_logger('test_fail_if_dummy_prediction_fails')
auto._backend._make_internals_directory()
auto._backend.save_datamanager(datamanager)
# First of all, check that ta.run() is actually called.
ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test"
auto._do_dummy_prediction(datamanager, 1)
ta_run_mock.assert_called_once_with(1, cutoff=time_for_this_task)
# Case 1. Check that function raises no error when statustype == success.
# ta.run() returns status, cost, runtime, and additional info.
ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test"
raised = False
try:
auto._do_dummy_prediction(datamanager, 1)
except ValueError:
raised = True
self.assertFalse(raised, 'Exception raised')
# Case 2. Check that if statustype returned by ta.run() != success,
# the function raises error.
ta_run_mock.return_value = StatusType.CRASHED, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.CRASHED '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.ABORT, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.ABORT '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.TIMEOUT, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.TIMEOUT '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.MEMOUT, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.MEMOUT '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.CAPPED, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.CAPPED '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
@unittest.mock.patch('autosklearn.smbo.AutoMLSMBO.run_smbo')
def test_exceptions_inside_log_in_smbo(self, smbo_run_mock):
        # Make sure that any exception raised during the AutoML fit due to
        # SMAC is properly captured in a log file
backend_api = self._create_backend('test_exceptions_inside_log')
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
automl = autosklearn.automl.AutoML(
backend_api,
20,
5,
metric=accuracy,
)
output_file = 'test_exceptions_inside_log.log'
setup_logger(output_file=output_file)
logger = get_logger('test_exceptions_inside_log')
        # Create a custom exception to prevent other errors from slipping in
class MyException(Exception):
pass
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
# The first call is on dummy predictor failure
message = str(np.random.randint(100)) + '_run_smbo'
smbo_run_mock.side_effect = MyException(message)
with unittest.mock.patch('autosklearn.automl.AutoML._get_logger') as mock:
mock.return_value = logger
with self.assertRaises(MyException):
automl.fit(
X_train,
Y_train,
task=MULTICLASS_CLASSIFICATION,
)
with open(output_file) as f:
self.assertTrue(message in f.read())
# Cleanup
os.unlink(output_file)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_load_best_individual_model(self):
backend_api = self._create_backend('test_fit')
for metric in [log_loss, balanced_accuracy]:
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=metric,
)
with unittest.mock.patch(
'autosklearn.ensemble_builder.EnsembleBuilder.run'
) as mock_ensemble_run:
mock_ensemble_run.side_effect = MemoryError
automl.fit(
X_train, Y_train, task=MULTICLASS_CLASSIFICATION,
)
# A memory error occurs in the ensemble construction
self.assertIsNone(automl._backend.load_ensemble(automl._seed))
# The load model is robust to this and loads the best model
automl._load_models()
self.assertIsNotNone(automl.ensemble_)
# Just 1 model is there for ensemble and all weight must be on it
get_models_with_weights = automl.get_models_with_weights()
self.assertEqual(len(get_models_with_weights), 1)
self.assertEqual(get_models_with_weights[0][0], 1.0)
# Match a toy dataset
if metric._sign < 0:
self.assertLessEqual(automl.score(X_test, Y_test), 0.2)
else:
self.assertGreaterEqual(automl.score(X_test, Y_test), 0.8)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fail_if_feat_type_on_pandas_input(self):
"""We do not support feat type when pandas
is provided as an input
"""
backend_api = self._create_backend('test_fail_feat_pandas')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]})
y_train = [1, 0]
with self.assertRaisesRegex(ValueError,
"feat_type cannot be provided when using pandas"):
automl.fit(
X_train, y_train,
task=BINARY_CLASSIFICATION,
feat_type=['Categorical', 'Numerical'],
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fail_if_dtype_changes_automl(self):
"""We do not support changes in the input type.
        Once an estimator is fitted, the input data type should not change
"""
backend_api = self._create_backend('test_fail_feat_typechange')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]})
y_train = [1, 0]
automl.InputValidator.validate(X_train, y_train, is_classification=True)
with self.assertRaisesRegex(ValueError,
"Auto-sklearn previously received features of type"):
automl.fit(
X_train.to_numpy(), y_train,
task=BINARY_CLASSIFICATION,
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
if __name__ == "__main__":
unittest.main()
|
[
"numpy.array",
"autosklearn.util.logging_.get_logger",
"unittest.main",
"unittest.mock.patch",
"autosklearn.pipeline.util.get_dataset",
"os.path.split",
"smac.facade.roar_facade.ROAR",
"os.unlink",
"pandas.DataFrame",
"autosklearn.data.xy_data_manager.XYDataManager",
"numpy.allclose",
"unittest.mock.Mock",
"pickle.load",
"os.path.dirname",
"time.time",
"os.path.join",
"os.getcwd",
"autosklearn.automl.AutoML",
"numpy.random.randint",
"smac.scenario.scenario.Scenario",
"autosklearn.util.logging_.setup_logger"
] |
[((774, 799), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (789, 799), False, 'import os\n'), ((14263, 14335), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.evaluation.ExecuteTaFuncWithQueue.run"""'], {}), "('autosklearn.evaluation.ExecuteTaFuncWithQueue.run')\n", (14282, 14335), False, 'import unittest\n'), ((18352, 18411), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.smbo.AutoMLSMBO.run_smbo"""'], {}), "('autosklearn.smbo.AutoMLSMBO.run_smbo')\n", (18371, 18411), False, 'import unittest\n'), ((23669, 23684), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23682, 23684), False, 'import unittest\n'), ((1255, 1287), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {'spec': 'Backend'}), '(spec=Backend)\n', (1273, 1287), False, 'import unittest\n'), ((1488, 1508), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (1506, 1508), False, 'import unittest\n'), ((1760, 1786), 'autosklearn.automl.AutoML', 'AutoML', (['backend_api', '(20)', '(5)'], {}), '(backend_api, 20, 5)\n', (1766, 1786), False, 'from autosklearn.automl import AutoML\n'), ((1811, 1831), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (1829, 1831), False, 'import unittest\n'), ((2910, 2930), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (2928, 2930), False, 'import unittest\n'), ((4412, 4437), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (4429, 4437), True, 'import autosklearn.pipeline.util as putil\n'), ((5206, 5231), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (5223, 5231), True, 'import autosklearn.pipeline.util as putil\n'), ((7685, 7710), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (7702, 7710), True, 'import autosklearn.pipeline.util as putil\n'), ((8027, 8041), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (8039, 8041), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((8067, 8094), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_fit_roar"""'], {}), "('test_fit_roar')\n", (8077, 8094), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((9910, 9935), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (9927, 9935), True, 'import autosklearn.pipeline.util as putil\n'), ((9986, 10071), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""datamanager.pkl"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn',\n 'datamanager.pkl')\n", (9998, 10071), False, 'import os\n'), ((10313, 10327), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (10325, 10327), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((10351, 10384), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_automl_outputs"""'], {}), "('test_automl_outputs')\n", (10361, 10384), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((12053, 12138), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""start_time_100"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn', 'start_time_100'\n )\n", (12065, 12138), False, 'import os\n'), ((14526, 14551), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (14543, 14551), True, 
'import autosklearn.pipeline.util as putil\n'), ((15203, 15217), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (15215, 15217), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((15241, 15290), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_fail_if_dummy_prediction_fails"""'], {}), "('test_fail_if_dummy_prediction_fails')\n", (15251, 15290), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((18983, 19020), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {'output_file': 'output_file'}), '(output_file=output_file)\n', (18995, 19020), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((19038, 19078), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_exceptions_inside_log"""'], {}), "('test_exceptions_inside_log')\n", (19048, 19078), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((19250, 19275), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (19267, 19275), True, 'import autosklearn.pipeline.util as putil\n'), ((19897, 19919), 'os.unlink', 'os.unlink', (['output_file'], {}), '(output_file)\n', (19906, 19919), False, 'import os\n'), ((22167, 22207), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 1], 'c': [1, 2]}"], {}), "({'a': [1, 1], 'c': [1, 2]})\n", (22179, 22207), True, 'import pandas as pd\n'), ((23115, 23155), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 1], 'c': [1, 2]}"], {}), "({'a': [1, 1], 'c': [1, 2]})\n", (23127, 23155), True, 'import pandas as pd\n'), ((2233, 2252), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2241, 2252), True, 'import numpy as np\n'), ((7380, 7403), 'smac.scenario.scenario.Scenario', 'Scenario', (['scenario_dict'], {}), '(scenario_dict)\n', (7388, 7403), False, 'from smac.scenario.scenario import Scenario\n'), ((7423, 7500), 'smac.facade.roar_facade.ROAR', 'ROAR', ([], {'scenario': 'scenario', 'rng': 'seed', 'tae_runner': 'ta', 'tae_runner_kwargs': 'ta_kwargs'}), '(scenario=scenario, rng=seed, tae_runner=ta, tae_runner_kwargs=ta_kwargs)\n', (7427, 7500), False, 'from smac.facade.roar_facade import ROAR\n'), ((10714, 10729), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (10725, 10729), False, 'import pickle\n'), ((11416, 11506), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""predictions_ensemble"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn',\n 'predictions_ensemble')\n", (11428, 11506), False, 'import os\n'), ((11619, 11710), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""models"""', '"""100.*.model"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn', 'models',\n '100.*.model')\n", (11631, 11710), False, 'import os\n'), ((11823, 11898), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""ensembles"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn', 'ensembles')\n", (11835, 11898), False, 'import os\n'), ((12836, 12859), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['name'], {}), '(name)\n', (12853, 12859), True, 'import autosklearn.pipeline.util as putil\n'), ((12886, 12984), 'autosklearn.data.xy_data_manager.XYDataManager', 'XYDataManager', (['X_train', 'Y_train', 'X_test', 'Y_test'], {'task': 'task', 'dataset_name': 'name', 'feat_type': 'None'}), '(X_train, Y_train, X_test, Y_test, task=task, 
dataset_name=\n name, feat_type=None)\n', (12899, 12984), False, 'from autosklearn.data.xy_data_manager import XYDataManager\n'), ((13277, 13291), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (13289, 13291), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((13319, 13358), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_do_dummy_predictions"""'], {}), "('test_do_dummy_predictions')\n", (13329, 13358), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((19462, 19522), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.automl.AutoML._get_logger"""'], {}), "('autosklearn.automl.AutoML._get_logger')\n", (19481, 19522), False, 'import unittest\n'), ((20234, 20259), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (20251, 20259), True, 'import autosklearn.pipeline.util as putil\n'), ((10758, 10797), 'numpy.allclose', 'np.allclose', (["D.data['X_train']", 'X_train'], {}), "(D.data['X_train'], X_train)\n", (10769, 10797), True, 'import numpy as np\n'), ((12304, 12315), 'time.time', 'time.time', ([], {}), '()\n', (12313, 12315), False, 'import time\n'), ((19353, 19375), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (19370, 19375), True, 'import numpy as np\n'), ((20490, 20561), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.ensemble_builder.EnsembleBuilder.run"""'], {}), "('autosklearn.ensemble_builder.EnsembleBuilder.run')\n", (20509, 20561), False, 'import unittest\n'), ((2182, 2201), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2190, 2201), True, 'import numpy as np\n'), ((11112, 11174), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn')\n", (11124, 11174), False, 'import os\n'), ((13960, 14086), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""predictions_ensemble"""', '"""predictions_ensemble_1_1_0.0.npy"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn',\n 'predictions_ensemble', 'predictions_ensemble_1_1_0.0.npy')\n", (13972, 14086), False, 'import os\n'), ((13828, 13839), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13837, 13839), False, 'import os\n'), ((6617, 6638), 'os.path.split', 'os.path.split', (['m_file'], {}), '(m_file)\n', (6630, 6638), False, 'import os\n')]
|
"""
    This implements an abstract base class Ring.
Rationale:
    The goal is to separate the datatype specification from the algorithms and containers, for the following reasons:
    1) It allows the algorithms to be used directly *without* overhead. E.g. calling mul(z.data, x.data, y.data)
       has much less overhead than z = x.__mul__(y). data is to be kept as close as possible to
       machine primitives, e.g. data is an array or a tuple of arrays.
    2) Potential reuse of an algorithm in several datatypes.
    3) It is relatively easy to connect high-performance algorithms to a very high-level abstract description.
       For instance, most programming languages allow calling C functions. Therefore, the algorithms
       should be given as void fcn(int A, double B, ...)
For instance, the datatype is a truncated Taylor polynomial R[t]/<t^D> of the class Foo.
The underlying container is a simple array of doubles.
"""
import numpy
class Ring(object):
"""
An abstract base class in an attempt to follow the DRY principle.
It implements the algebraic class of a ring as defined on
http://en.wikipedia.org/wiki/Ring_%28mathematics%29
The idea is that the set is described in data and the operations +,* etc.
are implemented as functions that operate on the data.
    E.g. for the factor ring of the integers modulo 4, with x.data = 3 and y.data = 2,
    z = add(x,y) is implemented as
    def add(x,y):
        return self.__class__((x.data + y.data) % 4)
    and one obtains z.data = 1
Warning:
Since this class is only of little value it may be deprecated in the future.
"""
data = NotImplementedError()
def totype(self, x):
"""
        Tries to convert x to an object of this class.
        Works for: scalar x, numpy.ndarray x.
        Remark:
        At the moment, a scalar x is expanded as a Ring with the same degree as self.
        The reason is a missing implementation that works for graded rings of different degrees.
        Once such implementations exist, this function should be adapted.
"""
if numpy.isscalar(x):
xdata = self.__class__.__zeros_like__(self.data)
self.__class__.__scalar_to_data__(xdata, x)
return self.__class__(xdata)
elif isinstance(x, numpy.ndarray):
raise NotImplementedError('sorry, not implemented just yet')
elif not isinstance(x, self.__class__):
            raise NotImplementedError('Cannot convert x\n type(x) = %s but expected type(x) = %s' % (str(type(x)), str(self.__class__)))
else:
return x
def __add__(self, rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.add(retval.data, self.data, rhs.data)
return retval
def __sub__(self, rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.sub(retval.data, self.data, rhs.data)
return retval
def __mul__(self,rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.mul(retval.data, self.data, rhs.data)
return retval
def __truediv__(self,rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.div(retval.data, self.data, rhs.data)
return retval
def __radd__(self, lhs):
return self + lhs
def __rmul__(self, lhs):
return self * lhs
def zeros_like(self):
return self.__class__(self.__class__.__zeros_like__(self.data))
def __str__(self):
return str(self.data)
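# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original module; the name
# ZMod4 is made up). It shows one way the abstract Ring interface could be
# realised for the factor ring Z/4Z from the class docstring, using a numpy
# array as mutable `data` so that the in-place algorithms (add, sub, mul)
# expected by Ring work as intended.
class ZMod4(Ring):

    def __init__(self, data):
        self.data = numpy.asarray(data, dtype=int) % 4

    @classmethod
    def __zeros_like__(cls, data):
        return numpy.zeros_like(data)

    @classmethod
    def __scalar_to_data__(cls, xdata, x):
        # fill the preallocated container in place
        xdata[...] = int(x) % 4

    @classmethod
    def add(cls, z, x, y):
        z[...] = (x + y) % 4

    @classmethod
    def sub(cls, z, x, y):
        z[...] = (x - y) % 4

    @classmethod
    def mul(cls, z, x, y):
        z[...] = (x * y) % 4

# Example: str(ZMod4(3) + ZMod4(2)) == '1', matching the docstring example.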
|
[
"numpy.isscalar"
] |
[((2205, 2222), 'numpy.isscalar', 'numpy.isscalar', (['x'], {}), '(x)\n', (2219, 2222), False, 'import numpy\n')]
|
import pandas as pd
import numpy as np
import csv
import urllib.request
import json
from datetime import datetime
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
import web_scrapers
import os
def load_real_estate_data(filename, state_attr, state):
df = pd.read_csv(filename, encoding="ISO-8859-1")
df = df.loc[df[state_attr] == state]
return df
def load_data(filenames):
df_list=[]
for i in range(0, len(filenames)):
df = pd.read_csv(filenames[i], encoding="ISO-8859-1")
df_list.append(df)
return df_list
def create_zipcode_list(filenames):
    zipcodes = {}  # maps a zipcode to two lists: zipcodes within 5 miles, and zipcodes 5-10 miles away
zip_list = []
for i in range(0, len(filenames)):
with open(filenames[i], 'r', encoding='utf-8-sig') as f:
reader = csv.reader(f)
your_list = list(reader)
for z in range(0, len(your_list)):
zipcodes[your_list[z][0]] = [], []
zip_list.append(your_list[z][0])
return zipcodes, zip_list
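# Illustrative shape of the returned values (the zipcodes shown are hypothetical):
#   zipcodes == {'94103': ([], []), '94105': ([], []), ...}  # the two lists are filled in later
#   zip_list == ['94103', '94105', ...]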
def wrangle_real_estate_data(df, zip_list, drop_columns):
df = df[df['RegionName'].isin(zip_list)]
df = df.drop(drop_columns, axis=1)
return df
def wrangle_IPO_data(df, zip_list):
df['Date Filed'] = pd.to_datetime(df['Date Filed'], format='%Y-%m-%d')
df['Lockup Expiration Date'] = pd.to_datetime(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y')
df = df[df['Zipcode'].isin(zip_list)]
df = df.drop(['Lockup Expiration Date'], axis=1)
df['Lockup Expiration Date'] = df['Date Filed'] + timedelta(days=180)
df = df[df['Date Filed']> df['Date Filed'].min()+ timedelta(days=366)]
return df
def wrangle_census_data(df_census_econ, df_census_dem, zip_list, census_econ_columns, census_dem_columns):
df_census_econ.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_econ.rename(
columns={'Percent; EMPLOYMENT STATUS - Civilian labor force - Unemployment Rate': 'Unemployment Rate'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Less than $10,000': 'l10000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $10,000 to $14,999': 'l15000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $15,000 to $24,999': 'l25000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; COMMUTING TO WORK - Mean travel time to work (minutes)': 'Mean Travel Time to Work Estimate (minutes)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $200,000 or more': 'Percent of Households with Income Greater than $200,000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Median household income (dollars)': 'Median Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Mean household income (dollars)': 'Mean Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Per capita income (dollars)': 'Per Capita Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; HEALTH INSURANCE COVERAGE - Civilian noninstitutionalized population - No health insurance coverage': 'Percent of Population with no Health Insurance Coverage'},
inplace=True)
df_census_econ.rename(columns={
'Percent; PERCENTAGE OF FAMILIES AND PEOPLE WHOSE INCOME IN THE PAST 12 MONTHS IS BELOW THE POVERTY LEVEL - All people': 'Percent of People whose Income in the Past 12 months has been Below Poverty Level'},
inplace=True)
df_census_econ['l10000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l10000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l10000'] = df_census_econ['l10000'].astype(float)
df_census_econ['l15000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l15000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l15000'] = df_census_econ['l15000'].astype(float)
df_census_econ['l25000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l25000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l25000'] = df_census_econ['l25000'].astype(float)
df_census_econ["Percent of Households With Income Less Than $24,999"] = df_census_econ['l10000'] + df_census_econ[
'l15000'] + df_census_econ['l25000']
df_census_econ = df_census_econ.filter(census_econ_columns)
df_census_dem.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_dem.rename(columns={'Estimate; SEX AND AGE - Median age (years)': 'Median Age'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - Under 18 years': 'Percent of People under 18 years of age'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 65 years and over': 'Percent of People 65 years and over'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Male': 'Percent of Males'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Female': 'Percent of Females'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race)': 'Percent of People who are Hispanic'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - White alone': 'Percent of People who are White'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Black or African American alone': 'Percent of People who are Black or African American'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Asian alone': 'Percent of People who are Asian'},
inplace=True)
df_census_dem = df_census_dem.filter(census_dem_columns)
# filter data to only Silicon Valley + San Francisco Zip Codes
df_census_dem = df_census_dem[df_census_dem['Zipcode'].isin(zip_list)]
df_census_econ = df_census_econ[df_census_econ['Zipcode'].isin(zip_list)]
return df_census_econ, df_census_dem
def wrangle_real_estate_headers(df):
'''
run before joining dataframes so keys match
df_sale_counts_by_zip_silicon_valley.columns = df_sale_counts_by_zip_silicon_valley.columns.str.replace('Sales Counts ', '')
df_sale_counts_by_zip_silicon_valley = df_sale_counts_by_zip_silicon_valley.add_prefix('Sales Counts ')
df_sale_counts_by_zip_silicon_valley.rename(columns = {'Sales Counts RegionName':'Zipcode'}, inplace=True)
'''
df.columns = df.columns.str.replace('All Homes ', '')
df = df.add_prefix('All Homes ')
df.rename(columns={'All Homes RegionName': 'Zipcode'}, inplace=True)
return df
def wrangle_ipo_headers(df):
df.rename(columns={'Ticker': 'Symbol'}, inplace=True)
df["Found"] = df["Found"].astype(dtype=np.int64)
return df
def join_data(df1, df2, key, join_type):
df = df1.set_index(key).merge(df2, on=key, how=join_type)
return df
def merge_data(df1, df2, key):
df = pd.merge(df1, df2, on=key, how='inner')
return df
def df_replace(df, replace_list):
for i in range(0, len(replace_list)):
df = df.replace([replace_list[i]], [''], regex=True)
return df
def drop_columns_and_nans(df, drop_columns, nan_columns):
    df = df.drop(drop_columns, axis=1)
for i in range(0, len(nan_columns)):
df.drop_duplicates(subset=nan_columns[i], keep='first', inplace=True)
return df
def calculate_distance_between_zips(zipcode, min_radius, max_radius):
# api-endpoint
URL_base = "https://api.zip-codes.com/ZipCodesAPI.svc/1.0/FindZipCodesInRadius?zipcode="
URL = URL_base + zipcode + '&minimumradius=' + min_radius + '&maximumradius=' + max_radius + '&key=<KEY>'
# sending get request and saving the response as response object
contents = urllib.request.urlopen(URL).read()
# printing the output
zipcodes_nearby = []
print(json.loads(contents))
for i in range(1, len(json.loads(contents)['DataList'])):
zipcodes_nearby.append(json.loads(contents)['DataList'][i]['Code'])
return zipcodes_nearby
def create_zipcode_distances_dictionary(zipcodes, zip_list):
'''
    ***DON'T RUN IF THESE ARE ALREADY CREATED***
currently stored as data/processed/zipcodes_within_radius.txt
'''
print(len(zip_list))
for i in range(0, len(zip_list)):
zipcodes[zip_list[i]] = calculate_distance_between_zips(zip_list[i], '0', '5'), calculate_distance_between_zips(
zip_list[i], '5', '10')
return zipcodes
def create_text_file_from_dictionary(filename, dictionary):
'''
with open('data/processed/zipcodes_within_radius.txt', 'w') as json_file:
json.dump(zipcodes, json_file)
'''
with open(filename, 'w') as json_file:
json.dump(dictionary, json_file)
return dictionary
def export_dataframe_to_dictionary(df, name):
filename = 'data/processed/' + name + '.csv'
export_csv = df.to_csv(filename, index=True, header=True) # Don't forget to add '.csv' at the end of the path
def update_zipcodes_dict(zipcodes, zip_list):
exists = os.path.isfile('../data/processed/zipcodes_within_radius.txt')
if not exists:
zipcodes = create_zipcode_distances_dictionary(zipcodes, zip_list)
create_text_file_from_dictionary('../data/processed/zipcodes_within_radius.txt', zipcodes)
else:
zipcodes = {}
with open('../data/processed/zipcodes_within_radius.txt', 'r') as f:
zipcodes = json.load(f)
return zipcodes
def create_IPO_an_Zipcode_dataframe(census_econ_cols, census_dem_cols, df_ipo, df_zip, zipcodes):
if 'Zipcode' in census_econ_cols:
census_econ_cols.remove('Zipcode')
if 'Zipcode' in census_dem_cols:
census_dem_cols.remove('Zipcode')
ipo_header_list = list(df_ipo.columns.values) +census_dem_cols+census_econ_cols + ['All Homes Date Filed',
'All Homes Lockup Expiration Date',
'All Homes 1 Year Before Date Filed',
'All Homes 2 Years After Date Filed']
'''
Distance from IPO = estimate is .2 if in the same zipcode as IPO
= estimate is 0.5 if not in same zip code as IPO and less than 5 miles from zipcode to IPO
= estimate is 1 if greater than 5 and less than 10 miles from zipcode to IPO
'''
new_df_list = []
for index, row in df_ipo.iterrows():
ipo_zipcode = str(row['Zipcode'])
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(ipo_zipcode)]
headerList = join_IPO_and_Zip_Data(row['Date Filed'], row['Lockup Expiration Date'], census_econ_cols,census_dem_cols)
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = .2
dictionary['Zipcode for Distance'] = ipo_zipcode
new_df_list.append(dictionary)
within_5miles = zipcodes[ipo_zipcode][0]
within_10miles = zipcodes[ipo_zipcode][1]
for i in range(0, len(within_5miles)):
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_5miles[i])]
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = .5
dictionary['Zipcode for Distance'] = within_5miles[i]
new_df_list.append(dictionary)
for j in range(0, len(within_10miles)):
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_10miles[j])]
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = 1
dictionary['Zipcode for Distance'] = within_10miles[j]
new_df_list.append(dictionary)
ipo_final_df = pd.DataFrame(new_df_list)
ipo_final_df.dropna(subset=['Median Age'], how='all', inplace=True)
ipo_final_df.dropna(subset=['All Homes Date Filed'], how='all', inplace=True)
return ipo_final_df
def normalize_IPO_an_Zipcode_dataframe(normalization_list, df_ipo):
df_ipo = df_ipo.replace(['--'], [''], regex=True)
df_ipo = df_ipo.replace(r'^\s*$', np.nan, regex=True)
df_ipo = df_ipo.replace(['\,'], [''], regex=True)
df_ipo = df_ipo.replace(['\+'], [''], regex=True)
scaler = MinMaxScaler()
df_ipo[normalization_list] = scaler.fit_transform(df_ipo[normalization_list])
return df_ipo
def join_IPO_and_Zip_Data(IPO_Date_Filed, IPO_Lockup_Expiration_Date, census_econ_cols, census_dem_cols):
filtered_columns = census_dem_cols +census_econ_cols # remove 'zipcode'
ipo_month_filed = IPO_Date_Filed.month
ipo_year_filed = IPO_Date_Filed.year
AllHomes_header_filed = 'All Homes ' + str(ipo_year_filed) + '-' + str(ipo_month_filed).zfill(2)
ipo_month = IPO_Lockup_Expiration_Date.month
ipo_year = IPO_Lockup_Expiration_Date.year
AllHomes_header_lockup = 'All Homes ' + str(ipo_year) + '-' + str(ipo_month).zfill(2)
AllHomes_header_filed_1_yr_ago = 'All Homes ' + str(int(ipo_year_filed) - 1) + '-' + str(ipo_month_filed).zfill(2)
AllHomes_header_filed_2_yr = 'All Homes ' + str(int(ipo_year_filed) + 2) + '-' + str(ipo_month_filed).zfill(2)
filtered_columns = filtered_columns + [AllHomes_header_filed, AllHomes_header_lockup,
AllHomes_header_filed_1_yr_ago,
AllHomes_header_filed_2_yr]
return filtered_columns
def update_ipo_list(year, start_month, end_month):
web_scrapers.add_new_ipo_data_to_csv('../data/processed/1997-04_2019_full_ipo_data.csv', year, start_month, end_month)
df_ipo_list = load_data(['../data/processed/1997-04_2019_full_ipo_data.csv', '../data/raw/ipo_ritter_data.csv'])
zipcodes, zip_list = create_zipcode_list(
['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv',
'../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv'])
df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list)
df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1])
df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left')
df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK'])
df_ipo.to_csv("../data/processed/df_ipo.csv", index=True)
def main():
df_real_estate = load_real_estate_data('../data/raw/Zip_Zhvi_AllHomes.csv', 'State', 'CA')
# data processing to load all IPO Data between 1997 and present data. This data has been scraped using code from src/web_scrapers.py
df_ipo_list = load_data(['../data/processed/df_ipo.csv', '../data/raw/ipo_ritter_data.csv'])
df_census_list = load_data(['../data/raw/zip_census_bureau_economic_characteristics_2017.csv', '../data/raw/zip_census_bureau_age_race_2017.csv'])
zipcodes, zip_list = create_zipcode_list(
['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv',
'../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv'])
df_real_estate = wrangle_real_estate_data(df_real_estate, zip_list,['City', 'State', 'Metro', 'CountyName', 'SizeRank'])
df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list)
census_econ_columns = ['Zipcode',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999']
census_dem_columns = ['Zipcode',
'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian']
df_census_econ, df_census_dem = wrangle_census_data(df_census_list[0], df_census_list[1], zip_list,
census_econ_columns, census_dem_columns)
df_real_estate = wrangle_real_estate_headers(df_real_estate)
df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1])
df_ipo_ritter = df_ipo_ritter.drop(['Found'], axis=1)
df_census = join_data(df_census_econ, df_census_dem, 'Zipcode', 'inner')
df_zip = merge_data(df_census, df_real_estate, 'Zipcode')
df_zip = df_replace(df_zip, ['\+', '\,'])
print(df_zip['All Homes 2019-05'])
df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left')
df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK'])
df_ipo['Found'] = 2019.0 - df_ipo['Found']
normalization_list = ['Offer Amount', 'Number of Employees', 'Found', 'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999']
zipcodes = update_zipcodes_dict(zipcodes, zip_list)
df_ipo_all = create_IPO_an_Zipcode_dataframe(census_econ_columns, census_dem_columns, df_ipo, df_zip, zipcodes)
df_ipo_all.to_csv("../data/processed/df_ipo_all.csv", index=False)
if __name__ == "__main__":
print("we are wrangling data")
#update_ipo_list(2019, 6, 7)
main()
|
[
"json.loads",
"pandas.read_csv",
"pandas.merge",
"json.dump",
"web_scrapers.add_new_ipo_data_to_csv",
"os.path.isfile",
"json.load",
"numpy.array",
"csv.reader",
"pandas.DataFrame",
"datetime.timedelta",
"sklearn.preprocessing.MinMaxScaler",
"pandas.to_datetime"
] |
[((290, 334), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'encoding': '"""ISO-8859-1"""'}), "(filename, encoding='ISO-8859-1')\n", (301, 334), True, 'import pandas as pd\n'), ((1285, 1336), 'pandas.to_datetime', 'pd.to_datetime', (["df['Date Filed']"], {'format': '"""%Y-%m-%d"""'}), "(df['Date Filed'], format='%Y-%m-%d')\n", (1299, 1336), True, 'import pandas as pd\n'), ((1372, 1457), 'pandas.to_datetime', 'pd.to_datetime', (["df['Lockup Expiration Date']"], {'errors': '"""coerce"""', 'format': '"""%m/%d/%Y"""'}), "(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y'\n )\n", (1386, 1457), True, 'import pandas as pd\n'), ((7975, 8014), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'on': 'key', 'how': '"""inner"""'}), "(df1, df2, on=key, how='inner')\n", (7983, 8014), True, 'import pandas as pd\n'), ((10121, 10183), 'os.path.isfile', 'os.path.isfile', (['"""../data/processed/zipcodes_within_radius.txt"""'], {}), "('../data/processed/zipcodes_within_radius.txt')\n", (10135, 10183), False, 'import os\n'), ((13340, 13365), 'pandas.DataFrame', 'pd.DataFrame', (['new_df_list'], {}), '(new_df_list)\n', (13352, 13365), True, 'import pandas as pd\n'), ((13849, 13863), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (13861, 13863), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((15077, 15204), 'web_scrapers.add_new_ipo_data_to_csv', 'web_scrapers.add_new_ipo_data_to_csv', (['"""../data/processed/1997-04_2019_full_ipo_data.csv"""', 'year', 'start_month', 'end_month'], {}), "(\n '../data/processed/1997-04_2019_full_ipo_data.csv', year, start_month,\n end_month)\n", (15113, 15204), False, 'import web_scrapers\n'), ((485, 533), 'pandas.read_csv', 'pd.read_csv', (['filenames[i]'], {'encoding': '"""ISO-8859-1"""'}), "(filenames[i], encoding='ISO-8859-1')\n", (496, 533), True, 'import pandas as pd\n'), ((1602, 1621), 'datetime.timedelta', 'timedelta', ([], {'days': '(180)'}), '(days=180)\n', (1611, 1621), False, 'from datetime import timedelta\n'), ((8924, 8944), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (8934, 8944), False, 'import json\n'), ((9794, 9826), 'json.dump', 'json.dump', (['dictionary', 'json_file'], {}), '(dictionary, json_file)\n', (9803, 9826), False, 'import json\n'), ((838, 851), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (848, 851), False, 'import csv\n'), ((10509, 10521), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10518, 10521), False, 'import json\n'), ((1677, 1696), 'datetime.timedelta', 'timedelta', ([], {'days': '(366)'}), '(days=366)\n', (1686, 1696), False, 'from datetime import timedelta\n'), ((8972, 8992), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (8982, 8992), False, 'import json\n'), ((11934, 11954), 'numpy.array', 'np.array', (['row.values'], {}), '(row.values)\n', (11942, 11954), True, 'import numpy as np\n'), ((12499, 12519), 'numpy.array', 'np.array', (['row.values'], {}), '(row.values)\n', (12507, 12519), True, 'import numpy as np\n'), ((12993, 13013), 'numpy.array', 'np.array', (['row.values'], {}), '(row.values)\n', (13001, 13013), True, 'import numpy as np\n'), ((9039, 9059), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (9049, 9059), False, 'import json\n')]
|
__author__ = '<NAME> - www.tonybeltramelli.com'
# scripted agents taken from PySC2, credits to DeepMind
# https://github.com/deepmind/pysc2/blob/master/pysc2/agents/scripted_agent.py
import numpy as np
import uuid
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_SCREEN_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_SCREEN_SELECTED = features.SCREEN_FEATURES.selected.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
class ScriptedAgent(base_agent.BaseAgent):
def step(self, obs):
super(ScriptedAgent, self).step(obs)
# we expand dims because keras wants 4 dims for convolutions
# observation = np.expand_dims(obs.observation["screen"][_SCREEN_PLAYER_RELATIVE], axis=3)
screens = [obs.observation["screen"][_SCREEN_PLAYER_RELATIVE],
obs.observation["screen"][_SCREEN_SELECTED]]
observation = np.stack(screens, axis=2)
if self.game == "beacon":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
if not neutral_y.any():
action = _NO_OP
params = []
else:
target = [int(neutral_x.mean()), int(neutral_y.mean())]
action = _MOVE_SCREEN
params = [[0], target]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "mineral":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
player_y, player_x = (player_relative == 1).nonzero()
if not neutral_y.any() or not player_y.any():
action = _NO_OP
params = []
else:
action = _MOVE_SCREEN
index_x = np.argmin(neutral_x)
index_y = np.argmin(neutral_y)
index = index_x if neutral_x[index_x] + neutral_y[index_x] < neutral_x[index_y] + neutral_y[index_y] else index_y
target = [neutral_x[index], neutral_y[index]]
params = [[0], target]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "minerals":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
player_y, player_x = (player_relative == 1).nonzero()
if not neutral_y.any() or not player_y.any():
action = _NO_OP
params = []
else:
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = np.linalg.norm(np.array(player) - np.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
action = _MOVE_SCREEN
params = [[0], closest]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "roaches":
if _ATTACK_SCREEN in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
roach_y, roach_x = (player_relative == _PLAYER_HOSTILE).nonzero()
if not roach_y.any():
action = _NO_OP
params = [_NOT_QUEUED]
else:
index = np.argmax(roach_y)
target = [roach_x[index], roach_y[index]]
action = _ATTACK_SCREEN
params = [_NOT_QUEUED, target]
elif _SELECT_ARMY in obs.observation["available_actions"]:
action = _SELECT_ARMY
params = [_SELECT_ALL]
else:
action = _NO_OP
params = [_NOT_QUEUED]
self.states.append(np.array([observation, obs.observation["available_actions"], action, params]))
if len(self.states) == 64:
new_file_name = str(uuid.uuid1())
np.save("dataset_{}/{}".format(self.game, new_file_name), np.array(self.states))
self.states = []
return actions.FunctionCall(action, params)
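    # Note (descriptive comment added for clarity, not in the original code):
    # each step appends the tuple (observation, available_actions, action,
    # params) to self.states; once 64 steps are collected they are written to
    # dataset_<game>/<uuid>.npy and the buffer is cleared.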
class AgentRoaches(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "roaches"
self.states = []
class AgentBeacon(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "beacon"
self.states = []
class AgentMineral(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "mineral"
self.states = []
class AgentMinerals(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "minerals"
self.states = []
|
[
"pysc2.lib.actions.FunctionCall",
"pysc2.agents.base_agent.BaseAgent.__init__",
"numpy.argmax",
"uuid.uuid1",
"numpy.stack",
"numpy.array",
"numpy.argmin"
] |
[((1168, 1193), 'numpy.stack', 'np.stack', (['screens'], {'axis': '(2)'}), '(screens, axis=2)\n', (1176, 1193), True, 'import numpy as np\n'), ((5072, 5108), 'pysc2.lib.actions.FunctionCall', 'actions.FunctionCall', (['action', 'params'], {}), '(action, params)\n', (5092, 5108), False, 'from pysc2.lib import actions\n'), ((5178, 5213), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5207, 5213), False, 'from pysc2.agents import base_agent\n'), ((5337, 5372), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5366, 5372), False, 'from pysc2.agents import base_agent\n'), ((5496, 5531), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5525, 5531), False, 'from pysc2.agents import base_agent\n'), ((5657, 5692), 'pysc2.agents.base_agent.BaseAgent.__init__', 'base_agent.BaseAgent.__init__', (['self'], {}), '(self)\n', (5686, 5692), False, 'from pysc2.agents import base_agent\n'), ((4771, 4848), 'numpy.array', 'np.array', (["[observation, obs.observation['available_actions'], action, params]"], {}), "([observation, obs.observation['available_actions'], action, params])\n", (4779, 4848), True, 'import numpy as np\n'), ((4918, 4930), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4928, 4930), False, 'import uuid\n'), ((5003, 5024), 'numpy.array', 'np.array', (['self.states'], {}), '(self.states)\n', (5011, 5024), True, 'import numpy as np\n'), ((2432, 2452), 'numpy.argmin', 'np.argmin', (['neutral_x'], {}), '(neutral_x)\n', (2441, 2452), True, 'import numpy as np\n'), ((2483, 2503), 'numpy.argmin', 'np.argmin', (['neutral_y'], {}), '(neutral_y)\n', (2492, 2503), True, 'import numpy as np\n'), ((4330, 4348), 'numpy.argmax', 'np.argmax', (['roach_y'], {}), '(roach_y)\n', (4339, 4348), True, 'import numpy as np\n'), ((3567, 3583), 'numpy.array', 'np.array', (['player'], {}), '(player)\n', (3575, 3583), True, 'import numpy as np\n'), ((3586, 3597), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (3594, 3597), True, 'import numpy as np\n')]
|
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
from collections import defaultdict
import numpy as np
import time
import random
import hashlib
import tensorflow as tf
from tensorflow import keras
from ..abstractions import display
class TunerStats(object):
"""Track tuner statistics."""
def __init__(self):
self.num_generated_models = 0 # overall number of instances generated
self.num_invalid_models = 0 # how many models didn't work
self.num_oversized_models = 0 # num models with params> max_params
def summary(self, extended=False):
display.subsection('Tuning stats')
display.display_settings(self.get_config())
def get_config(self):
return {
'num_generated_models': self.num_generated_models,
'num_invalid_models': self.num_invalid_models,
'num_oversized_models': self.num_oversized_models
}
@classmethod
def from_config(cls, config):
stats = cls()
stats.num_generated_models = config['num_generated_models']
stats.num_invalid_models = config['num_invalid_models']
stats.num_oversized_models = config['num_oversized_models']
return stats
def get_max_epochs_and_steps(fit_args, fit_kwargs):
if fit_args:
x = tf.nest.flatten(fit_args)[0]
else:
x = tf.nest.flatten(fit_kwargs.get('x'))[0]
batch_size = fit_kwargs.get('batch_size', 32)
if hasattr(x, '__len__'):
max_steps = math.ceil(float(len(x)) / batch_size)
else:
max_steps = fit_kwargs.get('steps')
max_epochs = fit_kwargs.get('epochs', 1)
return max_epochs, max_steps
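# Worked example (illustrative): fitting on an x with len(x) == 1000 and
# batch_size == 32 yields max_steps == math.ceil(1000 / 32) == 32, while
# max_epochs falls back to 1 when 'epochs' is not given in fit_kwargs.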
class TunerCallback(keras.callbacks.Callback):
def __init__(self, tuner, trial, execution):
self.tuner = tuner
self.trial = trial
self.execution = execution
def on_epoch_begin(self, epoch, logs=None):
self.tuner.on_epoch_begin(
self.execution, self.model, epoch, logs=logs)
def on_batch_begin(self, batch, logs=None):
self.tuner.on_batch_begin(self.execution, self.model, batch, logs)
def on_batch_end(self, batch, logs=None):
self.tuner.on_batch_end(self.execution, self.model, batch, logs)
def on_epoch_end(self, epoch, logs=None):
self.tuner.on_epoch_end(
self.execution, self.model, epoch, logs=logs)
class Display(object):
def __init__(self, host):
self.host = host
self.cpu_usage = []
self.gpu_usage = []
self.batch_history = defaultdict(list)
self.epoch_pbar = None
def on_execution_begin(self, trial, execution, model):
# new model summary
if len(trial.executions) == 1:
display.section('New model')
trial.summary()
# execution info if needed
if trial.max_executions > 1:
display.subsection('Execution %d/%d' %
(len(trial.executions),
trial.max_executions))
def on_trial_end(self,
averaged_metrics,
best_metrics,
objective,
remaining_trials,
max_trials):
# train summary
current = averaged_metrics
best = best_metrics
rows = [['Name', 'Best model', 'Current model']]
for name in best.names:
best_value = round(best.get_best_value(name), 4)
current_value = round(current.get_best_value(name), 4)
row = [name, best_value, current_value]
if name == objective:
if best_value == current_value:
row = display.colorize_row(row, 'green')
else:
row = display.colorize_row(row, 'red')
rows.append(row)
display.display_table(rows)
# Tuning budget exhausted
if remaining_trials < 1:
display.highlight('Hypertuning complete - results in %s' %
self.host.results_dir)
# TODO: final summary
else:
display.highlight('%d/%d trials left' %
(remaining_trials, max_trials))
def on_epoch_begin(self, execution, model, epoch, logs=None):
# reset counters
        self.epoch_history = defaultdict(list)
        self.batch_history = defaultdict(list)
self.gpu_usage = []
self.cpu_usage = []
# epoch bar
self.epoch_pbar = display.progress_bar(
total=execution.max_steps,
leave=True,
unit='steps')
def on_epoch_end(self, execution, model, epoch, logs=None):
# compute stats
final_epoch_postfix = {}
for m, v in logs.items():
final_epoch_postfix[m] = round(v, 4)
# epoch bar
self.epoch_pbar.set_postfix(final_epoch_postfix)
self.epoch_pbar.close()
def on_batch_end(self, execution, model, batch, logs=None):
logs = logs or {}
self.epoch_pbar.update(1)
# computing metric statistics
for k, v in logs.items():
self.batch_history[k].append(v)
avg_metrics = self._avg_metrics(self.batch_history)
self.epoch_pbar.set_postfix(avg_metrics)
# create bar desc with updated statistics
description = ''
host_status = self.host.get_status()
if len(host_status['gpu']):
gpu_usage = [float(gpu['usage']) for gpu in host_status['gpu']]
gpu_usage = int(np.average(gpu_usage))
self.gpu_usage.append(gpu_usage)
description += '[GPU:%3s%%]' % int(np.average(self.gpu_usage))
self.cpu_usage.append(int(host_status['cpu']['usage']))
description += '[CPU:%3s%%]' % int(np.average(self.cpu_usage))
description += 'Epoch %s/%s' % (execution.epochs_seen + 1,
execution.max_epochs)
self.epoch_pbar.set_description(description)
def _avg_metrics(self, metrics):
agg_metrics = {}
for metric_name, values in metrics.items():
if metric_name == 'batch' or metric_name == 'size':
continue
agg_metrics[metric_name] = '%.4f' % np.average(values)
return agg_metrics
def generate_trial_id():
    s = str(time.time()) + str(random.randint(1, 10**7))
return hashlib.sha256(s.encode('utf-8')).hexdigest()[:32]
def format_execution_id(i, executions_per_trial):
execution_id_length = math.ceil(
math.log(executions_per_trial, 10))
execution_id_template = '%0' + str(execution_id_length) + 'd'
execution_id = execution_id_template % i
return execution_id
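# Worked example (illustrative): with executions_per_trial == 12,
# math.ceil(math.log(12, 10)) == 2, so format_execution_id(3, 12) returns '03'.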
@contextlib.contextmanager
def maybe_distribute(distribution_strategy):
if distribution_strategy is None:
yield
else:
with distribution_strategy.scope():
yield
|
[
"numpy.average",
"math.log",
"collections.defaultdict",
"tensorflow.nest.flatten",
"time.time",
"random.randint"
] |
[((3247, 3264), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3258, 3264), False, 'from collections import defaultdict\n'), ((5053, 5070), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5064, 5070), False, 'from collections import defaultdict\n'), ((7212, 7246), 'math.log', 'math.log', (['executions_per_trial', '(10)'], {}), '(executions_per_trial, 10)\n', (7220, 7246), False, 'import math\n'), ((2008, 2033), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['fit_args'], {}), '(fit_args)\n', (2023, 2033), True, 'import tensorflow as tf\n'), ((7010, 7021), 'time.time', 'time.time', ([], {}), '()\n', (7019, 7021), False, 'import time\n'), ((7029, 7058), 'random.randint', 'random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (7043, 7058), False, 'import random\n'), ((6212, 6233), 'numpy.average', 'np.average', (['gpu_usage'], {}), '(gpu_usage)\n', (6222, 6233), True, 'import numpy as np\n'), ((6463, 6489), 'numpy.average', 'np.average', (['self.cpu_usage'], {}), '(self.cpu_usage)\n', (6473, 6489), True, 'import numpy as np\n'), ((6925, 6943), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (6935, 6943), True, 'import numpy as np\n'), ((6327, 6353), 'numpy.average', 'np.average', (['self.gpu_usage'], {}), '(self.gpu_usage)\n', (6337, 6353), True, 'import numpy as np\n')]
|
import jax
import elegy
import unittest
import numpy as np
import jax.numpy as jnp
import optax
class MLP(elegy.Module):
"""Standard LeNet-300-100 MLP network."""
n1: int
n2: int
def __init__(self, n1: int = 3, n2: int = 4):
super().__init__()
self.n1 = n1
self.n2 = n2
def call(self, image: jnp.ndarray, training: bool):
x = image.astype(jnp.float32) / 255.0
x = jnp.reshape(x, [x.shape[0], -1])
x = elegy.nn.Linear(self.n1)(x)
x = elegy.nn.BatchNormalization()(x)
x = jax.nn.relu(x)
x = elegy.nn.Linear(self.n2)(x)
x = jax.nn.relu(x)
x = elegy.nn.Linear(10)(x)
return x
class OptimizerTest(unittest.TestCase):
def test_optimizer(self):
optax_op = optax.adam(1e-3)
lr_schedule = lambda step, epoch: step / 3
optimizer = elegy.Optimizer(optax_op, lr_schedule=lr_schedule)
        params = np.random.uniform(size=(3, 4))
        grads = np.random.uniform(size=(3, 4))
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(rng, params)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 2 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 3 / 3)
def test_optimizer_epoch(self):
optax_op = optax.adam(1e-3)
lr_schedule = lambda step, epoch: epoch
optimizer = elegy.Optimizer(
optax_op, lr_schedule=lr_schedule, steps_per_epoch=2
)
        params = np.random.uniform(size=(3, 4))
        grads = np.random.uniform(size=(3, 4))
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(
rng=rng,
net_params=params,
)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
def test_optimizer_chain(self):
optimizer = elegy.Optimizer(
optax.sgd(0.1),
optax.clip(0.5),
)
params = np.zeros(shape=(3, 4))
grads = np.ones(shape=(3, 4)) * 100_000
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(
rng=rng,
net_params=params,
)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert np.all(-0.5 <= params) and np.all(params <= 0.5)
def test_lr_logging(self):
model = elegy.Model(
module=MLP(n1=3, n2=1),
loss=elegy.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=elegy.metrics.SparseCategoricalAccuracy(),
optimizer=elegy.Optimizer(
optax.adamw(1.0, b1=0.95),
lr_schedule=lambda step, epoch: jnp.array(1e-3),
),
run_eagerly=True,
)
X = np.random.uniform(size=(5, 7, 7))
y = np.random.randint(10, size=(5,))
history = model.fit(
x=X,
y=y,
epochs=1,
steps_per_epoch=1,
batch_size=5,
validation_data=(X, y),
shuffle=True,
verbose=0,
)
assert "lr" in history.history
assert np.allclose(history.history["lr"], 1e-3)
|
[
"elegy.nn.BatchNormalization",
"optax.adamw",
"jax.nn.relu",
"optax.adam",
"elegy.Optimizer",
"elegy.metrics.SparseCategoricalAccuracy",
"elegy.nn.Linear",
"numpy.allclose",
"numpy.ones",
"optax.sgd",
"elegy.RNGSeq",
"optax.clip",
"jax.numpy.reshape",
"jax.numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.all",
"elegy.losses.SparseCategoricalCrossentropy"
] |
[((431, 463), 'jax.numpy.reshape', 'jnp.reshape', (['x', '[x.shape[0], -1]'], {}), '(x, [x.shape[0], -1])\n', (442, 463), True, 'import jax.numpy as jnp\n'), ((561, 575), 'jax.nn.relu', 'jax.nn.relu', (['x'], {}), '(x)\n', (572, 575), False, 'import jax\n'), ((629, 643), 'jax.nn.relu', 'jax.nn.relu', (['x'], {}), '(x)\n', (640, 643), False, 'import jax\n'), ((788, 805), 'optax.adam', 'optax.adam', (['(0.001)'], {}), '(0.001)\n', (798, 805), False, 'import optax\n'), ((877, 927), 'elegy.Optimizer', 'elegy.Optimizer', (['optax_op'], {'lr_schedule': 'lr_schedule'}), '(optax_op, lr_schedule=lr_schedule)\n', (892, 927), False, 'import elegy\n'), ((946, 971), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (963, 971), True, 'import numpy as np\n'), ((988, 1013), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (1005, 1013), True, 'import numpy as np\n'), ((1028, 1044), 'elegy.RNGSeq', 'elegy.RNGSeq', (['(42)'], {}), '(42)\n', (1040, 1044), False, 'import elegy\n'), ((1727, 1744), 'optax.adam', 'optax.adam', (['(0.001)'], {}), '(0.001)\n', (1737, 1744), False, 'import optax\n'), ((1813, 1882), 'elegy.Optimizer', 'elegy.Optimizer', (['optax_op'], {'lr_schedule': 'lr_schedule', 'steps_per_epoch': '(2)'}), '(optax_op, lr_schedule=lr_schedule, steps_per_epoch=2)\n', (1828, 1882), False, 'import elegy\n'), ((1923, 1948), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (1940, 1948), True, 'import numpy as np\n'), ((1965, 1990), 'numpy.random.uniform', 'np.random.uniform', (['(3, 4)'], {}), '((3, 4))\n', (1982, 1990), True, 'import numpy as np\n'), ((2005, 2021), 'elegy.RNGSeq', 'elegy.RNGSeq', (['(42)'], {}), '(42)\n', (2017, 2021), False, 'import elegy\n'), ((2932, 2954), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, 4)'}), '(shape=(3, 4))\n', (2940, 2954), True, 'import numpy as np\n'), ((3017, 3033), 'elegy.RNGSeq', 'elegy.RNGSeq', (['(42)'], {}), '(42)\n', (3029, 3033), False, 'import elegy\n'), ((3749, 3782), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(5, 7, 7)'}), '(size=(5, 7, 7))\n', (3766, 3782), True, 'import numpy as np\n'), ((3795, 3827), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(5,)'}), '(10, size=(5,))\n', (3812, 3827), True, 'import numpy as np\n'), ((4121, 4162), 'numpy.allclose', 'np.allclose', (["history.history['lr']", '(0.001)'], {}), "(history.history['lr'], 0.001)\n", (4132, 4162), True, 'import numpy as np\n'), ((476, 500), 'elegy.nn.Linear', 'elegy.nn.Linear', (['self.n1'], {}), '(self.n1)\n', (491, 500), False, 'import elegy\n'), ((516, 545), 'elegy.nn.BatchNormalization', 'elegy.nn.BatchNormalization', ([], {}), '()\n', (543, 545), False, 'import elegy\n'), ((589, 613), 'elegy.nn.Linear', 'elegy.nn.Linear', (['self.n2'], {}), '(self.n2)\n', (604, 613), False, 'import elegy\n'), ((656, 675), 'elegy.nn.Linear', 'elegy.nn.Linear', (['(10)'], {}), '(10)\n', (671, 675), False, 'import elegy\n'), ((2859, 2873), 'optax.sgd', 'optax.sgd', (['(0.1)'], {}), '(0.1)\n', (2868, 2873), False, 'import optax\n'), ((2887, 2902), 'optax.clip', 'optax.clip', (['(0.5)'], {}), '(0.5)\n', (2897, 2902), False, 'import optax\n'), ((2971, 2992), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 4)'}), '(shape=(3, 4))\n', (2978, 2992), True, 'import numpy as np\n'), ((3246, 3268), 'numpy.all', 'np.all', (['(-0.5 <= params)'], {}), '(-0.5 <= params)\n', (3252, 3268), True, 'import numpy as np\n'), ((3273, 3294), 'numpy.all', 'np.all', (['(params <= 0.5)'], {}), '(params <= 0.5)\n', 
(3279, 3294), True, 'import numpy as np\n'), ((3409, 3469), 'elegy.losses.SparseCategoricalCrossentropy', 'elegy.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (3451, 3469), False, 'import elegy\n'), ((3491, 3532), 'elegy.metrics.SparseCategoricalAccuracy', 'elegy.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (3530, 3532), False, 'import elegy\n'), ((3589, 3614), 'optax.adamw', 'optax.adamw', (['(1.0)'], {'b1': '(0.95)'}), '(1.0, b1=0.95)\n', (3600, 3614), False, 'import optax\n'), ((3664, 3680), 'jax.numpy.array', 'jnp.array', (['(0.001)'], {}), '(0.001)\n', (3673, 3680), True, 'import jax.numpy as jnp\n')]
|
import glob
import os
import torch
from PIL import Image
from tqdm import tqdm
from ssd.config import cfg
from ssd.data.datasets import COCODataset, VOCDataset
from ssd.modeling.predictor import Predictor
from ssd.modeling.vgg_ssd import build_ssd_model
import argparse
import numpy as np
from ssd.utils.viz import draw_bounding_boxes
def run_demo(cfg, weights_file, iou_threshold, score_threshold, images_dir, output_dir, dataset_type):
if dataset_type == "voc":
class_names = VOCDataset.class_names
elif dataset_type == 'coco':
class_names = COCODataset.class_names
else:
raise NotImplementedError('Not implemented now.')
device = torch.device(cfg.MODEL.DEVICE)
model = build_ssd_model(cfg)
model.load(weights_file)
print('Loaded weights from {}.'.format(weights_file))
model = model.to(device)
predictor = Predictor(cfg=cfg,
model=model,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
device=device)
cpu_device = torch.device("cpu")
image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for image_path in tqdm(image_paths):
image = Image.open(image_path).convert("RGB")
image = np.array(image)
output = predictor.predict(image)
boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
drawn_image = draw_bounding_boxes(image, boxes, labels, scores, class_names).astype(np.uint8)
image_name = os.path.basename(image_path)
Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
def main():
parser = argparse.ArgumentParser(description="SSD Demo.")
parser.add_argument(
"--config-file",
default="configs/ssd300_voc0712.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--weights", default='weights/ssd300_voc0712_mAP77.83.pth',type=str, help="Trained weights.")
parser.add_argument("--iou_threshold", type=float, default=0.5)
parser.add_argument("--score_threshold", type=float, default=0.5)
parser.add_argument("--images_dir", default='demo', type=str, help='Specify a image dir to do prediction.')
parser.add_argument("--output_dir", default='demo/result', type=str, help='Specify a image dir to save predicted images.')
parser.add_argument("--dataset_type", default="voc", type=str, help='Specify dataset type. Currently support voc and coco.')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
print(args)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
print("Loaded configuration file {}".format(args.config_file))
#with open(args.config_file, "r") as cf:
# config_str = "\n" + cf.read()
# print(config_str)
#print("Running with config:\n{}".format(cfg))
run_demo(cfg=cfg,
weights_file=args.weights,
iou_threshold=args.iou_threshold,
score_threshold=args.score_threshold,
images_dir=args.images_dir,
output_dir=args.output_dir,
dataset_type=args.dataset_type)
if __name__ == '__main__':
main()
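# Example invocation (illustrative; the script name and paths mirror the
# argparse defaults above and are otherwise hypothetical):
#   python demo.py --config-file configs/ssd300_voc0712.yaml \
#       --weights weights/ssd300_voc0712_mAP77.83.pth \
#       --images_dir demo --output_dir demo/result --dataset_type voc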
|
[
"os.path.exists",
"PIL.Image.fromarray",
"PIL.Image.open",
"argparse.ArgumentParser",
"ssd.modeling.vgg_ssd.build_ssd_model",
"os.makedirs",
"tqdm.tqdm",
"ssd.config.cfg.freeze",
"os.path.join",
"ssd.modeling.predictor.Predictor",
"numpy.array",
"ssd.config.cfg.merge_from_file",
"ssd.config.cfg.merge_from_list",
"os.path.basename",
"ssd.utils.viz.draw_bounding_boxes",
"torch.device"
] |
[((678, 708), 'torch.device', 'torch.device', (['cfg.MODEL.DEVICE'], {}), '(cfg.MODEL.DEVICE)\n', (690, 708), False, 'import torch\n'), ((721, 741), 'ssd.modeling.vgg_ssd.build_ssd_model', 'build_ssd_model', (['cfg'], {}), '(cfg)\n', (736, 741), False, 'from ssd.modeling.vgg_ssd import build_ssd_model\n'), ((874, 986), 'ssd.modeling.predictor.Predictor', 'Predictor', ([], {'cfg': 'cfg', 'model': 'model', 'iou_threshold': 'iou_threshold', 'score_threshold': 'score_threshold', 'device': 'device'}), '(cfg=cfg, model=model, iou_threshold=iou_threshold,\n score_threshold=score_threshold, device=device)\n', (883, 986), False, 'from ssd.modeling.predictor import Predictor\n'), ((1104, 1123), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1116, 1123), False, 'import torch\n'), ((1283, 1300), 'tqdm.tqdm', 'tqdm', (['image_paths'], {}), '(image_paths)\n', (1287, 1300), False, 'from tqdm import tqdm\n'), ((1764, 1812), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SSD Demo."""'}), "(description='SSD Demo.')\n", (1787, 1812), False, 'import argparse\n'), ((2835, 2872), 'ssd.config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config_file'], {}), '(args.config_file)\n', (2854, 2872), False, 'from ssd.config import cfg\n'), ((2877, 2907), 'ssd.config.cfg.merge_from_list', 'cfg.merge_from_list', (['args.opts'], {}), '(args.opts)\n', (2896, 2907), False, 'from ssd.config import cfg\n'), ((2912, 2924), 'ssd.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (2922, 2924), False, 'from ssd.config import cfg\n'), ((1153, 1186), 'os.path.join', 'os.path.join', (['images_dir', '"""*.jpg"""'], {}), "(images_dir, '*.jpg')\n", (1165, 1186), False, 'import os\n'), ((1200, 1226), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1214, 1226), False, 'import os\n'), ((1236, 1259), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1247, 1259), False, 'import os\n'), ((1372, 1387), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1380, 1387), True, 'import numpy as np\n'), ((1628, 1656), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (1644, 1656), False, 'import os\n'), ((1699, 1735), 'os.path.join', 'os.path.join', (['output_dir', 'image_name'], {}), '(output_dir, image_name)\n', (1711, 1735), False, 'import os\n'), ((1318, 1340), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1328, 1340), False, 'from PIL import Image\n'), ((1527, 1589), 'ssd.utils.viz.draw_bounding_boxes', 'draw_bounding_boxes', (['image', 'boxes', 'labels', 'scores', 'class_names'], {}), '(image, boxes, labels, scores, class_names)\n', (1546, 1589), False, 'from ssd.utils.viz import draw_bounding_boxes\n'), ((1665, 1693), 'PIL.Image.fromarray', 'Image.fromarray', (['drawn_image'], {}), '(drawn_image)\n', (1680, 1693), False, 'from PIL import Image\n')]
|
"""
Clustar module for fitting-related methods.
This module is designed for the 'ClustarData' object. All listed methods take
an input parameter of a 'ClustarData' object and return a 'ClustarData' object
after processing the method. As a result, all changes are localized within the
'ClustarData' object.
Visit <https://clustar.github.io/> for additional information.
"""
from clustar import graph
from scipy import ndimage, stats
from shapely import affinity, geometry
import numpy as np
def compute_fit(cd):
"""
Computes the normalized bivariate gaussian fit for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
i = 0
while i < len(cd.groups):
group = cd.groups[i]
try:
rv = stats.multivariate_normal([group.stats.x_bar,
group.stats.y_bar],
group.stats.covariance_matrix)
except ValueError:
del cd.groups[i]
continue
bvg = rv.pdf(group.image.pos)
bvg *= np.max(group.image.data) / np.max(bvg)
group.res.data = 1 - (bvg / group.image.data)
group.fit.bvg = bvg
group.fit.rv = rv
i += 1
return cd
def compute_ellipse(cd):
"""
Computes the ellipse parameters and localized residuals for the 'Group'
objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
a = group.stats.x_len / 2
b = group.stats.y_len / 2
theta = np.linspace(0, np.pi * 2, 360)
r = a * b / np.sqrt((b * np.cos(theta)) ** 2 +
(a * np.sin(theta)) ** 2)
xy = np.stack([group.stats.x_bar + r * np.cos(theta),
group.stats.y_bar + r * np.sin(theta)], 1)
ellipse = affinity.rotate(geometry.Polygon(xy),
group.stats.degrees,
(group.stats.x_bar, group.stats.y_bar))
pos = np.array([[i, j] for i in range(group.image.data.shape[0])
for j in range(group.image.data.shape[1])])
inside = np.array([p for p in pos
if ellipse.contains(geometry.Point(p))])
outside = np.array([p for p in pos
if not ellipse.contains(geometry.Point(p))])
group.fit.ellipse = ellipse
group.res.pos = pos
group.res.inside = inside
group.res.outside = outside
return cd
def compute_metrics(cd):
"""
Computes the evaluation metrics for the 'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = group.res
output = np.abs(res.data[res.inside[:, 0], res.inside[:, 1]])
output[output < 0] = 0
output[output > 1] = 1
bias = group.image.data[res.inside[:, 0], res.inside[:, 1]]
group.metrics.standard_deviation = np.std(output)
group.metrics.variance = group.metrics.standard_deviation ** 2
group.metrics.average = np.mean(output)
group.metrics.weighted_average = np.average(output, weights=bias)
group.res.output = output
return cd
def compute_peaks(cd):
"""
Computes the number of peaks along the major and minor axes for the
'Group' objects.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
for group in cd.groups:
res = np.array(group.res.data, copy=True)
res_out = group.res.outside
res[res_out[:, 0], res_out[:, 1]] = 0
r_major = np.abs(ndimage.rotate(res, group.stats.degrees))
r_minor = np.abs(ndimage.rotate(res, group.stats.degrees + 90))
major_idx = graph.critical_points(r_major)
minor_idx = graph.critical_points(r_minor)
major_idx = [major_idx[i] for i in range(len(major_idx))
if i % 2 == 0]
minor_idx = [minor_idx[i] for i in range(len(minor_idx))
if i % 2 == 0]
group.fit.major_peaks = len(major_idx)
group.fit.minor_peaks = len(minor_idx)
group.res.clean = res
return cd
def validate(cd):
"""
Determines which 'Group' objects are flagged for manual review by using
the specified validation parameters.
Parameters
----------
cd : ClustarData
'ClustarData' object required for processing.
Returns
-------
ClustarData
"""
attribute = cd.params.metric.lower()
threshold = cd.params.threshold
for group in cd.groups:
metric = getattr(group.metrics, attribute)
if metric > threshold:
group.flag = True
cd.flag = True
if cd.params.evaluate_peaks and \
((group.fit.major_peaks in [2, 4]) or
(group.fit.minor_peaks in [2, 4])):
group.flag = False
cd.flag = False
return cd
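# --- Illustrative usage sketch (added; not part of the original module). ---
# The functions above form a per-group processing pipeline over a ClustarData
# object; `cd` is assumed to be built elsewhere by the clustar package.
def _example_pipeline(cd):
    """Run fit -> ellipse -> metrics -> peaks -> validation on a ClustarData."""
    cd = compute_fit(cd)      # bivariate gaussian fit per group
    cd = compute_ellipse(cd)  # ellipse boundary and localized residuals
    cd = compute_metrics(cd)  # residual statistics used for validation
    cd = compute_peaks(cd)    # peak counts along the major/minor axes
    cd = validate(cd)         # flag groups exceeding the configured threshold
    return cd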
|
[
"numpy.abs",
"numpy.mean",
"numpy.average",
"scipy.stats.multivariate_normal",
"numpy.max",
"shapely.geometry.Point",
"numpy.array",
"numpy.linspace",
"shapely.geometry.Polygon",
"numpy.cos",
"numpy.std",
"numpy.sin",
"scipy.ndimage.rotate",
"clustar.graph.critical_points"
] |
[((1775, 1805), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(360)'], {}), '(0, np.pi * 2, 360)\n', (1786, 1805), True, 'import numpy as np\n'), ((3074, 3126), 'numpy.abs', 'np.abs', (['res.data[res.inside[:, 0], res.inside[:, 1]]'], {}), '(res.data[res.inside[:, 0], res.inside[:, 1]])\n', (3080, 3126), True, 'import numpy as np\n'), ((3301, 3315), 'numpy.std', 'np.std', (['output'], {}), '(output)\n', (3307, 3315), True, 'import numpy as np\n'), ((3419, 3434), 'numpy.mean', 'np.mean', (['output'], {}), '(output)\n', (3426, 3434), True, 'import numpy as np\n'), ((3476, 3508), 'numpy.average', 'np.average', (['output'], {'weights': 'bias'}), '(output, weights=bias)\n', (3486, 3508), True, 'import numpy as np\n'), ((3888, 3923), 'numpy.array', 'np.array', (['group.res.data'], {'copy': '(True)'}), '(group.res.data, copy=True)\n', (3896, 3923), True, 'import numpy as np\n'), ((4175, 4205), 'clustar.graph.critical_points', 'graph.critical_points', (['r_major'], {}), '(r_major)\n', (4196, 4205), False, 'from clustar import graph\n'), ((4226, 4256), 'clustar.graph.critical_points', 'graph.critical_points', (['r_minor'], {}), '(r_minor)\n', (4247, 4256), False, 'from clustar import graph\n'), ((864, 965), 'scipy.stats.multivariate_normal', 'stats.multivariate_normal', (['[group.stats.x_bar, group.stats.y_bar]', 'group.stats.covariance_matrix'], {}), '([group.stats.x_bar, group.stats.y_bar], group.\n stats.covariance_matrix)\n', (889, 965), False, 'from scipy import ndimage, stats\n'), ((1191, 1215), 'numpy.max', 'np.max', (['group.image.data'], {}), '(group.image.data)\n', (1197, 1215), True, 'import numpy as np\n'), ((1218, 1229), 'numpy.max', 'np.max', (['bvg'], {}), '(bvg)\n', (1224, 1229), True, 'import numpy as np\n'), ((2079, 2099), 'shapely.geometry.Polygon', 'geometry.Polygon', (['xy'], {}), '(xy)\n', (2095, 2099), False, 'from shapely import affinity, geometry\n'), ((4040, 4080), 'scipy.ndimage.rotate', 'ndimage.rotate', (['res', 'group.stats.degrees'], {}), '(res, group.stats.degrees)\n', (4054, 4080), False, 'from scipy import ndimage, stats\n'), ((4107, 4152), 'scipy.ndimage.rotate', 'ndimage.rotate', (['res', '(group.stats.degrees + 90)'], {}), '(res, group.stats.degrees + 90)\n', (4121, 4152), False, 'from scipy import ndimage, stats\n'), ((1963, 1976), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1969, 1976), True, 'import numpy as np\n'), ((2025, 2038), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2031, 2038), True, 'import numpy as np\n'), ((2461, 2478), 'shapely.geometry.Point', 'geometry.Point', (['p'], {}), '(p)\n', (2475, 2478), False, 'from shapely import affinity, geometry\n'), ((1840, 1853), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1846, 1853), True, 'import numpy as np\n'), ((1895, 1908), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1901, 1908), True, 'import numpy as np\n'), ((2577, 2594), 'shapely.geometry.Point', 'geometry.Point', (['p'], {}), '(p)\n', (2591, 2594), False, 'from shapely import affinity, geometry\n')]
|
#!/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2021-09-24
@LastEditTime: 2022-04-17
Adapted from OpenAttack's DCESSubstitute
"""
import random
from typing import NoReturn, List, Any, Optional
import numpy as np
from utils.transformations.base import CharSubstitute
from utils.assets import fetch
from utils.misc import DEFAULTS
__all__ = [
"CharacterDCESSubstitute",
]
class CharacterDCESSubstitute(CharSubstitute):
""" """
__name__ = "CharacterDCESSubstitute"
def __init__(
self, threshold: float, random_one: bool = False, **kwargs: Any
) -> NoReturn:
""" """
super().__init__(**kwargs)
self.threshold = threshold
dces_dict = fetch("dces")
self.descs = dces_dict["descs"]
self.neigh = dces_dict["neigh"]
self.random_one = random_one
def _get_candidates(
self,
word: str,
pos_tag: Optional[str] = None,
num: Optional[int] = None,
) -> List[str]:
""" """
candidate_words = []
if self.random_one:
i = DEFAULTS.RNG.integers(0, len(word))
repl_letters = self._apply_dces(word[i], self.threshold)
if len(repl_letters) > 0:
repl_letter = random.choice(repl_letters)
candidate_word = word[:i] + repl_letter + word[i + 1 :]
candidate_words.append(candidate_word)
else:
for i in range(len(word)):
for repl_letter in self._apply_dces(word[i], self.threshold):
candidate_word = word[:i] + repl_letter + word[i + 1 :]
candidate_words.append(candidate_word)
if num:
candidate_words = candidate_words[:num]
return candidate_words
def _apply_dces(self, char: str, threshold: float) -> List[str]:
""" """
c = get_hex_string(char)
if c in self.descs:
description = self.descs[c]["description"]
else:
return []
tokens = description.split(" ")
case = "unknown"
identifiers = []
for token in tokens:
if len(token) == 1:
identifiers.append(token)
elif token == "SMALL":
case = "SMALL"
elif token == "CAPITAL":
case = "CAPITAL"
matches = []
match_ids = []
for i in identifiers:
for idx, val in self.descs.items():
desc_toks = val["description"].split(" ")
if (
i in desc_toks
and not np.any(np.in1d(desc_toks, _disallowed))
and not np.any(np.in1d(idx, _disallowed_codes))
and not int(idx, 16) > 30000
):
desc_toks = np.array(desc_toks)
case_descriptor = desc_toks[
(desc_toks == "SMALL") | (desc_toks == "CAPITAL")
]
if len(case_descriptor) > 1:
case_descriptor = case_descriptor[0]
elif len(case_descriptor) == 0:
case = "unknown"
if case == "unknown" or case == case_descriptor:
match_ids.append(idx)
matches.append(val["vec"])
if len(matches) == 0:
return []
match_vecs = np.stack(matches)
Y = match_vecs
self.neigh.fit(Y)
X = self.descs[c]["vec"].reshape(1, -1)
if Y.shape[0] > threshold:
dists, idxs = self.neigh.kneighbors(X, threshold, return_distance=True)
else:
dists, idxs = self.neigh.kneighbors(X, Y.shape[0], return_distance=True)
probs = dists.flatten()
charcodes = [match_ids[idx] for idx in idxs.flatten()]
chars = []
for idx, charcode in enumerate(charcodes):
if probs[idx] < threshold:
chars.append(chr(int(charcode, 16)))
return chars
@property
def deterministic(self) -> bool:
return not self.random_one
def extra_repr_keys(self) -> List[str]:
return super().extra_repr_keys() + [
"threshold",
"random_one",
]
_disallowed = [
"TAG",
"MALAYALAM",
"BAMUM",
"HIRAGANA",
"RUNIC",
"TAI",
"SUNDANESE",
"BATAK",
"LEPCHA",
"CHAM",
"TELUGU",
"DEVANGARAI",
"BUGINESE",
"MYANMAR",
"LINEAR",
"SYLOTI",
"PHAGS-PA",
"CHEROKEE",
"CANADIAN",
"YI",
"LYCIAN",
"HANGUL",
"KATAKANA",
"JAVANESE",
"ARABIC",
"KANNADA",
"BUHID",
"TAGBANWA",
"DESERET",
"REJANG",
"BOPOMOFO",
"PERMIC",
"OSAGE",
"TAGALOG",
"MEETEI",
"CARIAN",
"UGARITIC",
"ORIYA",
"ELBASAN",
"CYPRIOT",
"HANUNOO",
"GUJARATI",
"LYDIAN",
"MONGOLIAN",
"AVESTAN",
"MEROITIC",
"KHAROSHTHI",
"HUNGARIAN",
"KHUDAWADI",
"ETHIOPIC",
"PERSIAN",
"OSMANYA",
"ELBASAN",
"TIBETAN",
"BENGALI",
"TURKIC",
"THROWING",
"HANIFI",
"BRAHMI",
"KAITHI",
"LIMBU",
"LAO",
"CHAKMA",
"DEVANAGARI",
"ITALIC",
"CJK",
"MEDEFAIDRIN",
"DIAMOND",
"SAURASHTRA",
"ADLAM",
"DUPLOYAN",
]
_disallowed_codes = [
"1F1A4",
"A7AF",
]
def get_hex_string(ch: str) -> str:
return "{:04x}".format(ord(ch)).upper()
|
[
"random.choice",
"numpy.in1d",
"numpy.stack",
"numpy.array",
"utils.assets.fetch"
] |
[((739, 752), 'utils.assets.fetch', 'fetch', (['"""dces"""'], {}), "('dces')\n", (744, 752), False, 'from utils.assets import fetch\n'), ((3462, 3479), 'numpy.stack', 'np.stack', (['matches'], {}), '(matches)\n', (3470, 3479), True, 'import numpy as np\n'), ((1286, 1313), 'random.choice', 'random.choice', (['repl_letters'], {}), '(repl_letters)\n', (1299, 1313), False, 'import random\n'), ((2851, 2870), 'numpy.array', 'np.array', (['desc_toks'], {}), '(desc_toks)\n', (2859, 2870), True, 'import numpy as np\n'), ((2649, 2680), 'numpy.in1d', 'np.in1d', (['desc_toks', '_disallowed'], {}), '(desc_toks, _disallowed)\n', (2656, 2680), True, 'import numpy as np\n'), ((2717, 2748), 'numpy.in1d', 'np.in1d', (['idx', '_disallowed_codes'], {}), '(idx, _disallowed_codes)\n', (2724, 2748), True, 'import numpy as np\n')]
|
#importing necessary modules
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Data and labels
Xtrain = [[182, 80, 34], [176, 70, 33], [161, 60, 28], [154, 55, 27], [166, 63, 30], [189, 90, 36], [175, 63, 28], [177, 71, 30], [159, 52, 27], [171, 72, 32], [181, 85, 34]]
Ytrain = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']
Xval = [[163, 62, 28], [182, 80, 35], [150, 50, 24], [160, 57, 27], [175, 62, 30], [183, 67, 32], [177, 64, 29], [164, 62, 29], [157, 53, 23], [170, 73, 32], [169, 59, 29]]
Yval = ['female', 'male', 'female', 'female', 'male', 'male', 'female', 'female',
'female', 'male', 'female']
# initializing the ML models
knn = KNeighborsClassifier()
perceptron = Perceptron()
# Fitting the models
knn.fit(Xtrain, Ytrain)
perceptron.fit(Xtrain, Ytrain)
# Testing using our input data
pred_knn = knn.predict(Xval)
acc_knn = accuracy_score(Yval, pred_knn) * 100
print(f'Accuracy for knn: {acc_knn}')
pred_perceptron = perceptron.predict(Xval)
acc_perceptron = accuracy_score(Yval, pred_perceptron) * 100
print(f'Accuracy for perceptron: {acc_perceptron}')
# The best classifier out of the two models
index = np.argmax([acc_knn, acc_perceptron])
#np.argmax returns the index of the maximum value (here, the more accurate model)
classifiers = {0: 'KNN', 1:'PER'}
print(f'Best gender classifier is {classifiers[index]}')
|
[
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.Perceptron",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.argmax"
] |
[((815, 837), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (835, 837), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((851, 863), 'sklearn.linear_model.Perceptron', 'Perceptron', ([], {}), '()\n', (861, 863), False, 'from sklearn.linear_model import Perceptron\n'), ((1297, 1333), 'numpy.argmax', 'np.argmax', (['[acc_knn, acc_perceptron]'], {}), '([acc_knn, acc_perceptron])\n', (1306, 1333), True, 'import numpy as np\n'), ((1012, 1042), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Yval', 'pred_knn'], {}), '(Yval, pred_knn)\n', (1026, 1042), False, 'from sklearn.metrics import accuracy_score\n'), ((1148, 1185), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Yval', 'pred_perceptron'], {}), '(Yval, pred_perceptron)\n', (1162, 1185), False, 'from sklearn.metrics import accuracy_score\n')]
|
from enum import Enum, auto
import funcy as fn
import numpy as np
from monotone_bipartition import rectangles as mdtr
from monotone_bipartition import refine
EPS = 1e-4
class SearchResultType(Enum):
TRIVIALLY_FALSE = auto()
TRIVIALLY_TRUE = auto()
NON_TRIVIAL = auto()
def diagonal_convex_comb(r):
bot, top = np.array(r.bot), np.array(r.top)
diag = top - bot
return lambda t: bot + t * diag
def binsearch(r, oracle, eps=EPS, find_lambda=False):
"""Binary search over the diagonal of the rectangle.
Returns the lower and upper approximation on the diagonal.
"""
f = diagonal_convex_comb(r)
feval = fn.compose(oracle, f)
lo, hi = 0, 1
# Early termination via bounds checks
if feval(lo):
result_type = SearchResultType.TRIVIALLY_TRUE
hi = 0
elif not feval(hi):
result_type = SearchResultType.TRIVIALLY_FALSE
else:
result_type = SearchResultType.NON_TRIVIAL
mid = lo
while hi - lo > eps:
mid = lo + (hi - lo) / 2
lo, hi = (lo, mid) if feval(mid) else (mid, hi)
if find_lambda:
if result_type == SearchResultType.TRIVIALLY_TRUE:
return result_type, -1
elif result_type == SearchResultType.TRIVIALLY_FALSE:
return result_type, 2
return result_type, (lo+hi)/2
else:
return result_type, mdtr.to_rec(zip(f(lo), f(hi)))
def line_intersect(func, point, tol, *, percent=False):
box_intersect = np.array(point) / max(point)
origin = [0]*len(point)
rec = mdtr.to_rec(zip(origin, box_intersect)) # Compute bounding rec.
return binsearch(rec, func, eps=tol, find_lambda=percent)[1]
def lexicographic_opt(func, ordering, tol):
dim = len(ordering)
assert set(fn.pluck(0, ordering)) == set(range(dim))
tol /= dim # Need to compensate for multiple binsearches.
rec = refine.bounding_box(
domain=mdtr.unit_rec(dim),
oracle=func
)
# If polarity is True, set initial value at bounding.top.
# O.w. use bounding.bot.
base = tuple((rec.top if p else rec.bot)[i] for i, p in sorted(ordering))
res_rec = mdtr.to_rec(zip(base, base))
for idx, polarity in ordering:
oracle = func
rec = mdtr.to_rec(
(0, 1) if i == idx else (p, p) for i, p in enumerate(base)
)
result_type, res_cand = binsearch(rec, oracle, eps=tol)
if result_type == SearchResultType.NON_TRIVIAL:
res_rec = res_cand
base = res_rec.bot
return res_rec
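# --- Illustrative usage sketch (added; not part of the original module). ---
# binsearch() expects a monotone oracle along the rectangle's diagonal:
# False below the boundary, True above it.  The oracle and tolerance below
# are made up for demonstration.
if __name__ == "__main__":
    rec = mdtr.to_rec([(0, 1), (0, 1)])          # the unit square
    oracle = lambda p: p[0] + p[1] >= 1.0       # boundary is the line x + y = 1
    kind, frac = binsearch(rec, oracle, eps=1e-3, find_lambda=True)
    print(kind, frac)                           # NON_TRIVIAL, roughly 0.5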
|
[
"enum.auto",
"funcy.pluck",
"funcy.compose",
"numpy.array",
"monotone_bipartition.rectangles.unit_rec"
] |
[((226, 232), 'enum.auto', 'auto', ([], {}), '()\n', (230, 232), False, 'from enum import Enum, auto\n'), ((254, 260), 'enum.auto', 'auto', ([], {}), '()\n', (258, 260), False, 'from enum import Enum, auto\n'), ((279, 285), 'enum.auto', 'auto', ([], {}), '()\n', (283, 285), False, 'from enum import Enum, auto\n'), ((651, 672), 'funcy.compose', 'fn.compose', (['oracle', 'f'], {}), '(oracle, f)\n', (661, 672), True, 'import funcy as fn\n'), ((332, 347), 'numpy.array', 'np.array', (['r.bot'], {}), '(r.bot)\n', (340, 347), True, 'import numpy as np\n'), ((349, 364), 'numpy.array', 'np.array', (['r.top'], {}), '(r.top)\n', (357, 364), True, 'import numpy as np\n'), ((1500, 1515), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (1508, 1515), True, 'import numpy as np\n'), ((1782, 1803), 'funcy.pluck', 'fn.pluck', (['(0)', 'ordering'], {}), '(0, ordering)\n', (1790, 1803), True, 'import funcy as fn\n'), ((1934, 1952), 'monotone_bipartition.rectangles.unit_rec', 'mdtr.unit_rec', (['dim'], {}), '(dim)\n', (1947, 1952), True, 'from monotone_bipartition import rectangles as mdtr\n')]
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
convolve_grayscale_padding = __import__(
'2-convolve_grayscale_padding').convolve_grayscale_padding
if __name__ == '__main__':
dataset = np.load('../../supervised_learning/data/MNIST.npz')
images = dataset['X_train']
print(images.shape)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
images_conv = convolve_grayscale_padding(images, kernel, (2, 4))
print(images_conv.shape)
plt.imshow(images[0], cmap='gray')
plt.show()
plt.imshow(images_conv[0], cmap='gray')
plt.show()
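    # Added note: assuming convolve_grayscale_padding zero-pads each image by
    # (ph, pw) = (2, 4) on every side and convolves with stride 1, the output
    # shape is (m, h + 2*ph - kh + 1, w + 2*pw - kw + 1); for 28x28 MNIST
    # images and a 3x3 kernel that is (m, 28 + 4 - 3 + 1, 28 + 8 - 3 + 1)
    # = (m, 30, 34).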
|
[
"matplotlib.pyplot.imshow",
"numpy.array",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((223, 274), 'numpy.load', 'np.load', (['"""../../supervised_learning/data/MNIST.npz"""'], {}), "('../../supervised_learning/data/MNIST.npz')\n", (230, 274), True, 'import numpy as np\n'), ((344, 390), 'numpy.array', 'np.array', (['[[1, 0, -1], [1, 0, -1], [1, 0, -1]]'], {}), '([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n', (352, 390), True, 'import numpy as np\n'), ((494, 528), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[0]'], {'cmap': '"""gray"""'}), "(images[0], cmap='gray')\n", (504, 528), True, 'import matplotlib.pyplot as plt\n'), ((533, 543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (541, 543), True, 'import matplotlib.pyplot as plt\n'), ((548, 587), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images_conv[0]'], {'cmap': '"""gray"""'}), "(images_conv[0], cmap='gray')\n", (558, 587), True, 'import matplotlib.pyplot as plt\n'), ((592, 602), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (600, 602), True, 'import matplotlib.pyplot as plt\n')]
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data=pd.read_csv(path)
data.rename(columns={'Total':'Total_Medals'},inplace =True)
data.head(10)
#Code starts here
# --------------
try:
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])
#print(data['Better_Event'])
Total_Count=data['Better_Event'].value_counts()
if(Total_Count[0]>Total_Count[1]):
better_event='Summer'
print(better_event)
print(data)
else:
better_event='Winter'
print(better_event)
except:
print("code Failed")
else:
print("code passed Successfully")
# --------------
#Code starts here
top_countries= data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
#print(top_countries)
def top_ten(Col):
country_list= list((data.nlargest(11,Col)['Country_Name']))
country_list=country_list[1:]
print(country_list)
return country_list
top_10_summer=top_ten('Total_Summer')
top_10_winter =top_ten('Total_Winter')
top_10 =top_ten('Total_Medals')
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print("common",common)
# --------------
#Code starts here
summer_df =data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df =data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1=data[:-1]
data_1['Total_Points']=pd.Series(data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total'])
print(data_1['Total_Points'])
most_points = max(data_1['Total_Points'])
print(most_points)
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(most_points)
print(best_country)
# --------------
#Code starts here
best = pd.DataFrame(data[data['Country_Name']==best_country])
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
# Rotate X-axes labels
plt.xticks(rotation=45)
|
[
"pandas.Series",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"numpy.where",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame"
] |
[((142, 159), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (153, 159), True, 'import pandas as pd\n'), ((2373, 2467), 'pandas.Series', 'pd.Series', (["(data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1['Bronze_Total']\n )"], {}), "(data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1[\n 'Bronze_Total'])\n", (2382, 2467), True, 'import pandas as pd\n'), ((2711, 2767), 'pandas.DataFrame', 'pd.DataFrame', (["data[data['Country_Name'] == best_country]"], {}), "(data[data['Country_Name'] == best_country])\n", (2723, 2767), True, 'import pandas as pd\n'), ((2843, 2870), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (2853, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2898), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (2882, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2947), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2934, 2947), True, 'import matplotlib.pyplot as plt\n'), ((311, 384), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (319, 384), True, 'import numpy as np\n'), ((413, 502), 'numpy.where', 'np.where', (["(data['Total_Summer'] == data['Total_Winter'])", '"""Both"""', "data['Better_Event']"], {}), "(data['Total_Summer'] == data['Total_Winter'], 'Both', data[\n 'Better_Event'])\n", (421, 502), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""User functions to streamline working with selected pymer4 LMER fit
attributes from lme4::lmer and lmerTest for ``fitgrid.lmer`` grids.
"""
import functools
import re
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import fitgrid
from fitgrid.fitgrid import LMERFitGrid
def get_lmer_dfbetas(epochs, factor, **kwargs):
r"""Fit lmers leaving out factor levels one by one, compute DBETAS.
Parameters
----------
epochs : Epochs
Epochs object
factor : str
column name of the factor of interest
**kwargs
keyword arguments to pass on to ``fitgrid.lmer``, like ``RHS``
Returns
-------
dfbetas : pandas.DataFrame
dataframe containing DFBETAS values
Examples
--------
Example calculation showing how to pass in model fitting parameters::
dfbetas = fitgrid.utils.lmer.get_lmer_dfbetas(
epochs=epochs,
factor='subject_id',
            RHS='x + (x|a)'
)
Notes
-----
DFBETAS is computed according to the following formula [NieGroPel2012]_:
.. math::
DFBETAS_{ij} = \frac{\hat{\gamma}_i - \hat{\gamma}_{i(-j)}}{se\left(\hat{\gamma}_{i(-j)}\right)}
for parameter :math:`i` and level :math:`j` of ``factor``.
"""
# get the factor levels
table = epochs.table.reset_index().set_index(
[epochs.epoch_id, epochs.time]
)
levels = table[factor].unique()
# produce epochs tables with each level left out
looo_epochs = (
fitgrid.epochs_from_dataframe(
table[table[factor] != level],
time=epochs.time,
epoch_id=epochs.epoch_id,
channels=epochs.channels,
)
for level in levels
)
# fit lmer on these epochs
fitter = functools.partial(fitgrid.lmer, **kwargs)
grids = map(fitter, looo_epochs)
coefs = (grid.coefs for grid in grids)
# get coefficient estimates and se from leave one out fits
looo_coefs = pd.concat(coefs, keys=levels, axis=1)
looo_estimates = looo_coefs.loc[pd.IndexSlice[:, :, 'Estimate'], :]
looo_se = looo_coefs.loc[pd.IndexSlice[:, :, 'SE'], :]
# get coefficient estimates from regular fit (all levels included)
all_levels_coefs = fitgrid.lmer(epochs, **kwargs).coefs
all_levels_estimates = all_levels_coefs.loc[
pd.IndexSlice[:, :, 'Estimate'], :
]
# drop outer level of index for convenience
for df in (looo_estimates, looo_se, all_levels_estimates):
df.index = df.index.droplevel(level=-1)
# (all_levels_estimate - level_excluded_estimate) / level_excluded_se
dfbetas = all_levels_estimates.sub(looo_estimates, level=1).div(
looo_se, level=1
)
return dfbetas.stack(level=0)
def get_lmer_warnings(lmer_grid):
"""grid the LMERFitGrid lme4::lmer4 warnings by type
lmer warnings are a mishmash of characters, punctuation, and digits, some with
numerical values specific to the message, for instance,
| Model failed to converge with max|grad| = 0.00222262 (tol = 0.002, component 1)
| unable to evaluate scaled gradient
| boundary (singular) fit: see ?isSingular
| np.nan
The warning strings are returned as-is except for stripping
leading and trailing whitespace and the "= N.NNNNNNNN" portion of the
max \|grad\| convergence failure.
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
Returns
-------
warning_grids : dict
A dictionary, the keys are lmer warning strings, each value
is a `pandas.DataFrame` indicator grid where grid.loc[time, channel] == 1 if the
lmer warning == key, otherwise 0.
"""
if not isinstance(lmer_grid, LMERFitGrid):
msg = (
"get_lmer_warnings() must be called on an "
f"LMERFitGrid not {type(lmer_grid)}"
)
raise ValueError(msg)
# In pymer4 0.7.1+ and lme4::lmer 0.22+ warnings come back from
    # lme4::lmer via pymer4 as lists of strings, and each LMERFitGrid
    # cell may have a list of 0, 1, 2, ... warnings. This means
# LMERFitGrid.warnings time index may have missing time stamps (= no
# warnings), a single time stamp (one warning), or duplicate time
# stamps (> 1 warning) and np.nan at channels where there is no
# warning at that timestamp.
# strip reported decimal values so max|grad| convergence failures are one kind
tidy_strings = lmer_grid.warnings.applymap(
lambda x: re.sub(
r"max\|grad\|\s+=\s+\d+\.\d+\s+", "max|grad| ", x
).strip()
if isinstance(x, str)
else x # no warning == np.nan
).rename_axis([lmer_grid.time, "wdx", "_empty"], axis=0)
# the number and types of warning generally vary by time and/or channel
warning_kinds = (
pd.Series(tidy_strings.to_numpy().flatten()).dropna().unique()
)
# collect messy gappy, multiple warnings as a dict of key==warning,
# value==tidy time x channel indicator grid (0, 1)
warning_grids = {}
assert lmer_grid._grid.shape == lmer_grid.has_warning.shape
for warning_kind in warning_kinds:
# empty grid w/ correct shape, row index and columns
warning_grid = pd.DataFrame(
np.zeros(lmer_grid._grid.shape, dtype=int),
index=lmer_grid._grid.index.copy(),
columns=lmer_grid._grid.columns.copy(),
)
# select rows w/ at least one non-na
warning_rows = tidy_strings[tidy_strings == warning_kind].dropna(
axis=0, how="all"
)
assert warning_rows.index.names[0] == lmer_grid._grid.index.name
assert all(
warning_rows.index.get_level_values(0)
== warning_rows.index.get_level_values(0).unique()
)
for rdx, row in warning_rows.iterrows():
warning_grid.loc[rdx[0], :] = (row == warning_kind).astype(int)
assert all(warning_grid.index == lmer_grid._grid.index)
assert all(warning_grid.columns == lmer_grid._grid.columns)
warning_grids[warning_kind] = warning_grid
return warning_grids
def plot_lmer_warnings(lmer_grid, which="each", verbose=True):
"""Raster plot lme4::lmer warning grids
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
which : {"each", "all", or list of str}
select the types of warnings to plot. `each` (default) plots
each type of warning separately. `all` plots one grid showing
where any type of warning occurred. A list of strings searches
the lmer warnings and plots those that match.
verbose : bool, default=True
If `True` warn of failed matches for warnings keywords.
Examples
--------
default, plot each warning grid separately
>>> plot_lmer_warnings(lmer_grid)
one plot shows everywhere there is a warning
>>> plot_lmer_warnings(lmer_grid, which="all")
plot just warnings that match these strings
>>> plot_lmer_warnings(lmer_grid, which=["converge", "singular"])
"""
def _plot_warnings(warning, warning_grid):
# masked array non-values are transparent in pcolormesh
_, axi = plt.subplots(figsize=(12, len(warning_grid.columns) / 2))
axi.set_title(warning)
ylabels = warning_grid.columns
axi.yaxis.set_major_locator(
mpl.ticker.FixedLocator(np.arange(len(ylabels)))
)
axi.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(ylabels))
axi.pcolormesh(
warning_grid.index,
np.arange(len(ylabels)),
np.ma.masked_not_equal(warning_grid.T.to_numpy(), 1),
shading="nearest",
cmap=mpl.colors.ListedColormap(['red']),
)
# validate kwarg
if not (
isinstance(which, str)
or (
isinstance(which, list)
and all((isinstance(wrn, str) for wrn in which))
)
):
raise ValueError(
"The value for which=value must be 'any', 'each', a warning "
f"string pattern to match or list of them, not this: {which}"
)
warning_grids = get_lmer_warnings(lmer_grid)
warning_grids["all"] = lmer_grid.has_warning.astype(int)
keys = None
if which == "all":
keys = ["all"]
elif which == "each":
keys = list(warning_grids.keys())
else:
# lookup matching patterns var so as to not step on original kwarg
patterns = [which] if isinstance(which, str) else which
keys = []
for pattern in patterns:
matches = [key for key in warning_grids if pattern in key]
keys += matches # may be []
if verbose and not matches:
warnings.warn(f"warning pattern '{pattern}' not found")
assert isinstance(keys, list), f"this should be type list: {type(keys)}"
for key in keys:
if verbose:
print(f"{key}")
_plot_warnings(key, warning_grids[key])
if verbose and not keys:
warnings.warn(f"no model warnings match {which}")
|
[
"fitgrid.lmer",
"matplotlib.ticker.FixedFormatter",
"matplotlib.colors.ListedColormap",
"numpy.zeros",
"functools.partial",
"fitgrid.epochs_from_dataframe",
"warnings.warn",
"re.sub",
"pandas.concat"
] |
[((1866, 1907), 'functools.partial', 'functools.partial', (['fitgrid.lmer'], {}), '(fitgrid.lmer, **kwargs)\n', (1883, 1907), False, 'import functools\n'), ((2069, 2106), 'pandas.concat', 'pd.concat', (['coefs'], {'keys': 'levels', 'axis': '(1)'}), '(coefs, keys=levels, axis=1)\n', (2078, 2106), True, 'import pandas as pd\n'), ((1597, 1732), 'fitgrid.epochs_from_dataframe', 'fitgrid.epochs_from_dataframe', (['table[table[factor] != level]'], {'time': 'epochs.time', 'epoch_id': 'epochs.epoch_id', 'channels': 'epochs.channels'}), '(table[table[factor] != level], time=epochs.\n time, epoch_id=epochs.epoch_id, channels=epochs.channels)\n', (1626, 1732), False, 'import fitgrid\n'), ((2333, 2363), 'fitgrid.lmer', 'fitgrid.lmer', (['epochs'], {}), '(epochs, **kwargs)\n', (2345, 2363), False, 'import fitgrid\n'), ((9241, 9290), 'warnings.warn', 'warnings.warn', (['f"""no model warnings match {which}"""'], {}), "(f'no model warnings match {which}')\n", (9254, 9290), False, 'import warnings\n'), ((5412, 5454), 'numpy.zeros', 'np.zeros', (['lmer_grid._grid.shape'], {'dtype': 'int'}), '(lmer_grid._grid.shape, dtype=int)\n', (5420, 5454), True, 'import numpy as np\n'), ((7675, 7709), 'matplotlib.ticker.FixedFormatter', 'mpl.ticker.FixedFormatter', (['ylabels'], {}), '(ylabels)\n', (7700, 7709), True, 'import matplotlib as mpl\n'), ((7918, 7952), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['red']"], {}), "(['red'])\n", (7943, 7952), True, 'import matplotlib as mpl\n'), ((8952, 9007), 'warnings.warn', 'warnings.warn', (['f"""warning pattern \'{pattern}\' not found"""'], {}), '(f"warning pattern \'{pattern}\' not found")\n', (8965, 9007), False, 'import warnings\n'), ((4653, 4717), 're.sub', 're.sub', (['"""max\\\\|grad\\\\|\\\\s+=\\\\s+\\\\d+\\\\.\\\\d+\\\\s+"""', '"""max|grad| """', 'x'], {}), "('max\\\\|grad\\\\|\\\\s+=\\\\s+\\\\d+\\\\.\\\\d+\\\\s+', 'max|grad| ', x)\n", (4659, 4717), False, 'import re\n')]
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import random
import pickle
import json
import fractions
import math
import subprocess
from logging import getLogger
from functools import reduce
from .dataset import DataSet
from .data_sampler import SequentialSampler, ShuffledSampler, DistributedDataSampler
from .tokenization import FullTokenizer
from .squad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, InputFeatures
logger = getLogger(__name__)
def generate_random_features(sequence_length, vocab_length, batch_size):
features = []
for i in range(batch_size):
features.append(InputFeatures(
i,
None,
None,
None,
None,
None,
np.random.randint(0, vocab_length, size=sequence_length),
None,
np.random.randint(0, 2, size=sequence_length),
0,
None,
None,
np.random.randint(0, sequence_length, size=1),
np.random.randint(0, sequence_length, size=1),
None,
np.random.randint(0, sequence_length+1, size=1)
))
return features
class SquadDataLoader(object):
def __init__(self,
features,
sequence_length=None,
batch_size=1,
dtype=np.int32,
sampler=None):
self.features = features
self.batch_size = batch_size
self.dtype = dtype
self.sequence_length = sequence_length
self.sampler = sampler
if sampler is None:
self.sampler = SequentialSampler(features)
self.num_batches = len(self.sampler)//self.batch_size
def __len__(self):
return self.num_batches
def __iter__(self):
self.feature_iterator = iter([self.features[idx] for idx in self.sampler])
return self
def __next__(self):
items = [next(self.feature_iterator) for _ in range(self.batch_size)]
indicies = []
positions = []
segments = []
sequence_mask_idx = []
start_pos = []
end_pos = []
uid = []
for item in items:
indicies.append(item.input_ids)
padding_max = self.sequence_length if self.sequence_length is not None else len(item.input_ids)
padding_length = len(item.input_ids) - item.padding_start_index
position_padding = np.full(padding_length, padding_max)
position_ids = np.arange(0, item.padding_start_index)
positions.append(np.concatenate((position_ids, position_padding)).astype(np.int32))
segments.append(item.segment_ids)
sequence_mask_idx.append(item.padding_start_index)
start_pos.append(item.start_position)
end_pos.append(item.end_position)
uid.append(item.unique_id)
# Including impossible samples during training is under investigation. T12851
# if item.is_impossible:
# logger.warning("Impossible sample exists in the dataset. "
# f"start pos: {item.start_position}, end pos: {item.end_position}")
inputs = []
for i in [indicies, positions, segments, sequence_mask_idx, start_pos, end_pos, uid]:
inputs.append(np.stack(i))
return inputs
class BertDataTransform(object):
'''
Masks the indices that are larger than the vocab_length
'''
def __init__(self, dataloader, vocab_length, sequence_length, embedding_dict, positional_dict, merge_both_embeddings, is_training=True):
self.dataloader = dataloader
self.vocab_length = vocab_length
self.sequence_length = sequence_length
self.is_training = is_training
self.embedding_dict = embedding_dict
self.positional_dict = positional_dict
self.merge_both_embeddings = merge_both_embeddings
def __len__(self):
return len(self.dataloader)
def __iter__(self):
self.dataloader_iterator = iter(self.dataloader)
return self
def __next__(self):
items = next(self.dataloader_iterator)
# Specific BERT Post Processing. TODO: Find a better place for this processing
# The vocab_length may be smaller than the original vocab... In this case with the custom_op
# Out of Bounds indicies over a certain threshold will cause numerical issues.
# 100 is unknown token [UNK]
# 0 in the label is padding
OOB = items[0] >= self.vocab_length
items[0][OOB] = 100
# Force use of uint32 for all inputs.
for i in range(len(items)):
if self.is_training or i < 4:
items[i] = items[i].astype(np.uint32)
if self.embedding_dict is not None:
items[0] = np.take(self.embedding_dict, items[0], 0)
if self.positional_dict is not None:
positional_expanded = np.take(self.positional_dict, items[1], 0)
if self.merge_both_embeddings:
items[0] += positional_expanded
else:
items[1] = positional_expanded
return items
def load_or_cache_features(input_file,
vocab_file,
sequence_length,
is_training=True,
cache_file=None,
overwrite_cache=False,
do_lower_case=False):
if cache_file is None:
cache_file = input_file + f".{sequence_length}.cache"
if os.path.exists(cache_file) and not overwrite_cache:
examples = None
logger.info(f"Loading Cache {cache_file}")
with open(cache_file, "rb") as f:
features = pickle.load(f)
else:
logger.info("Reading Examples")
examples = read_squad_examples(input_file=input_file,
is_training=is_training,
version_2_with_negative=False)
# google-research/bert uses sequence_length 384 with doc_stride 128
# TODO: Find a good value for the doc_stride with sequence_length <384
doc_stride = 128
if sequence_length < 384:
doc_stride = 64
logger.info("Converting to Features")
features = convert_examples_to_features(examples=examples,
tokenizer=FullTokenizer(vocab_file, do_lower_case=do_lower_case),
max_seq_length=sequence_length,
doc_stride=doc_stride,
max_query_length=64,
is_training=is_training)
logger.info(f"Saving Cache {cache_file}")
with open(cache_file, "wb") as f:
pickle.dump(features, f)
return features, examples
class SquadDataSet(DataSet):
def __init__(self,
features,
examples,
input_file,
is_training,
output_dir=None,
evaluate_script=None,
do_lower_case=False,
n_extra=0,
**kwargs):
super().__init__(**kwargs)
self.features = features
self.examples = examples
self.is_training = is_training
self.input_file = input_file
self.output_dir = output_dir
self.do_lower_case = do_lower_case
if not self.is_training and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
# If examples is None, features was loaded from the cache
# So the examples need to be recreated.
if self.examples is None:
self.examples = read_squad_examples(input_file=self.input_file,
is_training=self.is_training,
version_2_with_negative=False)
self.results = []
self.evaluate_script = evaluate_script
self.n_extra = n_extra
def add_results(self, data, logits):
# Results will be batched. Flatten to individual results
start_logits, end_logits = [
logit.reshape(-1, logit.shape[-1]).tolist()
for logit in logits]
for i, unique_id in enumerate(data["uid"]):
self.results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits[i],
end_logits=end_logits[i]
))
def write_predictions(self, epoch=None):
if self.is_training:
raise RuntimeError("Predictions cannot be written for training datasets")
if self.output_dir is None:
raise RuntimeError("Predictions cannot be written when output_dir is None")
suffix = f"_{epoch}" if epoch is not None else ""
predictions_file = os.path.join(self.output_dir, f"predictions{suffix}.json")
nbest_file = os.path.join(self.output_dir, f"nbest_predictions{suffix}.json")
null_log_odds_file = os.path.join(self.output_dir, f"null_odds{suffix}.json")
self.results = self.results[:len(self.results) - self.n_extra]
write_predictions(self.examples,
self.features,
self.results,
20, 30,
self.do_lower_case,
predictions_file,
nbest_file,
null_log_odds_file,
True,
False, 0)
if self.evaluate_script is not None:
evaluation = subprocess.check_output(["python", self.evaluate_script, self.input_file, predictions_file])
evaluation = json.loads(evaluation)
f1 = evaluation["f1"]
exact_match = evaluation["exact_match"]
status_string = f"F1 Score: {f1} | Exact Match: {exact_match}"
if epoch is not None:
status_string = f"Epoch: {epoch:3}{args.epochs - 1} | " + status_string
logger.info(status_string)
def get_bert_dataset(tensor_shapes,
input_file,
output_dir,
sequence_length,
vocab_file,
vocab_length,
batch_size,
batches_per_step,
embedding_dict,
positional_dict,
merge_both_embeddings=False,
replication_factor=1,
accumulation_factor=1,
shuffle=True,
is_training=True,
overwrite_cache=False,
no_drop_remainder=False,
evaluate_script=None,
generated_data=False,
do_lower_case=False,
max_pipeline_stage=1,
seed=0,
mpi_size=1,
mpi_rank=0,
is_distributed=False):
samples_per_step = batch_size * batches_per_step * \
replication_factor * accumulation_factor
div_factor = batch_size * replication_factor * accumulation_factor * batches_per_step
pad = 0
if generated_data:
features = generate_random_features(
sequence_length, vocab_length, samples_per_step)
examples = None
output_dir = None
logger.info("Generating random dataset")
else:
features, examples = load_or_cache_features(
input_file,
vocab_file,
sequence_length,
is_training,
overwrite_cache=overwrite_cache,
do_lower_case=do_lower_case)
if no_drop_remainder and not generated_data:
# dataset will be padded to be divisible by batch-size and samples-per-step
pad = int(np.ceil(len(features)/div_factor)) * div_factor - len(features)
if is_distributed:
sampler = DistributedDataSampler(
features, seed, shuffle,
mpi_size, mpi_rank, padding=False, padding_sub=pad, div_factor=div_factor)
pad = sampler.get_subpadding_size()
elif shuffle:
sampler = ShuffledSampler(features, seed, pad)
else:
sampler = SequentialSampler(features, pad)
if no_drop_remainder and not generated_data:
logger.info(f"no_drop_remainder: Dataset padded by {pad} samples")
dl = SquadDataLoader(
features,
sequence_length=sequence_length,
batch_size=samples_per_step,
sampler=sampler
)
bert_ds = BertDataTransform(
dl,
vocab_length,
sequence_length,
embedding_dict,
positional_dict,
merge_both_embeddings,
is_training=is_training)
if not is_training:
# Add uid to the data dictionary so evaluation script can be run
tensor_shapes += [
("start", None),
("end", None),
("uid", None)]
ds = SquadDataSet(
features,
examples,
input_file,
is_training,
output_dir,
evaluate_script,
do_lower_case=do_lower_case,
n_extra=pad,
loader=bert_ds,
tensor_shapes=tensor_shapes,
batches_per_step=batches_per_step,
replication_factor=replication_factor,
accumulation_factor=accumulation_factor)
return ds
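# --- Illustrative usage sketch (added; not part of the original module). ---
# Exercises the loader on randomly generated features, so no SQuAD file or
# vocab is needed; sequence_length, vocab_length and batch sizes are made up.
if __name__ == "__main__":
    feats = generate_random_features(sequence_length=128, vocab_length=30522,
                                      batch_size=8)
    loader = SquadDataLoader(feats, sequence_length=128, batch_size=4)
    indices, positions, segments, mask_idx, start, end, uid = next(iter(loader))
    print(indices.shape, positions.shape)  # (4, 128) for both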
|
[
"logging.getLogger",
"os.path.exists",
"subprocess.check_output",
"json.loads",
"pickle.dump",
"os.makedirs",
"os.path.join",
"pickle.load",
"numpy.take",
"numpy.stack",
"numpy.random.randint",
"numpy.concatenate",
"numpy.full",
"numpy.arange"
] |
[((1064, 1083), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1073, 1083), False, 'from logging import getLogger\n'), ((6190, 6216), 'os.path.exists', 'os.path.exists', (['cache_file'], {}), '(cache_file)\n', (6204, 6216), False, 'import os\n'), ((9633, 9691), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""predictions{suffix}.json"""'], {}), "(self.output_dir, f'predictions{suffix}.json')\n", (9645, 9691), False, 'import os\n'), ((9713, 9777), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""nbest_predictions{suffix}.json"""'], {}), "(self.output_dir, f'nbest_predictions{suffix}.json')\n", (9725, 9777), False, 'import os\n'), ((9807, 9863), 'os.path.join', 'os.path.join', (['self.output_dir', 'f"""null_odds{suffix}.json"""'], {}), "(self.output_dir, f'null_odds{suffix}.json')\n", (9819, 9863), False, 'import os\n'), ((3051, 3087), 'numpy.full', 'np.full', (['padding_length', 'padding_max'], {}), '(padding_length, padding_max)\n', (3058, 3087), True, 'import numpy as np\n'), ((3115, 3153), 'numpy.arange', 'np.arange', (['(0)', 'item.padding_start_index'], {}), '(0, item.padding_start_index)\n', (3124, 3153), True, 'import numpy as np\n'), ((5440, 5481), 'numpy.take', 'np.take', (['self.embedding_dict', 'items[0]', '(0)'], {}), '(self.embedding_dict, items[0], 0)\n', (5447, 5481), True, 'import numpy as np\n'), ((5561, 5603), 'numpy.take', 'np.take', (['self.positional_dict', 'items[1]', '(0)'], {}), '(self.positional_dict, items[1], 0)\n', (5568, 5603), True, 'import numpy as np\n'), ((6382, 6396), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6393, 6396), False, 'import pickle\n'), ((7512, 7536), 'pickle.dump', 'pickle.dump', (['features', 'f'], {}), '(features, f)\n', (7523, 7536), False, 'import pickle\n'), ((8240, 8283), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {'exist_ok': '(True)'}), '(self.output_dir, exist_ok=True)\n', (8251, 8283), False, 'import os\n'), ((10404, 10500), 'subprocess.check_output', 'subprocess.check_output', (["['python', self.evaluate_script, self.input_file, predictions_file]"], {}), "(['python', self.evaluate_script, self.input_file,\n    predictions_file])\n", (10427, 10500), False, 'import subprocess\n'), ((10522, 10544), 'json.loads', 'json.loads', (['evaluation'], {}), '(evaluation)\n', (10532, 10544), False, 'import json\n'), ((1365, 1421), 'numpy.random.randint', 'np.random.randint', (['(0)', 'vocab_length'], {'size': 'sequence_length'}), '(0, vocab_length, size=sequence_length)\n', (1382, 1421), True, 'import numpy as np\n'), ((1453, 1498), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'sequence_length'}), '(0, 2, size=sequence_length)\n', (1470, 1498), True, 'import numpy as np\n'), ((1563, 1608), 'numpy.random.randint', 'np.random.randint', (['(0)', 'sequence_length'], {'size': '(1)'}), '(0, sequence_length, size=1)\n', (1580, 1608), True, 'import numpy as np\n'), ((1622, 1667), 'numpy.random.randint', 'np.random.randint', (['(0)', 'sequence_length'], {'size': '(1)'}), '(0, sequence_length, size=1)\n', (1639, 1667), True, 'import numpy as np\n'), ((1699, 1748), 'numpy.random.randint', 'np.random.randint', (['(0)', '(sequence_length + 1)'], {'size': '(1)'}), '(0, sequence_length + 1, size=1)\n', (1716, 1748), True, 'import numpy as np\n'), ((3936, 3947), 'numpy.stack', 'np.stack', (['i'], {}), '(i)\n', (3944, 3947), True, 'import numpy as np\n'), ((3183, 3231), 'numpy.concatenate', 'np.concatenate', (['(position_ids, position_padding)'], {}), '(position_ids, position_padding))\n', (3197, 3231), True, 'import numpy as np\n')]
|
import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
device='cuda')
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
'''
coords: 3D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
#return ((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
def hash2d(coords, log2_hashmap_size):
'''
coords: 2D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y = coords[..., 0], coords[..., 1]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663)
def xy2index(xy,resolution):
return xy[...,0]+xy[...,1]*resolution
def get_bbox3d_for_blenderobj(camera_transforms, H, W, near=2.0, far=6.0):
camera_angle_x = float(camera_transforms['camera_angle_x'])
focal = 0.5*W/np.tan(0.5 * camera_angle_x)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
for frame in camera_transforms["frames"]:
c2w = torch.FloatTensor(frame["transform_matrix"])
rays_o, rays_d = get_rays(directions, c2w)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([1.0,1.0,1.0]), torch.tensor(max_bound)+torch.tensor([1.0,1.0,1.0]))
def get_bbox3d_for_llff(poses, hwf, near=0.0, far=1.0):
H, W, focal = hwf
H, W = int(H), int(W)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
poses = torch.FloatTensor(poses)
for pose in poses:
rays_o, rays_d = get_rays(directions, pose)
rays_o, rays_d = get_ndc_rays(H, W, focal, 1.0, rays_o, rays_d)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([0.1,0.1,0.0001]), torch.tensor(max_bound)+torch.tensor([0.1,0.1,0.0001]))
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
#pdb.set_trace()
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
#hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xy = xy2index(voxel_indices_xy,resolution) #(B, 4)
hashed_voxel_indices_xz = xy2index(voxel_indices_xz,resolution) #(B, 4)
hashed_voxel_indices_yz = xy2index(voxel_indices_yz,resolution) #(B, 4)
#print(hashed_voxel_indices_yz.shape)
#pdb.set_trace()
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
camera_transforms = json.load(f)
bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
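    # --- Illustrative continuation (added; not part of the original script). ---
    # Hash a few random points inside the bbox into per-corner voxel indices;
    # the resolution and log2_hashmap_size values are made up, and a CUDA
    # device is assumed because BOX_OFFSETS lives on "cuda".
    box_min, box_max = bounding_box[0].cuda(), bounding_box[1].cuda()
    xyz = box_min + torch.rand(4, 3, device="cuda") * (box_max - box_min)
    vmin, vmax, hashed = get_voxel_vertices(
        xyz, (box_min, box_max), resolution=64, log2_hashmap_size=19)
    print(hashed.shape)  # (4, 8): one hashed index per voxel corner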
|
[
"numpy.tan",
"torch.all",
"ray_utils.get_rays",
"torch.floor",
"ray_utils.get_ndc_rays",
"torch.tensor",
"pdb.set_trace",
"json.load",
"ray_utils.get_ray_directions",
"torch.FloatTensor",
"torch.clamp"
] |
[((137, 231), 'torch.tensor', 'torch.tensor', (['[[[i, j, k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]]'], {'device': '"""cuda"""'}), "([[[i, j, k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],\n    device='cuda')\n", (149, 231), False, 'import torch\n'), ((271, 342), 'torch.tensor', 'torch.tensor', (['[[[i, j] for i in [0, 1] for j in [0, 1]]]'], {'device': '"""cuda"""'}), "([[[i, j] for i in [0, 1] for j in [0, 1]]], device='cuda')\n", (283, 342), False, 'import torch\n'), ((1264, 1295), 'ray_utils.get_ray_directions', 'get_ray_directions', (['H', 'W', 'focal'], {}), '(H, W, focal)\n', (1282, 1295), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2343, 2374), 'ray_utils.get_ray_directions', 'get_ray_directions', (['H', 'W', 'focal'], {}), '(H, W, focal)\n', (2361, 2374), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2472, 2496), 'torch.FloatTensor', 'torch.FloatTensor', (['poses'], {}), '(poses)\n', (2489, 2496), False, 'import torch\n'), ((538, 580), 'torch.tensor', 'torch.tensor', (['((1 << log2_hashmap_size) - 1)'], {}), '((1 << log2_hashmap_size) - 1)\n', (550, 580), False, 'import torch\n'), ((876, 918), 'torch.tensor', 'torch.tensor', (['((1 << log2_hashmap_size) - 1)'], {}), '((1 << log2_hashmap_size) - 1)\n', (888, 918), False, 'import torch\n'), ((1174, 1202), 'numpy.tan', 'np.tan', (['(0.5 * camera_angle_x)'], {}), '(0.5 * camera_angle_x)\n', (1180, 1202), True, 'import numpy as np\n'), ((1442, 1486), 'torch.FloatTensor', 'torch.FloatTensor', (["frame['transform_matrix']"], {}), "(frame['transform_matrix'])\n", (1459, 1486), False, 'import torch\n'), ((1512, 1537), 'ray_utils.get_rays', 'get_rays', (['directions', 'c2w'], {}), '(directions, c2w)\n', (1520, 1537), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2545, 2571), 'ray_utils.get_rays', 'get_rays', (['directions', 'pose'], {}), '(directions, pose)\n', (2553, 2571), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((2597, 2643), 'ray_utils.get_ndc_rays', 'get_ndc_rays', (['H', 'W', 'focal', '(1.0)', 'rays_o', 'rays_d'], {}), '(H, W, focal, 1.0, rays_o, rays_d)\n', (2609, 2643), False, 'from ray_utils import get_rays, get_ray_directions, get_ndc_rays\n'), ((3710, 3725), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3723, 3725), False, 'import pdb\n'), ((3740, 3782), 'torch.clamp', 'torch.clamp', (['xyz'], {'min': 'box_min', 'max': 'box_max'}), '(xyz, min=box_min, max=box_max)\n', (3751, 3782), False, 'import torch\n'), ((5251, 5266), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5264, 5266), False, 'import pdb\n'), ((5281, 5323), 'torch.clamp', 'torch.clamp', (['xyz'], {'min': 'box_min', 'max': 'box_max'}), '(xyz, min=box_min, max=box_max)\n', (5292, 5323), False, 'import torch\n'), ((7637, 7652), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7650, 7652), False, 'import pdb\n'), ((7667, 7709), 'torch.clamp', 'torch.clamp', (['xyz'], {'min': 'box_min', 'max': 'box_max'}), '(xyz, min=box_min, max=box_max)\n', (7678, 7709), False, 'import torch\n'), ((9829, 9841), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9838, 9841), False, 'import json\n'), ((2066, 2089), 'torch.tensor', 'torch.tensor', (['min_bound'], {}), '(min_bound)\n', (2078, 2089), False, 'import torch\n'), ((2090, 2119), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2102, 2119), False, 'import torch\n'), ((2119, 2142), 'torch.tensor', 'torch.tensor', (['max_bound'], {}), '(max_bound)\n', (2131, 2142), False, 'import torch\n'), ((2143, 2172), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2155, 2172), False, 'import torch\n'), ((3164, 3187), 'torch.tensor', 'torch.tensor', (['min_bound'], {}), '(min_bound)\n', (3176, 3187), False, 'import torch\n'), ((3188, 3220), 'torch.tensor', 'torch.tensor', (['[0.1, 0.1, 0.0001]'], {}), '([0.1, 0.1, 0.0001])\n', (3200, 3220), False, 'import torch\n'), ((3220, 3243), 'torch.tensor', 'torch.tensor', (['max_bound'], {}), '(max_bound)\n', (3232, 3243), False, 'import torch\n'), ((3244, 3276), 'torch.tensor', 'torch.tensor', (['[0.1, 0.1, 0.0001]'], {}), '([0.1, 0.1, 0.0001])\n', (3256, 3276), False, 'import torch\n'), ((3563, 3588), 'torch.all', 'torch.all', (['(xyz <= box_max)'], {}), '(xyz <= box_max)\n', (3572, 3588), False, 'import torch\n'), ((3596, 3621), 'torch.all', 'torch.all', (['(xyz >= box_min)'], {}), '(xyz >= box_min)\n', (3605, 3621), False, 'import torch\n'), ((3856, 3896), 'torch.floor', 'torch.floor', (['((xyz - box_min) / grid_size)'], {}), '((xyz - box_min) / grid_size)\n', (3867, 3896), False, 'import torch\n'), ((4000, 4029), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4012, 4029), False, 'import torch\n'), ((5104, 5129), 'torch.all', 'torch.all', (['(xyz <= box_max)'], {}), '(xyz <= box_max)\n', (5113, 5129), False, 'import torch\n'), ((5137, 5162), 'torch.all', 'torch.all', (['(xyz >= box_min)'], {}), '(xyz >= box_min)\n', (5146, 5162), False, 'import torch\n'), ((5397, 5437), 'torch.floor', 'torch.floor', (['((xyz - box_min) / grid_size)'], {}), '((xyz - box_min) / grid_size)\n', (5408, 5437), False, 'import torch\n'), ((5567, 5596), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5579, 5596), False, 'import torch\n'), ((7490, 7515), 'torch.all', 'torch.all', (['(xyz <= box_max)'], {}), '(xyz <= box_max)\n', (7499, 7515), False, 'import torch\n'), ((7523, 7548), 'torch.all', 'torch.all', (['(xyz >= box_min)'], {}), '(xyz >= box_min)\n', (7532, 7548), False, 'import torch\n'), ((7783, 7823), 'torch.floor', 'torch.floor', (['((xyz - box_min) / grid_size)'], {}), '((xyz - box_min) / grid_size)\n', (7794, 7823), False, 'import torch\n'), ((7953, 7982), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (7965, 7982), False, 'import torch\n')]
|
import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
import torch
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
class Trainer(object):
def __init__(self, cfg):
self.cfg = cfg
# Define Saver
self.saver = Saver(cfg)
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.cfg["log_tb_dir"])
self.summary.create_summary()
# Define Dataloader
kwargs = {"num_workers": cfg["num_workers"], "pin_memory": True}
self.train_loader, self.val_loader, self.test_loader, dset = make_data_loader(
cfg, **kwargs)
# read landmark centers
self.id2center = np.array(json.load(
open(osp.join(cfg["data_dir"], "id2centers.json")))).astype(np.float64)
self.coding_book = torch.zeros(
(len(self.id2center), cfg["seg_channel"]), dtype=torch.float32).cuda()
torch.nn.init.xavier_uniform(self.coding_book)
print("coding book size = {}".format(self.coding_book.shape))
# generate color map
unique_label = np.arange(len(self.id2center))
unique_label = unique_label.astype(
np.int64) * 6364136223846793005 + 1442695040888963407
color_map = np.zeros((unique_label.shape[0], 3), np.uint8)
color_map[:, 0] = np.bitwise_and(unique_label, 0xff)
color_map[:, 1] = np.bitwise_and(np.right_shift(unique_label, 4), 0xff)
color_map[:, 2] = np.bitwise_and(np.right_shift(unique_label, 8), 0xff)
self.color_map = np.array(color_map)
self.coding_book = Variable(self.coding_book, requires_grad=True)
# Define network
model = VSNet(backbone=cfg["backbone"],
seg_decoder=cfg["seg_decoder"],
vertex_decoder=cfg["vertex_decoder"],
seg_channel=cfg["seg_channel"],
vertex_channel=cfg["vertex_channel"],
output_stride=cfg["out_stride"],
sync_bn=cfg["sync_bn"])
train_params = [{"params": model.get_1x_lr_params(), "lr": cfg["lr"]},
{"params": model.get_10x_lr_params(),
"lr": cfg["lr"] * 10},
{"params": self.coding_book, "lr": cfg["lr"] * 10}
]
# Define Optimizer
if cfg["optimizer"] == "SGD":
optimizer = torch.optim.SGD(train_params, momentum=cfg["momentum"],
weight_decay=cfg["weight_decay"], nesterov=cfg["nesterov"])
elif cfg["optimizer"] == "Adam":
optimizer = torch.optim.Adam(train_params, lr=cfg["lr"],
weight_decay=cfg["weight_decay"], amsgrad=True)
else:
raise NotImplementedError
# Define Criterion
self.seg_criterion = loss_dict[cfg["seg_loss_type"]]
self.vertex_criterion = loss_dict[cfg["vertex_loss_type"]]
self.model, self.optimizer = model, optimizer
# Define Evaluator
self.evaluator = Evaluator(
self.coding_book.shape[0], cfg["vertex_channel"])
# Define lr scheduler
self.scheduler = LR_Scheduler(mode=cfg["lr_scheduler"], base_lr=cfg["lr"],
num_epochs=cfg["epochs"], iters_per_epoch=len(
self.train_loader),
lr_step=cfg["lr_step"])
self.model = torch.nn.DataParallel(self.model)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = {"mIoU": 0.0, "Acc": 0.0, "Acc": 0.0,
"FWIoU": 0.0, "translation_median": 1000}
if cfg["resume"] is not None and cfg["resume"] == True:
print(os.path.isfile(cfg["resume_checkpoint"]))
if not os.path.isfile(cfg["resume_checkpoint"]):
raise RuntimeError("=> no checkpoint found at {}" .format(
cfg["resume_checkpoint"]))
checkpoint = torch.load(cfg["resume_checkpoint"])
cfg.opt["start_epoch"] = checkpoint["epoch"] - 1
self.model.module.load_state_dict(checkpoint["state_dict"])
if not cfg["ft"]:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.best_pred = checkpoint["best_pred"]
if "coding_book" in checkpoint.keys():
assert self.coding_book.shape == checkpoint["coding_book"].shape
self.coding_book = checkpoint["coding_book"]
else:
print("Alert! coding book does not exist in the checkpoint")
print("=> loaded checkpoint {} (epoch {})"
.format(cfg["resume"], checkpoint["epoch"]))
def validation(self, epoch):
print("=================================")
print("validation")
print("=================================")
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc="\r")
num_iter_val = len(self.val_loader)
test_loss = 0.0
num_images = 0
ten_count = []
five_count = []
three_count = []
one_count = []
translation_list = []
angular_list = []
reproject_list = []
test_seg_loss = 0.0
test_ver_loss = 0.0
for i, data in enumerate(tbar):
image, seg_target, vertex_target = [d.cuda() for d in data[:3]]
valid_mask = data[-1].cuda()
pose_target, camera_k_matrix, ori_img = data[3:]
seg_target = seg_target.long()
valid_mask = (seg_target.detach() > 0).float()
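            # only pixels that carry a landmark label (seg_target > 0) contribute to the losses below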
with torch.no_grad():
seg_pred, vertex_pred, seg_pred_x4s = self.model(
image)
loss_seg = 0
if self.cfg["seg_decoder"]:
loss_seg = self.seg_criterion(seg_pred, seg_target, self.coding_book,
margin=self.cfg["seg_loss_margin"],
seg_k=self.cfg["seg_k"],
valid_mask=valid_mask)
test_seg_loss += loss_seg.item()
self.summary.add_scalar(
"val/loss_seg_iter", loss_seg.item(), i + num_iter_val * epoch)
loss_vertex = 0
if self.cfg["vertex_decoder"]:
loss_vertex = self.vertex_criterion(vertex_pred, vertex_target,
valid_mask)
test_ver_loss += loss_vertex.item()
self.summary.add_scalar(
"val/loss_vertex_iter", loss_vertex.item(), i + num_iter_val * epoch)
loss = 0
if self.cfg["seg_decoder"]:
loss += loss_seg
if self.cfg["vertex_decoder"]:
loss += loss_vertex * self.cfg["vertex_loss_ratio"]
test_loss += loss.item()
tbar.set_description("Test loss: %.9f" % (test_loss / (i + 1)))
self.summary.add_scalar(
"val/total_loss_iter", loss.item(), i + num_iter_val * epoch)
global_step = i * \
self.cfg["val_batch_size"] + image.data.shape[0]
# evaluate seg_pred
seg_target = seg_target.detach().squeeze()
if self.cfg["seg_decoder"]:
seg_pred, knn = utils.evaluate_segmentation(seg_pred_x4s,
self.coding_book, seg_target.size(), self.cfg["use_own_nn"])
else:
seg_pred = seg_target
# evaluate vertex
pt3d_filter, pt2d_filter, _ = utils.evaluate_vertex_v2(vertex_pred, seg_pred,
self.id2center, inlier_thresh=0.999,
min_mask_num=self.cfg["val_label_filter_threshsold"])
# pt3d_filter, pt2d_filter = utils.evaluate_vertex(vertex_target, seg_pred, self.id2center)
camera_k_matrix = camera_k_matrix.squeeze().numpy()
translation_distance, angular_distance, error = 1e9, 1e9, 1e9
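            # PnP is attempted only when enough filtered 2D-3D correspondences remain; otherwise the large defaults above are kept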
if pt2d_filter.shape[0] > 6:
# pnp
ret, pose_pred = utils.pnp(
pt3d_filter, pt2d_filter, camera_k_matrix)
error = utils.reproject_error(
pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)
translation_distance, angular_distance = utils.cm_degree_metric(
pose_pred, pose_target)
print(translation_distance, angular_distance, error, i)
ten_count.append(translation_distance <
10 and angular_distance < 10)
five_count.append(translation_distance <
5 and angular_distance < 5)
three_count.append(translation_distance <
3 and angular_distance < 3)
one_count.append(translation_distance <
1 and angular_distance < 1)
translation_list.append(translation_distance)
angular_list.append(angular_distance)
reproject_list.append(error)
# Add batch sample into evaluator
if self.cfg["seg_decoder"]:
self.evaluator.add_seg_batch(seg_target, seg_pred)
if self.cfg["visualize_segmenation"]:
self.summary.visualize_seg_image(ori_img, seg_pred, seg_target,
epoch, i, global_step, self.color_map)
if self.cfg["vertex_decoder"]:
# evaluate vertex_pred
vertex_target, vertex_pred = vertex_target.squeeze(), vertex_pred.squeeze()
self.evaluator.add_vertex_batch(vertex_target, vertex_pred)
                    # compute vertex accuracy
if self.cfg["visualize_voting"]:
if self.cfg["visualize_landmark"] != None and self.cfg["visualize_landmark"]:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step, pt2d_filter, True)
else:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step)
mIoU, Acc, Acc_class, FWIoU = self.summary.visualize_seg_evaluator(
self.evaluator, epoch, "val/seg/")
print("Validation:")
print("[Epoch: %d, numImages: %5d]" % (epoch, num_images))
print("Loss: %.9f" % (test_loss / num_iter_val))
self.summary.add_scalar("val/total_loss_epoch",
test_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_seg_epoch",
test_seg_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_ver_epoch",
test_ver_loss / num_iter_val, epoch)
self.summary.add_scalar("val/pnp/10cm_epoch",
np.mean(ten_count), epoch)
self.summary.add_scalar("val/pnp/5cm_epoch",
np.mean(five_count), epoch)
self.summary.add_scalar("val/pnp/3cm_epoch",
np.mean(three_count), epoch)
self.summary.add_scalar("val/pnp/1cm_epoch", np.mean(one_count), epoch)
self.summary.add_scalar(
"val/pnp/translation_median_epoch", np.median(translation_list), epoch)
self.summary.add_scalar(
"val/pnp/angular_median_epoch", np.median(angular_list), epoch)
new_pred = {"mIoU": mIoU.item(), "Acc": Acc.item(), "Acc_class": Acc_class.item(), "FWIoU": FWIoU.item(),
"10cm": np.mean(ten_count),
"5cm": np.mean(five_count), "3cm": np.mean(three_count), "1cm": np.mean(one_count),
"translation_median": np.median(translation_list), "angular_list": np.median(angular_list)}
print(new_pred)
if new_pred["translation_median"] < self.best_pred["translation_median"]:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
"epoch": epoch + 1,
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"best_pred": self.best_pred,
"coding_book": self.coding_book
}, is_best, save_model=self.cfg["save_model"])
def main():
parser = argparse.ArgumentParser(
description="PyTorch Landmark Segmentation Training")
parser.add_argument("--dataset", type=str,
choices=["7scenes_loc", "cambridge_loc"], help="experiment config file")
parser.add_argument("--scene", type=str, default="",
help="experiment scene")
parser.add_argument("--gpu-id", type=str, default="",
help="experiment gpu id")
parser.add_argument("--use-aug", type=str, default="",
choices=["", "true", "false"], help="experiment use aug")
parser.add_argument("--config", type=str, default=None,
help="experiment config file")
parser.add_argument("--debug", type=str, default="",
choices=["", "true", "false"], help="debug")
parser.add_argument("--resume", type=str, default="true",
choices=["", "true", "false"], help="resume")
args = parser.parse_args()
debug = None
if args.debug != "":
debug = (args.debug == "true")
if args.dataset == "7scenes_loc":
cfg = SevenScenesLocConfig(args.config, debug)
elif args.dataset == "cambridge_loc":
cfg = CambridgeLocConfig(args.config, debug)
if args.scene != "":
cfg.opt["scene"] = args.scene
if args.gpu_id != "":
cfg.opt["devices"] = args.gpu_id
if args.use_aug == "true":
cfg.opt["use_aug"] = True
if args.resume == "true":
cfg.opt["resume"] = True
cfg.opt["resume_checkpoint"] = cfg["export_dir"] + \
'/ckpts/checkpoint-backup.pth.tar'
cfg.print_opt()
cfg.set_environmental_variables()
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed(cfg["seed"])
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
trainer = Trainer(cfg)
print("Starting Epoch:", trainer.cfg["start_epoch"])
print("Total Epoches:", trainer.cfg["epochs"])
trainer.validation(trainer.cfg["start_epoch"])
trainer.summary.close()
if __name__ == "__main__":
main()
|
[
"dataloaders.make_data_loader",
"numpy.array",
"numpy.right_shift",
"numpy.mean",
"argparse.ArgumentParser",
"utils.utils.pnp",
"numpy.random.seed",
"torch.autograd.Variable",
"utils.metrics.Evaluator",
"utils.summaries.TensorboardSummary",
"utils.utils.evaluate_vertex_v2",
"os.path.isfile",
"models.sync_batchnorm.replicate.patch_replication_callback",
"warnings.filterwarnings",
"numpy.median",
"utils.utils.reproject_error",
"utils.saver.Saver",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"numpy.bitwise_and",
"numpy.zeros",
"utils.utils.cm_degree_metric"
] |
[((585, 618), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (608, 618), False, 'import warnings\n'), ((13676, 13753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Landmark Segmentation Training"""'}), "(description='PyTorch Landmark Segmentation Training')\n", (13699, 13753), False, 'import argparse\n'), ((15442, 15469), 'numpy.random.seed', 'np.random.seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (15456, 15469), True, 'import numpy as np\n'), ((15474, 15498), 'random.seed', 'random.seed', (["cfg['seed']"], {}), "(cfg['seed'])\n", (15485, 15498), False, 'import random\n'), ((740, 750), 'utils.saver.Saver', 'Saver', (['cfg'], {}), '(cfg)\n', (745, 750), False, 'from utils.saver import Saver\n'), ((811, 853), 'utils.summaries.TensorboardSummary', 'TensorboardSummary', (["self.cfg['log_tb_dir']"], {}), "(self.cfg['log_tb_dir'])\n", (829, 853), False, 'from utils.summaries import TensorboardSummary\n'), ((1063, 1094), 'dataloaders.make_data_loader', 'make_data_loader', (['cfg'], {}), '(cfg, **kwargs)\n', (1079, 1094), False, 'from dataloaders import make_data_loader\n'), ((1733, 1779), 'numpy.zeros', 'np.zeros', (['(unique_label.shape[0], 3)', 'np.uint8'], {}), '((unique_label.shape[0], 3), np.uint8)\n', (1741, 1779), True, 'import numpy as np\n'), ((1806, 1839), 'numpy.bitwise_and', 'np.bitwise_and', (['unique_label', '(255)'], {}), '(unique_label, 255)\n', (1820, 1839), True, 'import numpy as np\n'), ((2026, 2045), 'numpy.array', 'np.array', (['color_map'], {}), '(color_map)\n', (2034, 2045), True, 'import numpy as np\n'), ((2074, 2120), 'torch.autograd.Variable', 'Variable', (['self.coding_book'], {'requires_grad': '(True)'}), '(self.coding_book, requires_grad=True)\n', (2082, 2120), False, 'from torch.autograd import Variable\n'), ((3587, 3646), 'utils.metrics.Evaluator', 'Evaluator', (['self.coding_book.shape[0]', "cfg['vertex_channel']"], {}), "(self.coding_book.shape[0], cfg['vertex_channel'])\n", (3596, 3646), False, 'from utils.metrics import Evaluator\n'), ((4047, 4085), 'models.sync_batchnorm.replicate.patch_replication_callback', 'patch_replication_callback', (['self.model'], {}), '(self.model)\n', (4073, 4085), False, 'from models.sync_batchnorm.replicate import patch_replication_callback\n'), ((5586, 5618), 'tqdm.tqdm', 'tqdm', (['self.val_loader'], {'desc': "'\\r'"}), "(self.val_loader, desc='\\r')\n", (5590, 5618), False, 'from tqdm import tqdm\n'), ((1882, 1913), 'numpy.right_shift', 'np.right_shift', (['unique_label', '(4)'], {}), '(unique_label, 4)\n', (1896, 1913), True, 'import numpy as np\n'), ((1962, 1993), 'numpy.right_shift', 'np.right_shift', (['unique_label', '(8)'], {}), '(unique_label, 8)\n', (1976, 1993), True, 'import numpy as np\n'), ((12190, 12208), 'numpy.mean', 'np.mean', (['ten_count'], {}), '(ten_count)\n', (12197, 12208), True, 'import numpy as np\n'), ((12302, 12321), 'numpy.mean', 'np.mean', (['five_count'], {}), '(five_count)\n', (12309, 12321), True, 'import numpy as np\n'), ((12415, 12435), 'numpy.mean', 'np.mean', (['three_count'], {}), '(three_count)\n', (12422, 12435), True, 'import numpy as np\n'), ((12497, 12515), 'numpy.mean', 'np.mean', (['one_count'], {}), '(one_count)\n', (12504, 12515), True, 'import numpy as np\n'), ((12605, 12632), 'numpy.median', 'np.median', (['translation_list'], {}), '(translation_list)\n', (12614, 12632), True, 'import numpy as np\n'), ((12718, 12741), 'numpy.median', 'np.median', (['angular_list'], {}), '(angular_list)\n', 
(12727, 12741), True, 'import numpy as np\n'), ((12893, 12911), 'numpy.mean', 'np.mean', (['ten_count'], {}), '(ten_count)\n', (12900, 12911), True, 'import numpy as np\n'), ((12940, 12959), 'numpy.mean', 'np.mean', (['five_count'], {}), '(five_count)\n', (12947, 12959), True, 'import numpy as np\n'), ((12968, 12988), 'numpy.mean', 'np.mean', (['three_count'], {}), '(three_count)\n', (12975, 12988), True, 'import numpy as np\n'), ((12997, 13015), 'numpy.mean', 'np.mean', (['one_count'], {}), '(one_count)\n', (13004, 13015), True, 'import numpy as np\n'), ((13059, 13086), 'numpy.median', 'np.median', (['translation_list'], {}), '(translation_list)\n', (13068, 13086), True, 'import numpy as np\n'), ((13104, 13127), 'numpy.median', 'np.median', (['angular_list'], {}), '(angular_list)\n', (13113, 13127), True, 'import numpy as np\n'), ((4369, 4409), 'os.path.isfile', 'os.path.isfile', (["cfg['resume_checkpoint']"], {}), "(cfg['resume_checkpoint'])\n", (4383, 4409), False, 'import os\n'), ((4430, 4470), 'os.path.isfile', 'os.path.isfile', (["cfg['resume_checkpoint']"], {}), "(cfg['resume_checkpoint'])\n", (4444, 4470), False, 'import os\n'), ((8469, 8611), 'utils.utils.evaluate_vertex_v2', 'utils.evaluate_vertex_v2', (['vertex_pred', 'seg_pred', 'self.id2center'], {'inlier_thresh': '(0.999)', 'min_mask_num': "self.cfg['val_label_filter_threshsold']"}), "(vertex_pred, seg_pred, self.id2center,\n inlier_thresh=0.999, min_mask_num=self.cfg['val_label_filter_threshsold'])\n", (8493, 8611), False, 'from utils import utils\n'), ((9113, 9165), 'utils.utils.pnp', 'utils.pnp', (['pt3d_filter', 'pt2d_filter', 'camera_k_matrix'], {}), '(pt3d_filter, pt2d_filter, camera_k_matrix)\n', (9122, 9165), False, 'from utils import utils\n'), ((9219, 9294), 'utils.utils.reproject_error', 'utils.reproject_error', (['pt3d_filter', 'pt2d_filter', 'pose_pred', 'camera_k_matrix'], {}), '(pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)\n', (9240, 9294), False, 'from utils import utils\n'), ((9381, 9427), 'utils.utils.cm_degree_metric', 'utils.cm_degree_metric', (['pose_pred', 'pose_target'], {}), '(pose_pred, pose_target)\n', (9403, 9427), False, 'from utils import utils\n'), ((1203, 1247), 'os.path.join', 'osp.join', (["cfg['data_dir']", '"""id2centers.json"""'], {}), "(cfg['data_dir'], 'id2centers.json')\n", (1211, 1247), True, 'import os.path as osp\n')]
|
import os
import numpy as np
import time
import multiprocessing as mp
import csv
import socket
import datetime
import math
import glob
from pypushexp import PushSim
# # input - [recorded item]
# [weight] : 48
# [height] : 160
# [crouch_angle] (deg)
# [step_length_ratio]
# [halfcycle_duration_ratio]
# [push_step] : 8
# [push_duration] (sec) : .2
# [push_force] (N)
# [push_start_timing] (half gait cycle percent)
#
# # output
# [pushed_length] (m) : sim.out_pushed_length
# [pushed_steps] : sim.out_pushed_steps
# [push_strength] : abs(push_force * push_duration / weight)
# [step_length] (m) : sim.getPushedLength()
# [walking_speed] (m/s) : sim.getWalkingSpeed()
# [halfcycle_duration] (s) : sim.getStepLength() /sim.getWalkingSpeed()
#
# # output for hospital
# [distance] : pushed_length * 1000.
# [speed] : walking_speed * 1000.
# [force] : push_strength * 1000.
# [stride] : step_length * 1000.
# [start_timing_time_ic] = sim.start_timing_time_ic
# [mid_timing_time_ic] = sim.mid_timing_time_ic
# [start_timing_foot_ic] = sim.getStartTimingFootIC()
# [mid_timing_foot_ic] = sim.getMidTimingFootIC()
# [start_timing_time_fl] = sim.getStartTimingTimeFL()
# [mid_timing_time_fl] = sim.getMidTimingTimeFL()
# [start_timing_foot_fl] = sim.getStartTimingFootFL()
# [mid_timing_foot_fl] = sim.getMidTimingFootFL()
# # not used
# subject no
# sex
# left leg length
# right leg length
# stride
# speed
# experiment
# file name
# trial no
# push timing : 'left stance'
# push direction : 'from left'
# normalized push length
# push length until first step
# push end timing (time)
# push end timing (foot pos)
# return during first step
# push duration
# push start time
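# Illustrative sketch (not part of the original script): how the hospital-format
# outputs listed above could be assembled from one trial, assuming the sim
# accessors are exactly the ones named in the comments above.
def _to_hospital_units(sim, weight, push_force, push_duration):
    push_strength = abs(push_force * push_duration / weight)
    return {
        'distance': sim.out_pushed_length * 1000.,  # pushed_length (m) -> mm
        'speed': sim.getWalkingSpeed() * 1000.,     # walking_speed (m/s) -> mm/s
        'force': push_strength * 1000.,
        'stride': sim.getPushedLength() * 1000.,    # step_length as listed above
    }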
def gettimestringisoformat():
return datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
def worker_simulation(sim, param):
try:
push_step, push_duration,\
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,\
weight, height, ith, q = param
# print(int(crouch_angle), step_length_ratio, walk_speed_ratio, push_force, push_start_timing)
sim.setParamedStepParams(int(crouch_angle), step_length_ratio, walk_speed_ratio)
sim.setPushParams(8, 0.2, 0., 0.)
print(step_length_ratio, walk_speed_ratio)
stopcode = sim.simulate()
# stopcode = 0
if stopcode in [0, 3, 4]:
cot = sim.getCostOfTransport()
walking_speed = sim.getWalkingSpeed()
q.put((ith, crouch_angle, walking_speed, cot))
except IndexError:
pass
def write_start(csvfilepath):
csvfile = open(csvfilepath, 'w')
csvfile.write('type,ith,crouch_angle,speed,cot\n')
return csvfile
def write_body(q, csvfile):
while True:
try:
ith, crouch_angle, walking_speed, cot = q.get(False)
csvfile.write('torque,%d,%s,%s,%s\n' % (ith, crouch_angle, walking_speed, cot))
csvfile.flush()
        except Exception:
break
def write_end(csvfile):
csvfile.close()
def simulate(sim, launch_order, num, option_str=''):
#=======================================================================
# settings
#=======================================================================
TEST = True if launch_order is None else False
# TEST = True
# TEST = False
weight = 72
height = 170
push_step = 8
push_duration = .2
test_params = [] # element: (crouch_angle, step_length_ratio, halfcycle_duration_ratio, push_force, push_start_timing)
# ===========================================================================
#
# ===========================================================================
if TEST:
# test
additional_str = ''
num = 2
# num = 5000
mean_crouch = [0, 20, 30, 60]
else:
# real
all_mean_crouch = [0, 20, 30, 60]
mean_crouch = [all_mean_crouch[launch_order % len(all_mean_crouch)]]
additional_str = '_%ddeg__push' % mean_crouch[0]
# if launch_order==0:
# param_opt_result = '130810_113234_0_60_push'
# additional_str = '_0_60_push'
# elif launch_order==2:
# param_opt_result = '130810_161152_0_30_60_push'
# additional_str = '_0_30_60_push'
# =======================================================================
# set logger
# =======================================================================
outDir = os.path.dirname(os.path.abspath(__file__)) + '/results/'
if not os.path.exists(outDir):
os.makedirs(outDir)
csvfilepath = outDir + 'COT_' +option_str + '_' + gettimestringisoformat() + '_' + str(num) + 'trials_' + socket.gethostname() + '.csv'
print('start logging at', gettimestringisoformat())
print()
print('<simulation setting>')
# =======================================================================
# test2 : multivariate normal distribution
# =======================================================================
stride_means = [1.1262070300, 0.9529737358, 0.9158506655, 0.8755451448]
speed_means = [0.9943359644, 0.8080297151, 0.7880050552, 0.7435198328]
stride_vars = [0.03234099289, 0.02508595114, 0.02772452640, 0.02817863267]
stride_speed_covars = [0.03779884365, 0.02225320798, 0.02906793442, 0.03000639027]
speed_vars = [0.06929309644, 0.04421889347, 0.04899931048, 0.05194827755]
# crouch angle
# mean_crouch = [0,20,30,60]
std_crouch = 1
# step length
motion_stride_bvh_after_default_param = 1.1886
experi_stride_mean = stride_means[launch_order]
experi_stride_std = math.sqrt(stride_vars[launch_order])
mean_length_ratio = experi_stride_mean / motion_stride_bvh_after_default_param
std_length_ratio = experi_stride_std / motion_stride_bvh_after_default_param
# walk speed
speed_bvh_after_default_param = 0.9134
experi_speed_mean = speed_means[launch_order]
experi_speed_std = math.sqrt(speed_vars[launch_order])
mean_speed_ratio = experi_speed_mean / speed_bvh_after_default_param
std_speed_ratio = experi_speed_std / speed_bvh_after_default_param
# push strength
mean_strength = .535
std_strength = .096
mean_force = -(mean_strength*weight/push_duration)
std_force = (std_strength*weight/push_duration)
# push timing
mean_timing = 34
std_timing = 21
if TEST:
np.set_printoptions(precision=4, linewidth=200)
# for i in range(len(mean_crouch)):
# mean = [mean_crouch[i], mean_length_ratio, mean_duration_ratio, mean_force, mean_timing, mean_crouch[i]]
# cov = np.diag( [std_crouch**2, std_length_ratio**2, std_duration_ratio**2, std_force**2, std_timing**2, 0])
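    # draw (crouch, stride ratio, speed ratio, push force, push timing) jointly from a
    # multivariate normal; the off-diagonal cov terms below couple stride and speed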
for i in range(len(mean_crouch)):
mean = [mean_crouch[i], mean_length_ratio, mean_speed_ratio, mean_force, mean_timing, mean_crouch[i]]
cov = np.diag([0 , std_length_ratio**2, std_speed_ratio**2, std_force**2, std_timing**2, 0])
cov[1, 2] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
cov[2, 1] = stride_speed_covars[i] / speed_bvh_after_default_param / motion_stride_bvh_after_default_param
if len(test_params) == 0:
test_params = np.random.multivariate_normal(mean, cov, num)
else:
test_params = np.vstack((test_params, np.random.multivariate_normal(mean, cov, num)))
# no negative crouch angle
for i in range(len(test_params)):
test_params[i][0] = abs(test_params[i][0])
test_params[i][2] = abs(test_params[i][2])
test_params[i][3] = -abs(test_params[i][3])
# print(test_params)
print()
print('multivariate normal distribution')
print()
print('mean_crouch', mean_crouch)
print('std_crouch', std_crouch)
print()
print('motion_step_stride', motion_stride_bvh_after_default_param)
print('experi_step_length_mean', experi_stride_mean)
print('experi_step_length_std', experi_stride_std)
print('mean_length_ratio', mean_length_ratio)
print('std_length_ratio', std_length_ratio)
print()
print('motion_speed', speed_bvh_after_default_param)
print('experi_speed_mean', experi_speed_mean)
print('experi_speed_std', experi_speed_std)
print('mean_speed_ratio', mean_speed_ratio)
print('std_speed_ratio', std_speed_ratio)
print()
print('num', num)
print()
print('total # of simulations', len(test_params))
print()
# =======================================================================
# simulation
# =======================================================================
pt = time.time()
print('<start simulation>')
print('hostname %s ' % socket.gethostname())
print()
q = mp.Manager().Queue()
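    # worker results are pushed onto this shared queue and drained into the csv by write_body after each parameter group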
groupsize = 100
paramgroups = [[] for i in range( len(test_params)//groupsize + 1 )]
ith = 1
for i in range(len(test_params)):
crouch_angle = test_params[i][0]
step_length_ratio = test_params[i][1]
walk_speed_ratio = test_params[i][2]
push_force = test_params[i][3]
push_start_timing = test_params[i][4]
crouch_label = test_params[i][5]
paramgroups[i//groupsize].append((push_step, push_duration,
crouch_angle, step_length_ratio, walk_speed_ratio, push_force, push_start_timing, crouch_label,
weight, height, ith, q))
ith += 1
csvfile = write_start(csvfilepath)
for i in range(len(paramgroups)):
for j in range(len(paramgroups[i])):
print(j)
worker_simulation(sim, paramgroups[i][j])
write_body(q, csvfile)
write_end(csvfile)
print()
_s = time.time() - pt
_h = _s // 3600
_m = _s // 60
_s -= 60 * _m
_m -= 60 * _h
print('elapsed time = %d h:%d m:%d s' % (int(_h), int(_m), int(_s)))
print()
print('end logging at', gettimestringisoformat())
if __name__ == '__main__':
import sys
import re
option = sys.argv[1]
trial_num = int(sys.argv[2])
_metadata_dir = os.path.dirname(os.path.abspath(__file__)) + '/../data/metadata/'
_nn_finding_dir = os.path.dirname(os.path.abspath(__file__)) + '/../nn/*/'
nn_dir = None
if _nn_finding_dir is not None:
nn_dir = glob.glob(_nn_finding_dir + option)[0]
meta_file = _metadata_dir + option + '.txt'
sim = None
if 'muscle' in option:
sim = PushSim(meta_file, nn_dir+'/max.pt', nn_dir+'/max_muscle.pt')
else:
sim = PushSim(meta_file, nn_dir+'/max.pt')
if "all" in option:
simulate(sim, 0, trial_num, option)
simulate(sim, 1, trial_num, option)
simulate(sim, 2, trial_num, option)
simulate(sim, 3, trial_num, option)
else:
crouch = re.findall(r'crouch\d+', option)[0][6:]
simulate(sim, ['0', '20', '30', '60'].index(crouch), trial_num, option)
|
[
"os.path.exists",
"os.makedirs",
"numpy.random.multivariate_normal",
"math.sqrt",
"multiprocessing.Manager",
"numpy.diag",
"pypushexp.PushSim",
"datetime.datetime.now",
"re.findall",
"os.path.abspath",
"socket.gethostname",
"time.time",
"glob.glob",
"numpy.set_printoptions"
] |
[((5920, 5956), 'math.sqrt', 'math.sqrt', (['stride_vars[launch_order]'], {}), '(stride_vars[launch_order])\n', (5929, 5956), False, 'import math\n'), ((6255, 6290), 'math.sqrt', 'math.sqrt', (['speed_vars[launch_order]'], {}), '(speed_vars[launch_order])\n', (6264, 6290), False, 'import math\n'), ((9026, 9037), 'time.time', 'time.time', ([], {}), '()\n', (9035, 9037), False, 'import time\n'), ((4806, 4828), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (4820, 4828), False, 'import os\n'), ((4838, 4857), 'os.makedirs', 'os.makedirs', (['outDir'], {}), '(outDir)\n', (4849, 4857), False, 'import os\n'), ((6702, 6749), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'linewidth': '(200)'}), '(precision=4, linewidth=200)\n', (6721, 6749), True, 'import numpy as np\n'), ((7222, 7320), 'numpy.diag', 'np.diag', (['[0, std_length_ratio ** 2, std_speed_ratio ** 2, std_force ** 2, std_timing **\n 2, 0]'], {}), '([0, std_length_ratio ** 2, std_speed_ratio ** 2, std_force ** 2, \n std_timing ** 2, 0])\n', (7229, 7320), True, 'import numpy as np\n'), ((10168, 10179), 'time.time', 'time.time', ([], {}), '()\n', (10177, 10179), False, 'import time\n'), ((10897, 10962), 'pypushexp.PushSim', 'PushSim', (['meta_file', "(nn_dir + '/max.pt')", "(nn_dir + '/max_muscle.pt')"], {}), "(meta_file, nn_dir + '/max.pt', nn_dir + '/max_muscle.pt')\n", (10904, 10962), False, 'from pypushexp import PushSim\n'), ((10983, 11021), 'pypushexp.PushSim', 'PushSim', (['meta_file', "(nn_dir + '/max.pt')"], {}), "(meta_file, nn_dir + '/max.pt')\n", (10990, 11021), False, 'from pypushexp import PushSim\n'), ((1873, 1896), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1894, 1896), False, 'import datetime\n'), ((4753, 4778), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4768, 4778), False, 'import os\n'), ((4969, 4989), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4987, 4989), False, 'import socket\n'), ((7612, 7657), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'num'], {}), '(mean, cov, num)\n', (7641, 7657), True, 'import numpy as np\n'), ((9098, 9118), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (9116, 9118), False, 'import socket\n'), ((9141, 9153), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (9151, 9153), True, 'import multiprocessing as mp\n'), ((10552, 10577), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10567, 10577), False, 'import os\n'), ((10640, 10665), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10655, 10665), False, 'import os\n'), ((10753, 10788), 'glob.glob', 'glob.glob', (['(_nn_finding_dir + option)'], {}), '(_nn_finding_dir + option)\n', (10762, 10788), False, 'import glob\n'), ((11248, 11280), 're.findall', 're.findall', (['"""crouch\\\\d+"""', 'option'], {}), "('crouch\\\\d+', option)\n", (11258, 11280), False, 'import re\n'), ((7722, 7767), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'num'], {}), '(mean, cov, num)\n', (7751, 7767), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import os
import numpy
from io import BytesIO
from matplotlib import pyplot
import requests
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from maskrcnn_benchmark.structures.image_list import ImageList
if __name__ == "__main__":
# load config from file and command-line arguments
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cfg.merge_from_file(
os.path.join(project_dir,
"configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"))
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=480,
)
def single_image_to_top_predictions(image):
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
image = image * 255
else:
image = image[[2, 1, 0]]
# we absolutely want fixed size (int) here (or we run into a tracing error (or bug?)
# or we might later decide to make things work with variable size...
image = image - torch.tensor(cfg.INPUT.PIXEL_MEAN)[:, None, None]
# should also do variance...
image_list = ImageList(image.unsqueeze(0), [(int(image.size(-2)), int(image.size(-1)))])
result, = coco_demo.model(image_list)
scores = result.get_field("scores")
keep = (scores >= coco_demo.confidence_threshold)
result = (result.bbox[keep],
result.get_field("labels")[keep],
result.get_field("mask")[keep],
scores[keep])
return result
@torch.jit.script
def my_paste_mask(mask, bbox, height, width, threshold=0.5, padding=1, contour=True, rectangle=False):
# type: (Tensor, Tensor, int, int, float, int, bool, bool) -> Tensor
padded_mask = torch.constant_pad_nd(mask, (padding, padding, padding, padding))
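    # pad the mask and enlarge the box by the same ratio so the mask is not clipped at its borders when resized into the box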
scale = 1.0 + 2.0 * float(padding) / float(mask.size(-1))
center_x = (bbox[2] + bbox[0]) * 0.5
center_y = (bbox[3] + bbox[1]) * 0.5
w_2 = (bbox[2] - bbox[0]) * 0.5 * scale
h_2 = (bbox[3] - bbox[1]) * 0.5 * scale # should have two scales?
bbox_scaled = torch.stack([center_x - w_2, center_y - h_2,
center_x + w_2, center_y + h_2], 0)
TO_REMOVE = 1
w = (bbox_scaled[2] - bbox_scaled[0] + TO_REMOVE).clamp(min=1).long()
h = (bbox_scaled[3] - bbox_scaled[1] + TO_REMOVE).clamp(min=1).long()
scaled_mask = torch.ops.maskrcnn_benchmark.upsample_bilinear(padded_mask.float(), h, w)
x0 = bbox_scaled[0].long()
y0 = bbox_scaled[1].long()
x = x0.clamp(min=0)
y = y0.clamp(min=0)
leftcrop = x - x0
topcrop = y - y0
w = torch.min(w - leftcrop, width - x)
h = torch.min(h - topcrop, height - y)
# mask = torch.zeros((height, width), dtype=torch.uint8)
# mask[y:y + h, x:x + w] = (scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold)
mask = torch.constant_pad_nd((scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold),
(int(x), int(width - x - w), int(y), int(height - y - h))) # int for the script compiler
if contour:
mask = mask.float()
# poor person's contour finding by comparing to smoothed
mask = (mask - torch.nn.functional.conv2d(mask.unsqueeze(0).unsqueeze(0),
torch.full((1, 1, 3, 3), 1.0 / 9.0), padding=1)[0, 0]).abs() > 0.001
if rectangle:
x = torch.arange(width, dtype=torch.long).unsqueeze(0)
y = torch.arange(height, dtype=torch.long).unsqueeze(1)
r = bbox.long()
# work around script not liking bitwise ops
rectangle_mask = ((((x == r[0]) + (x == r[2])) * (y >= r[1]) * (y <= r[3]))
+ (((y == r[1]) + (y == r[3])) * (x >= r[0]) * (x <= r[2])))
mask = (mask + rectangle_mask).clamp(max=1)
return mask
@torch.jit.script
def add_annotations(image, labels, scores, bboxes, class_names=','.join(coco_demo.CATEGORIES), color=torch.tensor([255, 255, 255], dtype=torch.long)):
# type: (Tensor, Tensor, Tensor, Tensor, str, Tensor) -> Tensor
result_image = torch.ops.maskrcnn_benchmark.add_annotations(image, labels, scores, bboxes, class_names, color)
return result_image
@torch.jit.script
def combine_masks(image, labels, masks, scores, bboxes, threshold=0.5, padding=1, contour=True, rectangle=False, palette=torch.tensor([33554431, 32767, 2097151])):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, int, bool, bool, Tensor) -> Tensor
height = image.size(0)
width = image.size(1)
image_with_mask = image.clone()
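    # paste every instance mask in a per-label color, then draw labels, scores and boxes on top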
for i in range(masks.size(0)):
color = ((palette * labels[i]) % 255).to(torch.uint8)
one_mask = my_paste_mask(masks[i, 0], bboxes[i], height, width, threshold, padding, contour, rectangle)
image_with_mask = torch.where(one_mask.unsqueeze(-1), color.unsqueeze(0).unsqueeze(0), image_with_mask)
image_with_mask = add_annotations(image_with_mask, labels, scores, bboxes)
return image_with_mask
def process_image_with_traced_model(image):
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
boxes, labels, masks, scores = traced_model(image)
# todo: make this in one large thing
result_image = combine_masks(original_image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
def fetch_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")
if __name__ == "__main__":
pil_image = fetch_image(
url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# convert to BGR format
image = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
for p in coco_demo.model.parameters():
p.requires_grad_(False)
traced_model = torch.jit.trace(single_image_to_top_predictions, (image,))
@torch.jit.script
def end_to_end_model(image):
boxes, labels, masks, scores = traced_model(image)
result_image = combine_masks(image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
end_to_end_model.save('end_to_end_model.pt')
result_image = process_image_with_traced_model(original_image)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image[:, :, [2, 1, 0]])
pyplot.show()
# second image
image2 = fetch_image(
url='http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg')
image2 = image2.resize((640, 480), Image.BILINEAR)
image2 = torch.from_numpy(numpy.array(image2)[:, :, [2, 1, 0]])
result_image2 = process_image_with_traced_model(image2)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image2[:, :, [2, 1, 0]])
pyplot.show()
|
[
"torch.jit.trace",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"torch.full",
"torch.stack",
"os.path.join",
"io.BytesIO",
"maskrcnn_benchmark.config.cfg.merge_from_list",
"torch.min",
"requests.get",
"torch.tensor",
"predictor.COCODemo",
"numpy.array",
"torch.ops.maskrcnn_benchmark.add_annotations",
"os.path.abspath",
"maskrcnn_benchmark.config.cfg.freeze",
"torch.arange",
"torch.constant_pad_nd"
] |
[((673, 717), 'maskrcnn_benchmark.config.cfg.merge_from_list', 'cfg.merge_from_list', (["['MODEL.DEVICE', 'cpu']"], {}), "(['MODEL.DEVICE', 'cpu'])\n", (692, 717), False, 'from maskrcnn_benchmark.config import cfg\n'), ((722, 734), 'maskrcnn_benchmark.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (732, 734), False, 'from maskrcnn_benchmark.config import cfg\n'), ((834, 940), 'predictor.COCODemo', 'COCODemo', (['cfg'], {'confidence_threshold': '(0.7)', 'show_mask_heatmaps': '(False)', 'masks_per_dim': '(2)', 'min_image_size': '(480)'}), '(cfg, confidence_threshold=0.7, show_mask_heatmaps=False,\n masks_per_dim=2, min_image_size=480)\n', (842, 940), False, 'from predictor import COCODemo\n'), ((2346, 2411), 'torch.constant_pad_nd', 'torch.constant_pad_nd', (['mask', '(padding, padding, padding, padding)'], {}), '(mask, (padding, padding, padding, padding))\n', (2367, 2411), False, 'import torch\n'), ((2689, 2774), 'torch.stack', 'torch.stack', (['[center_x - w_2, center_y - h_2, center_x + w_2, center_y + h_2]', '(0)'], {}), '([center_x - w_2, center_y - h_2, center_x + w_2, center_y + h_2], 0\n )\n', (2700, 2774), False, 'import torch\n'), ((3223, 3257), 'torch.min', 'torch.min', (['(w - leftcrop)', '(width - x)'], {}), '(w - leftcrop, width - x)\n', (3232, 3257), False, 'import torch\n'), ((3266, 3300), 'torch.min', 'torch.min', (['(h - topcrop)', '(height - y)'], {}), '(h - topcrop, height - y)\n', (3275, 3300), False, 'import torch\n'), ((4585, 4632), 'torch.tensor', 'torch.tensor', (['[255, 255, 255]'], {'dtype': 'torch.long'}), '([255, 255, 255], dtype=torch.long)\n', (4597, 4632), False, 'import torch\n'), ((4722, 4821), 'torch.ops.maskrcnn_benchmark.add_annotations', 'torch.ops.maskrcnn_benchmark.add_annotations', (['image', 'labels', 'scores', 'bboxes', 'class_names', 'color'], {}), '(image, labels, scores, bboxes,\n class_names, color)\n', (4766, 4821), False, 'import torch\n'), ((4983, 5023), 'torch.tensor', 'torch.tensor', (['[33554431, 32767, 2097151]'], {}), '([33554431, 32767, 2097151])\n', (4995, 5023), False, 'import torch\n'), ((6190, 6207), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6202, 6207), False, 'import requests\n'), ((6845, 6903), 'torch.jit.trace', 'torch.jit.trace', (['single_image_to_top_predictions', '(image,)'], {}), '(single_image_to_top_predictions, (image,))\n', (6860, 6903), False, 'import torch\n'), ((7306, 7350), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['result_image[:, :, [2, 1, 0]]'], {}), '(result_image[:, :, [2, 1, 0]])\n', (7319, 7350), False, 'from matplotlib import pyplot\n'), ((7355, 7368), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7366, 7368), False, 'from matplotlib import pyplot\n'), ((7720, 7765), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['result_image2[:, :, [2, 1, 0]]'], {}), '(result_image2[:, :, [2, 1, 0]])\n', (7733, 7765), False, 'from matplotlib import pyplot\n'), ((7770, 7783), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7781, 7783), False, 'from matplotlib import pyplot\n'), ((565, 650), 'os.path.join', 'os.path.join', (['project_dir', '"""configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"""'], {}), "(project_dir,\n 'configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml')\n", (577, 650), False, 'import os\n'), ((504, 529), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (519, 529), False, 'import os\n'), ((1647, 1681), 'torch.tensor', 'torch.tensor', (['cfg.INPUT.PIXEL_MEAN'], {}), '(cfg.INPUT.PIXEL_MEAN)\n', (1659, 1681), False, 'import 
torch\n'), ((6465, 6487), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (6476, 6487), False, 'import numpy\n'), ((7578, 7597), 'numpy.array', 'numpy.array', (['image2'], {}), '(image2)\n', (7589, 7597), False, 'import numpy\n'), ((4034, 4071), 'torch.arange', 'torch.arange', (['width'], {'dtype': 'torch.long'}), '(width, dtype=torch.long)\n', (4046, 4071), False, 'import torch\n'), ((4097, 4135), 'torch.arange', 'torch.arange', (['height'], {'dtype': 'torch.long'}), '(height, dtype=torch.long)\n', (4109, 4135), False, 'import torch\n'), ((6230, 6255), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (6237, 6255), False, 'from io import BytesIO\n'), ((3935, 3970), 'torch.full', 'torch.full', (['(1, 1, 3, 3)', '(1.0 / 9.0)'], {}), '((1, 1, 3, 3), 1.0 / 9.0)\n', (3945, 3970), False, 'import torch\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
def save_tb_embeddings(embeddings_filename):
f = open(embeddings_filename, 'rb')
embeddings = pickle.load(f)
images = embeddings['images']
zs = embeddings['zs']
# overwrite Tensorboard log dir if necessary
if os.path.exists(TB_DIR):
shutil.rmtree(TB_DIR)
os.makedirs(TB_DIR)
# create grid image
img_width, img_height = save_sprite_image(images)
with tf.device('cpu:0'):
# create embedding var
embedding_var = tf.Variable(initial_value=zs)
# save projector config
summary_writer = tf.summary.FileWriter(TB_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
embedding.sprite.single_image_dim.extend([img_width, img_height])
projector.visualize_embeddings(summary_writer, config)
# save embeddings
sess = tf.Session()
sess.run(embedding_var.initializer)
saver = tf.train.Saver([embedding_var])
saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
def save_sprite_image(images):
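    # tile all embedding images row-major into a single sprite image, the format the TensorBoard projector expects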
n_embeddings = images.shape[0]
grid_cols = int(np.sqrt(n_embeddings))
grid_rows = int(np.ceil(float(n_embeddings) / grid_cols))
img_height, img_width, img_channels = images[0].shape
grid_image = np.empty((img_height * grid_rows, img_width * grid_cols, img_channels))
for i, image in enumerate(images):
        row = i // grid_cols  # integer division: a float row index would break the slicing below
col = i % grid_cols
x = img_width * col
y = img_height * row
grid_image[y:y + img_height, x:x + img_width] = image
grid_image = PIL.Image.fromarray(grid_image.astype('uint8'))
grid_image.save(SPRITE_IMAGE_FILENAME)
return img_width, img_height
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'embeddings_file',
help='Embeddings pickle file')
args = vars(parser.parse_args())
try:
save_tb_embeddings(
args['embeddings_file'],
)
except Exception as e:
        print('%s: %s' % (type(e).__name__, e))
raise
|
[
"os.path.exists",
"tensorflow.device",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"numpy.sqrt",
"os.makedirs",
"argparse.ArgumentParser",
"tensorflow.Variable",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"tensorflow.Session",
"pickle.load",
"os.path.join",
"tensorflow.train.Saver",
"os.getcwd",
"numpy.empty",
"shutil.rmtree",
"tensorflow.summary.FileWriter"
] |
[((334, 368), 'os.path.join', 'os.path.join', (['TB_DIR', '"""sprite.png"""'], {}), "(TB_DIR, 'sprite.png')\n", (346, 368), False, 'import os\n'), ((287, 298), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (296, 298), False, 'import os\n'), ((473, 487), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (484, 487), False, 'import pickle\n'), ((606, 628), 'os.path.exists', 'os.path.exists', (['TB_DIR'], {}), '(TB_DIR)\n', (620, 628), False, 'import os\n'), ((664, 683), 'os.makedirs', 'os.makedirs', (['TB_DIR'], {}), '(TB_DIR)\n', (675, 683), False, 'import os\n'), ((1759, 1830), 'numpy.empty', 'np.empty', (['(img_height * grid_rows, img_width * grid_cols, img_channels)'], {}), '((img_height * grid_rows, img_width * grid_cols, img_channels))\n', (1767, 1830), True, 'import numpy as np\n'), ((2229, 2291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Inference tool - DIGITS"""'}), "(description='Inference tool - DIGITS')\n", (2252, 2291), False, 'import argparse\n'), ((638, 659), 'shutil.rmtree', 'shutil.rmtree', (['TB_DIR'], {}), '(TB_DIR)\n', (651, 659), False, 'import shutil\n'), ((773, 791), 'tensorflow.device', 'tf.device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (782, 791), True, 'import tensorflow as tf\n'), ((848, 877), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'zs'}), '(initial_value=zs)\n', (859, 877), True, 'import tensorflow as tf\n'), ((936, 965), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['TB_DIR'], {}), '(TB_DIR)\n', (957, 965), True, 'import tensorflow as tf\n'), ((983, 1010), 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (1008, 1010), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((1248, 1302), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['summary_writer', 'config'], {}), '(summary_writer, config)\n', (1278, 1302), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((1345, 1357), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1355, 1357), True, 'import tensorflow as tf\n'), ((1418, 1449), 'tensorflow.train.Saver', 'tf.train.Saver', (['[embedding_var]'], {}), '([embedding_var])\n', (1432, 1449), True, 'import tensorflow as tf\n'), ((1599, 1620), 'numpy.sqrt', 'np.sqrt', (['n_embeddings'], {}), '(n_embeddings)\n', (1606, 1620), True, 'import numpy as np\n'), ((1475, 1509), 'os.path.join', 'os.path.join', (['TB_DIR', '"""model.ckpt"""'], {}), "(TB_DIR, 'model.ckpt')\n", (1487, 1509), False, 'import os\n')]
|
import tvm
import sys
import time
import numpy as np
from tvm.tensor_graph.testing.models import resnet
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import CELoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler
from tvm.tensor_graph.core.scheduler import schedule_all
from tvm.tensor_graph.core.build_graph import build_all
from tvm.tensor_graph.core.runtime import run_all
def test1():
print("test 1 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
begin = time.time()
# change data layout
forward_space = ForwardGraphSpace()
forward_tuner = RandomForwardTuner(forward_space)
layout_generator = LayoutTransform(fwd_graph, forward_space, forward_tuner)
fgraph = layout_generator.generate()
after_layout = time.time()
# autodiff
bgraph = fgraph.make_backward(ce_loss, sgd)
after_autodiff = time.time()
# make tir graph
inputs = [x.tvm_tensor for x in bgraph.inputs]
weights = [x.tvm_tensor for x in bgraph.weights]
outputs = [x.tvm_tensor for x in bgraph.outputs]
# labels = [x.tvm_tensor for x in bgraph.labels]
# loss = bgraph.loss.tvm_tensor
# gradients = [x.tvm_tensor for x in bgraph.gradients]
# updates = [x.tvm_tensor for x in bgraph.updates]
labels = []
loss = None
gradients = []
lr = None
updates = []
tgraph = PyTIRGraph(
inputs,
labels,
outputs,
weights,
loss,
gradients,
lr,
updates)
after_tir_graph = time.time()
# subgraph partition
partition_space = PartitionSpace()
partition_tuner = RandomPartitionTuner(partition_space)
cut_candidates = form_cut_candidates(tgraph)
# print(cut_candidates)
for i, candidate in enumerate(cut_candidates):
name = "graph_cut_" + str(i)
partition_generator = SingleCut(tgraph, name, candidate, partition_space, partition_tuner)
partition_generator.generate()
# for op, stat in tgraph.op_stat_dict.items():
# print(op, " head=", stat.head)
tgraph.partition_graph()
after_partition = time.time()
print("num subgraphs:", len(tgraph.subgraphs))
target = "cuda"
dev = 0
# update the op stat dict of subgraphs
# do auto-schedule
total_build_trials = 0
build_time_record = []
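  # for each subgraph: flatten its ops, group them into connected sets, then retry
  # scheduling + build (up to 10 attempts per connected set) until compilation succeeds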
for mark, subgraph in tgraph.subgraphs.items():
# print("subgraph", mark)
tensors = list(subgraph.outputs.keys()) + list(subgraph.loss.keys()) \
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = tgraph.op_map[op]
if v in tgraph.op_stat_dict:
op_stat_dict[op] = tgraph.op_stat_dict[v]
c_list = form_connected_sets(subgraph, op_stat_dict, tensors, ops, down_graph)
# print("c_list_length=", len(c_list))
# print("check connected set")
# for connected_set in c_list:
# print(connected_set)
scheduler = Scheduler()
# sch = tgraph.schedules[mark]
for i, connected_set in enumerate(c_list):
name = "subgraph_" + str(mark) + "_connect_" + str(i)
assert not connected_set.empty()
build_success = False
for trial in range(10):
total_build_trials += 1
tgraph.create_schedule_for(mark=mark)
sch = tgraph.schedules[mark]
if connected_set.has_master():
if connected_set.iso_base():
PrimitiveScheduler = GPUScheduleMasterBaseSet
else:
PrimitiveScheduler = GPUScheduleMasterSet
primitive_generator = PrimitiveScheduler(
name, subgraph, connected_set, down_graph, op_stat_dict, scheduler)
else:
PrimitiveScheduler = GPUScheduleBaseSet
primitive_generator = PrimitiveScheduler(
name, connected_set, scheduler)
primitive_generator.generate(sch)
# try:
# print(tvm.lower(sch, tgraph.bufs[mark], simple_mode=True))
# except Exception as e:
# print(e)
# print("prologue")
# for p in connected_set.prologue:
# print(p.body)
# print("epilogue")
# for e in connected_set.epilogue:
# print(e.body)
# print("base")
# print(connected_set.base.body)
# print("master")
# print(connected_set.master.body)
# print(connected_set.master.input_tensors)
# for op, master in connected_set.prologue.items():
# in_input = False
# for inp in master.input_tensors:
# if op == inp.op:
# in_input = True
# break
# if not in_input:
# print(op, "not in the inputs of", master)
build_beg = time.time()
build_success = tgraph.build_for(target, mark=mark)
build_end = time.time()
build_time_record.append(build_end - build_beg)
if build_success:
break
if not build_success:
raise RuntimeError("Can't build for subgraph", mark)
after_schedule = time.time()
tgraph.set_inputs({bgraph.inputs[0].tvm_tensor: img_np})
# tgraph.set_labels({bgraph.labels[0].tvm_tensor: label_np})
# tgraph.set_lr(optimize_engine.get_lr())
tgraph.allocate_buffer(target, dev)
beg = time.time()
for mark in tgraph.call_order:
func = tgraph.functions[mark]
bufs = tgraph.bufs[mark]
real_bufs = [tgraph.tvm_array_dict[tgraph.subgraphs[mark].index[x]] for x in bufs]
func_beg = time.time()
func(*real_bufs)
func_end = time.time()
print((func_end - func_beg) * 1e3, "ms")
end = time.time()
print("End to end time:", (end - beg) * 1e3, "ms")
print("total build trails=", total_build_trials)
print("layout change time cost=", (after_layout - begin) * 1e3, "ms")
print("autodiff time cost=", (after_autodiff - after_layout) * 1e3, "ms")
print("make tir_graph time cost=", (after_tir_graph - after_autodiff) * 1e3, "ms")
print("subgraph partition time cost=", (after_partition - after_tir_graph) * 1e3, "ms")
print("schedule time cost=", (after_schedule - after_partition) * 1e3, "ms. average=",
(after_schedule - after_partition) * 1e3 / total_build_trials, "ms")
print("average build time cost=", np.array(build_time_record).mean() * 1e3, "ms")
print("total build time cost=", (after_schedule - begin) * 1e3, "ms")
print("Success!")
def test2(file=sys.stdout):
print("test 2 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())), file=file)
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())), file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
for k, v in tir_graph.op_map.items():
print(k.name, v.name, file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
tmp = {}
for f in set(tir_graph.op_feature_dict.values()):
if f.split(")")[-1] not in tmp:
tmp[f.split(")")[-1]] = []
tmp[f.split(")")[-1]].append(f)
print("different kinds of ops:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
print("####################################################", file=file)
tmp = {}
for f in set(tir_graph.subgraph_features.values()):
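        # keep only the op-kind suffix after the parenthesised parameters, so subgraphs with the same structure group together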
key = ";".join([x.split(")")[-1] for x in f.split(";")])
if key not in tmp:
tmp[key] = []
tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
for k, v in tir_graph.subgraph_features.items():
key = ";".join([x.split(")")[-1] for x in v.split(";")])
if key == "collect_3_dim4;grad_bn2d_to_conv2d_nchw_8;grad_bn2d_var_to_conv2d_nchw_10;grad_bn2d_mean_to_conv2d_nchw_2;collect_2_dim1":
i = 1
for op in tir_graph.subgraphs[k].op_list:
print(i, ". #####")
i += 1
print(op.body)
print(op.input_tensors)
break
# target = "cuda"
# dev = 0
# print("begin schedule")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], [label_np], sgd.get_lr(), target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success", file=file)
def test3():
print("test 3 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())))
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())))
tmp = {}
# for f in set(tir_graph.op_feature_dict.values()):
# if f.split(")")[-1] not in tmp:
# tmp[f.split(")")[-1]] = []
# tmp[f.split(")")[-1]].append(f)
# for k, v in tmp.items():
# print(k)
# for vv in v:
# print(" ", vv)
print("####################################################")
tmp = {}
# for f in set(tir_graph.subgraph_features.values()):
# key = ";".join([x.split(")")[-1] for x in f.split(";")])
# if key not in tmp:
# tmp[key] = []
# tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp))
for k, v in tmp.items():
print(k)
for vv in v:
print(" ", vv)
# target = "cuda"
# dev = 1
# print("begin build")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success")
if __name__ == "__main__":
with open("trace_resnet_subgraph.log", "w") as fout:
test2(file=fout)
# test3()
|
[
"tvm.tensor_graph.core.tuner.RandomForwardTuner",
"tvm.tensor_graph.core.schedule_generator.form_cut_candidates",
"tvm.tensor_graph.core.utils.flatten_tir_graph",
"numpy.array",
"tvm.tensor_graph.core.GraphTensor",
"tvm.tensor_graph.core.ForwardGraph",
"tvm.tensor_graph.nn.CELoss",
"tvm.tensor_graph.core.scheduler.PrimitiveScheduler",
"tvm.tensor_graph.core.scheduler.schedule_all",
"tvm.tensor_graph.core.space.ForwardGraphSpace",
"tvm.tensor_graph.nn.SGD",
"tvm.tensor_graph.core.space.PartitionSpace",
"tvm.tensor_graph.core.schedule_generator.form_connected_sets",
"tvm.tensor_graph.core.schedule_generator.LayoutTransform",
"tvm.tensor_graph.testing.models.resnet.resnet50",
"tvm.tensor_graph.core.PyTIRGraph",
"time.time",
"tvm.tensor_graph.core.schedule_generator.SingleCut",
"tvm.tensor_graph.core.tuner.RandomPartitionTuner",
"numpy.random.uniform"
] |
[((1232, 1265), 'tvm.tensor_graph.testing.models.resnet.resnet50', 'resnet.resnet50', ([], {'num_classes': '(1000)'}), '(num_classes=1000)\n', (1247, 1265), False, 'from tvm.tensor_graph.testing.models import resnet\n'), ((1281, 1330), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['img_shape'], {'dtype': 'dtype', 'name': '"""image"""'}), "(img_shape, dtype=dtype, name='image')\n", (1292, 1330), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((1348, 1399), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['label_shape'], {'dtype': 'dtype', 'name': '"""label"""'}), "(label_shape, dtype=dtype, name='label')\n", (1359, 1399), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((1735, 1755), 'tvm.tensor_graph.nn.CELoss', 'CELoss', (['label_tensor'], {}), '(label_tensor)\n', (1741, 1755), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((1764, 1774), 'tvm.tensor_graph.nn.SGD', 'SGD', (['(0.002)'], {}), '(0.002)\n', (1767, 1774), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((1789, 1849), 'tvm.tensor_graph.core.ForwardGraph', 'ForwardGraph', (['[img_tensor]', '[output_tensor]', 'weights_tensors'], {}), '([img_tensor], [output_tensor], weights_tensors)\n', (1801, 1849), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((1861, 1872), 'time.time', 'time.time', ([], {}), '()\n', (1870, 1872), False, 'import time\n'), ((1914, 1933), 'tvm.tensor_graph.core.space.ForwardGraphSpace', 'ForwardGraphSpace', ([], {}), '()\n', (1931, 1933), False, 'from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace\n'), ((1952, 1985), 'tvm.tensor_graph.core.tuner.RandomForwardTuner', 'RandomForwardTuner', (['forward_space'], {}), '(forward_space)\n', (1970, 1985), False, 'from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner\n'), ((2008, 2064), 'tvm.tensor_graph.core.schedule_generator.LayoutTransform', 'LayoutTransform', (['fwd_graph', 'forward_space', 'forward_tuner'], {}), '(fwd_graph, forward_space, forward_tuner)\n', (2023, 2064), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((2121, 2132), 'time.time', 'time.time', ([], {}), '()\n', (2130, 2132), False, 'import time\n'), ((2212, 2223), 'time.time', 'time.time', ([], {}), '()\n', (2221, 2223), False, 'import time\n'), ((2674, 2748), 'tvm.tensor_graph.core.PyTIRGraph', 'PyTIRGraph', (['inputs', 'labels', 'outputs', 'weights', 'loss', 'gradients', 'lr', 'updates'], {}), '(inputs, labels, outputs, weights, loss, gradients, lr, updates)\n', (2684, 2748), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((2803, 2814), 'time.time', 'time.time', ([], {}), '()\n', (2812, 2814), False, 'import time\n'), ((2859, 2875), 'tvm.tensor_graph.core.space.PartitionSpace', 'PartitionSpace', ([], {}), '()\n', (2873, 2875), False, 'from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace\n'), ((2896, 2933), 'tvm.tensor_graph.core.tuner.RandomPartitionTuner', 'RandomPartitionTuner', (['partition_space'], {}), '(partition_space)\n', (2916, 2933), False, 'from tvm.tensor_graph.core.tuner import 
RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner\n'), ((2954, 2981), 'tvm.tensor_graph.core.schedule_generator.form_cut_candidates', 'form_cut_candidates', (['tgraph'], {}), '(tgraph)\n', (2973, 2981), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((3357, 3368), 'time.time', 'time.time', ([], {}), '()\n', (3366, 3368), False, 'import time\n'), ((6405, 6416), 'time.time', 'time.time', ([], {}), '()\n', (6414, 6416), False, 'import time\n'), ((6631, 6642), 'time.time', 'time.time', ([], {}), '()\n', (6640, 6642), False, 'import time\n'), ((6954, 6965), 'time.time', 'time.time', ([], {}), '()\n', (6963, 6965), False, 'import time\n'), ((7951, 7984), 'tvm.tensor_graph.testing.models.resnet.resnet50', 'resnet.resnet50', ([], {'num_classes': '(1000)'}), '(num_classes=1000)\n', (7966, 7984), False, 'from tvm.tensor_graph.testing.models import resnet\n'), ((8000, 8049), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['img_shape'], {'dtype': 'dtype', 'name': '"""image"""'}), "(img_shape, dtype=dtype, name='image')\n", (8011, 8049), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((8067, 8118), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['label_shape'], {'dtype': 'dtype', 'name': '"""label"""'}), "(label_shape, dtype=dtype, name='label')\n", (8078, 8118), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((8454, 8474), 'tvm.tensor_graph.nn.CELoss', 'CELoss', (['label_tensor'], {}), '(label_tensor)\n', (8460, 8474), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((8483, 8493), 'tvm.tensor_graph.nn.SGD', 'SGD', (['(0.002)'], {}), '(0.002)\n', (8486, 8493), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((8508, 8568), 'tvm.tensor_graph.core.ForwardGraph', 'ForwardGraph', (['[img_tensor]', '[output_tensor]', 'weights_tensors'], {}), '([img_tensor], [output_tensor], weights_tensors)\n', (8520, 8568), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((8584, 8653), 'tvm.tensor_graph.core.scheduler.schedule_all', 'schedule_all', (['fwd_graph'], {'loss': 'ce_loss', 'optimizer': 'sgd', 'inference': '(False)'}), '(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)\n', (8596, 8653), False, 'from tvm.tensor_graph.core.scheduler import schedule_all\n'), ((10961, 10994), 'tvm.tensor_graph.testing.models.resnet.resnet50', 'resnet.resnet50', ([], {'num_classes': '(1000)'}), '(num_classes=1000)\n', (10976, 10994), False, 'from tvm.tensor_graph.testing.models import resnet\n'), ((11010, 11059), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['img_shape'], {'dtype': 'dtype', 'name': '"""image"""'}), "(img_shape, dtype=dtype, name='image')\n", (11021, 11059), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((11077, 11128), 'tvm.tensor_graph.core.GraphTensor', 'GraphTensor', (['label_shape'], {'dtype': 'dtype', 'name': '"""label"""'}), "(label_shape, dtype=dtype, name='label')\n", (11088, 11128), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((11464, 11484), 'tvm.tensor_graph.nn.CELoss', 'CELoss', (['label_tensor'], {}), '(label_tensor)\n', (11470, 
11484), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((11493, 11503), 'tvm.tensor_graph.nn.SGD', 'SGD', (['(0.002)'], {}), '(0.002)\n', (11496, 11503), False, 'from tvm.tensor_graph.nn import CELoss, SGD\n'), ((11518, 11578), 'tvm.tensor_graph.core.ForwardGraph', 'ForwardGraph', (['[img_tensor]', '[output_tensor]', 'weights_tensors'], {}), '([img_tensor], [output_tensor], weights_tensors)\n', (11530, 11578), False, 'from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, GraphTensor, GraphOp, PyTIRGraph\n'), ((11594, 11617), 'tvm.tensor_graph.core.scheduler.schedule_all', 'schedule_all', (['fwd_graph'], {}), '(fwd_graph)\n', (11606, 11617), False, 'from tvm.tensor_graph.core.scheduler import schedule_all\n'), ((3118, 3186), 'tvm.tensor_graph.core.schedule_generator.SingleCut', 'SingleCut', (['tgraph', 'name', 'candidate', 'partition_space', 'partition_tuner'], {}), '(tgraph, name, candidate, partition_space, partition_tuner)\n', (3127, 3186), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((3848, 3889), 'tvm.tensor_graph.core.utils.flatten_tir_graph', 'flatten_tir_graph', (['ops'], {'output_first': '(True)'}), '(ops, output_first=True)\n', (3865, 3889), False, 'from tvm.tensor_graph.core.utils import flatten_tir_graph\n'), ((4062, 4131), 'tvm.tensor_graph.core.schedule_generator.form_connected_sets', 'form_connected_sets', (['subgraph', 'op_stat_dict', 'tensors', 'ops', 'down_graph'], {}), '(subgraph, op_stat_dict, tensors, ops, down_graph)\n', (4081, 4131), False, 'from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, SingleCut, form_cut_candidates, LayoutTransform\n'), ((4290, 4301), 'tvm.tensor_graph.core.scheduler.PrimitiveScheduler', 'Scheduler', ([], {}), '()\n', (4299, 4301), True, 'from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler\n'), ((6841, 6852), 'time.time', 'time.time', ([], {}), '()\n', (6850, 6852), False, 'import time\n'), ((6889, 6900), 'time.time', 'time.time', ([], {}), '()\n', (6898, 6900), False, 'import time\n'), ((1598, 1633), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'img_shape'], {}), '(-1, 1, img_shape)\n', (1615, 1633), True, 'import numpy as np\n'), ((1661, 1707), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[batch, num_classes]'], {}), '(-1, 1, [batch, num_classes])\n', (1678, 1707), True, 'import numpy as np\n'), ((8317, 8352), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'img_shape'], {}), '(-1, 1, img_shape)\n', (8334, 8352), True, 'import numpy as np\n'), ((8380, 8426), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[batch, num_classes]'], {}), '(-1, 1, [batch, num_classes])\n', (8397, 8426), True, 'import numpy as np\n'), ((11327, 11362), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'img_shape'], {}), '(-1, 1, img_shape)\n', (11344, 11362), True, 'import numpy as np\n'), ((11390, 11436), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[batch, num_classes]'], {}), '(-1, 1, [batch, num_classes])\n', (11407, 11436), True, 'import numpy as np\n'), ((6092, 6103), 'time.time', 'time.time', ([], {}), '()\n', (6101, 6103), False, 'import time\n'), ((6184, 6195), 'time.time', 'time.time', ([], {}), '()\n', (6193, 6195), False, 'import 
time\n'), ((7596, 7623), 'numpy.array', 'np.array', (['build_time_record'], {}), '(build_time_record)\n', (7604, 7623), True, 'import numpy as np\n')]
|
# coding: utf-8
from __future__ import division, print_function
# Standard library
import time
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
from astropy.extern.six.moves import cPickle as pickle
import pytest
# Project
from ..io import load
from ..core import CompositePotential
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from ....integrate import LeapfrogIntegrator
def partial_derivative(func, point, dim_ix=0, **kwargs):
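    # numerical partial derivative of func along axis dim_ix, with the other coordinates of point held fixed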
xyz = np.array(point, copy=True)
def wraps(a):
xyz[dim_ix] = a
return func(xyz)
return derivative(wraps, point[dim_ix], **kwargs)
class PotentialTestBase(object):
name = None
potential = None # MUST SET THIS
tol = 1E-5
show_plots = False
@classmethod
def setup_class(cls):
if cls.name is None:
cls.name = cls.__name__[4:] # remove Test
print("Testing potential: {}".format(cls.name))
cls.w0 = np.array(cls.w0)
cls.ndim = cls.w0.size // 2
# TODO: need to test also quantity objects and phasespacepositions!
# these are arrays we will test the methods on:
w0_2d = np.repeat(cls.w0[:,None], axis=1, repeats=16)
w0_3d = np.repeat(w0_2d[...,None], axis=2, repeats=8)
w0_list = list(cls.w0)
w0_slice = w0_2d[:,:4]
cls.w0s = [cls.w0, w0_2d, w0_3d, w0_list, w0_slice]
cls._grad_return_shapes = [cls.w0[:cls.ndim].shape + (1,),
w0_2d[:cls.ndim].shape,
w0_3d[:cls.ndim].shape,
cls.w0[:cls.ndim].shape + (1,),
w0_slice[:cls.ndim].shape]
cls._hess_return_shapes = [(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_2d[:cls.ndim].shape,
(cls.ndim,) + w0_3d[:cls.ndim].shape,
(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_slice[:cls.ndim].shape]
cls._valu_return_shapes = [x[1:] for x in cls._grad_return_shapes]
def test_unitsystem(self):
assert isinstance(self.potential.units, UnitSystem)
def test_energy(self):
assert self.ndim == self.potential.ndim
for arr,shp in zip(self.w0s, self._valu_return_shapes):
v = self.potential.energy(arr[:self.ndim])
assert v.shape == shp
g = self.potential.energy(arr[:self.ndim], t=0.1)
g = self.potential.energy(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.energy(arr[:self.ndim], t=t)
g = self.potential.energy(arr[:self.ndim], t=t*self.potential.units['time'])
def test_gradient(self):
for arr,shp in zip(self.w0s, self._grad_return_shapes):
g = self.potential.gradient(arr[:self.ndim])
assert g.shape == shp
g = self.potential.gradient(arr[:self.ndim], t=0.1)
g = self.potential.gradient(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.gradient(arr[:self.ndim], t=t)
g = self.potential.gradient(arr[:self.ndim], t=t*self.potential.units['time'])
def test_hessian(self):
for arr,shp in zip(self.w0s, self._hess_return_shapes):
g = self.potential.hessian(arr[:self.ndim])
assert g.shape == shp
g = self.potential.hessian(arr[:self.ndim], t=0.1)
g = self.potential.hessian(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.hessian(arr[:self.ndim], t=t)
g = self.potential.hessian(arr[:self.ndim], t=t*self.potential.units['time'])
def test_mass_enclosed(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.mass_enclosed(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.mass_enclosed(arr[:self.ndim], t=t)
g = self.potential.mass_enclosed(arr[:self.ndim], t=t*self.potential.units['time'])
def test_circular_velocity(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.circular_velocity(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.circular_velocity(arr[:self.ndim], t=t)
g = self.potential.circular_velocity(arr[:self.ndim], t=t*self.potential.units['time'])
def test_repr(self):
pot_repr = repr(self.potential)
if isinstance(self.potential.units, DimensionlessUnitSystem):
assert "dimensionless" in pot_repr
else:
assert str(self.potential.units['length']) in pot_repr
assert str(self.potential.units['time']) in pot_repr
assert str(self.potential.units['mass']) in pot_repr
for k in self.potential.parameters.keys():
assert "{}=".format(k) in pot_repr
def test_compare(self):
# skip if composite potentials
if len(self.potential.parameters) == 0:
return
other = self.potential.__class__(units=self.potential.units,
**self.potential.parameters)
assert other == self.potential
pars = self.potential.parameters.copy()
for k in pars.keys():
if k != 0:
pars[k] = 1.1*pars[k]
other = self.potential.__class__(units=self.potential.units, **pars)
assert other != self.potential
# check that comparing to non-potentials works
assert not self.potential == "sup"
assert not self.potential == None
def test_plot(self):
p = self.potential
if self.show_plots:
f = p.plot_contours(grid=(np.linspace(-10., 10., 100), 0., 0.),
labels=["X"])
# f.suptitle("slice off from 0., won't have cusp")
# f.savefig(os.path.join(plot_path, "contour_x.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
np.linspace(-10., 10., 100),
0.),
cmap='Blues')
# f.savefig(os.path.join(plot_path, "contour_xy.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
1.,
np.linspace(-10., 10., 100)),
cmap='Blues', labels=["X", "Z"])
# f.savefig(os.path.join(plot_path, "contour_xz.png"))
plt.show()
plt.close('all')
def test_save_load(self, tmpdir):
"""
Test writing to a YAML file, and reading back in
"""
fn = str(tmpdir.join("{}.yml".format(self.name)))
self.potential.save(fn)
p = load(fn)
p.energy(self.w0[:self.w0.size//2])
p.gradient(self.w0[:self.w0.size//2])
def test_numerical_gradient_vs_gradient(self):
"""
Check that the value of the implemented gradient function is close to a
numerically estimated value. This is to check the coded-up version.
"""
dx = 1E-3 * np.sqrt(np.sum(self.w0[:self.w0.size//2]**2))
max_x = np.sqrt(np.sum([x**2 for x in self.w0[:self.w0.size//2]]))
grid = np.linspace(-max_x,max_x,8)
grid = grid[grid != 0.]
grids = [grid for i in range(self.w0.size//2)]
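        # form the Cartesian product of the per-axis grids and flatten it to an (n_points, ndim) array of test positions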
xyz = np.ascontiguousarray(np.vstack(map(np.ravel, np.meshgrid(*grids))).T)
def energy_wrap(xyz):
xyz = np.ascontiguousarray(xyz[None])
return self.potential._energy(xyz, t=np.array([0.]))[0]
num_grad = np.zeros_like(xyz)
for i in range(xyz.shape[0]):
num_grad[i] = np.squeeze([partial_derivative(energy_wrap, xyz[i], dim_ix=dim_ix, n=1, dx=dx, order=5)
for dim_ix in range(self.w0.size//2)])
grad = self.potential._gradient(xyz, t=np.array([0.]))
assert np.allclose(num_grad, grad, rtol=self.tol)
def test_orbit_integration(self):
"""
        Make sure we can integrate an orbit in this potential
"""
w0 = self.w0
w0 = np.vstack((w0,w0,w0)).T
t1 = time.time()
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
print("Integration time (10000 steps): {}".format(time.time() - t1))
if self.show_plots:
f = orbit.plot()
f.suptitle("Vector w0")
plt.show()
plt.close(f)
us = self.potential.units
w0 = PhaseSpacePosition(pos=w0[:self.ndim]*us['length'],
vel=w0[self.ndim:]*us['length']/us['time'])
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
if self.show_plots:
f = orbit.plot()
f.suptitle("Object w0")
plt.show()
plt.close(f)
def test_pickle(self, tmpdir):
fn = str(tmpdir.join("{}.pickle".format(self.name)))
with open(fn, "wb") as f:
pickle.dump(self.potential, f)
with open(fn, "rb") as f:
p = pickle.load(f)
p.energy(self.w0[:self.w0.size//2])
class CompositePotentialTestBase(PotentialTestBase):
@pytest.mark.skip(reason="Skip composite potential repr test")
def test_repr(self):
pass
@pytest.mark.skip(reason="Skip composite potential compare test")
def test_compare(self):
pass
|
[
"numpy.allclose",
"numpy.repeat",
"astropy.extern.six.moves.cPickle.dump",
"pytest.mark.skip",
"numpy.ascontiguousarray",
"scipy.misc.derivative",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.close",
"numpy.sum",
"numpy.vstack",
"time.time",
"numpy.meshgrid",
"numpy.all",
"astropy.extern.six.moves.cPickle.load",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] |
[((553, 579), 'numpy.array', 'np.array', (['point'], {'copy': '(True)'}), '(point, copy=True)\n', (561, 579), True, 'import numpy as np\n'), ((658, 700), 'scipy.misc.derivative', 'derivative', (['wraps', 'point[dim_ix]'], {}), '(wraps, point[dim_ix], **kwargs)\n', (668, 700), False, 'from scipy.misc import derivative\n'), ((10299, 10360), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skip composite potential repr test"""'}), "(reason='Skip composite potential repr test')\n", (10315, 10360), False, 'import pytest\n'), ((10405, 10469), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skip composite potential compare test"""'}), "(reason='Skip composite potential compare test')\n", (10421, 10469), False, 'import pytest\n'), ((1026, 1042), 'numpy.array', 'np.array', (['cls.w0'], {}), '(cls.w0)\n', (1034, 1042), True, 'import numpy as np\n'), ((1229, 1275), 'numpy.repeat', 'np.repeat', (['cls.w0[:, None]'], {'axis': '(1)', 'repeats': '(16)'}), '(cls.w0[:, None], axis=1, repeats=16)\n', (1238, 1275), True, 'import numpy as np\n'), ((1291, 1337), 'numpy.repeat', 'np.repeat', (['w0_2d[..., None]'], {'axis': '(2)', 'repeats': '(8)'}), '(w0_2d[..., None], axis=2, repeats=8)\n', (1300, 1337), True, 'import numpy as np\n'), ((8174, 8203), 'numpy.linspace', 'np.linspace', (['(-max_x)', 'max_x', '(8)'], {}), '(-max_x, max_x, 8)\n', (8185, 8203), True, 'import numpy as np\n'), ((8542, 8560), 'numpy.zeros_like', 'np.zeros_like', (['xyz'], {}), '(xyz)\n', (8555, 8560), True, 'import numpy as np\n'), ((8869, 8911), 'numpy.allclose', 'np.allclose', (['num_grad', 'grad'], {'rtol': 'self.tol'}), '(num_grad, grad, rtol=self.tol)\n', (8880, 8911), True, 'import numpy as np\n'), ((9104, 9115), 'time.time', 'time.time', ([], {}), '()\n', (9113, 9115), False, 'import time\n'), ((4228, 4243), 'numpy.all', 'np.all', (['(g > 0.0)'], {}), '(g > 0.0)\n', (4234, 4243), True, 'import numpy as np\n'), ((4853, 4868), 'numpy.all', 'np.all', (['(g > 0.0)'], {}), '(g > 0.0)\n', (4859, 4868), True, 'import numpy as np\n'), ((7423, 7433), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7431, 7433), True, 'import matplotlib.pyplot as plt\n'), ((7446, 7462), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7455, 7462), True, 'import matplotlib.pyplot as plt\n'), ((8107, 8162), 'numpy.sum', 'np.sum', (['[(x ** 2) for x in self.w0[:self.w0.size // 2]]'], {}), '([(x ** 2) for x in self.w0[:self.w0.size // 2]])\n', (8113, 8162), True, 'import numpy as np\n'), ((8422, 8453), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['xyz[None]'], {}), '(xyz[None])\n', (8442, 8453), True, 'import numpy as np\n'), ((9066, 9089), 'numpy.vstack', 'np.vstack', (['(w0, w0, w0)'], {}), '((w0, w0, w0))\n', (9075, 9089), True, 'import numpy as np\n'), ((9450, 9460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9458, 9460), True, 'import matplotlib.pyplot as plt\n'), ((9473, 9485), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (9482, 9485), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9927, 9929), True, 'import matplotlib.pyplot as plt\n'), ((9942, 9954), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (9951, 9954), True, 'import matplotlib.pyplot as plt\n'), ((10098, 10128), 'astropy.extern.six.moves.cPickle.dump', 'pickle.dump', (['self.potential', 'f'], {}), '(self.potential, f)\n', (10109, 10128), True, 'from astropy.extern.six.moves import cPickle as pickle\n'), ((10180, 
10194), 'astropy.extern.six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10191, 10194), True, 'from astropy.extern.six.moves import cPickle as pickle\n'), ((8045, 8085), 'numpy.sum', 'np.sum', (['(self.w0[:self.w0.size // 2] ** 2)'], {}), '(self.w0[:self.w0.size // 2] ** 2)\n', (8051, 8085), True, 'import numpy as np\n'), ((8837, 8852), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8845, 8852), True, 'import numpy as np\n'), ((9325, 9336), 'time.time', 'time.time', ([], {}), '()\n', (9334, 9336), False, 'import time\n'), ((6596, 6625), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (6607, 6625), True, 'import numpy as np\n'), ((6848, 6877), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (6859, 6877), True, 'import numpy as np\n'), ((6915, 6944), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (6926, 6944), True, 'import numpy as np\n'), ((7139, 7168), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (7150, 7168), True, 'import numpy as np\n'), ((7248, 7277), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(100)'], {}), '(-10.0, 10.0, 100)\n', (7259, 7277), True, 'import numpy as np\n'), ((8348, 8367), 'numpy.meshgrid', 'np.meshgrid', (['*grids'], {}), '(*grids)\n', (8359, 8367), True, 'import numpy as np\n'), ((8503, 8518), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8511, 8518), True, 'import numpy as np\n'), ((2734, 2747), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (2742, 2747), True, 'import numpy as np\n'), ((3283, 3296), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (3291, 3296), True, 'import numpy as np\n'), ((3832, 3845), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (3840, 3845), True, 'import numpy as np\n'), ((4437, 4450), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (4445, 4450), True, 'import numpy as np\n'), ((5070, 5083), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (5078, 5083), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import json
import time
import pandas as pd
import tensorflow as tf
import numpy as np
import math
from decimal import Decimal
import matplotlib.pyplot as plt
from agents.ornstein_uhlenbeck import OrnsteinUhlenbeckActionNoise
eps=10e-8
epochs=0
M=0
class StockTrader():
def __init__(self):
self.reset()
def reset(self):
self.wealth = 10e3
self.total_reward = 0
self.ep_ave_max_q = 0
self.loss = 0
self.actor_loss=0
self.wealth_history = []
self.r_history = []
self.w_history = []
self.p_history = []
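        # Ornstein-Uhlenbeck process gives temporally correlated exploration noise over the M portfolio weights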
self.noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(M))
def update_summary(self,loss,r,q_value,actor_loss,w,p):
self.loss += loss
self.actor_loss+=actor_loss
self.total_reward+=r
self.ep_ave_max_q += q_value
self.r_history.append(r)
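        # r is a log-return, so wealth compounds multiplicatively below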
self.wealth = self.wealth * math.exp(r)
self.wealth_history.append(self.wealth)
self.w_history.extend([','.join([str(Decimal(str(w0)).quantize(Decimal('0.00'))) for w0 in w.tolist()[0]])])
self.p_history.extend([','.join([str(Decimal(str(p0)).quantize(Decimal('0.000'))) for p0 in p.tolist()])])
def write(self,epoch):
wealth_history = pd.Series(self.wealth_history)
r_history = pd.Series(self.r_history)
w_history = pd.Series(self.w_history)
p_history = pd.Series(self.p_history)
history = pd.concat([wealth_history, r_history, w_history, p_history], axis=1)
history.to_csv('result' + str(epoch) + '-' + str(math.exp(np.sum(self.r_history)) * 100) + '.csv')
def print_result(self,epoch,agent):
self.total_reward=math.exp(self.total_reward) * 100
print('*-----Episode: {:d}, Reward:{:.6f}%, ep_ave_max_q:{:.2f}, actor_loss:{:2f}-----*'.format(epoch, self.total_reward,self.ep_ave_max_q,self.actor_loss))
agent.write_summary(self.loss, self.total_reward,self.ep_ave_max_q,self.actor_loss, epoch)
agent.save_model()
def plot_result(self):
pd.Series(self.wealth_history).plot()
plt.show()
def action_processor(self,a,ratio):
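        # perturb the action with OU noise scaled by ratio (which decays over training), then clip and renormalize to a valid weight vector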
a = np.clip(a + self.noise() * ratio, 0, 1)
a = a / (a.sum() + eps)
return a
def parse_info(info):
    return info['reward'], info['continue'], info['next state'], info['weight vector'], info['price'], info['risk']
def traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable):
info = env.step(None,None)
r,contin,s,w1,p,risk=parse_info(info)
contin=1
t=0
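    # run one episode: predict new weights, step the environment, and store each transition for training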
while contin:
w2 = agent.predict(s,w1)
if noise_flag=='True':
w2=stocktrader.action_processor(w2,(epochs-epoch)/epochs)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p,risk = parse_info(env_info)
if framework=='PG':
agent.save_transition(s,p,w2,w1)
else:
agent.save_transition(s, w2, r-risk, contin, s_next, w1)
loss, q_value,actor_loss=0,0,0
if framework=='DDPG':
if not contin and trainable=="True":
agent_info= agent.train(method,epoch)
loss, q_value=agent_info["critic_loss"],agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PPO':
if not contin and trainable=="True":
agent_info = agent.train(method, epoch)
loss, q_value = agent_info["critic_loss"], agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PG':
if not contin and trainable=="True":
agent.train()
stocktrader.update_summary(loss,r,q_value,actor_loss,w2,p)
s = s_next
t=t+1
def backtest(agent,env):
print("starting to backtest......")
from agents.UCRP import UCRP
from agents.Winner import WINNER
from agents.Losser import LOSSER
agents=[]
agents.append(agent)
agents.append(WINNER())
agents.append(UCRP())
agents.append(LOSSER())
labels=['PG','Winner','UCRP','Losser']
wealths_result=[]
rs_result=[]
for i,agent in enumerate(agents):
info = env.step(None, None)
r, contin, s, w1, p, risk = parse_info(info)
contin = 1
wealth=10000
wealths = [wealth]
rs=[1]
while contin:
w2 = agent.predict(s, w1)
if i==0:
print(w2)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p, risk = parse_info(env_info)
wealth=wealth*math.exp(r)
rs.append(math.exp(r)-1)
wealths.append(wealth)
s=s_next
print('finish one agent')
wealths_result.append(wealths)
rs_result.append(rs)
for i in range(len(agents)):
plt.plot(wealths_result[i],label=labels[i])
print(labels[i],' ',np.mean(rs_result[i]),' ',np.std(rs_result[i]))
plt.legend()
plt.show()
def parse_config(config,mode):
codes = config["session"]["codes"]
start_date = config["session"]["start_date"]
end_date = config["session"]["end_date"]
features = config["session"]["features"]
agent_config = config["session"]["agents"]
market = config["session"]["market_types"]
noise_flag, record_flag, plot_flag=config["session"]["noise_flag"],config["session"]["record_flag"],config["session"]["plot_flag"]
predictor, framework, window_length = agent_config
reload_flag, trainable=config["session"]['reload_flag'],config["session"]['trainable']
method=config["session"]['method']
global epochs
epochs = int(config["session"]["epochs"])
if mode=='test':
record_flag='True'
noise_flag='False'
plot_flag='True'
reload_flag='True'
trainable='False'
method='model_free'
print("*--------------------Training Status-------------------*")
print('Codes:',codes)
print("Date from",start_date,' to ',end_date)
print('Features:',features)
print("Agent:Noise(",noise_flag,')---Recoed(',noise_flag,')---Plot(',plot_flag,')')
print("Market Type:",market)
print("Predictor:",predictor," Framework:", framework," Window_length:",window_length)
print("Epochs:",epochs)
print("Trainable:",trainable)
print("Reloaded Model:",reload_flag)
print("Method",method)
print("Noise_flag",noise_flag)
print("Record_flag",record_flag)
print("Plot_flag",plot_flag)
return codes,start_date,end_date,features,agent_config,market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method
def session(config,mode):
from data.environment import Environment
codes, start_date, end_date, features, agent_config, market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method=parse_config(config,mode)
env = Environment(start_date, end_date, codes, features, int(window_length),market)
global M
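    # one portfolio weight per traded code plus one extra slot (typically the cash position)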
M=len(codes)+1
if framework == 'DDPG':
print("*-----------------Loading DDPG Agent---------------------*")
from agents.ddpg import DDPG
agent = DDPG(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PPO':
print("*-----------------Loading PPO Agent---------------------*")
from agents.ppo import PPO
agent = PPO(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PG':
print("*-----------------Loading PG Agent---------------------*")
from agents.pg import PG
agent = PG(len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
stocktrader=StockTrader()
if mode=='train':
print("Training with {:d}".format(epochs))
for epoch in range(epochs):
print("Now we are at epoch", epoch)
traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable)
if record_flag=='True':
stocktrader.write(epoch)
if plot_flag=='True':
stocktrader.plot_result()
agent.reset_buffer()
stocktrader.print_result(epoch,agent)
stocktrader.reset()
elif mode=='test':
backtest(agent, env)
def build_parser():
parser = ArgumentParser(description='Provide arguments for training different DDPG or PPO models in Portfolio Management')
parser.add_argument("--mode",dest="mode",help="download(China), train, test",metavar="MODE", default="train",required=True)
parser.add_argument("--model",dest="model",help="DDPG,PPO",metavar="MODEL", default="DDPG",required=False)
return parser
def main():
parser = build_parser()
args=vars(parser.parse_args())
with open('config.json') as f:
config=json.load(f)
if args['mode']=='download':
from data.download_data import DataDownloader
data_downloader=DataDownloader(config)
data_downloader.save_data()
else:
session(config,args['mode'])
if __name__=="__main__":
main()
|
[
"pandas.Series",
"numpy.mean",
"data.download_data.DataDownloader",
"argparse.ArgumentParser",
"decimal.Decimal",
"agents.Winner.WINNER",
"matplotlib.pyplot.plot",
"numpy.sum",
"agents.UCRP.UCRP",
"numpy.zeros",
"numpy.std",
"json.load",
"math.exp",
"agents.Losser.LOSSER",
"pandas.concat",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((5269, 5281), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5279, 5281), True, 'import matplotlib.pyplot as plt\n'), ((5287, 5297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5295, 5297), True, 'import matplotlib.pyplot as plt\n'), ((8870, 8993), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Provide arguments for training different DDPG or PPO models in Portfolio Management"""'}), "(description=\n 'Provide arguments for training different DDPG or PPO models in Portfolio Management'\n )\n", (8884, 8993), False, 'from argparse import ArgumentParser\n'), ((1368, 1398), 'pandas.Series', 'pd.Series', (['self.wealth_history'], {}), '(self.wealth_history)\n', (1377, 1398), True, 'import pandas as pd\n'), ((1420, 1445), 'pandas.Series', 'pd.Series', (['self.r_history'], {}), '(self.r_history)\n', (1429, 1445), True, 'import pandas as pd\n'), ((1467, 1492), 'pandas.Series', 'pd.Series', (['self.w_history'], {}), '(self.w_history)\n', (1476, 1492), True, 'import pandas as pd\n'), ((1514, 1539), 'pandas.Series', 'pd.Series', (['self.p_history'], {}), '(self.p_history)\n', (1523, 1539), True, 'import pandas as pd\n'), ((1559, 1627), 'pandas.concat', 'pd.concat', (['[wealth_history, r_history, w_history, p_history]'], {'axis': '(1)'}), '([wealth_history, r_history, w_history, p_history], axis=1)\n', (1568, 1627), True, 'import pandas as pd\n'), ((2221, 2231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((4266, 4274), 'agents.Winner.WINNER', 'WINNER', ([], {}), '()\n', (4272, 4274), False, 'from agents.Winner import WINNER\n'), ((4295, 4301), 'agents.UCRP.UCRP', 'UCRP', ([], {}), '()\n', (4299, 4301), False, 'from agents.UCRP import UCRP\n'), ((4322, 4330), 'agents.Losser.LOSSER', 'LOSSER', ([], {}), '()\n', (4328, 4330), False, 'from agents.Losser import LOSSER\n'), ((5139, 5183), 'matplotlib.pyplot.plot', 'plt.plot', (['wealths_result[i]'], {'label': 'labels[i]'}), '(wealths_result[i], label=labels[i])\n', (5147, 5183), True, 'import matplotlib.pyplot as plt\n'), ((9378, 9390), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9387, 9390), False, 'import json\n'), ((1017, 1028), 'math.exp', 'math.exp', (['r'], {}), '(r)\n', (1025, 1028), False, 'import math\n'), ((1806, 1833), 'math.exp', 'math.exp', (['self.total_reward'], {}), '(self.total_reward)\n', (1814, 1833), False, 'import math\n'), ((5214, 5235), 'numpy.mean', 'np.mean', (['rs_result[i]'], {}), '(rs_result[i])\n', (5221, 5235), True, 'import numpy as np\n'), ((5242, 5262), 'numpy.std', 'np.std', (['rs_result[i]'], {}), '(rs_result[i])\n', (5248, 5262), True, 'import numpy as np\n'), ((9517, 9539), 'data.download_data.DataDownloader', 'DataDownloader', (['config'], {}), '(config)\n', (9531, 9539), False, 'from data.download_data import DataDownloader\n'), ((738, 749), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (746, 749), True, 'import numpy as np\n'), ((2174, 2204), 'pandas.Series', 'pd.Series', (['self.wealth_history'], {}), '(self.wealth_history)\n', (2183, 2204), True, 'import pandas as pd\n'), ((4881, 4892), 'math.exp', 'math.exp', (['r'], {}), '(r)\n', (4889, 4892), False, 'import math\n'), ((4916, 4927), 'math.exp', 'math.exp', (['r'], {}), '(r)\n', (4924, 4927), False, 'import math\n'), ((1150, 1165), 'decimal.Decimal', 'Decimal', (['"""0.00"""'], {}), "('0.00')\n", (1157, 1165), False, 'from decimal import Decimal\n'), ((1268, 1284), 'decimal.Decimal', 'Decimal', (['"""0.000"""'], {}), "('0.000')\n", 
(1275, 1284), False, 'from decimal import Decimal\n'), ((1695, 1717), 'numpy.sum', 'np.sum', (['self.r_history'], {}), '(self.r_history)\n', (1701, 1717), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2016-03-16 11:28:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-04-04 13:50:50
"""
Batch export freesurfer results to animated gifs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as op
import subprocess as sp
from shutil import rmtree
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from tempfile import mkdtemp
from errno import EEXIST
import glob
from six import string_types
import numpy as np
import nibabel as nb
from skimage import exposure
def main():
"""Entry point"""
parser = ArgumentParser(description='Batch export freesurfer results to animated gifs',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-S', '--subjects-dir', action='store', default=os.getcwd())
g_input.add_argument('-s', '--subject-id', action='store')
g_input.add_argument('-t', '--temp-dir', action='store')
g_input.add_argument('--keep-temp', action='store_true', default=False)
g_input.add_argument('--zoom', action='store_true', default=False)
g_input.add_argument('--hist-eq', action='store_true', default=False)
g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store', default='fs2gif')
opts = parser.parse_args()
if opts.temp_dir is None:
tmpdir = mkdtemp()
else:
tmpdir = op.abspath(opts.temp_dir)
try:
os.makedirs(tmpdir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
out_dir = op.abspath(opts.output_dir)
try:
os.makedirs(out_dir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
subjects_dir = op.abspath(opts.subjects_dir)
subject_list = opts.subject_id
if subject_list is None:
subject_list = [name for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
elif isinstance(subject_list, string_types):
if '*' not in subject_list:
subject_list = [subject_list]
else:
all_dirs = [op.join(subjects_dir, name) for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
pattern = glob.glob(op.abspath(op.join(subjects_dir, opts.subject_id)))
subject_list = list(set(pattern).intersection(set(all_dirs)))
environ = os.environ.copy()
environ['SUBJECTS_DIR'] = subjects_dir
# tcl_file = pkgr.resource_filename('structural_dhcp_mriqc', 'data/fsexport.tcl')
tcl_contents = """
SetOrientation 0
SetCursor 0 128 128 128
SetDisplayFlag 3 0
SetDisplayFlag 22 1
set i 0
"""
for sub_path in subject_list:
subid = op.basename(sub_path)
tmp_sub = op.join(tmpdir, subid)
try:
os.makedirs(tmp_sub)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
niifile = op.join(tmp_sub, '%s.nii.gz') % subid
ref_file = op.join(sub_path, 'mri', 'T1.mgz')
sp.call(['mri_convert', op.join(sub_path, 'mri', 'norm.mgz'), niifile],
cwd=tmp_sub)
data = nb.load(niifile).get_data()
data[data > 0] = 1
# Compute brain bounding box
indexes = np.argwhere(data)
bbox_min = indexes.min(0)
bbox_max = indexes.max(0) + 1
center = np.average([bbox_min, bbox_max], axis=0)
if opts.hist_eq:
modnii = op.join(tmp_sub, '%s.nii.gz' % subid)
ref_file = op.join(tmp_sub, '%s.mgz' % subid)
img = nb.load(niifile)
data = exposure.equalize_adapthist(img.get_data(), clip_limit=0.03)
nb.Nifti1Image(data, img.get_affine(), img.get_header()).to_filename(modnii)
sp.call(['mri_convert', modnii, ref_file], cwd=tmp_sub)
if not opts.zoom:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, '%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),
'%s/%s.gif' % (out_dir, subid)])
else:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] + 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-lh-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=environ)
# Export tiffs for right hemisphere
tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] - 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-rh-' % (tmp_sub, subid) + '%03d.tif" $slice]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub, subid),
'%s/%s-lh.gif' % (out_dir, subid)])
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub, subid),
'%s/%s-rh.gif' % (out_dir, subid)])
if not opts.keep_temp:
try:
rmtree(tmp_sub)
except:
pass
if __name__ == '__main__':
main()
|
[
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.average",
"nibabel.load",
"os.path.join",
"os.environ.copy",
"os.getcwd",
"numpy.argwhere",
"tempfile.mkdtemp",
"os.path.basename",
"subprocess.call",
"shutil.rmtree",
"os.path.abspath"
] |
[((752, 878), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Batch export freesurfer results to animated gifs"""', 'formatter_class': 'RawTextHelpFormatter'}), "(description=\n 'Batch export freesurfer results to animated gifs', formatter_class=\n RawTextHelpFormatter)\n", (766, 878), False, 'from argparse import ArgumentParser\n'), ((1810, 1837), 'os.path.abspath', 'op.abspath', (['opts.output_dir'], {}), '(opts.output_dir)\n', (1820, 1837), True, 'import os.path as op\n'), ((1977, 2006), 'os.path.abspath', 'op.abspath', (['opts.subjects_dir'], {}), '(opts.subjects_dir)\n', (1987, 2006), True, 'import os.path as op\n'), ((2682, 2699), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2697, 2699), False, 'import os\n'), ((1594, 1603), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (1601, 1603), False, 'from tempfile import mkdtemp\n'), ((1631, 1656), 'os.path.abspath', 'op.abspath', (['opts.temp_dir'], {}), '(opts.temp_dir)\n', (1641, 1656), True, 'import os.path as op\n'), ((1855, 1875), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (1866, 1875), False, 'import os\n'), ((2995, 3016), 'os.path.basename', 'op.basename', (['sub_path'], {}), '(sub_path)\n', (3006, 3016), True, 'import os.path as op\n'), ((3035, 3057), 'os.path.join', 'op.join', (['tmpdir', 'subid'], {}), '(tmpdir, subid)\n', (3042, 3057), True, 'import os.path as op\n'), ((3273, 3307), 'os.path.join', 'op.join', (['sub_path', '"""mri"""', '"""T1.mgz"""'], {}), "(sub_path, 'mri', 'T1.mgz')\n", (3280, 3307), True, 'import os.path as op\n'), ((3543, 3560), 'numpy.argwhere', 'np.argwhere', (['data'], {}), '(data)\n', (3554, 3560), True, 'import numpy as np\n'), ((3650, 3690), 'numpy.average', 'np.average', (['[bbox_min, bbox_max]'], {'axis': '(0)'}), '([bbox_min, bbox_max], axis=0)\n', (3660, 3690), True, 'import numpy as np\n'), ((1020, 1031), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1029, 1031), False, 'import os\n'), ((1682, 1701), 'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (1693, 1701), False, 'import os\n'), ((3083, 3103), 'os.makedirs', 'os.makedirs', (['tmp_sub'], {}), '(tmp_sub)\n', (3094, 3103), False, 'import os\n'), ((3216, 3245), 'os.path.join', 'op.join', (['tmp_sub', '"""%s.nii.gz"""'], {}), "(tmp_sub, '%s.nii.gz')\n", (3223, 3245), True, 'import os.path as op\n'), ((3738, 3775), 'os.path.join', 'op.join', (['tmp_sub', "('%s.nii.gz' % subid)"], {}), "(tmp_sub, '%s.nii.gz' % subid)\n", (3745, 3775), True, 'import os.path as op\n'), ((3799, 3833), 'os.path.join', 'op.join', (['tmp_sub', "('%s.mgz' % subid)"], {}), "(tmp_sub, '%s.mgz' % subid)\n", (3806, 3833), True, 'import os.path as op\n'), ((3852, 3868), 'nibabel.load', 'nb.load', (['niifile'], {}), '(niifile)\n', (3859, 3868), True, 'import nibabel as nb\n'), ((4050, 4105), 'subprocess.call', 'sp.call', (["['mri_convert', modnii, ref_file]"], {'cwd': 'tmp_sub'}), "(['mri_convert', modnii, ref_file], cwd=tmp_sub)\n", (4057, 4105), True, 'import subprocess as sp\n'), ((4204, 4238), 'os.path.join', 'op.join', (['tmp_sub', "('%s.tcl' % subid)"], {}), "(tmp_sub, '%s.tcl' % subid)\n", (4211, 4238), True, 'import os.path as op\n'), ((4778, 4888), 'subprocess.call', 'sp.call', (["['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl',\n tcl_file]"], {'env': 'environ'}), "(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial',\n '-tcl', tcl_file], env=environ)\n", (4785, 4888), True, 'import subprocess as sp\n'), ((4935, 5055), 'subprocess.call', 'sp.call', 
(["['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),\n '%s/%s.gif' % (out_dir, subid)]"], {}), "(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub,\n subid), '%s/%s.gif' % (out_dir, subid)])\n", (4942, 5055), True, 'import subprocess as sp\n'), ((5158, 5195), 'os.path.join', 'op.join', (['tmp_sub', "('lh-%s.tcl' % subid)"], {}), "(tmp_sub, 'lh-%s.tcl' % subid)\n", (5165, 5195), True, 'import os.path as op\n'), ((5883, 5970), 'subprocess.call', 'sp.call', (["['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file]"], {'env': 'environ'}), "(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=\n environ)\n", (5890, 5970), True, 'import subprocess as sp\n'), ((6038, 6075), 'os.path.join', 'op.join', (['tmp_sub', "('rh-%s.tcl' % subid)"], {}), "(tmp_sub, 'rh-%s.tcl' % subid)\n", (6045, 6075), True, 'import os.path as op\n'), ((6767, 6854), 'subprocess.call', 'sp.call', (["['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file]"], {'env': 'environ'}), "(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=\n environ)\n", (6774, 6854), True, 'import subprocess as sp\n'), ((6901, 7028), 'subprocess.call', 'sp.call', (["['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub,\n subid), '%s/%s-lh.gif' % (out_dir, subid)]"], {}), "(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (\n tmp_sub, subid), '%s/%s-lh.gif' % (out_dir, subid)])\n", (6908, 7028), True, 'import subprocess as sp\n'), ((7057, 7184), 'subprocess.call', 'sp.call', (["['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub,\n subid), '%s/%s-rh.gif' % (out_dir, subid)]"], {}), "(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (\n tmp_sub, subid), '%s/%s-rh.gif' % (out_dir, subid)])\n", (7064, 7184), True, 'import subprocess as sp\n'), ((2112, 2136), 'os.listdir', 'os.listdir', (['subjects_dir'], {}), '(subjects_dir)\n', (2122, 2136), False, 'import os\n'), ((3340, 3376), 'os.path.join', 'op.join', (['sub_path', '"""mri"""', '"""norm.mgz"""'], {}), "(sub_path, 'mri', 'norm.mgz')\n", (3347, 3376), True, 'import os.path as op\n'), ((3432, 3448), 'nibabel.load', 'nb.load', (['niifile'], {}), '(niifile)\n', (3439, 3448), True, 'import nibabel as nb\n'), ((7267, 7282), 'shutil.rmtree', 'rmtree', (['tmp_sub'], {}), '(tmp_sub)\n', (7273, 7282), False, 'from shutil import rmtree\n'), ((2173, 2205), 'os.path.join', 'os.path.join', (['subjects_dir', 'name'], {}), '(subjects_dir, name)\n', (2185, 2205), False, 'import os\n'), ((2373, 2400), 'os.path.join', 'op.join', (['subjects_dir', 'name'], {}), '(subjects_dir, name)\n', (2380, 2400), True, 'import os.path as op\n'), ((2413, 2437), 'os.listdir', 'os.listdir', (['subjects_dir'], {}), '(subjects_dir)\n', (2423, 2437), False, 'import os\n'), ((2552, 2590), 'os.path.join', 'op.join', (['subjects_dir', 'opts.subject_id'], {}), '(subjects_dir, opts.subject_id)\n', (2559, 2590), True, 'import os.path as op\n'), ((2474, 2506), 'os.path.join', 'os.path.join', (['subjects_dir', 'name'], {}), '(subjects_dir, name)\n', (2486, 2506), False, 'import os\n')]
|
# __author__ = 'Dave'
import cv2
from skimage import io
from skimage.transform import probabilistic_hough_line
import matplotlib.pyplot as plt
import os
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore', category=RuntimeWarning)
class CorrectImage(object):
def __init__(self):
self.path = ""
self.name = ""
self.image = None
self.edges = None
self.lines = None
def _load_image(self, image):
"""
:param image: image file name (str)
:return: skimage image data
"""
filename = os.path.join(self.path, image)
return io.imread(filename)
def add_path(self, image_path):
"""
        Sets the directory from which image files will be loaded
:param image_path: (string)
"""
self.path = image_path + '/'
def add_image(self, filename):
"""
        Sets the image file to process and runs the Hough transform on it
:param filename: (string)
"""
self.name = filename
self.hough_transform()
def _detect_edges(self, image, vary=False, plot=False):
"""
:param image: image file name (str)
:param vary: turn tunable plotting on
:param plot: turn plotting on
:return: detected edges with variable filters
"""
self.image = self._load_image(image)
if vary:
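            # interactive mode: tune the two Canny thresholds with OpenCV trackbars (press ESC to finish)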
def nothing(x):
pass
cv2.namedWindow('image')
cv2.createTrackbar('th1', 'image', 0, 255, nothing)
cv2.createTrackbar('th2', 'image', 0, 255, nothing)
while True:
th1 = cv2.getTrackbarPos('th1', 'image')
th2 = cv2.getTrackbarPos('th2', 'image')
edges = cv2.Canny(self.image, th1, th2)
cv2.imshow('image', edges)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
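        # the final edge map is computed with fixed thresholds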
edges = cv2.Canny(self.image, 255, 255)
if plot:
cv2.namedWindow('image')
cv2.imshow('image', edges)
cv2.waitKey(5000)
cv2.destroyAllWindows()
return edges
def hough_transform(self, vary=False, plot=False):
"""
:param vary: turn edge detection tunable plotting on
:param plot: turn plotting on
:return: numpy array of probabilistically found straight lines
"""
if self.name == "":
raise ValueError('Missing image: you need to specify the image file using add_image.')
self.edges = self._detect_edges(self.name, vary=vary, plot=plot)
self.lines = probabilistic_hough_line(self.edges, threshold=10, line_length=5, line_gap=3)
if plot:
for line in self.lines:
p0, p1 = line
plt.plot((p0[0], p1[0]), (p0[1], p1[1]))
plt.show()
@staticmethod
def slope(lines):
"""
        :param lines: array of coordinates (i.e. [((x0, y0), (xf, yf)), ...])
:return: array of slope values with the same number of entries as lines
"""
        # for doing vectorized subtraction across all line pairs,
        # we negate the first endpoint of each line so that summing gives (end - start)
sign_op = np.ones_like(lines)
sign_op[:, :, 0] *= -1
        # get the coordinate differences (end minus start) in x and y for each line
slopes = np.sum(sign_op * lines, axis=2)
# compute the slopes of each line for every line pair
slopes = slopes[:, :, 0] / slopes[:, :, 1]
# turn infinite values to a finite, but very large value
slopes[np.isinf(slopes)] = 1e6
# this catches cases when the line - as defined - is actually a point and the slope doesn't exist
slopes[np.isnan(slopes)] = 0
return slopes
def line_pair(self, num_pairs):
"""
:param num_pairs: number of line pairs to take (int)
:return: line pairs (array)
"""
idx = np.random.randint(len(self.lines), size=num_pairs * 2)
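        # draw 2 * num_pairs random line indices (with replacement) and group them into pairs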
lines = np.array(self.lines)[idx]
return lines.reshape(num_pairs, 2, 2, 2)
@staticmethod
def mutation(pairs, p_mutate=0.01):
"""
:param pairs: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines
:param p_mutate: (float) probability of a mutation
:return: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines with mutations
"""
for i in range(len(pairs)):
if p_mutate > random.random():
# column = np.random.randint(low=0, high=2)
for column in [0, 1]:
t = pairs[i, :, :, column]
low, high = np.min(t), np.max(t)
if high == low:
high *= 2
pairs[i, :, :, column] = np.random.randint(low=low, high=high, size=t.shape)
return pairs
|
[
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.min",
"skimage.transform.probabilistic_hough_line",
"numpy.isinf",
"cv2.waitKey",
"skimage.io.imread",
"numpy.isnan",
"cv2.Canny",
"cv2.createTrackbar",
"cv2.namedWindow",
"warnings.filterwarnings",
"matplotlib.pyplot.show",
"numpy.ones_like",
"os.path.join",
"numpy.sum",
"numpy.random.randint",
"random.random",
"cv2.getTrackbarPos"
] |
[((205, 263), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (228, 263), False, 'import warnings\n'), ((601, 631), 'os.path.join', 'os.path.join', (['self.path', 'image'], {}), '(self.path, image)\n', (613, 631), False, 'import os\n'), ((647, 666), 'skimage.io.imread', 'io.imread', (['filename'], {}), '(filename)\n', (656, 666), False, 'from skimage import io\n'), ((1968, 1999), 'cv2.Canny', 'cv2.Canny', (['self.image', '(255)', '(255)'], {}), '(self.image, 255, 255)\n', (1977, 1999), False, 'import cv2\n'), ((2652, 2729), 'skimage.transform.probabilistic_hough_line', 'probabilistic_hough_line', (['self.edges'], {'threshold': '(10)', 'line_length': '(5)', 'line_gap': '(3)'}), '(self.edges, threshold=10, line_length=5, line_gap=3)\n', (2676, 2729), False, 'from skimage.transform import probabilistic_hough_line\n'), ((3274, 3293), 'numpy.ones_like', 'np.ones_like', (['lines'], {}), '(lines)\n', (3286, 3293), True, 'import numpy as np\n'), ((3428, 3459), 'numpy.sum', 'np.sum', (['(sign_op * lines)'], {'axis': '(2)'}), '(sign_op * lines, axis=2)\n', (3434, 3459), True, 'import numpy as np\n'), ((1428, 1452), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (1443, 1452), False, 'import cv2\n'), ((1465, 1516), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""th1"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('th1', 'image', 0, 255, nothing)\n", (1483, 1516), False, 'import cv2\n'), ((1529, 1580), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""th2"""', '"""image"""', '(0)', '(255)', 'nothing'], {}), "('th2', 'image', 0, 255, nothing)\n", (1547, 1580), False, 'import cv2\n'), ((1928, 1951), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1949, 1951), False, 'import cv2\n'), ((2029, 2053), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (2044, 2053), False, 'import cv2\n'), ((2066, 2092), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'edges'], {}), "('image', edges)\n", (2076, 2092), False, 'import cv2\n'), ((2105, 2122), 'cv2.waitKey', 'cv2.waitKey', (['(5000)'], {}), '(5000)\n', (2116, 2122), False, 'import cv2\n'), ((2135, 2158), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2156, 2158), False, 'import cv2\n'), ((2882, 2892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2890, 2892), True, 'import matplotlib.pyplot as plt\n'), ((3655, 3671), 'numpy.isinf', 'np.isinf', (['slopes'], {}), '(slopes)\n', (3663, 3671), True, 'import numpy as np\n'), ((3801, 3817), 'numpy.isnan', 'np.isnan', (['slopes'], {}), '(slopes)\n', (3809, 3817), True, 'import numpy as np\n'), ((4090, 4110), 'numpy.array', 'np.array', (['self.lines'], {}), '(self.lines)\n', (4098, 4110), True, 'import numpy as np\n'), ((1628, 1662), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""th1"""', '"""image"""'], {}), "('th1', 'image')\n", (1646, 1662), False, 'import cv2\n'), ((1685, 1719), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""th2"""', '"""image"""'], {}), "('th2', 'image')\n", (1703, 1719), False, 'import cv2\n'), ((1744, 1775), 'cv2.Canny', 'cv2.Canny', (['self.image', 'th1', 'th2'], {}), '(self.image, th1, th2)\n', (1753, 1775), False, 'import cv2\n'), ((1792, 1818), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'edges'], {}), "('image', edges)\n", (1802, 1818), False, 'import cv2\n'), ((2829, 2869), 'matplotlib.pyplot.plot', 'plt.plot', (['(p0[0], p1[0])', '(p0[1], p1[1])'], {}), '((p0[0], p1[0]), 
(p0[1], p1[1]))\n', (2837, 2869), True, 'import matplotlib.pyplot as plt\n'), ((4552, 4567), 'random.random', 'random.random', ([], {}), '()\n', (4565, 4567), False, 'import random\n'), ((1839, 1853), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1850, 1853), False, 'import cv2\n'), ((4882, 4933), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'high': 'high', 'size': 't.shape'}), '(low=low, high=high, size=t.shape)\n', (4899, 4933), True, 'import numpy as np\n'), ((4746, 4755), 'numpy.min', 'np.min', (['t'], {}), '(t)\n', (4752, 4755), True, 'import numpy as np\n'), ((4757, 4766), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (4763, 4766), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
def to_cpu(tensor):
return tensor.detach().cpu()
def xywh2xyxy(x):
''' Convert bounding box from [x, y, w, h] to [x1, y1, x2, y2]
:param x: bounding boxes array
:return: Converted bounding box array
'''
y = x.new(x.shape)
y[..., 0] = x[..., 0] - x[..., 2] / 2
y[..., 1] = x[..., 1] - x[..., 3] / 2
y[..., 2] = x[..., 0] + x[..., 2] / 2
y[..., 3] = x[..., 1] + x[..., 3] / 2
return y
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
def rescale_boxes(boxes, current_dim, original_shape):
""" Rescales bounding boxes to the original shape """
orig_h, orig_w = original_shape
# The amount of padding that was added
pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
# Image height and width after padding is removed
unpad_h = current_dim - pad_y
unpad_w = current_dim - pad_x
# Rescale bounding boxes to dimension of original image
boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
return boxes
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# From (center x, center y, width, height) to (x1, y1, x2, y2)
prediction[..., :4] = xywh2xyxy(prediction[..., :4])
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
image_pred = image_pred[image_pred[:, 4] >= conf_thres]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Object confidence times class confidence
score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
# Sort by it
image_pred = image_pred[(-score).argsort()]
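        # keep the highest class score and its index for each remaining detection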
class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
# Perform non-maximum suppression
keep_boxes = []
while detections.size(0):
large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
label_match = detections[0, -1] == detections[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
weights = detections[invalid, 4:5]
# Merge overlapping bboxes by order of confidence
detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
keep_boxes += [detections[0]]
detections = detections[~invalid]
if keep_boxes:
output[image_i] = torch.stack(keep_boxes)
return output
def parse_model_config(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions"""
    with open(path, 'r') as f:
        lines = f.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
module_defs = []
for line in lines:
if line.startswith('['): # This marks the start of a new block
module_defs.append({})
module_defs[-1]['type'] = line[1:-1].rstrip()
if module_defs[-1]['type'] == 'convolutional':
module_defs[-1]['batch_normalize'] = 0
else:
key, value = line.split("=")
value = value.strip()
module_defs[-1][key.rstrip()] = value.strip()
return module_defs
def parse_data_config(path):
"""Parses the data configuration file"""
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(path, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, value = line.split('=')
options[key.strip()] = value.strip()
return options
def create_modules(module_defs):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams["channels"])]
module_list = nn.ModuleList()
for module_i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def["type"] == "convolutional":
bn = int(module_def["batch_normalize"])
filters = int(module_def["filters"])
kernel_size = int(module_def["size"])
pad = (kernel_size - 1) // 2
modules.add_module(
f"conv_{module_i}",
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
if module_def["activation"] == "leaky":
modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
elif module_def["type"] == "maxpool":
kernel_size = int(module_def["size"])
stride = int(module_def["stride"])
if kernel_size == 2 and stride == 1:
modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
modules.add_module(f"maxpool_{module_i}", maxpool)
elif module_def["type"] == "upsample":
upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
modules.add_module(f"upsample_{module_i}", upsample)
elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[1:][i] for i in layers])
modules.add_module(f"route_{module_i}", EmptyLayer())
elif module_def["type"] == "shortcut":
filters = output_filters[1:][int(module_def["from"])]
modules.add_module(f"shortcut_{module_i}", EmptyLayer())
elif module_def["type"] == "yolo":
anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
# Extract anchors
anchors = [int(x) for x in module_def["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(module_def["classes"])
img_size = int(hyperparams["height"])
# Define detection layer
yolo_layer = YOLOLayer(anchors, num_classes, img_size)
modules.add_module(f"yolo_{module_i}", yolo_layer)
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, num_classes, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = 0 # grid size
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
# Calculate offsets for each grid
self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor)
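        # express the anchor sizes in grid-cell units so they match the feature-map coordinates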
self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, img_dim=None):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
self.img_dim = img_dim
num_samples = x.size(0)
grid_size = x.size(2)
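        # reshape the raw feature map to (batch, anchors, grid, grid, 5 + num_classes)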
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# If grid size does not match current we compute new offsets
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
output = torch.cat(
(
pred_boxes.view(num_samples, -1, 4) * self.stride,
pred_conf.view(num_samples, -1, 1),
pred_cls.view(num_samples, -1, self.num_classes),
),
-1,
)
if targets is None:
return output, 0
else:
iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
pred_boxes=pred_boxes,
pred_cls=pred_cls,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
# Metrics
cls_acc = 100 * class_mask[obj_mask].mean()
conf_obj = pred_conf[obj_mask].mean()
conf_noobj = pred_conf[noobj_mask].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
detected_mask = conf50 * class_mask * tconf
precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"cls": to_cpu(loss_cls).item(),
"cls_acc": to_cpu(cls_acc).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size,
}
return output, total_loss
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, config_path, img_size=416):
super(Darknet, self).__init__()
self.module_defs = parse_model_config(config_path)
self.hyperparams, self.module_list = create_modules(self.module_defs)
self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")]
self.img_size = img_size
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def forward(self, x, targets=None):
img_dim = x.shape[2]
loss = 0
layer_outputs, yolo_outputs = [], []
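        # run the modules in order, caching every layer output for route/shortcut connections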
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
x = module(x)
elif module_def["type"] == "route":
x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1)
elif module_def["type"] == "shortcut":
layer_i = int(module_def["from"])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif module_def["type"] == "yolo":
x, layer_loss = module[0](x, targets, img_dim)
loss += layer_loss
yolo_outputs.append(x)
layer_outputs.append(x)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return yolo_outputs if targets is None else (loss, yolo_outputs)
def load_darknet_weights(self, weights_path):
"""Parses and loads the weights stored in 'weights_path'"""
# Open the weights file
with open(weights_path, "rb") as f:
header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values
self.header_info = header # Needed to write header when saving weights
self.seen = header[3] # number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # The rest are weights
# Establish cutoff for loading backbone weights
cutoff = None
if "darknet53.conv.74" in weights_path:
cutoff = 75
ptr = 0
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if i == cutoff:
break
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
def save_darknet_weights(self, path, cutoff=-1):
"""
        :param path: path of the new weights file
        :param cutoff: save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
fp = open(path, "wb")
self.header_info[3] = self.seen
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def["type"] == "convolutional":
conv_layer = module[0]
# If batch norm, load bn first
if module_def["batch_normalize"]:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
# Load conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
# Load conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
def prepare_yolo(model_dir):
''' Download yolo model files and load the model weights
:param model_dir: Directory path where to store yolo model weights and yolo model configuration file.
:return: Yolo model after loading model weights
'''
cfg_file = os.path.join(model_dir, 'yolov3.cfg')
if not os.path.exists(cfg_file):
download_command = 'wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg -O ' + cfg_file
os.system(download_command)
weight_file = os.path.join(model_dir, 'yolov3.weights')
if not os.path.exists(weight_file):
download_command = 'wget https://pjreddie.com/media/files/yolov3.weights -O ' + weight_file
os.system(download_command)
yolo_model = Darknet(cfg_file, 416)
yolo_model.load_darknet_weights(weight_file)
    print('prepared yolo model')
return yolo_model
# if __name__ == '__main__':
# prepare_yolo(model_dir = '/home/face-r/Steps_face_recognition/emotic/debug/models')
|
[
"numpy.fromfile",
"torch.nn.ZeroPad2d",
"torch.nn.Sequential",
"torch.max",
"torch.exp",
"torch.min",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.array",
"torch.sum",
"torch.nn.functional.interpolate",
"torch.arange",
"os.path.exists",
"torch.nn.BatchNorm2d",
"torch.nn.ModuleList",
"torch.nn.LeakyReLU",
"torch.cat",
"torch.clamp",
"torch.sigmoid",
"os.path.join",
"torch.stack",
"torch.nn.BCELoss",
"os.system"
] |
[((1247, 1270), 'torch.max', 'torch.max', (['b1_x1', 'b2_x1'], {}), '(b1_x1, b2_x1)\n', (1256, 1270), False, 'import torch\n'), ((1288, 1311), 'torch.max', 'torch.max', (['b1_y1', 'b2_y1'], {}), '(b1_y1, b2_y1)\n', (1297, 1311), False, 'import torch\n'), ((1329, 1352), 'torch.min', 'torch.min', (['b1_x2', 'b2_x2'], {}), '(b1_x2, b2_x2)\n', (1338, 1352), False, 'import torch\n'), ((1370, 1393), 'torch.min', 'torch.min', (['b1_y2', 'b2_y2'], {}), '(b1_y2, b2_y2)\n', (1379, 1393), False, 'import torch\n'), ((5542, 5557), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5555, 5557), True, 'import torch.nn as nn\n'), ((17645, 17682), 'os.path.join', 'os.path.join', (['model_dir', '"""yolov3.cfg"""'], {}), "(model_dir, 'yolov3.cfg')\n", (17657, 17682), False, 'import os\n'), ((17878, 17919), 'os.path.join', 'os.path.join', (['model_dir', '"""yolov3.weights"""'], {}), "(model_dir, 'yolov3.weights')\n", (17890, 17919), False, 'import os\n'), ((1429, 1482), 'torch.clamp', 'torch.clamp', (['(inter_rect_x2 - inter_rect_x1 + 1)'], {'min': '(0)'}), '(inter_rect_x2 - inter_rect_x1 + 1, min=0)\n', (1440, 1482), False, 'import torch\n'), ((1485, 1538), 'torch.clamp', 'torch.clamp', (['(inter_rect_y2 - inter_rect_y1 + 1)'], {'min': '(0)'}), '(inter_rect_y2 - inter_rect_y1 + 1, min=0)\n', (1496, 1538), False, 'import torch\n'), ((5623, 5638), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (5636, 5638), True, 'import torch.nn as nn\n'), ((8171, 8235), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': 'self.scale_factor', 'mode': 'self.mode'}), '(x, scale_factor=self.scale_factor, mode=self.mode)\n', (8184, 8235), True, 'import torch.nn.functional as F\n'), ((8670, 8682), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (8680, 8682), True, 'import torch.nn as nn\n'), ((8701, 8713), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (8711, 8713), True, 'import torch.nn as nn\n'), ((10083, 10116), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 0]'], {}), '(prediction[..., 0])\n', (10096, 10116), False, 'import torch\n'), ((10135, 10168), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 1]'], {}), '(prediction[..., 1])\n', (10148, 10168), False, 'import torch\n'), ((10264, 10297), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 4]'], {}), '(prediction[..., 4])\n', (10277, 10297), False, 'import torch\n'), ((10319, 10353), 'torch.sigmoid', 'torch.sigmoid', (['prediction[..., 5:]'], {}), '(prediction[..., 5:])\n', (10332, 10353), False, 'import torch\n'), ((13496, 13545), 'numpy.array', 'np.array', (['[0, 0, 0, self.seen, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0, self.seen, 0], dtype=np.int32)\n', (13504, 13545), True, 'import numpy as np\n'), ((17691, 17715), 'os.path.exists', 'os.path.exists', (['cfg_file'], {}), '(cfg_file)\n', (17705, 17715), False, 'import os\n'), ((17835, 17862), 'os.system', 'os.system', (['download_command'], {}), '(download_command)\n', (17844, 17862), False, 'import os\n'), ((17928, 17955), 'os.path.exists', 'os.path.exists', (['weight_file'], {}), '(weight_file)\n', (17942, 17955), False, 'import os\n'), ((18053, 18080), 'os.system', 'os.system', (['download_command'], {}), '(download_command)\n', (18062, 18080), False, 'import os\n'), ((4181, 4204), 'torch.stack', 'torch.stack', (['keep_boxes'], {}), '(keep_boxes)\n', (4192, 4204), False, 'import torch\n'), ((10725, 10742), 'torch.exp', 'torch.exp', (['w.data'], {}), '(w.data)\n', (10734, 10742), False, 'import torch\n'), ((10782, 10799), 'torch.exp', 'torch.exp', 
(['h.data'], {}), '(h.data)\n', (10791, 10799), False, 'import torch\n'), ((14289, 14315), 'torch.cat', 'torch.cat', (['yolo_outputs', '(1)'], {}), '(yolo_outputs, 1)\n', (14298, 14315), False, 'import torch\n'), ((14571, 14610), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int32', 'count': '(5)'}), '(f, dtype=np.int32, count=5)\n', (14582, 14610), True, 'import numpy as np\n'), ((14797, 14829), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float32'}), '(f, dtype=np.float32)\n', (14808, 14829), True, 'import numpy as np\n'), ((12285, 12317), 'torch.sum', 'torch.sum', (['(iou50 * detected_mask)'], {}), '(iou50 * detected_mask)\n', (12294, 12317), False, 'import torch\n'), ((12357, 12389), 'torch.sum', 'torch.sum', (['(iou50 * detected_mask)'], {}), '(iou50 * detected_mask)\n', (12366, 12389), False, 'import torch\n'), ((12431, 12463), 'torch.sum', 'torch.sum', (['(iou75 * detected_mask)'], {}), '(iou75 * detected_mask)\n', (12440, 12463), False, 'import torch\n'), ((6142, 6190), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['filters'], {'momentum': '(0.9)', 'eps': '(1e-05)'}), '(filters, momentum=0.9, eps=1e-05)\n', (6156, 6190), True, 'import torch.nn as nn\n'), ((6278, 6295), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (6290, 6295), True, 'import torch.nn as nn\n'), ((6510, 6536), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(0, 1, 0, 1)'], {}), '((0, 1, 0, 1))\n', (6522, 6536), True, 'import torch.nn as nn\n'), ((16285, 16327), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_w]'], {}), '(weights[ptr:ptr + num_w])\n', (16301, 16327), False, 'import torch\n'), ((15382, 15424), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15398, 15424), False, 'import torch\n'), ((15530, 15572), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15546, 15572), False, 'import torch\n'), ((15689, 15731), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15705, 15731), False, 'import torch\n'), ((15860, 15902), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (15876, 15902), False, 'import torch\n'), ((16081, 16123), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_b]'], {}), '(weights[ptr:ptr + num_b])\n', (16097, 16123), False, 'import torch\n'), ((9113, 9128), 'torch.arange', 'torch.arange', (['g'], {}), '(g)\n', (9125, 9128), False, 'import torch\n'), ((9195, 9210), 'torch.arange', 'torch.arange', (['g'], {}), '(g)\n', (9207, 9210), False, 'import torch\n')]
|
"""
Example of usage of the AVB framework to infer a single exponential decay
model.
This uses the Python classes directly to infer the parameters for a single
instance of noisy data constructed as a Numpy array.
"""
import sys
import logging
import numpy as np
from vaby_avb import Avb
import vaby
# Uncomment line below to start the random number generator off with the same seed value
# each time, for repeatable results
#np.random.seed(0)
# Ground truth parameters
PARAMS_TRUTH = [42, 0.5]
NOISE_PREC_TRUTH = 0.1
NOISE_VAR_TRUTH = 1/NOISE_PREC_TRUTH
NOISE_STD_TRUTH = np.sqrt(NOISE_VAR_TRUTH)
print("Ground truth: a=%f, r=%f, noise=%f (precision)" % (PARAMS_TRUTH[0], PARAMS_TRUTH[1], NOISE_PREC_TRUTH))
# Create single exponential model
model = vaby.get_model_class("exp")(None)
# Observed data samples are generated by Numpy from the ground truth
# Gaussian distribution. Reducing the number of samples should make
# the inference less 'confident' - i.e. the output variances for
# MU and BETA will increase
N = 100
DT = 0.02
t = np.array([float(t)*DT for t in range(N)])
DATA_CLEAN = model.evaluate(PARAMS_TRUTH, t).numpy()
DATA_NOISY = DATA_CLEAN + np.random.normal(0, NOISE_STD_TRUTH, [N])
print("Time values:")
print(t)
print("Data samples (clean):")
print(DATA_CLEAN)
print("Data samples (noisy):")
print(DATA_NOISY)
# Run Fabber as a comparison if desired
#import os
#import nibabel as nib
#niidata = DATA_NOISY.reshape((1, 1, 1, N))
#nii = nib.Nifti1Image(niidata, np.identity(4))
#nii.to_filename("data_noisy.nii.gz")
#os.system("fabber_exp --data=data_noisy --print-free-energy --output=fabberout --dt=%.3f --model=exp --num-exps=1 --method=vb --noise=white --overwrite --debug" % DT)
# Log to stdout
logging.getLogger().setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s : %(message)s'))
logging.getLogger().addHandler(handler)
# Run AVB inference
avb = Avb(t, vaby.DataModel(DATA_NOISY), model)
avb.run(method="leastsq", maxits=20, learning_rate=0.1, debug="--debug" in sys.argv)
|
[
"numpy.random.normal",
"logging.getLogger",
"logging.StreamHandler",
"numpy.sqrt",
"logging.Formatter",
"vaby.DataModel",
"vaby.get_model_class"
] |
[((577, 601), 'numpy.sqrt', 'np.sqrt', (['NOISE_VAR_TRUTH'], {}), '(NOISE_VAR_TRUTH)\n', (584, 601), True, 'import numpy as np\n'), ((1777, 1810), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1798, 1810), False, 'import logging\n'), ((755, 782), 'vaby.get_model_class', 'vaby.get_model_class', (['"""exp"""'], {}), "('exp')\n", (775, 782), False, 'import vaby\n'), ((1163, 1204), 'numpy.random.normal', 'np.random.normal', (['(0)', 'NOISE_STD_TRUTH', '[N]'], {}), '(0, NOISE_STD_TRUTH, [N])\n', (1179, 1204), True, 'import numpy as np\n'), ((1832, 1880), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s : %(message)s"""'], {}), "('%(levelname)s : %(message)s')\n", (1849, 1880), False, 'import logging\n'), ((1956, 1982), 'vaby.DataModel', 'vaby.DataModel', (['DATA_NOISY'], {}), '(DATA_NOISY)\n', (1970, 1982), False, 'import vaby\n'), ((1724, 1743), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1741, 1743), False, 'import logging\n'), ((1882, 1901), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1899, 1901), False, 'import logging\n')]
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
class Constant(Base):
@staticmethod
def export(): # type: () -> None
values = np.random.randn(5, 5).astype(np.float32)
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=onnx.TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
expect(node, inputs=[], outputs=[values],
name='test_constant')
|
[
"numpy.random.randn"
] |
[((364, 385), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)'], {}), '(5, 5)\n', (379, 385), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
#grid number on half space (without the origin)
N=150
#total grid number = 2*N + 1 (with origin)
N_g=2*N+1
#finite barrier potential value = 300 (meV)
potential_value=300
#building potential:
def potential(potential_value):
V=np.zeros((1,N_g),dtype=float)
V[0,0:100]=potential_value
V[0,100:201]=0
V[0,201:]=potential_value
return V
#Hamiltonian matrix:
def Hamiltonian(V):
H=np.zeros((N_g,N_g),dtype=float)
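    #finite-difference Hamiltonian: diagonal = 1/dx^2 + V(x), nearest neighbours = -0.5/dx^2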
dx=10 #0.1 (nanometer)
for i in range(0,N_g):
for j in range(0,N_g):
if i==j:
x=dx*(i-N) #position
H[i,j]=1/(dx**2)+V[0,i]
elif j==i-1 or j==i+1:
H[i,j]=-0.5/(dx**2)
return H
V=potential(potential_value)
H=Hamiltonian(V)
#sort the eigenvalue and get the corresponding eigenvector
eigenvalue,eigenvector=np.linalg.eig(H)
idx=np.argsort(eigenvalue)
eigenvalue=eigenvalue[idx]
eigenvector=eigenvector[:,idx]
#visualize
fig=plt.figure(figsize=(18,6))
ax1=fig.add_subplot(131)
x=np.linspace(0,10,11)
ax1.plot(x,eigenvalue[0:11],'r.',label='numerical')
ax1.set_xlabel('n')
ax1.set_ylabel('$E_n (meV)$')
ax1.set_title('eigen energies')
ax1.grid(True)
ax1.legend()
ax2=fig.add_subplot(132)
x=np.linspace(-5,5,301)
#x/lambda_0
x=x/(np.sqrt(2)*10**(10-9)/np.pi)
y1=eigenvector[:,0]
y2=eigenvector[:,1]
y3=eigenvector[:,2]
y4=eigenvector[:,3]
y5=eigenvector[:,4]
ax2.plot(x,(y1),label='$Ψ_{n=0}(x)$')
ax2.plot(x,(y2),label='$Ψ_{n=1}(x)$')
ax2.plot(x,(y3),label='$Ψ_{n=2}(x)$')
ax2.set_xlabel('position ($x/λ_0$) ')
ax2.set_ylabel('wavefunction')
ax2.set_title('wave function in different eigen state')
ax2.legend()
ax2.grid(True)
ax3=fig.add_subplot(133)
ax3.plot(x,(y1**2),label='$Ψ^2_{n=0}(x)$')
ax3.plot(x,(y2**2),label='$Ψ^2_{n=1}(x)$')
ax3.plot(x,(y3**2),label='$Ψ^2_{n=2}(x)$')
ax3.set_xlabel('position ($x/λ_0$) ')
ax3.set_ylabel('square wavefunction')
ax3.set_title('probability distribution in finite barrier well')
ax3.grid(True)
ax3.legend()
plt.show()
|
[
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.show"
] |
[((886, 902), 'numpy.linalg.eig', 'np.linalg.eig', (['H'], {}), '(H)\n', (899, 902), True, 'import numpy as np\n'), ((907, 929), 'numpy.argsort', 'np.argsort', (['eigenvalue'], {}), '(eigenvalue)\n', (917, 929), True, 'import numpy as np\n'), ((1004, 1031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 6)'}), '(figsize=(18, 6))\n', (1014, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1080), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (1069, 1080), True, 'import numpy as np\n'), ((1269, 1292), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(301)'], {}), '(-5, 5, 301)\n', (1280, 1292), True, 'import numpy as np\n'), ((2027, 2037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2035, 2037), True, 'import matplotlib.pyplot as plt\n'), ((283, 314), 'numpy.zeros', 'np.zeros', (['(1, N_g)'], {'dtype': 'float'}), '((1, N_g), dtype=float)\n', (291, 314), True, 'import numpy as np\n'), ((456, 489), 'numpy.zeros', 'np.zeros', (['(N_g, N_g)'], {'dtype': 'float'}), '((N_g, N_g), dtype=float)\n', (464, 489), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1314, 1317), True, 'import numpy as np\n')]
|
import numpy as np
def _check_mne(name):
"""Helper to check if h5py is installed"""
try:
import mne
except ImportError:
raise ImportError('Please install MNE-python to use %s.' % name)
return mne
def raw_to_mask(raw, ixs, events=None, tmin=None, tmax=None):
"""
A function to transform MNE data into pactools input signals.
It select the one channel on which you to estimate PAC, or two channels
for cross-channel PAC. It also returns a mask generator, that mask the
data outside a given window around an event. The mask generator returns
a number of masks equal to the number of events times the number of
windows (i.e. the number of pairs (tmin, tmax)).
Warning: events is stored in indices, tmin and tmax are stored in seconds.
Parameters
----------
raw : an instance of Raw, containing data of shape (n_channels, n_times)
The data used to calculate PAC
ixs : int or couple of int
The indices for the low/high frequency channels. If only one is given,
the same channel is used for both low_sig and high_sig.
events : array, shape (n_events, 3) | array, shape (n_events,) | None
MNE events array. To be supplied if data is 2D and output should be
split by events. In this case, `tmin` and `tmax` must be provided. If
`ndim == 1`, it is assumed to be event indices, and all events will be
grouped together. Otherwise, events will be grouped along the third
dimension.
tmin : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the start time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include before
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `min(raw.times)`.
tmax : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the stop time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include after
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `max(raw.times)`.
Attributes
----------
low_sig : array, shape (1, n_points)
Input data for the phase signal
high_sig : array or None, shape (1, n_points)
Input data for the amplitude signal.
If None, we use low_sig for both signals.
mask : MaskIterator instance
Object that behaves like a list of mask, without storing them all.
The PAC will only be evaluated where the mask is False.
Examples
--------
>>> from pactools import raw_to_mask
>>> low_sig, high_sig, mask = raw_to_mask(raw, ixs, events, tmin, tmax)
>>> n_masks = len(mask)
>>> for one_mask in mask:
... pass
"""
mne = _check_mne('raw_to_mask')
if not isinstance(raw, mne.io.BaseRaw):
raise ValueError('Must supply Raw as input')
ixs = np.atleast_1d(ixs)
fs = raw.info['sfreq']
data = raw[:][0]
n_channels, n_points = data.shape
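    # the first index gives the phase (low-frequency) channel, the second the amplitude channel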
low_sig = data[ixs[0]][None, :]
if ixs.shape[0] > 1:
high_sig = data[ixs[1]][None, :]
else:
high_sig = None
mask = MaskIterator(events, tmin, tmax, n_points, fs)
return low_sig, high_sig, mask
class MaskIterator(object):
"""Iterator that creates the masks one at a time.
Examples
--------
>>> from pactools import MaskIterator
>>> all_masks = MaskIterator(events, tmin, tmax, n_points, fs)
>>> n_masks = len(all_masks)
>>> for one_mask in all_masks:
... pass
"""
def __init__(self, events, tmin, tmax, n_points, fs):
self.events = events
self.tmin = tmin
self.tmax = tmax
self.n_points = n_points
self.fs = float(fs)
self._init()
def _init(self):
self.tmin = np.atleast_1d(self.tmin)
self.tmax = np.atleast_1d(self.tmax)
if len(self.tmin) != len(self.tmax):
raise ValueError('tmin and tmax have differing lengths')
n_windows = len(self.tmin)
if self.events is None:
self.events = np.array([0.])
n_events = 1
if self.events.ndim == 1:
n_events = 1 # number of different event kinds
else:
n_events = np.unique(self.events[:, -1]).shape[0]
self._n_iter = n_windows * n_events
def __iter__(self):
return self.next()
def __len__(self):
return self._n_iter
def next(self):
if self.events.ndim == 1:
event_names = [None, ]
else:
event_names = np.unique(self.events[:, -1])
mask = np.empty((1, self.n_points), dtype=bool)
for event_name in event_names:
if self.events.ndim == 1:
# select all the events since their kind is not specified
these_events = self.events
else:
# select the event indices of one kind of event
these_events = self.events[self.events[:, -1] == event_name, 0]
for tmin, tmax in zip(self.tmin, self.tmax):
mask.fill(True) # it masks everything
for event in these_events:
start, stop = None, None
if tmin is not None:
start = int(event + tmin * self.fs)
if tmax is not None:
stop = int(event + tmax * self.fs)
mask[:, start:stop] = False
yield mask
|
[
"numpy.array",
"numpy.empty",
"numpy.unique",
"numpy.atleast_1d"
] |
[((3066, 3084), 'numpy.atleast_1d', 'np.atleast_1d', (['ixs'], {}), '(ixs)\n', (3079, 3084), True, 'import numpy as np\n'), ((3978, 4002), 'numpy.atleast_1d', 'np.atleast_1d', (['self.tmin'], {}), '(self.tmin)\n', (3991, 4002), True, 'import numpy as np\n'), ((4023, 4047), 'numpy.atleast_1d', 'np.atleast_1d', (['self.tmax'], {}), '(self.tmax)\n', (4036, 4047), True, 'import numpy as np\n'), ((4793, 4833), 'numpy.empty', 'np.empty', (['(1, self.n_points)'], {'dtype': 'bool'}), '((1, self.n_points), dtype=bool)\n', (4801, 4833), True, 'import numpy as np\n'), ((4257, 4272), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (4265, 4272), True, 'import numpy as np\n'), ((4747, 4776), 'numpy.unique', 'np.unique', (['self.events[:, -1]'], {}), '(self.events[:, -1])\n', (4756, 4776), True, 'import numpy as np\n'), ((4429, 4458), 'numpy.unique', 'np.unique', (['self.events[:, -1]'], {}), '(self.events[:, -1])\n', (4438, 4458), True, 'import numpy as np\n')]
|
import sys
import numpy
import numpy as np
from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils
from snappy import FlagCoding
##############
import csv
###############MSVR
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
########################
if len(sys.argv) != 2:
print("usage: %s <file>" % sys.argv[0])
sys.exit(1)
file = sys.argv[1]
print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()
print("Product: %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time: " + str(product.getStartTime()))
print("End time: " + str(product.getEndTime()))
print("Bands: %s" % (list(band_names)))
##---------------------------------------------------------------------------------
with open('rice_LUT.csv','r') as dest_f:
data_iter = csv.reader(dest_f,
delimiter = ',',
quotechar = '"')
data = [data for data in data_iter]
data_array = np.asarray(data, dtype = np.float32)
VV = data_array[:,1]
VH = data_array[:,2]
PAI = data_array[:,0]
X=np.column_stack((VV,VH))
Y = PAI
#SVR training
pipeline = make_pipeline(StandardScaler(),
SVR(kernel='rbf', epsilon=0.105, C=250, gamma = 2.8),
)
SVRmodel=pipeline.fit(X,Y)
# Predictfor validation data
valX = X
y_out = pipeline.predict(valX)
##---------------------------------------------------------------------------------
bandc11 = product.getBand('C11')
bandc22 = product.getBand('C22')
laiProduct = Product('LAI', 'LAI', width, height)
laiBand = laiProduct.addBand('lai', ProductData.TYPE_FLOAT32)
laiFlagsBand = laiProduct.addBand('lai_flags', ProductData.TYPE_UINT8)
writer = ProductIO.getProductWriter('BEAM-DIMAP')
ProductUtils.copyGeoCoding(product, laiProduct)
ProductUtils.copyMetadata(product, laiProduct)
ProductUtils.copyTiePointGrids(product, laiProduct)
laiFlagCoding = FlagCoding('lai_flags')
laiFlagCoding.addFlag("LAI_LOW", 1, "LAI below 0")
laiFlagCoding.addFlag("LAI_HIGH", 2, "LAI above 5")
group = laiProduct.getFlagCodingGroup()
#print(dir(group))
group.add(laiFlagCoding)
laiFlagsBand.setSampleCoding(laiFlagCoding)
laiProduct.setProductWriter(writer)
laiProduct.writeHeader('LAImap_output.dim')
c11 = numpy.zeros(width, dtype=numpy.float32)
c22 = numpy.zeros(width, dtype=numpy.float32)
print("Writing...")
for y in range(height):
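    # predict LAI one raster line at a time and write it straight to the output product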
print("processing line ", y, " of ", height)
c11 = bandc11.readPixels(0, y, width, 1, c11)
c22 = bandc22.readPixels(0, y, width, 1, c22)
Z=np.column_stack((c11,c22))
#ndvi = (r10 - r7) / (r10 + r7)
    lai = pipeline.predict(Z)
laiBand.writePixels(0, y, width, 1, lai)
laiLow = lai < 0.0
laiHigh = lai > 5.0
laiFlags = numpy.array(laiLow + 2 * laiHigh, dtype=numpy.int32)
laiFlagsBand.writePixels(0, y, width, 1, laiFlags)
laiProduct.closeIO()
print("Done.")
|
[
"snappy.ProductIO.readProduct",
"snappy.ProductUtils.copyGeoCoding",
"snappy.Product",
"snappy.ProductIO.getProductWriter",
"numpy.asarray",
"numpy.column_stack",
"snappy.FlagCoding",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"numpy.array",
"snappy.ProductUtils.copyMetadata",
"csv.reader",
"sys.exit",
"sklearn.svm.SVR",
"snappy.ProductUtils.copyTiePointGrids"
] |
[((520, 547), 'snappy.ProductIO.readProduct', 'ProductIO.readProduct', (['file'], {}), '(file)\n', (541, 547), False, 'from snappy import ProductIO\n'), ((1284, 1318), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1294, 1318), True, 'import numpy as np\n'), ((1389, 1414), 'numpy.column_stack', 'np.column_stack', (['(VV, VH)'], {}), '((VV, VH))\n', (1404, 1414), True, 'import numpy as np\n'), ((1806, 1842), 'snappy.Product', 'Product', (['"""LAI"""', '"""LAI"""', 'width', 'height'], {}), "('LAI', 'LAI', width, height)\n", (1813, 1842), False, 'from snappy import Product\n'), ((1985, 2025), 'snappy.ProductIO.getProductWriter', 'ProductIO.getProductWriter', (['"""BEAM-DIMAP"""'], {}), "('BEAM-DIMAP')\n", (2011, 2025), False, 'from snappy import ProductIO\n'), ((2027, 2074), 'snappy.ProductUtils.copyGeoCoding', 'ProductUtils.copyGeoCoding', (['product', 'laiProduct'], {}), '(product, laiProduct)\n', (2053, 2074), False, 'from snappy import ProductUtils\n'), ((2075, 2121), 'snappy.ProductUtils.copyMetadata', 'ProductUtils.copyMetadata', (['product', 'laiProduct'], {}), '(product, laiProduct)\n', (2100, 2121), False, 'from snappy import ProductUtils\n'), ((2122, 2173), 'snappy.ProductUtils.copyTiePointGrids', 'ProductUtils.copyTiePointGrids', (['product', 'laiProduct'], {}), '(product, laiProduct)\n', (2152, 2173), False, 'from snappy import ProductUtils\n'), ((2191, 2214), 'snappy.FlagCoding', 'FlagCoding', (['"""lai_flags"""'], {}), "('lai_flags')\n", (2201, 2214), False, 'from snappy import FlagCoding\n'), ((2535, 2574), 'numpy.zeros', 'numpy.zeros', (['width'], {'dtype': 'numpy.float32'}), '(width, dtype=numpy.float32)\n', (2546, 2574), False, 'import numpy\n'), ((2581, 2620), 'numpy.zeros', 'numpy.zeros', (['width'], {'dtype': 'numpy.float32'}), '(width, dtype=numpy.float32)\n', (2592, 2620), False, 'import numpy\n'), ((457, 468), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (465, 468), False, 'import sys\n'), ((1124, 1172), 'csv.reader', 'csv.reader', (['dest_f'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(dest_f, delimiter=\',\', quotechar=\'"\')\n', (1134, 1172), False, 'import csv\n'), ((1462, 1478), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1476, 1478), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1484, 1534), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'epsilon': '(0.105)', 'C': '(250)', 'gamma': '(2.8)'}), "(kernel='rbf', epsilon=0.105, C=250, gamma=2.8)\n", (1487, 1534), False, 'from sklearn.svm import SVR\n'), ((2827, 2854), 'numpy.column_stack', 'np.column_stack', (['(c11, c22)'], {}), '((c11, c22))\n', (2842, 2854), True, 'import numpy as np\n'), ((3034, 3086), 'numpy.array', 'numpy.array', (['(laiLow + 2 * laiHigh)'], {'dtype': 'numpy.int32'}), '(laiLow + 2 * laiHigh, dtype=numpy.int32)\n', (3045, 3086), False, 'import numpy\n')]
|
from __future__ import absolute_import, division, print_function
import numpy as np
import wx
from dials.array_family import flex
from dials_viewer_ext import rgb_img
class wxbmp_from_np_array(object):
def __init__(
self, lst_data_in, show_nums=True, palette="black2white", lst_data_mask_in=None
):
self.wx_bmp_arr = rgb_img()
if lst_data_in is None and lst_data_mask_in is None:
self._ini_wx_bmp_lst = None
else:
self._ini_wx_bmp_lst = []
for lst_pos in range(len(lst_data_in)):
data_3d_in = lst_data_in[lst_pos]
xmax = data_3d_in.shape[1]
ymax = data_3d_in.shape[2]
# remember to put here some assertion to check that
# both arrays have the same shape
if lst_data_mask_in is not None:
data_3d_in_mask = lst_data_mask_in[lst_pos]
self.vl_max = float(np.amax(data_3d_in))
self.vl_min = float(np.amin(data_3d_in))
tmp_data2d = np.zeros((xmax, ymax), "double")
tmp_data2d_mask = np.zeros((xmax, ymax), "double")
z_dp = data_3d_in.shape[0]
single_block_lst_01 = []
for z in range(z_dp):
# print "z =", z
tmp_data2d[:, :] = data_3d_in[z : z + 1, :, :]
if lst_data_mask_in is not None:
tmp_data2d_mask[:, :] = data_3d_in_mask[z : z + 1, :, :]
else:
tmp_data2d_mask = None
data_sigle_img = self._wx_img_w_cpp(
tmp_data2d, show_nums, palette, tmp_data2d_mask
)
single_block_lst_01.append(data_sigle_img)
self._ini_wx_bmp_lst.append(single_block_lst_01)
def bmp_lst_scaled(self, scale=1.0):
if self._ini_wx_bmp_lst is None:
NewW = 350
wx_image = wx.Image(NewW, NewW)
wxBitmap = wx_image.ConvertToBitmap()
dc = wx.MemoryDC(wxBitmap)
text = "No Shoebox data"
w, h = dc.GetSize()
tw, th = dc.GetTextExtent(text)
dc.Clear()
dc.DrawText(text, (w - tw) / 2, (h - th) / 2) # display text in center
dc.SelectObject(wxBitmap)
del dc
wx_bmp_lst = [[wxBitmap]]
else:
wx_bmp_lst = []
for data_3d in self._ini_wx_bmp_lst:
single_block_lst = []
for sigle_img_data in data_3d:
single_block_lst.append(self._wx_bmp_scaled(sigle_img_data, scale))
wx_bmp_lst.append(single_block_lst)
return wx_bmp_lst
def _wx_img_w_cpp(self, np_2d_tmp, show_nums, palette, np_2d_mask=None):
xmax = np_2d_tmp.shape[1]
ymax = np_2d_tmp.shape[0]
if np_2d_mask is None:
np_2d_mask = np.zeros((ymax, xmax), "double")
transposed_data = np.zeros((ymax, xmax), "double")
transposed_mask = np.zeros((ymax, xmax), "double")
transposed_data[:, :] = np_2d_tmp
transposed_mask[:, :] = np_2d_mask
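        # pass the data and mask to the C++ bitmap generator as flex.double arrays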
flex_data_in = flex.double(transposed_data)
flex_mask_in = flex.double(transposed_mask)
if palette == "black2white":
palette_num = 1
elif palette == "white2black":
palette_num = 2
elif palette == "hot ascend":
palette_num = 3
else: # assuming "hot descend"
palette_num = 4
img_array_tmp = self.wx_bmp_arr.gen_bmp(
flex_data_in, flex_mask_in, show_nums, palette_num
)
np_img_array = img_array_tmp.as_numpy_array()
height = np.size(np_img_array[:, 0:1, 0:1])
width = np.size(np_img_array[0:1, :, 0:1])
img_array = np.empty((height, width, 3), "uint8")
img_array[:, :, :] = np_img_array[:, :, :]
self._wx_image = wx.Image(width, height)
self._wx_image.SetData(img_array.tostring())
data_to_become_bmp = (self._wx_image, width, height)
return data_to_become_bmp
def _wx_bmp_scaled(self, data_to_become_bmp, scale):
to_become_bmp = data_to_become_bmp[0]
width = data_to_become_bmp[1]
height = data_to_become_bmp[2]
NewW = int(width * scale)
NewH = int(height * scale)
to_become_bmp = to_become_bmp.Scale(NewW, NewH, wx.IMAGE_QUALITY_NORMAL)
wxBitmap = to_become_bmp.ConvertToBitmap()
return wxBitmap
|
[
"numpy.amax",
"numpy.amin",
"dials_viewer_ext.rgb_img",
"wx.MemoryDC",
"numpy.size",
"wx.Image",
"numpy.zeros",
"numpy.empty",
"dials.array_family.flex.double"
] |
[((345, 354), 'dials_viewer_ext.rgb_img', 'rgb_img', ([], {}), '()\n', (352, 354), False, 'from dials_viewer_ext import rgb_img\n'), ((3055, 3087), 'numpy.zeros', 'np.zeros', (['(ymax, xmax)', '"""double"""'], {}), "((ymax, xmax), 'double')\n", (3063, 3087), True, 'import numpy as np\n'), ((3114, 3146), 'numpy.zeros', 'np.zeros', (['(ymax, xmax)', '"""double"""'], {}), "((ymax, xmax), 'double')\n", (3122, 3146), True, 'import numpy as np\n'), ((3257, 3285), 'dials.array_family.flex.double', 'flex.double', (['transposed_data'], {}), '(transposed_data)\n', (3268, 3285), False, 'from dials.array_family import flex\n'), ((3309, 3337), 'dials.array_family.flex.double', 'flex.double', (['transposed_mask'], {}), '(transposed_mask)\n', (3320, 3337), False, 'from dials.array_family import flex\n'), ((3801, 3835), 'numpy.size', 'np.size', (['np_img_array[:, 0:1, 0:1]'], {}), '(np_img_array[:, 0:1, 0:1])\n', (3808, 3835), True, 'import numpy as np\n'), ((3852, 3886), 'numpy.size', 'np.size', (['np_img_array[0:1, :, 0:1]'], {}), '(np_img_array[0:1, :, 0:1])\n', (3859, 3886), True, 'import numpy as np\n'), ((3907, 3944), 'numpy.empty', 'np.empty', (['(height, width, 3)', '"""uint8"""'], {}), "((height, width, 3), 'uint8')\n", (3915, 3944), True, 'import numpy as np\n'), ((4022, 4045), 'wx.Image', 'wx.Image', (['width', 'height'], {}), '(width, height)\n', (4030, 4045), False, 'import wx\n'), ((2020, 2040), 'wx.Image', 'wx.Image', (['NewW', 'NewW'], {}), '(NewW, NewW)\n', (2028, 2040), False, 'import wx\n'), ((2109, 2130), 'wx.MemoryDC', 'wx.MemoryDC', (['wxBitmap'], {}), '(wxBitmap)\n', (2120, 2130), False, 'import wx\n'), ((2995, 3027), 'numpy.zeros', 'np.zeros', (['(ymax, xmax)', '"""double"""'], {}), "((ymax, xmax), 'double')\n", (3003, 3027), True, 'import numpy as np\n'), ((1074, 1106), 'numpy.zeros', 'np.zeros', (['(xmax, ymax)', '"""double"""'], {}), "((xmax, ymax), 'double')\n", (1082, 1106), True, 'import numpy as np\n'), ((1141, 1173), 'numpy.zeros', 'np.zeros', (['(xmax, ymax)', '"""double"""'], {}), "((xmax, ymax), 'double')\n", (1149, 1173), True, 'import numpy as np\n'), ((967, 986), 'numpy.amax', 'np.amax', (['data_3d_in'], {}), '(data_3d_in)\n', (974, 986), True, 'import numpy as np\n'), ((1024, 1043), 'numpy.amin', 'np.amin', (['data_3d_in'], {}), '(data_3d_in)\n', (1031, 1043), True, 'import numpy as np\n')]
|
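The snippet above round-trips a flex image through a numpy buffer, a wx.Image and a scaled wx.Bitmap. A minimal, self-contained sketch of that conversion step, using only the wx calls already present in the snippet (the function name and the wx.App note are assumptions, not part of the original):
import numpy as np
import wx
def numpy_rgb_to_bitmap(rgb_array, scale=1.0):
    # rgb_array: (height, width, 3) uint8 image buffer, as built in the snippet above
    height, width = rgb_array.shape[:2]
    image = wx.Image(width, height)
    image.SetData(rgb_array.tobytes())  # tobytes() is the non-deprecated spelling of tostring()
    image = image.Scale(int(width * scale), int(height * scale), wx.IMAGE_QUALITY_NORMAL)
    return image.ConvertToBitmap()  # a wx.App must already exist at this point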
import numpy as np
def check_x_y(x, y):
assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray)
assert np.ndim(x) <= 3 and np.ndim(y) <= 2
assert len(x) == len(y)
|
[
"numpy.ndim"
] |
[((125, 135), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (132, 135), True, 'import numpy as np\n'), ((145, 155), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (152, 155), True, 'import numpy as np\n')]
|
import numpy as np
from radix import radixConvert
c = radixConvert()
a = np.load("../../data/5/layer4.npy")
print(a.shape)
a = a*128
a = np.around(a).astype(np.int16)
print(a)
a = np.load('../../data/6.npy')
a = a*128
a = np.around(a).astype(np.int8)
print(a.shape)
for i in range(84):
print(i)
print(a[i])
'''
a = a*128
print(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
if a[i][j] > 127:
a[i][j] = 127
a = np.around(a).astype(np.int8)
print(a)
print(a[4][17])
weight_file = open('f1_rom.coe', 'w')
weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n')
weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n')
for i in range(32):
for j in range(32):
if(i < 2 or i > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
elif(j < 2 or j > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
else:
weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n')
'''
|
[
"numpy.load",
"numpy.around",
"radix.radixConvert"
] |
[((54, 68), 'radix.radixConvert', 'radixConvert', ([], {}), '()\n', (66, 68), False, 'from radix import radixConvert\n'), ((74, 108), 'numpy.load', 'np.load', (['"""../../data/5/layer4.npy"""'], {}), "('../../data/5/layer4.npy')\n", (81, 108), True, 'import numpy as np\n'), ((183, 210), 'numpy.load', 'np.load', (['"""../../data/6.npy"""'], {}), "('../../data/6.npy')\n", (190, 210), True, 'import numpy as np\n'), ((139, 151), 'numpy.around', 'np.around', (['a'], {}), '(a)\n', (148, 151), True, 'import numpy as np\n'), ((225, 237), 'numpy.around', 'np.around', (['a'], {}), '(a)\n', (234, 237), True, 'import numpy as np\n')]
|
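The scaling by 128 followed by np.around and an integer cast in the snippet above is a simple fixed-point quantization, and the commented-out loop caps values at 127 to avoid int8 overflow. A hedged sketch of the same idea with the saturation made explicit via np.clip (the helper name is illustrative, not from the original):
import numpy as np
def quantize_to_int8(weights):
    # Scale into the signed 8-bit range, round, and saturate to [-128, 127]
    scaled = np.around(weights * 128)
    return np.clip(scaled, -128, 127).astype(np.int8)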
#!/usr/bin/env python
# coding: utf-8
# conda install pytorch>=1.6 cudatoolkit=10.2 -c pytorch
# wandb login XXX
import json
import logging
import os
import re
import sklearn
import time
from itertools import product
import numpy as np
import pandas as pd
import wandb
#from IPython import get_ipython
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from simpletransformers.classification import MultiLabelClassificationModel
from sklearn.model_selection import train_test_split
truthy_values = ("true", "1", "y", "yes")
TAG = os.environ.get("TAG", "bertsification")
LANGS = [lang.strip() for lang in os.environ.get("LANGS", "es,ge,en,multi").lower().split(",")]
MODELNAMES = os.environ.get("MODELNAMES")
EVAL = os.environ.get("EVAL", "True").lower() in truthy_values
OVERWRITE = os.environ.get("OVERWRITE", "False").lower() in truthy_values
logging.basicConfig(level=logging.INFO, filename=time.strftime("models/{}-%Y-%m-%dT%H%M%S.log".format(TAG)))
with open('pid', 'w') as pid:
pid.write(str(os.getpid()))
logging.info("Experiment '{}' on {}, (eval = {}, pid = {})".format(
TAG, LANGS, str(EVAL), str(os.getpid()),
))
# SimpleTransformers (based on HuggingFace/Transformers) for Multilingual Scansion
# We will be using `simpletransformers`, a wrapper of `huggingface/transformers` to fine-tune different BERT-based and other architecture models with support for Spanish.
# Utils
def clean_text(string):
output = string.strip()
# replacements = (("“", '"'), ("”", '"'), ("//", ""), ("«", '"'), ("»",'"'))
replacements = (
("“", ''), ("”", ''), ("//", ""), ("«", ''), ("»",''), (",", ''),
(";", ''), (".", ''),
# ("?", ''), ("¿", ''), ("¡", ''), ("!", ''), ("-", ' '),
)
for replacement in replacements:
output = output.replace(*replacement)
# Any sequence of two or more spaces should be converted into one space
output = re.sub(r'(?is)\s+', ' ', output)
return output.strip()
def metric2binary(meter, pad=11):
    return ([1 if syllable == "+" else 0 for syllable in meter] + [0] * (pad - len(meter)))[:pad]
def label2metric(label):
return "".join("+" if l else "-" for l in label)
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
# Spanish
# if not os.path.isfile("adso100.json"):
# get_ipython().system("averell export adso100 --filename adso100.json")
# if not os.path.isfile("adso.json"):
# get_ipython().system("averell export adso --filename adso.json")
es_test = (pd
.read_json(open("adso100.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
length=lambda x: x["metrical_pattern"].str.len()
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
es_test = es_test[es_test["length"] == 11]
es = (pd
.read_json(open("adso.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
length=lambda x: x["metrical_pattern"].str.len()
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
es = es[~es["text"].isin(es_test["text"])][es["length"] == 11]
es["labels"] = es.meter.apply(metric2binary)
es_train, es_eval = train_test_split(
es[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("Spanish")
logging.info("- Lines: {} train, {} eval, {} test".format(es_train.shape[0], es_eval.shape[0], es_test.shape[0]))
# English
en_test = (pd
.read_csv("4b4v_prosodic_meter.csv")
.assign(
text=lambda x: x["text"].apply(clean_text),
length=lambda x: x["meter"].str.len()
)
.drop_duplicates("text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
en_test = en_test.query("length in (5,6,7,8,9,10,11)")
# if not os.path.isfile("ecpa.json"):
# get_ipython().system("averell export ecpa --filename ecpa.json")
en = (pd
.read_json(open("ecpa.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
metrical_pattern=lambda x: x["metrical_pattern"].str.replace("|", "").str.replace("(", "").str.replace(")", "")
)
.assign(
length=lambda x: x["metrical_pattern"].str.len(),
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
en = en[~en["text"].isin(en_test["text"])].query("length in (5,6,7,8,9,10,11)")
en["labels"] = en.meter.apply(metric2binary)
en_train, en_eval = train_test_split(
en[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("English")
logging.info("- Lines: {} train, {} eval, {} test".format(en_train.shape[0], en_eval.shape[0], en_test.shape[0]))
# sota
en_sota = sum(en_test.meter == en_test.sota) / en_test.meter.size
# German
ge = (pd
.read_csv("po-emo-metricalizer.csv")
.rename(columns={"verse": "text", "annotated_pattern": "meter", "metricalizer_pattern": "sota"})
.assign(
text=lambda x: x["text"].apply(clean_text),
length=lambda x: x["meter"].str.len()
)
.drop_duplicates("text")
.query("length in (5, 6, 7, 8, 9, 10, 11)")
)
ge["labels"] = ge.meter.apply(metric2binary)
ge_train_eval, ge_test = train_test_split(ge, test_size=0.15, random_state=42)
ge_train, ge_eval = train_test_split(
ge_train_eval[["text", "labels"]], test_size=0.176, random_state=42)
logging.info("German")
logging.info("- Lines: {} train, {} eval, {} test".format(ge_train.shape[0], ge_eval.shape[0], ge_test.shape[0]))
# sota
ge_sota = sum(ge_test.meter == ge_test.sota) / ge_test.meter.size
# training
# Multilingual inputs
# - bert bert-base-multilingual-cased
# - distilbert distilbert-base-multilingual-cased
# - xlmroberta, xlm-roberta-base
# - xlmroberta, xlm-roberta-large
# Only English
# - roberta roberta-base
# - roberta roberta-large
# - albert albert-xxlarge-v2
# You can set class weights by using the optional weight argument
models = (
# ("xlnet", "xlnet-base-cased"),
("bert", "bert-base-multilingual-cased"),
("distilbert", "distilbert-base-multilingual-cased"),
("roberta", "roberta-base"),
("roberta", "roberta-large"),
("xlmroberta", "xlm-roberta-base"),
("xlmroberta", "xlm-roberta-large"),
("electra", "google/electra-base-discriminator"),
("albert", "albert-base-v2"),
("albert", "albert-large-v2"),
)
if MODELNAMES:
models = [list(map(str.strip, modelname.split(",")))
for modelname in MODELNAMES.split(";")]
langs = LANGS or ("es", "ge", "en", "multi")
for lang, (model_type, model_name) in product(langs, models):
model_output = 'models/{}-{}-{}-{}'.format(TAG, lang, model_type, model_name.replace("/", "-"))
if OVERWRITE is False and os.path.exists(model_output):
logging.info("Skipping training of {} for {}".format(model_name, lang))
continue
logging.info("Starting training of {} for {}".format(model_name, lang))
run = wandb.init(project=model_output.split("/")[-1], reinit=True)
model = MultiLabelClassificationModel(
model_type, model_name, num_labels=11, args={
'output_dir': model_output,
'best_model_dir': '{}/best'.format(model_output),
'reprocess_input_data': True,
'overwrite_output_dir': True,
'use_cached_eval_features': True,
'num_train_epochs': 100, # For BERT, 2, 3, 4
'save_steps': 10000,
'early_stopping_patience': 5,
'evaluate_during_training': EVAL,
#'early_stopping_metric': "accuracy_score",
'evaluate_during_training_steps': 1000,
'early_stopping_delta': 0.00001,
'manual_seed': 42,
# 'learning_rate': 2e-5, # For BERT, 5e-5, 3e-5, 2e-5
# For BERT 16, 32. It could be 128, but with gradient_acc_steps set to 2 is equivalent
'train_batch_size': 16 if "large" in model_name else 32,
'eval_batch_size': 16 if "large" in model_name else 32,
# Doubles train_batch_size, but gradients and wrights are calculated once every 2 steps
'gradient_accumulation_steps': 2 if "large" in model_name else 1,
'max_seq_length': 32,
'use_early_stopping': True,
'wandb_project': model_output.split("/")[-1],
#'wandb_kwargs': {'reinit': True},
# "adam_epsilon": 3e-5, # 1e-8
"silent": False,
"fp16": False,
"n_gpu": 2,
})
# train the model
if lang == "multi":
train_df = pd.concat([es_train, en_train, ge_train], ignore_index=True)
eval_df = pd.concat([es_eval, en_eval, ge_eval], ignore_index=True)
elif lang == "es":
train_df = es_train
eval_df = es_eval
elif lang == "en":
train_df = en_train
eval_df = en_eval
elif lang == "ge":
train_df = ge_train
eval_df = ge_eval
if EVAL:
model.train_model(train_df, eval_df=eval_df)
# evaluate the model
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
logging.info(str(result))
#logging.info(str(model_outputs))
else:
train_eval_df = pd.concat([train_df, eval_df, ge_train], ignore_index=True)
model.train_model(train_eval_df)
if lang in ("es", "multi"):
es_test["predicted"], *_ = model.predict(es_test.text.values)
es_test["predicted"] = es_test["predicted"].apply(label2metric)
es_test["pred"] = es_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
es_bert = sum(es_test.meter == es_test.pred) / es_test.meter.size
logging.info("Accuracy [{}:es]: {} ({})".format(lang, es_bert, model_name))
wandb.log({"accuracy_es": es_bert})
if lang in ("en", "multi"):
en_test["predicted"], *_ = model.predict(en_test.text.values)
en_test["predicted"] = en_test["predicted"].apply(label2metric)
en_test["pred"] = en_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
en_bert = sum(en_test.meter == en_test.pred) / en_test.meter.size
logging.info("Accuracy [{}:en]: {} ({})".format(lang, en_bert, model_name))
wandb.log({"accuracy_en": en_bert})
if lang in ("ge", "multi"):
ge_test["predicted"], *_ = model.predict(ge_test.text.values)
ge_test["predicted"] = ge_test["predicted"].apply(label2metric)
ge_test["pred"] = ge_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
ge_bert = sum(ge_test.meter == ge_test.pred) / ge_test.meter.size
logging.info("Accuracy [{}:ge]: {} ({})".format(lang, ge_bert, model_name))
wandb.log({"accuracy_ge": ge_bert})
if lang in ("multi", ):
test_df = pd.concat([es_test, en_test, ge_test], ignore_index=True)
test_df["predicted"], *_ = model.predict(test_df.text.values)
test_df["predicted"] = test_df["predicted"].apply(label2metric)
test_df["pred"] = test_df.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
multi_bert = sum(test_df.meter == test_df.pred) / test_df.meter.size
logging.info("Accuracy [{}:multi]: {} ({})".format(lang, multi_bert, model_name))
wandb.log({"accuracy_multi": multi_bert})
run.finish()
logging.info("Done training '{}'".format(model_output))
# get_ipython().system("rm -rf `ls -dt models/{}-*/checkpoint*/ | awk 'NR>5'`".format(TAG))
logging.info("Done training")
|
[
"os.path.exists",
"wandb.log",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"itertools.product",
"os.environ.get",
"numpy.argmax",
"numpy.sum",
"pandas.concat",
"os.getpid",
"re.sub",
"logging.info"
] |
[((827, 866), 'os.environ.get', 'os.environ.get', (['"""TAG"""', '"""bertsification"""'], {}), "('TAG', 'bertsification')\n", (841, 866), False, 'import os\n'), ((976, 1004), 'os.environ.get', 'os.environ.get', (['"""MODELNAMES"""'], {}), "('MODELNAMES')\n", (990, 1004), False, 'import os\n'), ((3792, 3865), 'sklearn.model_selection.train_test_split', 'train_test_split', (["es[['text', 'labels']]"], {'test_size': '(0.25)', 'random_state': '(42)'}), "(es[['text', 'labels']], test_size=0.25, random_state=42)\n", (3808, 3865), False, 'from sklearn.model_selection import train_test_split\n'), ((3871, 3894), 'logging.info', 'logging.info', (['"""Spanish"""'], {}), "('Spanish')\n", (3883, 3894), False, 'import logging\n'), ((5159, 5232), 'sklearn.model_selection.train_test_split', 'train_test_split', (["en[['text', 'labels']]"], {'test_size': '(0.25)', 'random_state': '(42)'}), "(en[['text', 'labels']], test_size=0.25, random_state=42)\n", (5175, 5232), False, 'from sklearn.model_selection import train_test_split\n'), ((5238, 5261), 'logging.info', 'logging.info', (['"""English"""'], {}), "('English')\n", (5250, 5261), False, 'import logging\n'), ((5877, 5930), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ge'], {'test_size': '(0.15)', 'random_state': '(42)'}), '(ge, test_size=0.15, random_state=42)\n', (5893, 5930), False, 'from sklearn.model_selection import train_test_split\n'), ((5951, 6040), 'sklearn.model_selection.train_test_split', 'train_test_split', (["ge_train_eval[['text', 'labels']]"], {'test_size': '(0.176)', 'random_state': '(42)'}), "(ge_train_eval[['text', 'labels']], test_size=0.176,\n random_state=42)\n", (5967, 6040), False, 'from sklearn.model_selection import train_test_split\n'), ((6042, 6064), 'logging.info', 'logging.info', (['"""German"""'], {}), "('German')\n", (6054, 6064), False, 'import logging\n'), ((7239, 7261), 'itertools.product', 'product', (['langs', 'models'], {}), '(langs, models)\n', (7246, 7261), False, 'from itertools import product\n'), ((12099, 12128), 'logging.info', 'logging.info', (['"""Done training"""'], {}), "('Done training')\n", (12111, 12128), False, 'import logging\n'), ((2189, 2221), 're.sub', 're.sub', (['"""(?is)\\\\s+"""', '""" """', 'output'], {}), "('(?is)\\\\s+', ' ', output)\n", (2195, 2221), False, 'import re\n'), ((2594, 2626), 'numpy.sum', 'np.sum', (['(pred_flat == labels_flat)'], {}), '(pred_flat == labels_flat)\n', (2600, 2626), True, 'import numpy as np\n'), ((7393, 7421), 'os.path.exists', 'os.path.exists', (['model_output'], {}), '(model_output)\n', (7407, 7421), False, 'import os\n'), ((9215, 9275), 'pandas.concat', 'pd.concat', (['[es_train, en_train, ge_train]'], {'ignore_index': '(True)'}), '([es_train, en_train, ge_train], ignore_index=True)\n', (9224, 9275), True, 'import pandas as pd\n'), ((9294, 9351), 'pandas.concat', 'pd.concat', (['[es_eval, en_eval, ge_eval]'], {'ignore_index': '(True)'}), '([es_eval, en_eval, ge_eval], ignore_index=True)\n', (9303, 9351), True, 'import pandas as pd\n'), ((9865, 9924), 'pandas.concat', 'pd.concat', (['[train_df, eval_df, ge_train]'], {'ignore_index': '(True)'}), '([train_df, eval_df, ge_train], ignore_index=True)\n', (9874, 9924), True, 'import pandas as pd\n'), ((10399, 10434), 'wandb.log', 'wandb.log', (["{'accuracy_es': es_bert}"], {}), "({'accuracy_es': es_bert})\n", (10408, 10434), False, 'import wandb\n'), ((10867, 10902), 'wandb.log', 'wandb.log', (["{'accuracy_en': en_bert}"], {}), "({'accuracy_en': en_bert})\n", (10876, 10902), False, 'import 
wandb\n'), ((11335, 11370), 'wandb.log', 'wandb.log', (["{'accuracy_ge': ge_bert}"], {}), "({'accuracy_ge': ge_bert})\n", (11344, 11370), False, 'import wandb\n'), ((11417, 11474), 'pandas.concat', 'pd.concat', (['[es_test, en_test, ge_test]'], {'ignore_index': '(True)'}), '([es_test, en_test, ge_test], ignore_index=True)\n', (11426, 11474), True, 'import pandas as pd\n'), ((11884, 11925), 'wandb.log', 'wandb.log', (["{'accuracy_multi': multi_bert}"], {}), "({'accuracy_multi': multi_bert})\n", (11893, 11925), False, 'import wandb\n'), ((1012, 1042), 'os.environ.get', 'os.environ.get', (['"""EVAL"""', '"""True"""'], {}), "('EVAL', 'True')\n", (1026, 1042), False, 'import os\n'), ((1080, 1116), 'os.environ.get', 'os.environ.get', (['"""OVERWRITE"""', '"""False"""'], {}), "('OVERWRITE', 'False')\n", (1094, 1116), False, 'import os\n'), ((1299, 1310), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1308, 1310), False, 'import os\n'), ((1412, 1423), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1421, 1423), False, 'import os\n'), ((2513, 2537), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2522, 2537), True, 'import numpy as np\n'), ((901, 942), 'os.environ.get', 'os.environ.get', (['"""LANGS"""', '"""es,ge,en,multi"""'], {}), "('LANGS', 'es,ge,en,multi')\n", (915, 942), False, 'import os\n'), ((4031, 4069), 'pandas.read_csv', 'pd.read_csv', (['"""4b4v_prosodic_meter.csv"""'], {}), "('4b4v_prosodic_meter.csv')\n", (4042, 4069), True, 'import pandas as pd\n'), ((5465, 5503), 'pandas.read_csv', 'pd.read_csv', (['"""po-emo-metricalizer.csv"""'], {}), "('po-emo-metricalizer.csv')\n", (5476, 5503), True, 'import pandas as pd\n')]
|
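In the scansion script above, metric2binary maps a stress pattern such as "+-..." to a fixed-length 0/1 label vector (one label per syllable position) and label2metric inverts the mapping, which is what makes the task a multi-label classification problem. A small round-trip illustration using those two helpers (the example pattern is invented):
meter = "+--+-+-+-+-"                  # an 11-syllable pattern, "+" = stressed
labels = metric2binary(meter)          # -> [1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]
assert label2metric(labels) == meter   # label2metric joins "+"/"-" back into a string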
from __future__ import print_function, division
import numpy as np
import Nio
import time, os
#
# Creating a file
#
init_time = time.clock()
ncfile = 'test-large.nc'
if (os.path.exists(ncfile)):
os.system("/bin/rm -f " + ncfile)
opt = Nio.options()
opt.Format = "LargeFile"
opt.PreFill = False
file = Nio.open_file(ncfile, 'w', options=opt)
file.title = "Testing large files and dimensions"
file.create_dimension('big', 2500000000)
bigvar = file.create_variable('bigvar', "b", ('big',))
print("created bigvar")
# note it is incredibly slow to write a scalar to a large file variable
# so create a temporary variable x that will get assigned in steps
x = np.empty(1000000,dtype = 'int8')
#print x
x[:] = 42
t = list(range(0,2500000000,1000000))
ii = 0
for i in t:
if (i == 0):
continue
print(t[ii],i)
bigvar[t[ii]:i] = x[:]
ii += 1
x[:] = 84
bigvar[2499000000:2500000000] = x[:]
bigvar[-1] = 84
bigvar.units = "big var units"
#print bigvar[-1]
print(bigvar.dimensions)
# check unlimited status
for dim in list(file.dimensions.keys()):
print(dim, " unlimited: ",file.unlimited(dim))
print(file)
print("closing file")
print('elapsed time: ',time.clock() - init_time)
file.close()
#quit()
#
# Reading a file
#
print('opening file for read')
print('elapsed time: ',time.clock() - init_time)
file = Nio.open_file(ncfile, 'r')
print('file is open')
print('elapsed time: ',time.clock() - init_time)
print(file.dimensions)
print(list(file.variables.keys()))
print(file)
print("reading variable")
print('elapsed time: ',time.clock() - init_time)
x = file.variables['bigvar']
print(x[0],x[1000000],x[249000000],x[2499999999])
print("max and min")
min = x[:].min()
max = x[:].max()
print(min, max)
print('elapsed time: ',time.clock() - init_time)
# check unlimited status
for dim in list(file.dimensions.keys()):
print(dim, " unlimited: ",file.unlimited(dim))
print("closing file")
print('elapsed time: ',time.clock() - init_time)
file.close()
|
[
"os.path.exists",
"time.clock",
"numpy.empty",
"Nio.options",
"Nio.open_file",
"os.system"
] |
[((129, 141), 'time.clock', 'time.clock', ([], {}), '()\n', (139, 141), False, 'import time, os\n'), ((171, 193), 'os.path.exists', 'os.path.exists', (['ncfile'], {}), '(ncfile)\n', (185, 193), False, 'import time, os\n'), ((238, 251), 'Nio.options', 'Nio.options', ([], {}), '()\n', (249, 251), False, 'import Nio\n'), ((304, 343), 'Nio.open_file', 'Nio.open_file', (['ncfile', '"""w"""'], {'options': 'opt'}), "(ncfile, 'w', options=opt)\n", (317, 343), False, 'import Nio\n'), ((662, 693), 'numpy.empty', 'np.empty', (['(1000000)'], {'dtype': '"""int8"""'}), "(1000000, dtype='int8')\n", (670, 693), True, 'import numpy as np\n'), ((1321, 1347), 'Nio.open_file', 'Nio.open_file', (['ncfile', '"""r"""'], {}), "(ncfile, 'r')\n", (1334, 1347), False, 'import Nio\n'), ((198, 231), 'os.system', 'os.system', (["('/bin/rm -f ' + ncfile)"], {}), "('/bin/rm -f ' + ncfile)\n", (207, 231), False, 'import time, os\n'), ((1166, 1178), 'time.clock', 'time.clock', ([], {}), '()\n', (1176, 1178), False, 'import time, os\n'), ((1288, 1300), 'time.clock', 'time.clock', ([], {}), '()\n', (1298, 1300), False, 'import time, os\n'), ((1394, 1406), 'time.clock', 'time.clock', ([], {}), '()\n', (1404, 1406), False, 'import time, os\n'), ((1539, 1551), 'time.clock', 'time.clock', ([], {}), '()\n', (1549, 1551), False, 'import time, os\n'), ((1738, 1750), 'time.clock', 'time.clock', ([], {}), '()\n', (1748, 1750), False, 'import time, os\n'), ((1926, 1938), 'time.clock', 'time.clock', ([], {}), '()\n', (1936, 1938), False, 'import time, os\n')]
|
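The write loop in the script above avoids the very slow element-wise assignment noted in its comments by filling a reusable one-million-element buffer and writing it slice by slice. The same chunking pattern with explicit start/stop indices (a sketch, assuming bigvar is the Nio variable created above):
import numpy as np
chunk = 1000000
buf = np.full(chunk, 42, dtype='int8')   # reusable write buffer
total = 2500000000
for start in range(0, total, chunk):
    stop = min(start + chunk, total)
    bigvar[start:stop] = buf[:stop - start]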
###############################################################################
# Author: <NAME>
# Project: ARC-II: Convolutional Matching Model
# Date Created: 7/18/2017
#
# File Description: This script contains ranking evaluation functions.
###############################################################################
import torch, numpy
def mean_average_precision(logits, target):
"""
Compute mean average precision.
:param logits: 2d tensor [batch_size x num_clicks_per_query]
:param target: 2d tensor [batch_size x num_clicks_per_query]
:return: mean average precision [a float value]
"""
assert logits.size() == target.size()
sorted, indices = torch.sort(logits, 1, descending=True)
map = 0
for i in range(indices.size(0)):
average_precision = 0
num_rel = 0
for j in range(indices.size(1)):
if target[i, indices[i, j].data[0]].data[0] == 1:
num_rel += 1
average_precision += num_rel / (j + 1)
average_precision = average_precision / num_rel
map += average_precision
return map / indices.size(0)
def NDCG(logits, target, k):
"""
Compute normalized discounted cumulative gain.
:param logits: 2d tensor [batch_size x rel_docs_per_query]
:param target: 2d tensor [batch_size x rel_docs_per_query]
    :param k: cutoff rank at which the gain is computed
    :return: normalized discounted cumulative gain [a float value]
"""
assert logits.size() == target.size()
assert logits.size(1) >= k, 'NDCG@K cannot be computed, invalid value of K.'
sorted, indices = torch.sort(logits, 1, descending=True)
NDCG = 0
for i in range(indices.size(0)):
DCG_ref = 0
num_rel_docs = torch.nonzero(target[i].data).size(0)
for j in range(indices.size(1)):
if j == k:
break
if target[i, indices[i, j].data[0]].data[0] == 1:
DCG_ref += 1 / numpy.log2(j + 2)
DCG_gt = 0
for j in range(num_rel_docs):
if j == k:
break
DCG_gt += 1 / numpy.log2(j + 2)
NDCG += DCG_ref / DCG_gt
return NDCG / indices.size(0)
def MRR(logits, target):
"""
Compute mean reciprocal rank.
:param logits: 2d tensor [batch_size x rel_docs_per_query]
:param target: 2d tensor [batch_size x rel_docs_per_query]
:return: mean reciprocal rank [a float value]
"""
assert logits.size() == target.size()
sorted, indices = torch.sort(logits, 1, descending=True)
total_reciprocal_rank = 0
for i in range(indices.size(0)):
for j in range(indices.size(1)):
if target[i, indices[i, j].data[0]].data[0] == 1:
total_reciprocal_rank += 1.0 / (j + 1)
break
return total_reciprocal_rank / logits.size(0)
|
[
"torch.sort",
"numpy.log2",
"torch.nonzero"
] |
[((708, 746), 'torch.sort', 'torch.sort', (['logits', '(1)'], {'descending': '(True)'}), '(logits, 1, descending=True)\n', (718, 746), False, 'import torch, numpy\n'), ((1603, 1641), 'torch.sort', 'torch.sort', (['logits', '(1)'], {'descending': '(True)'}), '(logits, 1, descending=True)\n', (1613, 1641), False, 'import torch, numpy\n'), ((2531, 2569), 'torch.sort', 'torch.sort', (['logits', '(1)'], {'descending': '(True)'}), '(logits, 1, descending=True)\n', (2541, 2569), False, 'import torch, numpy\n'), ((1739, 1768), 'torch.nonzero', 'torch.nonzero', (['target[i].data'], {}), '(target[i].data)\n', (1752, 1768), False, 'import torch, numpy\n'), ((2112, 2129), 'numpy.log2', 'numpy.log2', (['(j + 2)'], {}), '(j + 2)\n', (2122, 2129), False, 'import torch, numpy\n'), ((1961, 1978), 'numpy.log2', 'numpy.log2', (['(j + 2)'], {}), '(j + 2)\n', (1971, 1978), False, 'import torch, numpy\n')]
|
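Written out with one-based rank j, rel(j) in {0, 1}, R relevant documents per query and R_j relevant documents among the top j, the three evaluators above compute the standard ranking metrics:
\mathrm{AP} = \frac{1}{R} \sum_{j} \mathrm{rel}(j)\,\frac{R_j}{j}, \qquad
\mathrm{DCG@}k = \sum_{j=1}^{k} \frac{\mathrm{rel}(j)}{\log_2(j+1)}, \qquad
\mathrm{NDCG@}k = \frac{\mathrm{DCG@}k}{\mathrm{IDCG@}k}, \qquad
\mathrm{MRR} = \frac{1}{|Q|} \sum_{q \in Q} \frac{1}{\mathrm{rank}_q}
MAP averages AP over the batch, IDCG@k is the DCG of an ideal ranking, and rank_q is the position of the first relevant document; the numpy.log2(j + 2) in the code is the same log_2(j+1) term written with a zero-based loop index.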
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import cv2
import math
import numpy as np
import paddle
import yaml
from det_keypoint_unite_utils import argsparser
from preprocess import decode_image
from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log
from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint
from visualize import visualize_pose
from benchmark_utils import PaddleInferBenchmark
from utils import get_current_memory_mb
from keypoint_postprocess import translate_to_ori_images
KEYPOINT_SUPPORT_MODELS = {
'HigherHRNet': 'keypoint_bottomup',
'HRNet': 'keypoint_topdown'
}
def predict_with_given_det(image, det_res, keypoint_detector,
keypoint_batch_size, run_benchmark):
rec_images, records, det_rects = keypoint_detector.get_person_from_rect(
image, det_res)
keypoint_vector = []
score_vector = []
rect_vector = det_rects
keypoint_results = keypoint_detector.predict_image(
rec_images, run_benchmark, repeats=10, visual=False)
keypoint_vector, score_vector = translate_to_ori_images(keypoint_results,
np.array(records))
keypoint_res = {}
keypoint_res['keypoint'] = [
keypoint_vector.tolist(), score_vector.tolist()
] if len(keypoint_vector) > 0 else [[], []]
keypoint_res['bbox'] = rect_vector
return keypoint_res
def topdown_unite_predict(detector,
topdown_keypoint_detector,
image_list,
keypoint_batch_size=1,
save_res=False):
det_timer = detector.get_timer()
store_res = []
for i, img_file in enumerate(image_list):
# Decode image in advance in det + pose prediction
det_timer.preprocess_time_s.start()
image, _ = decode_image(img_file, {})
det_timer.preprocess_time_s.end()
if FLAGS.run_benchmark:
results = detector.predict_image(
[image], run_benchmark=True, repeats=10)
cm, gm, gu = get_current_memory_mb()
detector.cpu_mem += cm
detector.gpu_mem += gm
detector.gpu_util += gu
else:
results = detector.predict_image([image], visual=False)
results = detector.filter_box(results, FLAGS.det_threshold)
if results['boxes_num'] > 0:
keypoint_res = predict_with_given_det(
image, results, topdown_keypoint_detector, keypoint_batch_size,
FLAGS.run_benchmark)
if save_res:
save_name = img_file if isinstance(img_file, str) else i
store_res.append([
save_name, keypoint_res['bbox'],
[keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
])
else:
results["keypoint"] = [[], []]
keypoint_res = results
if FLAGS.run_benchmark:
cm, gm, gu = get_current_memory_mb()
topdown_keypoint_detector.cpu_mem += cm
topdown_keypoint_detector.gpu_mem += gm
topdown_keypoint_detector.gpu_util += gu
else:
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
visualize_pose(
img_file,
keypoint_res,
visual_thresh=FLAGS.keypoint_threshold,
save_dir=FLAGS.output_dir)
if save_res:
"""
1) store_res: a list of image_data
2) image_data: [imageid, rects, [keypoints, scores]]
3) rects: list of rect [xmin, ymin, xmax, ymax]
4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list
5) scores: mean of all joint conf
"""
with open("det_keypoint_unite_image_results.json", 'w') as wf:
json.dump(store_res, wf, indent=4)
def topdown_unite_predict_video(detector,
topdown_keypoint_detector,
camera_id,
keypoint_batch_size=1,
save_res=False):
video_name = 'output.mp4'
if camera_id != -1:
capture = cv2.VideoCapture(camera_id)
else:
capture = cv2.VideoCapture(FLAGS.video_file)
video_name = os.path.split(FLAGS.video_file)[-1]
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("fps: %d, frame_count: %d" % (fps, frame_count))
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
out_path = os.path.join(FLAGS.output_dir, video_name)
fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
index = 0
store_res = []
while (1):
ret, frame = capture.read()
if not ret:
break
index += 1
print('detect frame: %d' % (index))
frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = detector.predict_image([frame2], visual=False)
results = detector.filter_box(results, FLAGS.det_threshold)
if results['boxes_num'] == 0:
writer.write(frame)
continue
keypoint_res = predict_with_given_det(
frame2, results, topdown_keypoint_detector, keypoint_batch_size,
FLAGS.run_benchmark)
im = visualize_pose(
frame,
keypoint_res,
visual_thresh=FLAGS.keypoint_threshold,
returnimg=True)
if save_res:
store_res.append([
index, keypoint_res['bbox'],
[keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
])
writer.write(im)
if camera_id != -1:
cv2.imshow('Mask Detection', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
writer.release()
print('output_video saved to: {}'.format(out_path))
if save_res:
"""
1) store_res: a list of frame_data
2) frame_data: [frameid, rects, [keypoints, scores]]
3) rects: list of rect [xmin, ymin, xmax, ymax]
4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list
5) scores: mean of all joint conf
"""
with open("det_keypoint_unite_video_results.json", 'w') as wf:
json.dump(store_res, wf, indent=4)
def main():
deploy_file = os.path.join(FLAGS.det_model_dir, 'infer_cfg.yml')
with open(deploy_file) as f:
yml_conf = yaml.safe_load(f)
arch = yml_conf['arch']
detector_func = 'Detector'
if arch == 'PicoDet':
detector_func = 'DetectorPicoDet'
detector = eval(detector_func)(FLAGS.det_model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
threshold=FLAGS.det_threshold)
topdown_keypoint_detector = KeyPointDetector(
FLAGS.keypoint_model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
batch_size=FLAGS.keypoint_batch_size,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
use_dark=FLAGS.use_dark)
keypoint_arch = topdown_keypoint_detector.pred_config.arch
assert KEYPOINT_SUPPORT_MODELS[
keypoint_arch] == 'keypoint_topdown', 'Detection-Keypoint unite inference only supports topdown models.'
# predict from video file or camera video stream
if FLAGS.video_file is not None or FLAGS.camera_id != -1:
topdown_unite_predict_video(detector, topdown_keypoint_detector,
FLAGS.camera_id, FLAGS.keypoint_batch_size,
FLAGS.save_res)
else:
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
topdown_unite_predict(detector, topdown_keypoint_detector, img_list,
FLAGS.keypoint_batch_size, FLAGS.save_res)
if not FLAGS.run_benchmark:
detector.det_times.info(average=True)
topdown_keypoint_detector.det_times.info(average=True)
else:
mode = FLAGS.run_mode
det_model_dir = FLAGS.det_model_dir
det_model_info = {
'model_name': det_model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(detector, img_list, det_model_info, name='Det')
keypoint_model_dir = FLAGS.keypoint_model_dir
keypoint_model_info = {
'model_name': keypoint_model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(topdown_keypoint_detector, img_list, keypoint_model_info,
FLAGS.keypoint_batch_size, 'KeyPoint')
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
print_arguments(FLAGS)
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU'
], "device should be CPU, GPU or XPU"
main()
|
[
"visualize.visualize_pose",
"cv2.imshow",
"numpy.array",
"infer.bench_log",
"os.path.exists",
"infer.get_test_images",
"cv2.VideoWriter",
"paddle.enable_static",
"os.path.split",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"utils.get_current_memory_mb",
"cv2.cvtColor",
"det_keypoint_unite_utils.argsparser",
"infer.print_arguments",
"os.makedirs",
"keypoint_infer.KeyPointDetector",
"os.path.join",
"preprocess.decode_image",
"yaml.safe_load",
"cv2.VideoCapture",
"json.dump"
] |
[((5452, 5494), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'video_name'], {}), '(FLAGS.output_dir, video_name)\n', (5464, 5494), False, 'import os\n'), ((5508, 5539), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (5530, 5539), False, 'import cv2\n'), ((5554, 5609), 'cv2.VideoWriter', 'cv2.VideoWriter', (['out_path', 'fourcc', 'fps', '(width, height)'], {}), '(out_path, fourcc, fps, (width, height))\n', (5569, 5609), False, 'import cv2\n'), ((7294, 7344), 'os.path.join', 'os.path.join', (['FLAGS.det_model_dir', '"""infer_cfg.yml"""'], {}), "(FLAGS.det_model_dir, 'infer_cfg.yml')\n", (7306, 7344), False, 'import os\n'), ((8232, 8616), 'keypoint_infer.KeyPointDetector', 'KeyPointDetector', (['FLAGS.keypoint_model_dir'], {'device': 'FLAGS.device', 'run_mode': 'FLAGS.run_mode', 'batch_size': 'FLAGS.keypoint_batch_size', 'trt_min_shape': 'FLAGS.trt_min_shape', 'trt_max_shape': 'FLAGS.trt_max_shape', 'trt_opt_shape': 'FLAGS.trt_opt_shape', 'trt_calib_mode': 'FLAGS.trt_calib_mode', 'cpu_threads': 'FLAGS.cpu_threads', 'enable_mkldnn': 'FLAGS.enable_mkldnn', 'use_dark': 'FLAGS.use_dark'}), '(FLAGS.keypoint_model_dir, device=FLAGS.device, run_mode=\n FLAGS.run_mode, batch_size=FLAGS.keypoint_batch_size, trt_min_shape=\n FLAGS.trt_min_shape, trt_max_shape=FLAGS.trt_max_shape, trt_opt_shape=\n FLAGS.trt_opt_shape, trt_calib_mode=FLAGS.trt_calib_mode, cpu_threads=\n FLAGS.cpu_threads, enable_mkldnn=FLAGS.enable_mkldnn, use_dark=FLAGS.\n use_dark)\n', (8248, 8616), False, 'from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint\n'), ((10364, 10386), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (10384, 10386), False, 'import paddle\n'), ((10400, 10412), 'det_keypoint_unite_utils.argsparser', 'argsparser', ([], {}), '()\n', (10410, 10412), False, 'from det_keypoint_unite_utils import argsparser\n'), ((10449, 10471), 'infer.print_arguments', 'print_arguments', (['FLAGS'], {}), '(FLAGS)\n', (10464, 10471), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((1804, 1821), 'numpy.array', 'np.array', (['records'], {}), '(records)\n', (1812, 1821), True, 'import numpy as np\n'), ((2490, 2516), 'preprocess.decode_image', 'decode_image', (['img_file', '{}'], {}), '(img_file, {})\n', (2502, 2516), False, 'from preprocess import decode_image\n'), ((4876, 4903), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (4892, 4903), False, 'import cv2\n'), ((4932, 4966), 'cv2.VideoCapture', 'cv2.VideoCapture', (['FLAGS.video_file'], {}), '(FLAGS.video_file)\n', (4948, 4966), False, 'import cv2\n'), ((5365, 5397), 'os.path.exists', 'os.path.exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (5379, 5397), False, 'import os\n'), ((5407, 5436), 'os.makedirs', 'os.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (5418, 5436), False, 'import os\n'), ((5813, 5851), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (5825, 5851), False, 'import cv2\n'), ((6249, 6344), 'visualize.visualize_pose', 'visualize_pose', (['frame', 'keypoint_res'], {'visual_thresh': 'FLAGS.keypoint_threshold', 'returnimg': '(True)'}), '(frame, keypoint_res, visual_thresh=FLAGS.keypoint_threshold,\n returnimg=True)\n', (6263, 6344), False, 'from visualize import visualize_pose\n'), ((7397, 7414), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (7411, 7414), False, 'import yaml\n'), ((9272, 9322), 
'infer.get_test_images', 'get_test_images', (['FLAGS.image_dir', 'FLAGS.image_file'], {}), '(FLAGS.image_dir, FLAGS.image_file)\n', (9287, 9322), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((2721, 2744), 'utils.get_current_memory_mb', 'get_current_memory_mb', ([], {}), '()\n', (2742, 2744), False, 'from utils import get_current_memory_mb\n'), ((3640, 3663), 'utils.get_current_memory_mb', 'get_current_memory_mb', ([], {}), '()\n', (3661, 3663), False, 'from utils import get_current_memory_mb\n'), ((3946, 4056), 'visualize.visualize_pose', 'visualize_pose', (['img_file', 'keypoint_res'], {'visual_thresh': 'FLAGS.keypoint_threshold', 'save_dir': 'FLAGS.output_dir'}), '(img_file, keypoint_res, visual_thresh=FLAGS.\n keypoint_threshold, save_dir=FLAGS.output_dir)\n', (3960, 4056), False, 'from visualize import visualize_pose\n'), ((4519, 4553), 'json.dump', 'json.dump', (['store_res', 'wf'], {'indent': '(4)'}), '(store_res, wf, indent=4)\n', (4528, 4553), False, 'import json\n'), ((4988, 5019), 'os.path.split', 'os.path.split', (['FLAGS.video_file'], {}), '(FLAGS.video_file)\n', (5001, 5019), False, 'import os\n'), ((6643, 6675), 'cv2.imshow', 'cv2.imshow', (['"""Mask Detection"""', 'im'], {}), "('Mask Detection', im)\n", (6653, 6675), False, 'import cv2\n'), ((7227, 7261), 'json.dump', 'json.dump', (['store_res', 'wf'], {'indent': '(4)'}), '(store_res, wf, indent=4)\n', (7236, 7261), False, 'import json\n'), ((9899, 9956), 'infer.bench_log', 'bench_log', (['detector', 'img_list', 'det_model_info'], {'name': '"""Det"""'}), "(detector, img_list, det_model_info, name='Det')\n", (9908, 9956), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((10202, 10313), 'infer.bench_log', 'bench_log', (['topdown_keypoint_detector', 'img_list', 'keypoint_model_info', 'FLAGS.keypoint_batch_size', '"""KeyPoint"""'], {}), "(topdown_keypoint_detector, img_list, keypoint_model_info, FLAGS.\n keypoint_batch_size, 'KeyPoint')\n", (10211, 10313), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((3854, 3886), 'os.path.exists', 'os.path.exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (3868, 3886), False, 'import os\n'), ((3904, 3933), 'os.makedirs', 'os.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (3915, 3933), False, 'import os\n'), ((6691, 6705), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6702, 6705), False, 'import cv2\n')]
|
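The docstrings above describe the layout of each serialized result: [image or frame id, person rects, [keypoints, scores]] with 17 [x, y, conf] triples per person. A single illustrative entry (all values and the exact per-person nesting are invented for illustration, not taken from a real run):
entry = [
    "demo.jpg",                              # image file name, or the frame index for video
    [[110.0, 80.0, 260.0, 420.0]],           # rects: one [xmin, ymin, xmax, ymax] per person
    [
        [[[130.0, 95.0, 0.91]] * 17],        # keypoints: per person, 17 joints as [x, y, conf]
        [0.87],                              # scores: mean joint confidence per person
    ],
]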
import random
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from mla.base import BaseEstimator
from mla.metrics.distance import euclidean_distance
random.seed(1111)
class KMeans(BaseEstimator):
"""Partition a dataset into K clusters.
Finds clusters by repeatedly assigning each data point to the cluster with
the nearest centroid and iterating until the assignments converge (meaning
they don't change during an iteration) or the maximum number of iterations
is reached.
Parameters
----------
K : int
The number of clusters into which the dataset is partitioned.
max_iters: int
The maximum iterations of assigning points to the nearest cluster.
Short-circuited by the assignments converging on their own.
init: str, default 'random'
The name of the method used to initialize the first clustering.
'random' - Randomly select values from the dataset as the K centroids.
'++' - Select a random first centroid from the dataset, then select
K - 1 more centroids by choosing values from the dataset with a
probability distribution proportional to the squared distance
from each point's closest existing cluster. Attempts to create
larger distances between initial clusters to improve convergence
rates and avoid degenerate cases.
"""
y_required = False
def __init__(self, K=5, max_iters=100, init='random'):
self.K = K
self.max_iters = max_iters
self.clusters = [[] for _ in range(self.K)]
self.centroids = []
self.init = init
    def _initialize_centroids(self, init):
"""Set the initial centroids."""
if init == 'random':
self.centroids = [self.X[x] for x in
random.sample(range(self.n_samples), self.K)]
elif init == '++':
self.centroids = [random.choice(self.X)]
while len(self.centroids) < self.K:
self.centroids.append(self._choose_next_center())
else:
raise ValueError('Unknown type of init parameter')
def _predict(self, X=None):
"""Perform the clustering on the dataset."""
        self._initialize_centroids(self.init)
centroids = self.centroids
for _ in range(self.max_iters):
self._assign(centroids)
centroids_old = centroids
centroids = [self._get_centroid(cluster) for cluster in self.clusters]
if self._is_converged(centroids_old, centroids):
break
self.centroids = centroids
return self._get_predictions()
def _get_predictions(self):
predictions = np.empty(self.n_samples)
for i, cluster in enumerate(self.clusters):
for index in cluster:
predictions[index] = i
return predictions
def _assign(self, centroids):
for row in range(self.n_samples):
for i, cluster in enumerate(self.clusters):
if row in cluster:
self.clusters[i].remove(row)
break
closest = self._closest(row, centroids)
self.clusters[closest].append(row)
def _closest(self, fpoint, centroids):
closest_index = None
closest_distance = None
for i, point in enumerate(centroids):
dist = euclidean_distance(self.X[fpoint], point)
if closest_index is None or dist < closest_distance:
closest_index = i
closest_distance = dist
return closest_index
def _get_centroid(self, cluster):
"""Get values by indices and take the mean."""
return [np.mean(np.take(self.X[:, i], cluster)) for i in range(self.n_features)]
def _dist_from_centers(self):
return np.array([min([euclidean_distance(x, c) for c in self.centroids]) for x in self.X])
def _choose_next_center(self):
distances = self._dist_from_centers()
probs = distances / distances.sum()
cumprobs = probs.cumsum()
r = random.random()
ind = np.where(cumprobs >= r)[0][0]
return self.X[ind]
def _is_converged(self, centroids_old, centroids):
return True if sum([euclidean_distance(centroids_old[i], centroids[i]) for i in range(self.K)]) == 0 else False
def plot(self, data=None):
sns.set(style="white")
if data is None:
data = self.X
for i, index in enumerate(self.clusters):
point = np.array(data[index]).T
plt.scatter(*point, c=sns.color_palette("hls", self.K + 1)[i])
for point in self.centroids:
plt.scatter(*point, marker='x', linewidths=10)
plt.show()
|
[
"seaborn.set",
"random.choice",
"seaborn.color_palette",
"mla.metrics.distance.euclidean_distance",
"numpy.where",
"random.seed",
"numpy.take",
"numpy.array",
"numpy.empty",
"matplotlib.pyplot.scatter",
"random.random",
"matplotlib.pyplot.show"
] |
[((177, 194), 'random.seed', 'random.seed', (['(1111)'], {}), '(1111)\n', (188, 194), False, 'import random\n'), ((2761, 2785), 'numpy.empty', 'np.empty', (['self.n_samples'], {}), '(self.n_samples)\n', (2769, 2785), True, 'import numpy as np\n'), ((4152, 4167), 'random.random', 'random.random', ([], {}), '()\n', (4165, 4167), False, 'import random\n'), ((4455, 4477), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (4462, 4477), True, 'import seaborn as sns\n'), ((4806, 4816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4814, 4816), True, 'import matplotlib.pyplot as plt\n'), ((3453, 3494), 'mla.metrics.distance.euclidean_distance', 'euclidean_distance', (['self.X[fpoint]', 'point'], {}), '(self.X[fpoint], point)\n', (3471, 3494), False, 'from mla.metrics.distance import euclidean_distance\n'), ((4750, 4796), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*point'], {'marker': '"""x"""', 'linewidths': '(10)'}), "(*point, marker='x', linewidths=10)\n", (4761, 4796), True, 'import matplotlib.pyplot as plt\n'), ((3781, 3811), 'numpy.take', 'np.take', (['self.X[:, i]', 'cluster'], {}), '(self.X[:, i], cluster)\n', (3788, 3811), True, 'import numpy as np\n'), ((4182, 4205), 'numpy.where', 'np.where', (['(cumprobs >= r)'], {}), '(cumprobs >= r)\n', (4190, 4205), True, 'import numpy as np\n'), ((4601, 4622), 'numpy.array', 'np.array', (['data[index]'], {}), '(data[index])\n', (4609, 4622), True, 'import numpy as np\n'), ((1969, 1990), 'random.choice', 'random.choice', (['self.X'], {}), '(self.X)\n', (1982, 1990), False, 'import random\n'), ((3911, 3935), 'mla.metrics.distance.euclidean_distance', 'euclidean_distance', (['x', 'c'], {}), '(x, c)\n', (3929, 3935), False, 'from mla.metrics.distance import euclidean_distance\n'), ((4323, 4373), 'mla.metrics.distance.euclidean_distance', 'euclidean_distance', (['centroids_old[i]', 'centroids[i]'], {}), '(centroids_old[i], centroids[i])\n', (4341, 4373), False, 'from mla.metrics.distance import euclidean_distance\n'), ((4659, 4695), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', '(self.K + 1)'], {}), "('hls', self.K + 1)\n", (4676, 4695), True, 'import seaborn as sns\n')]
|
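The '++' initializer described in the class docstring is k-means++ seeding; note that the class weights candidates by the unsquared distance returned by _dist_from_centers, whereas the canonical algorithm uses the squared distance. A compact standalone sketch of the canonical seeding step (not taken from the class above):
import numpy as np
def kmeanspp_init(X, k, seed=0):
    # Draw each new centre with probability proportional to the squared
    # distance from a point to its nearest already-chosen centre.
    rng = np.random.default_rng(seed)
    centers = [X[rng.integers(len(X))]]
    for _ in range(k - 1):
        diffs = X[:, None, :] - np.array(centers)[None, :, :]
        d2 = (diffs ** 2).sum(-1).min(axis=1)
        centers.append(X[rng.choice(len(X), p=d2 / d2.sum())])
    return np.array(centers)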
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
"""
import logging
import numpy as np
import scipy as sp
import collections
import itertools
from model.modelTemplate import Model
class BPE(Model):
"""The Bayesian predictor model
Attributes
----------
Name : string
The name of the class used when recording what has been used.
Parameters
----------
alpha : float, optional
Learning rate parameter
epsilon : float, optional
Noise parameter. The larger it is the less likely the model is to choose the highest expected reward
number_actions : integer, optional
The maximum number of valid actions the model can expect to receive.
Default 2.
number_cues : integer, optional
The initial maximum number of stimuli the model can expect to receive.
Default 1.
number_critics : integer, optional
The number of different reaction learning sets.
Default number_actions*number_cues
validRewards : list,np.ndarray, optional
The different reward values that can occur in the task. Default ``array([0, 1])``
action_codes : dict with string or int as keys and int values, optional
A dictionary used to convert between the action references used by the
task or dataset and references used in the models to describe the order
in which the action information is stored.
dirichletInit : float, optional
The initial values for values of the dirichlet distribution.
Normally 0, 1/2 or 1. Default 1
prior : array of floats in ``[0, 1]``, optional
Ignored in this case
stimFunc : function, optional
The function that transforms the stimulus into a form the model can
understand and a string to identify it later. Default is blankStim
rewFunc : function, optional
The function that transforms the reward into a form the model can
understand. Default is blankRew
decFunc : function, optional
The function that takes the internal values of the model and turns them
        into a decision. Default is model.decision.discrete.weightProb
See Also
--------
model.BP : This model is heavily based on that one
"""
def __init__(self, alpha=0.3, epsilon=0.1, dirichletInit=1, validRewards=np.array([0, 1]), **kwargs):
super(BPE, self).__init__(**kwargs)
self.alpha = alpha
self.epsilon = epsilon
self.validRew = validRewards
        self.rewLoc = collections.OrderedDict((k, v) for k, v in zip(self.validRew, range(len(self.validRew))))
self.dirichletVals = np.ones((self.number_actions, self.number_cues, len(self.validRew))) * dirichletInit
self.expectations = self.updateExpectations(self.dirichletVals)
self.parameters["epsilon"] = self.epsilon
self.parameters["alpha"] = self.alpha
self.parameters["dirichletInit"] = dirichletInit
# Recorded information
self.recDirichletVals = []
def returnTaskState(self):
""" Returns all the relevant data for this model
Returns
-------
results : dict
The dictionary contains a series of keys including Name,
Probabilities, Actions and Events.
"""
results = self.standardResultOutput()
results["dirichletVals"] = np.array(self.recDirichletVals)
return results
def storeState(self):
"""
Stores the state of all the important variables so that they can be
accessed later
"""
self.storeStandardResults()
self.recDirichletVals.append(self.dirichletVals.copy())
def rewardExpectation(self, observation):
"""Calculate the estimated reward based on the action and stimuli
This contains parts that are task dependent
Parameters
----------
observation : {int | float | tuple}
The set of stimuli
Returns
-------
actionExpectations : array of floats
The expected rewards for each action
stimuli : list of floats
The processed observations
activeStimuli : list of [0, 1] mapping to [False, True]
A list of the stimuli that were or were not present
"""
activeStimuli, stimuli = self.stimulus_shaper.processStimulus(observation)
actionExpectations = self._actExpectations(self.dirichletVals, stimuli)
return actionExpectations, stimuli, activeStimuli
def delta(self, reward, expectation, action, stimuli):
"""
Calculates the comparison between the reward and the expectation
Parameters
----------
reward : float
The reward value
expectation : float
The expected reward value
action : int
The chosen action
stimuli : {int | float | tuple | None}
The stimuli received
Returns
-------
delta
"""
modReward = self.reward_shaper.processFeedback(reward, action, stimuli)
return modReward
def updateModel(self, delta, action, stimuli, stimuliFilter):
"""
Parameters
----------
delta : float
The difference between the reward and the expected reward
action : int
The action chosen by the model in this trialstep
stimuli : list of float
The weights of the different stimuli in this trialstep
stimuliFilter : list of bool
A list describing if a stimulus cue is present in this trialstep
"""
# Find the new activities
self._newExpect(action, delta, stimuli)
# Calculate the new probabilities
# We need to combine the expectations before calculating the probabilities
actionExpectations = self._actExpectations(self.dirichletVals, stimuli)
self.probabilities = self.calcProbabilities(actionExpectations)
def _newExpect(self, action, delta, stimuli):
self.dirichletVals[action, :, self.rewLoc[delta]] += self.alpha * stimuli/np.sum(stimuli)
self.expectations = self.updateExpectations(self.dirichletVals)
def _actExpectations(self, dirichletVals, stimuli):
# If there are multiple possible stimuli, filter by active stimuli and calculate
# calculate the expectations associated with each action.
if self.number_cues > 1:
actionExpectations = self.calcActExpectations(self.actStimMerge(dirichletVals, stimuli))
else:
actionExpectations = self.calcActExpectations(dirichletVals[:, 0, :])
return actionExpectations
def calcProbabilities(self, actionValues):
# type: (np.ndarray) -> np.ndarray
"""
Calculate the probabilities associated with the actions
Parameters
----------
actionValues : 1D ndArray of floats
Returns
-------
probArray : 1D ndArray of floats
The probabilities associated with the actionValues
"""
cbest = actionValues == max(actionValues)
deltaEpsilon = self.epsilon * (1 / self.number_actions)
bestEpsilon = (1 - self.epsilon) / np.sum(cbest) + deltaEpsilon
probArray = bestEpsilon * cbest + deltaEpsilon * (1 - cbest)
return probArray
def actorStimulusProbs(self):
"""
Calculates in the model-appropriate way the probability of each action.
Returns
-------
probabilities : 1D ndArray of floats
The probabilities associated with the action choices
"""
probabilities = self.calcProbabilities(self.expectedRewards)
return probabilities
def actStimMerge(self, dirichletVals, stimuli):
dirVals = dirichletVals * np.expand_dims(np.repeat([stimuli], self.number_actions, axis=0), 2)
actDirVals = np.sum(dirVals, 1)
return actDirVals
def calcActExpectations(self, dirichletVals):
actExpect = np.fromiter((np.sum(sp.stats.dirichlet(d).mean() * self.validRew) for d in dirichletVals), float, count=self.number_actions)
return actExpect
def updateExpectations(self, dirichletVals):
def meanFunc(p, r=[]):
return np.sum(sp.stats.dirichlet(p).mean() * r)
expectations = np.apply_along_axis(meanFunc, 2, dirichletVals, r=self.validRew)
return expectations
|
[
"numpy.repeat",
"scipy.stats.dirichlet",
"numpy.array",
"numpy.sum",
"numpy.apply_along_axis"
] |
[((2391, 2407), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2399, 2407), True, 'import numpy as np\n'), ((3481, 3512), 'numpy.array', 'np.array', (['self.recDirichletVals'], {}), '(self.recDirichletVals)\n', (3489, 3512), True, 'import numpy as np\n'), ((8204, 8222), 'numpy.sum', 'np.sum', (['dirVals', '(1)'], {}), '(dirVals, 1)\n', (8210, 8222), True, 'import numpy as np\n'), ((8654, 8718), 'numpy.apply_along_axis', 'np.apply_along_axis', (['meanFunc', '(2)', 'dirichletVals'], {'r': 'self.validRew'}), '(meanFunc, 2, dirichletVals, r=self.validRew)\n', (8673, 8718), True, 'import numpy as np\n'), ((6338, 6353), 'numpy.sum', 'np.sum', (['stimuli'], {}), '(stimuli)\n', (6344, 6353), True, 'import numpy as np\n'), ((7494, 7507), 'numpy.sum', 'np.sum', (['cbest'], {}), '(cbest)\n', (7500, 7507), True, 'import numpy as np\n'), ((8126, 8175), 'numpy.repeat', 'np.repeat', (['[stimuli]', 'self.number_actions'], {'axis': '(0)'}), '([stimuli], self.number_actions, axis=0)\n', (8135, 8175), True, 'import numpy as np\n'), ((8594, 8615), 'scipy.stats.dirichlet', 'sp.stats.dirichlet', (['p'], {}), '(p)\n', (8612, 8615), True, 'import scipy as sp\n'), ((8348, 8369), 'scipy.stats.dirichlet', 'sp.stats.dirichlet', (['d'], {}), '(d)\n', (8366, 8369), True, 'import scipy as sp\n')]
|
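A compact restatement of the two computations above: updateExpectations takes the Dirichlet mean over the possible reward values for each action/cue pair, and calcProbabilities applies epsilon-greedy smoothing over the set B of actions with maximal expected reward (N_A actions in total):
\mathbb{E}[r \mid a, s] = \sum_{i} r_i \,\frac{\alpha_{a,s,i}}{\sum_{j} \alpha_{a,s,j}},
\qquad
P(a) = \frac{\epsilon}{N_A} + \begin{cases} \dfrac{1-\epsilon}{|B|} & a \in B \\ 0 & \text{otherwise} \end{cases}
with the Dirichlet counts alpha updated after each trial by self.alpha * stimuli / sum(stimuli) at the index of the observed reward, as in _newExpect above.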
#! /usr/bin/env python
"""Toolbox for unbalanced dataset in machine learning."""
from setuptools import setup, find_packages
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
descr = """Toolbox for unbalanced dataset in machine learning."""
DISTNAME = 'unbalanced_dataset'
DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = '<NAME>, <NAME>'
MAINTAINER_EMAIL = '<EMAIL>, <EMAIL>'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'
# This is a bit (!) hackish: we are setting a global variable so that the main
# unbalanced_dataset __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by UnbalancedDataset to
# recursively build the compiled extensions in sub-packages is based on
# the Python import machinery.
builtins.__UNBALANCED_DATASET_SETUP__ = True
with open('unbalanced_dataset/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('unbalanced_dataset')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install UnbalancedDataset when Numpy is not yet
# present in the system.
from setuptools import setup
extra = {}
else:
print('To install UnbalancedDataset from source, you need numpy.' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager.')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=INSTALL_REQUIRES,
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
cmdclass={'build_py': build_py},
**extra
)
|
[
"os.path.exists",
"setuptools.find_packages",
"numpy.distutils.misc_util.Configuration",
"sys.exit",
"os.remove"
] |
[((1708, 1734), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (1722, 1734), False, 'import os\n'), ((1836, 1881), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (1849, 1881), False, 'from numpy.distutils.misc_util import Configuration\n'), ((1744, 1765), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (1753, 1765), False, 'import os\n'), ((4762, 4803), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['doc']"}), "(exclude=['doc'])\n", (4786, 4803), False, 'import setuptools\n'), ((3442, 3453), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3450, 3453), False, 'import sys\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[18]:
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
from causalnex.structure import DAGRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# In[22]:
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
with open("data/"+name+".csv", 'r') as f:
df = pd.read_csv(f)
with open("data/"+name+".json", 'r') as f:
param = json.load(f)
return df, param
# In[24]:
# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df,param):
model = DAGRegressor(
alpha=0.1,
beta=0.9,
fit_intercept=True,
hidden_layer_units=None,
dependent_target=True,
enforce_dag=True,
)
return model
# In[26]:
# train your model
# returns a fit info json object and may modify the model object
def fit(model,df,param):
target=param['target_variables'][0]
#Data prep for processing
y_p = df[target]
y = y_p.values
X_p = df[param['feature_variables']]
X = X_p.to_numpy()
X_col = list(X_p.columns)
#Scale the data
ss = StandardScaler()
X_ss = ss.fit_transform(X)
y_ss = (y - y.mean()) / y.std()
scores = cross_val_score(model, X_ss, y_ss, cv=KFold(shuffle=True, random_state=42))
    print(f'MEAN R2: {np.mean(scores):.3f}')
X_pd = pd.DataFrame(X_ss, columns=X_col)
y_pd = pd.Series(y_ss, name=target)
model.fit(X_pd, y_pd)
info = pd.Series(model.coef_, index=X_col)
#info = pd.Series(model.coef_, index=list(df.drop(['_time'],axis=1).columns))
return info
# In[28]:
# apply your model
# returns the calculated results
def apply(model,df,param):
data = []
for col in list(df.columns):
s = model.get_edges_to_node(col)
for i in s.index:
data.append([i,col,s[i]]);
graph = pd.DataFrame(data, columns=['src','dest','weight'])
#results to send back to Splunk
graph_output=graph[graph['weight']>0]
return graph_output
# In[ ]:
# save model to name in expected convention "<algo_name>_<model_name>"
def save(model,name):
#with open(MODEL_DIRECTORY + name + ".json", 'w') as file:
# json.dump(model, file)
return model
# In[ ]:
# load model from name in expected convention "<algo_name>_<model_name>"
def load(name):
model = {}
#with open(MODEL_DIRECTORY + name + ".json", 'r') as file:
# model = json.load(file)
return model
# In[ ]:
# return a model summary
def summary(model=None):
returns = {"version": {"numpy": np.__version__, "pandas": pd.__version__} }
return returns
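# In[ ]:
# Hedged usage sketch (added for illustration; not part of the original notebook).
# It chains the helpers defined above the way they are typically exercised outside
# MLTK. The staged dataset name "example" is hypothetical and must exist as
# data/example.csv plus data/example.json for this to run.
if __name__ == "__main__":
    df, param = stage("example")
    model = init(df, param)
    coefficients = fit(model, df, param)   # per-feature DAG regression coefficients
    edges = apply(model, df, param)        # positively weighted src/dest/weight edges
    print(coefficients)
    print(edges)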
|
[
"pandas.Series",
"numpy.mean",
"pandas.read_csv",
"sklearn.preprocessing.StandardScaler",
"json.load",
"pandas.DataFrame",
"causalnex.structure.DAGRegressor",
"sklearn.model_selection.KFold"
] |
[((1022, 1146), 'causalnex.structure.DAGRegressor', 'DAGRegressor', ([], {'alpha': '(0.1)', 'beta': '(0.9)', 'fit_intercept': '(True)', 'hidden_layer_units': 'None', 'dependent_target': '(True)', 'enforce_dag': '(True)'}), '(alpha=0.1, beta=0.9, fit_intercept=True, hidden_layer_units=\n None, dependent_target=True, enforce_dag=True)\n', (1034, 1146), False, 'from causalnex.structure import DAGRegressor\n'), ((1653, 1669), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1667, 1669), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1895, 1928), 'pandas.DataFrame', 'pd.DataFrame', (['X_ss'], {'columns': 'X_col'}), '(X_ss, columns=X_col)\n', (1907, 1928), True, 'import pandas as pd\n'), ((1940, 1968), 'pandas.Series', 'pd.Series', (['y_ss'], {'name': 'target'}), '(y_ss, name=target)\n', (1949, 1968), True, 'import pandas as pd\n'), ((2012, 2047), 'pandas.Series', 'pd.Series', (['model.coef_'], {'index': 'X_col'}), '(model.coef_, index=X_col)\n', (2021, 2047), True, 'import pandas as pd\n'), ((2416, 2469), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['src', 'dest', 'weight']"}), "(data, columns=['src', 'dest', 'weight'])\n", (2428, 2469), True, 'import pandas as pd\n'), ((675, 689), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (686, 689), True, 'import pandas as pd\n'), ((753, 765), 'json.load', 'json.load', (['f'], {}), '(f)\n', (762, 765), False, 'import json\n'), ((1793, 1829), 'sklearn.model_selection.KFold', 'KFold', ([], {'shuffle': '(True)', 'random_state': '(42)'}), '(shuffle=True, random_state=42)\n', (1798, 1829), False, 'from sklearn.model_selection import KFold\n'), ((1853, 1868), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1860, 1868), True, 'import numpy as np\n')]
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
matplotlib.use('Agg')
import math
import numpy as np
import sys
from os.path import join, isfile
import warnings
warnings.filterwarnings("ignore")
def gda(x, y):
x = x.T
y = y.T
# phi = P(y = 1)
# mu[i] = mean of the feature vectors of the ith class
# sigma = common co-variance matrix
# M[i] = number of data points of class i
phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0])
m = y.shape[0]
M[1] = np.sum(y)
M[0] = m - M[1]
phi = M[1] / m
mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)])
sigma = np.sum(np.array([np.outer(x[i] - mu[y[i]], x[i] - mu[y[i]]) for i in range(m)]), axis=0).astype(float) / m
return phi, mu, sigma
def gda_general(x, y):
x = x.T
y = y.T
# phi = P(y = 1)
# mu[i] = mean of the feature vectors of the ith class
# sigma[i] = co-variance matrix for the ith class
# M[i] = number of data points of class i
phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0])
m = y.shape[0]
M[1] = np.sum(y)
M[0] = m - M[1]
phi = M[1] / m
mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)])
sigma = np.array([np.sum(np.array([np.outer(x[i] - mu[k], x[i] - mu[k]) for i in range(m) if y[i] == k]), axis=0) / M[k] for k in range(2)]).astype(float)
return phi, mu, sigma
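# Reference equations (added for clarity; they restate the derivation the plotting
# code below relies on, using the same symbols as the estimators above):
#   shared-covariance (linear) boundary:
#     log(phi/(1-phi)) + (mu[1] - mu[0])^T Sigma^-1 x
#       + (mu[0]^T Sigma^-1 mu[0] - mu[1]^T Sigma^-1 mu[1]) / 2 = 0
#   per-class covariance (quadratic) boundary:
#     x^T (Sigma[0]^-1 - Sigma[1]^-1) x / 2 + (mu[1]^T Sigma[1]^-1 - mu[0]^T Sigma[0]^-1) x
#       + log(phi/(1-phi)) + log(|Sigma[0]|/|Sigma[1]|) / 2
#       + (mu[0]^T Sigma[0]^-1 mu[0] - mu[1]^T Sigma[1]^-1 mu[1]) / 2 = 0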
def main():
# read command-line arguments
data_dir = sys.argv[1]
out_dir = sys.argv[2]
part = sys.argv[3]
# check for existence of input files
for c in ['x', 'y']:
if not isfile(join(data_dir, 'q4' + c + '.dat')):
raise Exception('q4' + c + '.dat not found')
    # read the data from the .dat files
x = np.array(np.genfromtxt(join(data_dir, 'q4x.dat'))).T
y = np.array([0 if yi == 'Alaska' else 1 for yi in np.loadtxt(join(data_dir, 'q4y.dat'), dtype=str)])
# normalisation
x_mean = np.array([0.0] * 2)
x_stddev = np.array([0.0] * 2)
for i in range(2):
x_mean[i] = np.mean(x[i])
x[i] -= np.full_like(x[i], np.mean(x[i]))
x_stddev[i] = np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0])
x[i] /= np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0])
# part A
# running GDA with common co-variance matrix
phi, mu, sigma = gda(x, y)
if part == 'a':
output_file = open(join(out_dir, '4aoutput.txt'), mode='w')
output_file.write('phi = ' + str(phi) + '\n')
output_file.write('mu[0] = ' + str(mu[0]) + '\n')
output_file.write('mu[1] = ' + str(mu[1]) + '\n')
output_file.write('sigma = \n' + str(sigma) + '\n')
output_file.close()
print('phi = ' + str(phi))
print('mu[0] = ' + str(mu[0]))
print('mu[1] = ' + str(mu[1]))
print('sigma = \n' + str(sigma))
return 0
# part B, C
fig4b, ax4b = plt.subplots()
# filter by y-values
x0, x1 = [], []
for i in range(y.shape[0]):
if y[i] == 0:
x0.append([x[0][i], x[1][i]])
else:
x1.append([x[0][i], x[1][i]])
x0 = np.array(x0).T
x1 = np.array(x1).T
# plot classes
alaska = ax4b.scatter(x0[0] * x_stddev[0] + x_mean[0], x0[1] * x_stddev[1] + x_mean[1], c='red', s=6)
canada = ax4b.scatter(x1[0] * x_stddev[0] + x_mean[0], x1[1] * x_stddev[1] + x_mean[1], c='blue', s=6)
ax4b.set_xlabel('Fresh water ring dia.')
ax4b.set_ylabel('Marine water ring dia.')
fig4b.legend((alaska, canada), ('Alaska', 'Canada'))
if part == 'b':
fig4b.savefig(join(out_dir, '1b_plot.png'))
plt.show()
return 0
# linear boundary computation - equation in report
sigma_inverse = np.linalg.inv(sigma)
theta = np.array([0., 0., 0.])
theta[0] = np.log(phi / (1 - phi))
for i in range(2):
mui = np.array([mu[i]])
        # the factor of 1/2 comes from expanding the two Gaussian quadratic forms
        theta[0] += ((-1) ** i) * np.matmul(np.matmul(mui, sigma_inverse), mui.T) / 2
theta[1:] = np.matmul(np.array([mu[1] - mu[0]]), sigma_inverse)
# plotting the boundary
rx = np.arange(-3, 4)
ry = (-theta[0] - theta[1] * rx) / theta[2]
ax4b.plot(rx * x_stddev[0] + x_mean[0], ry * x_stddev[1] + x_mean[1])
#plt.show()
if part == 'c':
fig4b.savefig(join(out_dir, '1c_plot.png'))
plt.show()
return 0
# part D
# running generalised GDA
phi, mu, sigma = gda_general(x, y)
if part == 'd':
output_file = open(join(out_dir, '4doutput.txt'), mode='w')
output_file.write('phi = ' + str(phi) + '\n')
output_file.write('mu[0] = ' + str(mu[0]) + '\n')
output_file.write('mu[1] = ' + str(mu[1]) + '\n')
output_file.write('sigma[0] = \n' + str(sigma[0]) + '\n')
output_file.write('sigma[1] = \n' + str(sigma[1]) + '\n')
output_file.close()
print('phi = ' + str(phi))
print('mu[0] = ' + str(mu[0]))
print('mu[1] = ' + str(mu[1]))
print('sigma[0] = \n' + str(sigma[0]))
print('sigma[1] = \n' + str(sigma[1]))
return 0
# part E
# quadratic boundary computation - equation in report
constant = np.log(phi / (1 - phi)) + np.log(np.linalg.det(sigma[0]) / np.linalg.det(sigma[1])) / 2
linear = 0
quadratic = 0
for i in range(2):
sigma_inverse = np.linalg.inv(sigma[i])
mui = np.array([mu[i]])
prod = np.matmul(mui, sigma_inverse)
constant += ((-1) ** i) * np.matmul(prod, mui.T) / 2
linear += ((-1) ** (i + 1)) * prod
quadratic += ((-1) ** i) * sigma_inverse / 2
constant = constant[0][0]
linear = linear[0]
# note that here x transposed is the feature vector (as x is a row vector)
# and similarly mu[i] is also a row vector, which explains the equations above
# equation is x * quadratic * x.T + linear * x.T + constant = 0
# plotting the quadratic boundary
Z = 0
X, Y = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100))
Z += quadratic[0, 0] * (X ** 2) + (quadratic[0, 1] + quadratic[1, 0]) * X * Y + (quadratic[1, 1]) * (Y ** 2)
Z += linear[0] * X + linear[1] * Y
Z += constant
ax4b.contour(X * x_stddev[0] + x_mean[0], Y * x_stddev[1] + x_mean[1], Z, 0)
if part == 'e':
fig4b.savefig(join(out_dir, '1e_plot.png'))
plt.show()
# part F - in the report
return 0
if __name__ == '__main__':
main()
|
[
"warnings.filterwarnings",
"numpy.mean",
"matplotlib.use",
"numpy.log",
"os.path.join",
"numpy.linalg.det",
"numpy.sum",
"numpy.array",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.linspace",
"numpy.outer",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((91, 112), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (105, 112), False, 'import matplotlib\n'), ((206, 239), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (229, 239), False, 'import warnings\n'), ((547, 556), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (553, 556), True, 'import numpy as np\n'), ((1184, 1193), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (1190, 1193), True, 'import numpy as np\n'), ((2065, 2084), 'numpy.array', 'np.array', (['([0.0] * 2)'], {}), '([0.0] * 2)\n', (2073, 2084), True, 'import numpy as np\n'), ((2100, 2119), 'numpy.array', 'np.array', (['([0.0] * 2)'], {}), '([0.0] * 2)\n', (2108, 2119), True, 'import numpy as np\n'), ((3001, 3015), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3013, 3015), True, 'import matplotlib.pyplot as plt\n'), ((3827, 3847), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma'], {}), '(sigma)\n', (3840, 3847), True, 'import numpy as np\n'), ((3860, 3885), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3868, 3885), True, 'import numpy as np\n'), ((3898, 3921), 'numpy.log', 'np.log', (['(phi / (1 - phi))'], {}), '(phi / (1 - phi))\n', (3904, 3921), True, 'import numpy as np\n'), ((4165, 4181), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), '(-3, 4)\n', (4174, 4181), True, 'import numpy as np\n'), ((475, 495), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (483, 495), True, 'import numpy as np\n'), ((498, 514), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (506, 514), True, 'import numpy as np\n'), ((1112, 1132), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1120, 1132), True, 'import numpy as np\n'), ((1135, 1151), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1143, 1151), True, 'import numpy as np\n'), ((2163, 2176), 'numpy.mean', 'np.mean', (['x[i]'], {}), '(x[i])\n', (2170, 2176), True, 'import numpy as np\n'), ((3223, 3235), 'numpy.array', 'np.array', (['x0'], {}), '(x0)\n', (3231, 3235), True, 'import numpy as np\n'), ((3247, 3259), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (3255, 3259), True, 'import numpy as np\n'), ((3723, 3733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3731, 3733), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3976), 'numpy.array', 'np.array', (['[mu[i]]'], {}), '([mu[i]])\n', (3967, 3976), True, 'import numpy as np\n'), ((4085, 4110), 'numpy.array', 'np.array', (['[mu[1] - mu[0]]'], {}), '([mu[1] - mu[0]])\n', (4093, 4110), True, 'import numpy as np\n'), ((4401, 4411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4409, 4411), True, 'import matplotlib.pyplot as plt\n'), ((5244, 5267), 'numpy.log', 'np.log', (['(phi / (1 - phi))'], {}), '(phi / (1 - phi))\n', (5250, 5267), True, 'import numpy as np\n'), ((5412, 5435), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma[i]'], {}), '(sigma[i])\n', (5425, 5435), True, 'import numpy as np\n'), ((5450, 5467), 'numpy.array', 'np.array', (['[mu[i]]'], {}), '([mu[i]])\n', (5458, 5467), True, 'import numpy as np\n'), ((5483, 5512), 'numpy.matmul', 'np.matmul', (['mui', 'sigma_inverse'], {}), '(mui, sigma_inverse)\n', (5492, 5512), True, 'import numpy as np\n'), ((6025, 6048), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(100)'], {}), '(-4, 4, 100)\n', (6036, 6048), True, 'import numpy as np\n'), ((6050, 6073), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(100)'], {}), '(-4, 4, 100)\n', (6061, 6073), True, 'import numpy as np\n'), 
((6406, 6416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6414, 6416), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2225), 'numpy.mean', 'np.mean', (['x[i]'], {}), '(x[i])\n', (2219, 2225), True, 'import numpy as np\n'), ((2495, 2524), 'os.path.join', 'join', (['out_dir', '"""4aoutput.txt"""'], {}), "(out_dir, '4aoutput.txt')\n", (2499, 2524), False, 'from os.path import join, isfile\n'), ((3685, 3713), 'os.path.join', 'join', (['out_dir', '"""1b_plot.png"""'], {}), "(out_dir, '1b_plot.png')\n", (3689, 3713), False, 'from os.path import join, isfile\n'), ((4363, 4391), 'os.path.join', 'join', (['out_dir', '"""1c_plot.png"""'], {}), "(out_dir, '1c_plot.png')\n", (4367, 4391), False, 'from os.path import join, isfile\n'), ((4561, 4590), 'os.path.join', 'join', (['out_dir', '"""4doutput.txt"""'], {}), "(out_dir, '4doutput.txt')\n", (4565, 4590), False, 'from os.path import join, isfile\n'), ((6368, 6396), 'os.path.join', 'join', (['out_dir', '"""1e_plot.png"""'], {}), "(out_dir, '1e_plot.png')\n", (6372, 6396), False, 'from os.path import join, isfile\n'), ((1745, 1778), 'os.path.join', 'join', (['data_dir', "('q4' + c + '.dat')"], {}), "(data_dir, 'q4' + c + '.dat')\n", (1749, 1778), False, 'from os.path import join, isfile\n'), ((1895, 1920), 'os.path.join', 'join', (['data_dir', '"""q4x.dat"""'], {}), "(data_dir, 'q4x.dat')\n", (1899, 1920), False, 'from os.path import join, isfile\n'), ((2257, 2274), 'numpy.sum', 'np.sum', (['(x[i] ** 2)'], {}), '(x[i] ** 2)\n', (2263, 2274), True, 'import numpy as np\n'), ((2316, 2333), 'numpy.sum', 'np.sum', (['(x[i] ** 2)'], {}), '(x[i] ** 2)\n', (2322, 2333), True, 'import numpy as np\n'), ((4021, 4050), 'numpy.matmul', 'np.matmul', (['mui', 'sigma_inverse'], {}), '(mui, sigma_inverse)\n', (4030, 4050), True, 'import numpy as np\n'), ((5547, 5569), 'numpy.matmul', 'np.matmul', (['prod', 'mui.T'], {}), '(prod, mui.T)\n', (5556, 5569), True, 'import numpy as np\n'), ((1991, 2016), 'os.path.join', 'join', (['data_dir', '"""q4y.dat"""'], {}), "(data_dir, 'q4y.dat')\n", (1995, 2016), False, 'from os.path import join, isfile\n'), ((5277, 5300), 'numpy.linalg.det', 'np.linalg.det', (['sigma[0]'], {}), '(sigma[0])\n', (5290, 5300), True, 'import numpy as np\n'), ((5303, 5326), 'numpy.linalg.det', 'np.linalg.det', (['sigma[1]'], {}), '(sigma[1])\n', (5316, 5326), True, 'import numpy as np\n'), ((738, 780), 'numpy.outer', 'np.outer', (['(x[i] - mu[y[i]])', '(x[i] - mu[y[i]])'], {}), '(x[i] - mu[y[i]], x[i] - mu[y[i]])\n', (746, 780), True, 'import numpy as np\n'), ((1385, 1421), 'numpy.outer', 'np.outer', (['(x[i] - mu[k])', '(x[i] - mu[k])'], {}), '(x[i] - mu[k], x[i] - mu[k])\n', (1393, 1421), True, 'import numpy as np\n')]
|
"""
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
Takes a tuple of `astropy.unit.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
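# Illustrative checks (added; not part of the original module): the same point on
# the +x axis, one metre from the origin, expressed in each supported geometry:
#     _coerce_to_cartesian_si((1 * u.m, 0 * u.m, 0 * u.m))               # cartesian (x, y, z)
#     _coerce_to_cartesian_si((1 * u.m, 0 * u.rad, 0 * u.m))             # cylindrical (r, theta, z)
#     _coerce_to_cartesian_si((1 * u.m, np.pi / 2 * u.rad, 0 * u.rad))   # spherical (r, theta, phi)
# each returns (approximately) np.array([1.0, 0.0, 0.0]).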
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed
into the standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in si units. This is created here
        # so that it isn't continuously called later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying a "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
extent : Tuple of 1 or 2 `~astropy.units.Quantity`
The size of the mesh grid (in the mesh plane). If one value
is provided, the mesh is circular and the value provided is
interpreted as the diameter. If two values are provided, the
            mesh is rectangular and the two values are interpreted as the
width and height respectively.
nwires : Tuple of 1 or 2 ints, or a single int
The number of wires in the horizontal and vertical directions. If
only one value is provided, the number in the two directions is
assumed to be equal. Note that a wire will cross the center of the
mesh only when nwires is odd.
wire_diameter : `~astropy.units.Quantity`
The diameter of the wires.
mesh_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the mesh plane. Modifying this vector can rotate the
mesh in the plane or tilt the mesh plane relative to the
source-detector axis. By default, `mesh_hdir` is set equal to
`detector_hdir` (see `detector_hdir` keyword in `__init__`).
mesh_vdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the vertical
direction on the mesh plane. Modifying this vector can tilt the
mesh relative to the source-detector axis. By default, `mesh_vdir`
is defined to be perpendicular to `mesh_hdir` and the detector
plane normal (such that the mesh is parallel to the detector plane).
Raises
------
ValueError
Raises a ValueError if the provided mesh location is not
between the source and the object grid.
"""
location = _coerce_to_cartesian_si(location)
wire_radius = wire_diameter.si.value / 2
if not isinstance(extent, tuple):
extent = (extent,)
if len(extent) == 1:
radius = 0.5 * extent[0].si.value
width = extent[0].si.value
height = extent[0].si.value
elif len(extent) == 2:
radius = None
width = extent[0].si.value
height = extent[1].si.value
else:
raise ValueError(
"extent must be a tuple of 1 or 2 elements, but "
f"{len(extent)} elements were provided."
)
if not isinstance(nwires, tuple):
nwires = (nwires,)
if len(nwires) != 2:
nwires = (nwires[0], nwires[0])
# If no hdir/vdir is specified, calculate a default value
# If one is specified, make sure it is normalized
if mesh_hdir is None:
# Re-calculate the default here, in case the user
# specified a different det_hdir
mesh_hdir = self._default_detector_hdir()
else:
mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir)
if mesh_vdir is None:
mesh_vdir = np.cross(mesh_hdir, self.det_n)
mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir)
else:
mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir)
# Raise exception if mesh is AFTER the field grid
if np.linalg.norm(location - self.source) > np.linalg.norm(self.source):
raise ValueError(
f"The specified mesh location, {location},"
"is not between the source and the origin."
)
mesh_entry = {
"location": location,
"wire_radius": wire_radius,
"radius": radius,
"width": width,
"height": height,
"nwires": nwires,
"mesh_hdir": mesh_hdir,
"mesh_vdir": mesh_vdir,
}
self.mesh_list.append(mesh_entry)
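    # Illustrative call (added; all values are placeholders, not from the original
    # source). Assuming a SyntheticProtonRadiograph instance `spr` whose source sits
    # roughly 10 mm from the grid origin, a 9x9 rectangular mesh 2 mm upstream of the
    # origin could be registered as:
    #     spr.add_wire_mesh(
    #         location=(0 * u.mm, -2 * u.mm, 0 * u.mm),
    #         extent=(2 * u.mm, 1.5 * u.mm),      # width, height -> rectangular mesh
    #         nwires=9,
    #         wire_diameter=20 * u.um,
    #     )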
def _apply_wire_mesh(
self,
location=None,
wire_radius=None,
radius=None,
width=None,
height=None,
nwires=None,
mesh_hdir=None,
mesh_vdir=None,
):
"""
Apply wire meshes that were added to self.mesh_list
"""
x = self._coast_to_plane(location, mesh_hdir, mesh_vdir)
# Particle positions in 2D on the mesh plane
xloc = np.dot(x - location, mesh_hdir)
yloc = np.dot(x - location, mesh_vdir)
# Create an array in which True indicates that a particle has hit a wire
# and False indicates that it has not
hit = np.zeros(self.nparticles, dtype=bool)
# Mark particles that overlap vertical or horizontal position with a wire
h_centers = np.linspace(-width / 2, width / 2, num=nwires[0])
for c in h_centers:
hit |= np.isclose(xloc, c, atol=wire_radius)
v_centers = np.linspace(-height / 2, height / 2, num=nwires[1])
for c in v_centers:
hit |= np.isclose(yloc, c, atol=wire_radius)
# Put back any particles that are outside the mesh boundaries
# First handle the case where the mesh is rectangular
if radius is None:
# Replace particles outside the x-boundary
hit[
np.logical_or(
xloc > np.max(h_centers) + wire_radius,
xloc < np.min(h_centers) - wire_radius,
)
] = False
# Replace particles outside the y-boundary
hit[
np.logical_or(
yloc > np.max(v_centers) + wire_radius,
yloc < np.min(v_centers) - wire_radius,
)
] = False
# Handle the case where the mesh is circular
else:
loc_rad = np.sqrt(xloc ** 2 + yloc ** 2)
hit[loc_rad > radius] = False
# In the case of a circular mesh, also create a round wire along the
# outside edge
hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True
# Identify the particles that have hit something, then remove them from
# all of the arrays
keep_these_particles = ~hit
number_kept_particles = keep_these_particles.sum()
nremoved = self.nparticles - number_kept_particles
if self.nparticles - nremoved <= 0:
raise ValueError(
"The specified mesh is blocking all of the particles. "
f"The wire diameter ({2*wire_radius}) may be too large."
)
self.x = self.x[keep_these_particles, :]
self.v = self.v[keep_these_particles, :]
self.theta = self.theta[
keep_these_particles
] # Important to apply here to get correct grid_ind
self.nparticles = number_kept_particles
# *************************************************************************
# Particle creation methods
# *************************************************************************
def _angles_monte_carlo(self):
"""
Generates angles for each particle randomly such that the flux
per solid angle is uniform.
"""
# Create a probability vector for the theta distribution
# Theta must follow a sine distribution in order for the particle
# flux per solid angle to be uniform.
arg = np.linspace(0, self.max_theta, num=int(1e5))
prob = np.sin(arg)
prob *= 1 / np.sum(prob)
# Randomly choose theta's weighted with the sine probabilities
theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob)
# Also generate a uniform phi distribution
phi = np.random.uniform(high=2 * np.pi, size=self.nparticles)
return theta, phi
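        # Added note: the sine weighting follows from the solid-angle element
        # d(Omega) = sin(theta) d(theta) d(phi); sampling theta proportional to
        # sin(theta) therefore yields a constant number of particles per unit
        # solid angle.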
def _angles_uniform(self):
"""
Generates angles for each particle such that their velocities are
uniformly distributed on a grid in theta and phi. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
"""
# Calculate the approximate square root
n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32)
# Set new nparticles to be a perfect square
self.nparticles = n_per ** 2
# Create an imaginary grid positioned 1 unit from the source
# and spanning max_theta at the corners
extent = np.sin(self.max_theta) / np.sqrt(2)
arr = np.linspace(-extent, extent, num=n_per)
harr, varr = np.meshgrid(arr, arr, indexing="ij")
# calculate the angles from the source for each point in
# the grid.
theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2))
phi = np.arctan2(varr, harr)
return theta.flatten(), phi.flatten()
@particles.particle_input
def create_particles(
self,
nparticles,
particle_energy,
max_theta=None,
particle: Particle = Particle("p+"),
distribution="monte-carlo",
):
r"""
Generates the angular distributions about the Z-axis, then
rotates those distributions to align with the source-to-detector axis.
        By default, particles are generated over nearly the full pi/2 range of polar angles. However,
if the detector is far from the source, many of these particles will
never be observed. The max_theta keyword allows these extraneous
particles to be neglected to focus computational resources on the
particles who will actually hit the detector.
        Parameters
        ----------
        nparticles : integer
The number of particles to include in the simulation. The default
is 1e5.
particle_energy : `~astropy.units.Quantity`
The energy of the particle, in units convertible to eV.
All particles are given the same energy.
max_theta : `~astropy.units.Quantity`, optional
The largest velocity vector angle (measured from the
source-to-detector axis) for which particles should be generated.
Decreasing this angle can eliminate particles that would never
reach the detector region of interest. If no value is given, a
guess will be made based on the size of the grid.
Units must be convertible to radians.
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
            - 'uniform': velocities will be distributed such that,
                left unperturbed, they will form a uniform pattern
on the detection plane. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self._log("Creating Particles")
# Load inputs
self.nparticles = int(nparticles)
self.particle_energy = particle_energy.to(u.eV).value
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
# If max_theta is not specified, make a guess based on the grid size
if max_theta is None:
self.max_theta = np.clip(
1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2
)
else:
self.max_theta = max_theta.to(u.rad).value
# Calculate the velocity corresponding to the particle energy
ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2)
v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2)
if distribution == "monte-carlo":
theta, phi = self._angles_monte_carlo()
elif distribution == "uniform":
theta, phi = self._angles_uniform()
# Temporarily save theta to later determine which particles
# should be tracked
self.theta = theta
# Construct the velocity distribution around the z-axis
self.v = np.zeros([self.nparticles, 3])
self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi)
self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi)
self.v[:, 2] = v0 * np.cos(theta)
# Calculate the rotation matrix that rotates the z-axis
# onto the source-detector axis
a = np.array([0, 0, 1])
b = self.detector - self.source
rot = rot_a_to_b(a, b)
# Apply rotation matrix to calculated velocity distribution
self.v = np.matmul(self.v, rot)
# Place particles at the source
self.x = np.tile(self.source, (self.nparticles, 1))
@particles.particle_input
def load_particles(
self, x, v, particle: Particle = Particle("p+"),
):
r"""
Load arrays of particle positions and velocities
        Parameters
        ----------
        x : `~astropy.units.Quantity`, shape (N,3)
Positions for N particles
v: `~astropy.units.Quantity`, shape (N,3)
Velocities for N particles
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
            - 'uniform': velocities will be distributed such that,
                left unperturbed, they will form a uniform pattern
on the detection plane.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
if x.shape[0] != v.shape[0]:
raise ValueError(
"Provided x and v arrays have inconsistent numbers "
" of particles "
f"({x.shape[0]} and {v.shape[0]} respectively)."
)
else:
self.nparticles = x.shape[0]
self.x = x.to(u.m).value
self.v = v.to(u.m / u.s).value
self.theta = np.arccos(
np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1)
)
n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0))
if n_wrong_way > 1:
warnings.warn(
f"{100*n_wrong_way/self.nparticles:.2f}% of particles "
"initialized are heading away from the grid. Check the orientation "
" of the provided velocity vectors.",
RuntimeWarning,
)
# *************************************************************************
# Run/push loop methods
# *************************************************************************
def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz):
r"""
Calculate the appropriate dt based on a number of considerations
including the local grid resolution (ds) and the gyroperiod of the
particles in the current fields.
"""
# If dt was explicitly set, skip the rest of this function
if self.dt.size == 1:
return self.dt
# Compute the timestep indicated by the grid resolution
ds = self.grid.grid_resolution.to(u.m).value
gridstep = 0.5 * (np.min(ds) / self.vmax)
# If not, compute a number of possible timesteps
# Compute the cyclotron gyroperiod
Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value
# Compute the gyroperiod
if Bmag == 0:
gyroperiod = np.inf
else:
gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag))
# TODO: introduce a minimum timestep based on electric fields too!
# Create an array of all the possible time steps we computed
candidates = np.array([gyroperiod / 12, gridstep])
# Enforce limits on dt
candidates = np.clip(candidates, self.dt[0], self.dt[1])
# dt is the min of the remaining candidates
return np.min(candidates)
def _coast_to_grid(self):
r"""
Coasts all particles to the timestep when the first particle should
be entering the grid. Doing in this in one step (rather than pushing
the particles through zero fields) saves computation time.
"""
# Distance from the source to the nearest gridpoint
dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3))
# Find the particle with the highest speed towards the grid
vmax = np.max(np.dot(self.v, self.src_n))
# Time for fastest possible particle to reach the grid.
t = dist / vmax
# Coast the particles to the advanced position
self.x = self.x + self.v * t
def _coast_to_plane(self, center, hdir, vdir, x=None):
"""
Calculates the positions where the current trajectories of each
particle impact a plane, described by the plane's center and
horizontal and vertical unit vectors.
Returns an [nparticles, 3] array of the particle positions in the plane
By default this function does not alter self.x. The optional keyword
        x can be used to pass in an output array that will be used to hold
the positions in the plane. This can be used to directly update self.x
as follows:
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x)
"""
normal = np.cross(hdir, vdir)
# Calculate the time required to evolve each particle into the
# plane
t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal)
# Calculate particle positions in the plane
if x is None:
# If no output array is provided, preallocate
x = np.empty_like(self.x)
x[...] = self.x + self.v * t[:, np.newaxis]
# Check that all points are now in the plane
# (Eq. of a plane is nhat*x + d = 0)
plane_eq = np.dot(x - center, normal)
assert np.allclose(plane_eq, 0, atol=1e-6)
return x
def _remove_deflected_particles(self):
r"""
Removes any particles that have been deflected away from the detector
plane (eg. those that will never hit the grid)
"""
dist_remaining = np.dot(self.x, self.det_n) + np.linalg.norm(self.detector)
v_towards_det = np.dot(self.v, -self.det_n)
# If particles have not yet reached the detector plane and are moving
# away from it, they will never reach the detector.
# So, we can remove them from the arrays
# Find the indices of all particles that we should keep:
# i.e. those still moving towards the detector.
ind = np.logical_not((v_towards_det < 0) & (dist_remaining > 0)).nonzero()[0]
# Drop the other particles
self.x = self.x[ind, :]
self.v = self.v[ind, :]
self.v_init = self.v_init[ind, :]
self.nparticles_grid = self.x.shape[0]
# Store the number of particles deflected
self.fract_deflected = (self.nparticles - ind.size) / self.nparticles
# Warn the user if a large number of particles are being deflected
if self.fract_deflected > 0.05:
warnings.warn(
f"{100*self.fract_deflected:.1f}% particles have been "
"deflected away from the detector plane. The fields "
"provided may be too high to successfully radiograph "
"with this particle energy.",
RuntimeWarning,
)
def _push(self):
r"""
Advance particles using an implementation of the time-centered
Boris algorithm
"""
# Get a list of positions (input for interpolator)
pos = self.x[self.grid_ind, :] * u.m
# Update the list of particles on and off the grid
self.on_grid = self.grid.on_grid(pos)
# entered_grid is zero at the end if a particle has never
# entered the grid
self.entered_grid += self.on_grid
# Estimate the E and B fields for each particle
# Note that this interpolation step is BY FAR the slowest part of the push
# loop. Any speed improvements will have to come from here.
if self.field_weighting == "volume averaged":
Ex, Ey, Ez, Bx, By, Bz = self.grid.volume_averaged_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
elif self.field_weighting == "nearest neighbor":
Ex, Ey, Ez, Bx, By, Bz = self.grid.nearest_neighbor_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
# Create arrays of E and B as required by push algorithm
E = np.array(
[Ex.to(u.V / u.m).value, Ey.to(u.V / u.m).value, Ez.to(u.V / u.m).value]
)
E = np.moveaxis(E, 0, -1)
B = np.array([Bx.to(u.T).value, By.to(u.T).value, Bz.to(u.T).value])
B = np.moveaxis(B, 0, -1)
# Calculate the adaptive timestep from the fields currently experienced
# by the particles
        # If the user sets dt explicitly, that's handled in _adaptive_dt
dt = self._adaptive_dt(Ex, Ey, Ez, Bx, By, Bz)
# TODO: Test v/c and implement relativistic Boris push when required
# vc = np.max(v)/_c
x = self.x[self.grid_ind, :]
v = self.v[self.grid_ind, :]
boris_push(x, v, B, E, self.q, self.m, dt)
self.x[self.grid_ind, :] = x
self.v[self.grid_ind, :] = v
def _stop_condition(self):
r"""
The stop condition is that most of the particles have entered the grid
and almost all have now left it.
"""
# Count the number of particles who have entered, which is the
# number of non-zero entries in entered_grid
self.num_entered = np.nonzero(self.entered_grid)[0].size
# How many of the particles have entered the grid
self.fract_entered = np.sum(self.num_entered) / self.nparticles_grid
# Of the particles that have entered the grid, how many are currently
# on the grid?
# if/else avoids dividing by zero
if np.sum(self.num_entered) > 0:
still_on = np.sum(self.on_grid) / np.sum(self.num_entered)
else:
still_on = 0.0
if self.fract_entered > 0.1 and still_on < 0.001:
# Warn user if < 10% of the particles ended up on the grid
if self.num_entered < 0.1 * self.nparticles:
warnings.warn(
f"Only {100*self.num_entered/self.nparticles:.2f}% of "
"particles entered the field grid: consider "
"decreasing the max_theta to increase this "
"number.",
RuntimeWarning,
)
return True
else:
return False
def run(
self, dt=None, field_weighting="volume averaged",
):
r"""
Runs a particle-tracing simulation.
Timesteps are adaptively calculated based on the
local grid resolution of the particles and the electric and magnetic
fields they are experiencing. After all particles
have left the grid, they are advanced to the
detector plane where they can be used to construct a synthetic
diagnostic image.
Parameters
----------
dt : `~astropy.units.Quantity`, optional
            An explicitly set timestep in units convertible to seconds.
Setting this optional keyword overrules the adaptive time step
capability and forces the use of this timestep throughout. If a tuple
            of timesteps is provided, the adaptive timestep will be clamped
between the first and second values.
field_weighting : str
String that selects the field weighting algorithm used to determine
what fields are felt by the particles. Options are:
* 'nearest neighbor': Particles are assigned the fields on
the grid vertex closest to them.
* 'volume averaged' : The fields experienced by a particle are a
volume-average of the eight grid points surrounding them.
The default is 'volume averaged'.
Returns
-------
None.
"""
# Load and validate inputs
field_weightings = ["volume averaged", "nearest neighbor"]
if field_weighting in field_weightings:
self.field_weighting = field_weighting
else:
raise ValueError(
f"{field_weighting} is not a valid option for ",
"field_weighting. Valid choices are",
f"{field_weightings}",
)
if dt is None:
# Set dt as an infinite range by default (auto dt with no restrictions)
self.dt = np.array([0.0, np.inf]) * u.s
else:
self.dt = dt
self.dt = (self.dt).to(u.s).value
# Check to make sure particles have already been generated
if not hasattr(self, "x"):
raise ValueError(
"Either the create_particles or load_particles method must be "
"called before running the particle tracing algorithm."
)
# If meshes have been added, apply them now
for mesh in self.mesh_list:
self._apply_wire_mesh(**mesh)
# Store a copy of the initial velocity distribution in memory
# This will be used later to calculate the maximum deflection
self.v_init = np.copy(self.v)
# Calculate the maximum velocity
# Used for determining the grid crossing maximum timestep
self.vmax = np.max(np.linalg.norm(self.v, axis=-1))
# Determine which particles should be tracked
# This array holds the indices of all particles that WILL hit the grid
# Only these particles will actually be pushed through the fields
self.grid_ind = np.where(self.theta < self.max_theta_hit_grid)[0]
self.nparticles_grid = len(self.grid_ind)
self.fract_tracked = self.nparticles_grid / self.nparticles
# Create flags for tracking when particles during the simulation
        # on_grid -> zero if the particle is off the grid, one if it is on the grid
self.on_grid = np.zeros([self.nparticles_grid])
# Entered grid -> non-zero if particle EVER entered the grid
self.entered_grid = np.zeros([self.nparticles_grid])
# Generate a null distribution of points (the result in the absence of
# any fields) for statistical comparison
self.x0 = self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir)
# Advance the particles to the near the start of the grid
self._coast_to_grid()
# Initialize a "progress bar" (really more of a meter)
# Setting sys.stdout lets this play nicely with regular print()
pbar = tqdm(
initial=0,
total=self.nparticles_grid + 1,
disable=not self.verbose,
desc="Particles on grid",
unit="particles",
bar_format="{l_bar}{bar}{n:.1e}/{total:.1e} {unit}",
file=sys.stdout,
)
# Push the particles until the stop condition is satisfied
# (no more particles on the simulation grid)
while not self._stop_condition():
n_on_grid = np.sum(self.on_grid)
pbar.n = n_on_grid
pbar.last_print_n = n_on_grid
pbar.update()
self._push()
pbar.close()
# Remove particles that will never reach the detector
self._remove_deflected_particles()
# Advance the particles to the image plane
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x=self.x)
# Log a summary of the run
self._log("Run completed")
self._log("Fraction of particles tracked: " f"{self.fract_tracked*100:.1f}%")
self._log(
"Fraction of tracked particles that entered the grid: "
f"{self.fract_entered*100:.1f}%"
)
self._log(
"Fraction of tracked particles deflected away from the "
"detector plane: "
f"{self.fract_deflected*100}%"
)
@property
def max_deflection(self):
"""
The maximum deflection experienced by one of the particles, determined
        by comparing their initial and final velocity vectors.
This value can be used to determine the charged particle radiography regime
using the dimensionless number defined by Kugland et al. 2012
Returns
-------
max_deflection : float
The maximum deflection in radians
"""
# Normalize the initial and final velocities
v_norm = self.v / np.linalg.norm(self.v, axis=1, keepdims=True)
v_init_norm = self.v_init / np.linalg.norm(self.v_init, axis=1, keepdims=True)
# Compute the dot product
proj = np.sum(v_norm * v_init_norm, axis=1)
# In case of numerical errors, make sure the output is within the domain of
# arccos
proj = np.where(proj > 1, 1, proj)
max_deflection = np.max(np.arccos(proj))
return max_deflection * u.rad
# *************************************************************************
# Synthetic diagnostic methods (creating output)
# *************************************************************************
def synthetic_radiograph(
self, size=None, bins=[200, 200], ignore_grid=False, optical_density=False
):
r"""
Calculate a "synthetic radiograph" (particle count histogram in the
image plane).
Parameters
----------
size : `~astropy.units.Quantity`, shape (2,2)
The size of the detector array, specified as the minimum
and maximum values included in both the horizontal and vertical
directions in the detector plane coordinates. Shape is
            [[hmin,hmax], [vmin, vmax]]. Units must be convertible to meters.
bins : array of integers, shape (2)
The number of bins in each direction in the format [hbins, vbins].
The default is [200,200].
ignore_grid: bool
If True, returns the intensity in the image plane in the absence
of simulated fields.
optical_density: bool
If True, return the optical density rather than the intensity
.. math::
OD = -log_{10}(Intensity/I_0)
where I_O is the intensity on the detector plane in the absence of
simulated fields. Default is False.
Returns
-------
hax : `~astropy.units.Quantity` array shape (hbins,)
The horizontal axis of the synthetic radiograph in meters.
vax : `~astropy.units.Quantity` array shape (vbins, )
The vertical axis of the synthetic radiograph in meters.
intensity : ndarray, shape (hbins, vbins)
The number of particles counted in each bin of the histogram.
"""
# Note that, at the end of the simulation, all particles were moved
# into the image plane.
# If ignore_grid is True, use the predicted positions in the absence of
# simulated fields
if ignore_grid:
x = self.x0
else:
x = self.x
# Determine locations of points in the detector plane using unit
# vectors
xloc = np.dot(x - self.detector, self.det_hdir)
yloc = np.dot(x - self.detector, self.det_vdir)
if size is None:
# If a detector size is not given, choose lengths based on the
# dimensions of the grid
w = self.mag * np.max(
[
np.max(np.abs(self.grid.pts0.to(u.m).value)),
np.max(np.abs(self.grid.pts1.to(u.m).value)),
np.max(np.abs(self.grid.pts2.to(u.m).value)),
]
)
# The factor of 5 here is somewhat arbitrary: we just want a
# region a few times bigger than the image of the grid on the
# detector, since particles could be deflected out
size = 5 * np.array([[-w, w], [-w, w]]) * u.m
# Generate the histogram
intensity, h, v = np.histogram2d(
xloc, yloc, range=size.to(u.m).value, bins=bins
)
# h, v are the bin edges: compute the centers to produce arrays
# of the right length (then trim off the extra point)
h = ((h + np.roll(h, -1)) / 2)[0:-1]
v = ((v + np.roll(v, -1)) / 2)[0:-1]
# Throw a warning if < 50% of the particles are included on the
# histogram
percentage = np.sum(intensity) / self.nparticles
if percentage < 0.5:
warnings.warn(
f"Only {percentage:.2%} of the particles are shown "
"on this synthetic radiograph. Consider increasing "
"the size to include more.",
RuntimeWarning,
)
if optical_density:
# Generate the null radiograph
x, y, I0 = self.synthetic_radiograph(size=size, bins=bins, ignore_grid=True)
# Calculate I0 as the mean of the non-zero values in the null
            # histogram. Zeros are just outside of the illuminated area.
I0 = np.mean(I0[I0 != 0])
# Overwrite any zeros in intensity to avoid log10(0)
intensity[intensity == 0] = 1
# Calculate the optical_density
intensity = -np.log10(intensity / I0)
return h * u.m, v * u.m, intensity
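    # Minimal usage sketch (illustrative; assumes `sim` is an instance of this
    # class on which the particle-tracing simulation has already been run):
    #
    #   hax, vax, intensity = sim.synthetic_radiograph(bins=[400, 400])
    #   hax, vax, od = sim.synthetic_radiograph(optical_density=True)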
|
[
"numpy.clip",
"numpy.log10",
"numpy.sqrt",
"numpy.arccos",
"plasmapy.simulation.particle_integrators.boris_push",
"numpy.logical_not",
"numpy.array",
"numpy.arctan2",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.sin",
"numpy.moveaxis",
"numpy.mean",
"numpy.cross",
"numpy.where",
"numpy.max",
"plasmapy.particles.Particle",
"numpy.dot",
"numpy.linspace",
"numpy.matmul",
"numpy.min",
"warnings.warn",
"numpy.meshgrid",
"numpy.tile",
"numpy.abs",
"numpy.allclose",
"numpy.random.choice",
"numpy.inner",
"numpy.cos",
"numpy.nonzero",
"numpy.copy",
"numpy.isclose",
"numpy.roll",
"plasmapy.formulary.mathematics.rot_a_to_b",
"tqdm.tqdm",
"numpy.sum",
"numpy.zeros",
"numpy.empty_like",
"numpy.random.uniform"
] |
[((1362, 1373), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1370, 1373), True, 'import numpy as np\n'), ((7185, 7220), 'numpy.cross', 'np.cross', (['self.det_hdir', 'self.det_n'], {}), '(self.det_hdir, self.det_n)\n', (7193, 7220), True, 'import numpy as np\n'), ((10017, 10030), 'numpy.zeros', 'np.zeros', (['[8]'], {}), '([8])\n', (10025, 10030), True, 'import numpy as np\n'), ((10652, 10665), 'numpy.max', 'np.max', (['theta'], {}), '(theta)\n', (10658, 10665), True, 'import numpy as np\n'), ((16375, 16406), 'numpy.dot', 'np.dot', (['(x - location)', 'mesh_hdir'], {}), '(x - location, mesh_hdir)\n', (16381, 16406), True, 'import numpy as np\n'), ((16422, 16453), 'numpy.dot', 'np.dot', (['(x - location)', 'mesh_vdir'], {}), '(x - location, mesh_vdir)\n', (16428, 16453), True, 'import numpy as np\n'), ((16596, 16633), 'numpy.zeros', 'np.zeros', (['self.nparticles'], {'dtype': 'bool'}), '(self.nparticles, dtype=bool)\n', (16604, 16633), True, 'import numpy as np\n'), ((16737, 16786), 'numpy.linspace', 'np.linspace', (['(-width / 2)', '(width / 2)'], {'num': 'nwires[0]'}), '(-width / 2, width / 2, num=nwires[0])\n', (16748, 16786), True, 'import numpy as np\n'), ((16893, 16944), 'numpy.linspace', 'np.linspace', (['(-height / 2)', '(height / 2)'], {'num': 'nwires[1]'}), '(-height / 2, height / 2, num=nwires[1])\n', (16904, 16944), True, 'import numpy as np\n'), ((19447, 19458), 'numpy.sin', 'np.sin', (['arg'], {}), '(arg)\n', (19453, 19458), True, 'import numpy as np\n'), ((19580, 19645), 'numpy.random.choice', 'np.random.choice', (['arg'], {'size': 'self.nparticles', 'replace': '(True)', 'p': 'prob'}), '(arg, size=self.nparticles, replace=True, p=prob)\n', (19596, 19645), True, 'import numpy as np\n'), ((19712, 19767), 'numpy.random.uniform', 'np.random.uniform', ([], {'high': '(2 * np.pi)', 'size': 'self.nparticles'}), '(high=2 * np.pi, size=self.nparticles)\n', (19729, 19767), True, 'import numpy as np\n'), ((20567, 20606), 'numpy.linspace', 'np.linspace', (['(-extent)', 'extent'], {'num': 'n_per'}), '(-extent, extent, num=n_per)\n', (20578, 20606), True, 'import numpy as np\n'), ((20628, 20664), 'numpy.meshgrid', 'np.meshgrid', (['arr', 'arr'], {'indexing': '"""ij"""'}), "(arr, arr, indexing='ij')\n", (20639, 20664), True, 'import numpy as np\n'), ((20823, 20845), 'numpy.arctan2', 'np.arctan2', (['varr', 'harr'], {}), '(varr, harr)\n', (20833, 20845), True, 'import numpy as np\n'), ((21062, 21076), 'plasmapy.particles.Particle', 'Particle', (['"""p+"""'], {}), "('p+')\n", (21070, 21076), False, 'from plasmapy.particles import Particle\n'), ((24716, 24746), 'numpy.zeros', 'np.zeros', (['[self.nparticles, 3]'], {}), '([self.nparticles, 3])\n', (24724, 24746), True, 'import numpy as np\n'), ((25018, 25037), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (25026, 25037), True, 'import numpy as np\n'), ((25092, 25108), 'plasmapy.formulary.mathematics.rot_a_to_b', 'rot_a_to_b', (['a', 'b'], {}), '(a, b)\n', (25102, 25108), False, 'from plasmapy.formulary.mathematics import rot_a_to_b\n'), ((25195, 25217), 'numpy.matmul', 'np.matmul', (['self.v', 'rot'], {}), '(self.v, rot)\n', (25204, 25217), True, 'import numpy as np\n'), ((25276, 25318), 'numpy.tile', 'np.tile', (['self.source', '(self.nparticles, 1)'], {}), '(self.source, (self.nparticles, 1))\n', (25283, 25318), True, 'import numpy as np\n'), ((25415, 25429), 'plasmapy.particles.Particle', 'Particle', (['"""p+"""'], {}), "('p+')\n", (25423, 25429), False, 'from plasmapy.particles import Particle\n'), ((28862, 28899), 
'numpy.array', 'np.array', (['[gyroperiod / 12, gridstep]'], {}), '([gyroperiod / 12, gridstep])\n', (28870, 28899), True, 'import numpy as np\n'), ((28953, 28996), 'numpy.clip', 'np.clip', (['candidates', 'self.dt[0]', 'self.dt[1]'], {}), '(candidates, self.dt[0], self.dt[1])\n', (28960, 28996), True, 'import numpy as np\n'), ((29065, 29083), 'numpy.min', 'np.min', (['candidates'], {}), '(candidates)\n', (29071, 29083), True, 'import numpy as np\n'), ((30503, 30523), 'numpy.cross', 'np.cross', (['hdir', 'vdir'], {}), '(hdir, vdir)\n', (30511, 30523), True, 'import numpy as np\n'), ((31042, 31068), 'numpy.dot', 'np.dot', (['(x - center)', 'normal'], {}), '(x - center, normal)\n', (31048, 31068), True, 'import numpy as np\n'), ((31084, 31120), 'numpy.allclose', 'np.allclose', (['plane_eq', '(0)'], {'atol': '(1e-06)'}), '(plane_eq, 0, atol=1e-06)\n', (31095, 31120), True, 'import numpy as np\n'), ((31449, 31476), 'numpy.dot', 'np.dot', (['self.v', '(-self.det_n)'], {}), '(self.v, -self.det_n)\n', (31455, 31476), True, 'import numpy as np\n'), ((33983, 34004), 'numpy.moveaxis', 'np.moveaxis', (['E', '(0)', '(-1)'], {}), '(E, 0, -1)\n', (33994, 34004), True, 'import numpy as np\n'), ((34094, 34115), 'numpy.moveaxis', 'np.moveaxis', (['B', '(0)', '(-1)'], {}), '(B, 0, -1)\n', (34105, 34115), True, 'import numpy as np\n'), ((34537, 34579), 'plasmapy.simulation.particle_integrators.boris_push', 'boris_push', (['x', 'v', 'B', 'E', 'self.q', 'self.m', 'dt'], {}), '(x, v, B, E, self.q, self.m, dt)\n', (34547, 34579), False, 'from plasmapy.simulation.particle_integrators import boris_push\n'), ((38754, 38769), 'numpy.copy', 'np.copy', (['self.v'], {}), '(self.v)\n', (38761, 38769), True, 'import numpy as np\n'), ((39492, 39524), 'numpy.zeros', 'np.zeros', (['[self.nparticles_grid]'], {}), '([self.nparticles_grid])\n', (39500, 39524), True, 'import numpy as np\n'), ((39622, 39654), 'numpy.zeros', 'np.zeros', (['[self.nparticles_grid]'], {}), '([self.nparticles_grid])\n', (39630, 39654), True, 'import numpy as np\n'), ((40116, 40312), 'tqdm.tqdm', 'tqdm', ([], {'initial': '(0)', 'total': '(self.nparticles_grid + 1)', 'disable': '(not self.verbose)', 'desc': '"""Particles on grid"""', 'unit': '"""particles"""', 'bar_format': '"""{l_bar}{bar}{n:.1e}/{total:.1e} {unit}"""', 'file': 'sys.stdout'}), "(initial=0, total=self.nparticles_grid + 1, disable=not self.verbose,\n desc='Particles on grid', unit='particles', bar_format=\n '{l_bar}{bar}{n:.1e}/{total:.1e} {unit}', file=sys.stdout)\n", (40120, 40312), False, 'from tqdm import tqdm\n'), ((42209, 42245), 'numpy.sum', 'np.sum', (['(v_norm * v_init_norm)'], {'axis': '(1)'}), '(v_norm * v_init_norm, axis=1)\n', (42215, 42245), True, 'import numpy as np\n'), ((42362, 42389), 'numpy.where', 'np.where', (['(proj > 1)', '(1)', 'proj'], {}), '(proj > 1, 1, proj)\n', (42370, 42389), True, 'import numpy as np\n'), ((44751, 44791), 'numpy.dot', 'np.dot', (['(x - self.detector)', 'self.det_hdir'], {}), '(x - self.detector, self.det_hdir)\n', (44757, 44791), True, 'import numpy as np\n'), ((44807, 44847), 'numpy.dot', 'np.dot', (['(x - self.detector)', 'self.det_vdir'], {}), '(x - self.detector, self.det_vdir)\n', (44813, 44847), True, 'import numpy as np\n'), ((5709, 5736), 'numpy.linalg.norm', 'np.linalg.norm', (['self.source'], {}), '(self.source)\n', (5723, 5736), True, 'import numpy as np\n'), ((5775, 5804), 'numpy.linalg.norm', 'np.linalg.norm', (['self.detector'], {}), '(self.detector)\n', (5789, 5804), True, 'import numpy as np\n'), ((7251, 7269), 
'numpy.linalg.norm', 'np.linalg.norm', (['ny'], {}), '(ny)\n', (7265, 7269), True, 'import numpy as np\n'), ((9519, 9537), 'numpy.abs', 'np.abs', (['self.det_n'], {}), '(self.det_n)\n', (9525, 9537), True, 'import numpy as np\n'), ((9539, 9558), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9547, 9558), True, 'import numpy as np\n'), ((9578, 9597), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (9586, 9597), True, 'import numpy as np\n'), ((9689, 9707), 'numpy.linalg.norm', 'np.linalg.norm', (['nx'], {}), '(nx)\n', (9703, 9707), True, 'import numpy as np\n'), ((15113, 15144), 'numpy.cross', 'np.cross', (['mesh_hdir', 'self.det_n'], {}), '(mesh_hdir, self.det_n)\n', (15121, 15144), True, 'import numpy as np\n'), ((15354, 15392), 'numpy.linalg.norm', 'np.linalg.norm', (['(location - self.source)'], {}), '(location - self.source)\n', (15368, 15392), True, 'import numpy as np\n'), ((15395, 15422), 'numpy.linalg.norm', 'np.linalg.norm', (['self.source'], {}), '(self.source)\n', (15409, 15422), True, 'import numpy as np\n'), ((16834, 16871), 'numpy.isclose', 'np.isclose', (['xloc', 'c'], {'atol': 'wire_radius'}), '(xloc, c, atol=wire_radius)\n', (16844, 16871), True, 'import numpy as np\n'), ((16992, 17029), 'numpy.isclose', 'np.isclose', (['yloc', 'c'], {'atol': 'wire_radius'}), '(yloc, c, atol=wire_radius)\n', (17002, 17029), True, 'import numpy as np\n'), ((17805, 17835), 'numpy.sqrt', 'np.sqrt', (['(xloc ** 2 + yloc ** 2)'], {}), '(xloc ** 2 + yloc ** 2)\n', (17812, 17835), True, 'import numpy as np\n'), ((19479, 19491), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (19485, 19491), True, 'import numpy as np\n'), ((20517, 20539), 'numpy.sin', 'np.sin', (['self.max_theta'], {}), '(self.max_theta)\n', (20523, 20539), True, 'import numpy as np\n'), ((20542, 20552), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20549, 20552), True, 'import numpy as np\n'), ((20777, 20807), 'numpy.sqrt', 'np.sqrt', (['(harr ** 2 + varr ** 2)'], {}), '(harr ** 2 + varr ** 2)\n', (20784, 20807), True, 'import numpy as np\n'), ((23970, 24032), 'numpy.clip', 'np.clip', (['(1.5 * self.max_theta_hit_grid)', '(0.01)', '(0.99 * np.pi / 2)'], {}), '(1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2)\n', (23977, 24032), True, 'import numpy as np\n'), ((24296, 24326), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / (ER + 1) ** 2)'], {}), '(1 - 1 / (ER + 1) ** 2)\n', (24303, 24326), True, 'import numpy as np\n'), ((24791, 24802), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (24797, 24802), True, 'import numpy as np\n'), ((24847, 24858), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (24853, 24858), True, 'import numpy as np\n'), ((24887, 24900), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (24893, 24900), True, 'import numpy as np\n'), ((27248, 27286), 'numpy.where', 'np.where', (['(self.theta > np.pi / 2)', '(1)', '(0)'], {}), '(self.theta > np.pi / 2, 1, 0)\n', (27256, 27286), True, 'import numpy as np\n'), ((27328, 27528), 'warnings.warn', 'warnings.warn', (['f"""{100 * n_wrong_way / self.nparticles:.2f}% of particles initialized are heading away from the grid. Check the orientation of the provided velocity vectors."""', 'RuntimeWarning'], {}), "(\n f'{100 * n_wrong_way / self.nparticles:.2f}% of particles initialized are heading away from the grid. 
Check the orientation of the provided velocity vectors.'\n , RuntimeWarning)\n", (27341, 27528), False, 'import warnings\n'), ((29442, 29493), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.grid_arr - self.source)'], {'axis': '(3)'}), '(self.grid_arr - self.source, axis=3)\n', (29456, 29493), True, 'import numpy as np\n'), ((29586, 29612), 'numpy.dot', 'np.dot', (['self.v', 'self.src_n'], {}), '(self.v, self.src_n)\n', (29592, 29612), True, 'import numpy as np\n'), ((30624, 30672), 'numpy.inner', 'np.inner', (['(center[np.newaxis, :] - self.x)', 'normal'], {}), '(center[np.newaxis, :] - self.x, normal)\n', (30632, 30672), True, 'import numpy as np\n'), ((30675, 30699), 'numpy.inner', 'np.inner', (['self.v', 'normal'], {}), '(self.v, normal)\n', (30683, 30699), True, 'import numpy as np\n'), ((30849, 30870), 'numpy.empty_like', 'np.empty_like', (['self.x'], {}), '(self.x)\n', (30862, 30870), True, 'import numpy as np\n'), ((31365, 31391), 'numpy.dot', 'np.dot', (['self.x', 'self.det_n'], {}), '(self.x, self.det_n)\n', (31371, 31391), True, 'import numpy as np\n'), ((31394, 31423), 'numpy.linalg.norm', 'np.linalg.norm', (['self.detector'], {}), '(self.detector)\n', (31408, 31423), True, 'import numpy as np\n'), ((32319, 32546), 'warnings.warn', 'warnings.warn', (['f"""{100 * self.fract_deflected:.1f}% particles have been deflected away from the detector plane. The fields provided may be too high to successfully radiograph with this particle energy."""', 'RuntimeWarning'], {}), "(\n f'{100 * self.fract_deflected:.1f}% particles have been deflected away from the detector plane. The fields provided may be too high to successfully radiograph with this particle energy.'\n , RuntimeWarning)\n", (32332, 32546), False, 'import warnings\n'), ((35108, 35132), 'numpy.sum', 'np.sum', (['self.num_entered'], {}), '(self.num_entered)\n', (35114, 35132), True, 'import numpy as np\n'), ((35311, 35335), 'numpy.sum', 'np.sum', (['self.num_entered'], {}), '(self.num_entered)\n', (35317, 35335), True, 'import numpy as np\n'), ((38905, 38936), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v'], {'axis': '(-1)'}), '(self.v, axis=-1)\n', (38919, 38936), True, 'import numpy as np\n'), ((39170, 39216), 'numpy.where', 'np.where', (['(self.theta < self.max_theta_hit_grid)'], {}), '(self.theta < self.max_theta_hit_grid)\n', (39178, 39216), True, 'import numpy as np\n'), ((40586, 40606), 'numpy.sum', 'np.sum', (['self.on_grid'], {}), '(self.on_grid)\n', (40592, 40606), True, 'import numpy as np\n'), ((42027, 42072), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.v, axis=1, keepdims=True)\n', (42041, 42072), True, 'import numpy as np\n'), ((42109, 42159), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v_init'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.v_init, axis=1, keepdims=True)\n', (42123, 42159), True, 'import numpy as np\n'), ((42422, 42437), 'numpy.arccos', 'np.arccos', (['proj'], {}), '(proj)\n', (42431, 42437), True, 'import numpy as np\n'), ((46023, 46040), 'numpy.sum', 'np.sum', (['intensity'], {}), '(intensity)\n', (46029, 46040), True, 'import numpy as np\n'), ((46100, 46268), 'warnings.warn', 'warnings.warn', (['f"""Only {percentage:.2%} of the particles are shown on this synthetic radiograph. Consider increasing the size to include more."""', 'RuntimeWarning'], {}), "(\n f'Only {percentage:.2%} of the particles are shown on this synthetic radiograph. 
Consider increasing the size to include more.'\n , RuntimeWarning)\n", (46113, 46268), False, 'import warnings\n'), ((46669, 46689), 'numpy.mean', 'np.mean', (['I0[I0 != 0]'], {}), '(I0[I0 != 0])\n', (46676, 46689), True, 'import numpy as np\n'), ((5955, 5984), 'numpy.linalg.norm', 'np.linalg.norm', (['self.detector'], {}), '(self.detector)\n', (5969, 5984), True, 'import numpy as np\n'), ((5987, 6014), 'numpy.linalg.norm', 'np.linalg.norm', (['self.source'], {}), '(self.source)\n', (6001, 6014), True, 'import numpy as np\n'), ((7031, 7060), 'numpy.linalg.norm', 'np.linalg.norm', (['detector_hdir'], {}), '(detector_hdir)\n', (7045, 7060), True, 'import numpy as np\n'), ((8224, 8245), 'numpy.abs', 'np.abs', (['self.grid[rq]'], {}), '(self.grid[rq])\n', (8230, 8245), True, 'import numpy as np\n'), ((8786, 9065), 'warnings.warn', 'warnings.warn', (['f"""Fields should go to zero at edges of grid to avoid non-physical effects, but a value of {edge_max:.2E} {unit} was found on the edge of the {rq} array. Consider applying a envelope function to force the fields at the edge to go to zero."""', 'RuntimeWarning'], {}), "(\n f'Fields should go to zero at edges of grid to avoid non-physical effects, but a value of {edge_max:.2E} {unit} was found on the edge of the {rq} array. Consider applying a envelope function to force the fields at the edge to go to zero.'\n , RuntimeWarning)\n", (8799, 9065), False, 'import warnings\n'), ((9638, 9657), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9646, 9657), True, 'import numpy as np\n'), ((15032, 15057), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_hdir'], {}), '(mesh_hdir)\n', (15046, 15057), True, 'import numpy as np\n'), ((15182, 15207), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_vdir'], {}), '(mesh_vdir)\n', (15196, 15207), True, 'import numpy as np\n'), ((15258, 15283), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_vdir'], {}), '(mesh_vdir)\n', (15272, 15283), True, 'import numpy as np\n'), ((18003, 18048), 'numpy.isclose', 'np.isclose', (['loc_rad', 'radius'], {'atol': 'wire_radius'}), '(loc_rad, radius, atol=wire_radius)\n', (18013, 18048), True, 'import numpy as np\n'), ((24775, 24788), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (24781, 24788), True, 'import numpy as np\n'), ((24831, 24844), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (24837, 24844), True, 'import numpy as np\n'), ((27145, 27173), 'numpy.inner', 'np.inner', (['self.v', 'self.src_n'], {}), '(self.v, self.src_n)\n', (27153, 27173), True, 'import numpy as np\n'), ((27176, 27207), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v'], {'axis': '(-1)'}), '(self.v, axis=-1)\n', (27190, 27207), True, 'import numpy as np\n'), ((28324, 28334), 'numpy.min', 'np.min', (['ds'], {}), '(ds)\n', (28330, 28334), True, 'import numpy as np\n'), ((34982, 35011), 'numpy.nonzero', 'np.nonzero', (['self.entered_grid'], {}), '(self.entered_grid)\n', (34992, 35011), True, 'import numpy as np\n'), ((35364, 35384), 'numpy.sum', 'np.sum', (['self.on_grid'], {}), '(self.on_grid)\n', (35370, 35384), True, 'import numpy as np\n'), ((35387, 35411), 'numpy.sum', 'np.sum', (['self.num_entered'], {}), '(self.num_entered)\n', (35393, 35411), True, 'import numpy as np\n'), ((35656, 35848), 'warnings.warn', 'warnings.warn', (['f"""Only {100 * self.num_entered / self.nparticles:.2f}% of particles entered the field grid: consider decreasing the max_theta to increase this number."""', 'RuntimeWarning'], {}), "(\n f'Only {100 * self.num_entered / self.nparticles:.2f}% of particles 
entered the field grid: consider decreasing the max_theta to increase this number.'\n , RuntimeWarning)\n", (35669, 35848), False, 'import warnings\n'), ((38050, 38073), 'numpy.array', 'np.array', (['[0.0, np.inf]'], {}), '([0.0, np.inf])\n', (38058, 38073), True, 'import numpy as np\n'), ((46868, 46892), 'numpy.log10', 'np.log10', (['(intensity / I0)'], {}), '(intensity / I0)\n', (46876, 46892), True, 'import numpy as np\n'), ((8698, 8709), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (8704, 8709), True, 'import numpy as np\n'), ((20249, 20273), 'numpy.sqrt', 'np.sqrt', (['self.nparticles'], {}), '(self.nparticles)\n', (20256, 20273), True, 'import numpy as np\n'), ((28681, 28693), 'numpy.max', 'np.max', (['Bmag'], {}), '(Bmag)\n', (28687, 28693), True, 'import numpy as np\n'), ((31801, 31859), 'numpy.logical_not', 'np.logical_not', (['((v_towards_det < 0) & (dist_remaining > 0))'], {}), '((v_towards_det < 0) & (dist_remaining > 0))\n', (31815, 31859), True, 'import numpy as np\n'), ((45503, 45531), 'numpy.array', 'np.array', (['[[-w, w], [-w, w]]'], {}), '([[-w, w], [-w, w]])\n', (45511, 45531), True, 'import numpy as np\n'), ((45837, 45851), 'numpy.roll', 'np.roll', (['h', '(-1)'], {}), '(h, -1)\n', (45844, 45851), True, 'import numpy as np\n'), ((45882, 45896), 'numpy.roll', 'np.roll', (['v', '(-1)'], {}), '(v, -1)\n', (45889, 45896), True, 'import numpy as np\n'), ((7732, 7764), 'numpy.isfinite', 'np.isfinite', (['self.grid[rq].value'], {}), '(self.grid[rq].value)\n', (7743, 7764), True, 'import numpy as np\n'), ((8355, 8375), 'numpy.max', 'np.max', (['arr[0, :, :]'], {}), '(arr[0, :, :])\n', (8361, 8375), True, 'import numpy as np\n'), ((8401, 8422), 'numpy.max', 'np.max', (['arr[-1, :, :]'], {}), '(arr[-1, :, :])\n', (8407, 8422), True, 'import numpy as np\n'), ((8448, 8468), 'numpy.max', 'np.max', (['arr[:, 0, :]'], {}), '(arr[:, 0, :])\n', (8454, 8468), True, 'import numpy as np\n'), ((8494, 8515), 'numpy.max', 'np.max', (['arr[:, -1, :]'], {}), '(arr[:, -1, :])\n', (8500, 8515), True, 'import numpy as np\n'), ((8541, 8561), 'numpy.max', 'np.max', (['arr[:, :, 0]'], {}), '(arr[:, :, 0])\n', (8547, 8561), True, 'import numpy as np\n'), ((8587, 8608), 'numpy.max', 'np.max', (['arr[:, :, -1]'], {}), '(arr[:, :, -1])\n', (8593, 8608), True, 'import numpy as np\n'), ((28471, 28507), 'numpy.sqrt', 'np.sqrt', (['(Bx ** 2 + By ** 2 + Bz ** 2)'], {}), '(Bx ** 2 + By ** 2 + Bz ** 2)\n', (28478, 28507), True, 'import numpy as np\n'), ((1698, 1707), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1704, 1707), True, 'import numpy as np\n'), ((1749, 1758), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1755, 1758), True, 'import numpy as np\n'), ((10557, 10585), 'numpy.linalg.norm', 'np.linalg.norm', (['self.src_det'], {}), '(self.src_det)\n', (10571, 10585), True, 'import numpy as np\n'), ((17320, 17337), 'numpy.max', 'np.max', (['h_centers'], {}), '(h_centers)\n', (17326, 17337), True, 'import numpy as np\n'), ((17380, 17397), 'numpy.min', 'np.min', (['h_centers'], {}), '(h_centers)\n', (17386, 17397), True, 'import numpy as np\n'), ((17583, 17600), 'numpy.max', 'np.max', (['v_centers'], {}), '(v_centers)\n', (17589, 17600), True, 'import numpy as np\n'), ((17643, 17660), 'numpy.min', 'np.min', (['v_centers'], {}), '(v_centers)\n', (17649, 17660), True, 'import numpy as np\n'), ((1989, 1998), 'numpy.cos', 'np.cos', (['p'], {}), '(p)\n', (1995, 1998), True, 'import numpy as np\n'), ((2052, 2061), 'numpy.sin', 'np.sin', (['p'], {}), '(p)\n', (2058, 2061), True, 'import numpy as np\n'), ((2103, 
2112), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2109, 2112), True, 'import numpy as np\n'), ((10459, 10484), 'numpy.dot', 'np.dot', (['vec', 'self.src_det'], {}), '(vec, self.src_det)\n', (10465, 10484), True, 'import numpy as np\n'), ((10511, 10530), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (10525, 10530), True, 'import numpy as np\n'), ((1977, 1986), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1983, 1986), True, 'import numpy as np\n'), ((2040, 2049), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2046, 2049), True, 'import numpy as np\n')]
|
from numpy import array, rad2deg, pi, mgrid, argmin
from matplotlib.pylab import contour
import matplotlib.pyplot as plt
import mplstereonet
from obspy.imaging.beachball import aux_plane
from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm
from focal_mech.io.read_hash import read_demo, read_hash_solutions
from focal_mech.util.hash_routines import hash_to_classifier
from focal_mech.lib.sph_harm import get_sph_harm
from focal_mech.lib.correlate import corr_shear
hash_solns = read_hash_solutions("example1.out")
# we want solutions that are symmetric
polarity_data = read_demo("north1.phase", "scsn.reverse", reverse=True)
inputs = hash_to_classifier(polarity_data, parity=1)
event = 3146815
result = classify(*inputs[event], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth1 = c.collections[0].get_paths()[0].vertices
pth1 = rad2deg(pth1)
pth2 = c.collections[0].get_paths()[1].vertices
pth2 = rad2deg(pth2)
hash_focal = rad2deg(hash_solns[event])
event2 = 3158361
result = classify(*inputs[event2], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln2, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth3 = c.collections[0].get_paths()[0].vertices
pth3 = rad2deg(pth3)
pth4 = c.collections[0].get_paths()[1].vertices
pth4 = rad2deg(pth4)
hash_focal2 = rad2deg(hash_solns[event2])
event3 = 3153955
result = classify(*inputs[event3], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln3, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth5 = c.collections[0].get_paths()[0].vertices
pth5 = rad2deg(pth5)
pth6 = c.collections[0].get_paths()[1].vertices
pth6 = rad2deg(pth6)
hash_focal3 = rad2deg(hash_solns[event3])
fig = plt.figure(facecolor="white", figsize=(10,20))
ax = fig.add_subplot(221, projection='stereonet')
ax.rake(pth1[:,0], pth1[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth2[:,0], pth2[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event][:,0])
toa = rad2deg(polarity_data[event][:,1])
polarity = polarity_data[event][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(222, projection='stereonet')
ax.rake(pth3[:,0], pth3[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth4[:,0], pth4[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln2
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln2)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal2
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal2)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event2][:,0])
toa = rad2deg(polarity_data[event2][:,1])
polarity = polarity_data[event2][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(224, projection='stereonet')
ax.rake(pth5[:,0], pth5[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth6[:,0], pth6[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln3
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln3)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal3
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal3)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event3][:,0])
toa = rad2deg(polarity_data[event3][:,1])
polarity = polarity_data[event3][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
plt.tight_layout(pad=4.0, h_pad=20.0)
plt.show()
|
[
"focal_mech.lib.classify_mechanism.classify",
"focal_mech.util.hash_routines.hash_to_classifier",
"matplotlib.pylab.contour",
"focal_mech.io.read_hash.read_hash_solutions",
"focal_mech.io.read_hash.read_demo",
"numpy.array",
"matplotlib.pyplot.figure",
"obspy.imaging.beachball.aux_plane",
"focal_mech.lib.classify_mechanism.translate_to_sphharm",
"matplotlib.pyplot.tight_layout",
"focal_mech.lib.correlate.corr_shear",
"focal_mech.lib.sph_harm.get_sph_harm",
"numpy.rad2deg",
"matplotlib.pyplot.show"
] |
[((507, 542), 'focal_mech.io.read_hash.read_hash_solutions', 'read_hash_solutions', (['"""example1.out"""'], {}), "('example1.out')\n", (526, 542), False, 'from focal_mech.io.read_hash import read_demo, read_hash_solutions\n'), ((598, 653), 'focal_mech.io.read_hash.read_demo', 'read_demo', (['"""north1.phase"""', '"""scsn.reverse"""'], {'reverse': '(True)'}), "('north1.phase', 'scsn.reverse', reverse=True)\n", (607, 653), False, 'from focal_mech.io.read_hash import read_demo, read_hash_solutions\n'), ((663, 706), 'focal_mech.util.hash_routines.hash_to_classifier', 'hash_to_classifier', (['polarity_data'], {'parity': '(1)'}), '(polarity_data, parity=1)\n', (681, 706), False, 'from focal_mech.util.hash_routines import hash_to_classifier\n'), ((734, 775), 'focal_mech.lib.classify_mechanism.classify', 'classify', (['*inputs[event]'], {'kernel_degree': '(2)'}), '(*inputs[event], kernel_degree=2)\n', (742, 775), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((782, 828), 'focal_mech.lib.classify_mechanism.translate_to_sphharm', 'translate_to_sphharm', (['*result'], {'kernel_degree': '(2)'}), '(*result, kernel_degree=2)\n', (802, 828), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((839, 952), 'numpy.array', 'array', (['[Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1], Alm[2,\n 0], Alm[2, 1], Alm[2, 2]]'], {}), '([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1],\n Alm[2, 0], Alm[2, 1], Alm[2, 2]])\n', (844, 952), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((990, 1005), 'focal_mech.lib.correlate.corr_shear', 'corr_shear', (['Alm'], {}), '(Alm)\n', (1000, 1005), False, 'from focal_mech.lib.correlate import corr_shear\n'), ((1047, 1082), 'focal_mech.lib.sph_harm.get_sph_harm', 'get_sph_harm', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (1059, 1082), False, 'from focal_mech.lib.sph_harm import get_sph_harm\n'), ((1188, 1219), 'matplotlib.pylab.contour', 'contour', (['longi', 'lati', 'mech', '[0]'], {}), '(longi, lati, mech, [0])\n', (1195, 1219), False, 'from matplotlib.pylab import contour\n'), ((1275, 1288), 'numpy.rad2deg', 'rad2deg', (['pth1'], {}), '(pth1)\n', (1282, 1288), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((1345, 1358), 'numpy.rad2deg', 'rad2deg', (['pth2'], {}), '(pth2)\n', (1352, 1358), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((1373, 1399), 'numpy.rad2deg', 'rad2deg', (['hash_solns[event]'], {}), '(hash_solns[event])\n', (1380, 1399), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((1430, 1472), 'focal_mech.lib.classify_mechanism.classify', 'classify', (['*inputs[event2]'], {'kernel_degree': '(2)'}), '(*inputs[event2], kernel_degree=2)\n', (1438, 1472), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((1479, 1525), 'focal_mech.lib.classify_mechanism.translate_to_sphharm', 'translate_to_sphharm', (['*result'], {'kernel_degree': '(2)'}), '(*result, kernel_degree=2)\n', (1499, 1525), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((1536, 1649), 'numpy.array', 'array', (['[Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1], Alm[2,\n 0], Alm[2, 1], Alm[2, 2]]'], {}), '([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1],\n Alm[2, 0], Alm[2, 1], Alm[2, 2]])\n', (1541, 1649), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), 
((1687, 1702), 'focal_mech.lib.correlate.corr_shear', 'corr_shear', (['Alm'], {}), '(Alm)\n', (1697, 1702), False, 'from focal_mech.lib.correlate import corr_shear\n'), ((1744, 1779), 'focal_mech.lib.sph_harm.get_sph_harm', 'get_sph_harm', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (1756, 1779), False, 'from focal_mech.lib.sph_harm import get_sph_harm\n'), ((1885, 1916), 'matplotlib.pylab.contour', 'contour', (['longi', 'lati', 'mech', '[0]'], {}), '(longi, lati, mech, [0])\n', (1892, 1916), False, 'from matplotlib.pylab import contour\n'), ((1972, 1985), 'numpy.rad2deg', 'rad2deg', (['pth3'], {}), '(pth3)\n', (1979, 1985), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2042, 2055), 'numpy.rad2deg', 'rad2deg', (['pth4'], {}), '(pth4)\n', (2049, 2055), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2071, 2098), 'numpy.rad2deg', 'rad2deg', (['hash_solns[event2]'], {}), '(hash_solns[event2])\n', (2078, 2098), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2129, 2171), 'focal_mech.lib.classify_mechanism.classify', 'classify', (['*inputs[event3]'], {'kernel_degree': '(2)'}), '(*inputs[event3], kernel_degree=2)\n', (2137, 2171), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((2178, 2224), 'focal_mech.lib.classify_mechanism.translate_to_sphharm', 'translate_to_sphharm', (['*result'], {'kernel_degree': '(2)'}), '(*result, kernel_degree=2)\n', (2198, 2224), False, 'from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm\n'), ((2235, 2348), 'numpy.array', 'array', (['[Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1], Alm[2,\n 0], Alm[2, 1], Alm[2, 2]]'], {}), '([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1], Alm[2, -2], Alm[2, -1],\n Alm[2, 0], Alm[2, 1], Alm[2, 2]])\n', (2240, 2348), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2386, 2401), 'focal_mech.lib.correlate.corr_shear', 'corr_shear', (['Alm'], {}), '(Alm)\n', (2396, 2401), False, 'from focal_mech.lib.correlate import corr_shear\n'), ((2443, 2478), 'focal_mech.lib.sph_harm.get_sph_harm', 'get_sph_harm', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (2455, 2478), False, 'from focal_mech.lib.sph_harm import get_sph_harm\n'), ((2584, 2615), 'matplotlib.pylab.contour', 'contour', (['longi', 'lati', 'mech', '[0]'], {}), '(longi, lati, mech, [0])\n', (2591, 2615), False, 'from matplotlib.pylab import contour\n'), ((2671, 2684), 'numpy.rad2deg', 'rad2deg', (['pth5'], {}), '(pth5)\n', (2678, 2684), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2741, 2754), 'numpy.rad2deg', 'rad2deg', (['pth6'], {}), '(pth6)\n', (2748, 2754), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2770, 2797), 'numpy.rad2deg', 'rad2deg', (['hash_solns[event3]'], {}), '(hash_solns[event3])\n', (2777, 2797), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((2806, 2853), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""', 'figsize': '(10, 20)'}), "(facecolor='white', figsize=(10, 20))\n", (2816, 2853), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3162), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*svm_soln'], {}), '(*svm_soln)\n', (3151, 3162), False, 'from obspy.imaging.beachball import aux_plane\n'), ((3301, 3323), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*hash_focal'], {}), '(*hash_focal)\n', (3310, 3323), False, 'from obspy.imaging.beachball import aux_plane\n'), 
((3374, 3409), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event][:, 0]'], {}), '(polarity_data[event][:, 0])\n', (3381, 3409), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((3415, 3450), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event][:, 1]'], {}), '(polarity_data[event][:, 1])\n', (3422, 3450), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((4002, 4023), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*svm_soln2'], {}), '(*svm_soln2)\n', (4011, 4023), False, 'from obspy.imaging.beachball import aux_plane\n'), ((4163, 4186), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*hash_focal2'], {}), '(*hash_focal2)\n', (4172, 4186), False, 'from obspy.imaging.beachball import aux_plane\n'), ((4237, 4273), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event2][:, 0]'], {}), '(polarity_data[event2][:, 0])\n', (4244, 4273), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((4279, 4315), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event2][:, 1]'], {}), '(polarity_data[event2][:, 1])\n', (4286, 4315), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((4868, 4889), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*svm_soln3'], {}), '(*svm_soln3)\n', (4877, 4889), False, 'from obspy.imaging.beachball import aux_plane\n'), ((5029, 5052), 'obspy.imaging.beachball.aux_plane', 'aux_plane', (['*hash_focal3'], {}), '(*hash_focal3)\n', (5038, 5052), False, 'from obspy.imaging.beachball import aux_plane\n'), ((5103, 5139), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event3][:, 0]'], {}), '(polarity_data[event3][:, 0])\n', (5110, 5139), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((5145, 5181), 'numpy.rad2deg', 'rad2deg', (['polarity_data[event3][:, 1]'], {}), '(polarity_data[event3][:, 1])\n', (5152, 5181), False, 'from numpy import array, rad2deg, pi, mgrid, argmin\n'), ((5444, 5481), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(4.0)', 'h_pad': '(20.0)'}), '(pad=4.0, h_pad=20.0)\n', (5460, 5481), True, 'import matplotlib.pyplot as plt\n'), ((5483, 5493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5491, 5493), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.ensemble
import sklearn.metrics
import sklearn
import progressbar
import sklearn.model_selection
from plotnine import *
import pdb
import sys
sys.path.append("smooth_rf/")
import smooth_base
import smooth_level
# function
def average_depth(random_forest, data):
"""
calculate the average depth of each point (average across trees)
Arguments:
----------
random_forest : sklearn random forest model (fit)
data : array (n, p)
data frame that can be predicted from random_forest
Returns:
--------
average_depth : array (n,)
vector of average depth in forest of each data point
"""
# test:
#rf_fit
#smooth_rf_opt
#d1 = average_depth(rf_fit, data)
#d2 = average_depth(smooth_rf_opt, data)
#np.all(d1 == d2)
n_trees = len(random_forest.estimators_)
n_obs = data.shape[0]
depth = np.zeros(n_obs)
for t in random_forest.estimators_:
d_path = t.decision_path(data)
depth = depth + np.array(d_path.sum(axis = 1)).ravel()
return depth / n_trees
# start of analysis
data, y = smooth_base.generate_data(large_n = 650)
data_vis = pd.DataFrame(data = {"x1":data[:,0],
"x2":data[:,1],
"y":y},
columns = ["x1","x2","y"])
ggout = ggplot(data_vis) +\
geom_point(aes(x = "x1",y ="x2", color = "factor(y)")) +\
theme_minimal() +\
labs(x= "X1", y = "X2", color = "value (minus 100)")
rf = sklearn.ensemble.RandomForestRegressor(n_estimators = 300)
rf_fit = rf.fit(data,y)
smooth_rf_opt, smooth_rf_last ,_, _ = smooth_base.smooth(
rf_fit,
X_trained = data,
y_trained = y.ravel(),
X_tune = None,
y_tune = None,
resample_tune= False, # oob
no_constraint = False,
subgrad_max_num = 10000,
subgrad_t_fix = 1,
parents_all=True,
verbose = True,
all_trees = False,
initial_lamb_seed = None)
# test data
data_test, y_test = smooth_base.generate_data(large_n = 10000)
reorder = np.random.choice(data_test.shape[0],
size = data_test.shape[0], replace= False)
data_test = data_test[reorder,:]
y_test = y_test[reorder]
yhat_base = rf_fit.predict(data_test)
yhat_smooth = smooth_rf_opt.predict(data_test)
base_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_base)
smooth_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_smooth)
error_base = np.abs(yhat_base - y_test)
error_smooth = np.abs(yhat_smooth - y_test)
extreme_binary = np.max([np.max(np.abs(error_base)),
np.max(np.abs(error_smooth))])
col_vis = error_base - error_smooth
extreme = np.max(np.abs(col_vis))
mean_depth_test = average_depth(rf_fit,data_test)
data_vis = pd.DataFrame(data = {"X1":data_test[:,0],
"X2":data_test[:,1],
"y": y_test.ravel(),
"error_base":error_base.copy(),
"error_smooth":error_smooth.copy(),
"error":col_vis.copy(),
"mean_depth":mean_depth_test.copy()},
columns = ["X1","X2","y","error",
"error_base","error_smooth",
"mean_depth"])
a = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error"),
size = .5) +\
scale_color_continuous(name = "bwr",
limits= [-extreme, extreme]) +\
theme_bw() +\
labs(color = "Difference in Error",
title = r'Difference in Error ($Error_{base} - Error_{smooth}$)')
b = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error_base"),
size = .5) +\
scale_color_continuous(name = "binary",
limits= [0, extreme_binary]) +\
theme_bw() +\
labs(color = "Error",
title = "Error from Base Random Forest")
c = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error_smooth"),
size = .5) +\
scale_color_continuous(name = "binary",
limits= [0, extreme_binary]) +\
theme_bw() +\
labs(color = "Error",
title = "Error from Smoothed Random Forest")
d = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "factor(y)"),
size = .5) +\
theme_bw() +\
labs(color = "True Value (discrete)",
title = "Test Set True Values")
e = ggplot(data_vis,aes(x = "mean_depth", y = "error")) +\
geom_point(alpha = .1) +\
theme_bw() +\
labs(x = "Mean depth in Forest",
y = "Difference in Error",
title = "Lack of relationship between diff in errors and depth")
f = ggplot(data_vis, aes(x = "X1", y = "X2", color = "mean_depth")) +\
geom_point() +\
scale_color_continuous(name = "Blues") +\
theme_bw() +\
labs(color = "Mean depth in Forest",
title = "Mean depth in Forest (Depth averaged across trees)")
g = ggplot(data_vis) +\
geom_point(aes(x = "error_base", y = "error_smooth"),
alpha = .05) +\
geom_abline(intercept = 0, slope = 1) +\
theme_bw() +\
labs(x = "Error from Random Forest",
y = "Error from Smooth Random Forest",
title = "Comparing Errors Between Models",
subtitle = r"(total error: rf: %f vs srf: %f)" %\
(base_mse, smooth_mse))
save_as_pdf_pages([a + theme(figure_size = (8,6))],
filename = "images/diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([b + theme(figure_size = (8,6))],
filename = "images/error_base"+"_understanding_smoothing.pdf")
save_as_pdf_pages([c + theme(figure_size = (8,6))],
filename = "images/error_smooth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([d + theme(figure_size = (8,6))],
filename = "images/truth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([e + theme(figure_size = (8,6))],
filename = "images/mean_depth_diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([f + theme(figure_size = (8,6))],
filename = "images/mean_depth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([g + theme(figure_size = (8,6))],
filename = "images/error_vs_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([a + theme(figure_size = (8,6)),
b + theme(figure_size = (8,6)),
c + theme(figure_size = (8,6)),
d + theme(figure_size = (8,6)),
e + theme(figure_size = (8,6)),
f + theme(figure_size = (8,6)),
g + theme(figure_size = (8,6))],
filename = "images/understanding_smoothing.pdf")
# some of these observations might be due to how the class values were chosen
# we'll see
|
[
"numpy.abs",
"sklearn.ensemble.RandomForestRegressor",
"numpy.random.choice",
"sklearn.metrics.mean_squared_error",
"numpy.zeros",
"smooth_base.generate_data",
"pandas.DataFrame",
"sys.path.append"
] |
[((229, 258), 'sys.path.append', 'sys.path.append', (['"""smooth_rf/"""'], {}), "('smooth_rf/')\n", (244, 258), False, 'import sys\n'), ((1181, 1219), 'smooth_base.generate_data', 'smooth_base.generate_data', ([], {'large_n': '(650)'}), '(large_n=650)\n', (1206, 1219), False, 'import smooth_base\n'), ((1234, 1329), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x1': data[:, 0], 'x2': data[:, 1], 'y': y}", 'columns': "['x1', 'x2', 'y']"}), "(data={'x1': data[:, 0], 'x2': data[:, 1], 'y': y}, columns=[\n 'x1', 'x2', 'y'])\n", (1246, 1329), True, 'import pandas as pd\n'), ((1589, 1645), 'sklearn.ensemble.RandomForestRegressor', 'sklearn.ensemble.RandomForestRegressor', ([], {'n_estimators': '(300)'}), '(n_estimators=300)\n', (1627, 1645), False, 'import sklearn\n'), ((2434, 2474), 'smooth_base.generate_data', 'smooth_base.generate_data', ([], {'large_n': '(10000)'}), '(large_n=10000)\n', (2459, 2474), False, 'import smooth_base\n'), ((2488, 2564), 'numpy.random.choice', 'np.random.choice', (['data_test.shape[0]'], {'size': 'data_test.shape[0]', 'replace': '(False)'}), '(data_test.shape[0], size=data_test.shape[0], replace=False)\n', (2504, 2564), True, 'import numpy as np\n'), ((2751, 2818), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', ([], {'y_true': 'y_test', 'y_pred': 'yhat_base'}), '(y_true=y_test, y_pred=yhat_base)\n', (2785, 2818), False, 'import sklearn\n'), ((2836, 2905), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', ([], {'y_true': 'y_test', 'y_pred': 'yhat_smooth'}), '(y_true=y_test, y_pred=yhat_smooth)\n', (2870, 2905), False, 'import sklearn\n'), ((2925, 2951), 'numpy.abs', 'np.abs', (['(yhat_base - y_test)'], {}), '(yhat_base - y_test)\n', (2931, 2951), True, 'import numpy as np\n'), ((2967, 2995), 'numpy.abs', 'np.abs', (['(yhat_smooth - y_test)'], {}), '(yhat_smooth - y_test)\n', (2973, 2995), True, 'import numpy as np\n'), ((959, 974), 'numpy.zeros', 'np.zeros', (['n_obs'], {}), '(n_obs)\n', (967, 974), True, 'import numpy as np\n'), ((3159, 3174), 'numpy.abs', 'np.abs', (['col_vis'], {}), '(col_vis)\n', (3165, 3174), True, 'import numpy as np\n'), ((3029, 3047), 'numpy.abs', 'np.abs', (['error_base'], {}), '(error_base)\n', (3035, 3047), True, 'import numpy as np\n'), ((3081, 3101), 'numpy.abs', 'np.abs', (['error_smooth'], {}), '(error_smooth)\n', (3087, 3101), True, 'import numpy as np\n')]
|
"""
Several methods for generating graphs from the stochastic block model.
"""
import itertools
import math
import random
import scipy.sparse
import numpy as np
def _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed):
"""
Compute the number of possible edges between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of possible edges between these clusters
"""
if not same_cluster:
# The number is simply the product of the number of vertices
return c1_size * c2_size
else:
# The base number is n choose 2
possible_edges_between_clusters = int((c1_size * (c1_size - 1)) / 2)
# If we are allowed self-loops, then add them on
if self_loops:
possible_edges_between_clusters += c1_size
# The number is normally the same for undirected and directed graphs, unless the clusters are the same, in which
# case the number for the directed graph is double since we need to consider both directions of each edge.
if directed:
possible_edges_between_clusters *= 2
# But if we are allowed self-loops, then we shouldn't double them since there is only one 'direction'.
if directed and self_loops:
possible_edges_between_clusters -= c1_size
return possible_edges_between_clusters
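# Worked example of the counting above (cluster sizes are illustrative only):
#   _get_num_pos_edges(3, 4, False, False, False)  # -> 12 (3 * 4 cross-cluster pairs)
#   _get_num_pos_edges(3, 3, True, False, False)   # -> 3  (3 choose 2)
#   _get_num_pos_edges(3, 3, True, True, False)    # -> 6  (3 choose 2 plus 3 self-loops)
#   _get_num_pos_edges(3, 3, True, False, True)    # -> 6  (both edge directions counted)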
def _get_number_of_edges(c1_size, c2_size, prob, same_cluster, self_loops, directed):
"""
Compute the number of edges there will be between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param prob: The probability of an edge between the clusters
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of edges to generate between these clusters
"""
# We need to compute the number of possible edges
possible_edges_between_clusters = _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed)
# Sample the number of edges from the binomial distribution
return np.random.binomial(possible_edges_between_clusters, prob)
def _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=False):
"""
Given a list of cluster sizes, and a square matrix Q, generates edges for a graph in the following way.
For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with
probability Q_{i, j}.
For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle).
For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively.
May return self-loops. The calling code can decide what to do with them.
Returns edges as pairs (u, v) where u and v are integers giving the index of the respective vertices.
:param cluster_sizes: a list giving the number of vertices in each cluster
:param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should
be symmetric in the undirected case.
:param directed: Whether to generate a directed graph (default is false).
:return: Edges (u, v).
"""
# We will iterate over the clusters. This variable keeps track of the index of the first vertex in the current
# cluster_1.
c1_base_index = 0
for cluster_1 in range(len(cluster_sizes)):
# Keep track of the index of the first vertex in the current cluster_2
c2_base_index = c1_base_index
# If we are constructing a directed graph, we need to consider all values of cluster_2.
# Otherwise, we will consider only the clusters with an index >= cluster_1.
if directed:
second_clusters = range(len(cluster_sizes))
c2_base_index = 0
else:
second_clusters = range(cluster_1, len(cluster_sizes))
for cluster_2 in second_clusters:
# Compute the number of edges between these two clusters
num_edges = _get_number_of_edges(cluster_sizes[cluster_1],
cluster_sizes[cluster_2],
prob_mat_q[cluster_1][cluster_2],
cluster_1 == cluster_2,
True,
directed)
# Sample this number of edges. TODO: correct for possible double-sampling of edges
num_possible_edges = (cluster_sizes[cluster_1] * cluster_sizes[cluster_2]) - 1
for i in range(num_edges):
                edge_idx = random.randint(0, num_possible_edges)
                # Decode the flat index into a (u, v) pair: u lies in cluster_1 and
                # v in cluster_2, so the divisor/modulus must be the size of cluster_2
                # (using cluster_1's size here would index out of range when the two
                # clusters have different sizes).
                u = c1_base_index + int(edge_idx / cluster_sizes[cluster_2])
                v = c2_base_index + (edge_idx % cluster_sizes[cluster_2])
yield u, v
# Update the base index for the second cluster
c2_base_index += cluster_sizes[cluster_2]
# Update the base index of this cluster
c1_base_index += cluster_sizes[cluster_1]
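# Illustrative use of the generator above (cluster sizes and edge probabilities
# are arbitrary example values):
#
#   for u, v in _generate_sbm_edges([3, 3], [[0.9, 0.1], [0.1, 0.9]]):
#       print(u, v)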
def sbm_adjmat(cluster_sizes, prob_mat_q, directed=False, self_loops=False):
"""
Generate a graph from the stochastic block model.
The list cluster_sizes gives the number of vertices inside each cluster and the matrix Q gives the probability of
each edge between pairs of clusters.
For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with
probability Q_{i, j}.
For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle).
For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively.
Returns the adjacency matrix of the graph as a sparse scipy matrix in the CSR format.
:param cluster_sizes: The number of vertices in each cluster.
:param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should
be symmetric in the undirected case.
:param directed: Whether to generate a directed graph (default is false).
:param self_loops: Whether to generate self-loops (default is false).
:return: The sparse adjacency matrix of the graph.
"""
# Initialize the adjacency matrix
adj_mat = scipy.sparse.lil_matrix((sum(cluster_sizes), sum(cluster_sizes)))
# Generate the edges in the graph
for (u, v) in _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=directed):
if u != v or self_loops:
# Add this edge to the adjacency matrix.
adj_mat[u, v] = 1
if not directed:
adj_mat[v, u] = 1
# Reformat the output matrix to the CSR format
return adj_mat.tocsr()
def sbm_adjmat_equal_clusters(n, k, prob_mat_q, directed=False):
"""
Generate a graph from the general stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by the probability matrix Q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param prob_mat_q: q[i][j] gives the probability of an edge between clusters i and j
:param directed: Whether to generate a directed graph.
:return: The sparse adjacency matrix of the graph.
"""
return sbm_adjmat([int(n/k)] * k, prob_mat_q, directed=directed)
def ssbm_adjmat(n, k, p, q, directed=False):
"""
Generate a graph from the symmetric stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by p. The probability of an edge between two different clusters is q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param p: The probability of an edge inside a cluster.
:param q: The probability of an edge between clusters.
:param directed: Whether to generate a directed graph.
:return: The sparse adjacency matrix of the graph.
"""
# Every cluster has the same size.
cluster_sizes = [int(n/k)] * k
# Construct the k*k probability matrix Q. The off-diagonal entries are all q and the diagonal entries are all p.
prob_mat_q = []
for row_num in range(k):
new_row = [q] * k
new_row[row_num] = p
prob_mat_q.append(new_row)
# Call the general sbm method.
return sbm_adjmat(cluster_sizes, prob_mat_q, directed=directed)
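# Minimal usage sketch (sizes and probabilities chosen only for illustration):
#
#   adj = ssbm_adjmat(1000, 5, p=0.1, q=0.01)                 # 1000x1000 CSR matrix
#   adj2 = sbm_adjmat([50, 150], [[0.5, 0.05], [0.05, 0.3]])  # unequal clusters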
|
[
"random.randint",
"numpy.random.binomial"
] |
[((2449, 2506), 'numpy.random.binomial', 'np.random.binomial', (['possible_edges_between_clusters', 'prob'], {}), '(possible_edges_between_clusters, prob)\n', (2467, 2506), True, 'import numpy as np\n'), ((5081, 5118), 'random.randint', 'random.randint', (['(0)', 'num_possible_edges'], {}), '(0, num_possible_edges)\n', (5095, 5118), False, 'import random\n')]
|
import numpy as np
import shapely.geometry as geom
class Bbox:
def __init__(self, name, part_id, depth_image, xyz, box_size, projection):
if not isinstance(xyz, np.ndarray):
raise ValueError("xyz must be an np.ndarray")
self.name = name
self.id = part_id
self.center = np.array([xyz[0], xyz[1]])
self.z = xyz[2]
self.im_d = depth_image
self.im_d[self.im_d == 0] = 255
x_delta_scaled = box_size[0]/2
self.weight = 1.0
y_delta_scaled = box_size[1]/2
self.xmin, self.xmax = xyz[0]-x_delta_scaled, xyz[0]+x_delta_scaled
self.ymin, self.ymax = xyz[1]-y_delta_scaled, xyz[1]+y_delta_scaled
self.poly = geom.box(self.xmin, self.ymin, self.xmax, self.ymax)
self.color_min = (int(projection['fx']*self.xmin/xyz[2] + projection['cx']),
int(projection['fy']*self.ymin/xyz[2] + projection['cy']))
self.color_max = (int(projection['fx']*self.xmax/xyz[2] + projection['cx']),
int(projection['fy']*self.ymax/xyz[2] + projection['cy']))
self.depth_min = (int(projection['fx_d']*self.xmin/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymin/xyz[2] + projection['cy_d']))
self.depth_max = (int(projection['fx_d']*self.xmax/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymax/xyz[2] + projection['cy_d']))
def __str__(self):
return "{{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}}".format(self.xmin, self.ymin, self.xmax, self.ymax)
def __repr__(self):
return "(bbox: {{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}})".format(self.xmin, self.ymin, self.xmax, self.ymax)
def size(self):
return (self.xmax - self.xmin) * (self.ymax - self.ymin)
def get_bb_depth_matrix(self):
""" Get the portion of the depth image inside the bounding box """
min_x, max_x = sorted((self.depth_min[0], self.depth_max[0]))
min_y, max_y = sorted((self.depth_min[1], self.depth_max[1]))
bounded_im = self.im_d[min_y: max_y+1, min_x: max_x+1]
return bounded_im
def overlap(self, bb2):
dx = min(self.xmax, bb2.xmax) - max(self.xmin, bb2.xmin)
dy = min(self.ymax, bb2.ymax) - max(self.ymin, bb2.ymin)
if (dx>=0) and (dy>=0):
return dx*dy
return 0
def p_over(self, bb2):
return self.overlap(bb2)/(min(self.size(), bb2.size()))
def p_depth(self, bb2):
bounded_im1 = self.get_bb_depth_matrix()
bounded_im2 = bb2.get_bb_depth_matrix()
        # numpy array slices have no `.empty` attribute; check for empty crops via `.size`
        print(bounded_im1.size == 0 or bounded_im2.size == 0)
mean1 = np.mean(bounded_im1)
mean2 = np.mean(bounded_im2)
stdev1 = np.std(bounded_im1)
stdev2 = np.std(bounded_im2)
half_negative_square_of_mean_difference = -1/2 * (mean1 - mean2) ** 2
term1_power = half_negative_square_of_mean_difference / (stdev1 ** 2)
term2_power = half_negative_square_of_mean_difference / (stdev2 ** 2)
out = (np.exp(term1_power) + np.exp(term2_power))/2
return out
def prob(self, bb2, alpha):
return alpha * self.p_over(bb2) + (1-alpha) * self.p_depth(bb2)
|
[
"numpy.mean",
"shapely.geometry.box",
"numpy.exp",
"numpy.array",
"numpy.std"
] |
[((318, 344), 'numpy.array', 'np.array', (['[xyz[0], xyz[1]]'], {}), '([xyz[0], xyz[1]])\n', (326, 344), True, 'import numpy as np\n'), ((717, 769), 'shapely.geometry.box', 'geom.box', (['self.xmin', 'self.ymin', 'self.xmax', 'self.ymax'], {}), '(self.xmin, self.ymin, self.xmax, self.ymax)\n', (725, 769), True, 'import shapely.geometry as geom\n'), ((2697, 2717), 'numpy.mean', 'np.mean', (['bounded_im1'], {}), '(bounded_im1)\n', (2704, 2717), True, 'import numpy as np\n'), ((2734, 2754), 'numpy.mean', 'np.mean', (['bounded_im2'], {}), '(bounded_im2)\n', (2741, 2754), True, 'import numpy as np\n'), ((2772, 2791), 'numpy.std', 'np.std', (['bounded_im1'], {}), '(bounded_im1)\n', (2778, 2791), True, 'import numpy as np\n'), ((2809, 2828), 'numpy.std', 'np.std', (['bounded_im2'], {}), '(bounded_im2)\n', (2815, 2828), True, 'import numpy as np\n'), ((3079, 3098), 'numpy.exp', 'np.exp', (['term1_power'], {}), '(term1_power)\n', (3085, 3098), True, 'import numpy as np\n'), ((3101, 3120), 'numpy.exp', 'np.exp', (['term2_power'], {}), '(term2_power)\n', (3107, 3120), True, 'import numpy as np\n')]
|
import os
import numpy as np
import pytest
import vtk
import pyvista
from pyvista import examples
from pyvista.plotting import system_supports_plotting
beam = pyvista.UnstructuredGrid(examples.hexbeamfile)
# create structured grid
x = np.arange(-10, 10, 2)
y = np.arange(-10, 10, 2)
z = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(x, y, z)
sgrid = pyvista.StructuredGrid(x, y, z)
try:
test_path = os.path.dirname(os.path.abspath(__file__))
test_data_path = os.path.join(test_path, 'test_data')
except:
test_path = '/home/alex/afrl/python/source/pyvista/tests'
def test_volume():
assert beam.volume > 0.0
@pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting")
def test_struct_example():
# create and plot structured grid
grid = examples.load_structured()
cpos = grid.plot(off_screen=True) # basic plot
assert isinstance(cpos, pyvista.CameraPosition)
# Plot mean curvature
cpos_curv = grid.plot_curvature(off_screen=True)
assert isinstance(cpos_curv, pyvista.CameraPosition)
def test_init_from_structured():
unstruct_grid = pyvista.UnstructuredGrid(sgrid)
assert unstruct_grid.points.shape[0] == x.size
assert np.all(unstruct_grid.celltypes == 12)
def test_init_from_unstructured():
grid = pyvista.UnstructuredGrid(beam, deep=True)
grid.points += 1
assert not np.any(grid.points == beam.points)
def test_init_bad_input():
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1))
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1),
np.array(1),
np.array(1),
'woa')
def test_init_from_arrays():
offset = np.array([0, 9], np.int8)
cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)
cell1 = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
cell2 = np.array([[0, 0, 2],
[1, 0, 2],
[1, 1, 2],
[0, 1, 2],
[0, 0, 3],
[1, 0, 3],
[1, 1, 3],
[0, 1, 3]])
points = np.vstack((cell1, cell2)).astype(np.int32)
grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
assert grid.n_cells == 2
assert np.allclose(grid.offset, offset)
def test_surface_indices():
surf = beam.extract_surface()
surf_ind = surf.point_arrays['vtkOriginalPointIds']
assert np.allclose(surf_ind, beam.surface_indices())
def test_extract_feature_edges():
edges = beam.extract_feature_edges(90)
assert edges.n_points
edges = beam.extract_feature_edges(180)
assert not edges.n_points
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtu', 'vtk'])
def test_save(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
beam.save(filename, binary)
grid = pyvista.UnstructuredGrid(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
grid = pyvista.read(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
assert isinstance(grid, pyvista.UnstructuredGrid)
def test_init_bad_filename():
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid(filename)
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('not a file')
def test_save_bad_extension():
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('file.abc')
def test_linear_copy():
# need a grid with quadratic cells
lgrid = beam.linear_copy()
assert np.all(lgrid.celltypes < 20)
def test_extract_cells():
ind = [1, 2, 3]
part_beam = beam.extract_cells(ind)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
mask = np.zeros(beam.n_cells, np.bool)
mask[:3] = True
part_beam = beam.extract_cells(mask)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
def test_merge():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_not_main():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False,
main_has_priority=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_list():
grid_a = beam.copy()
grid_a.points[:, 0] += 1
grid_b = beam.copy()
grid_b.points[:, 1] += 1
grid_a.merge([beam, grid_b], inplace=True, merge_points=True)
assert grid_a.n_points > beam.n_points
def test_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
grid = pyvista.StructuredGrid(x, y, z)
assert np.allclose(sgrid.x, x)
assert np.allclose(sgrid.y, y)
assert np.allclose(sgrid.z, z)
grid_a = pyvista.StructuredGrid(grid)
assert np.allclose(grid_a.points, grid.points)
def test_invalid_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
z = z[:, :, :2]
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(x, y, z)
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vts', 'vtk'])
def test_save_structured(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
sgrid.save(filename, binary)
grid = pyvista.StructuredGrid(filename)
assert grid.x.shape == sgrid.y.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
grid = pyvista.read(filename)
assert grid.x.shape == sgrid.y.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
assert isinstance(grid, pyvista.StructuredGrid)
def test_load_structured_bad_filename():
with pytest.raises(Exception):
pyvista.StructuredGrid('not a file')
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(filename)
def test_create_rectilinear_grid_from_specs():
# 3D example
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 5)
zrng = np.arange(-10, 10, 1)
grid = pyvista.RectilinearGrid(xrng)
assert grid.n_cells == 9
assert grid.n_points == 10
grid = pyvista.RectilinearGrid(xrng, yrng)
assert grid.n_cells == 9*3
assert grid.n_points == 10*4
grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
assert grid.n_cells == 9*3*19
assert grid.n_points == 10*4*20
assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0]
# 2D example
cell_spacings = np.array([1., 1., 2., 2., 5., 10.])
x_coordinates = np.cumsum(cell_spacings)
y_coordinates = np.cumsum(cell_spacings)
grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates)
assert grid.n_cells == 5*5
assert grid.n_points == 6*6
assert grid.bounds == [1.,21., 1.,21., 0.,0.]
def test_create_rectilinear_after_init():
x = np.array([0,1,2])
y = np.array([0,5,8])
z = np.array([3,2,1])
grid = pyvista.RectilinearGrid()
grid.x = x
assert grid.dimensions == [3, 1, 1]
grid.y = y
assert grid.dimensions == [3, 3, 1]
grid.z = z
assert grid.dimensions == [3, 3, 3]
assert np.allclose(grid.x, x)
assert np.allclose(grid.y, y)
assert np.allclose(grid.z, z)
def test_create_rectilinear_grid_from_file():
grid = examples.load_rectilinear()
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_read_rectilinear_grid_from_file():
grid = pyvista.read(examples.rectfile)
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_cast_rectilinear_grid():
grid = pyvista.read(examples.rectfile)
structured = grid.cast_to_structured_grid()
assert isinstance(structured, pyvista.StructuredGrid)
assert structured.n_points == grid.n_points
assert structured.n_cells == grid.n_cells
assert np.allclose(structured.points, grid.points)
for k, v in grid.point_arrays.items():
assert np.allclose(structured.point_arrays[k], v)
for k, v in grid.cell_arrays.items():
assert np.allclose(structured.cell_arrays[k], v)
def test_create_uniform_grid_from_specs():
# create UniformGrid
dims = [10, 10, 10]
grid = pyvista.UniformGrid(dims) # Using default spacing and origin
assert grid.dimensions == [10, 10, 10]
assert grid.extent == [0, 9, 0, 9, 0, 9]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [1.0, 1.0, 1.0]
spacing = [2, 1, 5]
grid = pyvista.UniformGrid(dims, spacing) # Using default origin
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [2.0, 1.0, 5.0]
origin = [10, 35, 50]
grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [10.0, 35.0, 50.0]
assert grid.spacing == [2.0, 1.0, 5.0]
assert grid.dimensions == [10, 10, 10]
def test_uniform_setters():
grid = pyvista.UniformGrid()
grid.dimensions = [10, 10, 10]
assert grid.GetDimensions() == (10, 10, 10)
assert grid.dimensions == [10, 10, 10]
grid.spacing = [5, 2, 1]
assert grid.GetSpacing() == (5, 2, 1)
assert grid.spacing == [5, 2, 1]
grid.origin = [6, 27.7, 19.8]
assert grid.GetOrigin() == (6, 27.7, 19.8)
assert grid.origin == [6, 27.7, 19.8]
def test_create_uniform_grid_from_file():
grid = examples.load_uniform()
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_read_uniform_grid_from_file():
grid = pyvista.read(examples.uniformfile)
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_cast_uniform_to_structured():
grid = examples.load_uniform()
structured = grid.cast_to_structured_grid()
assert structured.n_points == grid.n_points
assert structured.n_arrays == grid.n_arrays
assert structured.bounds == grid.bounds
def test_cast_uniform_to_rectilinear():
grid = examples.load_uniform()
rectilinear = grid.cast_to_rectilinear_grid()
assert rectilinear.n_points == grid.n_points
assert rectilinear.n_arrays == grid.n_arrays
assert rectilinear.bounds == grid.bounds
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtr', 'vtk'])
def test_save_rectilinear(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_rectilinear()
ogrid.save(filename, binary)
grid = pyvista.RectilinearGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.RectilinearGrid)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vti', 'vtk'])
def test_save_uniform(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_uniform()
ogrid.save(filename, binary)
grid = pyvista.UniformGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.UniformGrid)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
def test_grid_points():
"""Test the points methods on UniformGrid and RectilinearGrid"""
points = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
grid = pyvista.UniformGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert grid.spacing == [1, 1, 1]
assert grid.origin == [0., 0., 0.]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
opts = np.c_[grid.x, grid.y, grid.z]
assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0))
# Now test rectilinear grid
del grid
grid = pyvista.RectilinearGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
def test_grid_extract_selection_points():
grid = pyvista.UnstructuredGrid(sgrid)
sub_grid = grid.extract_selection_points([0])
assert sub_grid.n_cells == 1
sub_grid = grid.extract_selection_points(range(100))
assert sub_grid.n_cells > 1
def test_gaussian_smooth():
uniform = examples.load_uniform()
active = uniform.active_scalars_name
values = uniform.active_scalars
uniform = uniform.gaussian_smooth(scalars=active)
assert uniform.active_scalars_name == active
assert uniform.active_scalars.shape == values.shape
assert not np.all(uniform.active_scalars == values)
values = uniform.active_scalars
uniform = uniform.gaussian_smooth(radius_factor=5, std_dev=1.3)
assert uniform.active_scalars_name == active
assert uniform.active_scalars.shape == values.shape
assert not np.all(uniform.active_scalars == values)
|
[
"numpy.array",
"pyvista.UnstructuredGrid",
"numpy.arange",
"pyvista.UniformGrid",
"pyvista.plotting.system_supports_plotting",
"pyvista.examples.load_structured",
"numpy.vstack",
"numpy.meshgrid",
"numpy.allclose",
"numpy.any",
"pytest.raises",
"pyvista.examples.load_uniform",
"pyvista.StructuredGrid",
"os.path.abspath",
"numpy.unique",
"os.path.join",
"pyvista.examples.load_rectilinear",
"pyvista.RectilinearGrid",
"pytest.mark.parametrize",
"numpy.zeros",
"pyvista.read",
"numpy.cumsum",
"numpy.all"
] |
[((162, 208), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['examples.hexbeamfile'], {}), '(examples.hexbeamfile)\n', (186, 208), False, 'import pyvista\n'), ((239, 260), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (248, 260), True, 'import numpy as np\n'), ((265, 286), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (274, 286), True, 'import numpy as np\n'), ((291, 312), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (300, 312), True, 'import numpy as np\n'), ((323, 343), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (334, 343), True, 'import numpy as np\n'), ((352, 383), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (374, 383), False, 'import pyvista\n'), ((3144, 3192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (3167, 3192), False, 'import pytest\n'), ((3194, 3246), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vtu', 'vtk']"], {}), "('extension', ['vtu', 'vtk'])\n", (3217, 3246), False, 'import pytest\n'), ((6169, 6217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (6192, 6217), False, 'import pytest\n'), ((6219, 6271), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vts', 'vtk']"], {}), "('extension', ['vts', 'vtk'])\n", (6242, 6271), False, 'import pytest\n'), ((11768, 11816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (11791, 11816), False, 'import pytest\n'), ((11818, 11870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vtr', 'vtk']"], {}), "('extension', ['vtr', 'vtk'])\n", (11841, 11870), False, 'import pytest\n'), ((12618, 12666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""binary"""', '[True, False]'], {}), "('binary', [True, False])\n", (12641, 12666), False, 'import pytest\n'), ((12668, 12720), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""extension"""', "['vti', 'vtk']"], {}), "('extension', ['vti', 'vtk'])\n", (12691, 12720), False, 'import pytest\n'), ((470, 506), 'os.path.join', 'os.path.join', (['test_path', '"""test_data"""'], {}), "(test_path, 'test_data')\n", (482, 506), False, 'import os\n'), ((803, 829), 'pyvista.examples.load_structured', 'examples.load_structured', ([], {}), '()\n', (827, 829), False, 'from pyvista import examples\n'), ((1126, 1157), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['sgrid'], {}), '(sgrid)\n', (1150, 1157), False, 'import pyvista\n'), ((1220, 1257), 'numpy.all', 'np.all', (['(unstruct_grid.celltypes == 12)'], {}), '(unstruct_grid.celltypes == 12)\n', (1226, 1257), True, 'import numpy as np\n'), ((1306, 1347), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['beam'], {'deep': '(True)'}), '(beam, deep=True)\n', (1330, 1347), False, 'import pyvista\n'), ((1866, 1891), 'numpy.array', 'np.array', (['[0, 9]', 'np.int8'], {}), '([0, 9], np.int8)\n', (1874, 1891), True, 'import numpy as np\n'), ((1904, 1974), 'numpy.array', 'np.array', (['[8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15]'], {}), '([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])\n', (1912, 1974), True, 'import numpy as np\n'), ((1991, 2051), 'numpy.array', 'np.array', (['[vtk.VTK_HEXAHEDRON, 
vtk.VTK_HEXAHEDRON]', 'np.int32'], {}), '([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)\n', (1999, 2051), True, 'import numpy as np\n'), ((2065, 2167), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1\n ], [0, 1, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1],\n [1, 1, 1], [0, 1, 1]])\n', (2073, 2167), True, 'import numpy as np\n'), ((2331, 2433), 'numpy.array', 'np.array', (['[[0, 0, 2], [1, 0, 2], [1, 1, 2], [0, 1, 2], [0, 0, 3], [1, 0, 3], [1, 1, 3\n ], [0, 1, 3]]'], {}), '([[0, 0, 2], [1, 0, 2], [1, 1, 2], [0, 1, 2], [0, 0, 3], [1, 0, 3],\n [1, 1, 3], [0, 1, 3]])\n', (2339, 2433), True, 'import numpy as np\n'), ((2652, 2710), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['offset', 'cells', 'cell_type', 'points'], {}), '(offset, cells, cell_type, points)\n', (2676, 2710), False, 'import pyvista\n'), ((2752, 2784), 'numpy.allclose', 'np.allclose', (['grid.offset', 'offset'], {}), '(grid.offset, offset)\n', (2763, 2784), True, 'import numpy as np\n'), ((3403, 3437), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['filename'], {}), '(filename)\n', (3427, 3437), False, 'import pyvista\n'), ((3548, 3570), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (3560, 3570), False, 'import pyvista\n'), ((3770, 3809), 'os.path.join', 'os.path.join', (['test_path', '"""test_grid.py"""'], {}), "(test_path, 'test_grid.py')\n", (3782, 3809), False, 'import os\n'), ((4212, 4240), 'numpy.all', 'np.all', (['(lgrid.celltypes < 20)'], {}), '(lgrid.celltypes < 20)\n', (4218, 4240), True, 'import numpy as np\n'), ((4428, 4459), 'numpy.zeros', 'np.zeros', (['beam.n_cells', 'np.bool'], {}), '(beam.n_cells, np.bool)\n', (4436, 4459), True, 'import numpy as np\n'), ((5509, 5530), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5518, 5530), True, 'import numpy as np\n'), ((5542, 5563), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5551, 5563), True, 'import numpy as np\n'), ((5575, 5596), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5584, 5596), True, 'import numpy as np\n'), ((5611, 5640), 'numpy.meshgrid', 'np.meshgrid', (['xrng', 'yrng', 'zrng'], {}), '(xrng, yrng, zrng)\n', (5622, 5640), True, 'import numpy as np\n'), ((5652, 5683), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (5674, 5683), False, 'import pyvista\n'), ((5695, 5718), 'numpy.allclose', 'np.allclose', (['sgrid.x', 'x'], {}), '(sgrid.x, x)\n', (5706, 5718), True, 'import numpy as np\n'), ((5730, 5753), 'numpy.allclose', 'np.allclose', (['sgrid.y', 'y'], {}), '(sgrid.y, y)\n', (5741, 5753), True, 'import numpy as np\n'), ((5765, 5788), 'numpy.allclose', 'np.allclose', (['sgrid.z', 'z'], {}), '(sgrid.z, z)\n', (5776, 5788), True, 'import numpy as np\n'), ((5803, 5831), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['grid'], {}), '(grid)\n', (5825, 5831), False, 'import pyvista\n'), ((5843, 5882), 'numpy.allclose', 'np.allclose', (['grid_a.points', 'grid.points'], {}), '(grid_a.points, grid.points)\n', (5854, 5882), True, 'import numpy as np\n'), ((5932, 5953), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5941, 5953), True, 'import numpy as np\n'), ((5965, 5986), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (5974, 5986), True, 'import numpy as np\n'), ((5998, 6019), 'numpy.arange', 'np.arange', 
(['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (6007, 6019), True, 'import numpy as np\n'), ((6034, 6063), 'numpy.meshgrid', 'np.meshgrid', (['xrng', 'yrng', 'zrng'], {}), '(xrng, yrng, zrng)\n', (6045, 6063), True, 'import numpy as np\n'), ((6440, 6472), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['filename'], {}), '(filename)\n', (6462, 6472), False, 'import pyvista\n'), ((6601, 6623), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (6613, 6623), False, 'import pyvista\n'), ((6931, 6970), 'os.path.join', 'os.path.join', (['test_path', '"""test_grid.py"""'], {}), "(test_path, 'test_grid.py')\n", (6943, 6970), False, 'import os\n'), ((7131, 7152), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(2)'], {}), '(-10, 10, 2)\n', (7140, 7152), True, 'import numpy as np\n'), ((7164, 7185), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(5)'], {}), '(-10, 10, 5)\n', (7173, 7185), True, 'import numpy as np\n'), ((7197, 7218), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(1)'], {}), '(-10, 10, 1)\n', (7206, 7218), True, 'import numpy as np\n'), ((7230, 7259), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xrng'], {}), '(xrng)\n', (7253, 7259), False, 'import pyvista\n'), ((7331, 7366), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xrng', 'yrng'], {}), '(xrng, yrng)\n', (7354, 7366), False, 'import pyvista\n'), ((7442, 7483), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['xrng', 'yrng', 'zrng'], {}), '(xrng, yrng, zrng)\n', (7465, 7483), False, 'import pyvista\n'), ((7651, 7692), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0, 2.0, 5.0, 10.0]'], {}), '([1.0, 1.0, 2.0, 2.0, 5.0, 10.0])\n', (7659, 7692), True, 'import numpy as np\n'), ((7707, 7731), 'numpy.cumsum', 'np.cumsum', (['cell_spacings'], {}), '(cell_spacings)\n', (7716, 7731), True, 'import numpy as np\n'), ((7752, 7776), 'numpy.cumsum', 'np.cumsum', (['cell_spacings'], {}), '(cell_spacings)\n', (7761, 7776), True, 'import numpy as np\n'), ((7788, 7841), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['x_coordinates', 'y_coordinates'], {}), '(x_coordinates, y_coordinates)\n', (7811, 7841), False, 'import pyvista\n'), ((8007, 8026), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (8015, 8026), True, 'import numpy as np\n'), ((8033, 8052), 'numpy.array', 'np.array', (['[0, 5, 8]'], {}), '([0, 5, 8])\n', (8041, 8052), True, 'import numpy as np\n'), ((8059, 8078), 'numpy.array', 'np.array', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (8067, 8078), True, 'import numpy as np\n'), ((8088, 8113), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', ([], {}), '()\n', (8111, 8113), False, 'import pyvista\n'), ((8290, 8312), 'numpy.allclose', 'np.allclose', (['grid.x', 'x'], {}), '(grid.x, x)\n', (8301, 8312), True, 'import numpy as np\n'), ((8324, 8346), 'numpy.allclose', 'np.allclose', (['grid.y', 'y'], {}), '(grid.y, y)\n', (8335, 8346), True, 'import numpy as np\n'), ((8358, 8380), 'numpy.allclose', 'np.allclose', (['grid.z', 'z'], {}), '(grid.z, z)\n', (8369, 8380), True, 'import numpy as np\n'), ((8440, 8467), 'pyvista.examples.load_rectilinear', 'examples.load_rectilinear', ([], {}), '()\n', (8465, 8467), False, 'from pyvista import examples\n'), ((8690, 8721), 'pyvista.read', 'pyvista.read', (['examples.rectfile'], {}), '(examples.rectfile)\n', (8702, 8721), False, 'import pyvista\n'), ((8935, 8966), 'pyvista.read', 'pyvista.read', (['examples.rectfile'], {}), '(examples.rectfile)\n', (8947, 8966), False, 'import pyvista\n'), ((9178, 9221), 'numpy.allclose', 
'np.allclose', (['structured.points', 'grid.points'], {}), '(structured.points, grid.points)\n', (9189, 9221), True, 'import numpy as np\n'), ((9528, 9553), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims'], {}), '(dims)\n', (9547, 9553), False, 'import pyvista\n'), ((9797, 9831), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims', 'spacing'], {}), '(dims, spacing)\n', (9816, 9831), False, 'import pyvista\n'), ((10020, 10062), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims', 'spacing', 'origin'], {}), '(dims, spacing, origin)\n', (10039, 10062), False, 'import pyvista\n'), ((10304, 10325), 'pyvista.UniformGrid', 'pyvista.UniformGrid', ([], {}), '()\n', (10323, 10325), False, 'import pyvista\n'), ((10738, 10761), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (10759, 10761), False, 'from pyvista import examples\n'), ((11005, 11039), 'pyvista.read', 'pyvista.read', (['examples.uniformfile'], {}), '(examples.uniformfile)\n', (11017, 11039), False, 'import pyvista\n'), ((11283, 11306), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (11304, 11306), False, 'from pyvista import examples\n'), ((11548, 11571), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (11569, 11571), False, 'from pyvista import examples\n'), ((12007, 12034), 'pyvista.examples.load_rectilinear', 'examples.load_rectilinear', ([], {}), '()\n', (12032, 12034), False, 'from pyvista import examples\n'), ((12079, 12112), 'pyvista.RectilinearGrid', 'pyvista.RectilinearGrid', (['filename'], {}), '(filename)\n', (12102, 12112), False, 'import pyvista\n'), ((12165, 12193), 'numpy.allclose', 'np.allclose', (['grid.x', 'ogrid.x'], {}), '(grid.x, ogrid.x)\n', (12176, 12193), True, 'import numpy as np\n'), ((12205, 12233), 'numpy.allclose', 'np.allclose', (['grid.y', 'ogrid.y'], {}), '(grid.y, ogrid.y)\n', (12216, 12233), True, 'import numpy as np\n'), ((12245, 12273), 'numpy.allclose', 'np.allclose', (['grid.z', 'ogrid.z'], {}), '(grid.z, ogrid.z)\n', (12256, 12273), True, 'import numpy as np\n'), ((12332, 12354), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (12344, 12354), False, 'import pyvista\n'), ((12460, 12488), 'numpy.allclose', 'np.allclose', (['grid.x', 'ogrid.x'], {}), '(grid.x, ogrid.x)\n', (12471, 12488), True, 'import numpy as np\n'), ((12500, 12528), 'numpy.allclose', 'np.allclose', (['grid.y', 'ogrid.y'], {}), '(grid.y, ogrid.y)\n', (12511, 12528), True, 'import numpy as np\n'), ((12540, 12568), 'numpy.allclose', 'np.allclose', (['grid.z', 'ogrid.z'], {}), '(grid.z, ogrid.z)\n', (12551, 12568), True, 'import numpy as np\n'), ((12853, 12876), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (12874, 12876), False, 'from pyvista import examples\n'), ((12921, 12950), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['filename'], {}), '(filename)\n', (12940, 12950), False, 'import pyvista\n'), ((13130, 13152), 'pyvista.read', 'pyvista.read', (['filename'], {}), '(filename)\n', (13142, 13152), False, 'import pyvista\n'), ((13478, 13580), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1\n ], [0, 1, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1],\n [1, 1, 1], [0, 1, 1]])\n', (13486, 13580), True, 'import numpy as np\n'), ((13749, 13770), 'pyvista.UniformGrid', 'pyvista.UniformGrid', ([], {}), '()\n', (13768, 13770), False, 'import pyvista\n'), ((14166, 14191), 'pyvista.RectilinearGrid', 
'pyvista.RectilinearGrid', ([], {}), '()\n', (14189, 14191), False, 'import pyvista\n'), ((14394, 14425), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['sgrid'], {}), '(sgrid)\n', (14418, 14425), False, 'import pyvista\n'), ((14643, 14666), 'pyvista.examples.load_uniform', 'examples.load_uniform', ([], {}), '()\n', (14664, 14666), False, 'from pyvista import examples\n'), ((422, 447), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (437, 447), False, 'import os\n'), ((653, 679), 'pyvista.plotting.system_supports_plotting', 'system_supports_plotting', ([], {}), '()\n', (677, 679), False, 'from pyvista.plotting import system_supports_plotting\n'), ((1384, 1418), 'numpy.any', 'np.any', (['(grid.points == beam.points)'], {}), '(grid.points == beam.points)\n', (1390, 1418), True, 'import numpy as np\n'), ((1456, 1480), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1469, 1480), False, 'import pytest\n'), ((1554, 1578), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1567, 1578), False, 'import pytest\n'), ((3819, 3843), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3832, 3843), False, 'import pytest\n'), ((3860, 3894), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['filename'], {}), '(filename)\n', (3884, 3894), False, 'import pyvista\n'), ((3905, 3929), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3918, 3929), False, 'import pytest\n'), ((3946, 3984), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['"""not a file"""'], {}), "('not a file')\n", (3970, 3984), False, 'import pyvista\n'), ((4027, 4051), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4040, 4051), False, 'import pytest\n'), ((4068, 4104), 'pyvista.UnstructuredGrid', 'pyvista.UnstructuredGrid', (['"""file.abc"""'], {}), "('file.abc')\n", (4092, 4104), False, 'import pyvista\n'), ((6093, 6117), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6106, 6117), False, 'import pytest\n'), ((6134, 6165), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (6156, 6165), False, 'import pyvista\n'), ((6844, 6868), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6857, 6868), False, 'import pytest\n'), ((6878, 6914), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['"""not a file"""'], {}), "('not a file')\n", (6900, 6914), False, 'import pyvista\n'), ((6980, 7004), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6993, 7004), False, 'import pytest\n'), ((7021, 7053), 'pyvista.StructuredGrid', 'pyvista.StructuredGrid', (['filename'], {}), '(filename)\n', (7043, 7053), False, 'import pyvista\n'), ((9280, 9322), 'numpy.allclose', 'np.allclose', (['structured.point_arrays[k]', 'v'], {}), '(structured.point_arrays[k], v)\n', (9291, 9322), True, 'import numpy as np\n'), ((9380, 9421), 'numpy.allclose', 'np.allclose', (['structured.cell_arrays[k]', 'v'], {}), '(structured.cell_arrays[k], v)\n', (9391, 9421), True, 'import numpy as np\n'), ((13935, 13965), 'numpy.unique', 'np.unique', (['grid.points'], {'axis': '(0)'}), '(grid.points, axis=0)\n', (13944, 13965), True, 'import numpy as np\n'), ((13967, 13992), 'numpy.unique', 'np.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (13976, 13992), True, 'import numpy as np\n'), ((14058, 14081), 'numpy.unique', 'np.unique', (['opts'], {'axis': '(0)'}), '(opts, axis=0)\n', (14067, 14081), True, 
'import numpy as np\n'), ((14083, 14108), 'numpy.unique', 'np.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (14092, 14108), True, 'import numpy as np\n'), ((14280, 14310), 'numpy.unique', 'np.unique', (['grid.points'], {'axis': '(0)'}), '(grid.points, axis=0)\n', (14289, 14310), True, 'import numpy as np\n'), ((14312, 14337), 'numpy.unique', 'np.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (14321, 14337), True, 'import numpy as np\n'), ((14919, 14959), 'numpy.all', 'np.all', (['(uniform.active_scalars == values)'], {}), '(uniform.active_scalars == values)\n', (14925, 14959), True, 'import numpy as np\n'), ((15185, 15225), 'numpy.all', 'np.all', (['(uniform.active_scalars == values)'], {}), '(uniform.active_scalars == values)\n', (15191, 15225), True, 'import numpy as np\n'), ((1531, 1542), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1539, 1542), True, 'import numpy as np\n'), ((1629, 1640), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1637, 1640), True, 'import numpy as np\n'), ((1691, 1702), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1699, 1702), True, 'import numpy as np\n'), ((1753, 1764), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1761, 1764), True, 'import numpy as np\n'), ((2598, 2623), 'numpy.vstack', 'np.vstack', (['(cell1, cell2)'], {}), '((cell1, cell2))\n', (2607, 2623), True, 'import numpy as np\n')]
|
import warnings
warnings.simplefilter('ignore')
import argparse
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import scipy.sparse as sp
from network_propagation_methods import minprop_2
from sklearn.metrics import roc_auc_score, auc
import matplotlib.pyplot as plt
#### Parameters #############
parser = argparse.ArgumentParser(description='Runs MINProp')
parser.add_argument('--alphaP', type=float, default=0.25, help='diffusion parameter for the protein-protein interaction network')
parser.add_argument('--alphaD', type=float, default=0.25, help='diffusion parameter for the disease similarity network')
parser.add_argument('--max_iter', type=int, default=1000, help='maximum number of iterations')
parser.add_argument('--eps', type=float, default=1.0e-6, help='convergence threshold')
parser.add_argument('--dir_data', type=str, default='./data/', help='directory of pickled network data')
args = parser.parse_args()
#### load data ############
### protein-protein interaction network
with open(args.dir_data + 'norm_adj_networkP.pickle', mode='rb') as f:
norm_adj_networkP = pickle.load(f)
nb_proteins = norm_adj_networkP.shape[0]
### disease similarity network
with open(args.dir_data + 'adj_networkD.pickle', mode='rb') as f:
adj_networkD = pickle.load(f)
nb_diseases = adj_networkD.shape[0]
# normalized adjacency matrix
deg_networkD = np.sum(adj_networkD, axis=0)
norm_adj_networkD = sp.csr_matrix(adj_networkD / np.sqrt(np.dot(deg_networkD.T, deg_networkD)), dtype=np.float64)
del(adj_networkD)
del(deg_networkD)
### protein-disease network (data used in PRINCE study)
with open(args.dir_data + 'biadj_networkPD.pickle', mode='rb') as f:
biadj_networkPD = pickle.load(f)
# get the list of protein-disease pairs
PD_pairs = biadj_networkPD.nonzero()
# number of protein-disease pairs
nb_PD_pairs = len(PD_pairs[0])
#### Network propagation MINProp ###########################
roc_value_set = np.array([], dtype=np.float64)
rankings = np.array([], dtype=np.int64)
for i in range(nb_PD_pairs):
# leave-one-out validation
# remove a protein-disease association
idx_P = PD_pairs[0][i]
idx_D = PD_pairs[1][i]
biadj_networkPD[idx_P, idx_D] = 0.0
biadj_networkPD.eliminate_zeros()
# normalized biadjacency matrix (ToDo: faster implementation)
degP = np.sum(biadj_networkPD, axis=1)
degD = np.sum(biadj_networkPD, axis=0)
norm_biadj_networkPD = sp.csr_matrix(biadj_networkPD / np.sqrt(np.dot(degP, degD)), dtype=np.float64)
norm_biadj_networkPD.data[np.isnan(norm_biadj_networkPD.data)] = 0.0
norm_biadj_networkPD.eliminate_zeros()
# set initial label
yP = np.zeros(nb_proteins, dtype=np.float64)
yD = np.zeros(nb_diseases, dtype=np.float64)
yD[idx_D] = 1.0
# propagation
fP, fD, convergent = minprop_2(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP, yD, args.alphaP, args.alphaD, args.eps, args.max_iter)
# ranking
labels_real = np.zeros(nb_proteins)
labels_real[idx_P] = 1
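    # rank of the held-out protein when all proteins are sorted by their propagated score fP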
rank = int(np.where(labels_real[np.argsort(-fP)]==1)[0]) + 1
rankings = np.append(rankings, rank)
# get AUC value
roc_value = roc_auc_score(labels_real, fP)
print(i, "AUC:", roc_value, convergent)
roc_value_set = np.append(roc_value_set, roc_value)
# reassign the protein-disease association
biadj_networkPD[idx_P, idx_D] = 1.0
print("Average AUC", np.mean(roc_value_set))
# compute sensitivity and top rate (ROC-like curve)
# ToDo: faster implementation
sen_set = np.array([], dtype=np.float64)
top_rate_set = np.array([], dtype=np.float64)
for k in range(nb_proteins):
    # sensitivity
sen = (rankings <= (k+1)).sum() / nb_PD_pairs
# top rate
top_rate = (k + 1) / nb_proteins
sen_set = np.append(sen_set, sen)
top_rate_set = np.append(top_rate_set, top_rate)
# get AUC value
print("Summarized AUC", auc(top_rate_set, sen_set))
# plot ROC-like curve
plt.scatter(top_rate_set, sen_set)
plt.show()
|
[
"network_propagation_methods.minprop_2",
"numpy.mean",
"argparse.ArgumentParser",
"sklearn.metrics.auc",
"pickle.load",
"sklearn.metrics.roc_auc_score",
"numpy.append",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.argsort",
"warnings.simplefilter",
"matplotlib.pyplot.show"
] |
[((16, 47), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (37, 47), False, 'import warnings\n'), ((335, 386), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Runs MINProp"""'}), "(description='Runs MINProp')\n", (358, 386), False, 'import argparse\n'), ((1386, 1414), 'numpy.sum', 'np.sum', (['adj_networkD'], {'axis': '(0)'}), '(adj_networkD, axis=0)\n', (1392, 1414), True, 'import numpy as np\n'), ((1949, 1979), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (1957, 1979), True, 'import numpy as np\n'), ((1991, 2019), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (1999, 2019), True, 'import numpy as np\n'), ((3523, 3553), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (3531, 3553), True, 'import numpy as np\n'), ((3569, 3599), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (3577, 3599), True, 'import numpy as np\n'), ((3937, 3971), 'matplotlib.pyplot.scatter', 'plt.scatter', (['top_rate_set', 'sen_set'], {}), '(top_rate_set, sen_set)\n', (3948, 3971), True, 'import matplotlib.pyplot as plt\n'), ((3972, 3982), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3980, 3982), True, 'import matplotlib.pyplot as plt\n'), ((1116, 1130), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1127, 1130), False, 'import pickle\n'), ((1289, 1303), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1300, 1303), False, 'import pickle\n'), ((1713, 1727), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1724, 1727), False, 'import pickle\n'), ((2332, 2363), 'numpy.sum', 'np.sum', (['biadj_networkPD'], {'axis': '(1)'}), '(biadj_networkPD, axis=1)\n', (2338, 2363), True, 'import numpy as np\n'), ((2375, 2406), 'numpy.sum', 'np.sum', (['biadj_networkPD'], {'axis': '(0)'}), '(biadj_networkPD, axis=0)\n', (2381, 2406), True, 'import numpy as np\n'), ((2662, 2701), 'numpy.zeros', 'np.zeros', (['nb_proteins'], {'dtype': 'np.float64'}), '(nb_proteins, dtype=np.float64)\n', (2670, 2701), True, 'import numpy as np\n'), ((2711, 2750), 'numpy.zeros', 'np.zeros', (['nb_diseases'], {'dtype': 'np.float64'}), '(nb_diseases, dtype=np.float64)\n', (2719, 2750), True, 'import numpy as np\n'), ((2814, 2946), 'network_propagation_methods.minprop_2', 'minprop_2', (['norm_adj_networkP', 'norm_adj_networkD', 'norm_biadj_networkPD', 'yP', 'yD', 'args.alphaP', 'args.alphaD', 'args.eps', 'args.max_iter'], {}), '(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP,\n yD, args.alphaP, args.alphaD, args.eps, args.max_iter)\n', (2823, 2946), False, 'from network_propagation_methods import minprop_2\n'), ((2975, 2996), 'numpy.zeros', 'np.zeros', (['nb_proteins'], {}), '(nb_proteins)\n', (2983, 2996), True, 'import numpy as np\n'), ((3104, 3129), 'numpy.append', 'np.append', (['rankings', 'rank'], {}), '(rankings, rank)\n', (3113, 3129), True, 'import numpy as np\n'), ((3166, 3196), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_real', 'fP'], {}), '(labels_real, fP)\n', (3179, 3196), False, 'from sklearn.metrics import roc_auc_score, auc\n'), ((3261, 3296), 'numpy.append', 'np.append', (['roc_value_set', 'roc_value'], {}), '(roc_value_set, roc_value)\n', (3270, 3296), True, 'import numpy as np\n'), ((3406, 3428), 'numpy.mean', 'np.mean', (['roc_value_set'], {}), '(roc_value_set)\n', (3413, 3428), True, 'import numpy as np\n'), ((3768, 3791), 'numpy.append', 
'np.append', (['sen_set', 'sen'], {}), '(sen_set, sen)\n', (3777, 3791), True, 'import numpy as np\n'), ((3811, 3844), 'numpy.append', 'np.append', (['top_rate_set', 'top_rate'], {}), '(top_rate_set, top_rate)\n', (3820, 3844), True, 'import numpy as np\n'), ((3886, 3912), 'sklearn.metrics.auc', 'auc', (['top_rate_set', 'sen_set'], {}), '(top_rate_set, sen_set)\n', (3889, 3912), False, 'from sklearn.metrics import roc_auc_score, auc\n'), ((2543, 2578), 'numpy.isnan', 'np.isnan', (['norm_biadj_networkPD.data'], {}), '(norm_biadj_networkPD.data)\n', (2551, 2578), True, 'import numpy as np\n'), ((1472, 1508), 'numpy.dot', 'np.dot', (['deg_networkD.T', 'deg_networkD'], {}), '(deg_networkD.T, deg_networkD)\n', (1478, 1508), True, 'import numpy as np\n'), ((2474, 2492), 'numpy.dot', 'np.dot', (['degP', 'degD'], {}), '(degP, degD)\n', (2480, 2492), True, 'import numpy as np\n'), ((3060, 3075), 'numpy.argsort', 'np.argsort', (['(-fP)'], {}), '(-fP)\n', (3070, 3075), True, 'import numpy as np\n')]
|
########################################################################################################################
# #
# This file is part of kAIvy #
# #
# Copyright (c) 2019-2021 by the kAIvy team and contributors #
# #
########################################################################################################################
import numpy as np
from kaivy.geometry.geometry2d import Geometry2D
from kaivy.geometry.transformation2d import Transformation2D
from kivy.graphics import Line, SmoothLine, Color
class Line2D(Geometry2D):
"""
    Defines a simple line given by two points
"""
def __init__(self, points, width=1.0, color=(1.0, 1.0, 1.0, 1.0)):
"""
Initializer
        :param points: The line's points
        :param width: The line's width
        :param color: The line's color as an RGBA tuple
        """
super().__init__()
self.geometry_class_name = 'Line2D'
self.set_nodes(np.array(points))
self.smooth = True
self.color = color
self.width = width
def render_to_kivy(self, target, transformation: Transformation2D, parameters={}, geometry_out=None):
color = parameters.get('color', self.color)
target.add(Color(*color))
nodes = transformation.transform(self.nodes)
if geometry_out is not None:
if self.GO_TAG_LINE_LIST not in geometry_out: # add line array if still missing
geometry_out[self.GO_TAG_LINE_LIST] = []
geometry_out[self.GO_TAG_LINE_LIST].append({self.GO_TAG_OWNER: self, self.GO_TAG_LINE_LIST_LINES: nodes})
nodes = nodes.flatten().tolist()
if self.smooth:
target.add(SmoothLine(points=nodes, width=self.width))
else:
target.add(Line(points=nodes, width=self.width))
def distance_to_point(self, point, ray=False):
"""
Returns the distance between this line and given point
:param point: A 2D coordinate
:param ray: Defines if the line defines an unbound ray
"""
return self.line_distance_to_point(self.nodes, point, ray=ray)
@staticmethod
def line_distance_to_point(point_list, point, ray=False):
"""
Returns the distance from line p1 p2 and a given point point
:param point_list: The line's points as numpy array
:param point: A 2D coordinate
:param ray: Defines if the line defines an unbound ray
:return: The distance to the point and the nearest point. None, None if line is invalid
"""
# two points define the line
n = (point_list[1] - point_list[0])
if np.sum(n) == 0:
return None, None
line_length = np.linalg.norm(n)
n = n / line_length
ap = point - point_list[0]
t = ap.dot(n)
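        # Clamp the projection onto the segment unless the line is treated as an unbound ray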
if not ray:
t = min(max(t, 0), line_length)
x = point_list[0] + t * n
# d = (np.cross(ap, n) ** 2).sum()**0.5
return ((point - x) ** 2).sum() ** 0.5, x
def to_dict(self, options): # Overrides Geometry2D to_dict
result = super().to_dict(options)
if options.get(self.OPTION_VISUAL_DETAILS, True):
result['width'] = self.width
result['smooth'] = self.smooth
return result
|
[
"kivy.graphics.Line",
"kivy.graphics.SmoothLine",
"numpy.linalg.norm",
"numpy.sum",
"numpy.array",
"kivy.graphics.Color"
] |
[((3148, 3165), 'numpy.linalg.norm', 'np.linalg.norm', (['n'], {}), '(n)\n', (3162, 3165), True, 'import numpy as np\n'), ((1386, 1402), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1394, 1402), True, 'import numpy as np\n'), ((1663, 1676), 'kivy.graphics.Color', 'Color', (['*color'], {}), '(*color)\n', (1668, 1676), False, 'from kivy.graphics import Line, SmoothLine, Color\n'), ((3080, 3089), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (3086, 3089), True, 'import numpy as np\n'), ((2127, 2169), 'kivy.graphics.SmoothLine', 'SmoothLine', ([], {'points': 'nodes', 'width': 'self.width'}), '(points=nodes, width=self.width)\n', (2137, 2169), False, 'from kivy.graphics import Line, SmoothLine, Color\n'), ((2208, 2244), 'kivy.graphics.Line', 'Line', ([], {'points': 'nodes', 'width': 'self.width'}), '(points=nodes, width=self.width)\n', (2212, 2244), False, 'from kivy.graphics import Line, SmoothLine, Color\n')]
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset9 as ov
import numpy as np
import pytest
from tests.runtime import get_runtime
from openvino.runtime.utils.types import get_element_type_str
from openvino.runtime.utils.types import get_element_type
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, out_type",
[
pytest.param(2, 5, 0, np.float32),
pytest.param(5, 3, 2, np.int64),
pytest.param(3, 3, -1, np.float16),
pytest.param(5, 5, -10, np.float32),
],
)
def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
output_type=get_element_type_str(out_type))
    # Create with default argument order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type))
expected_results = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
@pytest.mark.parametrize(
"num_rows, num_columns, diagonal_index, batch_shape, out_type",
[
pytest.param(2, 5, 0, [1], np.float32),
pytest.param(5, 3, 2, [2, 2], np.int64),
pytest.param(3, 3, -1, [1, 3, 2], np.float16),
pytest.param(5, 5, -10, [1, 1], np.float32),
],
)
def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out_type):
num_rows_array = np.array([num_rows], np.int32)
num_columns_array = np.array([num_columns], np.int32)
diagonal_index_array = np.array([diagonal_index], np.int32)
batch_shape_array = np.array(batch_shape, np.int32)
num_rows_tensor = ov.constant(num_rows_array)
num_columns_tensor = ov.constant(num_columns_array)
diagonal_index_tensor = ov.constant(diagonal_index_array)
batch_shape_tensor = ov.constant(batch_shape_array)
# Create with param names
eye_node = ov.eye(num_rows=num_rows_tensor,
num_columns=num_columns_tensor,
diagonal_index=diagonal_index_tensor,
batch_shape=batch_shape_tensor,
output_type=get_element_type_str(out_type))
    # Create with default argument order
eye_node = ov.eye(num_rows_tensor,
num_columns_tensor,
diagonal_index_tensor,
get_element_type_str(out_type),
batch_shape_tensor)
output_shape = [*batch_shape, 1, 1]
one_matrix = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)
expected_results = np.tile(one_matrix, output_shape)
assert eye_node.get_type_name() == "Eye"
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
# runtime = get_runtime()
# computation = runtime.computation(eye_node)
# eye_results = computation()
# assert np.allclose(eye_results, expected_results)
|
[
"numpy.tile",
"numpy.eye",
"pytest.param",
"numpy.array",
"openvino.runtime.utils.types.get_element_type",
"openvino.runtime.opset9.constant",
"openvino.runtime.utils.types.get_element_type_str"
] |
[((677, 707), 'numpy.array', 'np.array', (['[num_rows]', 'np.int32'], {}), '([num_rows], np.int32)\n', (685, 707), True, 'import numpy as np\n'), ((732, 765), 'numpy.array', 'np.array', (['[num_columns]', 'np.int32'], {}), '([num_columns], np.int32)\n', (740, 765), True, 'import numpy as np\n'), ((793, 829), 'numpy.array', 'np.array', (['[diagonal_index]', 'np.int32'], {}), '([diagonal_index], np.int32)\n', (801, 829), True, 'import numpy as np\n'), ((852, 879), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_rows_array'], {}), '(num_rows_array)\n', (863, 879), True, 'import openvino.runtime.opset9 as ov\n'), ((905, 935), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_columns_array'], {}), '(num_columns_array)\n', (916, 935), True, 'import openvino.runtime.opset9 as ov\n'), ((964, 997), 'openvino.runtime.opset9.constant', 'ov.constant', (['diagonal_index_array'], {}), '(diagonal_index_array)\n', (975, 997), True, 'import openvino.runtime.opset9 as ov\n'), ((1494, 1561), 'numpy.eye', 'np.eye', (['num_rows'], {'M': 'num_columns', 'k': 'diagonal_index', 'dtype': 'np.float32'}), '(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)\n', (1500, 1561), True, 'import numpy as np\n'), ((2450, 2480), 'numpy.array', 'np.array', (['[num_rows]', 'np.int32'], {}), '([num_rows], np.int32)\n', (2458, 2480), True, 'import numpy as np\n'), ((2505, 2538), 'numpy.array', 'np.array', (['[num_columns]', 'np.int32'], {}), '([num_columns], np.int32)\n', (2513, 2538), True, 'import numpy as np\n'), ((2566, 2602), 'numpy.array', 'np.array', (['[diagonal_index]', 'np.int32'], {}), '([diagonal_index], np.int32)\n', (2574, 2602), True, 'import numpy as np\n'), ((2627, 2658), 'numpy.array', 'np.array', (['batch_shape', 'np.int32'], {}), '(batch_shape, np.int32)\n', (2635, 2658), True, 'import numpy as np\n'), ((2681, 2708), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_rows_array'], {}), '(num_rows_array)\n', (2692, 2708), True, 'import openvino.runtime.opset9 as ov\n'), ((2734, 2764), 'openvino.runtime.opset9.constant', 'ov.constant', (['num_columns_array'], {}), '(num_columns_array)\n', (2745, 2764), True, 'import openvino.runtime.opset9 as ov\n'), ((2793, 2826), 'openvino.runtime.opset9.constant', 'ov.constant', (['diagonal_index_array'], {}), '(diagonal_index_array)\n', (2804, 2826), True, 'import openvino.runtime.opset9 as ov\n'), ((2852, 2882), 'openvino.runtime.opset9.constant', 'ov.constant', (['batch_shape_array'], {}), '(batch_shape_array)\n', (2863, 2882), True, 'import openvino.runtime.opset9 as ov\n'), ((3509, 3576), 'numpy.eye', 'np.eye', (['num_rows'], {'M': 'num_columns', 'k': 'diagonal_index', 'dtype': 'np.float32'}), '(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32)\n', (3515, 3576), True, 'import numpy as np\n'), ((3600, 3633), 'numpy.tile', 'np.tile', (['one_matrix', 'output_shape'], {}), '(one_matrix, output_shape)\n', (3607, 3633), True, 'import numpy as np\n'), ((1438, 1468), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (1458, 1468), False, 'from openvino.runtime.utils.types import get_element_type_str\n'), ((1701, 1727), 'openvino.runtime.utils.types.get_element_type', 'get_element_type', (['out_type'], {}), '(out_type)\n', (1717, 1727), False, 'from openvino.runtime.utils.types import get_element_type\n'), ((409, 442), 'pytest.param', 'pytest.param', (['(2)', '(5)', '(0)', 'np.float32'], {}), '(2, 5, 0, np.float32)\n', (421, 442), False, 'import pytest\n'), ((452, 483), 
'pytest.param', 'pytest.param', (['(5)', '(3)', '(2)', 'np.int64'], {}), '(5, 3, 2, np.int64)\n', (464, 483), False, 'import pytest\n'), ((493, 527), 'pytest.param', 'pytest.param', (['(3)', '(3)', '(-1)', 'np.float16'], {}), '(3, 3, -1, np.float16)\n', (505, 527), False, 'import pytest\n'), ((537, 572), 'pytest.param', 'pytest.param', (['(5)', '(5)', '(-10)', 'np.float32'], {}), '(5, 5, -10, np.float32)\n', (549, 572), False, 'import pytest\n'), ((3377, 3407), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (3397, 3407), False, 'from openvino.runtime.utils.types import get_element_type_str\n'), ((3773, 3799), 'openvino.runtime.utils.types.get_element_type', 'get_element_type', (['out_type'], {}), '(out_type)\n', (3789, 3799), False, 'from openvino.runtime.utils.types import get_element_type\n'), ((2135, 2173), 'pytest.param', 'pytest.param', (['(2)', '(5)', '(0)', '[1]', 'np.float32'], {}), '(2, 5, 0, [1], np.float32)\n', (2147, 2173), False, 'import pytest\n'), ((2183, 2222), 'pytest.param', 'pytest.param', (['(5)', '(3)', '(2)', '[2, 2]', 'np.int64'], {}), '(5, 3, 2, [2, 2], np.int64)\n', (2195, 2222), False, 'import pytest\n'), ((2232, 2277), 'pytest.param', 'pytest.param', (['(3)', '(3)', '(-1)', '[1, 3, 2]', 'np.float16'], {}), '(3, 3, -1, [1, 3, 2], np.float16)\n', (2244, 2277), False, 'import pytest\n'), ((2287, 2330), 'pytest.param', 'pytest.param', (['(5)', '(5)', '(-10)', '[1, 1]', 'np.float32'], {}), '(5, 5, -10, [1, 1], np.float32)\n', (2299, 2330), False, 'import pytest\n'), ((1225, 1255), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (1245, 1255), False, 'from openvino.runtime.utils.types import get_element_type_str\n'), ((3164, 3194), 'openvino.runtime.utils.types.get_element_type_str', 'get_element_type_str', (['out_type'], {}), '(out_type)\n', (3184, 3194), False, 'from openvino.runtime.utils.types import get_element_type_str\n')]
|
import numpy as np
import scipy.special as ss
import pathlib
from Particle import Particle
def ql_global(l, particles):
# Keep only particles that have neighbors (this was changed 5/23/2020)
particles = [i for i in particles if len(Particle.data[i].neighs)>0]
neigh_total = sum([len(Particle.data[i].neighs) for i in particles])
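    # total number of neighbors across the remaining particles, used to weight each particle's qlm contribution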
if isinstance(l, int):
if len(particles)!=0:
            # average qlmbar weighted by the number of neighbors
Qlmbar = list(sum([np.array(Particle.data[p].qlmbar[l], dtype=complex)*len(Particle.data[p].neighs)/neigh_total for p in particles]))
Qlmtilde = list(sum([np.array(Particle.data[p].qlmtilde[l], dtype=complex)*len(Particle.data[p].neighs)/neigh_total for p in particles]))
if l in Particle.qlmbar_ideal:
Ql = np.abs(np.sqrt((4*np.pi/(2*l+1))*np.vdot(np.array(Qlmtilde, dtype=complex), np.array(Qlmtilde, dtype=complex))))
else:
Qlmbar_mag_sq = np.abs(np.vdot(np.array(Qlmbar, dtype=complex), np.array(Qlmbar, dtype=complex)))
Ql = np.abs(np.sqrt((4*np.pi/(2*l+1))*Qlmbar_mag_sq))
D = np.sqrt(Qlmbar_mag_sq)
else:
Qlmbar = [0]*(2*l+1)
Qlmtilde = [0]*(2*l+1)
Ql = 0.0
return [Ql, Qlmbar, Qlmtilde]
|
[
"numpy.array",
"numpy.sqrt"
] |
[((1087, 1109), 'numpy.sqrt', 'np.sqrt', (['Qlmbar_mag_sq'], {}), '(Qlmbar_mag_sq)\n', (1094, 1109), True, 'import numpy as np\n'), ((1036, 1084), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi / (2 * l + 1) * Qlmbar_mag_sq)'], {}), '(4 * np.pi / (2 * l + 1) * Qlmbar_mag_sq)\n', (1043, 1084), True, 'import numpy as np\n'), ((952, 983), 'numpy.array', 'np.array', (['Qlmbar'], {'dtype': 'complex'}), '(Qlmbar, dtype=complex)\n', (960, 983), True, 'import numpy as np\n'), ((985, 1016), 'numpy.array', 'np.array', (['Qlmbar'], {'dtype': 'complex'}), '(Qlmbar, dtype=complex)\n', (993, 1016), True, 'import numpy as np\n'), ((479, 530), 'numpy.array', 'np.array', (['Particle.data[p].qlmbar[l]'], {'dtype': 'complex'}), '(Particle.data[p].qlmbar[l], dtype=complex)\n', (487, 530), True, 'import numpy as np\n'), ((618, 671), 'numpy.array', 'np.array', (['Particle.data[p].qlmtilde[l]'], {'dtype': 'complex'}), '(Particle.data[p].qlmtilde[l], dtype=complex)\n', (626, 671), True, 'import numpy as np\n'), ((831, 864), 'numpy.array', 'np.array', (['Qlmtilde'], {'dtype': 'complex'}), '(Qlmtilde, dtype=complex)\n', (839, 864), True, 'import numpy as np\n'), ((866, 899), 'numpy.array', 'np.array', (['Qlmtilde'], {'dtype': 'complex'}), '(Qlmtilde, dtype=complex)\n', (874, 899), True, 'import numpy as np\n')]
|
""" IO Handler for LAS (and compressed LAZ) file format """
import laspy
import numpy as np
from laserchicken import keys
from laserchicken.io.base_io_handler import IOHandler
from laserchicken.io.utils import convert_to_short_type, select_valid_attributes
DEFAULT_LAS_ATTRIBUTES = {
'x',
'y',
'z',
'intensity',
'gps_time',
'raw_classification',
}
class LASHandler(IOHandler):
""" Class for IO of point-cloud data in LAS file format """
def read(self, attributes=DEFAULT_LAS_ATTRIBUTES):
"""
Load the points from a LAS(LAZ) file into memory.
:param attributes: list of attributes to read ('all' for all attributes in file)
:return: point cloud data structure
"""
file = laspy.read(self.path)
dtype = file.header.point_format.dtype()
attributes_available = [el if el not in ['X', 'Y', 'Z'] else el.lower()
for el in dtype.fields.keys()]
attributes = select_valid_attributes(attributes_available, attributes)
points = {}
for name in attributes:
if hasattr(file, name):
file_data = getattr(file, name)
data = np.zeros_like(file_data)
data[:] = file_data
points[name] = _get_attribute(data, data.dtype.name)
return {keys.point: points}
def write(self, point_cloud, attributes='all', file_version='1.2', point_format=3):
"""
Write point cloud to a LAS(LAZ) file.
:param point_cloud:
:param attributes: list of attributes to write ('all' for all attributes in point_cloud)
:param file_version:
:param point_format:
:return:
"""
file = laspy.create(point_format=point_format,
file_version=file_version)
points = point_cloud[keys.point]
attributes = select_valid_attributes([attr for attr in points.keys()], attributes)
# NOTE: adding extra dims and assignment should be done in two steps,
# some fields (e.g. raw_classification) are otherwise overwritten
dtype = file.header.point_format.dtype()
for attribute in attributes:
data, type = _get_data_and_type(points[attribute])
type_short = convert_to_short_type(type)
if attribute not in 'xyz':
                # x, y, z are not in the point dtype fields; laspy converts the float coords to scaled int32 internally
if attribute not in dtype.fields:
param = laspy.ExtraBytesParams(name=attribute, type=type)
file.add_extra_dim(param)
file_type_short = convert_to_short_type(getattr(file, attribute).dtype.name)
if not file_type_short == type_short:
raise TypeError('Data type in file does not match the one in point cloud: '
'for {}, {} vs {}'.format(attribute, file_type_short, type_short))
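        # LAS stores x, y, z as scaled 32-bit integers; set a per-axis offset and a
        # 0.001 scale (millimetre resolution) so laspy can encode the float coordinates.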
for dim in 'xyz':
data, _ = _get_data_and_type(points[dim])
setattr(file.header, '{}_offset'.format(dim), data.min())
setattr(file.header, '{}_scale'.format(dim), 0.001)
for attribute in attributes:
data, _ = _get_data_and_type(points[attribute])
if data.size == 0:
raise ValueError('Cannot write empty point-cloud!')
else:
setattr(file, attribute, data)
try:
file.write(self.path)
except ValueError as err:
raise ValueError('Error in writing LAS file (file_version {}, point_format_id {}). '
'laspy error below:\n{}'.format(file_version, point_format, err))
def _get_attribute(data, data_type):
return {'type': data_type, 'data': data}
def _get_data_and_type(attribute):
return attribute['data'], attribute['type']
|
[
"laspy.create",
"laserchicken.io.utils.convert_to_short_type",
"laserchicken.io.utils.select_valid_attributes",
"laspy.ExtraBytesParams",
"laspy.read",
"numpy.zeros_like"
] |
[((757, 778), 'laspy.read', 'laspy.read', (['self.path'], {}), '(self.path)\n', (767, 778), False, 'import laspy\n'), ((993, 1050), 'laserchicken.io.utils.select_valid_attributes', 'select_valid_attributes', (['attributes_available', 'attributes'], {}), '(attributes_available, attributes)\n', (1016, 1050), False, 'from laserchicken.io.utils import convert_to_short_type, select_valid_attributes\n'), ((1753, 1819), 'laspy.create', 'laspy.create', ([], {'point_format': 'point_format', 'file_version': 'file_version'}), '(point_format=point_format, file_version=file_version)\n', (1765, 1819), False, 'import laspy\n'), ((2308, 2335), 'laserchicken.io.utils.convert_to_short_type', 'convert_to_short_type', (['type'], {}), '(type)\n', (2329, 2335), False, 'from laserchicken.io.utils import convert_to_short_type, select_valid_attributes\n'), ((1211, 1235), 'numpy.zeros_like', 'np.zeros_like', (['file_data'], {}), '(file_data)\n', (1224, 1235), True, 'import numpy as np\n'), ((2546, 2595), 'laspy.ExtraBytesParams', 'laspy.ExtraBytesParams', ([], {'name': 'attribute', 'type': 'type'}), '(name=attribute, type=type)\n', (2568, 2595), False, 'import laspy\n')]
|
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras import utils
import tensorflow as tf
import numpy as np
import argparse
import logging
import os
# Set Log Level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Seed for Reproducability
SEED = 123
np.random.seed(SEED)
tf.random.set_seed(SEED)
# Setup Logger
logger = logging.getLogger('sagemaker')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def parse_args():
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--data', type=str, default=os.environ.get('SM_CHANNEL_DATA'))
parser.add_argument('--output', type=str, default=os.environ.get('SM_CHANNEL_OUTPUT'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
return parser.parse_known_args()
def get_train_data(train_dir):
X_train = np.load(os.path.join(train_dir, 'X_train.npy'))
y_train = np.load(os.path.join(train_dir, 'y_train.npy'))
logger.info(f'X_train: {X_train.shape} | y_train: {y_train.shape}')
return X_train, y_train
def get_validation_data(val_dir):
X_validation = np.load(os.path.join(val_dir, 'X_validation.npy'))
y_validation = np.load(os.path.join(val_dir, 'y_validation.npy'))
logger.info(f'X_validation: {X_validation.shape} | y_validation: {y_validation.shape}')
return X_validation, y_validation
def get_test_data(test_dir):
X_test = np.load(os.path.join(test_dir, 'X_test.npy'))
y_test = np.load(os.path.join(test_dir, 'y_test.npy'))
logger.info(f'X_test: {X_test.shape} | y_test: {y_test.shape}')
return X_test, y_test
if __name__ == '__main__':
logger.info(f'[Using TensorFlow version: {tf.__version__}]')
DEVICE = '/cpu:0'
args, _ = parse_args()
epochs = args.epochs
# Load train, validation and test sets from S3
X_train, y_train = get_train_data(args.train)
X_validation, y_validation = get_validation_data(args.val)
X_test, y_test = get_test_data(args.test)
with tf.device(DEVICE):
# Data Augmentation
TRAIN_BATCH_SIZE = 32
data_generator = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
train_iterator = data_generator.flow(X_train, y_train, batch_size=TRAIN_BATCH_SIZE)
# Define Model Architecture
model = Sequential()
# CONVOLUTIONAL LAYER 1
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
        # CONVOLUTIONAL LAYER 2
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
# CONVOLUTIONAL LAYER 3
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
# FULLY CONNECTED LAYER
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.summary()
# Compile Model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train Model
BATCH_SIZE = 32
STEPS_PER_EPOCH = int(X_train.shape[0]/TRAIN_BATCH_SIZE)
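        # The iterator from ImageDataGenerator.flow() yields batches indefinitely, so
        # steps_per_epoch (samples // batch size) is what defines one epoch here; the
        # batch_size passed to fit() below is redundant since batches come pre-made.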
model.fit(train_iterator,
steps_per_epoch=STEPS_PER_EPOCH,
batch_size=BATCH_SIZE,
epochs=epochs,
validation_data=(X_validation, y_validation),
callbacks=[],
verbose=2,
shuffle=True)
# Evaluate on Test Set
result = model.evaluate(X_test, y_test, verbose=1)
print(f'Test Accuracy: {result[1]}')
# Save Model
model.save(f'{args.model_dir}/1')
|
[
"logging.getLogger",
"tensorflow.device",
"logging.StreamHandler",
"tensorflow.random.set_seed",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"os.environ.get",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"numpy.random.seed",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.models.Sequential"
] |
[((538, 558), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (552, 558), True, 'import numpy as np\n'), ((559, 583), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['SEED'], {}), '(SEED)\n', (577, 583), True, 'import tensorflow as tf\n'), ((609, 639), 'logging.getLogger', 'logging.getLogger', (['"""sagemaker"""'], {}), "('sagemaker')\n", (626, 639), False, 'import logging\n'), ((688, 711), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (709, 711), False, 'import logging\n'), ((746, 771), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (769, 771), False, 'import argparse\n'), ((1542, 1580), 'os.path.join', 'os.path.join', (['train_dir', '"""X_train.npy"""'], {}), "(train_dir, 'X_train.npy')\n", (1554, 1580), False, 'import os\n'), ((1604, 1642), 'os.path.join', 'os.path.join', (['train_dir', '"""y_train.npy"""'], {}), "(train_dir, 'y_train.npy')\n", (1616, 1642), False, 'import os\n'), ((1807, 1848), 'os.path.join', 'os.path.join', (['val_dir', '"""X_validation.npy"""'], {}), "(val_dir, 'X_validation.npy')\n", (1819, 1848), False, 'import os\n'), ((1877, 1918), 'os.path.join', 'os.path.join', (['val_dir', '"""y_validation.npy"""'], {}), "(val_dir, 'y_validation.npy')\n", (1889, 1918), False, 'import os\n'), ((2103, 2139), 'os.path.join', 'os.path.join', (['test_dir', '"""X_test.npy"""'], {}), "(test_dir, 'X_test.npy')\n", (2115, 2139), False, 'import os\n'), ((2162, 2198), 'os.path.join', 'os.path.join', (['test_dir', '"""y_test.npy"""'], {}), "(test_dir, 'y_test.npy')\n", (2174, 2198), False, 'import os\n'), ((2692, 2709), 'tensorflow.device', 'tf.device', (['DEVICE'], {}), '(DEVICE)\n', (2701, 2709), True, 'import tensorflow as tf\n'), ((2794, 2885), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'horizontal_flip': '(True)'}), '(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n', (2812, 2885), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3035, 3047), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3045, 3047), False, 'from tensorflow.keras.models import Sequential\n'), ((974, 1007), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_DATA"""'], {}), "('SM_CHANNEL_DATA')\n", (988, 1007), False, 'import os\n'), ((1063, 1098), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_OUTPUT"""'], {}), "('SM_CHANNEL_OUTPUT')\n", (1077, 1098), False, 'import os\n'), ((1153, 1187), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TRAIN"""'], {}), "('SM_CHANNEL_TRAIN')\n", (1167, 1187), False, 'import os\n'), ((1240, 1272), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_VAL"""'], {}), "('SM_CHANNEL_VAL')\n", (1254, 1272), False, 'import os\n'), ((1326, 1359), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TEST"""'], {}), "('SM_CHANNEL_TEST')\n", (1340, 1359), False, 'import os\n'), ((1418, 1448), 'os.environ.get', 'os.environ.get', (['"""SM_MODEL_DIR"""'], {}), "('SM_MODEL_DIR')\n", (1432, 1448), False, 'import os\n'), ((3107, 3204), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(32, 32, 3)'}), "(filters=16, kernel_size=2, padding='same', activation='relu',\n input_shape=(32, 32, 3))\n", (3113, 3204), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), 
((3220, 3240), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3238, 3240), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3260, 3285), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (3272, 3285), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3338, 3406), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=2, padding='same', activation='relu')\n", (3344, 3406), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3426, 3446), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3444, 3446), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3466, 3491), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (3478, 3491), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3544, 3612), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=2, padding='same', activation='relu')\n", (3550, 3612), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3632, 3652), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3650, 3652), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3672, 3697), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (3684, 3697), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3717, 3729), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3724, 3729), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3783, 3792), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3790, 3792), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3812, 3841), 'tensorflow.keras.layers.Dense', 'Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (3817, 3841), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3861, 3873), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (3868, 3873), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3893, 3924), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (3898, 3924), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n')]
|
"""
This library was written for the course `Stochastic Models and Applications`
It uses the libraries `networkx, pandas, numpy, matplotlib`
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
import pandas as pd
def _gcd(a, b):
if a == 0:
return b
return _gcd(b % a, a)
def gcd(arr):
if len(arr) == 0:
return 0
if (len(arr) == 1):
return arr[0]
t = arr[0]
for i in range(len(arr)):
t = _gcd(t, arr[i])
return t
class MarkovChain:
"""
Constructor function: Generate blank instance
    There are two ways to create the chain:
        - From a csv file:
            use from_file
        - From keyboard input:
            use from_stdin
"""
def __init__(self):
self.data = None
self.state = None
self.struct = None
def from_stdin(self, state=None, data=None, pi=None):
        if state is None or data is None:
return "Nothing is given"
else:
self.P = data
self.pi = pi
self.data = self.P
self.state = state
self.struct = self.__generate_struct__()
def from_file(self, path='input.csv'):
data = pd.read_csv(path)
matrix = pd.DataFrame(data)
data = matrix.values.tolist()
self.pi = data[0]
self.state = matrix.columns
self.P = data[1:]
self.data = self.P
self.struct = self.__generate_struct__()
"""
    Generate the edge structure of the graph.
    Each edge currently has the form:
    ['vertex 1', 'vertex 2', {'label': label}]
"""
def __generate_struct__(self):
struct = []
for i in range(len(self.data)):
for j in range(len(self.data)):
if self.data[i][j] > 0:
struct.append([self.state[i], self.state[j],
{'label': self.data[i][j]}])
return struct
"""
    Generate the n-step state-transition probability matrix of the process
"""
def matrix_at(self, n):
self.data = np.matrix.round(np.linalg.matrix_power(self.P, n), 3)
self.struct = self.__generate_struct__()
"""
    Generate the graph; the figure is saved to the img directory
"""
def __get_state_vector__(self, n):
self.matrix_at(n)
self.state_vector = np.matmul(self.pi, self.data)
def __get_state_track__(self, n):
state = np.empty(shape=(len(self.pi), 1))
state = state.tolist()
steps = []
for i in range(n):
steps.append(i+1)
self.__get_state_vector__(i)
state.append(self.state_vector)
state = np.transpose(state)
return state.tolist(), steps
def generate_state_graph(self, n):
        if self.pi is None:
return "Not found origin state"
else:
state, steps = self.__get_state_track__(n)
legend = self.state
for i in range(len(self.pi)):
plt.plot(steps, state[i][1:])
plt.legend(legend, loc='best')
plt.title("Distribution state vector through time")
plt.xlabel("Steps")
plt.ylabel("Probability")
plt.savefig('img/state_vector.svg', format='svg', dpi=1200)
plt.show()
def generate_graph(self, n=1):
if self.state is None:
return "Graph is empty. \n Nothing to show"
else:
self.matrix_at(n)
self = nx.drawing.nx_agraph.to_agraph(nx.DiGraph(self.struct))
self.layout('dot')
self.node_attr.update(color='red', height=0.5,
width=0.5, fontname="Calibri", fontsize=10)
self.edge_attr.update(color='blue', fontsize=8,
fontname="Calibri", rotate=True)
self.draw('img/Graph.svg')
self.draw('img/Graph.png')
img = imread('img/Graph.png')
plt.axis("off")
plt.imshow(img)
def __convert_to_adjagecy__(self):
adjagecy_vector = {i: [] for i in self.state}
for i in range(len(self.P)):
for j in range(len(self.P)):
if self.P[i][j] != 0:
adjagecy_vector[self.state[i]].append(self.state[j])
return adjagecy_vector
def is_connected(self, source, target):
vector = self.__convert_to_adjagecy__()
visit_status = {i: False for i in self.state}
queue = []
queue.append(source)
while queue != []:
current_state = queue[0]
visit_status[current_state] = True
queue.pop(0)
for s in vector[current_state]:
if target == s:
return True
if visit_status[s] == False:
queue.append(s)
return False
# This part is unused -> comment for later use
# ------------------------------------------
# def has_selfloop(self):
# for i in range(len(self.P)):
# if self.P[i][i] != 0:
# return True
# return False
# def rank_test(self):
# P = np.subtract(self.P, np.identity(len(self.P)))
# if np.linalg.matrix_rank(P) == len(self.P):
# return True
# return False
# -------------------------------------------
def is_regular(self):
# Check is irreducible
component = self.get_connected_component()
if len(component) > 1:
return False
tmp = self.get_period(self.state[0])
if tmp == 1:
return True
return False
# ----------------------------------------------------------
# Get period of a state
# ----------------------------------------------------------
def __cycle_length__(self, source):
vector = self.__convert_to_adjagecy__()
visit_status = {i: False for i in self.state}
step = 0
queue = [source]
while queue != []:
current_state = queue[0]
visit_status[current_state] = True
queue.pop(0)
step += 1
for s in vector[current_state]:
if s == source:
return step
if visit_status[s] == False:
queue.append(s)
return step
def get_connected_component(self):
connected_component = [[]]
status = {i: False for i in self.state}
while True:
counter = 0
for i in self.state:
for j in self.state:
if (self.is_connected(i, j) and self.is_connected(j, i)):
if status[i] == False:
connected_component[counter].append(i)
status[i] = True
if status[j] == False:
connected_component[counter].append(j)
status[j] = True
connected_component.append([])
counter += 1
if i == self.state[len(self.state) - 1] and j == self.state[len(self.state) - 1]:
break
connected_component = list(filter(None, connected_component))
return connected_component
def get_period(self, target):
component = self.get_connected_component()
for sl in component:
if target in sl:
break
t = []
if target not in sl:
return 0
else:
for i in sl:
t.append(self.__cycle_length__(i))
return gcd(t)
# ----------------------------------------------------
# Get steady state
# ----------------------------------------------------
def get_steady_state(self):
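        # Solve pi.P = pi, i.e. (P^T - I).pi = 0, together with sum(pi) = 1.
        # The stacked system A.pi = b is overdetermined (n+1 equations, n unknowns),
        # so it is solved in the least-squares sense via the normal equations
        # (A^T A)^-1 A^T b below.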
A = np.transpose(self.P)
A = np.subtract(A, np.identity(len(A)))
A = np.ndarray.tolist(A)
A.append(np.ndarray.tolist(np.ones(len(A))))
b = np.ndarray.tolist(np.transpose(np.zeros(len(A))))
b[len(b)-1] = 1
# Calc
return np.matmul(np.linalg.inv(np.matmul(np.transpose(A), A)), np.matmul(np.transpose(A), b))
# ----------------------------------------------------
# Get mean time spent
# ----------------------------------------------------
def __get_index__(self, state_set):
idx_list = []
tmp = list(self.state)
try:
for state in state_set:
idx_list.append(tmp.index(state))
del tmp
return idx_list
except:
return "State is not in the state set"
def __get_absoring_state__(self):
abr_state = []
for i in range((len(self.state))):
if self.P[i][i] == 1:
abr_state.append(self.state[i])
return abr_state
def __get_mean_state_list__(self, state_set):
tmp = list(self.state)
        tmp = [state for state in tmp if state not in state_set]
return tmp
def __get_mean_time_absoring__(self):
try:
idx_list = self.__get_index__(self.__get_absoring_state__())
            state_list = self.__get_mean_state_list__(self.__get_absoring_state__())
P = self.data
P = np.delete(P, idx_list, 0)
P = np.delete(P, idx_list, 1)
P = np.transpose(P)
I = np.identity(len(P))
A = np.subtract(I, P)
b = np.transpose(np.ones(len(P)))
x = np.round(np.linalg.solve(A, b), 2)
del idx_list, P, I, A, b
mean_time = {"Mean time spent " +
state: x_val for (state, x_val) in zip(state_list, x)}
return mean_time
except:
return "Check your state or matrix"
def __get_mean_time_transient__(self, source=None, target=None):
idx_list = self.__get_index__(self.__get_absoring_state__())
P = self.data
P = np.delete(P, idx_list, 0)
P = np.delete(P, idx_list, 1)
P = np.transpose(P)
I = np.identity(len(P))
A = np.subtract(I, P)
A = A.tolist()
        if source is None or target is None:
return A
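

if __name__ == '__main__':
    # Usage sketch (illustrative only): a hypothetical two-state chain showing how the
    # class above is driven; state names and probabilities are made up for this example.
    mc = MarkovChain()
    mc.from_stdin(state=['sunny', 'rainy'],
                  data=[[0.9, 0.1], [0.5, 0.5]],
                  pi=[1.0, 0.0])
    print(mc.get_steady_state())  # expected to be close to [0.833, 0.167]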
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.image.imread",
"numpy.ndarray.tolist",
"matplotlib.pyplot.imshow",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"networkx.DiGraph",
"numpy.subtract",
"numpy.matmul",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.linalg.solve",
"numpy.linalg.matrix_power"
] |
[((1247, 1264), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1258, 1264), True, 'import pandas as pd\n'), ((1282, 1300), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1294, 1300), True, 'import pandas as pd\n'), ((2356, 2385), 'numpy.matmul', 'np.matmul', (['self.pi', 'self.data'], {}), '(self.pi, self.data)\n', (2365, 2385), True, 'import numpy as np\n'), ((2683, 2702), 'numpy.transpose', 'np.transpose', (['state'], {}), '(state)\n', (2695, 2702), True, 'import numpy as np\n'), ((7837, 7857), 'numpy.transpose', 'np.transpose', (['self.P'], {}), '(self.P)\n', (7849, 7857), True, 'import numpy as np\n'), ((7918, 7938), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['A'], {}), '(A)\n', (7935, 7938), True, 'import numpy as np\n'), ((9960, 9985), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(0)'], {}), '(P, idx_list, 0)\n', (9969, 9985), True, 'import numpy as np\n'), ((9998, 10023), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(1)'], {}), '(P, idx_list, 1)\n', (10007, 10023), True, 'import numpy as np\n'), ((10036, 10051), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (10048, 10051), True, 'import numpy as np\n'), ((10096, 10113), 'numpy.subtract', 'np.subtract', (['I', 'P'], {}), '(I, P)\n', (10107, 10113), True, 'import numpy as np\n'), ((2109, 2142), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['self.P', 'n'], {}), '(self.P, n)\n', (2131, 2142), True, 'import numpy as np\n'), ((3053, 3083), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {'loc': '"""best"""'}), "(legend, loc='best')\n", (3063, 3083), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3147), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution state vector through time"""'], {}), "('Distribution state vector through time')\n", (3105, 3147), True, 'import matplotlib.pyplot as plt\n'), ((3160, 3179), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {}), "('Steps')\n", (3170, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3217), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (3202, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3289), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""img/state_vector.svg"""'], {'format': '"""svg"""', 'dpi': '(1200)'}), "('img/state_vector.svg', format='svg', dpi=1200)\n", (3241, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3310, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3969), 'matplotlib.image.imread', 'imread', (['"""img/Graph.png"""'], {}), "('img/Graph.png')\n", (3952, 3969), False, 'from matplotlib.image import imread\n'), ((3982, 3997), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3990, 3997), True, 'import matplotlib.pyplot as plt\n'), ((4010, 4025), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4020, 4025), True, 'import matplotlib.pyplot as plt\n'), ((9264, 9289), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(0)'], {}), '(P, idx_list, 0)\n', (9273, 9289), True, 'import numpy as np\n'), ((9306, 9331), 'numpy.delete', 'np.delete', (['P', 'idx_list', '(1)'], {}), '(P, idx_list, 1)\n', (9315, 9331), True, 'import numpy as np\n'), ((9348, 9363), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (9360, 9363), True, 'import numpy as np\n'), ((9416, 9433), 'numpy.subtract', 'np.subtract', (['I', 'P'], {}), '(I, P)\n', (9427, 9433), True, 'import numpy as np\n'), ((3011, 3040), 
'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'state[i][1:]'], {}), '(steps, state[i][1:])\n', (3019, 3040), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3553), 'networkx.DiGraph', 'nx.DiGraph', (['self.struct'], {}), '(self.struct)\n', (3540, 3553), True, 'import networkx as nx\n'), ((8174, 8189), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (8186, 8189), True, 'import numpy as np\n'), ((9505, 9526), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (9520, 9526), True, 'import numpy as np\n'), ((8142, 8157), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (8154, 8157), True, 'import numpy as np\n')]
|
# Copyright © 2019. <NAME>. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
import warnings
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_positions
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params,
duration, channel_locations=None, cluster_ids=None, epochs=None, seed=None, verbose=True):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
duration : length of recording (seconds)
channel_locations : numpy.ndarray (num_channels x 2)
Channel locations (if None, a linear geometry is assumed)
params : dict of parameters
'isi_threshold' : minimum time for isi violations
'min_isi'
'num_channels_to_compare'
'max_spikes_for_unit'
'max_spikes_for_nn'
'n_neighbors'
'drift_metrics_interval_s'
'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times >= epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
isi_threshold=params['isi_threshold'],
min_isi=params['min_isi'],
duration=duration,
verbose=verbose)
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
duration=duration, verbose=verbose)
print("Calculating firing rate")
firing_rate = calculate_firing_rates(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units, duration=duration, verbose=verbose)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters=spike_clusters[in_epoch],
amplitudes=amplitudes[in_epoch],
total_units=total_units,
verbose=verbose)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = \
calculate_pc_metrics(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
num_channels_to_compare=params['num_channels_to_compare'],
max_spikes_for_cluster=params['max_spikes_for_unit'],
spikes_for_nn=spikes_for_nn,
n_neighbors=params['n_neighbors'],
channel_locations=
channel_locations,
seed=seed,
verbose=verbose)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
spikes_for_silhouette=spikes_for_silhouette,
seed=seed, verbose=verbose)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
interval_length=params['drift_metrics_interval_s'],
min_spikes_per_interval=
params['drift_metrics_min_spikes_per_interval'],
channel_locations=
channel_locations,
verbose=verbose)
if cluster_ids is None:
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids_out),
('firing_rate', firing_rate),
('presence_ratio', presence_ratio),
('isi_violation', isi_viol),
('amplitude_cutoff', amplitude_cutoff),
('isolation_distance', isolation_distance),
('l_ratio', l_ratio),
('d_prime', d_prime),
('nn_hit_rate', nn_hit_rate),
('nn_miss_rate', nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name', epoch_name),
)))))
return metrics
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi, duration,
spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
duration=duration,
isi_threshold=isi_threshold,
min_isi=min_isi)
return viol_rates
def calculate_presence_ratio(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None,
verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
duration=duration)
return ratios
def calculate_num_spikes(spike_times, spike_clusters, total_units, spike_cluster_subset=None, verbose=True):
num_spikes = np.zeros((total_units,))
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
num_spikes[cluster_id] = len(spike_times[for_this_cluster])
return num_spikes
def calculate_firing_rates(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
firing_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
firing_rates[cluster_id] = firing_rate(spike_times[for_this_cluster],
duration=duration)
return firing_rates
def calculate_amplitude_cutoff(spike_clusters, amplitudes, total_units, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs[cluster_id] = amplitude_cutoff(amplitudes[for_this_cluster])
return amplitude_cutoffs
def calculate_pc_metrics(spike_clusters, total_units, pc_features, pc_feature_ind,
num_channels_to_compare, max_spikes_for_cluster, spikes_for_nn,
n_neighbors, channel_locations, min_num_pcs=10, metric_names=None,
seed=None, spike_cluster_subset=None, verbose=True):
"""
Computes metrics from projection of waveforms to principal components
including: isolation distance, l ratio, d prime, nn hit rate, nn miss rate
Parameters
----------
spike_clusters: numpy.ndarray (num_spikes,)
Unit ID for each spike time
total_units: int
Total number of units
pc_features: numpy.ndarray (num_spikes, num_pcs, num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units, num_channels)
Channel indices of PCs for each unit
num_channels_to_compare: int
Number of channels around the max channel over which to compute the
metrics (e.g. only units from these channels will be considered for the
nearest neighbor metrics)
max_spikes_for_cluster: int
Total number of spikes to use for computing the metrics
spikes_for_nn: int
Number of spikes in a unit to use for computing nearest neighbor metrics
(nn_hit_rate, nn_miss_rate)
n_neighbors: int
Number of nearest neighbor spikes to compare membership
channel_locations: array, (channels, 2)
(x,y) location of channels; used to identify neighboring channels
min_num_pcs: int, default=10
Minimum number of spikes a unit must have to compute these metrics
metric_names: list of str, default=None
List of metrics to compute
seed: int, default=None
Random seed for subsampling spikes from the unit
spike_cluster_subset: numpy.array (units,), default=None
If specified compute metrics for only these units
verbose: bool, default=True
Prints out progress bar if True
Returns (all 1d numpy.arrays)
-------
isolation_distances
l_ratios
d_primes
nn_hit_rates
nn_miss_rates
"""
if metric_names is None:
metric_names = ['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor']
if num_channels_to_compare > channel_locations.shape[0]:
num_channels_to_compare = channel_locations.shape[0]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
peak_channels = np.zeros((total_units,), dtype='uint16')
neighboring_channels = np.zeros((total_units, num_channels_to_compare))
isolation_distances = np.zeros((total_units,))
l_ratios = np.zeros((total_units,))
d_primes = np.zeros((total_units,))
nn_hit_rates = np.zeros((total_units,))
nn_miss_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(all_cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :], 0))
peak_channels[idx] = pc_feature_ind[idx, pc_max]
# find neighboring channels
neighboring_channels[idx] = find_neighboring_channels(pc_feature_ind[idx, pc_max],
pc_feature_ind[idx, :],
num_channels_to_compare,
channel_locations)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(idx + 1, total_units)
peak_channel = peak_channels[idx]
# units_for_channel: index (not ID) of units defined at the target unit's peak channel
units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0],
pc_feature_ind.shape)
# units_in_range: list of bool, True for units whose peak channels are in the neighborhood of target unit
units_in_range = [channel in neighboring_channels[idx] for channel in peak_channels[units_for_channel]]
channels_to_use = neighboring_channels[idx]
# only get index of units who are in the neighborhood of target unit
units_for_channel = units_for_channel[units_in_range]
spike_counts = np.zeros(units_for_channel.shape)
for idx2, cluster_id2 in enumerate(units_for_channel):
spike_counts[idx2] = np.sum(spike_clusters == all_cluster_ids[cluster_id2])
# index of target unit within the subset of units in its neighborhood (including itself)
this_unit_idx = np.where(units_for_channel == idx)[0]
if spike_counts[this_unit_idx] > max_spikes_for_cluster:
relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_for_channel):
try:
channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
except IndexError:
# Occurs when pc_feature_ind does not contain all channels of interest
# In that case, we will exclude this unit for the calculation
print('Unit outside the range set by channel_to_use, skipping...')
pass
else:
subsample = int(relative_counts[idx2])
index_mask = make_index_mask(spike_clusters, all_cluster_ids[cluster_id2], min_num=0, max_num=subsample,
seed=seed)
pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
labels = np.ones((pcs.shape[0],)) * all_cluster_ids[cluster_id2]
all_pcs = np.concatenate((all_pcs, pcs), 0)
all_labels = np.concatenate((all_labels, labels), 0)
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
if all_pcs.shape[0] > min_num_pcs:
if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
isolation_distances[idx], l_ratios[idx] = mahalanobis_metrics(all_pcs, all_labels,
cluster_id)
else:
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
if 'd_prime' in metric_names:
d_primes[idx] = lda_metrics(all_pcs, all_labels, cluster_id)
else:
d_primes[idx] = np.nan
if 'nearest_neighbor' in metric_names:
nn_hit_rates[idx], nn_miss_rates[idx] = nearest_neighbors_metrics(all_pcs, all_labels,
cluster_id,
spikes_for_nn,
n_neighbors)
else:
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
else:
print(f'Unit {str(cluster_id)} only has ' + str(
all_pcs.shape[0]) + ' spikes, which is not enough to compute metric; assigning nan...')
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
d_primes[idx] = np.nan
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
def calculate_silhouette_score(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
spikes_for_silhouette,
seed=None,
spike_cluster_subset=None,
verbose=True):
random_spike_inds = np.random.RandomState(seed=seed).permutation(spike_clusters.size)
random_spike_inds = random_spike_inds[:spikes_for_silhouette]
num_pc_features = pc_features.shape[1]
num_channels = np.max(pc_feature_ind) + 1
all_pcs = np.zeros((spikes_for_silhouette, num_channels * num_pc_features))
for idx, i in enumerate(random_spike_inds):
unit_id = spike_clusters[i]
channels = pc_feature_ind[unit_id, :]
for j in range(0, num_pc_features):
all_pcs[idx, channels + num_channels * j] = pc_features[i, j, :]
cluster_labels = spike_clusters[random_spike_inds]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
SS = np.empty((total_units, total_units))
SS[:] = np.nan
seen_unit_pairs = set()
for idx1, i in enumerate(cluster_ids):
if verbose:
printProgressBar(idx1 + 1, len(cluster_ids))
for idx2, j in enumerate(all_cluster_ids):
if (i, j) not in seen_unit_pairs and (j, i) not in seen_unit_pairs and i != j:
inds = np.in1d(cluster_labels, np.array([i, j]))
X = all_pcs[inds, :]
labels = cluster_labels[inds]
if len(labels) > 2:
SS[i, j] = silhouette_score(X, labels, random_state=seed)
seen_unit_pairs.add((i, j))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a = np.nanmin(SS, 0)
b = np.nanmin(SS, 1)
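        # Report, for every unit, the worst (minimum) pairwise silhouette score against
        # any other unit; taking nanmin over both rows and columns covers both orderings
        # in which a pair may have been stored in SS.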
return np.array([np.nanmin([a, b]) for a, b in zip(a, b)])
def calculate_drift_metrics(spike_times,
spike_clusters,
total_units,
pc_features,
pc_feature_ind,
interval_length,
min_spikes_per_interval,
vertical_channel_spacing=10,
channel_locations=None,
spike_cluster_subset=None,
verbose=True):
max_drift = np.zeros((total_units,))
cumulative_drift = np.zeros((total_units,))
positions = get_spike_positions(spike_clusters, pc_features, pc_feature_ind, channel_locations,
vertical_channel_spacing)
interval_starts = np.arange(np.min(spike_times), np.max(spike_times), interval_length)
interval_ends = interval_starts + interval_length
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, len(cluster_ids))
in_cluster = spike_clusters == cluster_id
times_for_cluster = spike_times[in_cluster]
positions_for_cluster = positions[in_cluster]
median_positions = []
for t1, t2 in zip(interval_starts, interval_ends):
in_range = (times_for_cluster > t1) * (times_for_cluster < t2)
if np.sum(in_range) >= min_spikes_per_interval:
median_positions.append(np.median(positions_for_cluster[in_range], 0))
else:
median_positions.append([np.nan, np.nan])
median_positions = np.array(median_positions)
        # Extract the upper-triangular (semi-)matrix of position shifts (used to compute max_drift and cumulative_drift)
position_diffs = np.zeros((len(median_positions), len(median_positions)))
for i, pos_i in enumerate(median_positions):
for j, pos_j in enumerate(median_positions):
if j > i:
if not np.isnan(pos_i[0]) and not np.isnan(pos_j[0]):
position_diffs[i, j] = np.linalg.norm(pos_i - pos_j)
else:
position_diffs[i, j] = 0
# Maximum drift among all periods
if np.any(position_diffs > 0):
max_drift[cluster_id] = np.around(np.max(position_diffs[position_diffs > 0]), 2)
# The +1 diagonal contains the step-by-step drifts between intervals.
# Summing them up we obtain cumulative drift
cumulative_drift[cluster_id] = np.around(np.sum(np.diag(position_diffs, 1)), 2)
else:
# not enough spikes
max_drift[cluster_id] = 0
cumulative_drift[cluster_id] = 0
return max_drift, cumulative_drift
# ==========================================================
# IMPLEMENTATION OF ACTUAL METRICS:
# ==========================================================
def isi_violations(spike_train, duration, isi_threshold, min_isi=0):
"""Calculate Inter-Spike Interval (ISI) violations for a spike train.
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Originally written in Matlab by <NAME> (https://github.com/cortex-lab/sortingQuality)
Converted to Python by <NAME>
Inputs:
-------
spike_train : array of monotonically increasing spike times (in seconds) [t1, t2, t3, ...]
duration : length of recording (seconds)
isi_threshold : threshold for classifying adjacent spikes as an ISI violation
- this is the biophysical refractory period
min_isi : minimum possible inter-spike interval (default = 0)
- this is the artificial refractory period enforced by the data acquisition system
or post-processing algorithms
Outputs:
--------
fpRate : rate of contaminating spikes as a fraction of overall rate
- higher values indicate more contamination
num_violations : total number of violations detected
"""
isis_initial = np.diff(spike_train)
if min_isi > 0:
duplicate_spikes = np.where(isis_initial <= min_isi)[0]
spike_train = np.delete(spike_train, duplicate_spikes + 1)
isis = np.diff(spike_train)
num_spikes = len(spike_train)
num_violations = sum(isis < isi_threshold)
violation_time = 2 * num_spikes * (isi_threshold - min_isi)
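    # Total amount of time in which a contaminating spike could have produced a
    # violation: a window of (isi_threshold - min_isi) on either side of each true spike.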
total_rate = firing_rate(spike_train, duration)
violation_rate = num_violations / violation_time
fpRate = violation_rate / total_rate
return fpRate, num_violations
def presence_ratio(spike_train, duration, num_bin_edges=101):
"""Calculate fraction of time the unit is present within an epoch.
Inputs:
-------
spike_train : array of spike times
duration : length of recording (seconds)
num_bin_edges : number of bin edges for histogram
- total bins = num_bin_edges - 1
Outputs:
--------
presence_ratio : fraction of time bins in which this unit is spiking
"""
h, b = np.histogram(spike_train, np.linspace(0, duration, num_bin_edges))
return np.sum(h > 0) / (num_bin_edges - 1)
def firing_rate(spike_train, duration):
"""Calculate firing rate for a spike train.
Inputs:
-------
spike_train : array of spike times (in seconds)
duration : length of recording (in seconds)
Outputs:
--------
fr : float
Firing rate in Hz
"""
fr = spike_train.size / duration
return fr
def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
""" Calculate approximate fraction of spikes missing from a distribution of amplitudes
Assumes the amplitude histogram is symmetric (not valid in the presence of drift)
Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Input:
------
amplitudes : numpy.ndarray
Array of amplitudes (don't need to be in physical units)
num_histogram_bins : int
Number of bins for calculating amplitude histogram
histogram_smoothing_value : float
Gaussian filter window for smoothing amplitude histogram
Output:
-------
fraction_missing : float
Fraction of missing spikes (ranges between 0 and 0.5)
If more than 50% of spikes are missing, an accurate estimate isn't possible
"""
h, b = np.histogram(amplitudes, num_histogram_bins, density=True)
pdf = gaussian_filter1d(h, histogram_smoothing_value)
support = b[:-1]
peak_index = np.argmax(pdf)
G = np.argmin(np.abs(pdf[peak_index:] - pdf[0])) + peak_index
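    # G is the bin on the high-amplitude side where the smoothed histogram falls back to
    # its value at the low-amplitude edge; assuming a symmetric distribution, the mass
    # beyond G approximates the fraction of spikes clipped off below the detection threshold.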
bin_size = np.mean(np.diff(support))
fraction_missing = np.sum(pdf[G:]) * bin_size
fraction_missing = np.min([fraction_missing, 0.5])
return fraction_missing
def mahalanobis_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates isolation distance and L-ratio (metrics computed from Mahalanobis distance)
Based on metrics described in Schmitzer-Torbert et al. (2005) Neurosci 131: 1-11
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
isolation_distance : float
Isolation distance of this unit
l_ratio : float
L-ratio for this unit
"""
pcs_for_this_unit = all_pcs[all_labels == this_unit_id, :]
pcs_for_other_units = all_pcs[all_labels != this_unit_id, :]
mean_value = np.expand_dims(np.mean(pcs_for_this_unit, 0), 0)
try:
VI = np.linalg.inv(np.cov(pcs_for_this_unit.T))
except np.linalg.linalg.LinAlgError: # case of singular matrix
return np.nan, np.nan
mahalanobis_other = np.sort(cdist(mean_value,
pcs_for_other_units,
'mahalanobis', VI=VI)[0])
mahalanobis_self = np.sort(cdist(mean_value,
pcs_for_this_unit,
'mahalanobis', VI=VI)[0])
n = np.min([pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]]) # number of spikes
if n >= 2:
dof = pcs_for_this_unit.shape[1] # number of features
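        # L-ratio: sum over other-unit spikes of the chi-square tail probability of their
        # squared Mahalanobis distance, divided by this unit's spike count.
        # Isolation distance: squared Mahalanobis distance to the n-th closest
        # other-unit spike, where n is this unit's spike count.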
l_ratio = np.sum(1 - chi2.cdf(pow(mahalanobis_other, 2), dof)) / mahalanobis_self.shape[0]
isolation_distance = pow(mahalanobis_other[n - 1], 2)
# if math.isnan(l_ratio):
# print("NaN detected", mahalanobis_other, VI)
else:
l_ratio = np.nan
isolation_distance = np.nan
return isolation_distance, l_ratio
def lda_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates d-prime based on Linear Discriminant Analysis
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
d_prime : float
Isolation distance of this unit
l_ratio : float
L-ratio for this unit
"""
X = all_pcs
y = np.zeros((X.shape[0],), dtype='bool')
y[all_labels == this_unit_id] = True
lda = LDA(n_components=1)
X_flda = lda.fit_transform(X, y)
flda_this_cluster = X_flda[np.where(y)[0]]
flda_other_cluster = X_flda[np.where(np.invert(y))[0]]
d_prime = (np.mean(flda_this_cluster) - np.mean(flda_other_cluster)) / np.sqrt(
0.5 * (np.std(flda_this_cluster) ** 2 + np.std(flda_other_cluster) ** 2))
return d_prime
def nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id, spikes_for_nn, n_neighbors):
""" Calculates unit contamination based on NearestNeighbors search in PCA space
Based on metrics described in Chung, Magland et al. (2017) Neuron 95: 1381-1394
A is a (hopefully) representative subset of cluster X
NN_hit(X) = 1/k \sum_i=1^k |{x in A such that ith closest neighbor is in X}| / |A|
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
spikes_for_nn : Int
number of spikes to use (calculation can be very slow when this number is >20000)
n_neighbors : Int
number of neighbors to use
Outputs:
--------
hit_rate : float
Fraction of neighbors for target cluster that are also in target cluster
miss_rate : float
Fraction of neighbors outside target cluster that are in target cluster
"""
total_spikes = all_pcs.shape[0]
ratio = spikes_for_nn / total_spikes
this_unit = all_labels == this_unit_id
X = np.concatenate((all_pcs[this_unit, :], all_pcs[np.invert(this_unit), :]), 0)
n = np.sum(this_unit)
if ratio < 1:
inds = np.arange(0, X.shape[0] - 1, 1 / ratio).astype('int')
X = X[inds, :]
n = int(n * ratio)
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
this_cluster_inds = np.arange(n)
this_cluster_nearest = indices[:n, 1:].flatten()
other_cluster_nearest = indices[n:, 1:].flatten()
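    # Because the first n rows of X are this unit's spikes, a neighbour index < n means
    # the neighbour belongs to this unit: hit_rate = fraction of this unit's neighbours
    # that stay within the unit, miss_rate = fraction of other spikes' neighbours that
    # fall into this unit.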
hit_rate = np.mean(this_cluster_nearest < n)
miss_rate = np.mean(other_cluster_nearest < n)
return hit_rate, miss_rate
# ==========================================================
# HELPER FUNCTIONS:
# ==========================================================
def make_index_mask(spike_clusters, unit_id, min_num, max_num, seed=None):
""" Create a mask for the spike index dimensions of the pc_features array
Inputs:
-------
spike_clusters : numpy.ndarray (num_spikes x 0)
Contains cluster IDs for all spikes in pc_features array
unit_id : Int
ID for this unit
min_num : Int
Minimum number of spikes to return; if there are not enough spikes for this unit, return all False
max_num : Int
Maximum number of spikes to return; if too many spikes for this unit, return a random subsample
seed: int
Random seed for reproducibility
Output:
-------
index_mask : numpy.ndarray (boolean)
Mask of spike indices for pc_features array
"""
index_mask = spike_clusters == unit_id
inds = np.where(index_mask)[0]
if len(inds) < min_num:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
else:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
order = np.random.RandomState(seed=seed).permutation(inds.size)
index_mask[inds[order[:max_num]]] = True
return index_mask
def make_channel_mask(unit_id, pc_feature_ind, channels_to_use):
""" Create a mask for the channel dimension of the pc_features array
Inputs:
-------
unit_id : Int
ID for this unit
pc_feature_ind : np.ndarray
Channels used for PC calculation for each unit
channels_to_use : np.ndarray
Channels to use for calculating metrics
Output:
-------
channel_mask : numpy.ndarray
Channel indices to extract from pc_features array
"""
these_inds = pc_feature_ind[unit_id, :]
channel_mask = [np.argwhere(these_inds == i)[0][0] for i in channels_to_use]
return np.array(channel_mask)
def get_unit_pcs(these_pc_features, index_mask, channel_mask):
""" Use the index_mask and channel_mask to return PC features for one unit
Inputs:
-------
these_pc_features : numpy.ndarray (float)
Array of pre-computed PC features (num_spikes x num_PCs x num_channels)
index_mask : numpy.ndarray (boolean)
Mask for spike index dimension of pc_features array
channel_mask : numpy.ndarray (boolean)
Mask for channel index dimension of pc_features array
Output:
-------
unit_PCs : numpy.ndarray (float)
PCs for one unit (num_spikes x num_PCs x num_channels)
"""
unit_PCs = these_pc_features[index_mask, :, :]
unit_PCs = unit_PCs[:, :, channel_mask]
return unit_PCs
def find_neighboring_channels(peak_channel, channel_list, num_channels_to_compare, channel_locations):
"""
Finds k nearest channels to the peak channel of a unit
Parameters
----------
peak_channel: int
ID of channel with largest waveform amplitude
channel_list: numpy.ndarray
IDs of channels being considered
num_channels_to_compare: int
Number of nearest channels to return
channel_locations: numpy.ndarray, (n_channels, 2)
x,y coordinates of the channels in channel_list
Returns
-------
neighboring_channels: array_like
id of k channels that neighbor peak channel (including the peak channel itself)
"""
# get peak channel location
channel_idx = list(channel_list).index(peak_channel)
peak_channel_location = channel_locations[channel_idx]
# compute pairwise distance
distances = [np.linalg.norm(peak_channel_location - loc) for loc in channel_locations]
# get k closest channels (+1 because distance 0 is peak_channel)
neighboring_channels_inds = np.argsort(distances)[:num_channels_to_compare]
neighboring_channels = channel_list[neighboring_channels_inds]
return neighboring_channels
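# Illustrative usage sketch (not part of the original source): four channels
# spaced 20 um apart along one probe column; the three channels nearest to
# peak channel 2 are channels 2, 1 and 3.
#
# >>> channel_list = np.array([0, 1, 2, 3])
# >>> channel_locations = np.array([[0, 0], [0, 20], [0, 40], [0, 60]])
# >>> find_neighboring_channels(2, channel_list, 3, channel_locations)
# array([2, 1, 3])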
|
[
"numpy.invert",
"numpy.argsort",
"numpy.array",
"numpy.linalg.norm",
"numpy.nanmin",
"numpy.cov",
"numpy.arange",
"numpy.random.RandomState",
"numpy.mean",
"numpy.histogram",
"numpy.reshape",
"numpy.where",
"numpy.delete",
"numpy.diff",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"sklearn.neighbors.NearestNeighbors",
"numpy.min",
"pandas.DataFrame",
"warnings.simplefilter",
"numpy.concatenate",
"sklearn.metrics.silhouette_score",
"numpy.abs",
"collections.OrderedDict",
"numpy.ones",
"numpy.argmax",
"numpy.any",
"numpy.squeeze",
"numpy.isnan",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.std",
"numpy.median",
"numpy.unique",
"numpy.logical_and",
"scipy.spatial.distance.cdist",
"warnings.catch_warnings",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
] |
[((1982, 1996), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1994, 1996), True, 'import pandas as pd\n'), ((8894, 8918), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (8902, 8918), True, 'import numpy as np\n'), ((9794, 9818), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (9802, 9818), True, 'import numpy as np\n'), ((10294, 10318), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (10302, 10318), True, 'import numpy as np\n'), ((11027, 11051), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (11035, 11051), True, 'import numpy as np\n'), ((11694, 11718), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (11702, 11718), True, 'import numpy as np\n'), ((14464, 14489), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (14473, 14489), True, 'import numpy as np\n'), ((14643, 14683), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {'dtype': '"""uint16"""'}), "((total_units,), dtype='uint16')\n", (14651, 14683), True, 'import numpy as np\n'), ((14711, 14759), 'numpy.zeros', 'np.zeros', (['(total_units, num_channels_to_compare)'], {}), '((total_units, num_channels_to_compare))\n', (14719, 14759), True, 'import numpy as np\n'), ((14786, 14810), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14794, 14810), True, 'import numpy as np\n'), ((14826, 14850), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14834, 14850), True, 'import numpy as np\n'), ((14866, 14890), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14874, 14890), True, 'import numpy as np\n'), ((14910, 14934), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14918, 14934), True, 'import numpy as np\n'), ((14955, 14979), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (14963, 14979), True, 'import numpy as np\n'), ((20546, 20611), 'numpy.zeros', 'np.zeros', (['(spikes_for_silhouette, num_channels * num_pc_features)'], {}), '((spikes_for_silhouette, num_channels * num_pc_features))\n', (20554, 20611), True, 'import numpy as np\n'), ((20945, 20970), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (20954, 20970), True, 'import numpy as np\n'), ((21113, 21149), 'numpy.empty', 'np.empty', (['(total_units, total_units)'], {}), '((total_units, total_units))\n', (21121, 21149), True, 'import numpy as np\n'), ((22501, 22525), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (22509, 22525), True, 'import numpy as np\n'), ((22549, 22573), 'numpy.zeros', 'np.zeros', (['(total_units,)'], {}), '((total_units,))\n', (22557, 22573), True, 'import numpy as np\n'), ((26111, 26131), 'numpy.diff', 'np.diff', (['spike_train'], {}), '(spike_train)\n', (26118, 26131), True, 'import numpy as np\n'), ((26296, 26316), 'numpy.diff', 'np.diff', (['spike_train'], {}), '(spike_train)\n', (26303, 26316), True, 'import numpy as np\n'), ((28538, 28596), 'numpy.histogram', 'np.histogram', (['amplitudes', 'num_histogram_bins'], {'density': '(True)'}), '(amplitudes, num_histogram_bins, density=True)\n', (28550, 28596), True, 'import numpy as np\n'), ((28608, 28655), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['h', 'histogram_smoothing_value'], {}), '(h, histogram_smoothing_value)\n', (28625, 28655), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((28695, 28709), 
'numpy.argmax', 'np.argmax', (['pdf'], {}), '(pdf)\n', (28704, 28709), True, 'import numpy as np\n'), ((28892, 28923), 'numpy.min', 'np.min', (['[fraction_missing, 0.5]'], {}), '([fraction_missing, 0.5])\n', (28898, 28923), True, 'import numpy as np\n'), ((30377, 30443), 'numpy.min', 'np.min', (['[pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]]'], {}), '([pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]])\n', (30383, 30443), True, 'import numpy as np\n'), ((31589, 31626), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {'dtype': '"""bool"""'}), "((X.shape[0],), dtype='bool')\n", (31597, 31626), True, 'import numpy as np\n'), ((31679, 31698), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LDA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (31682, 31698), True, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n'), ((33381, 33398), 'numpy.sum', 'np.sum', (['this_unit'], {}), '(this_unit)\n', (33387, 33398), True, 'import numpy as np\n'), ((33690, 33702), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (33699, 33702), True, 'import numpy as np\n'), ((33827, 33860), 'numpy.mean', 'np.mean', (['(this_cluster_nearest < n)'], {}), '(this_cluster_nearest < n)\n', (33834, 33860), True, 'import numpy as np\n'), ((33877, 33911), 'numpy.mean', 'np.mean', (['(other_cluster_nearest < n)'], {}), '(other_cluster_nearest < n)\n', (33884, 33911), True, 'import numpy as np\n'), ((35893, 35915), 'numpy.array', 'np.array', (['channel_mask'], {}), '(channel_mask)\n', (35901, 35915), True, 'import numpy as np\n'), ((2096, 2118), 'numpy.max', 'np.max', (['spike_clusters'], {}), '(spike_clusters)\n', (2102, 2118), True, 'import numpy as np\n'), ((2199, 2276), 'numpy.logical_and', 'np.logical_and', (['(spike_times >= epoch.start_time)', '(spike_times < epoch.end_time)'], {}), '(spike_times >= epoch.start_time, spike_times < epoch.end_time)\n', (2213, 2276), True, 'import numpy as np\n'), ((2303, 2319), 'numpy.sum', 'np.sum', (['in_epoch'], {}), '(in_epoch)\n', (2309, 2319), True, 'import numpy as np\n'), ((8850, 8875), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (8859, 8875), True, 'import numpy as np\n'), ((9754, 9779), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (9763, 9779), True, 'import numpy as np\n'), ((10435, 10460), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (10444, 10460), True, 'import numpy as np\n'), ((10981, 11006), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (10990, 11006), True, 'import numpy as np\n'), ((11643, 11668), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (11652, 11668), True, 'import numpy as np\n'), ((15055, 15095), 'numpy.squeeze', 'np.squeeze', (['(spike_clusters == cluster_id)'], {}), '(spike_clusters == cluster_id)\n', (15065, 15095), True, 'import numpy as np\n'), ((16505, 16538), 'numpy.zeros', 'np.zeros', (['units_for_channel.shape'], {}), '(units_for_channel.shape)\n', (16513, 16538), True, 'import numpy as np\n'), ((17091, 17148), 'numpy.zeros', 'np.zeros', (['(0, pc_features.shape[1], channels_to_use.size)'], {}), '((0, pc_features.shape[1], channels_to_use.size))\n', (17099, 17148), True, 'import numpy as np\n'), ((17170, 17184), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (17178, 17184), True, 'import numpy as np\n'), ((18216, 18304), 'numpy.reshape', 'np.reshape', (['all_pcs', '(all_pcs.shape[0], pc_features.shape[1] * 
channels_to_use.size)'], {}), '(all_pcs, (all_pcs.shape[0], pc_features.shape[1] *\n channels_to_use.size))\n', (18226, 18304), True, 'import numpy as np\n'), ((20504, 20526), 'numpy.max', 'np.max', (['pc_feature_ind'], {}), '(pc_feature_ind)\n', (20510, 20526), True, 'import numpy as np\n'), ((21778, 21803), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21801, 21803), False, 'import warnings\n'), ((21813, 21844), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (21834, 21844), False, 'import warnings\n'), ((21857, 21873), 'numpy.nanmin', 'np.nanmin', (['SS', '(0)'], {}), '(SS, 0)\n', (21866, 21873), True, 'import numpy as np\n'), ((21886, 21902), 'numpy.nanmin', 'np.nanmin', (['SS', '(1)'], {}), '(SS, 1)\n', (21895, 21902), True, 'import numpy as np\n'), ((22769, 22788), 'numpy.min', 'np.min', (['spike_times'], {}), '(spike_times)\n', (22775, 22788), True, 'import numpy as np\n'), ((22790, 22809), 'numpy.max', 'np.max', (['spike_times'], {}), '(spike_times)\n', (22796, 22809), True, 'import numpy as np\n'), ((22999, 23024), 'numpy.unique', 'np.unique', (['spike_clusters'], {}), '(spike_clusters)\n', (23008, 23024), True, 'import numpy as np\n'), ((23736, 23762), 'numpy.array', 'np.array', (['median_positions'], {}), '(median_positions)\n', (23744, 23762), True, 'import numpy as np\n'), ((24356, 24382), 'numpy.any', 'np.any', (['(position_diffs > 0)'], {}), '(position_diffs > 0)\n', (24362, 24382), True, 'import numpy as np\n'), ((26239, 26283), 'numpy.delete', 'np.delete', (['spike_train', '(duplicate_spikes + 1)'], {}), '(spike_train, duplicate_spikes + 1)\n', (26248, 26283), True, 'import numpy as np\n'), ((27127, 27166), 'numpy.linspace', 'np.linspace', (['(0)', 'duration', 'num_bin_edges'], {}), '(0, duration, num_bin_edges)\n', (27138, 27166), True, 'import numpy as np\n'), ((27180, 27193), 'numpy.sum', 'np.sum', (['(h > 0)'], {}), '(h > 0)\n', (27186, 27193), True, 'import numpy as np\n'), ((28800, 28816), 'numpy.diff', 'np.diff', (['support'], {}), '(support)\n', (28807, 28816), True, 'import numpy as np\n'), ((28841, 28856), 'numpy.sum', 'np.sum', (['pdf[G:]'], {}), '(pdf[G:])\n', (28847, 28856), True, 'import numpy as np\n'), ((29827, 29856), 'numpy.mean', 'np.mean', (['pcs_for_this_unit', '(0)'], {}), '(pcs_for_this_unit, 0)\n', (29834, 29856), True, 'import numpy as np\n'), ((34913, 34933), 'numpy.where', 'np.where', (['index_mask'], {}), '(index_mask)\n', (34921, 34933), True, 'import numpy as np\n'), ((34987, 35033), 'numpy.zeros', 'np.zeros', (['(spike_clusters.size,)'], {'dtype': '"""bool"""'}), "((spike_clusters.size,), dtype='bool')\n", (34995, 35033), True, 'import numpy as np\n'), ((35065, 35111), 'numpy.zeros', 'np.zeros', (['(spike_clusters.size,)'], {'dtype': '"""bool"""'}), "((spike_clusters.size,), dtype='bool')\n", (35073, 35111), True, 'import numpy as np\n'), ((37564, 37607), 'numpy.linalg.norm', 'np.linalg.norm', (['(peak_channel_location - loc)'], {}), '(peak_channel_location - loc)\n', (37578, 37607), True, 'import numpy as np\n'), ((37739, 37760), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (37749, 37760), True, 'import numpy as np\n'), ((6726, 6748), 'numpy.arange', 'np.arange', (['total_units'], {}), '(total_units)\n', (6735, 6748), True, 'import numpy as np\n'), ((15123, 15162), 'numpy.mean', 'np.mean', (['pc_features[for_unit, 0, :]', '(0)'], {}), '(pc_features[for_unit, 0, :], 0)\n', (15130, 15162), True, 'import numpy as np\n'), ((16636, 16690), 'numpy.sum', 
'np.sum', (['(spike_clusters == all_cluster_ids[cluster_id2])'], {}), '(spike_clusters == all_cluster_ids[cluster_id2])\n', (16642, 16690), True, 'import numpy as np\n'), ((16813, 16847), 'numpy.where', 'np.where', (['(units_for_channel == idx)'], {}), '(units_for_channel == idx)\n', (16821, 16847), True, 'import numpy as np\n'), ((20310, 20342), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (20331, 20342), True, 'import numpy as np\n'), ((21925, 21942), 'numpy.nanmin', 'np.nanmin', (['[a, b]'], {}), '([a, b])\n', (21934, 21942), True, 'import numpy as np\n'), ((26180, 26213), 'numpy.where', 'np.where', (['(isis_initial <= min_isi)'], {}), '(isis_initial <= min_isi)\n', (26188, 26213), True, 'import numpy as np\n'), ((28728, 28761), 'numpy.abs', 'np.abs', (['(pdf[peak_index:] - pdf[0])'], {}), '(pdf[peak_index:] - pdf[0])\n', (28734, 28761), True, 'import numpy as np\n'), ((29898, 29925), 'numpy.cov', 'np.cov', (['pcs_for_this_unit.T'], {}), '(pcs_for_this_unit.T)\n', (29904, 29925), True, 'import numpy as np\n'), ((30058, 30118), 'scipy.spatial.distance.cdist', 'cdist', (['mean_value', 'pcs_for_other_units', '"""mahalanobis"""'], {'VI': 'VI'}), "(mean_value, pcs_for_other_units, 'mahalanobis', VI=VI)\n", (30063, 30118), False, 'from scipy.spatial.distance import cdist\n'), ((30231, 30289), 'scipy.spatial.distance.cdist', 'cdist', (['mean_value', 'pcs_for_this_unit', '"""mahalanobis"""'], {'VI': 'VI'}), "(mean_value, pcs_for_this_unit, 'mahalanobis', VI=VI)\n", (30236, 30289), False, 'from scipy.spatial.distance import cdist\n'), ((31769, 31780), 'numpy.where', 'np.where', (['y'], {}), '(y)\n', (31777, 31780), True, 'import numpy as np\n'), ((31860, 31886), 'numpy.mean', 'np.mean', (['flda_this_cluster'], {}), '(flda_this_cluster)\n', (31867, 31886), True, 'import numpy as np\n'), ((31889, 31916), 'numpy.mean', 'np.mean', (['flda_other_cluster'], {}), '(flda_other_cluster)\n', (31896, 31916), True, 'import numpy as np\n'), ((33549, 33613), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neighbors', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=n_neighbors, algorithm='ball_tree')\n", (33565, 33613), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((18094, 18127), 'numpy.concatenate', 'np.concatenate', (['(all_pcs, pcs)', '(0)'], {}), '((all_pcs, pcs), 0)\n', (18108, 18127), True, 'import numpy as np\n'), ((18157, 18196), 'numpy.concatenate', 'np.concatenate', (['(all_labels, labels)', '(0)'], {}), '((all_labels, labels), 0)\n', (18171, 18196), True, 'import numpy as np\n'), ((23500, 23516), 'numpy.sum', 'np.sum', (['in_range'], {}), '(in_range)\n', (23506, 23516), True, 'import numpy as np\n'), ((24430, 24472), 'numpy.max', 'np.max', (['position_diffs[position_diffs > 0]'], {}), '(position_diffs[position_diffs > 0])\n', (24436, 24472), True, 'import numpy as np\n'), ((31826, 31838), 'numpy.invert', 'np.invert', (['y'], {}), '(y)\n', (31835, 31838), True, 'import numpy as np\n'), ((33433, 33472), 'numpy.arange', 'np.arange', (['(0)', '(X.shape[0] - 1)', '(1 / ratio)'], {}), '(0, X.shape[0] - 1, 1 / ratio)\n', (33442, 33472), True, 'import numpy as np\n'), ((35128, 35160), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (35149, 35160), True, 'import numpy as np\n'), ((35820, 35848), 'numpy.argwhere', 'np.argwhere', (['(these_inds == i)'], {}), '(these_inds == i)\n', (35831, 35848), True, 'import numpy as np\n'), ((18011, 18035), 'numpy.ones', 'np.ones', 
(['(pcs.shape[0],)'], {}), '((pcs.shape[0],))\n', (18018, 18035), True, 'import numpy as np\n'), ((21509, 21525), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (21517, 21525), True, 'import numpy as np\n'), ((21677, 21723), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'labels'], {'random_state': 'seed'}), '(X, labels, random_state=seed)\n', (21693, 21723), False, 'from sklearn.metrics import silhouette_score\n'), ((23585, 23630), 'numpy.median', 'np.median', (['positions_for_cluster[in_range]', '(0)'], {}), '(positions_for_cluster[in_range], 0)\n', (23594, 23630), True, 'import numpy as np\n'), ((24676, 24702), 'numpy.diag', 'np.diag', (['position_diffs', '(1)'], {}), '(position_diffs, 1)\n', (24683, 24702), True, 'import numpy as np\n'), ((33342, 33362), 'numpy.invert', 'np.invert', (['this_unit'], {}), '(this_unit)\n', (33351, 33362), True, 'import numpy as np\n'), ((6919, 7412), 'collections.OrderedDict', 'OrderedDict', (["(('cluster_id', cluster_ids_out), ('firing_rate', firing_rate), (\n 'presence_ratio', presence_ratio), ('isi_violation', isi_viol), (\n 'amplitude_cutoff', amplitude_cutoff), ('isolation_distance',\n isolation_distance), ('l_ratio', l_ratio), ('d_prime', d_prime), (\n 'nn_hit_rate', nn_hit_rate), ('nn_miss_rate', nn_miss_rate), (\n 'silhouette_score', silhouette_score), ('max_drift', max_drift), (\n 'cumulative_drift', cumulative_drift), ('epoch_name', epoch_name))"], {}), "((('cluster_id', cluster_ids_out), ('firing_rate', firing_rate),\n ('presence_ratio', presence_ratio), ('isi_violation', isi_viol), (\n 'amplitude_cutoff', amplitude_cutoff), ('isolation_distance',\n isolation_distance), ('l_ratio', l_ratio), ('d_prime', d_prime), (\n 'nn_hit_rate', nn_hit_rate), ('nn_miss_rate', nn_miss_rate), (\n 'silhouette_score', silhouette_score), ('max_drift', max_drift), (\n 'cumulative_drift', cumulative_drift), ('epoch_name', epoch_name)))\n", (6930, 7412), False, 'from collections import OrderedDict\n'), ((24197, 24226), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos_i - pos_j)'], {}), '(pos_i - pos_j)\n', (24211, 24226), True, 'import numpy as np\n'), ((31944, 31969), 'numpy.std', 'np.std', (['flda_this_cluster'], {}), '(flda_this_cluster)\n', (31950, 31969), True, 'import numpy as np\n'), ((31977, 32003), 'numpy.std', 'np.std', (['flda_other_cluster'], {}), '(flda_other_cluster)\n', (31983, 32003), True, 'import numpy as np\n'), ((24103, 24121), 'numpy.isnan', 'np.isnan', (['pos_i[0]'], {}), '(pos_i[0])\n', (24111, 24121), True, 'import numpy as np\n'), ((24130, 24148), 'numpy.isnan', 'np.isnan', (['pos_j[0]'], {}), '(pos_j[0])\n', (24138, 24148), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import copy
from .pdp_calc_utils import _sample_data, _find_onehot_actual, _find_closest
from sklearn.cluster import MiniBatchKMeans, KMeans
def _pdp_plot_title(n_grids, feature_name, ax, multi_flag, which_class, plot_params):
"""
Draw pdp plot title
:param n_grids: number of grids
:param feature_name: name of the feature
:param ax: axes to plot on
    :param multi_flag: whether it is a subplot of a multi-class plot
:param which_class: which class to plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'PDP for %s' % feature_name
subtitle = "Number of unique grid points: %d" % n_grids
title_fontsize = 15
subtitle_fontsize = 12
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if multi_flag:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.45, "For Class %d" % which_class, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family)
ax.text(0, 0.25, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
else:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.4, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
ax.axis('off')
def _axes_modify(font_family, ax, top=False, right=False, legend=False):
# modify the axes
for tick in ax.get_xticklabels():
tick.set_fontname(font_family)
for tick in ax.get_yticklabels():
tick.set_fontname(font_family)
ax.set_facecolor('white')
ax.tick_params(axis='both', which='major', labelsize=10, labelcolor='#424242', colors='#9E9E9E')
for d in ['top', 'bottom', 'right', 'left']:
ax.spines[d].set_visible(False)
if not legend:
if top:
ax.get_xaxis().tick_top()
elif right:
ax.get_yaxis().tick_right()
else:
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.grid(True, 'major', 'x', ls='--', lw=.5, c='k', alpha=.3)
ax.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
else:
ax.set_xticks([])
ax.set_yticks([])
def _pdp_plot(pdp_isolate_out, feature_name, center, plot_org_pts, plot_lines, frac_to_plot,
cluster, n_cluster_centers, cluster_method, x_quantile, ax, plot_params):
"""
Plot partial dependent plot
:param pdp_isolate_out: instance of pdp_isolate_obj
a calculated pdp_isolate_obj instance
:param feature_name: string
name of the feature, not necessary the same as the column name
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
cluster method to use, default is KMeans, if 'approx' is passed, MiniBatchKMeans is used
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
xticks_rotation = 0
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
# modify axes
_axes_modify(font_family, ax)
ax.set_xlabel(feature_name, fontsize=10)
feature_type = pdp_isolate_out.feature_type
feature_grids = pdp_isolate_out.feature_grids
display_columns = pdp_isolate_out.display_columns
actual_columns = pdp_isolate_out.actual_columns
if feature_type == 'binary' or feature_type == 'onehot' or x_quantile:
x = range(len(feature_grids))
ax.set_xticks(x)
ax.set_xticklabels(display_columns, rotation=xticks_rotation)
else:
# for numeric feature
x = feature_grids
ice_lines = copy.deepcopy(pdp_isolate_out.ice_lines)
pdp_y = copy.deepcopy(pdp_isolate_out.pdp)
# whether to fill between std upper and lower
# whether to highlight pdp line
std_fill = True
pdp_hl = False
# whether to center the plot
if center:
pdp_y -= pdp_y[0]
for col in feature_grids[1:]:
ice_lines[col] -= ice_lines[feature_grids[0]]
ice_lines['actual_preds'] -= ice_lines[feature_grids[0]]
ice_lines[feature_grids[0]] = 0
if cluster or plot_lines:
std_fill = False
pdp_hl = True
if cluster:
_ice_cluster_plot(x=x, ice_lines=ice_lines, feature_grids=feature_grids, n_cluster_centers=n_cluster_centers,
cluster_method=cluster_method, ax=ax, plot_params=plot_params)
else:
ice_plot_data = _sample_data(ice_lines=ice_lines, frac_to_plot=frac_to_plot)
_ice_line_plot(x=x, ice_plot_data=ice_plot_data, feature_grids=feature_grids, ax=ax, plot_params=plot_params)
if plot_org_pts:
ice_lines_temp = ice_lines.copy()
if feature_type == 'onehot':
ice_lines_temp['x'] = ice_lines_temp[actual_columns].apply(lambda x: _find_onehot_actual(x), axis=1)
ice_lines_temp = ice_lines_temp[~ice_lines_temp['x'].isnull()].reset_index(drop=True)
elif feature_type == 'numeric':
feature_grids = pdp_isolate_out.feature_grids
ice_lines_temp = ice_lines_temp[(ice_lines_temp[actual_columns[0]] >= feature_grids[0])
& (ice_lines_temp[actual_columns[0]] <= feature_grids[-1])]
if x_quantile:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]].apply(lambda x: _find_closest(x, feature_grids))
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
ice_plot_data_pts = _sample_data(ice_lines=ice_lines_temp, frac_to_plot=frac_to_plot)
_ice_plot_pts(ice_plot_data_pts=ice_plot_data_pts, ax=ax, plot_params=plot_params)
std = ice_lines[feature_grids].std().values
_pdp_std_plot(x=x, y=pdp_y, std=std, std_fill=std_fill, pdp_hl=pdp_hl, ax=ax, plot_params=plot_params)
def _pdp_std_plot(x, y, std, std_fill, pdp_hl, ax, plot_params):
"""
PDP basic plot
:param x: x axis values
:param y: pdp values
:param std: std values
:param std_fill: whether to fill between std upper and lower
:param pdp_hl: whether to highlight pdp line
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
upper = y + std
lower = y - std
pdp_color = '#1A4E5D'
pdp_hl_color = '#FEDC00'
pdp_linewidth = 2
zero_color = '#E75438'
zero_linewidth = 1.5
fill_color = '#66C2D7'
fill_alpha = 0.2
markersize = 5
if plot_params is not None:
if 'pdp_color' in plot_params.keys():
pdp_color = plot_params['pdp_color']
if 'pdp_hl_color' in plot_params.keys():
pdp_hl_color = plot_params['pdp_hl_color']
if 'pdp_linewidth' in plot_params.keys():
pdp_linewidth = plot_params['pdp_linewidth']
if 'zero_color' in plot_params.keys():
zero_color = plot_params['zero_color']
if 'zero_linewidth' in plot_params.keys():
zero_linewidth = plot_params['zero_linewidth']
if 'fill_color' in plot_params.keys():
fill_color = plot_params['fill_color']
if 'fill_alpha' in plot_params.keys():
fill_alpha = plot_params['fill_alpha']
if 'markersize' in plot_params.keys():
markersize = plot_params['markersize']
if pdp_hl:
ax.plot(x, y, color=pdp_hl_color, linewidth=pdp_linewidth * 3, alpha=0.8)
ax.plot(x, y, color=pdp_color, linewidth=pdp_linewidth, marker='o', markersize=markersize)
ax.plot(x, [0] * y, linestyle='--', linewidth=zero_linewidth, color=zero_color)
if std_fill:
ax.fill_between(x, upper, lower, alpha=fill_alpha, color=fill_color)
ax.set_ylim(np.min([np.min(lower) * 2, 0]), np.max([np.max(upper) * 2, 0]))
def _ice_plot_pts(ice_plot_data_pts, ax, plot_params):
"""
Plot the real data points
:param ice_plot_data_pts: data points to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
point_size = 50
point_pos_color = '#5BB573'
point_neg_color = '#E75438'
if plot_params is not None:
if 'point_size' in plot_params.keys():
point_size = plot_params['point_size']
if 'point_pos_color' in plot_params.keys():
point_pos_color = plot_params['point_pos_color']
if 'point_neg_color' in plot_params.keys():
point_neg_color = plot_params['point_neg_color']
ice_plot_data_pts['color'] = ice_plot_data_pts['actual_preds'].apply(lambda x: point_pos_color if x >= 0 else point_neg_color)
ax.scatter(ice_plot_data_pts['x'], ice_plot_data_pts['actual_preds'], s=point_size, marker="+", linewidth=1,
color=ice_plot_data_pts['color'])
def _ice_line_plot(x, ice_plot_data, feature_grids, ax, plot_params):
"""
Plot the ice lines
:param x: x axis values
:param ice_plot_data: ice lines to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
linewidth = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
linealpha = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
line_cmap = 'Blues'
if plot_params is not None:
if 'line_cmap' in plot_params.keys():
line_cmap = plot_params['line_cmap']
colors = plt.get_cmap(line_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(ice_plot_data)):
y = list(ice_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=linewidth, c=colors[i % 10], alpha=linealpha)
def _ice_cluster_plot(x, ice_lines, feature_grids, n_cluster_centers, cluster_method, ax, plot_params):
"""
Cluster the ice lines and plot out the cluster centers
:param x: x axis values
:param ice_lines: ice lines
:param n_cluster_centers: number of cluster centers
:param cluster_method: cluster method
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
if cluster_method == 'approx':
kmeans = MiniBatchKMeans(n_clusters=n_cluster_centers, random_state=0, verbose=0)
else:
kmeans = KMeans(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)
kmeans.fit(ice_lines[feature_grids])
cluster_plot_data = pd.DataFrame(kmeans.cluster_centers_, columns=feature_grids)
cluster_cmap = 'Blues'
if plot_params is not None:
if 'cluster_cmap' in plot_params.keys():
cluster_cmap = plot_params['cluster_cmap']
colors = plt.get_cmap(cluster_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(cluster_plot_data)):
y = list(cluster_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=1, c=colors[i % 10])
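# Illustrative usage sketch (not part of the original module): `ice_lines` is
# assumed to be a DataFrame whose columns include every value of
# `feature_grids`; with cluster_method='approx' the centers come from
# MiniBatchKMeans.
#
# >>> grids = np.array([1., 2., 3.])
# >>> lines = pd.DataFrame(np.random.rand(50, 3), columns=grids)
# >>> fig, ax = plt.subplots()
# >>> _ice_cluster_plot(x=range(3), ice_lines=lines, feature_grids=grids,
# ...                   n_cluster_centers=5, cluster_method='approx',
# ...                   ax=ax, plot_params=None)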
def _pdp_interact_plot_title(pdp_interact_out, feature_names, ax,
multi_flag, which_class, only_inter, plot_params):
"""
Draw pdp interaction plot title
:param pdp_interact_out: instance of pdp_interact_obj
    :param feature_names: names of the features
    :param ax: axes to plot on
    :param multi_flag: whether it is a subplot of a multi-class plot
:param which_class: which class to plot
:param only_inter: whether only draw interaction plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'Interaction PDP between %s and %s' % (feature_names[0], feature_names[1])
title_fontsize = 14
subtitle_fontsize = 12
if type(pdp_interact_out) == dict:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out['class_0'].feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out['class_0'].feature_grids[1]))
else:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out.feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out.feature_grids[1]))
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if only_inter:
ax.text(0, 0.8, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.62, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.45, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.3, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.55, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.4, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.6, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.53, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.axis('off')
def _pdp_interact_plot(pdp_interact_out, feature_names, center, plot_org_pts, plot_lines, frac_to_plot, cluster,
n_cluster_centers, cluster_method, x_quantile, figsize, plot_params, multi_flag, which_class):
"""
Plot interaction plot
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
cluster method to use, default is KMeans, if 'approx' is passed, MiniBatchKMeans is used
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param figsize: figure size
:param plot_params: dict, default=None
values of plot parameters
:param multi_flag: boolean, default=False
whether it is a subplot of a multi-class plot
:param which_class: integer, default=None
must not be None under multi-class mode
"""
if figsize is None:
fig = plt.figure(figsize=(15, 15))
else:
fig = plt.figure(figsize=figsize)
pdp_plot_params = None
if plot_params is not None:
if 'pdp' in plot_params.keys():
pdp_plot_params = plot_params['pdp']
gs = GridSpec(2, 2)
ax0 = plt.subplot(gs[0, 0])
_pdp_interact_plot_title(pdp_interact_out=pdp_interact_out, feature_names=feature_names, ax=ax0,
multi_flag=multi_flag, which_class=which_class, only_inter=False, plot_params=plot_params)
ax1 = plt.subplot(gs[0, 1])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out1, feature_name=feature_names[0], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile,
ax=ax1, plot_params=pdp_plot_params)
ax2 = plt.subplot(gs[1, 0])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out2, feature_name=feature_names[1], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile, ax=ax2,
plot_params=pdp_plot_params)
ax3 = plt.subplot(gs[1, 1])
_pdp_contour_plot(pdp_interact_out=pdp_interact_out, feature_names=feature_names, x_quantile=x_quantile,
ax=ax3, fig=fig, plot_params=plot_params)
class ColorBarLocator(object):
def __init__(self, pax, pad=60, width=20):
self.pax = pax
self.pad = pad
self.width = width
def __call__(self, ax, renderer):
x, y, w, h = self.pax.get_position().bounds
fig = self.pax.get_figure()
inv_trans = fig.transFigure.inverted()
pad, _ = inv_trans.transform([self.pad, 0])
width, _ = inv_trans.transform([self.width, 0])
return [x, y - pad, w, width]
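# Illustrative usage sketch (not part of the original module): the locator
# recomputes the colorbar axes position on every draw so that it sits `pad`
# pixels below the parent axes and spans the parent's width, `width` pixels
# tall (converted to figure coordinates by the inverted transform above).
#
# >>> fig, ax = plt.subplots()
# >>> mappable = ax.imshow(np.random.rand(5, 5))
# >>> cax = fig.add_axes([0, 0, 0, 0], axes_locator=ColorBarLocator(ax))
# >>> fig.colorbar(mappable, cax=cax, orientation='horizontal')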
def _pdp_contour_plot(pdp_interact_out, feature_names, x_quantile, ax, fig, plot_params):
"""
Plot PDP contour
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param fig: plt figure
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
contour_color = 'white'
contour_cmap = 'viridis'
xticks_rotation = 0
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'contour_color' in plot_params['pdp_inter'].keys():
contour_color = plot_params['pdp_inter']['contour_color']
if 'contour_cmap' in plot_params['pdp_inter'].keys():
contour_cmap = plot_params['pdp_inter']['contour_cmap']
if 'font_family' in plot_params['pdp_inter'].keys():
font_family = plot_params['pdp_inter']['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
_axes_modify(font_family, ax)
feature_types = pdp_interact_out.feature_types
pdp = copy.deepcopy(pdp_interact_out.pdp)
new_feature_names = []
for i, feature_type in enumerate(feature_types):
if feature_type == 'onehot':
new_col = 'onehot_%d' % (i)
pdp[new_col] = pdp.apply(lambda x: list(x[pdp_interact_out.features[i]]).index(1), axis=1)
new_feature_names.append(new_col)
else:
new_feature_names.append(pdp_interact_out.features[i])
if (feature_types[0] == 'numeric') and x_quantile:
pdp[new_feature_names[0]] = pdp[new_feature_names[0]].apply(
lambda x: list(pdp_interact_out.feature_grids[0]).index(x))
if (feature_types[1] == 'numeric') and x_quantile:
pdp[new_feature_names[1]] = pdp[new_feature_names[1]].apply(
lambda x: list(pdp_interact_out.feature_grids[1]).index(x))
X, Y = np.meshgrid(pdp[new_feature_names[0]].unique(), pdp[new_feature_names[1]].unique())
Z = []
for i in range(X.shape[0]):
zs = []
for j in range(X.shape[1]):
x = X[i, j]
y = Y[i, j]
z = pdp[(pdp[new_feature_names[0]] == x) & (pdp[new_feature_names[1]] == y)]['preds'].values[0]
zs.append(z)
Z.append(zs)
Z = np.array(Z)
if feature_types[0] == 'onehot':
ax.set_xticks(range(X.shape[1]))
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
elif feature_types[0] == 'binary':
ax.set_xticks([0, 1])
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
else:
if x_quantile:
ax.set_xticks(range(len(pdp_interact_out.feature_grids[0])))
ax.set_xticklabels(pdp_interact_out.feature_grids[0], rotation=xticks_rotation)
if feature_types[1] == 'onehot':
ax.set_yticks(range(Y.shape[0]))
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
elif feature_types[1] == 'binary':
ax.set_yticks([0, 1])
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
else:
if x_quantile:
ax.set_yticks(range(len(pdp_interact_out.feature_grids[1])))
ax.set_yticklabels(pdp_interact_out.feature_grids[1])
level = np.min([X.shape[0], X.shape[1]])
    # pass the number of levels positionally and use matplotlib's `fontsize`
    # keyword; `N=...` and `contour_label_fontsize=...` are not valid kwargs
    c1 = ax.contourf(X, Y, Z, level, origin='lower', cmap=contour_cmap)
    c2 = ax.contour(c1, levels=c1.levels, colors=contour_color, origin='lower')
    ax.clabel(c2, fontsize=9, inline=1)
ax.set_xlabel(feature_names[0], fontsize=10)
ax.set_ylabel(feature_names[1], fontsize=10)
ax.get_yaxis().tick_right()
if fig is not None:
cax = fig.add_axes([0, 0, 0, 0], axes_locator=ColorBarLocator(ax))
fig.colorbar(c1, cax=cax, orientation='horizontal')
|
[
"sklearn.cluster.KMeans",
"numpy.log10",
"sklearn.cluster.MiniBatchKMeans",
"numpy.min",
"numpy.max",
"numpy.array",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"numpy.linspace",
"copy.deepcopy",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.get_cmap"
] |
[((5131, 5171), 'copy.deepcopy', 'copy.deepcopy', (['pdp_isolate_out.ice_lines'], {}), '(pdp_isolate_out.ice_lines)\n', (5144, 5171), False, 'import copy\n'), ((5184, 5218), 'copy.deepcopy', 'copy.deepcopy', (['pdp_isolate_out.pdp'], {}), '(pdp_isolate_out.pdp)\n', (5197, 5218), False, 'import copy\n'), ((11826, 11886), 'pandas.DataFrame', 'pd.DataFrame', (['kmeans.cluster_centers_'], {'columns': 'feature_grids'}), '(kmeans.cluster_centers_, columns=feature_grids)\n', (11838, 11886), True, 'import pandas as pd\n'), ((17829, 17843), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (17837, 17843), False, 'from matplotlib.gridspec import GridSpec\n'), ((17854, 17875), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (17865, 17875), True, 'import matplotlib.pyplot as plt\n'), ((18109, 18130), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (18120, 18130), True, 'import matplotlib.pyplot as plt\n'), ((18517, 18538), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (18528, 18538), True, 'import matplotlib.pyplot as plt\n'), ((18925, 18946), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 1]'], {}), '(gs[1, 1])\n', (18936, 18946), True, 'import matplotlib.pyplot as plt\n'), ((20938, 20973), 'copy.deepcopy', 'copy.deepcopy', (['pdp_interact_out.pdp'], {}), '(pdp_interact_out.pdp)\n', (20951, 20973), False, 'import copy\n'), ((22157, 22168), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (22165, 22168), True, 'import numpy as np\n'), ((23212, 23244), 'numpy.min', 'np.min', (['[X.shape[0], X.shape[1]]'], {}), '([X.shape[0], X.shape[1]])\n', (23218, 23244), True, 'import numpy as np\n'), ((11597, 11669), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'n_cluster_centers', 'random_state': '(0)', 'verbose': '(0)'}), '(n_clusters=n_cluster_centers, random_state=0, verbose=0)\n', (11612, 11669), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((11697, 11759), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_cluster_centers', 'random_state': '(0)', 'n_jobs': '(1)'}), '(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)\n', (11703, 11759), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((17589, 17617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (17599, 17617), True, 'import matplotlib.pyplot as plt\n'), ((17642, 17669), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (17652, 17669), True, 'import matplotlib.pyplot as plt\n'), ((10889, 10912), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['line_cmap'], {}), '(line_cmap)\n', (10901, 10912), True, 'import matplotlib.pyplot as plt\n'), ((10913, 10934), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (10924, 10934), True, 'import numpy as np\n'), ((12066, 12092), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cluster_cmap'], {}), '(cluster_cmap)\n', (12078, 12092), True, 'import matplotlib.pyplot as plt\n'), ((12093, 12114), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (12104, 12114), True, 'import numpy as np\n'), ((10613, 10645), 'numpy.log10', 'np.log10', (['ice_plot_data.shape[0]'], {}), '(ice_plot_data.shape[0])\n', (10621, 10645), True, 'import numpy as np\n'), ((10683, 10715), 'numpy.log10', 'np.log10', (['ice_plot_data.shape[0]'], {}), '(ice_plot_data.shape[0])\n', (10691, 10715), 
True, 'import numpy as np\n'), ((9290, 9303), 'numpy.min', 'np.min', (['lower'], {}), '(lower)\n', (9296, 9303), True, 'import numpy as np\n'), ((9322, 9335), 'numpy.max', 'np.max', (['upper'], {}), '(upper)\n', (9328, 9335), True, 'import numpy as np\n')]
|
"""Perform normalization on inputs or rewards.
"""
import numpy as np
import torch
from gym.spaces import Box
def normalize_angle(x):
"""Wraps input angle to [-pi, pi].
"""
return ((x + np.pi) % (2 * np.pi)) - np.pi
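# Illustrative usage sketch (not part of the original module):
# >>> normalize_angle(3 * np.pi / 2)    # ~ -pi/2
# >>> normalize_angle(np.pi / 4)        # unchanged, already inside [-pi, pi)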
class RunningMeanStd():
"""Calulates the running mean and std of a data stream.
Attributes:
mean (np.array): mean of data stream.
var (np.array): variance of data stream.
count (float): total count of data steam.
"""
def __init__(self, epsilon=1e-4, shape=()):
"""Initializes containers for data mean and variance.
Args:
epsilon (float): helps with arithmetic issues.
shape (tuple): the shape of the data stream's output.
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update(self, arr):
"""Update current stats with a new stream of data.
Args:
arr (np.array): 1D array of data, (batch_size, *shape).
"""
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
"""Util function for `update` method.
"""
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
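# Illustrative usage sketch (not part of the original module): streaming
# updates should reproduce the batch statistics up to the small epsilon used
# to initialize the count.
#
# >>> rms = RunningMeanStd(shape=(2,))
# >>> data = np.random.randn(1000, 2)
# >>> for chunk in np.array_split(data, 10):
# ...     rms.update(chunk)
# >>> np.allclose(rms.mean, data.mean(axis=0), atol=1e-3)
# True
# >>> np.allclose(rms.var, data.var(axis=0), atol=1e-3)
# True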
class BaseNormalizer(object):
"""Template/default normalizer.
Attributes:
read_only (bool): if to freeze the current stats being tracked.
"""
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def __call__(self, x, *args, **kwargs):
"""Invokes normalization on the given input.
"""
return x
def state_dict(self):
"""Returns snapshot of current stats.
"""
return {}
def load_state_dict(self, _):
"""Restores the stats from a snapshot.
"""
pass
class MeanStdNormalizer(BaseNormalizer):
"""Normalize by the running average.
"""
def __init__(self, shape=(), read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
shape (tuple): shape of data being tracked.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to avoid divide-by-zero.
"""
super().__init__(read_only)
self.read_only = read_only
self.rms = RunningMeanStd(shape=shape)
self.clip = clip
self.epsilon = epsilon
def __call__(self, x):
"""Update tracker given data, optionally normalize the data.
"""
x = np.asarray(x)
if not self.read_only:
self.rms.update(x)
return np.clip(
(x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon),
-self.clip, self.clip)
def state_dict(self):
return {'mean': self.rms.mean, 'var': self.rms.var}
def load_state_dict(self, saved):
self.rms.mean = saved['mean']
self.rms.var = saved['var']
class RewardStdNormalizer(MeanStdNormalizer):
"""Reward normalization by running average of returns.
Papers:
* arxiv.org/pdf/1808.04355.pdf
* arxiv.org/pdf/1810.12894.pdf
Also see:
* github.com/openai/baselines/issues/538
"""
def __init__(self, gamma=0.99, read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
gamma (float): discount factor for rewards.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to avoid divide-by-zero.
"""
# Reward has default shape (1,) or just ().
super().__init__((), read_only, clip, epsilon)
self.gamma = gamma
self.ret = None
def __call__(self, x, dones):
"""Update tracker given reward, optionally normalize the reward (only scaling).
"""
x = np.asarray(x)
if not self.read_only:
# Track running average of forward discounted returns.
if self.ret is None:
self.ret = np.zeros(x.shape[0])
self.ret = self.ret * self.gamma + x
self.rms.update(self.ret)
# Prevent information leak from previous episodes.
            self.ret[dones.astype(bool)] = 0
return np.clip(x / np.sqrt(self.rms.var + self.epsilon), -self.clip, self.clip)
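# Illustrative usage sketch (not part of the original module): rewards are
# only rescaled (not shifted) by the running std of the discounted return
# estimate, and the return accumulator is reset where episodes end.
#
# >>> reward_norm = RewardStdNormalizer(gamma=0.99)
# >>> rewards = np.ones(4)           # one reward per parallel environment
# >>> dones = np.zeros(4)            # no episode terminated this step
# >>> scaled = reward_norm(rewards, dones)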
class RescaleNormalizer(BaseNormalizer):
"""Apply constant scaling.
"""
def __init__(self, coef=1.0):
"""Initializes with fixed scaling constant.
Args:
coef (float): scaling coefficient.
"""
        super().__init__()
self.coef = coef
def __call__(self, x):
"""Scale the input.
"""
if not isinstance(x, torch.Tensor):
x = np.asarray(x)
return self.coef * x
class ImageNormalizer(RescaleNormalizer):
"""Scale image pixles from [0,255] to [0,1].
"""
def __init__(self):
        super().__init__(1.0 / 255)
class ActionUnnormalizer(BaseNormalizer):
"""Assumes policy output action is in [-1,1], unnormalize it for gym env.
"""
def __init__(self, action_space):
"""Defines the mean and std for the bounded action space.
"""
super().__init__()
assert isinstance(action_space, Box), "action space must be gym.spaces.Box"
low, high = action_space.low, action_space.high
self.mean = (low + high) / 2.0
self.std = (high - low) / 2.0
def __call__(self, action):
"""Unnormalizes given input action.
"""
x = np.asarray(action)
return self.mean + x * self.std
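# Illustrative usage sketch (not part of the original module): map a policy
# output in [-1, 1] onto a gym Box action space with asymmetric bounds.
#
# >>> space = Box(low=np.array([0., -2.]), high=np.array([1., 2.]))
# >>> unnormalize = ActionUnnormalizer(space)
# >>> unnormalize(np.array([1., -1.]))   # -> [1., -2.]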
|
[
"numpy.mean",
"numpy.sqrt",
"numpy.ones",
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"numpy.var"
] |
[((787, 814), 'numpy.zeros', 'np.zeros', (['shape', 'np.float64'], {}), '(shape, np.float64)\n', (795, 814), True, 'import numpy as np\n'), ((834, 860), 'numpy.ones', 'np.ones', (['shape', 'np.float64'], {}), '(shape, np.float64)\n', (841, 860), True, 'import numpy as np\n'), ((1094, 1114), 'numpy.mean', 'np.mean', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1101, 1114), True, 'import numpy as np\n'), ((1135, 1154), 'numpy.var', 'np.var', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1141, 1154), True, 'import numpy as np\n'), ((3364, 3377), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3374, 3377), True, 'import numpy as np\n'), ((4742, 4755), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4752, 4755), True, 'import numpy as np\n'), ((6458, 6476), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (6468, 6476), True, 'import numpy as np\n'), ((5651, 5664), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5661, 5664), True, 'import numpy as np\n'), ((3498, 3534), 'numpy.sqrt', 'np.sqrt', (['(self.rms.var + self.epsilon)'], {}), '(self.rms.var + self.epsilon)\n', (3505, 3534), True, 'import numpy as np\n'), ((4914, 4934), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (4922, 4934), True, 'import numpy as np\n'), ((5160, 5196), 'numpy.sqrt', 'np.sqrt', (['(self.rms.var + self.epsilon)'], {}), '(self.rms.var + self.epsilon)\n', (5167, 5196), True, 'import numpy as np\n'), ((1637, 1653), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (1646, 1653), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.special import factorial
from pyapprox.indexing import hash_array
from pyapprox.indexing import compute_hyperbolic_level_indices
def multiply_multivariate_polynomials(indices1,coeffs1,indices2,coeffs2):
"""
TODO: instead of using dictionary to colect terms consider using
unique_indices,repeated_idx=np.unique(
indices[active_idx,:],axis=1,return_inverse=True)
as is done in multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion. Choose which one is faster
Parameters
----------
    indices1 : np.ndarray (num_vars, num_indices1)
        The multivariate indices of the first polynomial
    coeffs1 : np.ndarray (num_indices1)
        The coefficients of the first polynomial
    indices2 : np.ndarray (num_vars, num_indices2)
        The multivariate indices of the second polynomial
    coeffs2 : np.ndarray (num_indices2)
        The coefficients of the second polynomial

    Returns
    -------
    indices : np.ndarray (num_vars, num_terms)
        The multivariate indices of the product polynomial
    coeffs : np.ndarray (num_terms)
        The coefficients of the product polynomial
    """
num_vars = indices1.shape[0]
num_indices1 = indices1.shape[1]
num_indices2 = indices2.shape[1]
assert num_indices1==coeffs1.shape[0]
assert num_indices2==coeffs2.shape[0]
assert num_vars==indices2.shape[0]
indices_dict = dict()
max_num_indices = num_indices1*num_indices2
indices = np.empty((num_vars,max_num_indices),int)
coeffs = np.empty((max_num_indices),float)
kk = 0
for ii in range(num_indices1):
index1 = indices1[:,ii]
coeff1 = coeffs1[ii]
for jj in range(num_indices2):
index= index1+indices2[:,jj]
key = hash_array(index)
coeff = coeff1*coeffs2[jj]
if key in indices_dict:
coeffs[indices_dict[key]]+=coeff
else:
indices_dict[key]=kk
indices[:,kk]=index
coeffs[kk]=coeff
kk+=1
indices = indices[:,:kk]
coeffs = coeffs[:kk]
return indices, coeffs
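# Illustrative usage sketch (not part of the original source): multiply the
# bivariate polynomials (1 + x1) and (1 + x2). Each column of an index array
# is a multivariate degree, so the product 1 + x2 + x1 + x1*x2 has four
# terms, all with coefficient 1.
#
# >>> indices1 = np.array([[0, 1], [0, 0]])   # terms 1 and x1
# >>> coeffs1 = np.array([1., 1.])
# >>> indices2 = np.array([[0, 0], [0, 1]])   # terms 1 and x2
# >>> coeffs2 = np.array([1., 1.])
# >>> indices, coeffs = multiply_multivariate_polynomials(
# ...     indices1, coeffs1, indices2, coeffs2)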
def coeffs_of_power_of_nd_linear_polynomial(num_vars, degree, linear_coeffs):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a linear multivariate polynomial (no constant term) to some power.
Parameters
----------
num_vars : integer
The number of variables
degree : integer
The power of the linear polynomial
linear_coeffs: np.ndarray (num_vars)
The coefficients of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
assert len(linear_coeffs)==num_vars
coeffs, indices=multinomial_coeffs_of_power_of_nd_linear_polynomial(
num_vars, degree)
for ii in range(indices.shape[1]):
index = indices[:,ii]
for dd in range(num_vars):
degree = index[dd]
coeffs[ii] *= linear_coeffs[dd]**degree
return coeffs, indices
def substitute_polynomial_for_variables_in_polynomial(
indices_in,coeffs_in,indices,coeffs,var_idx):
num_vars, num_terms = indices.shape
new_indices = []
new_coeffs = []
for ii in range(num_terms):
index = indices[:,ii]
pows = index[var_idx]
ind,cf = substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,index,coeffs[ii],var_idx,pows)
new_indices.append(ind)
new_coeffs.append(cf)
new_indices = np.hstack(new_indices)
new_coeffs = np.vstack(new_coeffs)
return new_indices, new_coeffs
def substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,basis_index,basis_coeff,var_idx,global_var_idx,
num_global_vars):
"""
var_idx : np.ndarray (nsub_vars)
The dimensions in basis_index which will be substituted
global_var_idx : [ np.ndarray(nvars[ii]) for ii in num_inputs]
The index of the active variables for each input
"""
num_inputs = var_idx.shape[0]
assert num_inputs==len(indices_in)
assert num_inputs==len(coeffs_in)
assert basis_coeff.shape[0]==1
assert var_idx.max()<basis_index.shape[0]
assert basis_index.shape[1]==1
assert len(global_var_idx)==num_inputs
# store input indices in global_var_idx
temp = []
for ii in range(num_inputs):
ind = np.zeros((num_global_vars,indices_in[ii].shape[1]))
ind[global_var_idx,:] = indices_in[ii]
temp.append(ind)
indices_in = temp
jj=0
degree = basis_index[var_idx[jj]]
c1,ind1 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
for jj in range(1,var_idx.shape[0]):
degree = basis_index[var_idx[jj]]
c2,ind2 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
ind1,c1 = multiply_multivariate_polynomials(ind1,c1,ind2,c2)
# this mask may be wrong. I might be confusing global and var idx
mask = np.ones(basis_index.shape[0],dtype=bool); mask[var_idx]=False
print(ind1.shape,mask.shape)
ind1[mask,:] += basis_index[mask]
c1*=basis_coeff
return ind1, c1
def composition_of_polynomials(indices_list,coeffs_list):
npolys = len(indices_list)
assert npolys==len(coeffs_list)
for ii in range(1,npolys):
new_poly = 2
return new_poly
def coeffs_of_power_of_polynomial(indices, coeffs, degree):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a multivariate polynomial to some power.
TODO: Deprecate coeffs_of_power_of_nd_linear_polynomial as that function
can be obtained as a special case of this function
Parameters
----------
indices : np.ndarray (num_vars,num_terms)
The indices of the multivariate polynomial
coeffs: np.ndarray (num_vars)
The coefficients of the polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
num_vars, num_terms = indices.shape
assert indices.shape[1]==coeffs.shape[0]
multinomial_coeffs, multinomial_indices = \
multinomial_coeffs_of_power_of_nd_linear_polynomial(num_terms, degree)
new_indices = np.zeros((num_vars,multinomial_indices.shape[1]))
new_coeffs = np.tile(multinomial_coeffs[:,np.newaxis],coeffs.shape[1])
for ii in range(multinomial_indices.shape[1]):
multinomial_index = multinomial_indices[:,ii]
for dd in range(num_terms):
deg = multinomial_index[dd]
new_coeffs[ii] *= coeffs[dd]**deg
new_indices[:,ii] += indices[:,dd]*deg
return new_coeffs, new_indices
def group_like_terms(coeffs, indices):
    if coeffs.ndim==1:
        coeffs = coeffs[:,np.newaxis]
    num_vars,num_indices = indices.shape
    indices_dict = {}
    for ii in range(num_indices):
        key = hash_array(indices[:,ii])
        if not key in indices_dict:
            indices_dict[key] = [coeffs[ii],ii]
        else:
            indices_dict[key] = [indices_dict[key][0]+coeffs[ii],ii]
    new_coeffs = np.empty((len(indices_dict),coeffs.shape[1]))
    new_indices = np.empty((num_vars,len(indices_dict)),dtype=int)
    ii=0
    for key, item in indices_dict.items():
        new_indices[:,ii] = indices[:,item[1]]
        new_coeffs[ii] = item[0]
        ii+=1
    return new_coeffs, new_indices
def multinomial_coefficient(index):
    """Compute the multinomial coefficient of an index [i1,i2,...,id].
    Parameters
    ----------
    index : multidimensional index
        multidimensional index specifying the polynomial degree in each
        dimension
    Returns
    -------
    coeff : double
        the multinomial coefficient
    """
    level = index.sum()
    denom = np.prod(factorial(index))
    coeff = factorial(level)/denom
    return coeff
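# Illustrative check (added for exposition; the helper name is hypothetical): for the
# index [1, 2] the multinomial coefficient is (1+2)!/(1!*2!) = 3, assuming the
# scipy.special.factorial import used above.
def _example_multinomial_coefficient():
    assert multinomial_coefficient(np.array([1, 2])) == 3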
def multinomial_coefficients(indices):
    coeffs = np.empty((indices.shape[1]),float)
    for i in range(indices.shape[1]):
        coeffs[i] = multinomial_coefficient(indices[:,i])
    return coeffs
def multinomial_coeffs_of_power_of_nd_linear_polynomial(num_vars,degree):
    """ Compute the multinomial coefficients of the individual terms
    obtained when taking the power of a linear polynomial
    (without constant term).
    Given a linear multivariate polynomial,
    e.g. (x1+x2+x3)**2 = x1**2+2*x1*x2+2*x1*x3+x2**2+2*x2*x3+x3**2,
    return the coefficients of each quadratic term, i.e.
    [1,2,2,1,2,1]
    Parameters
    ----------
    num_vars : integer
        the dimension of the multivariate polynomial
    degree : integer
        the power of the linear polynomial
    Returns
    -------
    coeffs: np.ndarray (num_terms)
        the multinomial coefficients of the polynomial obtained when
        raising the linear multivariate polynomial to the power=degree
    indices: np.ndarray (num_terms)
        the indices of the polynomial obtained when
        raising the linear multivariate polynomial to the power=degree
    """
    indices = compute_hyperbolic_level_indices(num_vars,degree,1.0)
    coeffs = multinomial_coefficients(indices)
    return coeffs, indices
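# Hedged example (added for exposition; helper name hypothetical): the docstring case
# (x1+x2+x3)**2 yields six quadratic terms whose multinomial coefficients are a
# permutation of [1,2,2,1,2,1]; this assumes compute_hyperbolic_level_indices returns
# the indices of total degree equal to `degree`, as the docstring example implies.
def _example_multinomial_coeffs_of_power():
    coeffs, indices = multinomial_coeffs_of_power_of_nd_linear_polynomial(3, 2)
    assert indices.shape == (3, 6)
    assert sorted(coeffs.tolist()) == [1., 1., 1., 2., 2., 2.]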
def add_polynomials(indices_list, coeffs_list):
    """
    Add many polynomials together.
    Example:
        p1 = x1**2+x2+x3, p2 = x2**2+2*x3
        p3 = p1+p2
    return the degrees of each term in the polynomial
        p3 = x1**2+x2+3*x3+x2**2
        [2, 1, 1, 2]
    and the coefficients of each of these terms
        [1., 1., 3., 1.]
    Parameters
    ----------
    indices_list : list [np.ndarray (num_vars,num_indices_i)]
        List of polynomial indices. indices_i may be different for each
        polynomial
    coeffs_list : list [np.ndarray (num_indices_i,num_qoi)]
        List of polynomial coefficients. indices_i may be different for each
        polynomial. num_qoi must be the same for each list element.
    Returns
    -------
    indices: np.ndarray (num_vars,num_terms)
        the polynomial indices of the polynomial obtained from
        summing the polynomials. This will be the union of the indices
        of the input polynomials
    coeffs: np.ndarray (num_terms,num_qoi)
        the polynomial coefficients of the polynomial obtained from
        summing the polynomials
    """
    num_polynomials = len(indices_list)
    assert num_polynomials==len(coeffs_list)
    indices_dict = dict()
    indices = []
    coeff = []
    ii=0; kk=0
    for jj in range(indices_list[ii].shape[1]):
        assert coeffs_list[ii].ndim==2
        assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
        index=indices_list[ii][:,jj]
        indices_dict[hash_array(index)]=kk
        indices.append(index)
        coeff.append(coeffs_list[ii][jj,:].copy())
        kk+=1
    for ii in range(1,num_polynomials):
        #print indices_list[ii].T,num_polynomials
        assert coeffs_list[ii].ndim==2
        assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
        for jj in range(indices_list[ii].shape[1]):
            index=indices_list[ii][:,jj]
            key = hash_array(index)
            if key in indices_dict:
                nn = indices_dict[key]
                coeff[nn]+=coeffs_list[ii][jj,:]
            else:
                indices_dict[key]=kk
                indices.append(index)
                coeff.append(coeffs_list[ii][jj,:].copy())
                kk+=1
    indices = np.asarray(indices).T
    coeff = np.asarray(coeff)
    return indices, coeff
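# Hedged usage sketch (added for exposition; helper name hypothetical) reproducing the
# docstring example: p1 = x1**2+x2+x3 and p2 = x2**2+2*x3 summed with add_polynomials.
def _example_add_polynomials():
    indices1 = np.array([[2, 0, 0], [0, 1, 0], [0, 0, 1]]).T  # x1**2, x2, x3
    coeffs1 = np.ones((3, 1))
    indices2 = np.array([[0, 2, 0], [0, 0, 1]]).T              # x2**2, 2*x3
    coeffs2 = np.array([[1.], [2.]])
    indices, coeffs = add_polynomials([indices1, indices2], [coeffs1, coeffs2])
    # four unique terms remain and the x3 coefficient has accumulated to 3.
    return indices, coeffs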
def get_indices_double_set(indices):
    """
    Given multivariate indices
        [i1,i2,...,]
    compute its double set by
        [i1*i1,i1*i2,...,i2*i2,i2*i3...]
    The double set will only contain unique indices
    Parameters
    ----------
    indices : np.ndarray (num_vars,num_indices)
        The initial indices
    Returns
    -------
    double_set_indices : np.ndarray (num_vars,num_indices)
        The double set of indices
    """
    dummy_coeffs = np.zeros(indices.shape[1])
    double_set_indices = multiply_multivariate_polynomials(
        indices,dummy_coeffs,indices,dummy_coeffs)[0]
    return double_set_indices
def shift_momomial_expansion(coef,shift,scale):
    assert coef.ndim==1
    shifted_coef = np.zeros_like(coef)
    shifted_coef[0]=coef[0]
    nterms = coef.shape[0]
    for ii in range(1,nterms):
        temp = np.polynomial.polynomial.polypow([1,-shift],ii)
        shifted_coef[:ii+1] += coef[ii]*temp[::-1]/scale**ii
    return shifted_coef
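# Worked example (added for exposition; helper name hypothetical): for p(x) = x,
# i.e. coef = [0, 1], a shift of 1 and a scale of 2 give the expansion of (x - 1)/2,
# i.e. the coefficients [-0.5, 0.5].
def _example_shift_momomial_expansion():
    shifted = shift_momomial_expansion(np.array([0., 1.]), 1.0, 2.0)
    assert np.allclose(shifted, [-0.5, 0.5])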
#Some of these functions can be replaced by numpy functions described at
#https://docs.scipy.org/doc/numpy/reference/routines.polynomials.polynomial.html
|
[
"numpy.tile",
"numpy.ones",
"numpy.hstack",
"scipy.special.factorial",
"pyapprox.indexing.compute_hyperbolic_level_indices",
"numpy.asarray",
"pyapprox.indexing.hash_array",
"numpy.zeros",
"numpy.empty",
"numpy.vstack",
"numpy.polynomial.polynomial.polypow",
"numpy.zeros_like"
] |
[((1047, 1089), 'numpy.empty', 'np.empty', (['(num_vars, max_num_indices)', 'int'], {}), '((num_vars, max_num_indices), int)\n', (1055, 1089), True, 'import numpy as np\n'), ((1101, 1133), 'numpy.empty', 'np.empty', (['max_num_indices', 'float'], {}), '(max_num_indices, float)\n', (1109, 1133), True, 'import numpy as np\n'), ((3273, 3295), 'numpy.hstack', 'np.hstack', (['new_indices'], {}), '(new_indices)\n', (3282, 3295), True, 'import numpy as np\n'), ((3313, 3334), 'numpy.vstack', 'np.vstack', (['new_coeffs'], {}), '(new_coeffs)\n', (3322, 3334), True, 'import numpy as np\n'), ((4780, 4821), 'numpy.ones', 'np.ones', (['basis_index.shape[0]'], {'dtype': 'bool'}), '(basis_index.shape[0], dtype=bool)\n', (4787, 4821), True, 'import numpy as np\n'), ((6143, 6193), 'numpy.zeros', 'np.zeros', (['(num_vars, multinomial_indices.shape[1])'], {}), '((num_vars, multinomial_indices.shape[1]))\n', (6151, 6193), True, 'import numpy as np\n'), ((6210, 6269), 'numpy.tile', 'np.tile', (['multinomial_coeffs[:, np.newaxis]', 'coeffs.shape[1]'], {}), '(multinomial_coeffs[:, np.newaxis], coeffs.shape[1])\n', (6217, 6269), True, 'import numpy as np\n'), ((7823, 7856), 'numpy.empty', 'np.empty', (['indices.shape[1]', 'float'], {}), '(indices.shape[1], float)\n', (7831, 7856), True, 'import numpy as np\n'), ((8948, 9003), 'pyapprox.indexing.compute_hyperbolic_level_indices', 'compute_hyperbolic_level_indices', (['num_vars', 'degree', '(1.0)'], {}), '(num_vars, degree, 1.0)\n', (8980, 9003), False, 'from pyapprox.indexing import compute_hyperbolic_level_indices\n'), ((11416, 11433), 'numpy.asarray', 'np.asarray', (['coeff'], {}), '(coeff)\n', (11426, 11433), True, 'import numpy as np\n'), ((11944, 11970), 'numpy.zeros', 'np.zeros', (['indices.shape[1]'], {}), '(indices.shape[1])\n', (11952, 11970), True, 'import numpy as np\n'), ((12207, 12226), 'numpy.zeros_like', 'np.zeros_like', (['coef'], {}), '(coef)\n', (12220, 12226), True, 'import numpy as np\n'), ((4154, 4206), 'numpy.zeros', 'np.zeros', (['(num_global_vars, indices_in[ii].shape[1])'], {}), '((num_global_vars, indices_in[ii].shape[1]))\n', (4162, 4206), True, 'import numpy as np\n'), ((6799, 6825), 'pyapprox.indexing.hash_array', 'hash_array', (['indices[:, ii]'], {}), '(indices[:, ii])\n', (6809, 6825), False, 'from pyapprox.indexing import hash_array\n'), ((7700, 7716), 'scipy.special.factorial', 'factorial', (['index'], {}), '(index)\n', (7709, 7716), False, 'from scipy.special import factorial\n'), ((7730, 7746), 'scipy.special.factorial', 'factorial', (['level'], {}), '(level)\n', (7739, 7746), False, 'from scipy.special import factorial\n'), ((11382, 11401), 'numpy.asarray', 'np.asarray', (['indices'], {}), '(indices)\n', (11392, 11401), True, 'import numpy as np\n'), ((12328, 12377), 'numpy.polynomial.polynomial.polypow', 'np.polynomial.polynomial.polypow', (['[1, -shift]', 'ii'], {}), '([1, -shift], ii)\n', (12360, 12377), True, 'import numpy as np\n'), ((1340, 1357), 'pyapprox.indexing.hash_array', 'hash_array', (['index'], {}), '(index)\n', (1350, 1357), False, 'from pyapprox.indexing import hash_array\n'), ((10618, 10635), 'pyapprox.indexing.hash_array', 'hash_array', (['index'], {}), '(index)\n', (10628, 10635), False, 'from pyapprox.indexing import hash_array\n'), ((11051, 11068), 'pyapprox.indexing.hash_array', 'hash_array', (['index'], {}), '(index)\n', (11061, 11068), False, 'from pyapprox.indexing import hash_array\n')]
|
import numpy as np
import cv2
import os
import math
os.system("fswebcam -r 507x456 --no-banner image11.jpg")
def showImage(capImg):
cv2.imshow('img', capImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
img = cv2.imread('image11.jpg',-1)
height, width, channel = img.shape
topy= height
topx = width
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_color = np.array([0,255,255])
upper_color = np.array([0,255,255])
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(img,img, mask=mask)
'''def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x,y), 100, (255,255,255), -1)'''
'''cap = cv2.VideoCapture(-1)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('hjhj', gray)
    if cv2.waitKey(0) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()'''
propx = (topx/512)
propy = (topy/512)
'''lineX1 = int(0*propx)
lineY2 = int(0*propy)
lineX2 = int(511*propx)
lineY1 = int(511*propy)
img = cv2.line(img, (lineX1,lineY1), (lineX2, lineY2), (255,255,255), 5)'''
w = 100*(propx+propy)/2
x1 = int(topx/2 - w/2)
x2 = int(topx/2 + w/2)
y1 = int(topy/2 + w/2)
y2 = int(topy/2 - w/2)
img = cv2.rectangle(res, (x1,y1), (x2,y2), (0,255,0),3)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
showImage(img)
ret, thresh = cv2.threshold(img, 15, 250, 0)
showImage(thresh)
image, contours, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#showImage(image)
cv2.drawContours(img, contours, 0, (0,255,0), 3)
showImage(img)
print('Num of Contours ', len(contours))
cnt = contours[0]
M = cv2.moments(cnt)
print (M)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
area = cv2.contourArea(cnt)
print (cx)
print (cy)
print (area)
'''xCircle = 40*propx
xCircle = int(xCircle)
yCircle = xCircle
radCircle = xCircle
img = cv2.circle(img, (xCircle, yCircle), radCircle, (0,0,255),-1)
x3 = int(topx - 60*propx)
y3 = int(topy - 110*propy)
minAx = int(50*propx)
majAx = int(100*propy)
img = cv2.ellipse(img, (x3, y3), (minAx,majAx), 0, 0, 360, (0,150,255), -1)'''
'''pt1X = int(70*propx)
pt1Y = int(60*propy)
pt2X = int(154*propx)
pt2Y = int(23*propy)
pt3X = int(500*propx)
pt3Y = int(3*propy)'''
#pts = np.array([[pt1X, pt1Y], [pt2X, pt2Y], [pt3X, pt3Y]], np.int32)
#pts = pts.reshape((-1,1,2))
#img = cv2.polylines(img, [pts], True, (100,100,234))
#font = cv2.FONT_HERSHEY_SIMPLEX
#startPtX = int(240*propx)
#startPtY = int(240*propy)
#scale = 2*(propx + propy)/2
#cv2.putText(img, 'Apurva', (startPtX, startPtY), font, scale, (210, 80, 150), 4, cv2.LINE_AA)
#cv2.imshow("kl", img)
'''cv2.setMouseCallback('kl', draw_circle)'''
''''''
#cv2.imshow('frame', img)
#cv2.imshow('mask',mask)
cv2.imshow('res',res)
'''sd = img[130:200, 175:245]
img[20:90, 140:210]=sd
cv2.imshow("kl", img)'''
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"cv2.drawContours",
"cv2.threshold",
"cv2.inRange",
"cv2.bitwise_and",
"cv2.contourArea",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"os.system",
"cv2.imread"
] |
[((52, 108), 'os.system', 'os.system', (['"""fswebcam -r 507x456 --no-banner image11.jpg"""'], {}), "('fswebcam -r 507x456 --no-banner image11.jpg')\n", (61, 108), False, 'import os\n'), ((217, 246), 'cv2.imread', 'cv2.imread', (['"""image11.jpg"""', '(-1)'], {}), "('image11.jpg', -1)\n", (227, 246), False, 'import cv2\n'), ((314, 350), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (326, 350), False, 'import cv2\n'), ((366, 389), 'numpy.array', 'np.array', (['[0, 255, 255]'], {}), '([0, 255, 255])\n', (374, 389), True, 'import numpy as np\n'), ((402, 425), 'numpy.array', 'np.array', (['[0, 255, 255]'], {}), '([0, 255, 255])\n', (410, 425), True, 'import numpy as np\n'), ((432, 474), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_color', 'upper_color'], {}), '(hsv, lower_color, upper_color)\n', (443, 474), False, 'import cv2\n'), ((482, 518), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (497, 518), False, 'import cv2\n'), ((1256, 1310), 'cv2.rectangle', 'cv2.rectangle', (['res', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(3)'], {}), '(res, (x1, y1), (x2, y2), (0, 255, 0), 3)\n', (1269, 1310), False, 'import cv2\n'), ((1313, 1350), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1325, 1350), False, 'import cv2\n'), ((1381, 1411), 'cv2.threshold', 'cv2.threshold', (['img', '(15)', '(250)', '(0)'], {}), '(img, 15, 250, 0)\n', (1394, 1411), False, 'import cv2\n'), ((1460, 1524), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1476, 1524), False, 'import cv2\n'), ((1543, 1593), 'cv2.drawContours', 'cv2.drawContours', (['img', 'contours', '(0)', '(0, 255, 0)', '(3)'], {}), '(img, contours, 0, (0, 255, 0), 3)\n', (1559, 1593), False, 'import cv2\n'), ((1672, 1688), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (1683, 1688), False, 'import cv2\n'), ((1763, 1783), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1778, 1783), False, 'import cv2\n'), ((2782, 2804), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'res'], {}), "('res', res)\n", (2792, 2804), False, 'import cv2\n'), ((2884, 2898), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2895, 2898), False, 'import cv2\n'), ((2899, 2922), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2920, 2922), False, 'import cv2\n'), ((137, 162), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'capImg'], {}), "('img', capImg)\n", (147, 162), False, 'import cv2\n'), ((167, 181), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (178, 181), False, 'import cv2\n'), ((186, 209), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (207, 209), False, 'import cv2\n')]
|
"""
Analysis code for plotting vertical flux transport and/or a gif of temperature,
velocity and KE from the merged output of a Dedalus Rayleigh-Bénard code.
Author: <NAME>
"""
# ====================
# IMPORTS
# ====================
import numpy as np
import h5py
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pathlib
import os
import shutil
import time
import imageio
from dedalus import public as de
from dedalus.tools import post
# ====================
# CLA PARSING
# ====================
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", help="Folder where the processing data is stored", required=True
)
parser.add_argument(
"-t", "--heatmap", help="Plot a gif of the temperature heatmap", action="store_true"
)
parser.add_argument(
"-f", "--flux", help="Plot the average flux contributions", action="store_true"
)
parser.add_argument(
"-k", "--KE", help="Plot the kinetic energy only", action="store_true"
)
args = parser.parse_args()
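# Example invocation (illustrative only; the script and data folder names are hypothetical):
#   python plot_analysis.py -i raw_data/run1 -t -f
# which produces both the temperature/velocity/KE gif and the time-averaged flux plot.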
direc = os.path.normpath(args.input) + "/"
with h5py.File(direc + "run_params/run_params_s1.h5", "r") as f:
a = int(np.array(f["tasks"]["a"]))
y = de.Fourier("y", 256, interval=(0, a), dealias=3 / 2)
z = de.Chebyshev("z", 64, interval=(0, 1), dealias=3 / 2)
y = np.array(y.grid(1))
z = np.array(z.grid(1))
# ====================
# Plot Fluxes
# ====================
if args.flux:
avg_t_start = float(input("Start average at: "))
avg_t_stop = float(input("End average at: "))
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
L_cond_arr = np.array(file["tasks"]["L_cond"])[:, 0]
L_conv_arr = np.array(file["tasks"]["L_conv"])[:, 0]
KE = np.array(file["tasks"]["KE"])[:, 0]
snap_t = np.array(file["scales"]["sim_time"])
if (
(avg_t_start <= snap_t[0])
or (avg_t_start >= snap_t[-1])
or (avg_t_stop <= snap_t[0])
or (avg_t_stop >= snap_t[-1])
):
print(
"Average time period out of simulation range: {} -> {}".format(
snap_t[0], snap_t[-1]
)
)
pass
ASI = np.abs(snap_t - avg_t_start).argmin()
if np.isnan(avg_t_stop):
AEI = -1
else:
AEI = np.abs(snap_t - avg_t_stop).argmin()
avg_t_range = snap_t[AEI] - snap_t[ASI]
print("Averaging between {} and {}".format(snap_t[ASI], snap_t[AEI]))
mean_L_cond = np.mean(np.array(L_cond_arr[ASI:AEI]), axis=0)
mean_L_conv = np.mean(np.array(L_conv_arr[ASI:AEI]), axis=0)
mean_L_tot = mean_L_cond + mean_L_conv
del_L = np.max(np.abs(1.0 - mean_L_tot))
print("max del_L = {}".format(del_L))
fig = plt.figure(figsize=(6, 6))
KE_ax = fig.add_subplot(311)
KE_ax.plot(snap_t, KE, "k", label="Kinetic Energy")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.axvspan(
snap_t[ASI], snap_t[AEI], color="r", alpha=0.5, label="Flux averaging"
)
L_ax = fig.add_subplot(212)
L_ax.plot(z, mean_L_cond, "r", linestyle="-", label=r"$L_{cond}$")
L_ax.plot(z, mean_L_conv, "g", linestyle="-", label=r"$L_{conv}$")
L_ax.plot(z, mean_L_tot, "k", ls="-", label=r"$L_{total}$")
L_ax.set_xlabel("z")
L_ax.set_ylabel("L")
L_ax.legend()
plt.savefig(direc + "fluxes.png")
plt.show()
plt.close()
# ====================
# Plot heatmap
# ====================
if args.heatmap:
filenames = []
os.makedirs(direc + "figure", exist_ok=True)
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
KE = np.array(file["tasks"]["KE"])[:, 0]
with h5py.File(direc + "snapshots/snapshots_s1.h5", "r") as file:
T = np.array(file["tasks"]["T"])
v = np.array(file["tasks"]["v"])
w = np.array(file["tasks"]["w"])
snap_t = np.array(file["scales"]["sim_time"])
snap_iter = np.array(file["scales"]["iteration"])
yy, zz = np.meshgrid(y, z)
maxT = np.max(T)
maxV = np.max(v)
maxW = np.max(w)
n_iter = len(T[:, 0:, 0])
start_time = time.time()
print("Plotting {} graphs".format(n_iter))
try:
for i in range(0, int(n_iter)):
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
T_ax = fig.add_subplot(gs[0:2, 0])
v_ax = fig.add_subplot(gs[0, 1])
w_ax = fig.add_subplot(gs[1, 1])
KE_ax = fig.add_subplot(gs[2, :])
if (i % 50 == 0) and (i != 0):
sec_per_frame = (time.time() - start_time) / i
eta = sec_per_frame * (n_iter - i)
print(
"image {}/{} at {:.3f}ips \t| ETA in {}m {}s".format(
i, n_iter, sec_per_frame, int(eta // 60), int(eta % 60)
)
)
fig.suptitle(
"Iteration: {}\n".format(snap_iter[i])
+ r"Sim Time: {:.2f} $\tau_\kappa$".format(snap_t[i])
)
c1 = v_ax.contourf(
yy,
zz,
np.transpose(v[i, :, :]),
levels=np.linspace(np.min(v), maxV),
cmap="coolwarm",
)
c1_bar = fig.colorbar(c1, ax=v_ax)
c1_bar.set_label("v", rotation=0)
v_ax.set_ylabel("z")
v_ax.set_xlabel("y")
v_ax.invert_xaxis()
c2 = w_ax.contourf(
yy,
zz,
np.transpose(w[i, :, :]),
levels=np.linspace(np.min(w), maxW),
cmap="coolwarm",
)
c2_bar = fig.colorbar(c2, ax=w_ax)
c2_bar.set_label("w", rotation=0)
w_ax.set_ylabel("z")
w_ax.set_xlabel("y")
w_ax.invert_xaxis()
c3 = T_ax.contourf(
yy,
zz,
np.transpose(T[i, :, :]),
levels=np.linspace(0, maxT),
cmap="coolwarm",
)
c3_bar = fig.colorbar(c3, ax=T_ax)
c3_bar.set_label("T", rotation=0)
T_ax.set_ylabel("z")
T_ax.set_xlabel("y")
T_ax.invert_xaxis()
KE_ax.plot(snap_t[:i], KE[:i], "k")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.set_ylim([0, 1.1 * np.max(KE)])
KE_ax.set_xlim([0, np.max(snap_t)])
plt.tight_layout()
plt.savefig(direc + "figure/fig_{:03d}.png".format(i))
filenames.append(direc + "figure/fig_{:03d}.png".format(i))
plt.close()
plt.clf()
except KeyboardInterrupt:
print("ending loop")
print("completed in {:.2f} sec".format(time.time() - start_time))
print("Creating gif...")
with imageio.get_writer(direc + "info.gif", mode="I") as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
print("Removing raw image files...")
shutil.rmtree(direc + "figure")
if args.KE:
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as f:
KE = np.array(f["tasks"]["KE"])[:, 0]
snap_t = np.array(f["scales"]["sim_time"])
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
ax.plot(snap_t, KE, "k")
ax.set_xlabel(r"time [$\tau_\kappa$]")
ax.set_ylabel("KE")
plt.show()
plt.close()
print("done.")
|
[
"numpy.array",
"imageio.get_writer",
"argparse.ArgumentParser",
"numpy.max",
"os.path.normpath",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.min",
"numpy.meshgrid",
"dedalus.public.Fourier",
"numpy.abs",
"matplotlib.pyplot.savefig",
"h5py.File",
"numpy.isnan",
"imageio.imread",
"numpy.transpose",
"time.time",
"dedalus.public.Chebyshev",
"matplotlib.pyplot.show",
"os.makedirs",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"shutil.rmtree"
] |
[((553, 578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (576, 578), False, 'import argparse\n'), ((1187, 1239), 'dedalus.public.Fourier', 'de.Fourier', (['"""y"""', '(256)'], {'interval': '(0, a)', 'dealias': '(3 / 2)'}), "('y', 256, interval=(0, a), dealias=3 / 2)\n", (1197, 1239), True, 'from dedalus import public as de\n'), ((1244, 1297), 'dedalus.public.Chebyshev', 'de.Chebyshev', (['"""z"""', '(64)'], {'interval': '(0, 1)', 'dealias': '(3 / 2)'}), "('z', 64, interval=(0, 1), dealias=3 / 2)\n", (1256, 1297), True, 'from dedalus import public as de\n'), ((1042, 1070), 'os.path.normpath', 'os.path.normpath', (['args.input'], {}), '(args.input)\n', (1058, 1070), False, 'import os\n'), ((1083, 1136), 'h5py.File', 'h5py.File', (["(direc + 'run_params/run_params_s1.h5')", '"""r"""'], {}), "(direc + 'run_params/run_params_s1.h5', 'r')\n", (1092, 1136), False, 'import h5py\n'), ((2205, 2225), 'numpy.isnan', 'np.isnan', (['avg_t_stop'], {}), '(avg_t_stop)\n', (2213, 2225), True, 'import numpy as np\n'), ((2696, 2722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (2706, 2722), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3333), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(direc + 'fluxes.png')"], {}), "(direc + 'fluxes.png')\n", (3311, 3333), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3346, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3364), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3362, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3513), 'os.makedirs', 'os.makedirs', (["(direc + 'figure')"], {'exist_ok': '(True)'}), "(direc + 'figure', exist_ok=True)\n", (3480, 3513), False, 'import os\n'), ((3951, 3968), 'numpy.meshgrid', 'np.meshgrid', (['y', 'z'], {}), '(y, z)\n', (3962, 3968), True, 'import numpy as np\n'), ((3981, 3990), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (3987, 3990), True, 'import numpy as np\n'), ((4002, 4011), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (4008, 4011), True, 'import numpy as np\n'), ((4023, 4032), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (4029, 4032), True, 'import numpy as np\n'), ((4081, 4092), 'time.time', 'time.time', ([], {}), '()\n', (4090, 4092), False, 'import time\n'), ((7078, 7109), 'shutil.rmtree', 'shutil.rmtree', (["(direc + 'figure')"], {}), "(direc + 'figure')\n", (7091, 7109), False, 'import shutil\n'), ((7295, 7321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (7305, 7321), True, 'import matplotlib.pyplot as plt\n'), ((7452, 7462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7460, 7462), True, 'import matplotlib.pyplot as plt\n'), ((7467, 7478), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7476, 7478), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1180), 'numpy.array', 'np.array', (["f['tasks']['a']"], {}), "(f['tasks']['a'])\n", (1163, 1180), True, 'import numpy as np\n'), ((1534, 1583), 'h5py.File', 'h5py.File', (["(direc + 'analysis/analysis_s1.h5')", '"""r"""'], {}), "(direc + 'analysis/analysis_s1.h5', 'r')\n", (1543, 1583), False, 'import h5py\n'), ((1781, 1817), 'numpy.array', 'np.array', (["file['scales']['sim_time']"], {}), "(file['scales']['sim_time'])\n", (1789, 1817), True, 'import numpy as np\n'), ((2450, 2479), 'numpy.array', 'np.array', (['L_cond_arr[ASI:AEI]'], {}), '(L_cond_arr[ASI:AEI])\n', (2458, 2479), True, 'import numpy as np\n'), 
((2515, 2544), 'numpy.array', 'np.array', (['L_conv_arr[ASI:AEI]'], {}), '(L_conv_arr[ASI:AEI])\n', (2523, 2544), True, 'import numpy as np\n'), ((2617, 2641), 'numpy.abs', 'np.abs', (['(1.0 - mean_L_tot)'], {}), '(1.0 - mean_L_tot)\n', (2623, 2641), True, 'import numpy as np\n'), ((3524, 3573), 'h5py.File', 'h5py.File', (["(direc + 'analysis/analysis_s1.h5')", '"""r"""'], {}), "(direc + 'analysis/analysis_s1.h5', 'r')\n", (3533, 3573), False, 'import h5py\n'), ((3641, 3692), 'h5py.File', 'h5py.File', (["(direc + 'snapshots/snapshots_s1.h5')", '"""r"""'], {}), "(direc + 'snapshots/snapshots_s1.h5', 'r')\n", (3650, 3692), False, 'import h5py\n'), ((3714, 3742), 'numpy.array', 'np.array', (["file['tasks']['T']"], {}), "(file['tasks']['T'])\n", (3722, 3742), True, 'import numpy as np\n'), ((3755, 3783), 'numpy.array', 'np.array', (["file['tasks']['v']"], {}), "(file['tasks']['v'])\n", (3763, 3783), True, 'import numpy as np\n'), ((3796, 3824), 'numpy.array', 'np.array', (["file['tasks']['w']"], {}), "(file['tasks']['w'])\n", (3804, 3824), True, 'import numpy as np\n'), ((3842, 3878), 'numpy.array', 'np.array', (["file['scales']['sim_time']"], {}), "(file['scales']['sim_time'])\n", (3850, 3878), True, 'import numpy as np\n'), ((3899, 3936), 'numpy.array', 'np.array', (["file['scales']['iteration']"], {}), "(file['scales']['iteration'])\n", (3907, 3936), True, 'import numpy as np\n'), ((6854, 6902), 'imageio.get_writer', 'imageio.get_writer', (["(direc + 'info.gif')"], {'mode': '"""I"""'}), "(direc + 'info.gif', mode='I')\n", (6872, 6902), False, 'import imageio\n'), ((7132, 7181), 'h5py.File', 'h5py.File', (["(direc + 'analysis/analysis_s1.h5')", '"""r"""'], {}), "(direc + 'analysis/analysis_s1.h5', 'r')\n", (7141, 7181), False, 'import h5py\n'), ((7251, 7284), 'numpy.array', 'np.array', (["f['scales']['sim_time']"], {}), "(f['scales']['sim_time'])\n", (7259, 7284), True, 'import numpy as np\n'), ((1614, 1647), 'numpy.array', 'np.array', (["file['tasks']['L_cond']"], {}), "(file['tasks']['L_cond'])\n", (1622, 1647), True, 'import numpy as np\n'), ((1675, 1708), 'numpy.array', 'np.array', (["file['tasks']['L_conv']"], {}), "(file['tasks']['L_conv'])\n", (1683, 1708), True, 'import numpy as np\n'), ((1728, 1757), 'numpy.array', 'np.array', (["file['tasks']['KE']"], {}), "(file['tasks']['KE'])\n", (1736, 1757), True, 'import numpy as np\n'), ((2160, 2188), 'numpy.abs', 'np.abs', (['(snap_t - avg_t_start)'], {}), '(snap_t - avg_t_start)\n', (2166, 2188), True, 'import numpy as np\n'), ((3596, 3625), 'numpy.array', 'np.array', (["file['tasks']['KE']"], {}), "(file['tasks']['KE'])\n", (3604, 3625), True, 'import numpy as np\n'), ((4208, 4234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (4218, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4252, 4299), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(2)', 'nrows': '(3)', 'figure': 'fig'}), '(ncols=2, nrows=3, figure=fig)\n', (4269, 4299), True, 'import matplotlib.gridspec as gridspec\n'), ((6481, 6499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6497, 6499), True, 'import matplotlib.pyplot as plt\n'), ((6651, 6662), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6660, 6662), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6684), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6682, 6684), True, 'import matplotlib.pyplot as plt\n'), ((6969, 6993), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', 
(6983, 6993), False, 'import imageio\n'), ((7201, 7227), 'numpy.array', 'np.array', (["f['tasks']['KE']"], {}), "(f['tasks']['KE'])\n", (7209, 7227), True, 'import numpy as np\n'), ((2268, 2295), 'numpy.abs', 'np.abs', (['(snap_t - avg_t_stop)'], {}), '(snap_t - avg_t_stop)\n', (2274, 2295), True, 'import numpy as np\n'), ((5111, 5135), 'numpy.transpose', 'np.transpose', (['v[i, :, :]'], {}), '(v[i, :, :])\n', (5123, 5135), True, 'import numpy as np\n'), ((5517, 5541), 'numpy.transpose', 'np.transpose', (['w[i, :, :]'], {}), '(w[i, :, :])\n', (5529, 5541), True, 'import numpy as np\n'), ((5923, 5947), 'numpy.transpose', 'np.transpose', (['T[i, :, :]'], {}), '(T[i, :, :])\n', (5935, 5947), True, 'import numpy as np\n'), ((6788, 6799), 'time.time', 'time.time', ([], {}), '()\n', (6797, 6799), False, 'import time\n'), ((5972, 5992), 'numpy.linspace', 'np.linspace', (['(0)', 'maxT'], {}), '(0, maxT)\n', (5983, 5992), True, 'import numpy as np\n'), ((6451, 6465), 'numpy.max', 'np.max', (['snap_t'], {}), '(snap_t)\n', (6457, 6465), True, 'import numpy as np\n'), ((4559, 4570), 'time.time', 'time.time', ([], {}), '()\n', (4568, 4570), False, 'import time\n'), ((5172, 5181), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (5178, 5181), True, 'import numpy as np\n'), ((5578, 5587), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (5584, 5587), True, 'import numpy as np\n'), ((6407, 6417), 'numpy.max', 'np.max', (['KE'], {}), '(KE)\n', (6413, 6417), True, 'import numpy as np\n')]
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests covering attention used by the DIN model.
"""
import tensorflow as tf
import unittest
import pytest
import numpy as np
import sys
from pathlib import Path
# Add common module to path
common_path = Path(Path(__file__).absolute().parent.parent.parent)
sys.path.append(str(common_path))
from common.utils import din_attention
from din.din_model import DIN
seed = 3
tf.set_random_seed(seed)
@pytest.mark.category1
@pytest.mark.ipus(1)
class TestDINFCN(unittest.TestCase):
"""Testing att layer"""
@classmethod
def setUpClass(cls):
cls.model_dtype = tf.float32
cls.ATTENTION_SIZE = 1
def test_att_results(self):
# test attention layer output
query_value = np.ones([4, 2], np.float32)
query_value = query_value * 0.8
query_inp = tf.placeholder(shape=[4, 2], dtype='float32')
facts_value = np.ones([4, 8, 2], np.float32)
facts_value = facts_value * 0.5
facts_inp = tf.placeholder(shape=[4, 8, 2], dtype='float32')
mask_value = np.ones([4, 8], np.float32)
mask_value = mask_value * 0.2
mask_inp = tf.placeholder(shape=[4, 8], dtype='float32')
out = din_attention(query_inp, facts_inp, self.ATTENTION_SIZE, mask_inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(out, feed_dict={query_inp: query_value, facts_inp: facts_value, mask_inp: mask_value})
y0 = np.float32(0.5)
y1 = np.float32(0.5)
self.assertAlmostEqual(output[0, 0, 0], y0, delta = 0.01)
        self.assertAlmostEqual(output[0, 0, 1], y1, delta = 0.01)
def test_fcn_results(self):
# test fcn results
inputs_value = np.ones([2, 6, 2], np.float32)
inp = tf.placeholder(shape=[2, 6, 2], dtype='float32')
y_hat = DIN.build_fcn_net(self, inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
y = sess.run(y_hat, feed_dict={inp: inputs_value})
y0 = np.float32(0.5225718)
y1 = np.float32(0.47742826)
self.assertAlmostEqual(y[0, 0, 0], y0, delta = 0.01)
self.assertAlmostEqual(y[0, 0, 1], y1, delta = 0.01)
|
[
"pytest.mark.ipus",
"numpy.ones",
"numpy.float32",
"tensorflow.compat.v1.Session",
"pathlib.Path",
"tensorflow.placeholder",
"din.din_model.DIN.build_fcn_net",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"common.utils.din_attention"
] |
[((977, 1001), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (995, 1001), True, 'import tensorflow as tf\n'), ((1028, 1047), 'pytest.mark.ipus', 'pytest.mark.ipus', (['(1)'], {}), '(1)\n', (1044, 1047), False, 'import pytest\n'), ((1319, 1346), 'numpy.ones', 'np.ones', (['[4, 2]', 'np.float32'], {}), '([4, 2], np.float32)\n', (1326, 1346), True, 'import numpy as np\n'), ((1407, 1452), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 2]', 'dtype': '"""float32"""'}), "(shape=[4, 2], dtype='float32')\n", (1421, 1452), True, 'import tensorflow as tf\n'), ((1476, 1506), 'numpy.ones', 'np.ones', (['[4, 8, 2]', 'np.float32'], {}), '([4, 8, 2], np.float32)\n', (1483, 1506), True, 'import numpy as np\n'), ((1567, 1615), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 8, 2]', 'dtype': '"""float32"""'}), "(shape=[4, 8, 2], dtype='float32')\n", (1581, 1615), True, 'import tensorflow as tf\n'), ((1638, 1665), 'numpy.ones', 'np.ones', (['[4, 8]', 'np.float32'], {}), '([4, 8], np.float32)\n', (1645, 1665), True, 'import numpy as np\n'), ((1723, 1768), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 8]', 'dtype': '"""float32"""'}), "(shape=[4, 8], dtype='float32')\n", (1737, 1768), True, 'import tensorflow as tf\n'), ((1784, 1850), 'common.utils.din_attention', 'din_attention', (['query_inp', 'facts_inp', 'self.ATTENTION_SIZE', 'mask_inp'], {}), '(query_inp, facts_inp, self.ATTENTION_SIZE, mask_inp)\n', (1797, 1850), False, 'from common.utils import din_attention\n'), ((2082, 2097), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (2092, 2097), True, 'import numpy as np\n'), ((2111, 2126), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (2121, 2126), True, 'import numpy as np\n'), ((2344, 2374), 'numpy.ones', 'np.ones', (['[2, 6, 2]', 'np.float32'], {}), '([2, 6, 2], np.float32)\n', (2351, 2374), True, 'import numpy as np\n'), ((2389, 2437), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[2, 6, 2]', 'dtype': '"""float32"""'}), "(shape=[2, 6, 2], dtype='float32')\n", (2403, 2437), True, 'import tensorflow as tf\n'), ((2454, 2482), 'din.din_model.DIN.build_fcn_net', 'DIN.build_fcn_net', (['self', 'inp'], {}), '(self, inp)\n', (2471, 2482), False, 'from din.din_model import DIN\n'), ((2660, 2681), 'numpy.float32', 'np.float32', (['(0.5225718)'], {}), '(0.5225718)\n', (2670, 2681), True, 'import numpy as np\n'), ((2695, 2717), 'numpy.float32', 'np.float32', (['(0.47742826)'], {}), '(0.47742826)\n', (2705, 2717), True, 'import numpy as np\n'), ((1864, 1886), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1884, 1886), True, 'import tensorflow as tf\n'), ((2496, 2518), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2516, 2518), True, 'import tensorflow as tf\n'), ((1917, 1950), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1948, 1950), True, 'import tensorflow as tf\n'), ((2549, 2582), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2580, 2582), True, 'import tensorflow as tf\n'), ((816, 830), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (820, 830), False, 'from pathlib import Path\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
import os
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
from test_update_loss_scaling_op_npu import TestUpdateLossScalingOpBad
paddle.enable_static()
SEED = 2021
class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad):
def setUp(self):
self.set_npu()
self.op_type = "update_loss_scaling"
self.place = paddle.NPUPlace(0)
self.init()
fluid.core.globals()['FLAGS_min_loss_scaling'] = 1639
found_inf = np.array([True], dtype=np.bool_)
x = np.random.random((1024, 1024)).astype(self.dtype)
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
x[i[0]][j[0]] = np.inf
self.inputs = {
'X': [('x0', x)],
'FoundInfinite': found_inf,
'PrevLossScaling': self.prev_loss_scaling,
'InGoodSteps': self.num_good_steps,
'InBadSteps': self.num_bad_steps
}
self.outputs = {
'Out': [('out0', np.zeros_like(x))],
'LossScaling': np.array([1639.0]).astype(self.dtype),
'OutGoodSteps': self.zero_steps,
'OutBadSteps': self.zero_steps
}
def init(self):
self.incr_ratio = 2.0
self.decr_ratio = 0.8
self.dtype = np.float32
self.prev_loss_scaling = np.array([2048]).astype(self.dtype)
self.num_good_steps = np.array([999], dtype=np.int32)
self.num_bad_steps = np.array([1], dtype=np.int32)
self.zero_steps = np.array([0], dtype=np.int32)
self.attrs = {
'incr_every_n_steps': 1000,
'decr_every_n_nan_or_inf': 2,
'incr_ratio': self.incr_ratio,
'decr_ratio': self.decr_ratio,
}
if __name__ == '__main__':
unittest.main()
|
[
"numpy.random.random",
"numpy.zeros_like",
"paddle.enable_static",
"numpy.array",
"numpy.random.randint",
"paddle.fluid.core.globals",
"unittest.main",
"paddle.NPUPlace",
"sys.path.append"
] |
[((670, 691), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (685, 691), False, 'import sys\n'), ((895, 917), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (915, 917), False, 'import paddle\n'), ((2536, 2551), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2549, 2551), False, 'import unittest\n'), ((1119, 1137), 'paddle.NPUPlace', 'paddle.NPUPlace', (['(0)'], {}), '(0)\n', (1134, 1137), False, 'import paddle\n'), ((1241, 1273), 'numpy.array', 'np.array', (['[True]'], {'dtype': 'np.bool_'}), '([True], dtype=np.bool_)\n', (1249, 1273), True, 'import numpy as np\n'), ((1348, 1377), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1024)', '(1)'], {}), '(0, 1024, 1)\n', (1365, 1377), True, 'import numpy as np\n'), ((1390, 1419), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1024)', '(1)'], {}), '(0, 1024, 1)\n', (1407, 1419), True, 'import numpy as np\n'), ((2155, 2186), 'numpy.array', 'np.array', (['[999]'], {'dtype': 'np.int32'}), '([999], dtype=np.int32)\n', (2163, 2186), True, 'import numpy as np\n'), ((2216, 2245), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (2224, 2245), True, 'import numpy as np\n'), ((2272, 2301), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (2280, 2301), True, 'import numpy as np\n'), ((1167, 1187), 'paddle.fluid.core.globals', 'fluid.core.globals', ([], {}), '()\n', (1185, 1187), True, 'import paddle.fluid as fluid\n'), ((1286, 1316), 'numpy.random.random', 'np.random.random', (['(1024, 1024)'], {}), '((1024, 1024))\n', (1302, 1316), True, 'import numpy as np\n'), ((2089, 2105), 'numpy.array', 'np.array', (['[2048]'], {}), '([2048])\n', (2097, 2105), True, 'import numpy as np\n'), ((1759, 1775), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1772, 1775), True, 'import numpy as np\n'), ((1806, 1824), 'numpy.array', 'np.array', (['[1639.0]'], {}), '([1639.0])\n', (1814, 1824), True, 'import numpy as np\n')]
|
import math
import numpy as np
import pandas as pd
class PenmanMonteithDaily(object):
r"""The class *PenmanMonteithDaily* calculates daily potential evapotranspiration according to the Penman-Monteith
method as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ (Allen et al.,
1998). Reference evapotranspiration for a hypothetical grass reference crop (:math:`h=12` *cm*;
:math:`albedo=0.23`, and :math:`LAI=2.88`) is calculated by default. Wind and humidity observations at 2 meters
height as well as soil heat flux density :math:`G=0.0` *MJ/m²day* are also assumed by default.
Default values can be changed in the keyword arguments (`**kwargs`) described below.
The class *PenmanMonteithDaily* solves equation 3 in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_:
.. math::
ET = \frac{\Delta (R_n - G) + \rho_a c_p \frac{e_s - e_a}{r_a}}
{\lambda \left[ \Delta + \gamma \left( 1 + \frac{r_s}{r_a} \right) \right]}
\tag{eq. 3, p. 19}
:param elevation: elevation above sea level (*z*) *[m]*. Used in :meth:`clear_sky_shortwave_radiation` and
:meth:`atmospheric_pressure`
:type elevation: float
:param latitude: latitude (:math:`\varphi`) *[decimal degrees]*. Used in :meth:`sunset_hour_angle` and
:meth:`extraterrestrial_radiation`
:type latitude: float
:Keyword Arguments:
* **albedo** (*float*) - albedo or canopy reflection coefficient (:math:`\alpha`) *[-]*.
Range: :math:`0.0 \leq \alpha \leq 1.0`. Default :math:`albedo=0.23` for the hypothetical grass
reference crop. Used in :meth:`net_shortwave_radiation`
* **h** (*float*) - crop height (*h*) *[m]*. Default :math:`h=0.12` for the hypothetical grass reference
crop. Required to calculate the zero plane displacement height (:math:`d`) *[m]* and the roughness length
governing momentum (:math:`z_{om}`) *[m]*, both necessary for the aerodynamic resistance (:math:`r_a`) *[s/m]*.
See :meth:`aerodynamic_resistance_factor`
* **lai** (*float*) - leaf area index (:math:`LAI`) *[-]*. Default :math:`lai=2.88` for the hypothetical
grass reference crop. See *BOX 5* in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ and
:meth:`bulk_surface_resistance`
* **rl** (*float*) - bulk stomatal resistance of well-illuminated leaf (:math:`r_l`) *[s/m]*. Default
:math:`rl=100.0` for any crop. See :meth:`bulk_surface_resistance`
* **zm** (*float*) - height of wind measurements (:math:`z_m`) *[m]*. Default :math:`zm=2.0`. Required to
calculate aerodynamic resistance (:math:`r_a`) *[s/m]*. See :meth:`aerodynamic_resistance_factor`
* **zh** (*float*) - height of humidity measurements (:math:`z_h`) *[m]*. Default :math:`zh=2.0`. Required to
calculate aerodynamic resistance (:math:`r_a`) *[s/m]*. See :meth:`aerodynamic_resistance_factor`
* **g** (*float*) - soil heat flux density (:math:`G`) *[MJ/m²day]*. Default :math:`g=0.0`. This
corresponds to :math:`G` in eq. 3, p. 19 above. It can be also given with daily parameters in :meth:`et0`
.. note::
Only :attr:`elevation` and :attr:`latitude` are mandatory parameters of :meth:`PenmanMonteithDaily()`.
:attr:`albedo`, :attr:`h`, and :attr:`lai` are only necessary when calculating evapotranspiration for crops
other than reference grass.
:ivar doy: day of year *[-]*
:ivar z: elevation in meters above sea level (*z*) *[m]*
:ivar p: atmospheric pressure (*P*) *[kPa]*
:ivar u2: wind speed at height :math:`z` (:math:`u_2`) *[m/s]*
:ivar ld: latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*. See :meth:`latent_heat_of_vaporization()`
:ivar s: slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]*.
See :meth:`slope_of_saturation_vapour_pressure_curve()`
:ivar psych: psychrometric constant (:math:`\gamma`) *[kPa/°C]*. See :meth:`psychrometric_constant()`
:ivar mn: daylight hours (:math:`N`) *[hours]*. See :meth:`daylight_hours()`
:ivar es: saturation vapour pressure (:math:`e_s`) *[kPa]*. See :meth:`saturation_vapour_pressure()`
:ivar ea: actual vapour pressure (:math:`e_a`) *[kPa]*. See :meth:`actual_vapour_pressure()`
:ivar ra: daily extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation()`
:ivar rs: daily shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation()`
:ivar rs0: clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*.
See :meth:`clear_sky_shortwave_radiation()`
:ivar rns: net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]*. See :meth:`net_shortwave_radiation()`
:ivar rnl: net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]*. See :meth:`net_longwave_radiation()`
:ivar rn: net radiation (:math:`R_{n}`) *[MJ/m²day]*. :math:`R_{n} = R_{ns} - R_{nl}`
:ivar etr: radiation component of reference evapotranspiration *[mm/day]*
:ivar etw: wind component of reference evapotranspiration *[mm/day]*
:ivar et: reference evapotranspiration *[mm/day]*
Object Constants:
* **e** - ratio molecular weight of water vapour/dry air (:math:`\varepsilon`) *[-]*.
:math:`e = 0.622`
* **r** - specific gas constant *[kJ/kg.K]*. :math:`r = 0.287`
* **k** - von Karman constant (:math:`k`) *[-]*, see
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ eq. 4.
:math:`k=0.41`
Object crop specific factors:
* **d_factor** - factor of the zero plane displacement height (:math:`d`) *[-]*. :math:`d\_factor = 2.0 / 3.0`
* **zom_factor** - factor of the roughness length governing momentum transfer (:math:`z_{om}`) *[-]*.
:math:`zom\_factor = 0.123`
* **zoh_factor** - factor of the roughness length governing transfer of heat and vapour (:math:`z_{oh}`) *[-]*.
:math:`zoh\_factor = 0.1`
* **lai_active_factor** - factor of the active (sunlit) leaf area index (:math:`LAI_{active}`) *[-]* (it
considers that generally only the upper half of dense clipped grass is actively contributing to the surface
heat and vapour transfer). :math:`lai\_active\_factor = 0.5`
Calculation with :meth:`et0`::
- pm = PenmanMonteithDaily(elevation, latitude, ...)
- et0 = pm.et0(...)
Calculation with :meth:`et0_frame` given a *pandas.DataFrame()* as input parameter::
- pm = PenmanMonteithDaily(elevation, latitude, ...)
- df = pm.et0_frame(df, ...)
"""
def __init__(self, elevation, latitude, **kwargs):
self.albedo = kwargs.get('albedo', 0.23) # albedo
self.h = kwargs.get('h', 0.12) # crop height h [m]
self.zm = kwargs.get('zm', 2.0) # height of wind measurements [m]
self.zh = kwargs.get('zh', 2.0) # roughness length governing transfer of heat and vapour [m]
self.lai = kwargs.get('lai', 2.88) # LAI dependence
self.rl = kwargs.get('rl', 100.0) # The stomatal resistance
self.g_default = kwargs.get('g', 0.0) # soil heat flux density [MJ/m²day]
self.doy = None
self.u2 = None
self.ld = None
self.s = None
self.pc = None
self.mn = None
self.es = None
self.ea = None
self.ra = None
self.rs = None
self.rs0 = None
self.rns = None
self.rnl = None
self.rn = None
self.etr = None
self.etw = None
self.et = None
self.e = 0.622
self.r = 0.287
self.k = 0.41
self.d_factor = 2.0 / 3.0
self.zom_factor = 0.123
self.zoh_factor = 0.1
self.lai_active_factor = 0.5
if latitude:
days = np.array(range(367))
latitude = float(np.radians(latitude))
dr_366 = self.inverse_relative_distance_earth_sun(days)
sd_366 = np.array([self.solar_declination(day) for day in range(367)])
ws_366 = np.array([self.sunset_hour_angle(latitude, s) for s in sd_366])
self.daylight_hours_366 = np.array([PenmanMonteithDaily.daylight_hours(w) for w in ws_366])
self.ra_366 = np.array([self.extraterrestrial_radiation(
dr_366[i], ws_366[i], latitude, sd_366[i]) for i in range(len(dr_366))])
self.rs0_366 = np.array([self.clear_sky_shortwave_radiation(
ra, elevation=elevation) for ra in self.ra_366])
else:
self.daylight_hours_366 = None
self.ra_366 = None
self.rs0_366 = None
self.z = elevation
self.p = PenmanMonteithDaily.atmospheric_pressure(self.z)
ra_factor = self.aerodynamic_resistance_factor()
self.f1 = 86400 * self.e / (1.01 * self.r * ra_factor)
"""f1 = (specific heat at constant pressure) * (mean air density at constant pressure) /
(1.01 * :attr:`r` * :meth:`aerodynamic_resistance_factor`).
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ Box 6
"""
self.f2 = self.bulk_surface_resistance() / ra_factor
r""":math:`f_1 = \frac{rs}{f_{ra}}` with :math:`f_{ra}` = :meth:`aerodynamic_resistance_factor`"""
def reset(self):
r"""Reset the following output attributes before calculating :math:`ETo`: :math:`doy`, :math:`u2`,
:math:`ld`, :math:`s`, :math:`pc`, :math:`mn`, :math:`es`, :math:`ea`, :math:`ra`,
:math:`rs`, :math:`rs0`, :math:`rns`, :math:`rnl`, :math:`rn`, :math:`etr`, :math:`etw`, and :math:`et`
"""
self.doy = None
self.u2 = None
self.ld = None
self.s = None
self.pc = None
self.mn = None
self.es = None
self.ea = None
self.ra = None
self.rs = None
self.rs0 = None
self.rns = None
self.rnl = None
self.rn = None
self.etr = None
self.etw = None
self.et = None
@staticmethod
def atmospheric_pressure(z):
r""" Return the atmospheric pressure (:math:`P`) *[kPa]* as a function of the elevation above sea level as
defined in `FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 7, p. 31):
.. math::
P = 101.3\left(\frac{293-0.0065z}{293}\right)^{5.26}
The atmospheric pressure (:math:`P`) is the pressure exerted by the weight of the earth's atmosphere.
Evaporation at high altitudes is promoted due to low atmospheric pressure as expressed in the psychrometric
constant. The effect is, however, small and in the calculation procedures, the average value for a location
is sufficient. A simplification of the ideal gas law, assuming :math:`20` *°C* for a standard atmosphere,
can be employed to calculate :math:`P`
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_).
:param z: elevation above sea level *[m]*
:type z: float or np.array
:return: (*float or np.array*) atmospheric pressure (:math:`P`) *[kPa]*
"""
return 101.3 * ((293.0 - 0.0065 * z) / 293.0) ** 5.26
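    # Illustrative check (added): at z = 1800 m the formula gives
    # P = 101.3 * ((293 - 0.0065*1800)/293)**5.26, i.e. approximately 81.8 kPa.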
@staticmethod
def latent_heat_of_vaporization(temperature=20):
r"""Return the latent heat of vaporization (:math:`\lambda`) *[MJ/kg]* as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(Annex 3, eq. 3-1, p. 223):
.. math::
\lambda = 2.501-(2.361 * 10^{-3})T
:param temperature: air temperature (:math:`T`) *[°C]*. Default :math:`temperature=20`
:type temperature: float or np.array
:return: (*float or np.array*) latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*.
Default :math:`\lambda=2.45378`
"""
return 2.501 - 2.361e-3 * temperature
@staticmethod
def psychrometric_constant(p, **kwargs):
r"""Return the psychrometric constant (:math:`\gamma`) *[kPa/°C]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
eq. 8, p. 32:
.. math::
\gamma = \frac{c_p P}{\varepsilon \lambda}
or, using default values:
.. math::
\gamma = a_{psy} \cdot P
:param p: atmospheric pressure (:math:`P`) *[kPa]*
:type p: float or np.array
:Keyword Arguments:
* **lamda** (*float*) - latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*. Default :math:`lamda=2.45`.
              See :meth:`latent_heat_of_vaporization`
* **cp** (*float*) - specific heat at constant pressure (:math:`c_p`) *[MJ/kg]*. Default
:math:`cp=1.013e^{-3}`
* **epsilon** (*float*) - ratio molecular weight of water vapour/dry air (:math:`\epsilon`) *[-]*.
Default :math:`epsilon=0.622`
* **a_psy** (*float*) - coefficient depending on the type of the ventilation of the bulb *[1/°C]*. Examples:
* :math:`a_{psy} = 0.000665` (default)
* :math:`a_{psy} = 0.000662` for ventilated (Asmann type) psychrometers, with an air movement of some 5
*m/s*
* :math:`a_{psy} = 0.000800` for natural ventilated psychrometers (about 1 *m/s*)
* :math:`a_{psy} = 0.001200` for non-ventilated psychrometers installed indoors
The method uses :math:`a_{psy}` if given, otherwise eq. 8 (see above) with given or default values. Default
values correspond to :math:`a_{psy} = 0.000665` as argument.
:return: (*float or np.array*) psychrometric constant (:math:`\gamma`) *[kPa/°C]*
"""
if 'a_psy' in kwargs:
return kwargs.get('a_psy', 0.000665) * p
else:
return (kwargs.get('cp', 1.013e-3) * p) / (kwargs.get('epsilon', 0.622) * kwargs.get('lamda', 2.45))
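    # Illustrative check (added): with P of about 81.8 kPa and the default arguments,
    # gamma = 1.013e-3 * 81.8 / (0.622 * 2.45), i.e. roughly 0.054 kPa/°C, which matches
    # a_psy * P with a_psy = 0.000665.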
@staticmethod
def saturation_vapour_pressure(*temperature):
r"""Return the saturation vapour pressure (:math:`e_s`) *[kPa]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 11, p. 36):
.. math::
e^{°}(T) = 0.6108 exp \left[\frac{17.27 T}{T + 237.3}\right]
:param temperature: air temperature (:math:`T`) *[°C]*
:type temperature: float or np.array
:return: (*float or np.array*) saturation vapour pressure (:math:`e_s`) *[kPa]*
"""
t = np.array([0.6108 * np.exp((17.27 * t) / (t + 237.3)) for t in temperature])
t = np.mean(t, axis=0)
return t
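    # Illustrative check (added): saturation_vapour_pressure(20.0) is about 2.34 kPa;
    # called with both t_min and t_max the method returns the mean of the two values.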
@staticmethod
def slope_of_saturation_vapour_pressure_curve(*temperature):
r"""Return the slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 13, p. 37):
.. math::
\Delta = 4098\left[\frac{0.6108exp\left(\frac{17.27 T}{T + 237.3}\right)}{(T + 237.3)^{2}}\right]
:param temperature: air temperature (:math:`T`) *[°C]*
:type temperature: float or np.array
:return: (*float or np.array*) slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]*
"""
sl = np.array([(4098.0 * PenmanMonteithDaily.saturation_vapour_pressure(t)) / ((t + 237.3) ** 2)
for t in temperature])
return np.mean(sl, axis=0)
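    # Illustrative check (added): at T = 20 °C the slope is
    # 4098 * 0.6108 * exp(17.27*20/257.3) / 257.3**2, i.e. roughly 0.145 kPa/°C.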
@staticmethod
def actual_vapour_pressure(**kwargs):
"""Return the actual vapour pressure (:math:`e_a`) *[kPa]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(p. 37 , 38 , and 39):
:Keyword Arguments:
* **rh_min** (*float*) - 0.0 to 100.0 *[%]*
* **rh_max** (*float*) - 0.0 to 100.0 *[%]*
* **es_min** (*float*) - saturation vapour pressure for :math:`t\_min` *[kPa]*
* **es_max** (*float*) - saturation vapour pressure for :math:`t\_max` *[kPa]*
* **t_min** (*float*) - minimum air temperature *[°C]*
* **t_max** (*float*) - maximum air temperature *[°C]*
* **t_dew** (*float*) - dew point temperature *[°C]*
* **t_wet** (*float*) - wet bulb temperature *[°C]*
* **t_dry** (*float*) - dry bulb temperature *[°C]*
* **apsy** (*float*) - coefficient depending on the type of ventilation of the wet bulb *[-]*
:return: (*float or np.array*) actual vapour pressure (:math:`e_a`) *[kPa]*
"""
try:
rh_min = kwargs['rh_min'] / 100.0
rh_max = kwargs['rh_max'] / 100.0
if 'es_min' in kwargs and 'es_max' in kwargs:
es_min = kwargs['es_min']
es_max = kwargs['es_max']
else:
es_min = PenmanMonteithDaily.saturation_vapour_pressure(kwargs['t_min'])
es_max = PenmanMonteithDaily.saturation_vapour_pressure(kwargs['t_max'])
return (rh_max * es_min + rh_min * es_max) / 2.0
except KeyError:
t_dew = kwargs.get('t_dew', None)
return 0.6108 * math.exp((17.27 * t_dew) / (t_dew + 237.3))
def aerodynamic_resistance_factor(self):
r"""Return the aerodynamic resistance (:math:`r_a`) *[s/m]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 4, p. 20):
.. math::
r_a = \frac{ \ln \left( \frac{z_m - d}{z_{om}} \right) \ln \left( \frac{z_h - d}{z_{oh}} \right) }
{ k^2 u_z }
where (see :meth:`PenmanMonteithDaily()`):
:math:`u_z` --- the wind speed *[m/s]* at height :math:`z` (see :meth:`et0()`)
:math:`k` --- von Karman's constant *[-]*
:math:`zm` --- height of wind measurements *[m]*
:math:`zh` --- height of air humidity measurements *[m]*
The aerodynamic resistance factor :math:`f_{r_a}` is constant for a given crop:
.. math::
f_{r_a} = \frac{ \ln \left( \frac{z_m - d}{z_{om}} \right) \ln \left( \frac{z_h - d}{z_{oh}} \right) }
{ k^2}
with the zero plane displacement height (:math:`d`):
.. math::
d = f_d \cdot h
and roughness length governing momentum transfer (:math:`z_{om}`):
.. math::
z_{om} = f_{zom} \cdot h
where:
:math:`f_d` --- defined in :attr:`d_factor`
:math:`f_{zom}` --- defined in in :attr:`zom_factor`
:return: (*float*) aerodynamic resistance factor :math:`f_{r_a}`
"""
# zero plane displacement height, d [m]
d = self.d_factor * self.h
# roughness length governing momentum transfer [m]
zom = self.zom_factor * self.h
# roughness length governing transfer of heat and vapour [m]
zoh = self.zoh_factor * zom
return math.log((self.zm - d) / zom) * math.log((self.zh - d) / zoh) / (self.k ** 2)
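    # Illustrative check (added): for the default grass reference crop (h = 0.12 m,
    # zm = zh = 2 m) the factor evaluates to about 208, reproducing the familiar
    # FAO-56 aerodynamic resistance r_a = 208/u2 s/m.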
def bulk_surface_resistance(self):
r"""Return (bulk) surface resistance (:math:`r_s`) *[s/m]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 5, p. 21):
.. math::
r_s = \frac{ r_l } { LAI_{active} }
where:
:math:`r_l` --- the bulk stomatal resistance of the well-illuminated leaf *[s/m]*
:math:`LAI_{active}` --- the active (sunlit) leaf area index *[m² (leaf area) / m² (soil surface)]*
A general equation for :math:`LAI_{active}` is:
.. math::
LAI_{active} = 0.5 LAI
with:
.. math::
LAI = 24 h
where :math:`h` is an optional input parameter in :class:`PenmanMonteithDaily`.
:return: (*float*) (bulk) surface resistance :math:`r_s` *[s/m]*
"""
#
# active (sunlit) leaf area index [m^2 (leaf area) / m^2 (soil surface)]
lai_active = self.lai_active_factor * self.lai
rs = self.rl / lai_active
return rs
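# Added worked example (assuming the FAO 56 defaults rl = 100 s/m and
# h = 0.12 m): LAI = 24 * 0.12 = 2.88, LAI_active = 0.5 * 2.88 = 1.44, so
# rs = 100 / 1.44 ≈ 70 s/m, the surface resistance of the reference grass
# crop used in FAO 56.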
@staticmethod
def to_u2(uz, z):
r""" Return the calculated wind speed at 2 meters above ground surface (:math:`u_2`) *[m/s]*:
.. math::
u_2 = \frac{ 4.87 u_z}{ \ln{(67.8 z - 5.42)}}
:param uz: measured wind speed at :math:`z` meters above ground surface *[m/s]*
:type uz: float or np.array
:param z: height of measurement above ground surface *[m]*
:type z: float
:return: (*float or np.array*) wind speed at 2 meters above ground surface *[m/s]*
"""
return uz * 4.87 / np.log(67.8 * z - 5.42)
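# Added example (FAO 56, Example 14): a wind speed of 3.2 m/s measured at
# z = 10 m converts to u2 = 3.2 * 4.87 / ln(67.8 * 10 - 5.42) ≈ 2.4 m/s.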
@staticmethod
def extraterrestrial_radiation(dr, ws, lat, sd):
r"""Return the extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 21, p. 46):
.. math::
R_a = \frac{24(60)}{\pi} G_{sc} d_r [ \omega_s \sin(\varphi) \sin(\delta) + \cos(\varphi) \cos(\delta)
\sin(\omega_s)]
:param dr: inverse relative distance Earth-Sun (:math:`d_r`) *[-]*.
See :meth:`inverse_relative_distance_earth_sun`
:type dr: float
:param ws: sunset hour angle (:math:`\omega_s`) *[rad]*. See :meth:`sunset_hour_angle`
:type ws: float
:param lat: latitude (:math:`\varphi`) *[rad]*
:type lat: float
:param sd: solar declination (:math:`\delta`) *[rad]*. See :meth:`solar_declination`
:type sd: float
:return: *(float or np.array)* daily extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*
"""
# solar_constant = 0.0820 # MJ.m-2.min-1
# (24.0 * 60.0 / pi) * solar_constant = 37.586031360582005
return 37.586031360582005 * dr * (ws * np.sin(lat) * np.sin(sd) + np.cos(lat) * np.cos(sd) * np.sin(ws))
@staticmethod
def inverse_relative_distance_earth_sun(day):
r"""Return the inverse relative distance Earth-Sun (:math:`d_r`) *[-]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 23, p. 46):
.. math::
d_r = 1 + 0.033 \cos{ \left( \frac{2 \pi}{365} J \right)}
:param day: day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`
:type day: int or np.array
:return: *(float or np.array)* inverse relative distance Earth-Sun (:math:`d_r`) *[-]*
"""
# 2.0 * pi / 365 = 0.01721420632103996
return 1 + 0.033 * np.cos(0.01721420632103996 * day)
@staticmethod
def solar_declination(day):
r"""Return the solar declination (:math:`\delta`) *[rad]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 24, p. 46):
.. math::
\delta = 0.409 \sin{ \left( \frac{2 \pi}{365} J - 1.39\right)}
:param day: day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`
:type day: int
:return: (*float or np.array*) solar declination (:math:`\delta`) *[rad]*
"""
# 2.0 * pi / 365 = 0.01721420632103996
return 0.409 * np.sin(0.01721420632103996 * day - 1.39)
@staticmethod
def sunset_hour_angle(lat, sd):
r"""Return the sunset hour angle (:math:`\omega_s`) *[rad]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 25, p. 46):
.. math::
\omega_s = \arccos{ \left[-tan(\varphi)tan(\delta)\right]}
:param lat: latitude (:math:`\varphi`) *[rad]*
:type lat: float or np.array
:param sd: solar declination (:math:`\delta`) *[rad]*. See :meth:`solar_declination`
:type sd: float or np.array
:return: (*float or np.array*) sunset hour angle (:math:`\omega_s`) *[rad]*
"""
return np.arccos(-np.tan(sd) * np.tan(lat))
@staticmethod
def daylight_hours(ws):
r"""Return the daylight hours (:math:`N`) *[hour]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 34, p. 49):
.. math::
N = \frac{24}{\pi} \omega_s
:param ws: sunset hour angle (:math:`\omega_s`) *[rad]*. See :meth:`sunset_hour_angle`
:type ws: float or np.numpy
:return: (*float or np.numpy*) daylight hours (:math:`N`) *[hour]*
"""
# 24.0 / pi = 7.639437268410976
return 7.639437268410976 * ws
@staticmethod
def clear_sky_shortwave_radiation(ra, elevation=0.0, a_s=0.25, b_s=0.50):
r"""Return the clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*. It is required for computing
:meth:`net_longwave_radiation`.
For near sea level or when calibrated values for :math:`a_s` and :math:`b_s` are available
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_, eq. 36,
p. 51):
.. math::
R_{so} = (a_s + b_s ) R_a
When calibrated values for :math:`a_s` and :math:`b_s` are not available
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_,
eq. 37, p. 51):
.. math::
R_{so} = (0.75 + 2 \times 10^{-5} z) R_a
where :math:`z` is the station elevation above sea level *[m]*.
:param ra: extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation`
:type ra: float or np.numpy
:param elevation: meters above sea level see (:math:`z`) [m]. See :attr:`elevation`
:type elevation: float or np.numpy
:param a_s: regression constant (:math:`a_s`) *[-]*. Default :math:`a_s=0.25`. It expresses the fraction of
extraterrestrial radiation reaching the earth on overcast days (:math:`n = 0`)
:type a_s: float or np.numpy
:param b_s: regression constant (:math:`b_s`) *[-]*. Default :math:`b_s=0.50`. The expression
:math:`a_s+b_s` indicates the fraction of extraterrestrial radiation reaching the earth on clear days
(:math:`n = N`)
:type b_s: float or np.numpy
:return: (*float or np.numpy*) daily clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*
"""
rs0 = ((a_s + b_s) + 2e-5 * elevation) * ra
return rs0
@staticmethod
def shortwave_radiation(ra, n, mn, a_s=0.25, b_s=0.50):
r"""Return the daily shortwave radiation (:math:`R_s`) *[MJ/m²day]* according to the Angstrom formula as
described in `FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 35, p. 50):
.. math::
R_s = \left( a_s + b_s \frac{n}{N} \right) R_a
Depending on atmospheric conditions (humidity, dust) and solar declination (latitude and month), the Angstrom
values :math:`a_s` and :math:`b_s` will vary. Where no actual solar radiation data are available and no
calibration has been carried out for improved :math:`a_s` and :math:`b_s` parameters, the values
:math:`a_s = 0.25` and :math:`b_s = 0.50` are recommended.
:param ra: extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation`
:type ra: float or np.array
:param n: actual duration of sunshine or cloudless hours (:math:`n`) *[hour]*
:type n: float or np.array
:param mn: maximum possible duration of sunshine or daylight hours (:math:`N`) *[hour]*
See :meth:`daylight_hours`
:type mn: float, np.array
:param a_s: regression constant (:math:`as`) *[-]*. Default :math:`a_s=0.25`. It expresses the fraction
of extraterrestrial radiation reaching the earth on overcast days (:math:`n = 0`)
:type a_s: float or np.numpy
:param b_s: regression constant (:math:`bs`) *[-]*. Default :math:`b_s=0.50`. The expression
:math:`a_s+b_s` indicates the fraction of extraterrestrial radiation reaching the earth on clear days
(:math:`n = N`)
:type b_s: float or np.numpy
:return: (*float, np.array*) daily total shortwave radiation (:math:`R_s`) *[MJ/m²day]* reaching the earth
.. note::
If shortwave radiation (i.e., solar radiation) measurements are available, :meth:`shortwave_radiation`
function is not needed. Measurements of shortwave radiation may be directly used as input data in
:meth:`et0`.
"""
rns = (a_s + b_s * n / mn) * ra
return rns
@staticmethod
def net_shortwave_radiation(rs, albedo):
r"""The net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]* resulting from the balance between incoming
and reflected solar radiation as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 38, p. 51):
.. math::
R_{ns} = (1 - \alpha) R_s
:param rs: daily shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation`
:type rs: float or np.array
:param albedo: albedo or reflection coefficient (:math:`\alpha` *[-]*). Range:
:math:`0.0 \leq \alpha \leq 1.0` (:math:`\alpha=0.23` for the hypothetical grass reference crop).
See :class:`PenmanMonteithDaily` and :meth:`et0`
:type albedo: float or np.array
:return: (*float or np.array*) daily net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]* reaching the earth
"""
return (1.0 - albedo) * rs
@staticmethod
def net_longwave_radiation(t_min, t_max, rs, rs0, ea=None):
r"""Return the net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 39, p. 52):
.. math::
R_{nl} = \sigma\left[\frac{T_{max,K}^4 + T_{min,K}^4}{2}\right](0.34-0.14\sqrt{e_a})\left(1.35
\frac{R_s}{R_{so}}-0.35\right)
:param t_min: minimum daily air temperature (:math:`T_{min}`) *[°C]*
:type t_min: float or np.array
:param t_max: maximum daily air temperature (:math:`T_{max}`) *[°C]*
:type t_max: float or np.array
:param rs: shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation`
:type rs: float or np.array
:param rs0: clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*. See
:meth:`clear_sky_shortwave_radiation`
:type rs0: float or np.array
:param ea: actual vapour pressure (:math:`e_a`) *[kPa]*
:type ea: float or np.array
:return: (*float or np.array*) daily net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]*
.. note::
The :math:`R_s/R_{so}` term in the equation above must be limited so that :math:`R_s/R_{so} \leq 1.0`.
"""
t_min = t_min + 273.15
t_max = t_max + 273.15
if ea is not None:
rln = 4.903e-9 * (t_min ** 4 + t_max ** 4) * 0.5 * (0.34 - 0.14 * np.sqrt(ea)) * (1.35 * rs / rs0 - 0.35)
else:
t_mean = (t_min + t_max) / 2.0
rln = 4.903e-9 * (t_min ** 4 + t_max ** 4) * 0.5 * \
(-0.02 + 0.261 * np.exp(-7.77e10 ** -4 * t_mean ** 2)) * (1.35 * rs / rs0 - 0.35)
return rln
def et0(self, **kwargs):
r"""Returns potential evapotranspiration (:math:`ETo`) *[mm/day]* as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_. Reference
(grass) potential evapotranspiration is returned for default constructor values. If values in `**kwargs` are
arrays, their lengths must be the same.
:Keyword Arguments:
* **date** (*str, datetime.date, datetime.datetime, pandas.TimeStamp, or np.array*)
* **doy** (*int or np.array*) - day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`.
It is not used if date is given
* **u2** (*float or np.array*) - wind speed at 2 meters above ground surface *[m/s]*
* **uz** (*float or np.array*) - measured wind speed at :math:`z` meters above ground surface *[m/s]*
* **z** (*float or np.array*) - height of measurement above ground surface *[m]*
* **t_mean** (*float or np.array*) - daily mean air temperature *[°C]*
* **t_min** (*float or np.array*) - daily minimum air temperature *[°C]*
* **t_max** (*float or np.array*) - daily maximum air temperature *[°C]*
* **rh_mean** (*float or np.array*) - daily mean relative humidity *[%]*
* **rh_min** (*float or np.array*) - daily minimum relative humidity *[%]*
* **rh_max** (*float or np.array*) - daily maximum relative humidity *[%]*
* **rs** (*float or np.array*) - solar or shortwave radiation *[MJ/m²day]*
* **n** (*float or np.array*) - daily actual duration of sunshine or cloudless hours *[hour]*
* **g** (*float or np.array*) - soil heat flux density *[MJ/m²day]*. If not given, *g* defined in
:meth:`PenmanMonteithDaily` will be used
* **a_s** (*float or np.array*) - see :meth:`shortwave_radiation`. Default :math:`a_s = 0.25`
* **b_s** (*float or np.array*) - see :meth:`shortwave_radiation`. Default :math:`b_s = 0.50`
* **negative_rnl** (*bool*) - allow negative net longwave radiation. Default :math:`negative\_rnl=True`
* **negative_et0** (*bool*) - allow negative reference evapotranspiration. Default :math:`negative\_et0=True`
:return: (*float or np.array*) potential evapotranspiration (:math:`ETo`) *[mm/day]*
Cases:
* If date and doy are given, :math:`doy` is disregarded
* if :math:`uz` is given, :math:`z` must also be given
* if :math:`u2` and (:math:`uz`, :math:`z`) are given, both :math:`uz` and :math:`z` are disregarded
* if :math:`rs` and :math:`n` are given, :math:`n` will be disregarded
* The best options for air temperature are, in this order: 1) t_min, t_max, and t_mean, 2) t_min, t_max, and
3) t_mean
* The best options for relative air humidity are, in this order: 1) rh_max and rh_min, 2) rh_max, and 3)
rh_mean
Example 1::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> et0 = pm.et0(doy=187, u2=2.078, t_min=12.3, t_max=21.5, rh_min=63, rh_max=84, n=9.25)
>>> print(et0)
3.872968723753793
Example 2::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> et0 = pm.et0(date='2001-07-06', u2=2.078, t_min=12.3, t_max=21.5, rh_min=63, rh_max=84, n=9.25)
>>> print(et0)
3.872968723753793
Example 3::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> date=np.array(['2001-07-06', '2001-07-06'])
>>> u2=np.array([2.078, 2.078])
>>> t_min=np.array([12.3, 12.3])
>>> t_max=np.array([21.5, 21.5])
>>> rh_min=np.array([63, 63])
>>> rh_max=np.array([84, 84])
>>> n=np.array([9.25, 9.25])
>>> et0 = pm.et0(date=date, u2=u2, t_min=t_min, t_max=t_max, rh_min=rh_min, rh_max=rh_max, n=n)
>>> print(et0)
[3.87296872 3.87296872]
"""
self.reset()
try:
self.u2 = kwargs.get('u2', None)
if self.u2 is None:
self.u2 = self.to_u2(kwargs['uz'], kwargs['z'])
except KeyError:
raise KeyError('Penman-Monteith: Either u2 or both uz and z must be given')
t_min = kwargs.get('t_min', None)
if t_min is None:
t_min = kwargs['t_mean']
t_max = kwargs.get('t_max', None)
if t_max is None:
t_max = kwargs['t_mean']
t_mean = kwargs.get('t_mean', None)
rh_min = kwargs.get('rh_min', None)
rh_max = kwargs.get('rh_max', None)
if rh_max is not None:
if rh_min is None:
rh_min = rh_max
else:
rh_min = rh_max = kwargs['rh_mean']
self.doy = kwargs.get('doy', None)
if self.doy is None:
self.doy = pd.to_datetime(kwargs['date']).dayofyear
self.rs = kwargs.get('rs', None)
n = kwargs.get('n', None)
g = kwargs.get('g', None)
if g is None:
g = self.g_default
a_s = kwargs.get('a_s', 0.25)
b_s = kwargs.get('b_s', 0.50)
if t_mean is None:
t_mean = (t_min + t_max) / 2.0
self.ld = PenmanMonteithDaily.latent_heat_of_vaporization(t_mean)
# In FAO 56, where delta occurs in the numerator and denominator, the slope
# of the vapour pressure curve is calculated using mean air temperature (Equation 9)
self.s = PenmanMonteithDaily.slope_of_saturation_vapour_pressure_curve(t_mean)
self.pc = PenmanMonteithDaily.psychrometric_constant(self.p, lamda=self.ld)
self.es = PenmanMonteithDaily.saturation_vapour_pressure(t_min, t_max)
self.ea = PenmanMonteithDaily.actual_vapour_pressure(rh_min=rh_min, rh_max=rh_max, t_min=t_min, t_max=t_max)
try:
self.ra = np.array([self.ra_366[i] for i in self.doy])
self.rs0 = np.array([self.rs0_366[i] for i in self.doy])
if self.rs is None:
self.mn = np.array([self.daylight_hours_366[i] for i in self.doy])
self.rs = self.shortwave_radiation(self.ra, n, self.mn, a_s, b_s)
# FAO56 eq. 39. The Rs/Rso term in equation 39 must be limited so that Rs/Rso ≤ 1.0.
self.rs = np.where(self.rs > self.rs0, self.rs0, self.rs)
except TypeError:
self.ra = self.ra_366[self.doy]
self.rs0 = self.rs0_366[self.doy]
if self.rs is None:
self.mn = self.daylight_hours_366[self.doy]
self.rs = self.shortwave_radiation(self.ra, n, self.mn, a_s, b_s)
# FAO56 eq. 39. The Rs/Rso term in equation 39 must be limited so that Rs/Rso ≤ 1.0.
self.rs = self.rs0 if self.rs > self.rs0 else self.rs
self.rns = self.net_shortwave_radiation(self.rs, self.albedo)
self.rnl = self.net_longwave_radiation(t_min, t_max, self.rs, self.rs0, self.ea)
if kwargs.get('negative_rnl', False) and self.rnl < 0.0:
self.rnl = 0.0
self.rn = self.rns - self.rnl
# denominator of FAO 56 eq. 3
etd = self.ld * (self.s + self.pc * (1 + self.f2 * self.u2))
# ETo energy component of FAO 56 eq. 3
self.etr = self.s * (self.rn - g) / etd
# ETo wind component of FAO 56 eq. 3
self.etw = (self.ld * self.pc * self.u2 * self.f1 * (self.es - self.ea) / (t_mean + 273.0)) / etd
# Reference evapotranspiration
self.et = self.etr + self.etw
self.et = np.where(self.et < 0.0, 0.0, self.et)
try:
self.et = float(self.et)
except TypeError:
pass
if kwargs.get('negative_et0', False) and self.et < 0.0:
self.et = 0.0
return self.et
def et0_frame(self, df, **kwargs):
"""Return the input DataFrame extended by :meth:`et0` and further calculation parameters.
:param df: pandas DataFrame with columns corresponding to the inputs described in :meth:`et0`
:type df: pandas.DataFrame
:Keyword Arguments:
* **show_all** (*bool*) - show all results if :math:`True`, otherwise set `parameter=True` to show individual
parameters. For example :math:`doy=True`, :math:`ld=True`, etc. See :meth:`PenmanMonteithDaily`
:return: (*pandas.DataFrame*) DataFrame
"""
doy_str = kwargs.get('doy', 'doy')
date_str = kwargs.get('date', 'date')
u2_str = kwargs.get('u2', 'u2')
uz_str = kwargs.get('uz', 'uz')
z_str = kwargs.get('z', 'z')
t_mean_str = kwargs.get('t_mean', 't_mean')
t_min_str = kwargs.get('t_min', 't_min')
t_max_str = kwargs.get('t_max', 't_max')
rh_mean_str = kwargs.get('rh_mean', 'rh_mean')
rh_min_str = kwargs.get('rh_min', 'rh_min')
rh_max_str = kwargs.get('rh_max', 'rh_max')
rs_str = kwargs.get('rs', 'rs')
n_str = kwargs.get('n', 'n')
g_str = kwargs.get('g', 'g')
columns = df.columns
doy = df[doy_str].values if doy_str in columns else None
date = df[date_str].values if date_str in columns else None
u2 = df[u2_str].values if u2_str in columns else None
uz = df[uz_str].values if uz_str in columns else None
z = df[z_str].values if z_str in columns else None
t_mean = df[t_mean_str].values if t_mean_str in columns else None
t_min = df[t_min_str].values if t_min_str in columns else None
t_max = df[t_max_str].values if t_max_str in columns else None
rh_mean = df[rh_mean_str].values if rh_mean_str in columns else None
rh_min = df[rh_min_str].values if rh_min_str in columns else None
rh_max = df[rh_max_str].values if rh_max_str in columns else None
rs = df[rs_str].values if rs_str in columns else None
n = df[n_str].values if n_str in columns else None
g = df[g_str].values if g_str in columns else None
self.et0(doy=doy, date=date, u2=u2, uz=uz, z=z, t_mean=t_mean, t_min=t_min, t_max=t_max,
rh_mean=rh_mean, rh_min=rh_min, rh_max=rh_max, rs=rs, n=n, g=g)
show_all = kwargs.get('show_all', True)
if show_all:
if doy is None:
df['DoY'] = self.doy
df['Lambda'] = self.ld
df['Psy'] = self.pc
df['Delta'] = self.s
df['es'] = self.es
df['ea'] = self.ea
df['Rs'] = self.rs
df['Rns'] = self.rns
df['Rnl'] = self.rnl
df['ET0r'] = self.etr
df['ET0w'] = self.etw
df['ET0'] = self.et
else:
if kwargs.get('Lambda', False):
df['Lambda'] = self.ld
if kwargs.get('Psy', False):
df['Psy'] = self.pc
if kwargs.get('Delta', False):
df['Delta'] = self.s
if kwargs.get('es', False):
df['es'] = self.es
if kwargs.get('ea', False):
df['ea'] = self.ea
if kwargs.get('Rs', False):
df['Rs'] = self.rs
if kwargs.get('Rns', False):
df['Rns'] = self.rns
if kwargs.get('Rnl', False):
df['Rnl'] = self.rnl
if kwargs.get('ET0r', False):
df['ET0r'] = self.etr
if kwargs.get('ET0w', False):
df['ET0w'] = self.etw
if kwargs.get('ET0', True):
df['ET0'] = self.et
return df
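# Minimal usage sketch for et0_frame(), added for illustration; the pandas
# import and the column names below are assumptions based on the docstrings
# above, not part of the original module.
if __name__ == '__main__':
    import pandas as pd
    pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
    frame = pd.DataFrame({'date': ['2001-07-06'], 'u2': [2.078],
                          't_min': [12.3], 't_max': [21.5],
                          'rh_min': [63], 'rh_max': [84], 'n': [9.25]})
    # et0_frame() appends 'ET0' and the intermediate terms as new columns;
    # for these inputs ET0 ≈ 3.87 mm/day (cf. Example 2 in the et0 docstring).
    print(pm.et0_frame(frame)[['date', 'ET0']])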
|
[
"numpy.radians",
"numpy.mean",
"numpy.sqrt",
"numpy.tan",
"numpy.where",
"numpy.log",
"math.log",
"numpy.exp",
"numpy.array",
"numpy.cos",
"numpy.sin",
"math.exp",
"pandas.to_datetime"
] |
[((15037, 15055), 'numpy.mean', 'np.mean', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (15044, 15055), True, 'import numpy as np\n'), ((15924, 15943), 'numpy.mean', 'np.mean', (['sl'], {'axis': '(0)'}), '(sl, axis=0)\n', (15931, 15943), True, 'import numpy as np\n'), ((40204, 40241), 'numpy.where', 'np.where', (['(self.et < 0.0)', '(0.0)', 'self.et'], {}), '(self.et < 0.0, 0.0, self.et)\n', (40212, 40241), True, 'import numpy as np\n'), ((21214, 21237), 'numpy.log', 'np.log', (['(67.8 * z - 5.42)'], {}), '(67.8 * z - 5.42)\n', (21220, 21237), True, 'import numpy as np\n'), ((23873, 23913), 'numpy.sin', 'np.sin', (['(0.01721420632103996 * day - 1.39)'], {}), '(0.01721420632103996 * day - 1.39)\n', (23879, 23913), True, 'import numpy as np\n'), ((38516, 38560), 'numpy.array', 'np.array', (['[self.ra_366[i] for i in self.doy]'], {}), '([self.ra_366[i] for i in self.doy])\n', (38524, 38560), True, 'import numpy as np\n'), ((38584, 38629), 'numpy.array', 'np.array', (['[self.rs0_366[i] for i in self.doy]'], {}), '([self.rs0_366[i] for i in self.doy])\n', (38592, 38629), True, 'import numpy as np\n'), ((8125, 8145), 'numpy.radians', 'np.radians', (['latitude'], {}), '(latitude)\n', (8135, 8145), True, 'import numpy as np\n'), ((19481, 19510), 'math.log', 'math.log', (['((self.zm - d) / zom)'], {}), '((self.zm - d) / zom)\n', (19489, 19510), False, 'import math\n'), ((19513, 19542), 'math.log', 'math.log', (['((self.zh - d) / zoh)'], {}), '((self.zh - d) / zoh)\n', (19521, 19542), False, 'import math\n'), ((23204, 23237), 'numpy.cos', 'np.cos', (['(0.01721420632103996 * day)'], {}), '(0.01721420632103996 * day)\n', (23210, 23237), True, 'import numpy as np\n'), ((24632, 24643), 'numpy.tan', 'np.tan', (['lat'], {}), '(lat)\n', (24638, 24643), True, 'import numpy as np\n'), ((37506, 37536), 'pandas.to_datetime', 'pd.to_datetime', (["kwargs['date']"], {}), "(kwargs['date'])\n", (37520, 37536), True, 'import pandas as pd\n'), ((38688, 38744), 'numpy.array', 'np.array', (['[self.daylight_hours_366[i] for i in self.doy]'], {}), '([self.daylight_hours_366[i] for i in self.doy])\n', (38696, 38744), True, 'import numpy as np\n'), ((38954, 39001), 'numpy.where', 'np.where', (['(self.rs > self.rs0)', 'self.rs0', 'self.rs'], {}), '(self.rs > self.rs0, self.rs0, self.rs)\n', (38962, 39001), True, 'import numpy as np\n'), ((14968, 14999), 'numpy.exp', 'np.exp', (['(17.27 * t / (t + 237.3))'], {}), '(17.27 * t / (t + 237.3))\n', (14974, 14999), True, 'import numpy as np\n'), ((17672, 17713), 'math.exp', 'math.exp', (['(17.27 * t_dew / (t_dew + 237.3))'], {}), '(17.27 * t_dew / (t_dew + 237.3))\n', (17680, 17713), False, 'import math\n'), ((22462, 22472), 'numpy.sin', 'np.sin', (['sd'], {}), '(sd)\n', (22468, 22472), True, 'import numpy as np\n'), ((22502, 22512), 'numpy.sin', 'np.sin', (['ws'], {}), '(ws)\n', (22508, 22512), True, 'import numpy as np\n'), ((24619, 24629), 'numpy.tan', 'np.tan', (['sd'], {}), '(sd)\n', (24625, 24629), True, 'import numpy as np\n'), ((22448, 22459), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (22454, 22459), True, 'import numpy as np\n'), ((22475, 22486), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (22481, 22486), True, 'import numpy as np\n'), ((22489, 22499), 'numpy.cos', 'np.cos', (['sd'], {}), '(sd)\n', (22495, 22499), True, 'import numpy as np\n'), ((31976, 31987), 'numpy.sqrt', 'np.sqrt', (['ea'], {}), '(ea)\n', (31983, 31987), True, 'import numpy as np\n'), ((32171, 32213), 'numpy.exp', 'np.exp', (['(-77700000000.0 ** -4 * t_mean ** 2)'], {}), '(-77700000000.0 ** -4 * t_mean ** 2)\n', (32177, 32213), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
# pylint: disable=invalid-name,g-bad-import-order,missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from absl import app
from absl import flags
from concurrent import futures
import gin
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Any, Dict, List, Optional, Tuple
from neutra import utils
tfd = tfp.distributions
tfb = tfp.bijectors
FLAGS = flags.FLAGS
TRAIN_BATCH = 250
TEST_BATCH = 1000
AIS_BATCH = 50
def ReduceL2(tensor, dims):
return tf.sqrt(tf.reduce_sum(tf.square(tensor), dims))
@utils.MakeTFTemplate
def Conv2DWN(inputs,
num_filters,
kernel_size=[3, 3],
stride=[1, 1],
pad="SAME",
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.shape[3])
with tf.variable_scope(scope, "conv_2d_wn"):
w = tf.get_variable(
"w", [kernel_size[0], kernel_size[1], num_inputs, num_filters],
initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, stride[0], stride[1], 1], pad)
if biases_initializer is not None:
out += tf.reshape(b, [1, 1, 1, num_filters])
return activation(out)
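# Added note: Conv2DWN above is a weight-normalised convolution. The effective
# kernel is exp(g) * w / ||w||_2 (the norm taken per output filter over the
# spatial and input-channel axes), and g is initialised to log ||w_init||_2 so
# that the layer initially behaves like an ordinary convolution with the
# chosen initializer.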
def GetLinearARMask(num_inputs, num_outputs, zero_diagonal=False):
assert num_inputs % num_outputs == 0 or num_outputs % num_inputs == 0, "%d vs %d" % (num_inputs, num_outputs)
mask = np.ones([num_inputs, num_outputs], dtype=np.float32)
if num_outputs >= num_inputs:
k = num_outputs // num_inputs
for i in range(num_inputs):
mask[i + 1:, i * k:(i + 1) * k] = 0
if zero_diagonal:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = num_inputs // num_outputs
for i in range(num_outputs):
mask[(i + 1) * k:, i:i + 1] = 0
if zero_diagonal:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
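# Added illustration: for num_inputs = num_outputs = 3 and zero_diagonal=False,
# GetLinearARMask returns
#   [[1, 1, 1],
#    [0, 1, 1],
#    [0, 0, 1]]
# i.e. mask[i, j] = 1 only for i <= j, so output j may depend on inputs up to
# and including j; with zero_diagonal=True the diagonal is zeroed as well and
# output j only sees inputs strictly before it.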
def GetConvARMask(h, w, num_inputs, num_filters, zero_diagonal=False):
l = (h - 1) // 2
m = (w - 1) // 2
mask = np.ones([h, w, num_inputs, num_filters], dtype=np.float32)
mask[:l, :, :, :] = 0
mask[l, :m, :, :] = 0
mask[l, m, :, :] = GetLinearARMask(num_inputs, num_filters, zero_diagonal)
return mask
@utils.MakeTFTemplate
def Conv2DAR(inputs, num_filters,
kernel_size=[3, 3],
zero_diagonal=False,
weights_initializer=None,
biases_initializer=tf.zeros_initializer(),
scope=None):
num_inputs = int(inputs.get_shape()[3])
mask = GetConvARMask(kernel_size[0], kernel_size[1], num_inputs, num_filters, zero_diagonal)
w = tf.get_variable("w", [kernel_size[0], kernel_size[1], num_inputs, num_filters], initializer=weights_initializer)
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value() * mask, [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w * mask, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, 1, 1, 1], "SAME")
return out + tf.reshape(b, [1, 1, 1, num_filters])
@utils.MakeTFTemplate
def ConvAR(x,
h=None,
real_event_shape=[],
hidden_layers=[],
**kwargs):
#input_shape = (
# np.int32(x.shape.as_list())
# if x.shape.is_fully_defined() else tf.shape(x))
#x = tf.reshape(x, [-1] + real_event_shape)
for i, units in enumerate(hidden_layers):
x = Conv2DAR("conv2d_ar_%d"%i, num_filters=units, zero_diagonal=False, **kwargs)(inputs=x)
if i == 0 and h is not None:
if h.shape[-1] != x.shape[-1]:
x += Conv2DWN("conv2d_h", num_filters=int(x.shape[-1]), kernel_size=[1, 1], stride=[1, 1])(h)
else:
x += h
x = tf.nn.elu(x)
shift = Conv2DAR(
"conv2d_shift",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
log_scale = Conv2DAR(
"conv2d_scale",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
#shift = tf.reshape(shift, input_shape)
#log_scale = tf.reshape(log_scale, input_shape)
return shift, log_scale
@utils.MakeTFTemplate
def DenseWN(inputs,
num_outputs,
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.get_shape()[1])
with tf.variable_scope(scope, "dense_wn"):
w = tf.get_variable(
"w", [num_inputs, num_outputs], initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_outputs], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0])))
g = tf.exp(g)
w = g * tf.nn.l2_normalize(w, [0])
out = tf.matmul(inputs, w)
if biases_initializer is not None:
out += tf.expand_dims(b, 0)
return activation(out)
@utils.MakeTFTemplate
def ResConv2D(inputs,
num_filters,
kernel_size,
stride,
activation=tf.nn.elu,
output_init_factor=1.0):
x = Conv2DWN(
"conv2d_in",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
activation=activation)(
inputs=inputs)
non_linear = Conv2DWN(
"conv2d_nl",
num_filters=num_filters,
kernel_size=kernel_size,
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=x)
skip = Conv2DWN(
"conv2d_skip",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=inputs)
return non_linear + skip
@utils.MakeTFTemplate
def ResDense(inputs, num_dims, activation=None):
x = DenseWN("dense_in", num_outputs=num_dims, activation=activation)(inputs)
non_linear = DenseWN("dense_nl", num_outputs=num_dims)(x)
skip = DenseWN("dense_skip", num_outputs=num_dims)(x)
return non_linear + skip
@gin.configurable("conv_hier_encoder")
@utils.MakeTFTemplate
def ConvHierEncoder(images, depth = 2, num_blocks = 2, z_dims = 32, h_dims=160):
x = Conv2DWN("conv2d_in", num_filters=h_dims, stride=[2, 2], kernel_size=[5, 5])(inputs=images - 0.5)
means = []
raw_scales = []
contexts = []
for i in range(depth):
for j in range(num_blocks):
downsample = i > 0 and j == 0
if downsample:
stride = [2, 2]
else:
stride = [1, 1]
h = tf.nn.elu(x)
h = Conv2DWN("conv2d_in_%d_%d"%(i, j), num_filters=2*z_dims + 2 * h_dims, stride=stride, kernel_size=[3, 3])(inputs=h)
mean, raw_scale, context, h = tf.split(h, [z_dims, z_dims, h_dims, h_dims], -1)
means.append(mean)
raw_scales.append(raw_scale)
contexts.append(context)
h = tf.nn.elu(h)
h = Conv2DWN("conv2d_h_%d_%d"%(i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
if downsample:
x = tf.image.resize_nearest_neighbor(x, [int(x.shape[1]) // 2, int(x.shape[2]) // 2])
x += 0.1 * h
return means, raw_scales, contexts
@gin.configurable("conv_hier_prior_post")
@utils.MakeTFTemplate
def ConvHierPriorPost(images=None,
encoder=None,
z=None,
batch=None,
depth = 2,
num_blocks = 2,
z_dims = 32,
h_dims = 160,
image_width = 32):
is_q = encoder is not None
if is_q:
means, raw_scales, up_contexts = encoder(images)
if batch is None:
if images is not None:
batch = tf.shape(images)[0]
else:
batch = tf.shape(z[0])[0]
h = tf.get_variable("h_top", [h_dims], initializer=tf.zeros_initializer())
h = tf.reshape(h, [1, 1, 1, -1])
top_width = image_width // 2 ** num_blocks
h = tf.tile(h, [batch, top_width, top_width, 1])
x = h
ret_z = []
ret_log_pz = []
for i in reversed(list(range(depth))):
for j in reversed(list(range(num_blocks))):
downsample = i > 0 and j == 0
h = tf.nn.elu(x)
h_p = Conv2DWN(
"conv2d_p_%d_%d" % (i, j),
num_filters=2 * h_dims + 2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
p_mean, p_raw_scale, down_context, h_det = tf.split(
h_p, [z_dims, z_dims, h_dims, h_dims], -1)
p_z = tfd.Independent(
tfd.Normal(loc=p_mean, scale=tf.nn.softplus(p_raw_scale)),
reinterpreted_batch_ndims=3)
if is_q:
h_q = Conv2DWN(
"conv2d_q_%d_%d" % (i, j),
num_filters=2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
q_mean, q_raw_scale = tf.split(h_q, [z_dims, z_dims], -1)
context = down_context + up_contexts.pop()
q_mean += means.pop()
q_raw_scale += raw_scales.pop()
num_flat_dims = np.prod(q_mean.shape.as_list()[1:])
_maf_template = ConvAR(
"iaf_%d_%d" % (i, j),
real_event_shape=q_mean.shape.as_list()[1:],
hidden_layers=[h_dims, h_dims],
h=context,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
#x.set_shape([None, num_flat_dims])
x.set_shape([None] + q_mean.shape.as_list()[1:])
return t(x)
bijectors = []
#bijectors.append(tfb.Reshape(tf.shape(q_mean)[1:], [num_flat_dims]))
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
#bijectors.append(tfb.Reshape([num_flat_dims], tf.shape(q_mean)[1:]))
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
bijectors.append(tfb.AffineScalar(shift=q_mean, scale=tf.nn.softplus(q_raw_scale)))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(q_mean), scale=tf.ones_like(q_raw_scale)),
reinterpreted_batch_ndims=3)
q_z = tfd.TransformedDistribution(mvn, bijector)
if is_q:
dist = q_z
else:
dist = p_z
if z is None:
z_val = dist.sample()
else:
z_val = z[0]
z = z[1:]
ret_z.append(z_val)
ret_log_pz.append(dist.log_prob(z_val))
h = tf.concat([z_val, h_det], -1)
if downsample:
new_shape = [2 * int(x.shape[1]), 2 * int(x.shape[2])]
x = tf.image.resize_nearest_neighbor(x, new_shape)
h = tf.image.resize_nearest_neighbor(h, new_shape)
h = Conv2DWN("deconv2d_%d_%d" % (i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
x = x + 0.1 * h
x = tf.image.resize_nearest_neighbor(x, [2 * int(x.shape[1]), 2 * int(x.shape[2])])
x = Conv2DWN("conv2d_out", num_filters=3, stride=[1, 1], kernel_size=[5, 5])(inputs=x)
return ret_z, ret_log_pz, x
@gin.configurable("conv_encoder")
@utils.MakeTFTemplate
def ConvEncoder(images, num_outputs, hidden_dims = 450,
filter_scale = 1, fully_convolutional = False):
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return ResConv2D("res_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
else:
x = tf.reshape(x, [-1, filter_scale * 32 * 4 * 4])
x = ResDense("dense_h", num_dims=hidden_dims, activation=tf.nn.elu)(x)
return DenseWN(
"dense_out",
num_outputs=num_outputs,
weights_initializer=utils.L2HMCInitializer())(
x)
@gin.configurable("conv_decoder")
@utils.MakeTFTemplate
def ConvDecoder(encoding,
output_shape,
filter_scale = 1,
hidden_dims = 450,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
if fully_convolutional:
tf.logging.info("Encoding shape: %s", encoding.shape)
x = ResConv2D("res_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
else:
x = ResDense("dense_in", num_dims=hidden_dims, activation=tf.nn.elu)(encoding)
x = ResDense("dense_h", num_dims=filter_scale * 32 * 4 * 4, activation=tf.nn.elu)(x)
x = tf.reshape(x, [-1, 4, 4, filter_scale * 32])
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = ResConv2D("res_5", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = ResConv2D("res_3", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = ResConv2D(
"res_1",
num_filters=output_shape[-1],
kernel_size=[3, 3],
stride=[1, 1],
output_init_factor=0.01)(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder2")
@utils.MakeTFTemplate
def ConvEncoder2(images, num_outputs, filter_scale = 1):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_5", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
return ResConv2D("conv_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
@gin.configurable("conv_decoder2")
@utils.MakeTFTemplate
def ConvDecoder2(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = Conv2DWN("conv_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = Conv2DWN("conv_5", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_3", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN(
"conv_1",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder3")
@utils.MakeTFTemplate
def ConvEncoder3(images, num_outputs, hidden_dims = 450,
filter_scale = 1):
# This comes from VLAE paper.
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_8", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
return Conv2DWN("conv_10", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
@gin.configurable("conv_decoder3")
@utils.MakeTFTemplate
def ConvDecoder3(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
x = Conv2DWN("conv_1", num_filters=filter_scale * 96, kernel_size=[1, 1], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_8", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_10", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder4")
@utils.MakeTFTemplate
def ConvEncoder4(images, num_outputs,
filter_scale = 1,
fully_convolutional = False):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return Conv2DWN("conv_out", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
else:
return DenseWN("dense_out", num_outputs=num_outputs)(tf.layers.flatten(x))
@gin.configurable("conv_decoder4")
@utils.MakeTFTemplate
def ConvDecoder4(encoding,
output_shape,
filter_scale = 1,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
if not fully_convolutional:
x = tf.reshape(DenseWN("dense_in", num_outputs=8*8*16)(x), [-1, 8, 8, 16])
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[1, 1],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("dense_encoder")
@utils.MakeTFTemplate
def DenseEncoder(images,
num_outputs,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
x = tf.layers.flatten(images)
# Center the data, assuming it goes from [0, 1] initially.
# x = 2.0 * x - 1.0
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
return tf.layers.dense(x, num_outputs, kernel_initializer=utils.L2HMCInitializer())
@gin.configurable("dense_decoder")
@utils.MakeTFTemplate
def DenseDecoder(encoding,
output_shape,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = tf.layers.flatten(encoding)
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
num_outputs = np.prod(output_shape)
return tf.reshape(
tf.layers.dense(
x, num_outputs, kernel_initializer=utils.L2HMCInitializer(factor=0.01)),
[-1] + output_shape)
def IndependentBernouli3D(logits):
return tfd.Independent(
tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=3)
def IndependentDiscreteLogistic3D(locations,
scales):
dist = tfd.TransformedDistribution(
distribution=tfd.Logistic(loc=locations, scale=scales),
bijector=tfb.AffineScalar(scale=255.0))
dist = tfd.QuantizedDistribution(distribution=dist, low=0., high=255.0)
dist = tfd.Independent(dist, reinterpreted_batch_ndims=3)
class ScaleHack(object):
def __init__(self, dist):
self._dist = dist
def sample(self, *args, **kwargs):
return self._dist.sample(*args, **kwargs) / 255.0
def log_prob(self, x, *args, **kwargs):
return self._dist.log_prob(tf.clip_by_value(x * 255.0, 0.0, 255.0), *args, **kwargs)
return ScaleHack(dist)
def IndependentDiscreteLogistic3D2(locations,
scales):
class IndependentDiscreteLogistic(object):
def __init__(self, loc, scale):
self._loc = loc
self._scale = scale
def sample(self, *args, **kwargs):
dist = tfd.Logistic(loc=self._loc, scale=self._scale)
return tf.clip_by_value(dist.sample(*args, **kwargs), 0.0, 1.0)
def log_prob(self, x, *args, **kwargs):
sample = x
mean = self._loc
scales = self._scale
binsize=1.0 / 256.0
sample = (tf.floor(sample / binsize) * binsize - mean) / scales
return tf.reduce_sum(
tf.log(
tf.sigmoid(sample + binsize / scales) - tf.sigmoid(sample) + 1e-7),
[-1, -2, -3])
return IndependentDiscreteLogistic(locations, scales)
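# Added note: the log_prob above is the discretized logistic likelihood used
# for 8-bit images. With binsize = 1/256 the pixel value is snapped to the
# lower edge of its bin, and the probability mass of the bin is approximated by
# sigmoid(z + binsize/scale) - sigmoid(z), i.e. the logistic CDF evaluated at
# the two bin edges; the 1e-7 term only guards against log(0).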
@gin.configurable("dense_recognition")
@utils.MakeTFTemplate
def DenseRecognition(images, encoder, z=None, sigma_activation="exp"
):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
num_dims = int(encoding.shape[-1]) // 2
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, 2]), num=2, axis=-1)
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.Affine(shift=mu, scale_diag=sigma)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
tf.logging.info("bijector z shape: %s", z[0].shape)
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
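# Added note: the posterior above is a plain diagonal Gaussian, but it is
# built as a standard normal pushed through an explicit Affine bijector
# (rather than MultivariateNormalDiag directly) so that the bijector can be
# returned and reused to map to/from the whitened space, mirroring the
# comment in the RNVP/IAF recognition networks below.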
@gin.configurable("dense_recognition_affine")
@utils.MakeTFTemplate
def DenseRecognitionAffine(images, encoder, z=None,
z_dims=None):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
tril_raw = tfd.fill_triangular(encoding[:, z_dims:])
sigma = tf.nn.softplus(tf.matrix_diag_part(tril_raw))
tril = tf.linalg.set_diag(tril_raw, sigma)
bijector = tfb.Affine(shift=mu, scale_tril=tril)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_affine_lr")
@utils.MakeTFTemplate
def DenseRecognitionAffineLR(images, encoder, z=None,
z_dims=None, rank=1):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
sigma = encoding[:, z_dims:2*z_dims]
perturb = encoding[:, 2*z_dims:]
perturb = tf.reshape(perturb, [-1, z_dims, rank])
sigma = tf.nn.softplus(sigma)
bijector = tfb.Affine(shift=mu, scale_diag=sigma,
scale_perturb_factor=perturb)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_rnvp")
@utils.MakeTFTemplate
def DenseRecognitionRNVP(
images,
encoder,
z=None,
num_bijectors=3,
condition_bijector=False,
layer_sizes=[128, 128],
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_bijector:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_bijector:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_bijectors):
_rnvp_template = utils.DenseShiftLogScale(
"rnvp_%d" % i,
h=h,
hidden_layers=layer_sizes,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def rnvp_template(x, output_units, t=_rnvp_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims - output_units])
return t(x, output_units)
bijectors.append(
tfb.Invert(
tfb.RealNVP(
num_masked=num_dims // 2,
shift_and_log_scale_fn=rnvp_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_iaf")
@utils.MakeTFTemplate
def DenseRecognitionIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_iaf_layers):
#_maf_template = tfb.masked_autoregressive_default_template(
# hidden_layers=iaf_layer_sizes,
# activation=tf.nn.softplus,
# kernel_initializer=utils.L2HMCInitializer(factor=0.01))
_maf_template = utils.DenseAR(
"maf_%d" % i,
hidden_layers=iaf_layer_sizes,
h=h,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
class FlipImageBijector(tfb.Bijector):
def __init__(self, validate_args=False, name=None):
"""Creates the `Permute` bijector.
Args:
permutation: An `int`-like vector-shaped `Tensor` representing the
permutation to apply to the rightmost dimension of the transformed
`Tensor`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
TypeError: if `not permutation.dtype.is_integer`.
ValueError: if `permutation` does not contain exactly one of each of
`{0, 1, ..., d}`.
"""
super(FlipImageBijector, self).__init__(
forward_min_event_ndims=3,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "flip_image")
def _forward(self, x):
return tf.image.flip_left_right(tf.image.flip_up_down(x))
def _inverse(self, y):
return tf.image.flip_up_down(tf.image.flip_left_right(y))
def _inverse_log_det_jacobian(self, y):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
return tf.constant(0., dtype=y.dtype.base_dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0., dtype=x.dtype.base_dtype)
@gin.configurable("conv_iaf")
@utils.MakeTFTemplate
def ConvIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder. %s", encoding.shape)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
bijectors = []
for i in range(num_iaf_layers):
_maf_template = ConvAR(
"iaf_%d" % i,
real_event_shape=encoding_parts[0].shape.as_list()[1:],
hidden_layers=iaf_layer_sizes,
h=h,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None] + encoding_parts[0].shape.as_list()[1:])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(FlipImageBijector())
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.AffineScalar(shift=mu, scale=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("conv_shift_scale")
@utils.MakeTFTemplate
def ConvShiftScale(
images,
encoder,
z=None,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder. %s", encoding.shape)
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.AffineScalar(shift=mu, scale=sigma)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def SimplePrior(z=None, batch=None,
num_dims=None):
"""Models P(z)"""
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros(num_dims), scale_diag=tf.ones(num_dims))
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def Simple3DPrior(z=None, batch=None,
shape=None):
"""Models P(z)"""
mvn = tfd.Independent(tfd.Normal(loc=tf.zeros(shape), scale=tf.ones(shape)), reinterpreted_batch_ndims=3)
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def DenseMNISTNoise(x=None, z=None, decoder=None, return_means=True):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
bernoulli = IndependentBernouli3D(decoding)
if x is None:
if return_means:
x = bernoulli.mean()
else:
x = tf.to_float(bernoulli.sample())
return x, bernoulli.log_prob(x)
@gin.configurable("cifar10_noise")
@utils.MakeTFTemplate
def DenseCIFAR10TNoise(x=None, z=None, decoder=None, return_means=True, uniform_scale=False, logistic_impl="mine"):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
if uniform_scale:
scale = tf.get_variable("scale", initializer=1.0)
scales = tf.reshape(scale, [1, 1, 1])
else:
scales = tf.get_variable(
"scales", [32, 32, 3], initializer=tf.ones_initializer())
if logistic_impl == "mine":
disc_logistic = IndependentDiscreteLogistic3D(decoding, tf.nn.softplus(scales))
elif logistic_impl == "kingma":
disc_logistic = IndependentDiscreteLogistic3D2(decoding, tf.nn.softplus(scales))
if x is None:
x = tf.to_float(disc_logistic.sample())
return x, disc_logistic.log_prob(x)
@gin.configurable("learning_rate")
def LearningRate(train_size, global_step, schedule = "hoffman", warmup_steps=0):
if schedule == "hoffman":
base = tf.train.piecewise_constant(
global_step, [train_size * 500 // TRAIN_BATCH], [1e-3, 1e-4])
elif schedule == "new":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[1e-3, 1e-4, 1e-5])
elif schedule == "new_gentle":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[0.5e-3, 1e-4, 1e-5])
elif schedule == "fast":
base = tf.train.piecewise_constant(
global_step,
[train_size * 800 // TRAIN_BATCH],
[1e-2, 1e-5])
else:
raise ValueError("Invalid schedule: " + schedule)
if warmup_steps == 0:
return base
else:
return tf.minimum(base * tf.to_float(global_step) / warmup_steps, base)
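# Worked LearningRate example with hypothetical sizes: for train_size=50000
# and TRAIN_BATCH=250, schedule="hoffman" keeps the rate at 1e-3 for the first
# 50000 * 500 // 250 = 100000 steps and drops it to 1e-4 afterwards;
# warmup_steps > 0 additionally ramps the rate up linearly from 0 over the
# first warmup_steps steps.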
VAEOutputs = collections.namedtuple(
"VAEOutputs", "log_p_x_z, elbo, sample_means, recon_means, klqp, total_klqp, post_z, prior_z")
AISOutputs = collections.namedtuple(
"AISOutputs",
"log_p, p_accept, z_fin, recon"
)
def MakeVAE(images, recognition, prior, noise, beta, num_samples,
min_kl):
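  # Builds the single-sample ELBO: an estimate of E_q[log p(x|z)] minus beta
  # times the sum of per-layer KL(q||p) terms, each floored at min_kl
  # (free-bits). Also returns posterior/prior samples and their decodings for
  # summaries.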
z, log_q_z = recognition(images)
_, log_p_z = prior(z)
_, log_p_x_z = noise(images, z)
post_z = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - beta * total_klqp
recon_means, _ = noise(None, z)
z, _ = prior(batch=num_samples)
sample_means, _ = noise(None, z)
return VAEOutputs(
log_p_x_z=log_p_x_z,
elbo=elbo,
sample_means=sample_means,
recon_means=recon_means,
klqp=klqp,
total_klqp=total_klqp,
post_z=post_z,
prior_z=z)
DLGMOutputs = collections.namedtuple(
"DLGMOutputs",
"elbo, sample_means, mcmc_log_p, recon_means, p_accept, post_z, post_z_chain, q_z, xentpq"
)
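# DLGM: a deep latent Gaussian model whose variational posterior is refined by
# a few steps of Hamiltonian Monte Carlo. When use_neutra=True the HMC chain
# runs in the warped space defined by the recognition network's bijector
# (NeuTra-style preconditioning); the recognition network can be trained on
# the ELBO (klqp), the cross-entropy to the refined posterior (klpq), or a
# mixture of the two (symm).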
@gin.configurable("dlgm")
class DLGM(object):
def __init__(self,
z_dims=64,
beta=1.0,
beta_steps=0,
step_size=0.2,
num_leapfrog_steps=5,
num_hmc_steps=2,
use_neutra=True,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
q_loss_type="klqp",
min_kl=0.0,
symm_factor=0.5,
save_chain_state=False,
chain_warmup_epochs=5,
use_q_z_for_gen=False,
no_gen_train_steps=0,
dataset=None,
use_bijector_for_ais=False,
prior_type="simple",
adapt_step_size=False,
step_size_gain=1e-3,
use_q_z_for_ais=False,
affine_rank=1,
step_size_warmup=0):
self.train_size = dataset.train_size
self._use_q_z_for_ais = use_q_z_for_ais
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._use_bijector_for_ais = use_bijector_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
self._use_neutra = use_neutra
self._num_leapfrog_steps = num_leapfrog_steps
self._num_hmc_steps = num_hmc_steps
self._q_loss_type = q_loss_type
self._symm_factor = symm_factor
self._save_chain_state = save_chain_state
self._chain_warmup_epochs = chain_warmup_epochs
self._use_q_z_for_gen = use_q_z_for_gen
self._no_gen_train_steps = no_gen_train_steps
self._step_size_gain = step_size_gain
self._adapt_step_size = adapt_step_size
self._step_size_warmup = step_size_warmup
self._init_step_size = step_size
if self._adapt_step_size:
self._step_size = tf.get_variable("step_size", initializer=step_size)
else:
self._step_size = tf.constant(step_size)
if self._save_chain_state:
self._chain_state = tf.get_variable(
"train_chain_state", [self.train_size, z_dims], trainable=False)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
#assert dataset.name == "cifar10"
#self._encoder = ConvHierEncoder("encoder")
#self._prior_posterior = ConvHierPriorPost("prior_post")
#self._decoder = lambda z: self._prior_posterior(z=z)[2]
#self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
#self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
pass
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition(
"recog",
encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
self._recog = recog
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def AdjustedStepSize(self):
if self._step_size_warmup > 0:
global_step = tf.train.get_or_create_global_step()
max_step = self._init_step_size * tf.to_float(
global_step) / self._step_size_warmup
return tf.where(global_step > self._step_size_warmup, self._step_size,
tf.minimum(max_step, self._step_size))
else:
return self._step_size
def RecogVars(self):
return self._encoder.variables + self._recog.variables
def GenVars(self):
return (
self._prior.variables + self._decoder.variables + self._noise.variables)
def MakeDLGM(self,
images,
other_z_init=None,
use_other_z_init=None,
num_samples=64):
z, log_q_z, bijector = self._recog(images)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
post_z = z
q_z = z
if use_other_z_init is not None:
z_init = [tf.cond(use_other_z_init, lambda: tf.identity(other_layer_z),
lambda: tf.identity(layer_z)) for other_layer_z, layer_z in zip(z, other_z_init)]
z_init = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(self._min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - self._beta * total_klqp
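    # The posterior sample is now refined with a short HMC chain targeting
    # log p(z) + log p(x|z); with use_neutra the chain runs through the
    # recognition bijector via a TransformedTransitionKernel, and the refined
    # sample (with gradients stopped) feeds the generator's MCMC objective
    # computed below.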
def TargetLogProbFn(*z):
for post_z_e, z_e in zip(post_z, z):
tf.logging.info("Shape here: %s %s", post_z_e.shape, z_e.shape)
z_e.set_shape(post_z_e.shape)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=TargetLogProbFn,
step_size=self.AdjustedStepSize(),
num_leapfrog_steps=self._num_leapfrog_steps)
if self._use_neutra:
kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=kernel, bijector=bijector)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=self._num_hmc_steps, current_state=z, kernel=kernel)
z = [tf.stop_gradient(s[-1, Ellipsis]) for s in states]
post_z = z
_, log_q_z, _ = self._recog(images, z=z)
xentpq = -tf.add_n([tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z])
if self._use_q_z_for_gen:
z = q_z
recon_means, _ = self._noise(None, z)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
mcmc_log_p = tf.reduce_mean(tf.add_n(log_p_z) + log_p_x_z)
if self._use_neutra:
log_accept_ratio = kernel_results.inner_results.log_accept_ratio
else:
log_accept_ratio = kernel_results.log_accept_ratio
p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))
z, _ = self._prior(batch=num_samples)
sample_means, _ = self._noise(None, z)
return DLGMOutputs(
elbo=elbo,
sample_means=sample_means,
mcmc_log_p=mcmc_log_p,
recon_means=recon_means,
p_accept=p_accept,
post_z=post_z,
post_z_chain=states,
q_z=z_init,
xentpq=xentpq)
def GetPosterior(self, images):
outputs = self.MakeDLGM(images)
return outputs.post_z
def TrainOp(self, data_idx, images):
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
if self._save_chain_state:
other_z_init = tf.gather(self._chain_state, data_idx)
use_other_z_init = (
global_step > self._chain_warmup_epochs * self.train_size // TRAIN_BATCH)
else:
other_z_init = None
use_other_z_init = None
outputs = self.MakeDLGM(
images, other_z_init=other_z_init, use_other_z_init=use_other_z_init)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
#gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
utils.LogAndSummarizeMetrics({
"learning_rate": learning_rate,
"elbo": outputs.elbo,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
"step_size": self.AdjustedStepSize(),
}, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
if self._save_chain_state:
with tf.control_dependencies([outputs.post_z]):
chain_state_update_op = tf.scatter_update(self._chain_state, data_idx,
outputs.post_z)
else:
chain_state_update_op = tf.no_op()
if self._adapt_step_size:
new_step_size = self._step_size + self._step_size_gain * (outputs.p_accept - 0.651)
new_step_size = tf.clip_by_value(new_step_size, 1e-3, 0.5)
step_size_op = self._step_size.assign(
tf.where(global_step > self._step_size_warmup, new_step_size,
self._step_size))
else:
step_size_op = tf.no_op()
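    # Both networks share one Adam instance: the recognition network is
    # trained on -ELBO, the KL(p||q) cross-entropy, or their mixture, while
    # the generator switches from -ELBO to the MCMC-refined log-likelihood
    # after no_gen_train_steps steps.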
with tf.name_scope("recog_train"):
if self._q_loss_type == "klqp":
loss = -outputs.elbo
elif self._q_loss_type == "symm":
loss = (
self._symm_factor * -outputs.elbo +
(1.0 - self._symm_factor) * outputs.xentpq)
elif self._q_loss_type == "klpq":
loss = outputs.xentpq
if self._save_chain_state:
# Not super efficient...
loss = tf.cond(use_other_z_init, lambda: tf.identity(loss),
lambda: tf.identity(-outputs.elbo))
recog_train_op = tf.contrib.training.create_train_op(
loss,
opt,
summarize_gradients=True,
variables_to_train=self.RecogVars(),
transform_grads_fn=utils.ProcessGradients)
with tf.name_scope("gen_train"):
gen_loss = tf.cond(global_step < self._no_gen_train_steps,
lambda: -outputs.elbo, lambda: -outputs.mcmc_log_p)
gen_train_op = tf.contrib.training.create_train_op(
gen_loss,
opt,
None,
summarize_gradients=True,
variables_to_train=self.GenVars(),
transform_grads_fn=utils.ProcessGradients)
return tf.group(recog_train_op, gen_train_op, chain_state_update_op, step_size_op)
def EvalOp(self, data_idx, images):
outputs = self.MakeDLGM(images)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
return utils.LogAndSummarizeMetrics({
"elbo": outputs.elbo,
"xentpq": outputs.xentpq,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
})
def AIS(self, images, num_chains):
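    # Estimates log p(x) with annealed importance sampling: chains start from
    # the prior (or from q(z|x) if use_q_z_for_ais), anneal towards
    # p(z)p(x|z), and the per-image estimates from num_chains chains are
    # combined with a logsumexp - log(num_chains) average.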
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z, _ = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
if self._use_bijector_for_ais:
_, _, bijector = self._recog(images)
else:
bijector = None
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init, bijector=bijector)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("vae")
class VAE(object):
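  # Plain VAE counterpart of DLGM: same encoders, decoders, priors and
  # posterior bijectors, but trained purely on the (optionally beta-annealed,
  # free-bits) ELBO with no MCMC refinement.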
def __init__(self,
z_dims=64,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
beta=1.0,
beta_steps=0,
min_kl=0,
use_q_z_for_ais=False,
dataset=None,
prior_type="simple",
affine_rank=1):
self.train_size = dataset.train_size
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._beta = beta
self._use_q_z_for_ais = use_q_z_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
assert dataset.name == "cifar10"
self._encoder = ConvHierEncoder("encoder")
self._prior_posterior = ConvHierPriorPost("prior_post")
self._decoder = lambda z: self._prior_posterior(z=z)[2]
self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition("recog", encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
# Drop the bijector return.
self._recog = lambda *args, **kwargs: recog(*args, **kwargs)[:2]
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def MakeVAE(self, images, beta_override=None, num_samples=64):
if beta_override is not None:
beta = beta_override
else:
beta = self._beta
return MakeVAE(images, self._recog, self._prior, self._noise, beta,
num_samples, self._min_kl)
def TrainOp(self, data_idx, images):
outputs = self.MakeVAE(images)
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
metrics = {
"learning_rate": learning_rate,
"log_p_x_z": outputs.log_p_x_z,
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
"beta": self._beta,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
utils.LogAndSummarizeMetrics(metrics, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
return tf.contrib.training.create_train_op(
-outputs.elbo,
opt,
summarize_gradients=True,
transform_grads_fn=utils.ProcessGradients)
def GetPosterior(self, images):
outputs = self.MakeVAE(images)
return outputs.post_z
def EvalOp(self, data_idx, images):
outputs = self.MakeVAE(images, 1.0)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
metrics = {
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
return utils.LogAndSummarizeMetrics(metrics)
def AIS(self, images, num_chains):
outputs = self.MakeVAE(images)
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("train")
def Train(model, dataset, train_dir, master, epochs=600, polyak_averaging=0.0, warmstart_ckpt=""):
data_idx, images = dataset.TrainBatch(TRAIN_BATCH, epochs)
train_op = model.TrainOp(data_idx, images)
if polyak_averaging > 0.0:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=polyak_averaging)
with tf.control_dependencies([train_op]):
train_op = ema.apply()
utils.LogAndSaveHParams()
tf.Session.reset(master)
if warmstart_ckpt:
tf.init_from_checkpoint(warmstart_ckpt, {"/": "/"})
hooks = [
tf.train.StopAtStepHook(last_step=dataset.train_size * epochs //
TRAIN_BATCH),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), every_n_secs=60)
]
tf.contrib.training.train(
train_op,
logdir=train_dir,
master=master,
hooks=hooks,
save_checkpoint_secs=120,
save_summaries_steps=60)
def Eval(model, dataset, train_dir, eval_dir, master,
use_polyak_averaging=False, max_number_of_evaluations=None):
data_idx, images = dataset.TestBatch(TEST_BATCH)
eval_op = model.EvalOp(data_idx, images)
utils.LogAndSaveHParams()
tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(saver=saver)
tf.Session.reset(master)
hooks = [
# Just for logging.
tf.contrib.training.StopAfterNEvalsHook(dataset.test_size // TEST_BATCH),
tf.contrib.training.SummaryAtEndHook(eval_dir),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), at_end=True)
]
tf.contrib.training.evaluate_repeatedly(
train_dir,
eval_ops=eval_op,
hooks=hooks,
# LOL...
eval_interval_secs=120,
max_number_of_evaluations=max_number_of_evaluations,
master=master,
scaffold=scaffold)
def AISEvalShard(shard, master, num_workers, num_chains, dataset, use_polyak_averaging, writer, train_dir, model_fn, batch):
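  # Runs AIS for one shard of the test set on a single worker: the dataset
  # iterator yields only this shard's batches, log p estimates are accumulated
  # into one array, and shard 0 writes summaries once; an AbortedError (e.g. a
  # preempted session) triggers a retry of the whole shard.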
tf.logging.info("Thread started")
model = model_fn()
tf.logging.info("Built model")
shard_idx = tf.placeholder(tf.int64, [])
tf.logging.info("built data")
data_iterator = dataset.AISIterator(batch, shard_idx, num_workers)
images, _ = data_iterator.get_next()
tf.logging.info("Built mA")
ais_outputs = model.AIS(images, num_chains)
log_p = ais_outputs.log_p
p_accept = ais_outputs.p_accept
tf.logging.info("Built mB")
if shard == 1:
utils.LogAndSaveHParams()
summary_op = tf.summary.merge_all()
global_step = tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
tf.logging.info("Built mC")
global_step_val = []
tf.logging.info("Starting shard %d, %s", shard, master)
#with tf.MonitoredSession(
# tf.train.ChiefSessionCreator(
# master=master,
# checkpoint_dir=train_dir)) as sess:
while True:
try:
tf.Session.reset(master)
with tf.Session(master) as sess:
all_log_p = np.zeros([0])
saver.restore(sess, tf.train.latest_checkpoint(train_dir))
sess.run(data_iterator.initializer, {shard_idx: shard})
try:
step_num = 0
while True:
fetch = {
"log_p": log_p,
"global_step": global_step,
"p_accept": p_accept
}
if shard == 0:
fetch["summary"] = summary_op
tf.logging.info("Shard %d step %d started.", shard, step_num)
fetch = sess.run(fetch)
tf.logging.info("Shard %d step %d done.", shard, step_num)
tf.logging.info("Shard %d log_p %.2f, p_accept: %.2f", shard,
np.mean(fetch["log_p"]),
np.mean(fetch["p_accept"]))
all_log_p = np.hstack([all_log_p, fetch["log_p"]])
if shard == 0 and step_num == 0:
global_step_val.append(fetch["global_step"])
writer.add_summary(fetch["summary"], global_step_val[0])
step_num += 1
except tf.errors.OutOfRangeError:
tf.logging.info("Shard %d done.", shard)
pass
return all_log_p
except tf.errors.AbortedError:
pass
def AISEval(model_fn, dataset, train_dir, eval_dir, worker_master_pattern,
num_workers, num_chains, use_polyak_averaging=False):
tf.reset_default_graph()
log_p_ph = tf.placeholder(tf.float32, [None])
log_p_summary = tf.summary.scalar("log_p", tf.reduce_mean(log_p_ph))
writer = tf.summary.FileWriter(eval_dir)
with futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
results = []
for shard in range(num_workers):
tf.logging.info("Submitting shard %d", shard)
master = worker_master_pattern.format(shard)
results.append(
executor.submit(AISEvalShard, shard, master, num_workers, num_chains,
dataset, use_polyak_averaging, writer, train_dir,
model_fn, AIS_BATCH))
all_log_p = np.zeros([0])
for result in results:
log_p = result.result()
all_log_p = np.hstack([all_log_p, log_p])
log_p = np.mean(all_log_p)
tf.logging.info("Log P: %.2f", log_p)
with tf.Session() as sess:
writer.add_summary(
sess.run(log_p_summary, {log_p_ph: all_log_p}), 0)
writer.flush()
return log_p
MODEL_TO_CLASS = {"vae": VAE, "dlgm": DLGM}
def main(argv):
del argv # Unused.
utils.BindHParams(FLAGS.hparams)
if FLAGS.data_type == "mnist":
dataset = utils.MNISTDataset(FLAGS.mnist_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "fashion_mnist":
dataset = utils.MNISTDataset(FLAGS.fashion_mnist_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "cifar10":
dataset = utils.CIFAR10Dataset(FLAGS.cifar10_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "fake":
dataset = utils.FakeMNISTDataset()
if FLAGS.mode == "train":
model = MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
Train(model, dataset, FLAGS.train_dir, FLAGS.master,
polyak_averaging=FLAGS.polyak_averaging)
elif FLAGS.mode == "eval":
model = MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
Eval(model, dataset, FLAGS.train_dir, FLAGS.eval_dir,
FLAGS.master,
use_polyak_averaging=FLAGS.polyak_averaging > 0.0)
elif FLAGS.mode == "ais_eval":
replica_log_p = []
if FLAGS.ais_replicas:
replicas = FLAGS.ais_replicas
else:
replicas = list(range(FLAGS.ais_num_replicas))
for i in replicas:
train_dir = FLAGS.train_dir.format(i)
eval_dir = FLAGS.eval_dir.format(i)
model_fn = lambda: MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
log_p = AISEval(model_fn, dataset, train_dir, eval_dir,
FLAGS.ais_worker_pattern, FLAGS.ais_num_workers,
FLAGS.ais_num_chains,
use_polyak_averaging=FLAGS.polyak_averaging > 0.0)
replica_log_p.append(log_p)
log_p = np.mean(replica_log_p)
std_log_p = np.std(replica_log_p)
tf.logging.info("Log P: %.2f +- %.2f", log_p,
std_log_p / np.sqrt(len(replicas)))
tf.logging.info("All log_p: %s", replica_log_p)
elif FLAGS.mode == "ais_eval2":
if FLAGS.ais_replicas:
replicas = FLAGS.ais_replicas
else:
replicas = list(range(FLAGS.ais_num_replicas))
for i in replicas:
tf.reset_default_graph()
train_dir = FLAGS.train_dir.format(i)
eval_dir = FLAGS.eval_dir.format(i)
model_fn = lambda: MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
sentinel_filename = os.path.join(eval_dir, "ais_shard_%d_done" % FLAGS.ais_shard)
if tf.gfile.Exists(sentinel_filename):
continue
batch = FLAGS.ais_batch_size
assert (dataset.test_size // FLAGS.ais_num_workers) % batch == 0
writer = tf.summary.FileWriter(eval_dir)
log_p = AISEvalShard(FLAGS.ais_shard, "", FLAGS.ais_num_workers, FLAGS.ais_num_chains,
dataset, FLAGS.polyak_averaging > 0.0, writer, train_dir, model_fn, batch)
tf.gfile.MakeDirs(eval_dir)
with tf.gfile.Open(os.path.join(eval_dir, "ais_shard_%d" % FLAGS.ais_shard), "w") as f:
np.savetxt(f, log_p)
with tf.gfile.Open(sentinel_filename, "w") as f:
f.write("done")
if __name__ == "__main__":
flags.DEFINE_string("mnist_data_dir", "", "")
flags.DEFINE_string("fashion_mnist_data_dir", "", "")
flags.DEFINE_string("cifar10_data_dir", "", "")
flags.DEFINE_string("data_type", "mnist", "")
flags.DEFINE_enum("mode", "train", ["train", "eval", "ais_eval", "ais_eval2"], "")
flags.DEFINE_enum("model", "vae", list(MODEL_TO_CLASS.keys()), "")
flags.DEFINE_string("train_dir", "/tmp/vae/train", "")
flags.DEFINE_string("eval_dir", "/tmp/vae/eval", "")
flags.DEFINE_string("master", "", "")
flags.DEFINE_string("ais_worker_pattern", "", "")
flags.DEFINE_integer("ais_shard", 0, "")
flags.DEFINE_integer("ais_num_workers", 1, "")
flags.DEFINE_integer("ais_num_chains", 1, "")
flags.DEFINE_integer("ais_num_replicas", 1, "")
flags.DEFINE_list("ais_replicas", "", "Manual listing of replicas")
flags.DEFINE_integer("ais_batch_size", 25, "")
flags.DEFINE_float("polyak_averaging", 0.0, "")
flags.DEFINE_boolean("test_is_valid", False, "")
flags.DEFINE(utils.YAMLDictParser(), "hparams", "", "")
app.run(main)
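# Example invocation (hypothetical paths; the real module path may differ):
#   python -m neutra.vae --mode=train --model=dlgm --data_type=mnist \
#     --mnist_data_dir=/tmp/mnist --train_dir=/tmp/vae/train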
|
[
"tensorflow.tile",
"neutra.utils.LogAndSaveHParams",
"tensorflow.matrix_diag_part",
"tensorflow.group",
"tensorflow.nn.softplus",
"neutra.utils.LogAndSummarizeMetrics",
"absl.flags.DEFINE_float",
"numpy.arange",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.gfile.Exists",
"tensorflow.Session",
"tensorflow.Session.reset",
"tensorflow.train.get_or_create_global_step",
"tensorflow.image.flip_left_right",
"collections.namedtuple",
"neutra.utils.FakeMNISTDataset",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.reshape",
"tensorflow.expand_dims",
"six.moves.range",
"neutra.utils.CIFAR10Dataset",
"neutra.utils.AIS",
"tensorflow.train.Saver",
"neutra.utils.YAMLDictParser",
"tensorflow.name_scope",
"neutra.utils.MNISTDataset",
"neutra.utils.L2HMCInitializer",
"tensorflow.shape",
"tensorflow.layers.flatten",
"tensorflow.nn.elu",
"tensorflow.split",
"neutra.utils.StitchImages",
"gin.configurable",
"neutra.utils.GetLoggingOutputs",
"numpy.mean",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.init_from_checkpoint",
"tensorflow.maximum",
"tensorflow.clip_by_value",
"six.moves.zip",
"tensorflow.zeros",
"tensorflow.nn.conv2d",
"tensorflow.summary.merge_all",
"numpy.ones",
"tensorflow.variable_scope",
"tensorflow.nn.l2_normalize",
"tensorflow.train.StopAtStepHook",
"tensorflow.gather",
"numpy.std",
"tensorflow.floor",
"tensorflow.train.latest_checkpoint",
"tensorflow.summary.FileWriter",
"tensorflow.minimum",
"tensorflow.reset_default_graph",
"tensorflow.to_float",
"tensorflow.logging.info",
"absl.flags.DEFINE_integer",
"tensorflow.cond",
"tensorflow.contrib.training.create_train_op",
"tensorflow.add_n",
"numpy.prod",
"tensorflow.get_variable",
"tensorflow.contrib.training.SummaryAtEndHook",
"tensorflow.control_dependencies",
"tensorflow.gfile.MakeDirs",
"tensorflow.ones_like",
"tensorflow.reduce_mean",
"absl.flags.DEFINE_enum",
"tensorflow.image.flip_up_down",
"absl.flags.DEFINE_boolean",
"tensorflow.contrib.training.evaluate_repeatedly",
"tensorflow.square",
"tensorflow.zeros_like",
"tensorflow.where",
"numpy.savetxt",
"absl.flags.DEFINE_string",
"tensorflow.no_op",
"tensorflow.ones",
"tensorflow.contrib.training.train",
"numpy.zeros",
"tensorflow.constant",
"tensorflow.stop_gradient",
"tensorflow.identity",
"tensorflow.linalg.set_diag",
"tensorflow.contrib.training.StopAfterNEvalsHook",
"numpy.hstack",
"tensorflow.scatter_update",
"tensorflow.train.Scaffold",
"tensorflow.zeros_initializer",
"tensorflow_probability.mcmc.sample_chain",
"absl.flags.DEFINE_list",
"neutra.utils.BindHParams",
"absl.app.run",
"tensorflow.concat",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.piecewise_constant",
"tensorflow.sigmoid",
"tensorflow_probability.mcmc.TransformedTransitionKernel",
"tensorflow.gfile.Open",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join",
"tensorflow.ones_initializer",
"tensorflow.exp"
]
'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.99)'}), '(decay=0.99)\n', (63288, 63300), True, 'import tensorflow as tf\n'), ((63376, 63392), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (63390, 63392), True, 'import tensorflow as tf\n'), ((63511, 63583), 'tensorflow.contrib.training.StopAfterNEvalsHook', 'tf.contrib.training.StopAfterNEvalsHook', (['(dataset.test_size // TEST_BATCH)'], {}), '(dataset.test_size // TEST_BATCH)\n', (63550, 63583), True, 'import tensorflow as tf\n'), ((63591, 63637), 'tensorflow.contrib.training.SummaryAtEndHook', 'tf.contrib.training.SummaryAtEndHook', (['eval_dir'], {}), '(eval_dir)\n', (63627, 63637), True, 'import tensorflow as tf\n'), ((64559, 64584), 'neutra.utils.LogAndSaveHParams', 'utils.LogAndSaveHParams', ([], {}), '()\n', (64582, 64584), False, 'from neutra import utils\n'), ((64708, 64749), 'tensorflow.logging.info', 'tf.logging.info', (['"""Using polyak averaging"""'], {}), "('Using polyak averaging')\n", (64723, 64749), True, 'import tensorflow as tf\n'), ((64760, 64805), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.99)'}), '(decay=0.99)\n', (64793, 64805), True, 'import tensorflow as tf\n'), ((64881, 64897), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (64895, 64897), True, 'import tensorflow as tf\n'), ((66754, 66778), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['log_p_ph'], {}), '(log_p_ph)\n', (66768, 66778), True, 'import tensorflow as tf\n'), ((66832, 66883), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'num_workers'}), '(max_workers=num_workers)\n', (66858, 66883), False, 'from concurrent import futures\n'), ((66931, 66949), 'six.moves.range', 'range', (['num_workers'], {}), '(num_workers)\n', (66936, 66949), False, 'from six.moves import range\n'), ((67297, 67310), 'numpy.zeros', 'np.zeros', (['[0]'], {}), '([0])\n', (67305, 67310), True, 'import numpy as np\n'), ((67493, 67505), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (67503, 67505), True, 'import tensorflow as tf\n'), ((67800, 67861), 'neutra.utils.MNISTDataset', 'utils.MNISTDataset', (['FLAGS.mnist_data_dir', 'FLAGS.test_is_valid'], {}), '(FLAGS.mnist_data_dir, FLAGS.test_is_valid)\n', (67818, 67861), False, 'from neutra import utils\n'), ((71597, 71619), 'neutra.utils.YAMLDictParser', 'utils.YAMLDictParser', ([], {}), '()\n', (71617, 71619), False, 'from neutra import utils\n'), ((1340, 1357), 'tensorflow.square', 'tf.square', (['tensor'], {}), '(tensor)\n', (1349, 1357), True, 'import tensorflow as tf\n'), ((2019, 2086), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[num_filters]'], {'initializer': 'biases_initializer'}), "('b', [num_filters], initializer=biases_initializer)\n", (2034, 2086), True, 'import tensorflow as tf\n'), ((2216, 2253), 'tensorflow.reshape', 'tf.reshape', (['g', '[1, 1, 1, num_filters]'], {}), '(g, [1, 1, 1, num_filters])\n', (2226, 2253), True, 'import tensorflow as tf\n'), ((2256, 2288), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['w', '[0, 1, 2]'], {}), '(w, [0, 1, 2])\n', (2274, 2288), True, 'import tensorflow as tf\n'), ((2411, 2448), 'tensorflow.reshape', 'tf.reshape', (['b', '[1, 1, 1, num_filters]'], {}), '(b, [1, 1, 1, num_filters])\n', (2421, 2448), True, 'import tensorflow as tf\n'), ((5971, 6038), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[num_outputs]'], {'initializer': 'biases_initializer'}), "('b', 
[num_outputs], initializer=biases_initializer)\n", (5986, 6038), True, 'import tensorflow as tf\n'), ((6166, 6192), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['w', '[0]'], {}), '(w, [0])\n', (6184, 6192), True, 'import tensorflow as tf\n'), ((6277, 6297), 'tensorflow.expand_dims', 'tf.expand_dims', (['b', '(0)'], {}), '(b, 0)\n', (6291, 6297), True, 'import tensorflow as tf\n'), ((7949, 7961), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (7958, 7961), True, 'import tensorflow as tf\n'), ((8123, 8172), 'tensorflow.split', 'tf.split', (['h', '[z_dims, z_dims, h_dims, h_dims]', '(-1)'], {}), '(h, [z_dims, z_dims, h_dims, h_dims], -1)\n', (8131, 8172), True, 'import tensorflow as tf\n'), ((8274, 8286), 'tensorflow.nn.elu', 'tf.nn.elu', (['h'], {}), '(h)\n', (8283, 8286), True, 'import tensorflow as tf\n'), ((9221, 9243), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (9241, 9243), True, 'import tensorflow as tf\n'), ((9441, 9453), 'six.moves.range', 'range', (['depth'], {}), '(depth)\n', (9446, 9453), False, 'from six.moves import range\n'), ((9552, 9564), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (9561, 9564), True, 'import tensorflow as tf\n'), ((9800, 9851), 'tensorflow.split', 'tf.split', (['h_p', '[z_dims, z_dims, h_dims, h_dims]', '(-1)'], {}), '(h_p, [z_dims, z_dims, h_dims, h_dims], -1)\n', (9808, 9851), True, 'import tensorflow as tf\n'), ((12011, 12040), 'tensorflow.concat', 'tf.concat', (['[z_val, h_det]', '(-1)'], {}), '([z_val, h_det], -1)\n', (12020, 12040), True, 'import tensorflow as tf\n'), ((21182, 21202), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x'], {}), '(x)\n', (21199, 21202), True, 'import tensorflow as tf\n'), ((22741, 22765), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (22763, 22765), False, 'from neutra import utils\n'), ((25575, 25608), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (25589, 25608), True, 'import tensorflow as tf\n'), ((25709, 25726), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (25722, 25726), True, 'import tensorflow as tf\n'), ((25739, 25758), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (25751, 25758), True, 'import tensorflow as tf\n'), ((26494, 26511), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (26507, 26511), True, 'import tensorflow as tf\n'), ((26524, 26543), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (26536, 26543), True, 'import tensorflow as tf\n'), ((27299, 27316), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (27312, 27316), True, 'import tensorflow as tf\n'), ((27329, 27348), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (27341, 27348), True, 'import tensorflow as tf\n'), ((28181, 28212), 'numpy.arange', 'np.arange', (['(num_dims - 1)', '(-1)', '(-1)'], {}), '(num_dims - 1, -1, -1)\n', (28190, 28212), True, 'import numpy as np\n'), ((29222, 29255), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (29236, 29255), True, 'import tensorflow as tf\n'), ((29397, 29414), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (29410, 29414), True, 'import tensorflow as tf\n'), ((29427, 29446), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (29439, 29446), True, 'import tensorflow as tf\n'), ((30268, 30299), 'numpy.arange', 'np.arange', (['(num_dims - 1)', '(-1)', 
'(-1)'], {}), '(num_dims - 1, -1, -1)\n', (30277, 30299), True, 'import numpy as np\n'), ((31419, 31452), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (31433, 31452), True, 'import tensorflow as tf\n'), ((31594, 31611), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (31607, 31611), True, 'import tensorflow as tf\n'), ((31624, 31643), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (31636, 31643), True, 'import tensorflow as tf\n'), ((32734, 32758), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['x'], {}), '(x)\n', (32755, 32758), True, 'import tensorflow as tf\n'), ((32819, 32846), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['y'], {}), '(y)\n', (32843, 32846), True, 'import tensorflow as tf\n'), ((34947, 34980), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (34961, 34980), True, 'import tensorflow as tf\n'), ((36241, 36274), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['encoding_parts[1]'], {}), '(encoding_parts[1])\n', (36255, 36274), True, 'import tensorflow as tf\n'), ((36793, 36811), 'tensorflow.zeros', 'tf.zeros', (['num_dims'], {}), '(num_dims)\n', (36801, 36811), True, 'import tensorflow as tf\n'), ((36824, 36841), 'tensorflow.ones', 'tf.ones', (['num_dims'], {}), '(num_dims)\n', (36831, 36841), True, 'import tensorflow as tf\n'), ((38183, 38205), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['scales'], {}), '(scales)\n', (38197, 38205), True, 'import tensorflow as tf\n'), ((38719, 38856), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH]', '[0.001, 0.0001, 1e-05]'], {}), '(global_step, [train_size * 500 // TRAIN_BATCH, \n train_size * 800 // TRAIN_BATCH], [0.001, 0.0001, 1e-05])\n', (38746, 38856), True, 'import tensorflow as tf\n'), ((40100, 40121), 'six.moves.zip', 'zip', (['log_q_z', 'log_p_z'], {}), '(log_q_z, log_p_z)\n', (40103, 40121), False, 'from six.moves import zip\n'), ((42094, 42115), 'tensorflow.minimum', 'tf.minimum', (['frac', '(1.0)'], {}), '(frac, 1.0)\n', (42104, 42115), True, 'import tensorflow as tf\n'), ((42176, 42193), 'tensorflow.constant', 'tf.constant', (['beta'], {}), '(beta)\n', (42187, 42193), True, 'import tensorflow as tf\n'), ((42844, 42895), 'tensorflow.get_variable', 'tf.get_variable', (['"""step_size"""'], {'initializer': 'step_size'}), "('step_size', initializer=step_size)\n", (42859, 42895), True, 'import tensorflow as tf\n'), ((42930, 42952), 'tensorflow.constant', 'tf.constant', (['step_size'], {}), '(step_size)\n', (42941, 42952), True, 'import tensorflow as tf\n'), ((43011, 43096), 'tensorflow.get_variable', 'tf.get_variable', (['"""train_chain_state"""', '[self.train_size, z_dims]'], {'trainable': '(False)'}), "('train_chain_state', [self.train_size, z_dims], trainable=False\n )\n", (43026, 43096), True, 'import tensorflow as tf\n'), ((46593, 46629), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (46627, 46629), True, 'import tensorflow as tf\n'), ((47659, 47688), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_log_q_z'], {}), '(layer_log_q_z)\n', (47673, 47688), True, 'import tensorflow as tf\n'), ((47734, 47763), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_log_p_z'], {}), '(layer_log_p_z)\n', (47748, 47763), True, 'import tensorflow as tf\n'), ((47950, 47986), 'tensorflow.maximum', 
'tf.maximum', (['self._min_kl', 'layer_klqp'], {}), '(self._min_kl, layer_klqp)\n', (47960, 47986), True, 'import tensorflow as tf\n'), ((48147, 48161), 'six.moves.zip', 'zip', (['post_z', 'z'], {}), '(post_z, z)\n', (48150, 48161), False, 'from six.moves import zip\n'), ((48620, 48696), 'tensorflow_probability.mcmc.TransformedTransitionKernel', 'tfp.mcmc.TransformedTransitionKernel', ([], {'inner_kernel': 'kernel', 'bijector': 'bijector'}), '(inner_kernel=kernel, bijector=bijector)\n', (48656, 48696), True, 'import tensorflow_probability as tfp\n'), ((48843, 48876), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['s[-1, Ellipsis]'], {}), '(s[-1, Ellipsis])\n', (48859, 48876), True, 'import tensorflow as tf\n'), ((50164, 50202), 'tensorflow.gather', 'tf.gather', (['self._chain_state', 'data_idx'], {}), '(self._chain_state, data_idx)\n', (50173, 50202), True, 'import tensorflow as tf\n'), ((50916, 50956), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.sample_means'], {}), '(outputs.sample_means)\n', (50934, 50956), False, 'from neutra import utils\n'), ((51229, 51239), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (51237, 51239), True, 'import tensorflow as tf\n'), ((51383, 51426), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_step_size', '(0.001)', '(0.5)'], {}), '(new_step_size, 0.001, 0.5)\n', (51399, 51426), True, 'import tensorflow as tf\n'), ((51611, 51621), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (51619, 51621), True, 'import tensorflow as tf\n'), ((51632, 51660), 'tensorflow.name_scope', 'tf.name_scope', (['"""recog_train"""'], {}), "('recog_train')\n", (51645, 51660), True, 'import tensorflow as tf\n'), ((52389, 52415), 'tensorflow.name_scope', 'tf.name_scope', (['"""gen_train"""'], {}), "('gen_train')\n", (52402, 52415), True, 'import tensorflow as tf\n'), ((52434, 52539), 'tensorflow.cond', 'tf.cond', (['(global_step < self._no_gen_train_steps)', '(lambda : -outputs.elbo)', '(lambda : -outputs.mcmc_log_p)'], {}), '(global_step < self._no_gen_train_steps, lambda : -outputs.elbo, lambda\n : -outputs.mcmc_log_p)\n', (52441, 52539), True, 'import tensorflow as tf\n'), ((52996, 53027), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (53014, 53027), False, 'from neutra import utils\n'), ((53074, 53118), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.recon_means[:64]'], {}), '(outputs.recon_means[:64])\n', (53092, 53118), False, 'from neutra import utils\n'), ((53531, 53548), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (53539, 53548), True, 'import tensorflow as tf\n'), ((54177, 54208), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (54195, 54208), False, 'from neutra import utils\n'), ((54246, 54277), 'neutra.utils.StitchImages', 'utils.StitchImages', (['recons[:64]'], {}), '(recons[:64])\n', (54264, 54277), False, 'from neutra import utils\n'), ((54313, 54349), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ais_outputs.p_accept'], {}), '(ais_outputs.p_accept)\n', (54327, 54349), True, 'import tensorflow as tf\n'), ((55445, 55466), 'tensorflow.minimum', 'tf.minimum', (['frac', '(1.0)'], {}), '(frac, 1.0)\n', (55455, 55466), True, 'import tensorflow as tf\n'), ((55527, 55544), 'tensorflow.constant', 'tf.constant', (['beta'], {}), '(beta)\n', (55538, 55544), True, 'import tensorflow as tf\n'), ((59944, 59984), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.sample_means'], {}), '(outputs.sample_means)\n', 
(59962, 59984), False, 'from neutra import utils\n'), ((60361, 60392), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (60379, 60392), False, 'from neutra import utils\n'), ((60439, 60483), 'neutra.utils.StitchImages', 'utils.StitchImages', (['outputs.recon_means[:64]'], {}), '(outputs.recon_means[:64])\n', (60457, 60483), False, 'from neutra import utils\n'), ((60957, 60974), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (60965, 60974), True, 'import tensorflow as tf\n'), ((61471, 61502), 'neutra.utils.StitchImages', 'utils.StitchImages', (['images[:64]'], {}), '(images[:64])\n', (61489, 61502), False, 'from neutra import utils\n'), ((61540, 61571), 'neutra.utils.StitchImages', 'utils.StitchImages', (['recons[:64]'], {}), '(recons[:64])\n', (61558, 61571), False, 'from neutra import utils\n'), ((61607, 61643), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ais_outputs.p_accept'], {}), '(ais_outputs.p_accept)\n', (61621, 61643), True, 'import tensorflow as tf\n'), ((62301, 62336), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[train_op]'], {}), '([train_op])\n', (62324, 62336), True, 'import tensorflow as tf\n'), ((62663, 62688), 'neutra.utils.GetLoggingOutputs', 'utils.GetLoggingOutputs', ([], {}), '()\n', (62686, 62688), False, 'from neutra import utils\n'), ((63672, 63697), 'neutra.utils.GetLoggingOutputs', 'utils.GetLoggingOutputs', ([], {}), '()\n', (63695, 63697), False, 'from neutra import utils\n'), ((65179, 65203), 'tensorflow.Session.reset', 'tf.Session.reset', (['master'], {}), '(master)\n', (65195, 65203), True, 'import tensorflow as tf\n'), ((66957, 67002), 'tensorflow.logging.info', 'tf.logging.info', (['"""Submitting shard %d"""', 'shard'], {}), "('Submitting shard %d', shard)\n", (66972, 67002), True, 'import tensorflow as tf\n'), ((67386, 67415), 'numpy.hstack', 'np.hstack', (['[all_log_p, log_p]'], {}), '([all_log_p, log_p])\n', (67395, 67415), True, 'import numpy as np\n'), ((67919, 67988), 'neutra.utils.MNISTDataset', 'utils.MNISTDataset', (['FLAGS.fashion_mnist_data_dir', 'FLAGS.test_is_valid'], {}), '(FLAGS.fashion_mnist_data_dir, FLAGS.test_is_valid)\n', (67937, 67988), False, 'from neutra import utils\n'), ((6849, 6898), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': 'output_init_factor'}), '(factor=output_init_factor)\n', (6871, 6898), False, 'from neutra import utils\n'), ((7070, 7119), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': 'output_init_factor'}), '(factor=output_init_factor)\n', (7092, 7119), False, 'from neutra import utils\n'), ((9106, 9122), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (9114, 9122), True, 'import tensorflow as tf\n'), ((9150, 9164), 'tensorflow.shape', 'tf.shape', (['z[0]'], {}), '(z[0])\n', (9158, 9164), True, 'import tensorflow as tf\n'), ((9484, 9501), 'six.moves.range', 'range', (['num_blocks'], {}), '(num_blocks)\n', (9489, 9501), False, 'from six.moves import range\n'), ((10231, 10266), 'tensorflow.split', 'tf.split', (['h_q', '[z_dims, z_dims]', '(-1)'], {}), '(h_q, [z_dims, z_dims], -1)\n', (10239, 10266), True, 'import tensorflow as tf\n'), ((12138, 12184), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x', 'new_shape'], {}), '(x, new_shape)\n', (12170, 12184), True, 'import tensorflow as tf\n'), ((12197, 12243), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['h', 'new_shape'], {}), '(h, 
new_shape)\n', (12229, 12243), True, 'import tensorflow as tf\n'), ((17417, 17445), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', (['(0.01)'], {}), '(0.01)\n', (17439, 17445), False, 'from neutra import utils\n'), ((20490, 20518), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', (['(0.01)'], {}), '(0.01)\n', (20512, 20518), False, 'from neutra import utils\n'), ((22128, 22156), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', (['(0.01)'], {}), '(0.01)\n', (22150, 22156), False, 'from neutra import utils\n'), ((22655, 22679), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (22677, 22679), False, 'from neutra import utils\n'), ((23197, 23221), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (23219, 23221), False, 'from neutra import utils\n'), ((23350, 23385), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (23372, 23385), False, 'from neutra import utils\n'), ((24171, 24210), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(x * 255.0)', '(0.0)', '(255.0)'], {}), '(x * 255.0, 0.0, 255.0)\n', (24187, 24210), True, 'import tensorflow as tf\n'), ((28445, 28480), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (28467, 28480), False, 'from neutra import utils\n'), ((30730, 30765), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (30752, 30765), False, 'from neutra import utils\n'), ((34213, 34248), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (34235, 34248), False, 'from neutra import utils\n'), ((35123, 35140), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (35136, 35140), True, 'import tensorflow as tf\n'), ((35148, 35167), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (35160, 35167), True, 'import tensorflow as tf\n'), ((36376, 36393), 'tensorflow.zeros_like', 'tf.zeros_like', (['mu'], {}), '(mu)\n', (36389, 36393), True, 'import tensorflow as tf\n'), ((36401, 36420), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (36413, 36420), True, 'import tensorflow as tf\n'), ((37109, 37124), 'tensorflow.zeros', 'tf.zeros', (['shape'], {}), '(shape)\n', (37117, 37124), True, 'import tensorflow as tf\n'), ((37132, 37146), 'tensorflow.ones', 'tf.ones', (['shape'], {}), '(shape)\n', (37139, 37146), True, 'import tensorflow as tf\n'), ((38069, 38090), 'tensorflow.ones_initializer', 'tf.ones_initializer', ([], {}), '()\n', (38088, 38090), True, 'import tensorflow as tf\n'), ((38302, 38324), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['scales'], {}), '(scales)\n', (38316, 38324), True, 'import tensorflow as tf\n'), ((38917, 39055), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH]', '[0.0005, 0.0001, 1e-05]'], {}), '(global_step, [train_size * 500 // TRAIN_BATCH, \n train_size * 800 // TRAIN_BATCH], [0.0005, 0.0001, 1e-05])\n', (38944, 39055), True, 'import tensorflow as tf\n'), ((42057, 42080), 'tensorflow.to_float', 'tf.to_float', (['beta_steps'], {}), '(beta_steps)\n', (42068, 42080), True, 'import tensorflow as tf\n'), ((46830, 46867), 'tensorflow.minimum', 'tf.minimum', (['max_step', 'self._step_size'], {}), '(max_step, self._step_size)\n', (46840, 46867), True, 'import tensorflow as tf\n'), ((47915, 47936), 
'six.moves.zip', 'zip', (['log_q_z', 'log_p_z'], {}), '(log_q_z, log_p_z)\n', (47918, 47936), False, 'from six.moves import zip\n'), ((48171, 48234), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shape here: %s %s"""', 'post_z_e.shape', 'z_e.shape'], {}), "('Shape here: %s %s', post_z_e.shape, z_e.shape)\n", (48186, 48234), True, 'import tensorflow as tf\n'), ((48364, 48381), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (48372, 48381), True, 'import tensorflow as tf\n'), ((49237, 49254), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (49245, 49254), True, 'import tensorflow as tf\n'), ((49470, 49503), 'tensorflow.minimum', 'tf.minimum', (['log_accept_ratio', '(0.0)'], {}), '(log_accept_ratio, 0.0)\n', (49480, 49503), True, 'import tensorflow as tf\n'), ((51001, 51042), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[outputs.post_z]'], {}), '([outputs.post_z])\n', (51024, 51042), True, 'import tensorflow as tf\n'), ((51076, 51138), 'tensorflow.scatter_update', 'tf.scatter_update', (['self._chain_state', 'data_idx', 'outputs.post_z'], {}), '(self._chain_state, data_idx, outputs.post_z)\n', (51093, 51138), True, 'import tensorflow as tf\n'), ((51481, 51559), 'tensorflow.where', 'tf.where', (['(global_step > self._step_size_warmup)', 'new_step_size', 'self._step_size'], {}), '(global_step > self._step_size_warmup, new_step_size, self._step_size)\n', (51489, 51559), True, 'import tensorflow as tf\n'), ((53670, 53687), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (53678, 53687), True, 'import tensorflow as tf\n'), ((55408, 55431), 'tensorflow.to_float', 'tf.to_float', (['beta_steps'], {}), '(beta_steps)\n', (55419, 55431), True, 'import tensorflow as tf\n'), ((61096, 61113), 'tensorflow.add_n', 'tf.add_n', (['log_p_z'], {}), '(log_p_z)\n', (61104, 61113), True, 'import tensorflow as tf\n'), ((65215, 65233), 'tensorflow.Session', 'tf.Session', (['master'], {}), '(master)\n', (65225, 65233), True, 'import tensorflow as tf\n'), ((65263, 65276), 'numpy.zeros', 'np.zeros', (['[0]'], {}), '([0])\n', (65271, 65276), True, 'import numpy as np\n'), ((68040, 68105), 'neutra.utils.CIFAR10Dataset', 'utils.CIFAR10Dataset', (['FLAGS.cifar10_data_dir', 'FLAGS.test_is_valid'], {}), '(FLAGS.cifar10_data_dir, FLAGS.test_is_valid)\n', (68060, 68105), False, 'from neutra import utils\n'), ((69258, 69280), 'numpy.mean', 'np.mean', (['replica_log_p'], {}), '(replica_log_p)\n', (69265, 69280), True, 'import numpy as np\n'), ((69297, 69318), 'numpy.std', 'np.std', (['replica_log_p'], {}), '(replica_log_p)\n', (69303, 69318), True, 'import numpy as np\n'), ((69429, 69476), 'tensorflow.logging.info', 'tf.logging.info', (['"""All log_p: %s"""', 'replica_log_p'], {}), "('All log_p: %s', replica_log_p)\n", (69444, 69476), True, 'import tensorflow as tf\n'), ((13700, 13724), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {}), '()\n', (13722, 13724), False, 'from neutra import utils\n'), ((39111, 39205), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', '[train_size * 800 // TRAIN_BATCH]', '[0.01, 1e-05]'], {}), '(global_step, [train_size * 800 // TRAIN_BATCH],\n [0.01, 1e-05])\n', (39138, 39205), True, 'import tensorflow as tf\n'), ((39365, 39389), 'tensorflow.to_float', 'tf.to_float', (['global_step'], {}), '(global_step)\n', (39376, 39389), True, 'import tensorflow as tf\n'), ((42017, 42053), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), 
'()\n', (42051, 42053), True, 'import tensorflow as tf\n'), ((46670, 46694), 'tensorflow.to_float', 'tf.to_float', (['global_step'], {}), '(global_step)\n', (46681, 46694), True, 'import tensorflow as tf\n'), ((47606, 47626), 'six.moves.zip', 'zip', (['z', 'other_z_init'], {}), '(z, other_z_init)\n', (47609, 47626), False, 'from six.moves import zip\n'), ((48981, 49010), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['layer_log_q_z'], {}), '(layer_log_q_z)\n', (48995, 49010), True, 'import tensorflow as tf\n'), ((55368, 55404), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (55402, 55404), True, 'import tensorflow as tf\n'), ((65305, 65342), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['train_dir'], {}), '(train_dir)\n', (65331, 65342), True, 'import tensorflow as tf\n'), ((68154, 68178), 'neutra.utils.FakeMNISTDataset', 'utils.FakeMNISTDataset', ([], {}), '()\n', (68176, 68178), False, 'from neutra import utils\n'), ((9931, 9958), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['p_raw_scale'], {}), '(p_raw_scale)\n', (9945, 9958), True, 'import tensorflow as tf\n'), ((10672, 10707), 'neutra.utils.L2HMCInitializer', 'utils.L2HMCInitializer', ([], {'factor': '(0.01)'}), '(factor=0.01)\n', (10694, 10707), False, 'from neutra import utils\n'), ((24800, 24826), 'tensorflow.floor', 'tf.floor', (['(sample / binsize)'], {}), '(sample / binsize)\n', (24808, 24826), True, 'import tensorflow as tf\n'), ((47494, 47520), 'tensorflow.identity', 'tf.identity', (['other_layer_z'], {}), '(other_layer_z)\n', (47505, 47520), True, 'import tensorflow as tf\n'), ((47554, 47574), 'tensorflow.identity', 'tf.identity', (['layer_z'], {}), '(layer_z)\n', (47565, 47574), True, 'import tensorflow as tf\n'), ((52075, 52092), 'tensorflow.identity', 'tf.identity', (['loss'], {}), '(loss)\n', (52086, 52092), True, 'import tensorflow as tf\n'), ((52125, 52151), 'tensorflow.identity', 'tf.identity', (['(-outputs.elbo)'], {}), '(-outputs.elbo)\n', (52136, 52151), True, 'import tensorflow as tf\n'), ((53870, 53886), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (53878, 53886), True, 'import tensorflow as tf\n'), ((54422, 54469), 'tensorflow.reshape', 'tf.reshape', (['ais_outputs.log_p', '[num_chains, -1]'], {}), '(ais_outputs.log_p, [num_chains, -1])\n', (54432, 54469), True, 'import tensorflow as tf\n'), ((61293, 61309), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (61301, 61309), True, 'import tensorflow as tf\n'), ((61716, 61763), 'tensorflow.reshape', 'tf.reshape', (['ais_outputs.log_p', '[num_chains, -1]'], {}), '(ais_outputs.log_p, [num_chains, -1])\n', (61726, 61763), True, 'import tensorflow as tf\n'), ((65698, 65759), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shard %d step %d started."""', 'shard', 'step_num'], {}), "('Shard %d step %d started.', shard, step_num)\n", (65713, 65759), True, 'import tensorflow as tf\n'), ((65808, 65866), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shard %d step %d done."""', 'shard', 'step_num'], {}), "('Shard %d step %d done.', shard, step_num)\n", (65823, 65866), True, 'import tensorflow as tf\n'), ((66074, 66112), 'numpy.hstack', 'np.hstack', (["[all_log_p, fetch['log_p']]"], {}), "([all_log_p, fetch['log_p']])\n", (66083, 66112), True, 'import numpy as np\n'), ((66366, 66406), 'tensorflow.logging.info', 'tf.logging.info', (['"""Shard %d done."""', 'shard'], {}), "('Shard %d done.', shard)\n", (66381, 66406), True, 'import tensorflow as tf\n'), 
((68751, 68780), 'six.moves.range', 'range', (['FLAGS.ais_num_replicas'], {}), '(FLAGS.ais_num_replicas)\n', (68756, 68780), False, 'from six.moves import range\n'), ((69666, 69690), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (69688, 69690), True, 'import tensorflow as tf\n'), ((69874, 69935), 'os.path.join', 'os.path.join', (['eval_dir', "('ais_shard_%d_done' % FLAGS.ais_shard)"], {}), "(eval_dir, 'ais_shard_%d_done' % FLAGS.ais_shard)\n", (69886, 69935), False, 'import os\n'), ((69945, 69979), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['sentinel_filename'], {}), '(sentinel_filename)\n', (69960, 69979), True, 'import tensorflow as tf\n'), ((70120, 70151), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['eval_dir'], {}), '(eval_dir)\n', (70141, 70151), True, 'import tensorflow as tf\n'), ((70353, 70380), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['eval_dir'], {}), '(eval_dir)\n', (70370, 70380), True, 'import tensorflow as tf\n'), ((11474, 11501), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['q_raw_scale'], {}), '(q_raw_scale)\n', (11488, 11501), True, 'import tensorflow as tf\n'), ((11604, 11625), 'tensorflow.zeros_like', 'tf.zeros_like', (['q_mean'], {}), '(q_mean)\n', (11617, 11625), True, 'import tensorflow as tf\n'), ((11633, 11658), 'tensorflow.ones_like', 'tf.ones_like', (['q_raw_scale'], {}), '(q_raw_scale)\n', (11645, 11658), True, 'import tensorflow as tf\n'), ((24914, 24951), 'tensorflow.sigmoid', 'tf.sigmoid', (['(sample + binsize / scales)'], {}), '(sample + binsize / scales)\n', (24924, 24951), True, 'import tensorflow as tf\n'), ((24954, 24972), 'tensorflow.sigmoid', 'tf.sigmoid', (['sample'], {}), '(sample)\n', (24964, 24972), True, 'import tensorflow as tf\n'), ((54496, 54519), 'tensorflow.to_float', 'tf.to_float', (['num_chains'], {}), '(num_chains)\n', (54507, 54519), True, 'import tensorflow as tf\n'), ((61790, 61813), 'tensorflow.to_float', 'tf.to_float', (['num_chains'], {}), '(num_chains)\n', (61801, 61813), True, 'import tensorflow as tf\n'), ((65969, 65992), 'numpy.mean', 'np.mean', (["fetch['log_p']"], {}), "(fetch['log_p'])\n", (65976, 65992), True, 'import numpy as np\n'), ((66022, 66048), 'numpy.mean', 'np.mean', (["fetch['p_accept']"], {}), "(fetch['p_accept'])\n", (66029, 66048), True, 'import numpy as np\n'), ((69606, 69635), 'six.moves.range', 'range', (['FLAGS.ais_num_replicas'], {}), '(FLAGS.ais_num_replicas)\n', (69611, 69635), False, 'from six.moves import range\n'), ((70483, 70503), 'numpy.savetxt', 'np.savetxt', (['f', 'log_p'], {}), '(f, log_p)\n', (70493, 70503), True, 'import numpy as np\n'), ((70515, 70552), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['sentinel_filename', '"""w"""'], {}), "(sentinel_filename, 'w')\n", (70528, 70552), True, 'import tensorflow as tf\n'), ((70406, 70462), 'os.path.join', 'os.path.join', (['eval_dir', "('ais_shard_%d' % FLAGS.ais_shard)"], {}), "(eval_dir, 'ais_shard_%d' % FLAGS.ais_shard)\n", (70418, 70462), False, 'import os\n')]
|
# ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import jax
import numpy as onp
import chex
from ..reward_tracing import TransitionBatch
from ..utils import SumTree
from ._base import BaseReplayBuffer
__all__ = (
'PrioritizedReplayBuffer',
)
class PrioritizedReplayBuffer(BaseReplayBuffer):
r"""
A simple ring buffer for experience replay, with prioritized sampling.
This class uses *proportional* sampling, which means that the transitions are sampled with
relative probability :math:`p_i` defined as:
.. math::
p_i\ =\ \frac
{\left(|\mathcal{A}_i| + \epsilon\right)^\alpha}
{\sum_{j=1}^N \left(|\mathcal{A}_j| + \epsilon\right)^\alpha}
Here :math:`\mathcal{A}_i` are advantages provided at insertion time and :math:`N` is the
capacity of the buffer, which may be quite large. The :math:`\mathcal{A}_i` are typically just
TD errors collected from a value-function updater, e.g. :func:`QLearning.td_error
<coax.td_learning.QLearning.td_error>`.
Since the prioritized samples are biased, the :attr:`sample` method also produces non-trivial
importance weights (stored in the :class:`TransitionBatch.W
<coax.reward_tracing.TransitionBatch>` attribute). The logic for constructing these weights for
a sample of batch size :math:`n` is:
.. math::
w_i\ =\ \frac{\left(Np_i\right)^{-\beta}}{\max_{j=1}^n \left(Np_j\right)^{-\beta}}
See section 3.4 of https://arxiv.org/abs/1511.05952 for more details.
Parameters
----------
capacity : positive int
The capacity of the experience replay buffer.
alpha : positive float, optional
The sampling temperature :math:`\alpha>0`.
beta : positive float, optional
The importance-weight exponent :math:`\beta>0`.
epsilon : positive float, optional
The small regulator :math:`\epsilon>0`.
random_seed : int, optional
To get reproducible results.
"""
def __init__(self, capacity, alpha=1.0, beta=1.0, epsilon=1e-4, random_seed=None):
if not (isinstance(capacity, int) and capacity > 0):
raise TypeError(f"capacity must be a positive int, got: {capacity}")
if not (isinstance(alpha, (float, int)) and alpha > 0):
raise TypeError(f"alpha must be a positive float, got: {alpha}")
if not (isinstance(beta, (float, int)) and beta > 0):
raise TypeError(f"beta must be a positive float, got: {beta}")
if not (isinstance(epsilon, (float, int)) and epsilon > 0):
raise TypeError(f"epsilon must be a positive float, got: {epsilon}")
self._capacity = int(capacity)
self._alpha = float(alpha)
self._beta = float(beta)
self._epsilon = float(epsilon)
self._random_seed = random_seed
self._rnd = onp.random.RandomState(random_seed)
self.clear() # sets: self._deque, self._index
@property
def capacity(self):
return self._capacity
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, new_alpha):
if not (isinstance(new_alpha, (float, int)) and new_alpha > 0):
raise TypeError(f"alpha must be a positive float, got: {new_alpha}")
if onp.isclose(new_alpha, self._alpha, rtol=0.01):
return # noop if new value is too close to old value (not worth the computation cost)
new_values = onp.where(
self._sumtree.values <= 0, 0., # only change exponents for positive values
onp.exp(onp.log(onp.maximum(self._sumtree.values, 1e-15)) * (new_alpha / self._alpha)))
self._sumtree.set_values(..., new_values)
self._alpha = float(new_alpha)
@property
def beta(self):
return self._beta
@beta.setter
def beta(self, new_beta):
if not (isinstance(new_beta, (float, int)) and new_beta > 0):
raise TypeError(f"beta must be a positive float, got: {new_beta}")
self._beta = float(new_beta)
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, new_epsilon):
if not (isinstance(new_epsilon, (float, int)) and new_epsilon > 0):
raise TypeError(f"epsilon must be a positive float, got: {new_epsilon}")
self._epsilon = float(new_epsilon)
def add(self, transition_batch, Adv):
r"""
Add a transition to the experience replay buffer.
Parameters
----------
transition_batch : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
Adv : ndarray
A batch of advantages, used to construct the priorities :math:`p_i`.
"""
if not isinstance(transition_batch, TransitionBatch):
raise TypeError(
f"transition_batch must be a TransitionBatch, got: {type(transition_batch)}")
transition_batch.idx = self._index + onp.arange(transition_batch.batch_size)
idx = transition_batch.idx % self.capacity # wrap around
chex.assert_equal_shape([idx, Adv])
self._storage[idx] = list(transition_batch.to_singles())
self._sumtree.set_values(idx, onp.power(onp.abs(Adv) + self.epsilon, self.alpha))
self._index += transition_batch.batch_size
def sample(self, batch_size=32):
r"""
Get a batch of transitions to be used for bootstrapped updates.
Parameters
----------
batch_size : positive int, optional
The desired batch size of the sample.
Returns
-------
transitions : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
"""
idx = self._sumtree.sample(n=batch_size)
P = self._sumtree.values[idx] / self._sumtree.root_value # prioritized, biased propensities
W = onp.power(P * len(self), -self.beta) # inverse propensity weights (β≈1)
W /= W.max() # for stability, ensure only down-weighting (see sec. 3.4 of arxiv:1511.05952)
transition_batch = _concatenate_leaves(self._storage[idx])
chex.assert_equal_shape([transition_batch.W, W])
transition_batch.W *= W
return transition_batch
def update(self, idx, Adv):
r"""
Update the priority weights of transitions previously added to the buffer.
Parameters
----------
idx : 1d array of ints
The identifiers of the transitions to be updated.
Adv : ndarray
The corresponding updated advantages.
"""
idx = onp.asarray(idx, dtype='int32')
Adv = onp.asarray(Adv, dtype='float32')
chex.assert_equal_shape([idx, Adv])
chex.assert_rank([idx, Adv], 1)
idx_lookup = idx % self.capacity # wrap around
new_values = onp.where(
_get_transition_batch_idx(self._storage[idx_lookup]) == idx, # only update if ids match
onp.power(onp.abs(Adv) + self.epsilon, self.alpha),
self._sumtree.values[idx_lookup])
self._sumtree.set_values(idx_lookup, new_values)
def clear(self):
r""" Clear the experience replay buffer. """
self._storage = onp.full(shape=(self.capacity,), fill_value=None, dtype='object')
self._sumtree = SumTree(capacity=self.capacity)
self._index = 0
def __len__(self):
return min(self.capacity, self._index)
def __bool__(self):
return bool(len(self))
def __iter__(self):
return iter(self._storage[:len(self)])
def _concatenate_leaves(pytrees):
return jax.tree_multimap(lambda *leaves: onp.concatenate(leaves, axis=0), *pytrees)
@onp.vectorize
def _get_transition_batch_idx(transition):
return transition.idx
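# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the coax module above): a numpy-only
# walk-through of the proportional prioritisation described in the class
# docstring, i.e. priorities p_i proportional to (|Adv_i| + eps)**alpha and
# importance weights w_i = (N * p_i)**(-beta) normalised by their maximum.
# All names below (adv, alpha, beta, eps, rng, ...) are local to this sketch;
# `onp` is the numpy import from the top of the module.
adv = onp.array([0.1, -2.0, 0.5, 3.0])       # advantages / TD errors
alpha, beta, eps = 1.0, 1.0, 1e-4
priorities = onp.power(onp.abs(adv) + eps, alpha)
p = priorities / priorities.sum()           # sampling propensities p_i
rng = onp.random.RandomState(13)
idx = rng.choice(len(adv), size=2, p=p)      # prioritized (biased) draw
W = onp.power(p[idx] * len(adv), -beta)      # inverse-propensity weights
W /= W.max()                                 # only down-weight, as in sample()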
|
[
"numpy.abs",
"numpy.isclose",
"chex.assert_rank",
"numpy.arange",
"numpy.asarray",
"chex.assert_equal_shape",
"numpy.concatenate",
"numpy.full",
"numpy.maximum",
"numpy.random.RandomState"
] |
[((4815, 4850), 'numpy.random.RandomState', 'onp.random.RandomState', (['random_seed'], {}), '(random_seed)\n', (4837, 4850), True, 'import numpy as onp\n'), ((5253, 5299), 'numpy.isclose', 'onp.isclose', (['new_alpha', 'self._alpha'], {'rtol': '(0.01)'}), '(new_alpha, self._alpha, rtol=0.01)\n', (5264, 5299), True, 'import numpy as onp\n'), ((7078, 7113), 'chex.assert_equal_shape', 'chex.assert_equal_shape', (['[idx, Adv]'], {}), '([idx, Adv])\n', (7101, 7113), False, 'import chex\n'), ((8175, 8223), 'chex.assert_equal_shape', 'chex.assert_equal_shape', (['[transition_batch.W, W]'], {}), '([transition_batch.W, W])\n', (8198, 8223), False, 'import chex\n'), ((8652, 8683), 'numpy.asarray', 'onp.asarray', (['idx'], {'dtype': '"""int32"""'}), "(idx, dtype='int32')\n", (8663, 8683), True, 'import numpy as onp\n'), ((8698, 8731), 'numpy.asarray', 'onp.asarray', (['Adv'], {'dtype': '"""float32"""'}), "(Adv, dtype='float32')\n", (8709, 8731), True, 'import numpy as onp\n'), ((8740, 8775), 'chex.assert_equal_shape', 'chex.assert_equal_shape', (['[idx, Adv]'], {}), '([idx, Adv])\n', (8763, 8775), False, 'import chex\n'), ((8784, 8815), 'chex.assert_rank', 'chex.assert_rank', (['[idx, Adv]', '(1)'], {}), '([idx, Adv], 1)\n', (8800, 8815), False, 'import chex\n'), ((9272, 9337), 'numpy.full', 'onp.full', ([], {'shape': '(self.capacity,)', 'fill_value': 'None', 'dtype': '"""object"""'}), "(shape=(self.capacity,), fill_value=None, dtype='object')\n", (9280, 9337), True, 'import numpy as onp\n'), ((6964, 7003), 'numpy.arange', 'onp.arange', (['transition_batch.batch_size'], {}), '(transition_batch.batch_size)\n', (6974, 7003), True, 'import numpy as onp\n'), ((9698, 9729), 'numpy.concatenate', 'onp.concatenate', (['leaves'], {'axis': '(0)'}), '(leaves, axis=0)\n', (9713, 9729), True, 'import numpy as onp\n'), ((7227, 7239), 'numpy.abs', 'onp.abs', (['Adv'], {}), '(Adv)\n', (7234, 7239), True, 'import numpy as onp\n'), ((9028, 9040), 'numpy.abs', 'onp.abs', (['Adv'], {}), '(Adv)\n', (9035, 9040), True, 'import numpy as onp\n'), ((5548, 5588), 'numpy.maximum', 'onp.maximum', (['self._sumtree.values', '(1e-15)'], {}), '(self._sumtree.values, 1e-15)\n', (5559, 5588), True, 'import numpy as onp\n')]
|
"""
@author: ludvigolsen
"""
from typing import Union
import numpy as np
import pandas as pd
from utipy.utils.check_instance import check_instance
from utipy.utils.convert_to_type import convert_to_type
def blend(x1: Union[list, np.ndarray, pd.Series], x2: Union[list, np.ndarray, pd.Series], amount: float = 0.5) -> Union[list, np.ndarray, pd.Series]:
"""
Blend two arrays
Parameters
----------
x1 : list, np.ndarray, pd.Series
The first array.
x2 : list, np.ndarray, pd.Series
The second array.
    amount : float
        Blend rate: a fraction between 0 and 1.
        0: Keep only x1.
        1: Keep only x2.
        0.1: 10% x2 / 90% x1.
        Any value strictly between 0 and 1 turns integer inputs into floats.
Returns
-------
list, np.ndarray, pd.Series
        Blended array with the same type as the first input (x1).
Examples
--------
Uncomment code to run.
# x1 = [1,2,3,4,5]
# x2 = [4,5,6,7,8]
# blend(x1, x2, amount = 0.5)
    # returns [2.5, 3.5, 4.5, 5.5, 6.5]
"""
# Get instance types (np.ndarray, list, pd.Series)
instance_type = check_instance(x1)
x1_weighted = np.multiply(x1, (1 - amount))
x2_weighted = np.multiply(x2, amount)
blended = x1_weighted + x2_weighted
# Convert to original type (np.ndarray, list, pd.Series)
return convert_to_type(blended, instance_type)
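# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the utipy module above): the same weighted
# average spelled out with plain numpy, matching the docstring semantics
# (amount=0.1 keeps 90% of x1 and mixes in 10% of x2).  The names a and b are
# local to this sketch.
a = np.array([1, 2, 3, 4, 5])
b = np.array([4, 5, 6, 7, 8])
# blend(a, b, amount=0.1) computes the same thing as:
manual = np.multiply(a, 1 - 0.1) + np.multiply(b, 0.1)   # -> [1.3, 2.3, 3.3, 4.3, 5.3]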
|
[
"utipy.utils.check_instance.check_instance",
"numpy.multiply",
"utipy.utils.convert_to_type.convert_to_type"
] |
[((1154, 1172), 'utipy.utils.check_instance.check_instance', 'check_instance', (['x1'], {}), '(x1)\n', (1168, 1172), False, 'from utipy.utils.check_instance import check_instance\n'), ((1192, 1219), 'numpy.multiply', 'np.multiply', (['x1', '(1 - amount)'], {}), '(x1, 1 - amount)\n', (1203, 1219), True, 'import numpy as np\n'), ((1240, 1263), 'numpy.multiply', 'np.multiply', (['x2', 'amount'], {}), '(x2, amount)\n', (1251, 1263), True, 'import numpy as np\n'), ((1378, 1417), 'utipy.utils.convert_to_type.convert_to_type', 'convert_to_type', (['blended', 'instance_type'], {}), '(blended, instance_type)\n', (1393, 1417), False, 'from utipy.utils.convert_to_type import convert_to_type\n')]
|
import numpy as np
import pandas as pd
from bokeh.core.json_encoder import serialize_json
from bokeh.core.properties import List, String
from bokeh.document import Document
from bokeh.layouts import row, column
from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button
from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs
from bokeh.palettes import viridis
from bokeh.plotting import figure, ColumnDataSource
from bokeh.util.compiler import bundle_all_models
from bokeh.util.serialization import make_id
from matplotlib import cm
from matplotlib.colors import rgb2hex
import os
from skyportal.models import (
DBSession,
Obj,
Photometry,
Group,
Instrument,
Telescope,
PHOT_ZP,
)
import sncosmo
DETECT_THRESH = 5 # sigma
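# Rest-frame wavelengths (Angstrom) and display colours for common spectral lines.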
SPEC_LINES = {
'H': ([3970, 4102, 4341, 4861, 6563], '#ff0000'),
'He': ([3886, 4472, 5876, 6678, 7065], '#002157'),
'He II': ([3203, 4686], '#003b99'),
'C II': ([3919, 4267, 6580, 7234, 9234], '#570199'),
'C III': ([4650, 5696], '#a30198'),
'C IV': ([5801], '#ff0073'),
'O': ([7772, 7774, 7775, 8447, 9266], '#007236'),
'O II': ([3727], '#00a64d'),
'O III': ([4959, 5007], '#00bf59'),
'Na': ([5890, 5896, 8183, 8195], '#aba000'),
'Mg': ([2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184], '#8c6239'),
'Mg II': ([2791, 2796, 2803, 4481], '#bf874e'),
'Si II': ([3856, 5041, 5056, 5670, 6347, 6371], '#5674b9'),
'S II': ([5433, 5454, 5606, 5640, 5647, 6715], '#a38409'),
'Ca II': ([3934, 3969, 7292, 7324, 8498, 8542, 8662], '#005050'),
'Fe II': ([5018, 5169], '#f26c4f'),
'Fe III': ([4397, 4421, 4432, 5129, 5158], '#f9917b'),
}
# TODO add groups
# Galaxy lines
#
# 'H': '4341, 4861, 6563;
# 'N II': '6548, 6583;
# 'O I': '6300;'
# 'O II': '3727;
# 'O III': '4959, 5007;
# 'Mg II': '2798;
# 'S II': '6717, 6731'
# 'H': '3970, 4102, 4341, 4861, 6563'
# 'Na': '5890, 5896, 8183, 8195'
# 'He': '3886, 4472, 5876, 6678, 7065'
# 'Mg': '2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184'
# 'He II': '3203, 4686'
# 'Mg II': '2791, 2796, 2803, 4481'
# 'O': '7772, 7774, 7775, 8447, 9266'
# 'Si II': '3856, 5041, 5056, 5670 6347, 6371'
# 'O II': '3727'
# 'Ca II': '3934, 3969, 7292, 7324, 8498, 8542, 8662'
# 'O III': '4959, 5007'
# 'Fe II': '5018, 5169'
# 'S II': '5433, 5454, 5606, 5640, 5647, 6715'
# 'Fe III': '4397, 4421, 4432, 5129, 5158'
#
# Other
#
# 'Tel: 6867-6884, 7594-7621'
# 'Tel': '#b7b7b7',
# 'H: 4341, 4861, 6563;
# 'N II': 6548, 6583;
# 'O I': 6300;
# 'O II': 3727;
# 'O III': 4959, 5007;
# 'Mg II': 2798;
# 'S II': 6717, 6731'
class CheckboxWithLegendGroup(CheckboxGroup):
colors = List(String, help="List of legend colors")
__implementation__ = """
import {empty, input, label, div} from "core/dom"
import * as p from "core/properties"
import {CheckboxGroup, CheckboxGroupView} from "models/widgets/checkbox_group"
export class CheckboxWithLegendGroupView extends CheckboxGroupView
render: () ->
super()
empty(@el)
active = @model.active
colors = @model.colors
for text, i in @model.labels
inputEl = input({type: "checkbox", value: "#{i}"})
inputEl.addEventListener("change", () => @change_input())
if @model.disabled then inputEl.disabled = true
if i in active then inputEl.checked = true
attrs = {
style: "border-left: 12px solid #{colors[i]}; padding-left: 0.3em;"
}
labelEl = label(attrs, inputEl, text)
if @model.inline
labelEl.classList.add("bk-bs-checkbox-inline")
@el.appendChild(labelEl)
else
divEl = div({class: "bk-bs-checkbox"}, labelEl)
@el.appendChild(divEl)
return @
export class CheckboxWithLegendGroup extends CheckboxGroup
type: "CheckboxWithLegendGroup"
default_view: CheckboxWithLegendGroupView
@define {
colors: [ p.Array, [] ]
}
"""
# TODO replace with (script, div) method
def _plot_to_json(plot):
"""Convert plot to JSON objects necessary for rendering with `bokehJS`.
Parameters
----------
plot : bokeh.plotting.figure.Figure
Bokeh plot object to be rendered.
Returns
-------
    (str, str, str)
        Returns (docs_json, render_items, custom_model_js) for the desired
        plot.
"""
render_items = [{'docid': plot._id, 'elementid': make_id()}]
doc = Document()
doc.add_root(plot)
docs_json_inner = doc.to_json()
docs_json = {render_items[0]['docid']: docs_json_inner}
docs_json = serialize_json(docs_json)
render_items = serialize_json(render_items)
custom_model_js = bundle_all_models()
return docs_json, render_items, custom_model_js
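# Illustrative usage (not part of the original module): the helper above is
# intended to be called on a finished bokeh figure, e.g.
#   docs_json, render_items, custom_model_js = _plot_to_json(some_figure)
# where `some_figure` stands in for any bokeh Figure; the three strings are
# what a page template needs to render the plot with bokehJS.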
tooltip_format = [
('mjd', '@mjd{0.000000}'),
('flux', '@flux'),
('filter', '@filter'),
('fluxerr', '@fluxerr'),
('mag', '@mag'),
('magerr', '@magerr'),
('lim_mag', '@lim_mag'),
('instrument', '@instrument'),
('stacked', '@stacked'),
]
cmap = cm.get_cmap('jet_r')
def get_color(bandpass_name, cmap_limits=(3000.0, 10000.0)):
if bandpass_name.startswith('ztf'):
return {'ztfg': 'green', 'ztfi': 'orange', 'ztfr': 'red'}[bandpass_name]
else:
bandpass = sncosmo.get_bandpass(bandpass_name)
wave = bandpass.wave_eff
rgb = cmap((cmap_limits[1] - wave) / (cmap_limits[1] - cmap_limits[0]))[:3]
bandcolor = rgb2hex(rgb)
return bandcolor
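# Illustrative examples (not part of the original module):
#   get_color('ztfr')   -> 'red' (from the hard-coded ZTF mapping above)
#   get_color('sdssg')  -> a hex colour picked from the jet_r colormap using
#                          the band's effective wavelength from sncosmo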
# TODO make async so that thread isn't blocked
def photometry_plot(obj_id, user, width=600, height=300):
"""Create scatter plot of photometry for object.
Parameters
----------
obj_id : str
ID of Obj to be plotted.
Returns
-------
(str, str)
Returns (docs_json, render_items) json for the desired plot.
"""
data = pd.read_sql(
DBSession()
.query(
Photometry,
Telescope.nickname.label("telescope"),
Instrument.name.label("instrument"),
)
.join(Instrument, Instrument.id == Photometry.instrument_id)
.join(Telescope, Telescope.id == Instrument.telescope_id)
.filter(Photometry.obj_id == obj_id)
.filter(
Photometry.groups.any(Group.id.in_([g.id for g in user.accessible_groups]))
)
.statement,
DBSession().bind,
)
if data.empty:
return None, None, None
data['color'] = [get_color(f) for f in data['filter']]
data['label'] = [
f'{i} {f}-band' for i, f in zip(data['instrument'], data['filter'])
]
data['zp'] = PHOT_ZP
data['magsys'] = 'ab'
data['alpha'] = 1.0
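    # 'lim_mag' is the DETECT_THRESH-sigma limiting magnitude, i.e. the
    # magnitude a source would have if its flux were DETECT_THRESH * fluxerr.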
data['lim_mag'] = -2.5 * np.log10(data['fluxerr'] * DETECT_THRESH) + data['zp']
# Passing a dictionary to a bokeh datasource causes the frontend to die,
# deleting the dictionary column fixes that
del data['original_user_data']
# keep track of things that are only upper limits
data['hasflux'] = ~data['flux'].isna()
# calculate the magnitudes - a photometry point is considered "significant"
# or "detected" (and thus can be represented by a magnitude) if its snr
# is above DETECT_THRESH
obsind = data['hasflux'] & (
data['flux'].fillna(0.0) / data['fluxerr'] >= DETECT_THRESH
)
data.loc[~obsind, 'mag'] = None
data.loc[obsind, 'mag'] = -2.5 * np.log10(data[obsind]['flux']) + PHOT_ZP
# calculate the magnitude errors using standard error propagation formulae
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
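    # since mag = -2.5 * log10(flux) + ZP, d(mag)/d(flux) = -2.5 / (flux * ln 10),
    # so sigma_mag = (2.5 / ln 10) * sigma_flux / flux, as computed below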
data.loc[~obsind, 'magerr'] = None
coeff = 2.5 / np.log(10)
magerrs = np.abs(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])
data.loc[obsind, 'magerr'] = magerrs
data['obs'] = obsind
data['stacked'] = False
split = data.groupby('label', sort=False)
finite = np.isfinite(data['flux'])
fdata = data[finite]
lower = np.min(fdata['flux']) * 0.95
upper = np.max(fdata['flux']) * 1.05
plot = figure(
plot_width=width,
plot_height=height,
active_drag='box_zoom',
tools='box_zoom,wheel_zoom,pan,reset,save',
y_range=(lower, upper),
)
imhover = HoverTool(tooltips=tooltip_format)
plot.add_tools(imhover)
model_dict = {}
for i, (label, sdf) in enumerate(split):
# for the flux plot, we only show things that have a flux value
df = sdf[sdf['hasflux']]
key = f'obs{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='flux',
color='color',
marker='circle',
fill_color='color',
alpha='alpha',
source=ColumnDataSource(df),
)
imhover.renderers.append(model_dict[key])
key = f'bin{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='flux',
color='color',
marker='circle',
fill_color='color',
source=ColumnDataSource(
data=dict(
mjd=[],
flux=[],
fluxerr=[],
filter=[],
color=[],
lim_mag=[],
mag=[],
magerr=[],
stacked=[],
instrument=[],
)
),
)
imhover.renderers.append(model_dict[key])
key = 'obserr' + str(i)
y_err_x = []
y_err_y = []
for d, ro in df.iterrows():
px = ro['mjd']
py = ro['flux']
err = ro['fluxerr']
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
alpha='alpha',
source=ColumnDataSource(
data=dict(
xs=y_err_x, ys=y_err_y, color=df['color'], alpha=[1.0] * len(df)
)
),
)
key = f'binerr{i}'
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
)
plot.xaxis.axis_label = 'MJD'
plot.yaxis.axis_label = 'Flux (μJy)'
plot.toolbar.logo = None
toggle = CheckboxWithLegendGroup(
labels=list(data.label.unique()),
active=list(range(len(data.label.unique()))),
colors=list(data.color.unique()),
)
# TODO replace `eval` with Namespaces
# https://github.com/bokeh/bokeh/pull/6340
toggle.callback = CustomJS(
args={'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglef.js')
).read(),
)
slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')
callback = CustomJS(
args={'slider': slider, 'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackf.js')
)
.read()
.replace('default_zp', str(PHOT_ZP))
.replace('detect_thresh', str(DETECT_THRESH)),
)
slider.js_on_change('value', callback)
# Mark the first and last detections
detection_dates = data[data['hasflux']]['mjd']
if len(detection_dates) > 0:
first = round(detection_dates.min(), 6)
last = round(detection_dates.max(), 6)
first_color = "#34b4eb"
last_color = "#8992f5"
midpoint = (upper + lower) / 2
line_top = 5 * upper - 4 * midpoint
line_bottom = 5 * lower - 4 * midpoint
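        # stretch the detection marker lines well beyond the plotted flux range
        # so they visually span the whole plot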
first_x = np.full(5000, first)
last_x = np.full(5000, last)
y = np.linspace(line_bottom, line_top, num=5000)
first_r = plot.line(
x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
)
plot.add_tools(
HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
)
last_r = plot.line(
x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
)
plot.add_tools(
HoverTool(tooltips=[("Last detection", f'{last}')], renderers=[last_r],)
)
layout = row(plot, toggle)
layout = column(slider, layout)
p1 = Panel(child=layout, title='Flux')
# now make the mag light curve
ymax = np.nanmax(data['mag']) + 0.1
ymin = np.nanmin(data['mag']) - 0.1
plot = figure(
plot_width=width,
plot_height=height,
active_drag='box_zoom',
tools='box_zoom,wheel_zoom,pan,reset,save',
y_range=(ymax, ymin),
toolbar_location='above',
)
# Mark the first and last detections again
if len(detection_dates) > 0:
midpoint = (ymax + ymin) / 2
line_top = 5 * ymax - 4 * midpoint
line_bottom = 5 * ymin - 4 * midpoint
y = np.linspace(line_bottom, line_top, num=5000)
first_r = plot.line(
x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
)
plot.add_tools(
HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
)
last_r = plot.line(
x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
)
plot.add_tools(
HoverTool(
tooltips=[("Last detection", f'{last}')],
renderers=[last_r],
point_policy='follow_mouse',
)
)
imhover = HoverTool(tooltips=tooltip_format)
plot.add_tools(imhover)
model_dict = {}
for i, (label, df) in enumerate(split):
key = f'obs{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='mag',
color='color',
marker='circle',
fill_color='color',
alpha='alpha',
source=ColumnDataSource(df[df['obs']]),
)
imhover.renderers.append(model_dict[key])
unobs_source = df[~df['obs']].copy()
unobs_source.loc[:, 'alpha'] = 0.8
key = f'unobs{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='lim_mag',
color='color',
marker='inverted_triangle',
fill_color='white',
line_color='color',
alpha='alpha',
source=ColumnDataSource(unobs_source),
)
imhover.renderers.append(model_dict[key])
key = f'bin{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='mag',
color='color',
marker='circle',
fill_color='color',
source=ColumnDataSource(
data=dict(
mjd=[],
flux=[],
fluxerr=[],
filter=[],
color=[],
lim_mag=[],
mag=[],
magerr=[],
instrument=[],
stacked=[],
)
),
)
imhover.renderers.append(model_dict[key])
key = 'obserr' + str(i)
y_err_x = []
y_err_y = []
for d, ro in df[df['obs']].iterrows():
px = ro['mjd']
py = ro['mag']
err = ro['magerr']
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
alpha='alpha',
source=ColumnDataSource(
data=dict(
xs=y_err_x,
ys=y_err_y,
color=df[df['obs']]['color'],
alpha=[1.0] * len(df[df['obs']]),
)
),
)
key = f'binerr{i}'
model_dict[key] = plot.multi_line(
xs='xs',
ys='ys',
color='color',
source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
)
key = f'unobsbin{i}'
model_dict[key] = plot.scatter(
x='mjd',
y='lim_mag',
color='color',
marker='inverted_triangle',
fill_color='white',
line_color='color',
alpha=0.8,
source=ColumnDataSource(
data=dict(
mjd=[],
flux=[],
fluxerr=[],
filter=[],
color=[],
lim_mag=[],
mag=[],
magerr=[],
instrument=[],
stacked=[],
)
),
)
imhover.renderers.append(model_dict[key])
key = f'all{i}'
model_dict[key] = ColumnDataSource(df)
key = f'bold{i}'
model_dict[key] = ColumnDataSource(
df[
[
'mjd',
'flux',
'fluxerr',
'mag',
'magerr',
'filter',
'zp',
'magsys',
'lim_mag',
'stacked',
]
]
)
plot.xaxis.axis_label = 'MJD'
plot.yaxis.axis_label = 'AB mag'
plot.toolbar.logo = None
toggle = CheckboxWithLegendGroup(
labels=list(data.label.unique()),
active=list(range(len(data.label.unique()))),
colors=list(data.color.unique()),
)
# TODO replace `eval` with Namespaces
# https://github.com/bokeh/bokeh/pull/6340
toggle.callback = CustomJS(
args={'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglem.js')
).read(),
)
slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')
button = Button(label="Export Bold Light Curve to CSV")
button.callback = CustomJS(
args={'slider': slider, 'toggle': toggle, **model_dict},
code=open(
os.path.join(
os.path.dirname(__file__), '../static/js/plotjs', "download.js"
)
)
.read()
.replace('objname', obj_id)
.replace('default_zp', str(PHOT_ZP)),
)
toplay = row(slider, button)
callback = CustomJS(
args={'slider': slider, 'toggle': toggle, **model_dict},
code=open(
os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackm.js')
)
.read()
.replace('default_zp', str(PHOT_ZP))
.replace('detect_thresh', str(DETECT_THRESH)),
)
slider.js_on_change('value', callback)
layout = row(plot, toggle)
layout = column(toplay, layout)
p2 = Panel(child=layout, title='Mag')
tabs = Tabs(tabs=[p2, p1])
return _plot_to_json(tabs)
# TODO make async so that thread isn't blocked
def spectroscopy_plot(obj_id, spec_id=None):
"""TODO normalization? should this be handled at data ingestion or plot-time?"""
obj = Obj.query.get(obj_id)
spectra = Obj.query.get(obj_id).spectra
if spec_id is not None:
spectra = [spec for spec in spectra if spec.id == int(spec_id)]
if len(spectra) == 0:
return None, None, None
color_map = dict(zip([s.id for s in spectra], viridis(len(spectra))))
data = pd.concat(
[
pd.DataFrame(
{
'wavelength': s.wavelengths,
'flux': s.fluxes,
'id': s.id,
'instrument': s.instrument.telescope.nickname,
}
)
for i, s in enumerate(spectra)
]
)
split = data.groupby('id')
hover = HoverTool(
tooltips=[('wavelength', '$x'), ('flux', '$y'), ('instrument', '@instrument')]
)
plot = figure(
plot_width=600,
plot_height=300,
sizing_mode='scale_both',
tools='box_zoom,wheel_zoom,pan,reset',
active_drag='box_zoom',
)
plot.add_tools(hover)
model_dict = {}
for i, (key, df) in enumerate(split):
model_dict['s' + str(i)] = plot.line(
x='wavelength', y='flux', color=color_map[key], source=ColumnDataSource(df)
)
plot.xaxis.axis_label = 'Wavelength (Å)'
plot.yaxis.axis_label = 'Flux'
plot.toolbar.logo = None
# TODO how to choose a good default?
plot.y_range = Range1d(0, 1.03 * data.flux.max())
toggle = CheckboxWithLegendGroup(
labels=[s.instrument.telescope.nickname for s in spectra],
active=list(range(len(spectra))),
width=100,
colors=[color_map[k] for k, df in split],
)
toggle.callback = CustomJS(
args={'toggle': toggle, **model_dict},
code="""
for (let i = 0; i < toggle.labels.length; i++) {
eval("s" + i).visible = (toggle.active.includes(i))
}
""",
)
elements = CheckboxWithLegendGroup(
labels=list(SPEC_LINES.keys()),
active=[],
width=80,
colors=[c for w, c in SPEC_LINES.values()],
)
z = TextInput(value=str(obj.redshift), title="z:")
v_exp = TextInput(value='0', title="v_exp:")
for i, (wavelengths, color) in enumerate(SPEC_LINES.values()):
el_data = pd.DataFrame({'wavelength': wavelengths})
el_data['x'] = el_data['wavelength'] * (1 + obj.redshift)
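        # initial line positions apply only the object redshift; the CustomJS
        # callback below additionally folds in the expansion velocity v_exp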
model_dict[f'el{i}'] = plot.segment(
x0='x',
x1='x',
# TODO change limits
y0=0,
y1=1e-13,
color=color,
source=ColumnDataSource(el_data),
)
model_dict[f'el{i}'].visible = False
# TODO callback policy: don't require submit for text changes?
elements.callback = CustomJS(
args={'elements': elements, 'z': z, 'v_exp': v_exp, **model_dict},
code="""
let c = 299792.458; // speed of light in km / s
for (let i = 0; i < elements.labels.length; i++) {
let el = eval("el" + i);
el.visible = (elements.active.includes(i))
el.data_source.data.x = el.data_source.data.wavelength.map(
x_i => (x_i * (1 + parseFloat(z.value)) /
(1 + parseFloat(v_exp.value) / c))
);
el.data_source.change.emit();
}
""",
)
z.callback = elements.callback
v_exp.callback = elements.callback
layout = row(plot, toggle, elements, column(z, v_exp))
return _plot_to_json(layout)
|
[
"bokeh.layouts.column",
"bokeh.models.widgets.TextInput",
"numpy.log10",
"bokeh.util.compiler.bundle_all_models",
"bokeh.plotting.figure",
"bokeh.layouts.row",
"skyportal.models.Group.id.in_",
"skyportal.models.Telescope.nickname.label",
"numpy.log",
"bokeh.util.serialization.make_id",
"numpy.isfinite",
"bokeh.models.Slider",
"matplotlib.colors.rgb2hex",
"numpy.nanmin",
"bokeh.models.CustomJS",
"numpy.max",
"numpy.linspace",
"numpy.nanmax",
"numpy.min",
"pandas.DataFrame",
"skyportal.models.DBSession",
"skyportal.models.Instrument.name.label",
"matplotlib.cm.get_cmap",
"numpy.abs",
"bokeh.core.properties.List",
"os.path.dirname",
"skyportal.models.Obj.query.get",
"bokeh.models.Button",
"bokeh.models.widgets.Tabs",
"bokeh.models.HoverTool",
"bokeh.core.json_encoder.serialize_json",
"bokeh.plotting.ColumnDataSource",
"sncosmo.get_bandpass",
"bokeh.models.widgets.Panel",
"numpy.full",
"bokeh.document.Document"
] |
[((4916, 4936), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet_r"""'], {}), "('jet_r')\n", (4927, 4936), False, 'from matplotlib import cm\n'), ((2656, 2698), 'bokeh.core.properties.List', 'List', (['String'], {'help': '"""List of legend colors"""'}), "(String, help='List of legend colors')\n", (2660, 2698), False, 'from bokeh.core.properties import List, String\n'), ((4319, 4329), 'bokeh.document.Document', 'Document', ([], {}), '()\n', (4327, 4329), False, 'from bokeh.document import Document\n'), ((4466, 4491), 'bokeh.core.json_encoder.serialize_json', 'serialize_json', (['docs_json'], {}), '(docs_json)\n', (4480, 4491), False, 'from bokeh.core.json_encoder import serialize_json\n'), ((4511, 4539), 'bokeh.core.json_encoder.serialize_json', 'serialize_json', (['render_items'], {}), '(render_items)\n', (4525, 4539), False, 'from bokeh.core.json_encoder import serialize_json\n'), ((4562, 4581), 'bokeh.util.compiler.bundle_all_models', 'bundle_all_models', ([], {}), '()\n', (4579, 4581), False, 'from bokeh.util.compiler import bundle_all_models\n'), ((7544, 7606), 'numpy.abs', 'np.abs', (["(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])"], {}), "(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])\n", (7550, 7606), True, 'import numpy as np\n'), ((7762, 7787), 'numpy.isfinite', 'np.isfinite', (["data['flux']"], {}), "(data['flux'])\n", (7773, 7787), True, 'import numpy as np\n'), ((7907, 8048), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'width', 'plot_height': 'height', 'active_drag': '"""box_zoom"""', 'tools': '"""box_zoom,wheel_zoom,pan,reset,save"""', 'y_range': '(lower, upper)'}), "(plot_width=width, plot_height=height, active_drag='box_zoom', tools=\n 'box_zoom,wheel_zoom,pan,reset,save', y_range=(lower, upper))\n", (7913, 8048), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((8106, 8140), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'tooltip_format'}), '(tooltips=tooltip_format)\n', (8115, 8140), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((10772, 10844), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0.0)', 'end': '(15.0)', 'value': '(0.0)', 'step': '(1.0)', 'title': '"""Binsize (days)"""'}), "(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')\n", (10778, 10844), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((12259, 12276), 'bokeh.layouts.row', 'row', (['plot', 'toggle'], {}), '(plot, toggle)\n', (12262, 12276), False, 'from bokeh.layouts import row, column\n'), ((12290, 12312), 'bokeh.layouts.column', 'column', (['slider', 'layout'], {}), '(slider, layout)\n', (12296, 12312), False, 'from bokeh.layouts import row, column\n'), ((12323, 12356), 'bokeh.models.widgets.Panel', 'Panel', ([], {'child': 'layout', 'title': '"""Flux"""'}), "(child=layout, title='Flux')\n", (12328, 12356), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((12485, 12654), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'width', 'plot_height': 'height', 'active_drag': '"""box_zoom"""', 'tools': '"""box_zoom,wheel_zoom,pan,reset,save"""', 'y_range': '(ymax, ymin)', 'toolbar_location': '"""above"""'}), "(plot_width=width, plot_height=height, active_drag='box_zoom', tools=\n 'box_zoom,wheel_zoom,pan,reset,save', y_range=(ymax, ymin),\n toolbar_location='above')\n", (12491, 12654), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((13550, 13584), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 
'tooltip_format'}), '(tooltips=tooltip_format)\n', (13559, 13584), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((17922, 17994), 'bokeh.models.Slider', 'Slider', ([], {'start': '(0.0)', 'end': '(15.0)', 'value': '(0.0)', 'step': '(1.0)', 'title': '"""Binsize (days)"""'}), "(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')\n", (17928, 17994), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((18009, 18055), 'bokeh.models.Button', 'Button', ([], {'label': '"""Export Bold Light Curve to CSV"""'}), "(label='Export Bold Light Curve to CSV')\n", (18015, 18055), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((18420, 18439), 'bokeh.layouts.row', 'row', (['slider', 'button'], {}), '(slider, button)\n', (18423, 18439), False, 'from bokeh.layouts import row, column\n'), ((18826, 18843), 'bokeh.layouts.row', 'row', (['plot', 'toggle'], {}), '(plot, toggle)\n', (18829, 18843), False, 'from bokeh.layouts import row, column\n'), ((18857, 18879), 'bokeh.layouts.column', 'column', (['toplay', 'layout'], {}), '(toplay, layout)\n', (18863, 18879), False, 'from bokeh.layouts import row, column\n'), ((18890, 18922), 'bokeh.models.widgets.Panel', 'Panel', ([], {'child': 'layout', 'title': '"""Mag"""'}), "(child=layout, title='Mag')\n", (18895, 18922), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((18935, 18954), 'bokeh.models.widgets.Tabs', 'Tabs', ([], {'tabs': '[p2, p1]'}), '(tabs=[p2, p1])\n', (18939, 18954), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((19175, 19196), 'skyportal.models.Obj.query.get', 'Obj.query.get', (['obj_id'], {}), '(obj_id)\n', (19188, 19196), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((19870, 19963), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('wavelength', '$x'), ('flux', '$y'), ('instrument', '@instrument')]"}), "(tooltips=[('wavelength', '$x'), ('flux', '$y'), ('instrument',\n '@instrument')])\n", (19879, 19963), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((19985, 20118), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(600)', 'plot_height': '(300)', 'sizing_mode': '"""scale_both"""', 'tools': '"""box_zoom,wheel_zoom,pan,reset"""', 'active_drag': '"""box_zoom"""'}), "(plot_width=600, plot_height=300, sizing_mode='scale_both', tools=\n 'box_zoom,wheel_zoom,pan,reset', active_drag='box_zoom')\n", (19991, 20118), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((20843, 21055), 'bokeh.models.CustomJS', 'CustomJS', ([], {'args': "{'toggle': toggle, **model_dict}", 'code': '"""\n for (let i = 0; i < toggle.labels.length; i++) {\n eval("s" + i).visible = (toggle.active.includes(i))\n }\n """'}), '(args={\'toggle\': toggle, **model_dict}, code=\n """\n for (let i = 0; i < toggle.labels.length; i++) {\n eval("s" + i).visible = (toggle.active.includes(i))\n }\n """\n )\n', (20851, 21055), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((21312, 21348), 'bokeh.models.widgets.TextInput', 'TextInput', ([], {'value': '"""0"""', 'title': '"""v_exp:"""'}), "(value='0', title='v_exp:')\n", (21321, 21348), False, 'from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs\n'), ((21918, 22514), 'bokeh.models.CustomJS', 'CustomJS', ([], {'args': "{'elements': elements, 'z': z, 'v_exp': v_exp, **model_dict}", 
'code': '"""\n let c = 299792.458; // speed of light in km / s\n for (let i = 0; i < elements.labels.length; i++) {\n let el = eval("el" + i);\n el.visible = (elements.active.includes(i))\n el.data_source.data.x = el.data_source.data.wavelength.map(\n x_i => (x_i * (1 + parseFloat(z.value)) /\n (1 + parseFloat(v_exp.value) / c))\n );\n el.data_source.change.emit();\n }\n """'}), '(args={\'elements\': elements, \'z\': z, \'v_exp\': v_exp, **model_dict},\n code=\n """\n let c = 299792.458; // speed of light in km / s\n for (let i = 0; i < elements.labels.length; i++) {\n let el = eval("el" + i);\n el.visible = (elements.active.includes(i))\n el.data_source.data.x = el.data_source.data.wavelength.map(\n x_i => (x_i * (1 + parseFloat(z.value)) /\n (1 + parseFloat(v_exp.value) / c))\n );\n el.data_source.change.emit();\n }\n """\n )\n', (21926, 22514), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((5150, 5185), 'sncosmo.get_bandpass', 'sncosmo.get_bandpass', (['bandpass_name'], {}), '(bandpass_name)\n', (5170, 5185), False, 'import sncosmo\n'), ((5323, 5335), 'matplotlib.colors.rgb2hex', 'rgb2hex', (['rgb'], {}), '(rgb)\n', (5330, 5335), False, 'from matplotlib.colors import rgb2hex\n'), ((7519, 7529), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (7525, 7529), True, 'import numpy as np\n'), ((7825, 7846), 'numpy.min', 'np.min', (["fdata['flux']"], {}), "(fdata['flux'])\n", (7831, 7846), True, 'import numpy as np\n'), ((7866, 7887), 'numpy.max', 'np.max', (["fdata['flux']"], {}), "(fdata['flux'])\n", (7872, 7887), True, 'import numpy as np\n'), ((11651, 11671), 'numpy.full', 'np.full', (['(5000)', 'first'], {}), '(5000, first)\n', (11658, 11671), True, 'import numpy as np\n'), ((11689, 11708), 'numpy.full', 'np.full', (['(5000)', 'last'], {}), '(5000, last)\n', (11696, 11708), True, 'import numpy as np\n'), ((11721, 11765), 'numpy.linspace', 'np.linspace', (['line_bottom', 'line_top'], {'num': '(5000)'}), '(line_bottom, line_top, num=5000)\n', (11732, 11765), True, 'import numpy as np\n'), ((12404, 12426), 'numpy.nanmax', 'np.nanmax', (["data['mag']"], {}), "(data['mag'])\n", (12413, 12426), True, 'import numpy as np\n'), ((12444, 12466), 'numpy.nanmin', 'np.nanmin', (["data['mag']"], {}), "(data['mag'])\n", (12453, 12466), True, 'import numpy as np\n'), ((12920, 12964), 'numpy.linspace', 'np.linspace', (['line_bottom', 'line_top'], {'num': '(5000)'}), '(line_bottom, line_top, num=5000)\n', (12931, 12964), True, 'import numpy as np\n'), ((16865, 16885), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (16881, 16885), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((16938, 17055), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (["df[['mjd', 'flux', 'fluxerr', 'mag', 'magerr', 'filter', 'zp', 'magsys',\n 'lim_mag', 'stacked']]"], {}), "(df[['mjd', 'flux', 'fluxerr', 'mag', 'magerr', 'filter',\n 'zp', 'magsys', 'lim_mag', 'stacked']])\n", (16954, 17055), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((19211, 19232), 'skyportal.models.Obj.query.get', 'Obj.query.get', (['obj_id'], {}), '(obj_id)\n', (19224, 19232), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((21434, 21475), 'pandas.DataFrame', 'pd.DataFrame', (["{'wavelength': wavelengths}"], {}), "({'wavelength': wavelengths})\n", (21446, 21475), True, 'import pandas as pd\n'), ((22640, 22656), 'bokeh.layouts.column', 'column', (['z', 'v_exp'], {}), '(z, v_exp)\n', 
(22646, 22656), False, 'from bokeh.layouts import row, column\n'), ((4296, 4305), 'bokeh.util.serialization.make_id', 'make_id', ([], {}), '()\n', (4303, 4305), False, 'from bokeh.util.serialization import make_id\n'), ((6236, 6247), 'skyportal.models.DBSession', 'DBSession', ([], {}), '()\n', (6245, 6247), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((6581, 6622), 'numpy.log10', 'np.log10', (["(data['fluxerr'] * DETECT_THRESH)"], {}), "(data['fluxerr'] * DETECT_THRESH)\n", (6589, 6622), True, 'import numpy as np\n'), ((7261, 7291), 'numpy.log10', 'np.log10', (["data[obsind]['flux']"], {}), "(data[obsind]['flux'])\n", (7269, 7291), True, 'import numpy as np\n'), ((11923, 11997), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('First detection', f'{first}')]", 'renderers': '[first_r]'}), "(tooltips=[('First detection', f'{first}')], renderers=[first_r])\n", (11932, 11997), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((12162, 12233), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('Last detection', f'{last}')]", 'renderers': '[last_r]'}), "(tooltips=[('Last detection', f'{last}')], renderers=[last_r])\n", (12171, 12233), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((13122, 13196), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('First detection', f'{first}')]", 'renderers': '[first_r]'}), "(tooltips=[('First detection', f'{first}')], renderers=[first_r])\n", (13131, 13196), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((13361, 13465), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('Last detection', f'{last}')]", 'renderers': '[last_r]', 'point_policy': '"""follow_mouse"""'}), "(tooltips=[('Last detection', f'{last}')], renderers=[last_r],\n point_policy='follow_mouse')\n", (13370, 13465), False, 'from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button\n'), ((19518, 19642), 'pandas.DataFrame', 'pd.DataFrame', (["{'wavelength': s.wavelengths, 'flux': s.fluxes, 'id': s.id, 'instrument': s\n .instrument.telescope.nickname}"], {}), "({'wavelength': s.wavelengths, 'flux': s.fluxes, 'id': s.id,\n 'instrument': s.instrument.telescope.nickname})\n", (19530, 19642), True, 'import pandas as pd\n'), ((8584, 8604), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (8600, 8604), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((13920, 13951), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (["df[df['obs']]"], {}), "(df[df['obs']])\n", (13936, 13951), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((14393, 14423), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['unobs_source'], {}), '(unobs_source)\n', (14409, 14423), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((20362, 20382), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (20378, 20382), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((21744, 21769), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['el_data'], {}), '(el_data)\n', (21760, 21769), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((6144, 6196), 'skyportal.models.Group.id.in_', 'Group.id.in_', (['[g.id for g in user.accessible_groups]'], {}), '([g.id for g in user.accessible_groups])\n', (6156, 6196), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, 
Instrument, Telescope, PHOT_ZP\n'), ((10670, 10695), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10685, 10695), False, 'import os\n'), ((17820, 17845), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (17835, 17845), False, 'import os\n'), ((10980, 11005), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10995, 11005), False, 'import os\n'), ((18214, 18239), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (18229, 18239), False, 'import os\n'), ((18574, 18599), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (18589, 18599), False, 'import os\n'), ((5815, 5852), 'skyportal.models.Telescope.nickname.label', 'Telescope.nickname.label', (['"""telescope"""'], {}), "('telescope')\n", (5839, 5852), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((5866, 5901), 'skyportal.models.Instrument.name.label', 'Instrument.name.label', (['"""instrument"""'], {}), "('instrument')\n", (5887, 5901), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n'), ((5751, 5762), 'skyportal.models.DBSession', 'DBSession', ([], {}), '()\n', (5760, 5762), False, 'from skyportal.models import DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP\n')]
|
# python
# import warnings
# Third party imports
import numpy as np
# grAdapt
from .base import Initial
from grAdapt.utils.sampling import sample_corner_bounds
class Vertices(Initial):
"""
Samples vertices if n_evals >= 2 ** len(bounds).
Else low discrepancy sequences are sampled.
"""
def __init__(self, sampling_method):
"""
Parameters
----------
sampling_method : grAdapt.sampling.equidistributed Object
Sample low discrepancy sequences when initial point method is not feasible
"""
super().__init__(sampling_method)
def sample(self, bounds, n_evals):
"""Returns a numpy array of sampled points.
        Includes the corner points of the hypercube/search space whenever the
        evaluation budget allows (n_evals >= 2 ** len(bounds)); otherwise falls
        back to low discrepancy sampling.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n_evals : int
number of initial points sampled by method
Returns
-------
(self.n_evals, len(self.bounds)) numpy array
"""
super().sample(bounds, n_evals)
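        # a d-dimensional box has 2 ** d vertices; enumerate them only when the
        # evaluation budget covers all of them, otherwise sample low discrepancy points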
if 2 ** len(self.bounds) > self.n_evals:
return self.sampling_method.sample(bounds=bounds, n=n_evals)
else:
corner_points = sample_corner_bounds(self.bounds)
num_corner_points = corner_points.shape[0]
if self.n_evals > 2 ** len(self.bounds):
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - num_corner_points),
x_history=corner_points)
return np.vstack((corner_points, random_points))
else:
return corner_points
|
[
"numpy.vstack",
"grAdapt.utils.sampling.sample_corner_bounds"
] |
[((1391, 1424), 'grAdapt.utils.sampling.sample_corner_bounds', 'sample_corner_bounds', (['self.bounds'], {}), '(self.bounds)\n', (1411, 1424), False, 'from grAdapt.utils.sampling import sample_corner_bounds\n'), ((1819, 1860), 'numpy.vstack', 'np.vstack', (['(corner_points, random_points)'], {}), '((corner_points, random_points))\n', (1828, 1860), True, 'import numpy as np\n')]
|
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains utility code for reading packed data files.
"""
import os
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import h5py
import tqdm
# Atom typing
#
# Atom typing is the process of figuring out which layer each atom should be
# written to. For ease of testing, the packed data file contains a lot of
# potentially useful atomic information which can be distilled during the
# data loading process.
#
# Atom typing is implemented by map functions of the type:
# (atom descriptor) -> (layer index)
#
# If the layer index is -1, the atom is ignored.
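#
# For example (illustrative only, not one of the typers defined below), a
# two-layer typer could send carbon to layer 0 and every other atom to layer 1:
#
#   lambda atomic_num, aro, hdon, hacc, pcharge: 0 if atomic_num == 6 else 1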
class AtomTyper(object):
def __init__(self, fn, num_layers):
"""Initialize an atom typer.
Args:
fn: a function of type:
(atomic_num, aro, hdon, hacc, pcharge) -> (mask)
num_layers: number of output layers (<=32)
"""
self._fn = fn
self._num_layers = num_layers
def size(self):
return self._num_layers
def apply(self, *args):
return self._fn(*args)
class CondAtomTyper(AtomTyper):
def __init__(self, cond_func):
assert len(cond_func) <= 16
def _fn(*args):
v = 0
for k in range(len(cond_func)):
if cond_func[k](*args):
v |= 1 << k
return v
super(CondAtomTyper, self).__init__(_fn, len(cond_func))
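# Example (illustrative): with conditions [is_carbon, is_nitrogen], applying the
# typer to a nitrogen atom satisfies only the second condition, so apply(...)
# returns the bitmask 0b10 == 2.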
REC_TYPER = {
# 1 channel, no hydrogen
'single': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num not in [0,1]
]),
# 1 channel, including hydrogen
'single_h': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num != 0
]),
# (C,N,O,S,*)
'simple': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num == 6,
lambda num, aro, hdon, hacc, pcharge: num == 7,
lambda num, aro, hdon, hacc, pcharge: num == 8,
lambda num, aro, hdon, hacc, pcharge: num == 16,
lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
]),
# (H,C,N,O,S,*)
'simple_h': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num == 1,
lambda num, aro, hdon, hacc, pcharge: num == 6,
lambda num, aro, hdon, hacc, pcharge: num == 7,
lambda num, aro, hdon, hacc, pcharge: num == 8,
lambda num, aro, hdon, hacc, pcharge: num == 16,
lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
]),
# (aro, hdon, hacc, positive, negative, occ)
'meta': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
]),
# (aro, hdon, hacc, positive, negative, occ)
'meta_mix': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
lambda num, aro, hdon, hacc, pcharge: num == 1, # hydrogen
lambda num, aro, hdon, hacc, pcharge: num == 6, # carbon
lambda num, aro, hdon, hacc, pcharge: num == 7, # nitrogen
lambda num, aro, hdon, hacc, pcharge: num == 8, # oxygen
lambda num, aro, hdon, hacc, pcharge: num == 16, # sulfur
])
}
LIG_TYPER = {
# 1 channel, no hydrogen
'single': CondAtomTyper([
lambda num: num not in [0,1]
]),
# 1 channel, including hydrogen
'single_h': CondAtomTyper([
lambda num: num != 0
]),
'simple': CondAtomTyper([
lambda num: num == 6, # carbon
lambda num: num == 7, # nitrogen
lambda num: num == 8, # oxygen
lambda num: num not in [0,1,6,7,8] # extra
]),
'simple_h': CondAtomTyper([
lambda num: num == 1, # hydrogen
lambda num: num == 6, # carbon
lambda num: num == 7, # nitrogen
lambda num: num == 8, # oxygen
lambda num: num not in [0,1,6,7,8] # extra
])
}
class FragmentDataset(Dataset):
"""Utility class to work with the packed fragments.h5 format."""
def __init__(self, fragment_file, rec_typer=REC_TYPER['simple'],
lig_typer=LIG_TYPER['simple'], filter_rec=None, filter_smi=None,
fdist_min=None, fdist_max=None, fmass_min=None, fmass_max=None,
verbose=False, lazy_loading=True):
"""Initializes the fragment dataset.
Args:
fragment_file: path to fragments.h5
rec_typer: AtomTyper for receptor
lig_typer: AtomTyper for ligand
filter_rec: list of receptor ids to use (or None to use all)
            filter_smi: list of ligand smiles strings to use (or None to use all)
            verbose: if True, show progress bars while loading
            lazy_loading: if True, defer atom typing until examples are accessed
(filtering options):
fdist_min: minimum fragment distance
fdist_max: maximum fragment distance
fmass_min: minimum fragment mass (Da)
fmass_max: maximum fragment mass (Da)
"""
self._rec_typer = rec_typer
self._lig_typer = lig_typer
self.verbose = verbose
self._lazy_loading = lazy_loading
self.rec = self._load_rec(fragment_file, rec_typer)
self.frag = self._load_fragments(fragment_file, lig_typer)
self.valid_idx = self._get_valid_examples(
filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose)
def _load_rec(self, fragment_file, rec_typer):
"""Loads receptor information."""
f = h5py.File(fragment_file, 'r')
rec_coords = f['rec_coords'][()]
rec_types = f['rec_types'][()]
rec_lookup = f['rec_lookup'][()]
r = range(len(rec_types))
if self.verbose:
r = tqdm.tqdm(r, desc='Remap receptor atoms')
rec_remapped = np.zeros(len(rec_types), dtype=np.uint16)
if not self._lazy_loading:
for i in r:
rec_remapped[i] = rec_typer.apply(*rec_types[i])
rec_loaded = np.zeros(len(rec_lookup)).astype(np.bool)
# create rec mapping
rec_mapping = {}
for i in range(len(rec_lookup)):
rec_mapping[rec_lookup[i][0].decode('ascii')] = i
rec = {
'rec_coords': rec_coords,
'rec_types': rec_types,
'rec_remapped': rec_remapped,
'rec_lookup': rec_lookup,
'rec_mapping': rec_mapping,
'rec_loaded': rec_loaded
}
f.close()
return rec
def _load_fragments(self, fragment_file, lig_typer):
"""Loads fragment information."""
f = h5py.File(fragment_file, 'r')
frag_data = f['frag_data'][()]
frag_lookup = f['frag_lookup'][()]
frag_smiles = f['frag_smiles'][()]
frag_mass = f['frag_mass'][()]
frag_dist = f['frag_dist'][()]
frag_lig_smi = None
frag_lig_idx = None
if 'frag_lig_smi' in f.keys():
frag_lig_smi = f['frag_lig_smi'][()]
frag_lig_idx = f['frag_lig_idx'][()]
# unpack frag data into separate structures
frag_coords = frag_data[:,:3].astype(np.float32)
frag_types = frag_data[:,3].astype(np.uint8)
frag_remapped = np.zeros(len(frag_types), dtype=np.uint16)
if not self._lazy_loading:
for i in range(len(frag_types)):
frag_remapped[i] = lig_typer.apply(frag_types[i])
frag_loaded = np.zeros(len(frag_lookup)).astype(np.bool)
# find and save connection point
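        # (an atom with type/atomic number 0 marks where the fragment attaches
        # to its parent molecule)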
r = range(len(frag_lookup))
if self.verbose:
r = tqdm.tqdm(r, desc='Frag connection point')
frag_conn = np.zeros((len(frag_lookup), 3))
for i in r:
_,f_start,f_end,_,_ = frag_lookup[i]
fdat = frag_data[f_start:f_end]
found = False
for j in range(len(fdat)):
if fdat[j][3] == 0:
frag_conn[i,:] = tuple(fdat[j])[:3]
found = True
break
assert found, "missing fragment connection point at %d" % i
frag = {
'frag_coords': frag_coords, # d_idx -> (x,y,z)
'frag_types': frag_types, # d_idx -> (type)
'frag_remapped': frag_remapped, # d_idx -> (layer)
'frag_lookup': frag_lookup, # f_idx -> (rec_id, fstart, fend, pstart, pend)
'frag_conn': frag_conn, # f_idx -> (x,y,z)
'frag_smiles': frag_smiles, # f_idx -> smiles
'frag_mass': frag_mass, # f_idx -> mass
'frag_dist': frag_dist, # f_idx -> dist
'frag_lig_smi': frag_lig_smi,
'frag_lig_idx': frag_lig_idx,
'frag_loaded': frag_loaded
}
f.close()
return frag
def _get_valid_examples(self, filter_rec, filter_smi, fdist_min, fdist_max, fmass_min,
fmass_max, verbose):
"""Returns an array of valid fragment indexes.
"Valid" in this context means the fragment belongs to a receptor in
filter_rec and the fragment abides by the optional mass/distance
constraints.
"""
# keep track of valid examples
valid_mask = np.ones(self.frag['frag_lookup'].shape[0]).astype(np.bool)
num_frags = self.frag['frag_lookup'].shape[0]
# filter by receptor id
if filter_rec is not None:
valid_rec = np.zeros(num_frags, dtype=np.bool)
r = range(num_frags)
if verbose:
r = tqdm.tqdm(r, desc='filter rec')
for i in r:
rec = self.frag['frag_lookup'][i][0].decode('ascii')
if rec in filter_rec:
valid_rec[i] = 1
valid_mask *= valid_rec
# filter by ligand smiles string
if filter_smi is not None:
valid_lig = np.zeros(num_frags, dtype=np.bool)
r = range(num_frags)
if verbose:
r = tqdm.tqdm(r, desc='filter lig')
for i in r:
smi = self.frag['frag_lig_smi'][self.frag['frag_lig_idx'][i]]
smi = smi.decode('ascii')
if smi in filter_smi:
valid_lig[i] = 1
valid_mask *= valid_lig
# filter by fragment distance
if fdist_min is not None:
valid_mask[self.frag['frag_dist'] < fdist_min] = 0
if fdist_max is not None:
valid_mask[self.frag['frag_dist'] > fdist_max] = 0
# filter by fragment mass
if fmass_min is not None:
valid_mask[self.frag['frag_mass'] < fmass_min] = 0
if fmass_max is not None:
valid_mask[self.frag['frag_mass'] > fmass_max] = 0
# convert to a list of indexes
valid_idx = np.where(valid_mask)[0]
return valid_idx
def __len__(self):
"""Returns the number of valid fragment examples."""
return self.valid_idx.shape[0]
def __getitem__(self, idx):
"""Returns the Nth example.
Returns a dict with:
f_coords: fragment coordinates (Fx3)
f_types: fragment layers (Fx1)
p_coords: parent coordinates (Px3)
p_types: parent layers (Px1)
r_coords: receptor coordinates (Rx3)
r_types: receptor layers (Rx1)
conn: fragment connection point in the parent molecule (x,y,z)
smiles: fragment smiles string
"""
# convert to fragment index
frag_idx = self.valid_idx[idx]
return self.get_raw(frag_idx)
def get_raw(self, frag_idx):
# lookup fragment
rec_id, f_start, f_end, p_start, p_end = self.frag['frag_lookup'][frag_idx]
smiles = self.frag['frag_smiles'][frag_idx].decode('ascii')
conn = self.frag['frag_conn'][frag_idx]
# lookup receptor
rec_idx = self.rec['rec_mapping'][rec_id.decode('ascii')]
_, r_start, r_end = self.rec['rec_lookup'][rec_idx]
# fetch data
# f_coords = self.frag['frag_coords'][f_start:f_end]
# f_types = self.frag['frag_types'][f_start:f_end]
p_coords = self.frag['frag_coords'][p_start:p_end]
r_coords = self.rec['rec_coords'][r_start:r_end]
if self._lazy_loading and self.frag['frag_loaded'][frag_idx] == 0:
frag_types = self.frag['frag_types']
frag_remapped = self.frag['frag_remapped']
# load parent
for i in range(p_start, p_end):
frag_remapped[i] = self._lig_typer.apply(frag_types[i])
self.frag['frag_loaded'][frag_idx] = 1
if self._lazy_loading and self.rec['rec_loaded'][rec_idx] == 0:
rec_types = self.rec['rec_types']
rec_remapped = self.rec['rec_remapped']
# load receptor
for i in range(r_start, r_end):
rec_remapped[i] = self._rec_typer.apply(*rec_types[i])
self.rec['rec_loaded'][rec_idx] = 1
p_mask = self.frag['frag_remapped'][p_start:p_end]
r_mask = self.rec['rec_remapped'][r_start:r_end]
return {
# 'f_coords': f_coords,
# 'f_types': f_types,
'p_coords': p_coords,
'p_types': p_mask,
'r_coords': r_coords,
'r_types': r_mask,
'conn': conn,
'smiles': smiles
}
def get_valid_smiles(self):
"""Returns a list of all valid smiles fragments."""
valid_smiles = set()
for idx in self.valid_idx:
smiles = self.frag['frag_smiles'][idx].decode('ascii')
valid_smiles.add(smiles)
return list(valid_smiles)
def lig_layers(self):
return self._lig_typer.size()
def rec_layers(self):
return self._rec_typer.size()
class SharedFragmentDataset(object):
def __init__(self, dat, filter_rec=None, filter_smi=None, fdist_min=None,
fdist_max=None, fmass_min=None, fmass_max=None):
self._dat = dat
self.valid_idx = self._dat._get_valid_examples(
filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose=True)
def __len__(self):
return self.valid_idx.shape[0]
def __getitem__(self, idx):
frag_idx = self.valid_idx[idx]
return self._dat.get_raw(frag_idx)
def get_valid_smiles(self):
"""Returns a list of all valid smiles fragments."""
valid_smiles = set()
for idx in self.valid_idx:
smiles = self._dat.frag['frag_smiles'][idx].decode('ascii')
valid_smiles.add(smiles)
return list(valid_smiles)
def lig_layers(self):
return self._dat.lig_layers()
def rec_layers(self):
return self._dat.rec_layers()
class FingerprintDataset(Dataset):
def __init__(self, fingerprint_file):
"""Initializes a fingerprint dataset.
Args:
fingerprint_file: path to a fingerprint .h5 file
"""
self.fingerprints = self._load_fingerprints(fingerprint_file)
def _load_fingerprints(self, fingerprint_file):
"""Loads fingerprint information."""
f = h5py.File(fingerprint_file, 'r')
fingerprint_data = f['fingerprints'][()]
fingerprint_smiles = f['smiles'][()]
# create smiles->idx mapping
fingerprint_mapping = {}
for i in range(len(fingerprint_smiles)):
sm = fingerprint_smiles[i].decode('ascii')
fingerprint_mapping[sm] = i
fingerprints = {
'fingerprint_data': fingerprint_data,
'fingerprint_mapping': fingerprint_mapping,
'fingerprint_smiles': fingerprint_smiles,
}
f.close()
return fingerprints
def for_smiles(self, smiles):
"""Return a Tensor of fingerprints for a list of smiles.
Args:
smiles: size N list of smiles strings (as str not bytes)
"""
fp = np.zeros((len(smiles), self.fingerprints['fingerprint_data'].shape[1]))
for i in range(len(smiles)):
fp_idx = self.fingerprints['fingerprint_mapping'][smiles[i]]
fp[i] = self.fingerprints['fingerprint_data'][fp_idx]
return torch.Tensor(fp)
|
[
"numpy.ones",
"numpy.where",
"tqdm.tqdm",
"torch.Tensor",
"h5py.File",
"numpy.zeros"
] |
[((6574, 6603), 'h5py.File', 'h5py.File', (['fragment_file', '"""r"""'], {}), "(fragment_file, 'r')\n", (6583, 6603), False, 'import h5py\n'), ((7665, 7694), 'h5py.File', 'h5py.File', (['fragment_file', '"""r"""'], {}), "(fragment_file, 'r')\n", (7674, 7694), False, 'import h5py\n'), ((16271, 16303), 'h5py.File', 'h5py.File', (['fingerprint_file', '"""r"""'], {}), "(fingerprint_file, 'r')\n", (16280, 16303), False, 'import h5py\n'), ((17332, 17348), 'torch.Tensor', 'torch.Tensor', (['fp'], {}), '(fp)\n', (17344, 17348), False, 'import torch\n'), ((6802, 6843), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""Remap receptor atoms"""'}), "(r, desc='Remap receptor atoms')\n", (6811, 6843), False, 'import tqdm\n'), ((8655, 8697), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""Frag connection point"""'}), "(r, desc='Frag connection point')\n", (8664, 8697), False, 'import tqdm\n'), ((10515, 10549), 'numpy.zeros', 'np.zeros', (['num_frags'], {'dtype': 'np.bool'}), '(num_frags, dtype=np.bool)\n', (10523, 10549), True, 'import numpy as np\n'), ((10966, 11000), 'numpy.zeros', 'np.zeros', (['num_frags'], {'dtype': 'np.bool'}), '(num_frags, dtype=np.bool)\n', (10974, 11000), True, 'import numpy as np\n'), ((11892, 11912), 'numpy.where', 'np.where', (['valid_mask'], {}), '(valid_mask)\n', (11900, 11912), True, 'import numpy as np\n'), ((10309, 10351), 'numpy.ones', 'np.ones', (["self.frag['frag_lookup'].shape[0]"], {}), "(self.frag['frag_lookup'].shape[0])\n", (10316, 10351), True, 'import numpy as np\n'), ((10628, 10659), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""filter rec"""'}), "(r, desc='filter rec')\n", (10637, 10659), False, 'import tqdm\n'), ((11079, 11110), 'tqdm.tqdm', 'tqdm.tqdm', (['r'], {'desc': '"""filter lig"""'}), "(r, desc='filter lig')\n", (11088, 11110), False, 'import tqdm\n')]
|
import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from dataset_pothole import pothole
from keras.models import model_from_json
# 4. Load pre-shuffled pothole data into train and test sets
(X_train, y_train), (X_test, y_test) = pothole.load_data()
print(X_train.shape)
print()
print (y_train.shape)
print()
# 5. Preprocess input data
X_train = X_train.reshape(X_train.shape[0], 200, 200, 1)
X_test = X_test.reshape(X_test.shape[0], 200, 200, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 3380
X_test /= 3380
# 6. Preprocess class labels
Y_train = np_utils.to_categorical(y_train, 4)
Y_test = np_utils.to_categorical(y_test, 4)
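# labels are now one-hot encoded, e.g. class 2 -> [0, 0, 1, 0]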
# 7. Define model architecture
nb_classes = 4
# number of epochs to train
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(200, 200, 1)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
convout2 = Activation('relu')
model.add(convout2)
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# accuracy metric is needed so that model.evaluate returns [loss, acc] below
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# 9. Fit model on training data
model.fit(X_train, Y_train,
batch_size=32, nb_epoch=2, verbose=1)
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
|
[
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"keras.layers.Dense",
"keras.utils.np_utils.to_categorical",
"numpy.random.seed",
"keras.layers.Activation",
"dataset_pothole.pothole.load_data",
"keras.layers.Dropout"
] |
[((19, 38), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (33, 38), True, 'import numpy as np\n'), ((423, 442), 'dataset_pothole.pothole.load_data', 'pothole.load_data', ([], {}), '()\n', (440, 442), False, 'from dataset_pothole import pothole\n'), ((785, 820), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', '(4)'], {}), '(y_train, 4)\n', (808, 820), False, 'from keras.utils import np_utils\n'), ((830, 864), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', '(4)'], {}), '(y_test, 4)\n', (853, 864), False, 'from keras.utils import np_utils\n'), ((1099, 1111), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1109, 1111), False, 'from keras.models import Sequential\n'), ((1275, 1293), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1285, 1293), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1380, 1398), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1390, 1398), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1123, 1218), 'keras.layers.Convolution2D', 'Convolution2D', (['nb_filters', 'nb_conv', 'nb_conv'], {'border_mode': '"""valid"""', 'input_shape': '(200, 200, 1)'}), "(nb_filters, nb_conv, nb_conv, border_mode='valid',\n input_shape=(200, 200, 1))\n", (1136, 1218), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((1324, 1367), 'keras.layers.Convolution2D', 'Convolution2D', (['nb_filters', 'nb_conv', 'nb_conv'], {}), '(nb_filters, nb_conv, nb_conv)\n', (1337, 1367), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((1429, 1471), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(nb_pool, nb_pool)'}), '(pool_size=(nb_pool, nb_pool))\n', (1441, 1471), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((1483, 1495), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1490, 1495), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1508, 1517), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1515, 1517), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1529, 1539), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (1534, 1539), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1551, 1569), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1561, 1569), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1581, 1593), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1588, 1593), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1605, 1622), 'keras.layers.Dense', 'Dense', (['nb_classes'], {}), '(nb_classes)\n', (1610, 1622), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1634, 1655), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1644, 1655), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n')]
|
import argparse, time, logging, os, math, random
os.environ["MXNET_USE_OPERATOR_TUNING"] = "0"
import numpy as np
from scipy import stats
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler
from os import listdir
import os.path
import argparse
import pickle
from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD
mpi_size = mpi_comm.Get_size()
mpi_rank = mpi_comm.Get_rank()
# print('rank: %d' % (mpi_rank), flush=True)
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="dir of the data", required=True)
parser.add_argument("--valdir", type=str, help="dir of the val data", required=True)
parser.add_argument("--batchsize", type=int, help="batchsize", default=8)
parser.add_argument("--epochs", type=int, help="epochs", default=100)
parser.add_argument("--interval", type=int, help="log interval", default=10)
parser.add_argument("--nsplit", type=int, help="number of split", default=40)
parser.add_argument("--lr", type=float, help="learning rate", default=0.001)
parser.add_argument("--alpha", type=float, help="moving average", default=1.0)
parser.add_argument("--alpha-decay", type=float, help="decay factor of alpha", default=0.5)
parser.add_argument("--alpha-decay-epoch", type=str, help="epoch of alpha decay", default='800')
parser.add_argument("--log", type=str, help="dir of the log file", default='train_cifar100.log')
parser.add_argument("--classes", type=int, help="number of classes", default=20)
parser.add_argument("--iterations", type=int, help="number of local epochs", default=50)
parser.add_argument("--aggregation", type=str, help="aggregation method", default='mean')
parser.add_argument("--nbyz", type=int, help="number of Byzantine workers", default=0)
parser.add_argument("--trim", type=int, help="number of trimmed workers on one side", default=0)
# parser.add_argument("--lr-decay", type=float, help="lr decay rate", default=0.1)
# parser.add_argument("--lr-decay-epoch", type=str, help="lr decay epoch", default='400')
parser.add_argument("--iid", type=int, help="IID setting", default=0)
parser.add_argument("--model", type=str, help="model", default='mobilenetv2_1.0')
parser.add_argument("--save", type=int, help="save", default=0)
parser.add_argument("--start-epoch", type=int, help="epoch start from", default=-1)
parser.add_argument("--seed", type=int, help="random seed", default=733)
args = parser.parse_args()
# print(args, flush=True)
filehandler = logging.FileHandler(args.log)
streamhandler = logging.StreamHandler()
if mpi_rank == 0:
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
mx.random.seed(args.seed + mpi_rank)
random.seed(args.seed + mpi_rank)
np.random.seed(args.seed + mpi_rank)
data_dir = os.path.join(args.dir, 'dataset_split_{}'.format(args.nsplit))
train_dir = os.path.join(data_dir, 'train')
# val_dir = os.path.join(data_dir, 'val')
val_train_dir = os.path.join(args.valdir, 'train')
val_val_dir = os.path.join(args.valdir, 'val')
training_files = []
for filename in sorted(listdir(train_dir)):
absolute_filename = os.path.join(train_dir, filename)
training_files.append(absolute_filename)
context = mx.cpu()
classes = args.classes
def get_train_batch(train_filename):
with open(train_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
def get_train_batch_byz(train_filename):
with open(train_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(classes - 1 - L)
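    # label flipping: simulated Byzantine workers train on inverted labels
    # (class c is replaced by classes - 1 - c)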
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(classes - 1 - L)
def get_val_train_batch(data_dir):
test_filename = os.path.join(data_dir, 'train_data_%03d.pkl' % mpi_rank)
with open(test_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
def get_val_val_batch(data_dir):
test_filename = os.path.join(data_dir, 'val_data_%03d.pkl' % mpi_rank)
with open(test_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
train_data_list = []
for training_file in training_files:
[train_X, train_Y] = get_train_batch(training_file)
train_dataset = mx.gluon.data.dataset.ArrayDataset(train_X, train_Y)
train_data = gluon.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
train_data_list.append(train_data)
[val_train_X, val_train_Y] = get_val_train_batch(val_train_dir)
val_train_dataset = mx.gluon.data.dataset.ArrayDataset(val_train_X, val_train_Y)
val_train_data = gluon.data.DataLoader(val_train_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
[val_val_X, val_val_Y] = get_val_val_batch(val_val_dir)
val_val_dataset = mx.gluon.data.dataset.ArrayDataset(val_val_X, val_val_Y)
val_val_data = gluon.data.DataLoader(val_val_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
model_name = args.model
if model_name == 'default':
net = gluon.nn.Sequential()
with net.name_scope():
# First convolutional layer
net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Dropout(rate=0.25))
# Second convolutional layer
# net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
# Third convolutional layer
net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Dropout(rate=0.25))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
        # Flatten and apply fully connected layers
net.add(gluon.nn.Flatten())
# net.add(gluon.nn.Dense(512, activation="relu"))
# net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dropout(rate=0.25))
net.add(gluon.nn.Dense(classes))
else:
model_kwargs = {'ctx': context, 'pretrained': False, 'classes': classes}
net = get_model(model_name, **model_kwargs)
if model_name.startswith('cifar') or model_name == 'default':
net.initialize(mx.init.Xavier(), ctx=context)
else:
net.initialize(mx.init.MSRAPrelu(), ctx=context)
# # no weight decay
# for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
# v.wd_mult = 0.0
optimizer = 'sgd'
lr = args.lr
# optimizer_params = {'momentum': 0.9, 'learning_rate': lr, 'wd': 0.0001}
optimizer_params = {'momentum': 0.0, 'learning_rate': lr, 'wd': 0.0}
# lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
alpha_decay_epoch = [int(i) for i in args.alpha_decay_epoch.split(',')]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
loss_func = gluon.loss.SoftmaxCrossEntropyLoss()
train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
train_cross_entropy = mx.metric.CrossEntropy()
# warmup
# print('warm up', flush=True)
trainer.set_learning_rate(0.01)
# train_data = random.choice(train_data_list)
train_data = train_data_list[90]
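# short warm-up on one fixed shard: a few forward/backward steps so that all deferred-initialized parameters exist before the rank-0 broadcast below (only a single step is taken when resuming from a checkpoint)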
for local_epoch in range(5):
for i, (data, label) in enumerate(train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
if args.start_epoch > 0:
break
if args.start_epoch > 0:
break
# # force initialization
# train_data = random.choice(train_data_list)
# for i, (data, label) in enumerate(train_data):
# outputs = net(data)
if mpi_rank == 0:
params_prev = [param.data().copy() for param in net.collect_params().values()]
else:
params_prev = None
nd.waitall()
# broadcast
params_prev = mpi_comm.bcast(params_prev, root=0)
for param, param_prev in zip(net.collect_params().values(), params_prev):
param.set_data(param_prev)
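# all ranks now share rank 0's initial parameters, i.e. a common global model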
if mpi_rank == 0:
worker_list = list(range(mpi_size))
training_file_index_list = [i for i in range(len(training_files))]
alpha = args.alpha
randperm_choice_list = []
randperm_list = [i for i in range(args.nsplit)]
for i in range(int(math.ceil(args.epochs * mpi_size / args.nsplit))):
random.shuffle(randperm_list)
randperm_choice_list = randperm_choice_list + randperm_list
if args.start_epoch > 0:
[dirname, postfix] = os.path.splitext(args.log)
filename = dirname + ("_%04d.params" % (args.start_epoch))
net.load_parameters(filename, ctx=context)
acc_top1.reset()
acc_top5.reset()
train_cross_entropy.reset()
for i, (data, label) in enumerate(val_val_data):
outputs = net(data)
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
for i, (data, label) in enumerate(val_train_data):
outputs = net(data)
train_cross_entropy.update(label, nd.softmax(outputs))
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
_, crossentropy = train_cross_entropy.get()
top1_list = mpi_comm.gather(top1, root=0)
top5_list = mpi_comm.gather(top5, root=0)
crossentropy_list = mpi_comm.gather(crossentropy, root=0)
if mpi_rank == 0:
top1_list = np.array(top1_list)
top5_list = np.array(top5_list)
crossentropy_list = np.array(crossentropy_list)
logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f'%(args.start_epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha))
nd.waitall()
time_0 = time.time()
for epoch in range(args.start_epoch+1, args.epochs):
# train_metric.reset()
# if epoch in lr_decay_epoch:
# lr = lr * args.lr_decay
if epoch in alpha_decay_epoch:
alpha = alpha * args.alpha_decay
tic = time.time()
if args.iid == 0:
if mpi_rank == 0:
training_file_index_sublist = randperm_choice_list[(mpi_size * epoch):(mpi_size * epoch + mpi_size)]
# logger.info(training_file_index_sublist)
else:
training_file_index_sublist = None
training_file_index = mpi_comm.scatter(training_file_index_sublist, root=0)
train_data = train_data_list[training_file_index]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
trainer.set_learning_rate(lr)
if alpha < 1:
for param, param_prev in zip(net.collect_params().values(), params_prev):
if param.grad_req != 'null':
param_prev[:] = param.data() * (1-alpha)
# select byz workers
if args.nbyz > 0:
if mpi_rank == 0:
random.shuffle(worker_list)
byz_worker_list = worker_list[0:args.nbyz]
else:
byz_worker_list = None
byz_worker_list = mpi_comm.bcast(byz_worker_list, root=0)
else:
byz_worker_list = []
if mpi_rank in byz_worker_list:
# byz worker
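        # a Byzantine worker re-initializes its weights and trains on label-flipped data (get_train_batch_byz returns classes - 1 - L), i.e. a label-flipping attack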
[byz_train_X, byz_train_Y] = get_train_batch_byz(random.choice(training_files))
byz_train_dataset = mx.gluon.data.dataset.ArrayDataset(byz_train_X, byz_train_Y)
byz_train_data = gluon.data.DataLoader(byz_train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
net.initialize(mx.init.MSRAPrelu(), ctx=context, force_reinit=True)
for local_epoch in range(args.iterations):
for i, (data, label) in enumerate(byz_train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
else:
# train
# local epoch
for local_epoch in range(args.iterations):
if args.iid == 1:
train_data = random.choice(train_data_list)
for i, (data, label) in enumerate(train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
# aggregation
nd.waitall()
params_np = [param.data().copy().asnumpy() for param in net.collect_params().values()]
params_np_list = mpi_comm.gather(params_np, root=0)
if mpi_rank == 0:
n_params = len(params_np)
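        # robust aggregation: the coordinate-wise trimmed mean cuts the lowest and highest args.trim/mpi_size fraction of worker values per parameter entry before averaging; otherwise a plain element-wise mean (FedAvg-style) is used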
if args.aggregation == "trim" or args.trim > 0:
params_np = [ ( stats.trim_mean( np.stack( [params[j] for params in params_np_list], axis=0), args.trim/mpi_size, axis=0 ) ) for j in range(n_params) ]
else:
params_np = [ ( np.mean( np.stack( [params[j] for params in params_np_list], axis=0), axis=0 ) ) for j in range(n_params) ]
else:
params_np = None
params_np = mpi_comm.bcast(params_np, root=0)
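    # every worker receives the aggregated parameters and loads them into its local model below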
params_nd = [ nd.array(param_np) for param_np in params_np ]
for param, param_nd in zip(net.collect_params().values(), params_nd):
param.set_data(param_nd)
if alpha < 1:
# moving average
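        # param_prev was set to (1 - alpha) * previous global weights before local training, so this computes alpha * new + (1 - alpha) * old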
for param, param_prev in zip(net.collect_params().values(), params_prev):
if param.grad_req != 'null':
weight = param.data()
weight[:] = weight * alpha + param_prev
# test
nd.waitall()
toc = time.time()
if ( epoch % args.interval == 0 or epoch == args.epochs-1 ) :
acc_top1.reset()
acc_top5.reset()
train_cross_entropy.reset()
for i, (data, label) in enumerate(val_val_data):
outputs = net(data)
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
for i, (data, label) in enumerate(val_train_data):
outputs = net(data)
train_cross_entropy.update(label, nd.softmax(outputs))
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
_, crossentropy = train_cross_entropy.get()
top1_list = mpi_comm.gather(top1, root=0)
top5_list = mpi_comm.gather(top5, root=0)
crossentropy_list = mpi_comm.gather(crossentropy, root=0)
if mpi_rank == 0:
top1_list = np.array(top1_list)
top5_list = np.array(top5_list)
crossentropy_list = np.array(crossentropy_list)
logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f, time=%f, elapsed=%f'%(epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha, toc-tic, time.time()-time_0))
# logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f'%(epoch, top1, top5))
if args.save == 1:
[dirname, postfix] = os.path.splitext(args.log)
filename = dirname + ("_%04d.params" % (epoch))
net.save_parameters(filename)
nd.waitall()
|
[
"logging.getLogger",
"logging.StreamHandler",
"mxnet.autograd.record",
"mxnet.gluon.nn.Conv2D",
"mxnet.gluon.nn.BatchNorm",
"mxnet.init.Xavier",
"numpy.array",
"mxnet.gluon.nn.MaxPool2D",
"mxnet.gluon.nn.Sequential",
"mxnet.gluon.data.dataset.ArrayDataset",
"mxnet.gluon.nn.Flatten",
"mxnet.gluon.loss.SoftmaxCrossEntropyLoss",
"os.listdir",
"mxnet.metric.Accuracy",
"argparse.ArgumentParser",
"mxnet.nd.waitall",
"numpy.stack",
"logging.FileHandler",
"numpy.random.seed",
"mxnet.nd.array",
"mxnet.gluon.data.DataLoader",
"mxnet.nd.softmax",
"random.choice",
"random.shuffle",
"mxnet.gluon.nn.Dense",
"mxnet.metric.TopKAccuracy",
"os.path.splitext",
"pickle.load",
"mxnet.gluon.nn.Dropout",
"time.time",
"math.ceil",
"mxnet.cpu",
"os.path.join",
"mxnet.metric.CrossEntropy",
"random.seed",
"gluoncv.model_zoo.get_model",
"mxnet.random.seed",
"mxnet.init.MSRAPrelu"
] |
[((622, 647), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (645, 647), False, 'import argparse\n'), ((2619, 2648), 'logging.FileHandler', 'logging.FileHandler', (['args.log'], {}), '(args.log)\n', (2638, 2648), False, 'import argparse, time, logging, os, math, random\n'), ((2665, 2688), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2686, 2688), False, 'import argparse, time, logging, os, math, random\n'), ((2850, 2886), 'mxnet.random.seed', 'mx.random.seed', (['(args.seed + mpi_rank)'], {}), '(args.seed + mpi_rank)\n', (2864, 2886), True, 'import mxnet as mx\n'), ((2887, 2920), 'random.seed', 'random.seed', (['(args.seed + mpi_rank)'], {}), '(args.seed + mpi_rank)\n', (2898, 2920), False, 'import argparse, time, logging, os, math, random\n'), ((2921, 2957), 'numpy.random.seed', 'np.random.seed', (['(args.seed + mpi_rank)'], {}), '(args.seed + mpi_rank)\n', (2935, 2957), True, 'import numpy as np\n'), ((3045, 3076), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (3057, 3076), False, 'import argparse, time, logging, os, math, random\n'), ((3135, 3169), 'os.path.join', 'os.path.join', (['args.valdir', '"""train"""'], {}), "(args.valdir, 'train')\n", (3147, 3169), False, 'import argparse, time, logging, os, math, random\n'), ((3184, 3216), 'os.path.join', 'os.path.join', (['args.valdir', '"""val"""'], {}), "(args.valdir, 'val')\n", (3196, 3216), False, 'import argparse, time, logging, os, math, random\n'), ((3396, 3404), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (3402, 3404), True, 'import mxnet as mx\n'), ((5126, 5186), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['val_train_X', 'val_train_Y'], {}), '(val_train_X, val_train_Y)\n', (5160, 5186), True, 'import mxnet as mx\n'), ((5204, 5314), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['val_train_dataset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'last_batch': '"""keep"""', 'num_workers': '(1)'}), "(val_train_dataset, batch_size=1000, shuffle=False,\n last_batch='keep', num_workers=1)\n", (5225, 5314), False, 'from mxnet import gluon, nd\n'), ((5386, 5442), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['val_val_X', 'val_val_Y'], {}), '(val_val_X, val_val_Y)\n', (5420, 5442), True, 'import mxnet as mx\n'), ((5458, 5566), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['val_val_dataset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'last_batch': '"""keep"""', 'num_workers': '(1)'}), "(val_val_dataset, batch_size=1000, shuffle=False,\n last_batch='keep', num_workers=1)\n", (5479, 5566), False, 'from mxnet import gluon, nd\n'), ((8114, 8150), 'mxnet.gluon.loss.SoftmaxCrossEntropyLoss', 'gluon.loss.SoftmaxCrossEntropyLoss', ([], {}), '()\n', (8148, 8150), False, 'from mxnet import gluon, nd\n'), ((8167, 8187), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (8185, 8187), True, 'import mxnet as mx\n'), ((8200, 8220), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (8218, 8220), True, 'import mxnet as mx\n'), ((8232, 8257), 'mxnet.metric.TopKAccuracy', 'mx.metric.TopKAccuracy', (['(5)'], {}), '(5)\n', (8254, 8257), True, 'import mxnet as mx\n'), ((8280, 8304), 'mxnet.metric.CrossEntropy', 'mx.metric.CrossEntropy', ([], {}), '()\n', (8302, 8304), True, 'import mxnet as mx\n'), ((9074, 9086), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (9084, 9086), False, 'from mxnet import gluon, nd\n'), ((10845, 10857), 
'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (10855, 10857), False, 'from mxnet import gluon, nd\n'), ((10868, 10879), 'time.time', 'time.time', ([], {}), '()\n', (10877, 10879), False, 'import argparse, time, logging, os, math, random\n'), ((2721, 2742), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (2738, 2742), False, 'import argparse, time, logging, os, math, random\n'), ((3261, 3279), 'os.listdir', 'listdir', (['train_dir'], {}), '(train_dir)\n', (3268, 3279), False, 'from os import listdir\n'), ((3306, 3339), 'os.path.join', 'os.path.join', (['train_dir', 'filename'], {}), '(train_dir, filename)\n', (3318, 3339), False, 'import argparse, time, logging, os, math, random\n'), ((4051, 4107), 'os.path.join', 'os.path.join', (['data_dir', "('train_data_%03d.pkl' % mpi_rank)"], {}), "(data_dir, 'train_data_%03d.pkl' % mpi_rank)\n", (4063, 4107), False, 'import argparse, time, logging, os, math, random\n'), ((4394, 4448), 'os.path.join', 'os.path.join', (['data_dir', "('val_data_%03d.pkl' % mpi_rank)"], {}), "(data_dir, 'val_data_%03d.pkl' % mpi_rank)\n", (4406, 4448), False, 'import argparse, time, logging, os, math, random\n'), ((4816, 4868), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['train_X', 'train_Y'], {}), '(train_X, train_Y)\n', (4850, 4868), True, 'import mxnet as mx\n'), ((4886, 5006), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'last_batch': '"""rollover"""', 'num_workers': '(1)'}), "(train_dataset, batch_size=args.batchsize, shuffle=\n True, last_batch='rollover', num_workers=1)\n", (4907, 5006), False, 'from mxnet import gluon, nd\n'), ((5627, 5648), 'mxnet.gluon.nn.Sequential', 'gluon.nn.Sequential', ([], {}), '()\n', (5646, 5648), False, 'from mxnet import gluon, nd\n'), ((7385, 7422), 'gluoncv.model_zoo.get_model', 'get_model', (['model_name'], {}), '(model_name, **model_kwargs)\n', (7394, 7422), False, 'from gluoncv.model_zoo import get_model\n'), ((9551, 9580), 'random.shuffle', 'random.shuffle', (['randperm_list'], {}), '(randperm_list)\n', (9565, 9580), False, 'import argparse, time, logging, os, math, random\n'), ((9696, 9722), 'os.path.splitext', 'os.path.splitext', (['args.log'], {}), '(args.log)\n', (9712, 9722), False, 'import argparse, time, logging, os, math, random\n'), ((11142, 11153), 'time.time', 'time.time', ([], {}), '()\n', (11151, 11153), False, 'import argparse, time, logging, os, math, random\n'), ((13671, 13683), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (13681, 13683), False, 'from mxnet import gluon, nd\n'), ((14878, 14890), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (14888, 14890), False, 'from mxnet import gluon, nd\n'), ((14906, 14917), 'time.time', 'time.time', ([], {}), '()\n', (14915, 14917), False, 'import argparse, time, logging, os, math, random\n'), ((16547, 16559), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (16557, 16559), False, 'from mxnet import gluon, nd\n'), ((3524, 3538), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3535, 3538), False, 'import pickle\n'), ((3684, 3695), 'mxnet.nd.array', 'nd.array', (['L'], {}), '(L)\n', (3692, 3695), False, 'from mxnet import gluon, nd\n'), ((3795, 3809), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3806, 3809), False, 'import pickle\n'), ((3969, 3994), 'mxnet.nd.array', 'nd.array', (['(classes - 1 - L)'], {}), '(classes - 1 - L)\n', (3977, 3994), False, 'from mxnet import gluon, nd\n'), ((4164, 4178), 
'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4175, 4178), False, 'import pickle\n'), ((4328, 4339), 'mxnet.nd.array', 'nd.array', (['L'], {}), '(L)\n', (4336, 4339), False, 'from mxnet import gluon, nd\n'), ((4505, 4519), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4516, 4519), False, 'import pickle\n'), ((4669, 4680), 'mxnet.nd.array', 'nd.array', (['L'], {}), '(L)\n', (4677, 4680), False, 'from mxnet import gluon, nd\n'), ((7505, 7521), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {}), '()\n', (7519, 7521), True, 'import mxnet as mx\n'), ((7561, 7580), 'mxnet.init.MSRAPrelu', 'mx.init.MSRAPrelu', ([], {}), '()\n', (7578, 7580), True, 'import mxnet as mx\n'), ((9496, 9543), 'math.ceil', 'math.ceil', (['(args.epochs * mpi_size / args.nsplit)'], {}), '(args.epochs * mpi_size / args.nsplit)\n', (9505, 9543), False, 'import argparse, time, logging, os, math, random\n'), ((10521, 10540), 'numpy.array', 'np.array', (['top1_list'], {}), '(top1_list)\n', (10529, 10540), True, 'import numpy as np\n'), ((10561, 10580), 'numpy.array', 'np.array', (['top5_list'], {}), '(top5_list)\n', (10569, 10580), True, 'import numpy as np\n'), ((10609, 10636), 'numpy.array', 'np.array', (['crossentropy_list'], {}), '(crossentropy_list)\n', (10617, 10636), True, 'import numpy as np\n'), ((12496, 12556), 'mxnet.gluon.data.dataset.ArrayDataset', 'mx.gluon.data.dataset.ArrayDataset', (['byz_train_X', 'byz_train_Y'], {}), '(byz_train_X, byz_train_Y)\n', (12530, 12556), True, 'import mxnet as mx\n'), ((12586, 12710), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['byz_train_dataset'], {'batch_size': 'args.batchsize', 'shuffle': '(True)', 'last_batch': '"""rollover"""', 'num_workers': '(1)'}), "(byz_train_dataset, batch_size=args.batchsize, shuffle\n =True, last_batch='rollover', num_workers=1)\n", (12607, 12710), False, 'from mxnet import gluon, nd\n'), ((14408, 14426), 'mxnet.nd.array', 'nd.array', (['param_np'], {}), '(param_np)\n', (14416, 14426), False, 'from mxnet import gluon, nd\n'), ((3656, 3667), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (3664, 3667), False, 'from mxnet import gluon, nd\n'), ((3941, 3952), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (3949, 3952), False, 'from mxnet import gluon, nd\n'), ((4300, 4311), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (4308, 4311), False, 'from mxnet import gluon, nd\n'), ((4641, 4652), 'mxnet.nd.array', 'nd.array', (['B'], {}), '(B)\n', (4649, 4652), False, 'from mxnet import gluon, nd\n'), ((5729, 5807), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(64)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=64, kernel_size=3, padding=(1, 1), activation='relu')\n", (5744, 5807), False, 'from mxnet import gluon, nd\n'), ((5824, 5844), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (5842, 5844), False, 'from mxnet import gluon, nd\n'), ((5862, 5940), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(64)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=64, kernel_size=3, padding=(1, 1), activation='relu')\n", (5877, 5940), False, 'from mxnet import gluon, nd\n'), ((5957, 5977), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (5975, 5977), False, 'from mxnet import gluon, nd\n'), ((5995, 6037), 'mxnet.gluon.nn.MaxPool2D', 'gluon.nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (6013, 6037), False, 'from mxnet import gluon, nd\n'), ((6055, 
6082), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (6071, 6082), False, 'from mxnet import gluon, nd\n'), ((6236, 6315), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(128)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=128, kernel_size=3, padding=(1, 1), activation='relu')\n", (6251, 6315), False, 'from mxnet import gluon, nd\n'), ((6332, 6352), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (6350, 6352), False, 'from mxnet import gluon, nd\n'), ((6370, 6449), 'mxnet.gluon.nn.Conv2D', 'gluon.nn.Conv2D', ([], {'channels': '(128)', 'kernel_size': '(3)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(channels=128, kernel_size=3, padding=(1, 1), activation='relu')\n", (6385, 6449), False, 'from mxnet import gluon, nd\n'), ((6466, 6486), 'mxnet.gluon.nn.BatchNorm', 'gluon.nn.BatchNorm', ([], {}), '()\n', (6484, 6486), False, 'from mxnet import gluon, nd\n'), ((6504, 6546), 'mxnet.gluon.nn.MaxPool2D', 'gluon.nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (6522, 6546), False, 'from mxnet import gluon, nd\n'), ((6564, 6591), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (6580, 6591), False, 'from mxnet import gluon, nd\n'), ((7014, 7032), 'mxnet.gluon.nn.Flatten', 'gluon.nn.Flatten', ([], {}), '()\n', (7030, 7032), False, 'from mxnet import gluon, nd\n'), ((7166, 7204), 'mxnet.gluon.nn.Dense', 'gluon.nn.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (7180, 7204), False, 'from mxnet import gluon, nd\n'), ((7222, 7249), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (7238, 7249), False, 'from mxnet import gluon, nd\n'), ((7267, 7290), 'mxnet.gluon.nn.Dense', 'gluon.nn.Dense', (['classes'], {}), '(classes)\n', (7281, 7290), False, 'from mxnet import gluon, nd\n'), ((8550, 8561), 'mxnet.autograd.record', 'ag.record', ([], {}), '()\n', (8559, 8561), True, 'from mxnet import autograd as ag\n'), ((10195, 10214), 'mxnet.nd.softmax', 'nd.softmax', (['outputs'], {}), '(outputs)\n', (10205, 10214), False, 'from mxnet import gluon, nd\n'), ((12045, 12072), 'random.shuffle', 'random.shuffle', (['worker_list'], {}), '(worker_list)\n', (12059, 12072), False, 'import argparse, time, logging, os, math, random\n'), ((12433, 12462), 'random.choice', 'random.choice', (['training_files'], {}), '(training_files)\n', (12446, 12462), False, 'import argparse, time, logging, os, math, random\n'), ((12733, 12752), 'mxnet.init.MSRAPrelu', 'mx.init.MSRAPrelu', ([], {}), '()\n', (12750, 12752), True, 'import mxnet as mx\n'), ((15821, 15840), 'numpy.array', 'np.array', (['top1_list'], {}), '(top1_list)\n', (15829, 15840), True, 'import numpy as np\n'), ((15869, 15888), 'numpy.array', 'np.array', (['top5_list'], {}), '(top5_list)\n', (15877, 15888), True, 'import numpy as np\n'), ((15925, 15952), 'numpy.array', 'np.array', (['crossentropy_list'], {}), '(crossentropy_list)\n', (15933, 15952), True, 'import numpy as np\n'), ((13314, 13344), 'random.choice', 'random.choice', (['train_data_list'], {}), '(train_data_list)\n', (13327, 13344), False, 'import argparse, time, logging, os, math, random\n'), ((15431, 15450), 'mxnet.nd.softmax', 'nd.softmax', (['outputs'], {}), '(outputs)\n', (15441, 15450), False, 'from mxnet import gluon, nd\n'), ((16385, 16411), 'os.path.splitext', 'os.path.splitext', (['args.log'], {}), '(args.log)\n', (16401, 
16411), False, 'import argparse, time, logging, os, math, random\n'), ((12933, 12944), 'mxnet.autograd.record', 'ag.record', ([], {}), '()\n', (12942, 12944), True, 'from mxnet import autograd as ag\n'), ((13433, 13444), 'mxnet.autograd.record', 'ag.record', ([], {}), '()\n', (13442, 13444), True, 'from mxnet import autograd as ag\n'), ((14012, 14070), 'numpy.stack', 'np.stack', (['[params[j] for params in params_np_list]'], {'axis': '(0)'}), '([params[j] for params in params_np_list], axis=0)\n', (14020, 14070), True, 'import numpy as np\n'), ((14190, 14248), 'numpy.stack', 'np.stack', (['[params[j] for params in params_np_list]'], {'axis': '(0)'}), '([params[j] for params in params_np_list], axis=0)\n', (14198, 14248), True, 'import numpy as np\n'), ((16187, 16198), 'time.time', 'time.time', ([], {}), '()\n', (16196, 16198), False, 'import argparse, time, logging, os, math, random\n')]
|
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from multiprocessing import Pool
import pickle
import time
import numpy as np
import torch
from scipy.stats import norm
from collections import OrderedDict
import plotting as plg
import utils.model_utils as mutils
import utils.exp_utils as utils
def get_mirrored_patch_crops(patch_crops, org_img_shape):
mirrored_patch_crops = []
mirrored_patch_crops.append([[org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2], ii[3]]
if len(ii) == 4 else [org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2],
ii[3], ii[4], ii[5]]
for ii in patch_crops])
mirrored_patch_crops.append([[ii[0], ii[1], org_img_shape[3] - ii[3], org_img_shape[3] - ii[2]]
if len(ii) == 4 else [ii[0], ii[1], org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
mirrored_patch_crops.append([[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2]]
if len(ii) == 4 else
[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
return mirrored_patch_crops
def get_mirrored_patch_crops_ax_dep(patch_crops, org_img_shape, mirror_axes):
mirrored_patch_crops = []
for ax_ix, axes in enumerate(mirror_axes):
if isinstance(axes, (int, float)) and int(axes) == 0:
mirrored_patch_crops.append([[org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2], ii[3]]
if len(ii) == 4 else [org_img_shape[2] - ii[1], org_img_shape[2] - ii[0],
ii[2], ii[3], ii[4], ii[5]]
for ii in patch_crops])
elif isinstance(axes, (int, float)) and int(axes) == 1:
mirrored_patch_crops.append([[ii[0], ii[1], org_img_shape[3] - ii[3], org_img_shape[3] - ii[2]]
if len(ii) == 4 else [ii[0], ii[1], org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
elif hasattr(axes, "__iter__") and (tuple(axes) == (0, 1) or tuple(axes) == (1, 0)):
mirrored_patch_crops.append([[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2]]
if len(ii) == 4 else
[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
else:
raise Exception("invalid mirror axes {} in get mirrored patch crops".format(axes))
return mirrored_patch_crops
def apply_wbc_to_patient(inputs):
"""
wrapper around prediction box consolidation: weighted box clustering (wbc). processes a single patient.
loops over batch elements in patient results (1 in 3D, slices in 2D) and foreground classes,
aggregates and stores results in new list.
:return. patient_results_list: list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D
predictions, and a dummy batch dimension of 1 for 3D predictions.
:return. pid: string. patient id.
"""
regress_flag, in_patient_results_list, pid, class_dict, clustering_iou, n_ens = inputs
out_patient_results_list = [[] for _ in range(len(in_patient_results_list))]
for bix, b in enumerate(in_patient_results_list):
for cl in list(class_dict.keys()):
boxes = [(ix, box) for ix, box in enumerate(b) if
(box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
box_coords = np.array([b[1]['box_coords'] for b in boxes])
box_scores = np.array([b[1]['box_score'] for b in boxes])
box_center_factor = np.array([b[1]['box_patch_center_factor'] for b in boxes])
box_n_overlaps = np.array([b[1]['box_n_overlaps'] for b in boxes])
try:
box_patch_id = np.array([b[1]['patch_id'] for b in boxes])
except KeyError: #backward compatibility for already saved pred results ... omg
box_patch_id = np.array([b[1]['ens_ix'] for b in boxes])
box_regressions = np.array([b[1]['regression'] for b in boxes]) if regress_flag else None
box_rg_bins = np.array([b[1]['rg_bin'] if 'rg_bin' in b[1].keys() else float('NaN') for b in boxes])
box_rg_uncs = np.array([b[1]['rg_uncertainty'] if 'rg_uncertainty' in b[1].keys() else float('NaN') for b in boxes])
if 0 not in box_scores.shape:
keep_scores, keep_coords, keep_n_missing, keep_regressions, keep_rg_bins, keep_rg_uncs = \
weighted_box_clustering(box_coords, box_scores, box_center_factor, box_n_overlaps, box_rg_bins, box_rg_uncs,
box_regressions, box_patch_id, clustering_iou, n_ens)
for boxix in range(len(keep_scores)):
clustered_box = {'box_type': 'det', 'box_coords': keep_coords[boxix],
'box_score': keep_scores[boxix], 'cluster_n_missing': keep_n_missing[boxix],
'box_pred_class_id': cl}
if regress_flag:
clustered_box.update({'regression': keep_regressions[boxix],
'rg_uncertainty': keep_rg_uncs[boxix],
'rg_bin': keep_rg_bins[boxix]})
out_patient_results_list[bix].append(clustered_box)
# add gt boxes back to new output list.
out_patient_results_list[bix].extend([box for box in b if box['box_type'] == 'gt'])
return [out_patient_results_list, pid]
def weighted_box_clustering(box_coords, scores, box_pc_facts, box_n_ovs, box_rg_bins, box_rg_uncs,
box_regress, box_patch_id, thresh, n_ens):
"""Consolidates overlapping predictions resulting from patch overlaps, test data augmentations and temporal ensembling.
clusters predictions together with iou > thresh (like in NMS). Output score and coordinate for one cluster are the
average weighted by individual patch center factors (how trustworthy is this candidate measured by how centered
its position within the patch is) and the size of the corresponding box.
The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position
(1 prediction per unique patch). Missing predictions at a cluster position are defined as the number of unique
    patches in the cluster which did not contribute any boxes.
:param dets: (n_dets, (y1, x1, y2, x2, (z1), (z2), scores, box_pc_facts, box_n_ovs).
:param box_coords: y1, x1, y2, x2, (z1), (z2).
:param scores: confidence scores.
:param box_pc_facts: patch-center factors from position on patch tiles.
:param box_n_ovs: number of patch overlaps at box position.
:param box_rg_bins: regression bin predictions.
:param box_rg_uncs: (n_dets,) regression uncertainties (from model mrcnn_aleatoric).
:param box_regress: (n_dets, n_regression_features).
:param box_patch_id: ensemble index.
:param thresh: threshold for iou_matching.
:param n_ens: number of models, that are ensembled. (-> number of expected predictions per position).
:return: keep_scores: (n_keep) new scores of boxes to be kept.
:return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept.
"""
dim = 2 if box_coords.shape[1] == 4 else 3
y1 = box_coords[:,0]
x1 = box_coords[:,1]
y2 = box_coords[:,2]
x2 = box_coords[:,3]
areas = (y2 - y1 + 1) * (x2 - x1 + 1)
if dim == 3:
z1 = box_coords[:, 4]
z2 = box_coords[:, 5]
areas *= (z2 - z1 + 1)
# order is the sorted index. maps order to index o[1] = 24 (rank1, ix 24)
order = scores.argsort()[::-1]
keep_scores = []
keep_coords = []
keep_n_missing = []
keep_regress = []
keep_rg_bins = []
keep_rg_uncs = []
while order.size > 0:
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order])
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
inter = w * h
if dim == 3:
zz1 = np.maximum(z1[i], z1[order])
zz2 = np.minimum(z2[i], z2[order])
d = np.maximum(0, zz2 - zz1 + 1)
inter *= d
# overlap between currently highest scoring box and all boxes.
ovr = inter / (areas[i] + areas[order] - inter)
ovr_fl = inter.astype('float64') / (areas[i] + areas[order] - inter.astype('float64'))
assert np.all(ovr==ovr_fl), "ovr {}\n ovr_float {}".format(ovr, ovr_fl)
# get all the predictions that match the current box to build one cluster.
matches = np.nonzero(ovr > thresh)[0]
match_n_ovs = box_n_ovs[order[matches]]
match_pc_facts = box_pc_facts[order[matches]]
match_patch_id = box_patch_id[order[matches]]
match_ov_facts = ovr[matches]
match_areas = areas[order[matches]]
match_scores = scores[order[matches]]
# weight all scores in cluster by patch factors, and size.
match_score_weights = match_ov_facts * match_areas * match_pc_facts
match_scores *= match_score_weights
# for the weighted average, scores have to be divided by the number of total expected preds at the position
# of the current cluster. 1 Prediction per patch is expected. therefore, the number of ensembled models is
# multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be
# in areas of different overlaps).
n_expected_preds = n_ens * np.mean(match_n_ovs)
# the number of missing predictions is obtained as the number of patches,
# which did not contribute any prediction to the current cluster.
n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0]))
# missing preds are given the mean weighting
# (expected prediction is the mean over all predictions in cluster).
denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights)
# compute weighted average score for the cluster
avg_score = np.sum(match_scores) / denom
# compute weighted average of coordinates for the cluster. now only take existing
# predictions into account.
avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)]
if dim == 3:
avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores))
avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores))
if box_regress is not None:
# compute wt. avg. of regression vectors (component-wise average)
avg_regress = np.sum(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0) / np.sum(
match_scores)
avg_rg_bins = np.round(np.sum(box_rg_bins[order[matches]] * match_scores) / np.sum(match_scores))
avg_rg_uncs = np.sum(box_rg_uncs[order[matches]] * match_scores) / np.sum(match_scores)
else:
avg_regress = np.array(float('NaN'))
avg_rg_bins = np.array(float('NaN'))
avg_rg_uncs = np.array(float('NaN'))
# some clusters might have very low scores due to high amounts of missing predictions.
# filter out the with a conservative threshold, to speed up evaluation.
if avg_score > 0.01:
keep_scores.append(avg_score)
keep_coords.append(avg_coords)
keep_n_missing.append((n_missing_preds / n_expected_preds * 100)) # relative
keep_regress.append(avg_regress)
keep_rg_uncs.append(avg_rg_uncs)
keep_rg_bins.append(avg_rg_bins)
# get index of all elements that were not matched and discard all others.
inds = np.nonzero(ovr <= thresh)[0]
inds_where = np.where(ovr<=thresh)[0]
assert np.all(inds == inds_where), "inds_nonzero {} \ninds_where {}".format(inds, inds_where)
order = order[inds]
return keep_scores, keep_coords, keep_n_missing, keep_regress, keep_rg_bins, keep_rg_uncs
def apply_nms_to_patient(inputs):
in_patient_results_list, pid, class_dict, iou_thresh = inputs
out_patient_results_list = []
# collect box predictions over batch dimension (slices) and store slice info as slice_ids.
for batch in in_patient_results_list:
batch_el_boxes = []
for cl in list(class_dict.keys()):
det_boxes = [box for box in batch if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
box_coords = np.array([box['box_coords'] for box in det_boxes])
box_scores = np.array([box['box_score'] for box in det_boxes])
if 0 not in box_scores.shape:
keep_ix = mutils.nms_numpy(box_coords, box_scores, iou_thresh)
else:
keep_ix = []
batch_el_boxes += [det_boxes[ix] for ix in keep_ix]
batch_el_boxes += [box for box in batch if box['box_type'] == 'gt']
out_patient_results_list.append(batch_el_boxes)
assert len(in_patient_results_list) == len(out_patient_results_list), "batch dim needs to be maintained, in: {}, out {}".format(len(in_patient_results_list), len(out_patient_results_list))
return [out_patient_results_list, pid]
def nms_2to3D(dets, thresh):
"""
Merges 2D boxes to 3D cubes. For this purpose, boxes of all slices are regarded as lying in one slice.
An adaptation of Non-maximum suppression is applied where clusters are found (like in NMS) with the extra constraint
that suppressed boxes have to have 'connected' z coordinates w.r.t the core slice (cluster center, highest
scoring box, the prevailing box). 'connected' z-coordinates are determined
as the z-coordinates with predictions until the first coordinate for which no prediction is found.
example: a cluster of predictions was found overlap > iou thresh in xy (like NMS). The z-coordinate of the highest
scoring box is 50. Other predictions have 23, 46, 48, 49, 51, 52, 53, 56, 57.
Only the coordinates connected with 50 are clustered to one cube: 48, 49, 51, 52, 53. (46 not because nothing was
found in 47, so 47 is a 'hole', which interrupts the connection). Only the boxes corresponding to these coordinates
are suppressed. All others are kept for building of further clusters.
This algorithm works better with a certain min_confidence of predictions, because low confidence (e.g. noisy/cluttery)
predictions can break the relatively strong assumption of defining cubes' z-boundaries at the first 'hole' in the cluster.
:param dets: (n_detections, (y1, x1, y2, x2, scores, slice_id)
    :param thresh: iou matching threshold (like in NMS).
:return: keep: (n_keep,) 1D tensor of indices to be kept.
:return: keep_z: (n_keep, [z1, z2]) z-coordinates to be added to boxes, which are kept in order to form cubes.
"""
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """the definition of the coordinates is crucially important here:
where maximum is taken needs to be the lower coordinate"""
scores = dets[:, -2]
slice_id = dets[:, -1]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
keep_z = []
while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order]) # highest scoring element still in >order<, is compared to itself: okay?
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
h = np.maximum(0.0, yy2 - yy1 + 1)
w = np.maximum(0.0, xx2 - xx1 + 1)
inter = h * w
iou = inter / (areas[i] + areas[order] - inter)
matches = np.argwhere(
iou > thresh) # get all the elements that match the current box and have a lower score
slice_ids = slice_id[order[matches]]
core_slice = slice_id[int(i)]
upper_holes = [ii for ii in np.arange(core_slice, np.max(slice_ids)) if ii not in slice_ids]
lower_holes = [ii for ii in np.arange(np.min(slice_ids), core_slice) if ii not in slice_ids]
max_valid_slice_id = np.min(upper_holes) if len(upper_holes) > 0 else np.max(slice_ids)
min_valid_slice_id = np.max(lower_holes) if len(lower_holes) > 0 else np.min(slice_ids)
z_matches = matches[(slice_ids <= max_valid_slice_id) & (slice_ids >= min_valid_slice_id)]
# expand by one z voxel since box content is surrounded w/o overlap, i.e., z-content computed as z2-z1
z1 = np.min(slice_id[order[z_matches]]) - 1
z2 = np.max(slice_id[order[z_matches]]) + 1
keep.append(i)
keep_z.append([z1, z2])
order = np.delete(order, z_matches, axis=0)
return keep, keep_z
def apply_2d_3d_merging_to_patient(inputs):
"""
wrapper around 2Dto3D merging operation. Processes a single patient. Takes 2D patient results (slices in batch dimension)
    and returns 3D patient results (dummy batch dimension of 1). Applies an adaptation of Non-Maximum Suppression
(Detailed methodology is described in nms_2to3D).
:return. results_dict_boxes: list over batch elements (1 in 3D). each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]].
:return. pid: string. patient id.
"""
in_patient_results_list, pid, class_dict, merge_3D_iou = inputs
out_patient_results_list = []
for cl in list(class_dict.keys()):
det_boxes, slice_ids = [], []
# collect box predictions over batch dimension (slices) and store slice info as slice_ids.
for batch_ix, batch in enumerate(in_patient_results_list):
batch_element_det_boxes = [(ix, box) for ix, box in enumerate(batch) if
(box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
det_boxes += batch_element_det_boxes
slice_ids += [batch_ix] * len(batch_element_det_boxes)
box_coords = np.array([batch[1]['box_coords'] for batch in det_boxes])
box_scores = np.array([batch[1]['box_score'] for batch in det_boxes])
slice_ids = np.array(slice_ids)
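        # nms_2to3D expects rows of (y1, x1, y2, x2, score, slice_id), so scores and slice ids are appended as extra columns below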
if 0 not in box_scores.shape:
keep_ix, keep_z = nms_2to3D(
np.concatenate((box_coords, box_scores[:, None], slice_ids[:, None]), axis=1), merge_3D_iou)
else:
keep_ix, keep_z = [], []
# store kept predictions in new results list and add corresponding z-dimension info to coordinates.
for kix, kz in zip(keep_ix, keep_z):
keep_box = det_boxes[kix][1]
keep_box['box_coords'] = list(keep_box['box_coords']) + kz
out_patient_results_list.append(keep_box)
gt_boxes = [box for b in in_patient_results_list for box in b if box['box_type'] == 'gt']
if len(gt_boxes) > 0:
assert np.all([len(box["box_coords"]) == 6 for box in gt_boxes]), "expanded preds to 3D but GT is 2D."
out_patient_results_list += gt_boxes
return [[out_patient_results_list], pid] # additional list wrapping is extra batch dim.
class Predictor:
"""
Prediction pipeline:
- receives a patched patient image (n_patches, c, y, x, (z)) from patient data loader.
- forwards patches through model in chunks of batch_size. (method: batch_tiling_forward)
- unmolds predictions (boxes and segmentations) to original patient coordinates. (method: spatial_tiling_forward)
Ensembling (mode == 'test'):
    - for inference, forwards 4 mirrored versions of the image through the model and unmolds predictions afterwards
accordingly (method: data_aug_forward)
- for inference, loads multiple parameter-sets of the trained model corresponding to different epochs. for each
parameter-set loops over entire test set, runs prediction pipeline for each patient. (method: predict_test_set)
Consolidation of predictions:
- consolidates a patient's predictions (boxes, segmentations) collected over patches, data_aug- and temporal ensembling,
      performs clustering and weighted averaging (external function: apply_wbc_to_patient) to obtain consistent outputs.
    - for 2D networks, consolidates box predictions to 3D cubes via clustering (adaptation of non-maximum suppression).
(external function: apply_2d_3d_merging_to_patient)
Ground truth handling:
    - dismisses any ground truth boxes returned by the model (happens in validation mode, patch-based groundtruth)
- if provided by data loader, adds patient-wise ground truth to the final predictions to be passed to the evaluator.
"""
def __init__(self, cf, net, logger, mode):
self.cf = cf
self.batch_size = cf.batch_size
self.logger = logger
self.mode = mode
self.net = net
self.n_ens = 1
self.rank_ix = '0'
self.regress_flag = any(['regression' in task for task in self.cf.prediction_tasks])
if self.cf.merge_2D_to_3D_preds:
assert self.cf.dim == 2, "Merge 2Dto3D only valid for 2D preds, but current dim is {}.".format(self.cf.dim)
if self.mode == 'test':
last_state_path = os.path.join(self.cf.fold_dir, 'last_state.pth')
try:
self.model_index = torch.load(last_state_path)["model_index"]
self.model_index = self.model_index[self.model_index["rank"] <= self.cf.test_n_epochs]
except FileNotFoundError:
raise FileNotFoundError('no last_state/model_index file in fold directory. '
'seems like you are trying to run testing without prior training...')
self.n_ens = cf.test_n_epochs
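            # expected number of predictions per position: one per loaded checkpoint, multiplied below by (1 + number of mirror augmentations)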
if self.cf.test_aug_axes is not None:
self.n_ens *= (len(self.cf.test_aug_axes)+1)
self.example_plot_dir = os.path.join(cf.test_dir, "example_plots")
os.makedirs(self.example_plot_dir, exist_ok=True)
def batch_tiling_forward(self, batch):
"""
        calls the actual network forward method. in patch-based prediction, the batch dimension might be overloaded
with n_patches >> batch_size, which would exceed gpu memory. In this case, batches are processed in chunks of
batch_size. validation mode calls the train method to monitor losses (returned ground truth objects are discarded).
test mode calls the test forward method, no ground truth required / involved.
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
img = batch['data']
if img.shape[0] <= self.batch_size:
if self.mode == 'val':
# call training method to monitor losses
results_dict = self.net.train_forward(batch, is_validation=True)
# discard returned ground-truth boxes (also training info boxes).
results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
elif self.mode == 'test':
results_dict = self.net.test_forward(batch, return_masks=self.cf.return_masks_in_test)
else: # needs batch tiling
split_ixs = np.split(np.arange(img.shape[0]), np.arange(img.shape[0])[::self.batch_size])
chunk_dicts = []
for chunk_ixs in split_ixs[1:]: # first split is elements before 0, so empty
b = {k: batch[k][chunk_ixs] for k in batch.keys()
if (isinstance(batch[k], np.ndarray) and batch[k].shape[0] == img.shape[0])}
if self.mode == 'val':
chunk_dicts += [self.net.train_forward(b, is_validation=True)]
else:
chunk_dicts += [self.net.test_forward(b, return_masks=self.cf.return_masks_in_test)]
results_dict = {}
# flatten out batch elements from chunks ([chunk, chunk] -> [b, b, b, b, ...])
results_dict['boxes'] = [item for d in chunk_dicts for item in d['boxes']]
results_dict['seg_preds'] = np.array([item for d in chunk_dicts for item in d['seg_preds']])
if self.mode == 'val':
# if hasattr(self.cf, "losses_to_monitor"):
# loss_names = self.cf.losses_to_monitor
# else:
# loss_names = {name for dic in chunk_dicts for name in dic if 'loss' in name}
# estimate patient loss by mean over batch_chunks. Most similar to training loss.
results_dict['torch_loss'] = torch.mean(torch.cat([d['torch_loss'] for d in chunk_dicts]))
results_dict['class_loss'] = np.mean([d['class_loss'] for d in chunk_dicts])
# discard returned ground-truth boxes (also training info boxes).
results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
return results_dict
def spatial_tiling_forward(self, batch, patch_crops = None, n_aug='0'):
"""
forwards batch to batch_tiling_forward method and receives and returns a dictionary with results.
if patch-based prediction, the results received from batch_tiling_forward will be on a per-patch-basis.
this method uses the provided patch_crops to re-transform all predictions to whole-image coordinates.
Patch-origin information of all box-predictions will be needed for consolidation, hence it is stored as
'patch_id', which is a unique string for each patch (also takes current data aug and temporal epoch instances
        into account). all box predictions get additional information about the amount of overlapping patches at the
respective position (used for consolidation).
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- monitor_values (only in validation mode)
returned dict is a flattened version with 1 batch instance (3D) or slices (2D)
"""
if patch_crops is not None:
#print("patch_crops not None, applying patch center factor")
patches_dict = self.batch_tiling_forward(batch)
results_dict = {'boxes': [[] for _ in range(batch['original_img_shape'][0])]}
#bc of ohe--> channel dim of seg has size num_classes
out_seg_shape = list(batch['original_img_shape'])
out_seg_shape[1] = patches_dict["seg_preds"].shape[1]
out_seg_preds = np.zeros(out_seg_shape, dtype=np.float16)
patch_overlap_map = np.zeros_like(out_seg_preds, dtype='uint8')
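            # patch_overlap_map counts how many patches cover each voxel, so the summed seg predictions can be normalized to an average below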
for pix, pc in enumerate(patch_crops):
if self.cf.dim == 3:
out_seg_preds[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += patches_dict['seg_preds'][pix]
patch_overlap_map[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += 1
elif self.cf.dim == 2:
out_seg_preds[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += patches_dict['seg_preds'][pix]
patch_overlap_map[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += 1
out_seg_preds[patch_overlap_map > 0] /= patch_overlap_map[patch_overlap_map > 0]
results_dict['seg_preds'] = out_seg_preds
for pix, pc in enumerate(patch_crops):
patch_boxes = patches_dict['boxes'][pix]
for box in patch_boxes:
# add unique patch id for consolidation of predictions.
box['patch_id'] = self.rank_ix + '_' + n_aug + '_' + str(pix)
                    # boxes from the edges of a patch have a lower prediction quality than the ones at patch-centers.
# hence they will be down-weighted for consolidation, using the 'box_patch_center_factor', which is
# obtained by a gaussian distribution over positions in the patch and average over spatial dimensions.
# Also the info 'box_n_overlaps' is stored for consolidation, which represents the amount of
# overlapping patches at the box's position.
c = box['box_coords']
#box_centers = np.array([(c[ii] + c[ii+2])/2 for ii in range(len(c)//2)])
box_centers = [(c[ii] + c[ii + 2]) / 2 for ii in range(2)]
if self.cf.dim == 3:
box_centers.append((c[4] + c[5]) / 2)
box['box_patch_center_factor'] = np.mean(
[norm.pdf(bc, loc=pc, scale=pc * 0.8) * np.sqrt(2 * np.pi) * pc * 0.8 for bc, pc in
zip(box_centers, np.array(self.cf.patch_size) / 2)])
if self.cf.dim == 3:
c += np.array([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])
int_c = [int(np.floor(ii)) if ix%2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
box['box_n_overlaps'] = np.mean(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]])
results_dict['boxes'][0].append(box)
else:
c += np.array([pc[0], pc[2], pc[0], pc[2]])
int_c = [int(np.floor(ii)) if ix % 2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
box['box_n_overlaps'] = np.mean(
patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])
results_dict['boxes'][pc[4]].append(box)
if self.mode == 'val':
results_dict['torch_loss'] = patches_dict['torch_loss']
results_dict['class_loss'] = patches_dict['class_loss']
else:
results_dict = self.batch_tiling_forward(batch)
for b in results_dict['boxes']:
for box in b:
box['box_patch_center_factor'] = 1
box['box_n_overlaps'] = 1
box['patch_id'] = self.rank_ix + '_' + n_aug
return results_dict
def data_aug_forward(self, batch):
"""
in val_mode: passes batch through to spatial_tiling method without data_aug.
        in test_mode: if cf.test_aug is set in configs, creates 4 mirrored versions of the input image,
passes all of them to the next processing step (spatial_tiling method) and re-transforms returned predictions
to original image version.
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
patch_crops = batch['patch_crop_coords'] if self.patched_patient else None
results_list = [self.spatial_tiling_forward(batch, patch_crops)]
org_img_shape = batch['original_img_shape']
if self.mode == 'test' and self.cf.test_aug_axes is not None:
if isinstance(self.cf.test_aug_axes, (int, float)):
self.cf.test_aug_axes = (self.cf.test_aug_axes,)
#assert np.all(np.array(self.cf.test_aug_axes)<self.cf.dim), "test axes {} need to be spatial axes".format(self.cf.test_aug_axes)
if self.patched_patient:
# apply mirror transformations to patch-crop coordinates, for correct tiling in spatial_tiling method.
mirrored_patch_crops = get_mirrored_patch_crops_ax_dep(patch_crops, batch['original_img_shape'],
self.cf.test_aug_axes)
self.logger.info("mirrored patch crop coords for patched patient in test augs!")
else:
mirrored_patch_crops = [None] * 3
img = np.copy(batch['data'])
for n_aug, sp_axis in enumerate(self.cf.test_aug_axes):
#sp_axis = np.array(axis) #-2 #spatial axis index
axis = np.array(sp_axis)+2
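                # +2 maps the spatial axis index to the corresponding array axis of the (b, c, y, x, (z)) data tensor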
if isinstance(sp_axis, (int, float)):
# mirroring along one axis at a time
batch['data'] = np.flip(img, axis=axis).copy()
chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
# re-transform coordinates.
for ix in range(len(chunk_dict['boxes'])):
for boxix in range(len(chunk_dict['boxes'][ix])):
coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
coords[sp_axis] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis+2]
coords[sp_axis+2] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis]
assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
# re-transform segmentation predictions.
chunk_dict['seg_preds'] = np.flip(chunk_dict['seg_preds'], axis=axis)
elif hasattr(sp_axis, "__iter__") and tuple(sp_axis)==(0,1) or tuple(sp_axis)==(1,0):
#NEED: mirrored patch crops are given as [(y-axis), (x-axis), (y-,x-axis)], obey this order!
# mirroring along two axes at same time
batch['data'] = np.flip(np.flip(img, axis=axis[0]), axis=axis[1]).copy()
chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
# re-transform coordinates.
for ix in range(len(chunk_dict['boxes'])):
for boxix in range(len(chunk_dict['boxes'][ix])):
coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
coords[sp_axis[0]] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]+2]
coords[sp_axis[0]+2] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]]
coords[sp_axis[1]] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]+2]
coords[sp_axis[1]+2] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]]
assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
# re-transform segmentation predictions.
chunk_dict['seg_preds'] = np.flip(np.flip(chunk_dict['seg_preds'], axis=axis[0]), axis=axis[1]).copy()
else:
raise Exception("Invalid axis type {} in test augs".format(type(axis)))
results_list.append(chunk_dict)
batch['data'] = img
# aggregate all boxes/seg_preds per batch element from data_aug predictions.
results_dict = {}
results_dict['boxes'] = [[item for d in results_list for item in d['boxes'][batch_instance]]
for batch_instance in range(org_img_shape[0])]
# results_dict['seg_preds'] = np.array([[item for d in results_list for item in d['seg_preds'][batch_instance]]
# for batch_instance in range(org_img_shape[0])])
results_dict['seg_preds'] = np.stack([dic['seg_preds'] for dic in results_list], axis=1)
# needs segs probs in seg_preds entry:
results_dict['seg_preds'] = np.sum(results_dict['seg_preds'], axis=1) #add up seg probs from different augs per class
if self.mode == 'val':
results_dict['torch_loss'] = results_list[0]['torch_loss']
results_dict['class_loss'] = results_list[0]['class_loss']
return results_dict
def load_saved_predictions(self):
"""loads raw predictions saved by self.predict_test_set. aggregates and/or merges 2D boxes to 3D cubes for
evaluation (if model predicts 2D but evaluation is run in 3D), according to settings config.
:return: list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'batch_dices': dice scores as recorded in raw prediction results.
- 'seg_preds': not implemented yet. could replace dices by seg preds to have raw seg info available, however
would consume critically large memory amount. todo evaluation of instance/semantic segmentation.
"""
results_file = 'pred_results.pkl' if not self.cf.hold_out_test_set else 'pred_results_held_out.pkl'
if not self.cf.hold_out_test_set or not self.cf.ensemble_folds:
self.logger.info("loading saved predictions of fold {}".format(self.cf.fold))
with open(os.path.join(self.cf.fold_dir, results_file), 'rb') as handle:
results_list = pickle.load(handle)
box_results_list = [(res_dict["boxes"], pid) for res_dict, pid in results_list]
da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
self.n_ens = self.cf.test_n_epochs * da_factor
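            # n_ens: total number of ensembled predictions per object, i.e. one per loaded
            # test epoch times (number of mirroring variants + 1 for the unmirrored pass);
            # it is handed to the WBC clustering below.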
self.logger.info('loaded raw test set predictions with n_patients = {} and n_ens = {}'.format(
len(results_list), self.n_ens))
else:
self.logger.info("loading saved predictions of hold-out test set")
fold_dirs = sorted([os.path.join(self.cf.exp_dir, f) for f in os.listdir(self.cf.exp_dir) if
os.path.isdir(os.path.join(self.cf.exp_dir, f)) and f.startswith("fold")])
results_list = []
folds_loaded = 0
for fold in range(self.cf.n_cv_splits):
fold_dir = os.path.join(self.cf.exp_dir, 'fold_{}'.format(fold))
if fold_dir in fold_dirs:
with open(os.path.join(fold_dir, results_file), 'rb') as handle:
fold_list = pickle.load(handle)
results_list += fold_list
folds_loaded += 1
else:
self.logger.info("Skipping fold {} since no saved predictions found.".format(fold))
box_results_list = []
for res_dict, pid in results_list: #without filtering gt out:
box_results_list.append((res_dict['boxes'], pid))
#it's usually not right to filter out gts here, is it?
da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
self.n_ens = self.cf.test_n_epochs * da_factor * folds_loaded
# -------------- aggregation of boxes via clustering -----------------
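        # WBC ("weighted box clustering", see apply_wbc_to_patient) merges overlapping box
        # predictions from the different epochs/augmentations into consensus boxes by
        # score-weighted averaging of their coordinates, whereas plain NMS only keeps the
        # highest-scoring box of each cluster.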
if self.cf.clustering == "wbc":
self.logger.info('applying WBC to test-set predictions with iou {} and n_ens {} over {} patients'.format(
self.cf.clustering_iou, self.n_ens, len(box_results_list)))
mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii
in box_results_list]
del box_results_list
pool = Pool(processes=self.cf.n_workers)
box_results_list = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
elif self.cf.clustering == "nms":
self.logger.info('applying standard NMS to test-set predictions with iou {} over {} patients.'.format(
self.cf.clustering_iou, len(box_results_list)))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in box_results_list]
del box_results_list
box_results_list = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
if self.cf.merge_2D_to_3D_preds:
self.logger.info('applying 2Dto3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in box_results_list]
box_results_list = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
for ix in range(len(results_list)):
assert np.all(results_list[ix][1] == box_results_list[ix][1]), "pid mismatch between loaded and aggregated results"
results_list[ix][0]["boxes"] = box_results_list[ix][0]
return results_list # holds (results_dict, pid)
def predict_patient(self, batch):
"""
predicts one patient.
called either directly via loop over validation set in exec.py (mode=='val')
or from self.predict_test_set (mode=='test).
in val mode: adds 3D ground truth info to predictions and runs consolidation and 2Dto3D merging of predictions.
in test mode: returns raw predictions (ground truth addition, consolidation, 2D to 3D merging are
done in self.predict_test_set, because patient predictions across several epochs might be needed
to be collected first, in case of temporal ensembling).
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
#if self.mode=="test":
# self.logger.info('predicting patient {} for fold {} '.format(np.unique(batch['pid']), self.cf.fold))
# True if patient is provided in patches and predictions need to be tiled.
self.patched_patient = 'patch_crop_coords' in list(batch.keys())
# forward batch through prediction pipeline.
results_dict = self.data_aug_forward(batch)
#has seg probs in entry 'seg_preds'
if self.mode == 'val':
for b in range(batch['patient_bb_target'].shape[0]):
for t in range(len(batch['patient_bb_target'][b])):
gt_box = {'box_type': 'gt', 'box_coords': batch['patient_bb_target'][b][t],
'class_targets': batch['patient_class_targets'][b][t]}
for name in self.cf.roi_items:
gt_box.update({name : batch['patient_'+name][b][t]})
results_dict['boxes'][b].append(gt_box)
if 'dice' in self.cf.metrics:
if self.patched_patient:
assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
results_dict['seg_preds'], batch["patient_seg"] if self.patched_patient else batch['seg'],
self.cf.num_seg_classes, convert_to_ohe=True)
if self.patched_patient and self.cf.clustering == "wbc":
wbc_input = [self.regress_flag, results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou, self.n_ens]
results_dict['boxes'] = apply_wbc_to_patient(wbc_input)[0]
elif self.patched_patient:
nms_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou]
results_dict['boxes'] = apply_nms_to_patient(nms_inputs)[0]
if self.cf.merge_2D_to_3D_preds:
results_dict['2D_boxes'] = results_dict['boxes']
merge_dims_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.merge_3D_iou]
results_dict['boxes'] = apply_2d_3d_merging_to_patient(merge_dims_inputs)[0]
return results_dict
def predict_test_set(self, batch_gen, return_results=True):
"""
wrapper around test method, which loads multiple (or one) epoch parameters (temporal ensembling), loops through
the test set and collects predictions per patient. Also flattens the results per patient and epoch
and adds optional ground truth boxes for evaluation. Saves out the raw result list for later analysis and
optionally consolidates and returns predictions immediately.
:return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': not implemented yet. todo evaluation of instance/semantic segmentation.
"""
# -------------- raw predicting -----------------
dict_of_patients_results = OrderedDict()
set_of_result_types = set()
self.model_index = self.model_index.sort_values(by="rank")
# get paths of all parameter sets to be loaded for temporal ensembling. (or just one for no temp. ensembling).
weight_paths = [os.path.join(self.cf.fold_dir, file_name) for file_name in self.model_index["file_name"]]
for rank_ix, weight_path in enumerate(weight_paths):
self.logger.info(('tmp ensembling over rank_ix:{} epoch:{}'.format(rank_ix, weight_path)))
self.net.load_state_dict(torch.load(weight_path))
self.net.eval()
self.rank_ix = str(rank_ix)
plot_batches = np.random.choice(np.arange(batch_gen['n_test']),
size=min(batch_gen['n_test'], self.cf.n_test_plots), replace=False)
with torch.no_grad():
for i in range(batch_gen['n_test']):
batch = next(batch_gen['test'])
pid = np.unique(batch['pid'])
assert len(pid)==1
pid = pid[0]
if not pid in dict_of_patients_results.keys(): # store batch info in patient entry of results dict.
dict_of_patients_results[pid] = {}
dict_of_patients_results[pid]['results_dicts'] = []
dict_of_patients_results[pid]['patient_bb_target'] = batch['patient_bb_target']
for name in self.cf.roi_items:
dict_of_patients_results[pid]["patient_"+name] = batch["patient_"+name]
stime = time.time()
results_dict = self.predict_patient(batch) #only holds "boxes", "seg_preds"
# needs ohe seg probs in seg_preds entry:
results_dict['seg_preds'] = np.argmax(results_dict['seg_preds'], axis=1)[:,np.newaxis]
print("\rpredicting patient {} with weight rank {} (progress: {}/{}) took {:.2f}s".format(
str(pid), rank_ix, (rank_ix)*batch_gen['n_test']+(i+1), len(weight_paths)*batch_gen['n_test'],
time.time()-stime), end="", flush=True)
if i in plot_batches and (not self.patched_patient or 'patient_data' in batch.keys()):
try:
# view qualitative results of random test case
out_file = os.path.join(self.example_plot_dir,
'batch_example_test_{}_rank_{}.png'.format(self.cf.fold, rank_ix))
utils.split_off_process(plg.view_batch, self.cf, batch, results_dict,
has_colorchannels=self.cf.has_colorchannels,
show_gt_labels=True, show_seg_ids='dice' in self.cf.metrics,
get_time="test-example plot", out_file=out_file)
except Exception as e:
self.logger.info("WARNING: error in view_batch: {}".format(e))
if 'dice' in self.cf.metrics:
if self.patched_patient:
assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
results_dict['batch_dices'] = mutils.dice_per_batch_and_class( results_dict['seg_preds'],
batch["patient_seg"] if self.patched_patient else batch['seg'],
self.cf.num_seg_classes, convert_to_ohe=True)
dict_of_patients_results[pid]['results_dicts'].append({k:v for k,v in results_dict.items()
if k in ["boxes", "batch_dices"]})
# collect result types to know which ones to look for when saving
set_of_result_types.update(dict_of_patients_results[pid]['results_dicts'][-1].keys())
# -------------- re-order, save raw results -----------------
self.logger.info('finished predicting test set. starting aggregation of predictions.')
results_per_patient = []
for pid, p_dict in dict_of_patients_results.items():
# dict_of_patients_results[pid]['results_list'] has length batch['n_test']
results_dict = {}
# collect all boxes/seg_preds of same batch_instance over temporal instances.
b_size = len(p_dict['results_dicts'][0]["boxes"])
for res_type in [rtype for rtype in set_of_result_types if rtype in ["boxes", "batch_dices"]]:#, "seg_preds"]]:
if not 'batch' in res_type: #assume it's results on batch-element basis
results_dict[res_type] = [[item for rank_dict in p_dict['results_dicts'] for item in rank_dict[res_type][batch_instance]]
for batch_instance in range(b_size)]
else:
results_dict[res_type] = []
                    for rank_dict in p_dict['results_dicts']:
                        if 'dice' in res_type:
                            item = rank_dict[res_type] #rank_dict['batch_dices'] has shape (num_seg_classes,)
assert len(item) == self.cf.num_seg_classes, \
"{}, {}".format(len(item), self.cf.num_seg_classes)
else:
raise NotImplementedError
results_dict[res_type].append(item)
# rdict[dice] shape (n_rank_epochs (n_saved_ranks), nsegclasses)
# calc mean over test epochs so inline with shape from sampling
results_dict[res_type] = np.mean(results_dict[res_type], axis=0) #maybe error type with other than dice
if not hasattr(self.cf, "eval_test_separately") or not self.cf.eval_test_separately:
# add unpatched 2D or 3D (if dim==3 or merge_2D_to_3D) ground truth boxes for evaluation.
for b in range(p_dict['patient_bb_target'].shape[0]):
for targ in range(len(p_dict['patient_bb_target'][b])):
gt_box = {'box_type': 'gt', 'box_coords':p_dict['patient_bb_target'][b][targ],
'class_targets': p_dict['patient_class_targets'][b][targ]}
for name in self.cf.roi_items:
gt_box.update({name: p_dict["patient_"+name][b][targ]})
results_dict['boxes'][b].append(gt_box)
results_per_patient.append([results_dict, pid])
out_string = 'pred_results_held_out' if self.cf.hold_out_test_set else 'pred_results'
with open(os.path.join(self.cf.fold_dir, '{}.pkl'.format(out_string)), 'wb') as handle:
pickle.dump(results_per_patient, handle)
if return_results:
# -------------- results processing, clustering, etc. -----------------
final_patient_box_results = [ (res_dict["boxes"], pid) for res_dict,pid in results_per_patient ]
if self.cf.clustering == "wbc":
self.logger.info('applying WBC to test-set predictions with iou = {} and n_ens = {}.'.format(
self.cf.clustering_iou, self.n_ens))
mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii in final_patient_box_results]
del final_patient_box_results
pool = Pool(processes=self.cf.n_workers)
final_patient_box_results = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
elif self.cf.clustering == "nms":
self.logger.info('applying standard NMS to test-set predictions with iou = {}.'.format(self.cf.clustering_iou))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in final_patient_box_results]
del final_patient_box_results
final_patient_box_results = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
if self.cf.merge_2D_to_3D_preds:
self.logger.info('applying 2D-to-3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in final_patient_box_results]
del final_patient_box_results
pool = Pool(processes=self.cf.n_workers)
final_patient_box_results = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
# final_patient_box_results holds [avg_boxes, pid] if wbc
for ix in range(len(results_per_patient)):
assert results_per_patient[ix][1] == final_patient_box_results[ix][1], "should be same pid"
results_per_patient[ix][0]["boxes"] = final_patient_box_results[ix][0]
# results_per_patient = [(res_dict["boxes"] = boxes, pid) for (boxes,pid) in final_patient_box_results]
return results_per_patient # holds list of (results_dict, pid)
|
[
"numpy.sqrt",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.flip",
"os.listdir",
"numpy.where",
"numpy.delete",
"numpy.max",
"numpy.stack",
"numpy.concatenate",
"numpy.min",
"numpy.maximum",
"collections.OrderedDict",
"numpy.ceil",
"pickle.load",
"numpy.argmax",
"numpy.floor",
"scipy.stats.norm.pdf",
"numpy.nonzero",
"time.time",
"torch.cat",
"numpy.copy",
"pickle.dump",
"numpy.minimum",
"os.makedirs",
"numpy.unique",
"torch.load",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"utils.exp_utils.split_off_process",
"multiprocessing.Pool",
"torch.no_grad",
"numpy.all",
"utils.model_utils.nms_numpy",
"numpy.zeros_like",
"utils.model_utils.dice_per_batch_and_class"
] |
[((10050, 10078), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order]'], {}), '(y1[i], y1[order])\n', (10060, 10078), True, 'import numpy as np\n'), ((10093, 10121), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order]'], {}), '(x1[i], x1[order])\n', (10103, 10121), True, 'import numpy as np\n'), ((10136, 10164), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order]'], {}), '(y2[i], y2[order])\n', (10146, 10164), True, 'import numpy as np\n'), ((10179, 10207), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order]'], {}), '(x2[i], x2[order])\n', (10189, 10207), True, 'import numpy as np\n'), ((10221, 10249), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1 + 1)'], {}), '(0, xx2 - xx1 + 1)\n', (10231, 10249), True, 'import numpy as np\n'), ((10262, 10290), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1 + 1)'], {}), '(0, yy2 - yy1 + 1)\n', (10272, 10290), True, 'import numpy as np\n'), ((10735, 10756), 'numpy.all', 'np.all', (['(ovr == ovr_fl)'], {}), '(ovr == ovr_fl)\n', (10741, 10756), True, 'import numpy as np\n'), ((14444, 14470), 'numpy.all', 'np.all', (['(inds == inds_where)'], {}), '(inds == inds_where)\n', (14450, 14470), True, 'import numpy as np\n'), ((17594, 17610), 'numpy.all', 'np.all', (['(y1 <= y2)'], {}), '(y1 <= y2)\n', (17600, 17610), True, 'import numpy as np\n'), ((17615, 17631), 'numpy.all', 'np.all', (['(x1 <= x2)'], {}), '(x1 <= x2)\n', (17621, 17631), True, 'import numpy as np\n'), ((18105, 18133), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order]'], {}), '(y1[i], y1[order])\n', (18115, 18133), True, 'import numpy as np\n'), ((18222, 18250), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order]'], {}), '(x1[i], x1[order])\n', (18232, 18250), True, 'import numpy as np\n'), ((18265, 18293), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order]'], {}), '(y2[i], y2[order])\n', (18275, 18293), True, 'import numpy as np\n'), ((18308, 18336), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order]'], {}), '(x2[i], x2[order])\n', (18318, 18336), True, 'import numpy as np\n'), ((18350, 18380), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (18360, 18380), True, 'import numpy as np\n'), ((18393, 18423), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (18403, 18423), True, 'import numpy as np\n'), ((18521, 18546), 'numpy.argwhere', 'np.argwhere', (['(iou > thresh)'], {}), '(iou > thresh)\n', (18532, 18546), True, 'import numpy as np\n'), ((19499, 19534), 'numpy.delete', 'np.delete', (['order', 'z_matches'], {'axis': '(0)'}), '(order, z_matches, axis=0)\n', (19508, 19534), True, 'import numpy as np\n'), ((20822, 20879), 'numpy.array', 'np.array', (["[batch[1]['box_coords'] for batch in det_boxes]"], {}), "([batch[1]['box_coords'] for batch in det_boxes])\n", (20830, 20879), True, 'import numpy as np\n'), ((20901, 20957), 'numpy.array', 'np.array', (["[batch[1]['box_score'] for batch in det_boxes]"], {}), "([batch[1]['box_score'] for batch in det_boxes])\n", (20909, 20957), True, 'import numpy as np\n'), ((20978, 20997), 'numpy.array', 'np.array', (['slice_ids'], {}), '(slice_ids)\n', (20986, 20997), True, 'import numpy as np\n'), ((39772, 39832), 'numpy.stack', 'np.stack', (["[dic['seg_preds'] for dic in results_list]"], {'axis': '(1)'}), "([dic['seg_preds'] for dic in results_list], axis=1)\n", (39780, 39832), True, 'import numpy as np\n'), ((39916, 39957), 'numpy.sum', 'np.sum', (["results_dict['seg_preds']"], {'axis': '(1)'}), "(results_dict['seg_preds'], axis=1)\n", (39922, 39957), 
True, 'import numpy as np\n'), ((50095, 50108), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (50106, 50108), False, 'from collections import OrderedDict\n'), ((5492, 5537), 'numpy.array', 'np.array', (["[b[1]['box_coords'] for b in boxes]"], {}), "([b[1]['box_coords'] for b in boxes])\n", (5500, 5537), True, 'import numpy as np\n'), ((5563, 5607), 'numpy.array', 'np.array', (["[b[1]['box_score'] for b in boxes]"], {}), "([b[1]['box_score'] for b in boxes])\n", (5571, 5607), True, 'import numpy as np\n'), ((5640, 5698), 'numpy.array', 'np.array', (["[b[1]['box_patch_center_factor'] for b in boxes]"], {}), "([b[1]['box_patch_center_factor'] for b in boxes])\n", (5648, 5698), True, 'import numpy as np\n'), ((5728, 5777), 'numpy.array', 'np.array', (["[b[1]['box_n_overlaps'] for b in boxes]"], {}), "([b[1]['box_n_overlaps'] for b in boxes])\n", (5736, 5777), True, 'import numpy as np\n'), ((10353, 10381), 'numpy.maximum', 'np.maximum', (['z1[i]', 'z1[order]'], {}), '(z1[i], z1[order])\n', (10363, 10381), True, 'import numpy as np\n'), ((10400, 10428), 'numpy.minimum', 'np.minimum', (['z2[i]', 'z2[order]'], {}), '(z2[i], z2[order])\n', (10410, 10428), True, 'import numpy as np\n'), ((10445, 10473), 'numpy.maximum', 'np.maximum', (['(0)', '(zz2 - zz1 + 1)'], {}), '(0, zz2 - zz1 + 1)\n', (10455, 10473), True, 'import numpy as np\n'), ((10901, 10925), 'numpy.nonzero', 'np.nonzero', (['(ovr > thresh)'], {}), '(ovr > thresh)\n', (10911, 10925), True, 'import numpy as np\n'), ((11821, 11841), 'numpy.mean', 'np.mean', (['match_n_ovs'], {}), '(match_n_ovs)\n', (11828, 11841), True, 'import numpy as np\n'), ((12238, 12265), 'numpy.sum', 'np.sum', (['match_score_weights'], {}), '(match_score_weights)\n', (12244, 12265), True, 'import numpy as np\n'), ((12393, 12413), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12399, 12413), True, 'import numpy as np\n'), ((14354, 14379), 'numpy.nonzero', 'np.nonzero', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (14364, 14379), True, 'import numpy as np\n'), ((14404, 14427), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (14412, 14427), True, 'import numpy as np\n'), ((15139, 15189), 'numpy.array', 'np.array', (["[box['box_coords'] for box in det_boxes]"], {}), "([box['box_coords'] for box in det_boxes])\n", (15147, 15189), True, 'import numpy as np\n'), ((15215, 15264), 'numpy.array', 'np.array', (["[box['box_score'] for box in det_boxes]"], {}), "([box['box_score'] for box in det_boxes])\n", (15223, 15264), True, 'import numpy as np\n'), ((18949, 18968), 'numpy.min', 'np.min', (['upper_holes'], {}), '(upper_holes)\n', (18955, 18968), True, 'import numpy as np\n'), ((18998, 19015), 'numpy.max', 'np.max', (['slice_ids'], {}), '(slice_ids)\n', (19004, 19015), True, 'import numpy as np\n'), ((19045, 19064), 'numpy.max', 'np.max', (['lower_holes'], {}), '(lower_holes)\n', (19051, 19064), True, 'import numpy as np\n'), ((19094, 19111), 'numpy.min', 'np.min', (['slice_ids'], {}), '(slice_ids)\n', (19100, 19111), True, 'import numpy as np\n'), ((19336, 19370), 'numpy.min', 'np.min', (['slice_id[order[z_matches]]'], {}), '(slice_id[order[z_matches]])\n', (19342, 19370), True, 'import numpy as np\n'), ((19388, 19422), 'numpy.max', 'np.max', (['slice_id[order[z_matches]]'], {}), '(slice_id[order[z_matches]])\n', (19394, 19422), True, 'import numpy as np\n'), ((24004, 24052), 'os.path.join', 'os.path.join', (['self.cf.fold_dir', '"""last_state.pth"""'], {}), "(self.cf.fold_dir, 'last_state.pth')\n", (24016, 
24052), False, 'import os\n'), ((24676, 24718), 'os.path.join', 'os.path.join', (['cf.test_dir', '"""example_plots"""'], {}), "(cf.test_dir, 'example_plots')\n", (24688, 24718), False, 'import os\n'), ((24731, 24780), 'os.makedirs', 'os.makedirs', (['self.example_plot_dir'], {'exist_ok': '(True)'}), '(self.example_plot_dir, exist_ok=True)\n', (24742, 24780), False, 'import os\n'), ((27335, 27399), 'numpy.array', 'np.array', (["[item for d in chunk_dicts for item in d['seg_preds']]"], {}), "([item for d in chunk_dicts for item in d['seg_preds']])\n", (27343, 27399), True, 'import numpy as np\n'), ((30136, 30177), 'numpy.zeros', 'np.zeros', (['out_seg_shape'], {'dtype': 'np.float16'}), '(out_seg_shape, dtype=np.float16)\n', (30144, 30177), True, 'import numpy as np\n'), ((30210, 30253), 'numpy.zeros_like', 'np.zeros_like', (['out_seg_preds'], {'dtype': '"""uint8"""'}), "(out_seg_preds, dtype='uint8')\n", (30223, 30253), True, 'import numpy as np\n'), ((35770, 35792), 'numpy.copy', 'np.copy', (["batch['data']"], {}), "(batch['data'])\n", (35777, 35792), True, 'import numpy as np\n'), ((43876, 43909), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (43880, 43909), False, 'from multiprocessing import Pool\n'), ((44836, 44869), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (44840, 44869), False, 'from multiprocessing import Pool\n'), ((45215, 45269), 'numpy.all', 'np.all', (['(results_list[ix][1] == box_results_list[ix][1])'], {}), '(results_list[ix][1] == box_results_list[ix][1])\n', (45221, 45269), True, 'import numpy as np\n'), ((50356, 50397), 'os.path.join', 'os.path.join', (['self.cf.fold_dir', 'file_name'], {}), '(self.cf.fold_dir, file_name)\n', (50368, 50397), False, 'import os\n'), ((57028, 57068), 'pickle.dump', 'pickle.dump', (['results_per_patient', 'handle'], {}), '(results_per_patient, handle)\n', (57039, 57068), False, 'import pickle\n'), ((5826, 5869), 'numpy.array', 'np.array', (["[b[1]['patch_id'] for b in boxes]"], {}), "([b[1]['patch_id'] for b in boxes])\n", (5834, 5869), True, 'import numpy as np\n'), ((6065, 6110), 'numpy.array', 'np.array', (["[b[1]['regression'] for b in boxes]"], {}), "([b[1]['regression'] for b in boxes])\n", (6073, 6110), True, 'import numpy as np\n'), ((12286, 12314), 'numpy.mean', 'np.mean', (['match_score_weights'], {}), '(match_score_weights)\n', (12293, 12314), True, 'import numpy as np\n'), ((12571, 12612), 'numpy.sum', 'np.sum', (['(y1[order[matches]] * match_scores)'], {}), '(y1[order[matches]] * match_scores)\n', (12577, 12612), True, 'import numpy as np\n'), ((12615, 12635), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12621, 12635), True, 'import numpy as np\n'), ((12659, 12700), 'numpy.sum', 'np.sum', (['(x1[order[matches]] * match_scores)'], {}), '(x1[order[matches]] * match_scores)\n', (12665, 12700), True, 'import numpy as np\n'), ((12703, 12723), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12709, 12723), True, 'import numpy as np\n'), ((12747, 12788), 'numpy.sum', 'np.sum', (['(y2[order[matches]] * match_scores)'], {}), '(y2[order[matches]] * match_scores)\n', (12753, 12788), True, 'import numpy as np\n'), ((12791, 12811), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12797, 12811), True, 'import numpy as np\n'), ((12835, 12876), 'numpy.sum', 'np.sum', (['(x2[order[matches]] * match_scores)'], {}), '(x2[order[matches]] * match_scores)\n', 
(12841, 12876), True, 'import numpy as np\n'), ((12879, 12899), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (12885, 12899), True, 'import numpy as np\n'), ((13256, 13329), 'numpy.sum', 'np.sum', (['(box_regress[order[matches]] * match_scores[:, np.newaxis])'], {'axis': '(0)'}), '(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0)\n', (13262, 13329), True, 'import numpy as np\n'), ((13332, 13352), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13338, 13352), True, 'import numpy as np\n'), ((13506, 13556), 'numpy.sum', 'np.sum', (['(box_rg_uncs[order[matches]] * match_scores)'], {}), '(box_rg_uncs[order[matches]] * match_scores)\n', (13512, 13556), True, 'import numpy as np\n'), ((13559, 13579), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13565, 13579), True, 'import numpy as np\n'), ((15333, 15385), 'utils.model_utils.nms_numpy', 'mutils.nms_numpy', (['box_coords', 'box_scores', 'iou_thresh'], {}), '(box_coords, box_scores, iou_thresh)\n', (15349, 15385), True, 'import utils.model_utils as mutils\n'), ((21094, 21171), 'numpy.concatenate', 'np.concatenate', (['(box_coords, box_scores[:, None], slice_ids[:, None])'], {'axis': '(1)'}), '((box_coords, box_scores[:, None], slice_ids[:, None]), axis=1)\n', (21108, 21171), True, 'import numpy as np\n'), ((26485, 26508), 'numpy.arange', 'np.arange', (['img.shape[0]'], {}), '(img.shape[0])\n', (26494, 26508), True, 'import numpy as np\n'), ((27930, 27977), 'numpy.mean', 'np.mean', (["[d['class_loss'] for d in chunk_dicts]"], {}), "([d['class_loss'] for d in chunk_dicts])\n", (27937, 27977), True, 'import numpy as np\n'), ((41600, 41619), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (41611, 41619), False, 'import pickle\n'), ((44311, 44344), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (44315, 44344), False, 'from multiprocessing import Pool\n'), ((47884, 48062), 'utils.model_utils.dice_per_batch_and_class', 'mutils.dice_per_batch_and_class', (["results_dict['seg_preds']", "(batch['patient_seg'] if self.patched_patient else batch['seg'])", 'self.cf.num_seg_classes'], {'convert_to_ohe': '(True)'}), "(results_dict['seg_preds'], batch[\n 'patient_seg'] if self.patched_patient else batch['seg'], self.cf.\n num_seg_classes, convert_to_ohe=True)\n", (47915, 48062), True, 'import utils.model_utils as mutils\n'), ((50649, 50672), 'torch.load', 'torch.load', (['weight_path'], {}), '(weight_path)\n', (50659, 50672), False, 'import torch\n'), ((50786, 50816), 'numpy.arange', 'np.arange', (["batch_gen['n_test']"], {}), "(batch_gen['n_test'])\n", (50795, 50816), True, 'import numpy as np\n'), ((50947, 50962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (50960, 50962), False, 'import torch\n'), ((57726, 57759), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (57730, 57759), False, 'from multiprocessing import Pool\n'), ((58902, 58935), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (58906, 58935), False, 'from multiprocessing import Pool\n'), ((5993, 6034), 'numpy.array', 'np.array', (["[b[1]['ens_ix'] for b in boxes]"], {}), "([b[1]['ens_ix'] for b in boxes])\n", (6001, 6034), True, 'import numpy as np\n'), ((12953, 12994), 'numpy.sum', 'np.sum', (['(z1[order[matches]] * match_scores)'], {}), '(z1[order[matches]] * match_scores)\n', (12959, 12994), True, 'import numpy as 
np\n'), ((12997, 13017), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13003, 13017), True, 'import numpy as np\n'), ((13049, 13090), 'numpy.sum', 'np.sum', (['(z2[order[matches]] * match_scores)'], {}), '(z2[order[matches]] * match_scores)\n', (13055, 13090), True, 'import numpy as np\n'), ((13093, 13113), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13099, 13113), True, 'import numpy as np\n'), ((13405, 13455), 'numpy.sum', 'np.sum', (['(box_rg_bins[order[matches]] * match_scores)'], {}), '(box_rg_bins[order[matches]] * match_scores)\n', (13411, 13455), True, 'import numpy as np\n'), ((13458, 13478), 'numpy.sum', 'np.sum', (['match_scores'], {}), '(match_scores)\n', (13464, 13478), True, 'import numpy as np\n'), ((18776, 18793), 'numpy.max', 'np.max', (['slice_ids'], {}), '(slice_ids)\n', (18782, 18793), True, 'import numpy as np\n'), ((18865, 18882), 'numpy.min', 'np.min', (['slice_ids'], {}), '(slice_ids)\n', (18871, 18882), True, 'import numpy as np\n'), ((24105, 24132), 'torch.load', 'torch.load', (['last_state_path'], {}), '(last_state_path)\n', (24115, 24132), False, 'import torch\n'), ((26510, 26533), 'numpy.arange', 'np.arange', (['img.shape[0]'], {}), '(img.shape[0])\n', (26519, 26533), True, 'import numpy as np\n'), ((27834, 27883), 'torch.cat', 'torch.cat', (["[d['torch_loss'] for d in chunk_dicts]"], {}), "([d['torch_loss'] for d in chunk_dicts])\n", (27843, 27883), False, 'import torch\n'), ((35951, 35968), 'numpy.array', 'np.array', (['sp_axis'], {}), '(sp_axis)\n', (35959, 35968), True, 'import numpy as np\n'), ((37203, 37246), 'numpy.flip', 'np.flip', (["chunk_dict['seg_preds']"], {'axis': 'axis'}), "(chunk_dict['seg_preds'], axis=axis)\n", (37210, 37246), True, 'import numpy as np\n'), ((41506, 41550), 'os.path.join', 'os.path.join', (['self.cf.fold_dir', 'results_file'], {}), '(self.cf.fold_dir, results_file)\n', (41518, 41550), False, 'import os\n'), ((42149, 42181), 'os.path.join', 'os.path.join', (['self.cf.exp_dir', 'f'], {}), '(self.cf.exp_dir, f)\n', (42161, 42181), False, 'import os\n'), ((51096, 51119), 'numpy.unique', 'np.unique', (["batch['pid']"], {}), "(batch['pid'])\n", (51105, 51119), True, 'import numpy as np\n'), ((51737, 51748), 'time.time', 'time.time', ([], {}), '()\n', (51746, 51748), False, 'import time\n'), ((55936, 55975), 'numpy.mean', 'np.mean', (['results_dict[res_type]'], {'axis': '(0)'}), '(results_dict[res_type], axis=0)\n', (55943, 55975), True, 'import numpy as np\n'), ((58143, 58176), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.cf.n_workers'}), '(processes=self.cf.n_workers)\n', (58147, 58176), False, 'from multiprocessing import Pool\n'), ((32414, 32466), 'numpy.array', 'np.array', (['[pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]]'], {}), '([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])\n', (32422, 32466), True, 'import numpy as np\n'), ((32630, 32724), 'numpy.mean', 'np.mean', (['patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]\n ]'], {}), '(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c\n [4]:int_c[5]])\n', (32637, 32724), True, 'import numpy as np\n'), ((32836, 32874), 'numpy.array', 'np.array', (['[pc[0], pc[2], pc[0], pc[2]]'], {}), '([pc[0], pc[2], pc[0], pc[2]])\n', (32844, 32874), True, 'import numpy as np\n'), ((33039, 33113), 'numpy.mean', 'np.mean', (['patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]]'], {}), '(patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])\n', (33046, 33113), True, 'import 
numpy as np\n'), ((42191, 42218), 'os.listdir', 'os.listdir', (['self.cf.exp_dir'], {}), '(self.cf.exp_dir)\n', (42201, 42218), False, 'import os\n'), ((42685, 42704), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (42696, 42704), False, 'import pickle\n'), ((51955, 51999), 'numpy.argmax', 'np.argmax', (["results_dict['seg_preds']"], {'axis': '(1)'}), "(results_dict['seg_preds'], axis=1)\n", (51964, 51999), True, 'import numpy as np\n'), ((53533, 53711), 'utils.model_utils.dice_per_batch_and_class', 'mutils.dice_per_batch_and_class', (["results_dict['seg_preds']", "(batch['patient_seg'] if self.patched_patient else batch['seg'])", 'self.cf.num_seg_classes'], {'convert_to_ohe': '(True)'}), "(results_dict['seg_preds'], batch[\n 'patient_seg'] if self.patched_patient else batch['seg'], self.cf.\n num_seg_classes, convert_to_ohe=True)\n", (53564, 53711), True, 'import utils.model_utils as mutils\n'), ((12054, 12079), 'numpy.unique', 'np.unique', (['match_patch_id'], {}), '(match_patch_id)\n', (12063, 12079), True, 'import numpy as np\n'), ((36118, 36141), 'numpy.flip', 'np.flip', (['img'], {'axis': 'axis'}), '(img, axis=axis)\n', (36125, 36141), True, 'import numpy as np\n'), ((42594, 42630), 'os.path.join', 'os.path.join', (['fold_dir', 'results_file'], {}), '(fold_dir, results_file)\n', (42606, 42630), False, 'import os\n'), ((52742, 52978), 'utils.exp_utils.split_off_process', 'utils.split_off_process', (['plg.view_batch', 'self.cf', 'batch', 'results_dict'], {'has_colorchannels': 'self.cf.has_colorchannels', 'show_gt_labels': '(True)', 'show_seg_ids': "('dice' in self.cf.metrics)", 'get_time': '"""test-example plot"""', 'out_file': 'out_file'}), "(plg.view_batch, self.cf, batch, results_dict,\n has_colorchannels=self.cf.has_colorchannels, show_gt_labels=True,\n show_seg_ids='dice' in self.cf.metrics, get_time='test-example plot',\n out_file=out_file)\n", (52765, 52978), True, 'import utils.exp_utils as utils\n'), ((42268, 42300), 'os.path.join', 'os.path.join', (['self.cf.exp_dir', 'f'], {}), '(self.cf.exp_dir, f)\n', (42280, 42300), False, 'import os\n'), ((52268, 52279), 'time.time', 'time.time', ([], {}), '()\n', (52277, 52279), False, 'import time\n'), ((32504, 32516), 'numpy.floor', 'np.floor', (['ii'], {}), '(ii)\n', (32512, 32516), True, 'import numpy as np\n'), ((32540, 32551), 'numpy.ceil', 'np.ceil', (['ii'], {}), '(ii)\n', (32547, 32551), True, 'import numpy as np\n'), ((32912, 32924), 'numpy.floor', 'np.floor', (['ii'], {}), '(ii)\n', (32920, 32924), True, 'import numpy as np\n'), ((32950, 32961), 'numpy.ceil', 'np.ceil', (['ii'], {}), '(ii)\n', (32957, 32961), True, 'import numpy as np\n'), ((37567, 37593), 'numpy.flip', 'np.flip', (['img'], {'axis': 'axis[0]'}), '(img, axis=axis[0])\n', (37574, 37593), True, 'import numpy as np\n'), ((38962, 39008), 'numpy.flip', 'np.flip', (["chunk_dict['seg_preds']"], {'axis': 'axis[0]'}), "(chunk_dict['seg_preds'], axis=axis[0])\n", (38969, 39008), True, 'import numpy as np\n'), ((32183, 32219), 'scipy.stats.norm.pdf', 'norm.pdf', (['bc'], {'loc': 'pc', 'scale': '(pc * 0.8)'}), '(bc, loc=pc, scale=pc * 0.8)\n', (32191, 32219), False, 'from scipy.stats import norm\n'), ((32222, 32240), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (32229, 32240), True, 'import numpy as np\n'), ((32308, 32336), 'numpy.array', 'np.array', (['self.cf.patch_size'], {}), '(self.cf.patch_size)\n', (32316, 32336), True, 'import numpy as np\n')]
|
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn.preprocessing import LabelEncoder
def load_data():
questionnaire = pd.read_excel('XAutoML.xlsx')
encoder = LabelEncoder()
encoder.classes_ = np.array(['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree'])
for c in questionnaire.columns:
try:
questionnaire.loc[:, c] = questionnaire.loc[:, c].str.strip().str.lower()
questionnaire.loc[:, c] = encoder.transform(questionnaire.loc[:, c])
except (AttributeError, ValueError):
pass
questionnaire.columns = questionnaire.columns.str.strip()
requirements = pd.read_excel('task_results.ods', sheet_name='Requirements', skiprows=1)
requirements = requirements.drop(index=[24], columns=['Unnamed: 1']).T
requirements.columns = requirements.iloc[0]
requirements = requirements[1:]
tasks = pd.read_excel('task_results.ods', sheet_name=0)
tasks = tasks.dropna(axis=1, how='all').dropna(axis=0, how='all')
tasks.index = tasks.iloc[:, 0]
tasks.drop(columns=tasks.columns[:2], inplace=True)
return questionnaire, requirements, tasks
def calculate_sus(df: pd.DataFrame):
invert = [False, False, True, False, True, False, True, False, True, True]
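    # SUS scoring: responses are label-encoded 0-4; negatively phrased items (invert=True)
    # are flipped to 4 - x, every item is scaled by 2.5, and the row sum gives the usual
    # 0-100 System Usability Scale score per participant.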
for c, inv in zip(df.columns, invert):
if inv:
df.loc[:, c] = 4 - df.loc[:, c]
df.loc[:, c] = df.loc[:, c] * 2.5
score = df.sum(axis=1)
print('###### System Usability Score ######')
print(df.mean(axis=0))
print(score.mean(), score.std())
print('\n\n')
def print_visual_design(df: pd.DataFrame):
de = df[df['Role'] == 'domain expert']
ar = df[df['Role'] == 'automl researcher']
ds = df[df['Role'] == 'data scientist']
data = pd.DataFrame([de.mean() + 1, ds.mean() + 1, ar.mean() + 1, df.mean() + 1]).T
print('###### Visual Design ######')
for _, row in data.iterrows():
print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
print('\n\n')
def print_previous_knowledge(df: pd.DataFrame):
de = df[df['Role'] == 'domain expert']
ar = df[df['Role'] == 'automl researcher']
ds = df[df['Role'] == 'data scientist']
data = pd.DataFrame([de.mean() + 1, ds.mean() + 1, ar.mean() + 1, df.mean() + 1]).T
print('###### Previous Knowledge ######')
for _, row in data.iterrows():
print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
print('\n\n')
def plot_priority_distribution(df: pd.DataFrame, group=False):
def calc_user_group(value: str):
return value.strip().split('.')[0]
x = []
y = []
m = []
for col in df:
y.append(df[col].to_list())
x.append([col] * df.shape[0])
m.append(df[col].index.map(calc_user_group))
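    # The collected values are the ranks given to the 24 requirement cards; they are flipped
    # below (y = 24 - rank) so that cards ranked closer to 1 (most important) get larger y
    # values, matching the 'most important' label placed at the top of the violin plot.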
x = np.array(x).flatten()
y = 24 - np.array(y).flatten()
m = np.array(m).flatten()
data = pd.DataFrame({'x': x, 'y': y, 'role': m})
mean = data.groupby(by=['x', 'role']).mean().reset_index()
mean = pd.DataFrame({
'Domain Expert': 24 - mean.loc[mean['role'] == 'Domain Expert', 'y'].reset_index(drop=True),
'Data Scientist': 24 - mean.loc[mean['role'] == 'Data Scientist', 'y'].reset_index(drop=True),
'AutoML Researcher': 24 - mean.loc[mean['role'] == 'AutoML Researcher', 'y'].reset_index(drop=True),
'All': 24 - data.groupby('x').mean()['y'].reset_index(drop=True)
})
print('Average card rank')
for _, row in mean.iterrows():
print(f'\\({row[0]:.1f}\\)\t& \\({row[1]:.1f}\\)\t& \\({row[2]:.1f}\\)\t& \\({row[3]:.1f}\\) \\\\')
print('\n\n')
if group:
replacements = {
'#01': ['#02', '#03', '#04'],
'#05': ['#06', '#07', '#08'],
'#09': ['#10', '#11', '#12'],
'#15': ['#16'],
'#19': ['#20'],
# '#22': ['#23', '#24']
}
for key, values in replacements.items():
for value in values:
data.loc[data['x'] == value, 'x'] = key
rename = {
'#01': 'Input Data',
'#05': 'Pre-Proc. Data',
'#09': 'Feat.-Eng. Data',
'#13': 'Complete Pipeline',
'#14': 'Search Space',
'#15': 'Search Strategy',
'#17': 'Perf. Metrics',
'#18': 'Perf. Visual.',
'#19': 'Explanations',
'#21': 'View Hyperparam.',
'#22': 'Comp. Perf.',
'#23': 'Comp. Pipelines',
'#24': 'Comp. Hyperparam.'
}
else:
rename = {
'#01': 'R01 View Input',
'#02': 'R02 Desc Input',
'#03': 'R03 Input Stat',
'#04': 'R04 Plot Input',
'#05': 'R05 View Pre-Proc',
'#06': 'R06 Desc Pre-Proc',
'#07': 'R07 Pre-Proc Stat',
'#08': 'R08 Plot Pre-Proc',
'#09': 'R09 View Feat-Eng',
'#10': 'R10 Feat-Eng Stat',
'#11': 'R11 Plot Feat-Eng',
'#12': 'R12 Desc Feat-Eng',
'#13': 'R13 Complete Pipe',
'#14': 'R14 Search Space',
'#15': 'R15 Pipe Search Strat',
'#16': 'R16 HP Search Strat',
'#17': 'R17 View Perf Metrics',
'#18': 'R18 Plot Perf Visual',
'#19': 'R19 Global Expl',
'#20': 'R20 Local Expl',
'#21': 'R21 View HP',
'#22': 'R22 Comp Perf',
'#23': 'R23 Comp Pipe',
'#24': 'R24 Comp HP'
}
for old, new in rename.items():
data.loc[data['x'] == old, 'x'] = new
data.loc[data['role'] == 'AutoML Researcher', 'role'] = 'Data Scientist'
print('Difference between user groups per card')
for card in data['x'].unique():
ds = data[(data['x'] == card) & (data['role'] == 'Data Scientist')]
de = data[(data['x'] == card) & (data['role'] == 'Domain Expert')]
t = ttest_ind(ds['y'].values, de['y'].values)
if t.pvalue < 0.05:
print(f'{card} {t.pvalue:.5f}')
print('\n\n')
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
fig.tight_layout()
sns.violinplot(data=data, x='x', y='y', hue='role', split=True, palette='pastel', ax=ax)
sns.despine(left=True)
ax.set_ylim(0, 24)
ax.set_yticklabels([])
ax.set_ylabel(None)
ax.set_xlabel(None)
box = ax.get_position()
if group:
plt.xticks(rotation=15)
fig.text(0.0125, 0.2, 'least important', rotation=90, va='bottom')
fig.text(0.0125, 0.95, 'most important', rotation=90, va='top')
ax.set_position([box.x0, box.y0 + box.height * 0.125, box.width, box.height * 0.875])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
else:
plt.xticks(rotation=25, ha='right', rotation_mode='anchor')
fig.text(0.025, 0.225, 'least important', rotation=90, va='bottom')
fig.text(0.025, 0.91, 'most important', rotation=90, va='top')
ax.set_position([box.x0 + 0.015, box.y0 + box.height * 0.15, box.width, box.height * 0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.13), ncol=2)
fig.show()
fig.savefig('requirement_cards.pdf')
def calculate_trust_result(text_df: pd.DataFrame, vis_df: pd.DataFrame):
def cohen_d(x: pd.Series, y: pd.Series):
nx = len(x)
ny = len(y)
dof = nx + ny - 2
return (x.mean() - y.mean()) / math.sqrt(((nx - 1) * x.std() ** 2 + (ny - 1) * y.std() ** 2) / dof)
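    # cohen_d implements Cohen's d for two independent samples with the pooled standard
    # deviation: d = (mean_x - mean_y) / sqrt(((nx-1)*sx**2 + (ny-1)*sy**2) / (nx+ny-2)).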
vis_df.columns = text_df.columns
print('###### Trust ######')
for col in text_df:
if col == 'Role':
continue
text = text_df.loc[:, col]
vis = vis_df.loc[:, col]
t = ttest_ind(text.values, vis.values, alternative='less')
print(
f'{col}, \({text.mean() + 1:.2f} \pm {text.std():.2f}\), \({vis.mean() + 1:.2f} \pm {vis.std():.2f}\), \(p = {t.pvalue:.2e}\), \(d = {cohen_d(text, vis):.2f}\)')
text_de, vis_de = text_df[text_df['Role'] == 'domain expert'], vis_df[vis_df['Role'] == 'domain expert']
text_ar, vis_ar = text_df[text_df['Role'] == 'automl researcher'], vis_df[vis_df['Role'] == 'automl researcher']
text_ds, vis_ds = text_df[text_df['Role'] == 'data scientist'], vis_df[vis_df['Role'] == 'data scientist']
for col in text_df:
if col == 'Role':
continue
print(
f'\\({text_de[col].mean() + 1:.2f}\\)\t& \\({text_ds[col].mean() + 1:.2f}\\)\t& \\({text_ar[col].mean() + 1:.2f}\\)\t& \\({text_df[col].mean() + 1:.2f}\\) \\\\')
print(
f'\\({vis_de[col].mean() + 1:.2f}\\)\t& \\({vis_ds[col].mean() + 1:.2f}\\)\t& \\({vis_ar[col].mean() + 1:.2f}\\)\t& \\({vis_df[col].mean() + 1:.2f}\\) \\\\')
print('\n\n')
def calculate_task_success(df: pd.DataFrame):
encoder = LabelEncoder()
encoder.classes_ = np.array(['n', 'y'])
for c in df.columns:
df.loc[:, c] = encoder.transform(df.loc[:, c])
with pd.option_context('display.precision', 0):
print('Task success percentage')
print(df.mean(axis=1) * 100)
print(df.mean().mean() * 100)
print('\n\n')
def index(df: pd.DataFrame, slice_) -> pd.DataFrame:
    df2 = df.iloc[:, slice_].copy()
df2['Role'] = df['Role']
return df2
questionnaire, requirements, tasks = load_data()
print_visual_design(index(questionnaire, slice(27, 32)))
print_previous_knowledge(index(questionnaire, slice(6, 11)))
calculate_sus(index(questionnaire, slice(32, 42)))
plot_priority_distribution(requirements)
calculate_task_success(tasks)
calculate_trust_result(index(questionnaire, slice(14, 20)), index(questionnaire, slice(20, 26)))
print('Correlation ML expertise and understanding of ML model')
print(questionnaire.iloc[:, [6, 15]].corr())
|
[
"sklearn.preprocessing.LabelEncoder",
"seaborn.despine",
"matplotlib.pyplot.xticks",
"seaborn.set_theme",
"pandas.option_context",
"numpy.array",
"scipy.stats.ttest_ind",
"seaborn.violinplot",
"pandas.read_excel",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] |
[((227, 256), 'pandas.read_excel', 'pd.read_excel', (['"""XAutoML.xlsx"""'], {}), "('XAutoML.xlsx')\n", (240, 256), True, 'import pandas as pd\n'), ((272, 286), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (284, 286), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((310, 395), 'numpy.array', 'np.array', (["['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree']"], {}), "(['strongly disagree', 'disagree', 'neutral', 'agree',\n 'strongly agree'])\n", (318, 395), True, 'import numpy as np\n'), ((753, 825), 'pandas.read_excel', 'pd.read_excel', (['"""task_results.ods"""'], {'sheet_name': '"""Requirements"""', 'skiprows': '(1)'}), "('task_results.ods', sheet_name='Requirements', skiprows=1)\n", (766, 825), True, 'import pandas as pd\n'), ((998, 1045), 'pandas.read_excel', 'pd.read_excel', (['"""task_results.ods"""'], {'sheet_name': '(0)'}), "('task_results.ods', sheet_name=0)\n", (1011, 1045), True, 'import pandas as pd\n'), ((3063, 3104), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'role': m}"], {}), "({'x': x, 'y': y, 'role': m})\n", (3075, 3104), True, 'import pandas as pd\n'), ((6222, 6254), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (6235, 6254), True, 'import seaborn as sns\n'), ((6269, 6304), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(15, 5)'}), '(1, 1, figsize=(15, 5))\n', (6281, 6304), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6426), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'data', 'x': '"""x"""', 'y': '"""y"""', 'hue': '"""role"""', 'split': '(True)', 'palette': '"""pastel"""', 'ax': 'ax'}), "(data=data, x='x', y='y', hue='role', split=True, palette=\n 'pastel', ax=ax)\n", (6347, 6426), True, 'import seaborn as sns\n'), ((6426, 6448), 'seaborn.despine', 'sns.despine', ([], {'left': '(True)'}), '(left=True)\n', (6437, 6448), True, 'import seaborn as sns\n'), ((9021, 9035), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (9033, 9035), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((9059, 9079), 'numpy.array', 'np.array', (["['n', 'y']"], {}), "(['n', 'y'])\n", (9067, 9079), True, 'import numpy as np\n'), ((6084, 6125), 'scipy.stats.ttest_ind', 'ttest_ind', (["ds['y'].values", "de['y'].values"], {}), "(ds['y'].values, de['y'].values)\n", (6093, 6125), False, 'from scipy.stats import ttest_ind\n'), ((6598, 6621), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(15)'}), '(rotation=15)\n', (6608, 6621), True, 'import matplotlib.pyplot as plt\n'), ((6957, 7016), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(25)', 'ha': '"""right"""', 'rotation_mode': '"""anchor"""'}), "(rotation=25, ha='right', rotation_mode='anchor')\n", (6967, 7016), True, 'import matplotlib.pyplot as plt\n'), ((7912, 7966), 'scipy.stats.ttest_ind', 'ttest_ind', (['text.values', 'vis.values'], {'alternative': '"""less"""'}), "(text.values, vis.values, alternative='less')\n", (7921, 7966), False, 'from scipy.stats import ttest_ind\n'), ((9171, 9212), 'pandas.option_context', 'pd.option_context', (['"""display.precision"""', '(0)'], {}), "('display.precision', 0)\n", (9188, 9212), True, 'import pandas as pd\n'), ((2964, 2975), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2972, 2975), True, 'import numpy as np\n'), ((3029, 3040), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (3037, 3040), True, 'import numpy as np\n'), ((2999, 3010), 'numpy.array', 'np.array', (['y'], {}), 
'(y)\n', (3007, 3010), True, 'import numpy as np\n')]
|
"""
*****************
Specifying Colors
*****************
Matplotlib recognizes the following formats to specify a color:
* an RGB or RGBA (red, green, blue, alpha) tuple of float values in closed
interval ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)`` or ``(0.1, 0.2, 0.5, 0.3)``);
* a hex RGB or RGBA string (e.g., ``'#0f0f0f'`` or ``'#0f0f0f80'``;
case-insensitive);
* a shorthand hex RGB or RGBA string, equivalent to the hex RGB or RGBA
string obtained by duplicating each character, (e.g., ``'#abc'``, equivalent
to ``'#aabbcc'``, or ``'#abcd'``, equivalent to ``'#aabbccdd'``;
case-insensitive);
* a string representation of a float value in ``[0, 1]`` inclusive for gray
level (e.g., ``'0.5'``);
* one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``, the single-character
  short-hand notations for blue, green, red, cyan, magenta, yellow, black, and
  white.
* a X11/CSS4 color name (case-insensitive);
* a name from the `xkcd color survey`_, prefixed with ``'xkcd:'`` (e.g.,
``'xkcd:sky blue'``; case insensitive);
* one of the Tableau Colors from the 'T10' categorical palette (the default
color cycle): ``{'tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'}``
(case-insensitive);
* a "CN" color spec, i.e. ``'C'`` followed by a number, which is an index into
the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``); the
indexing is intended to occur at rendering time, and defaults to black if the
cycle does not include color.
.. _xkcd color survey: https://xkcd.com/color/rgb/
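For instance, each of the following calls passes a valid color (a minimal
illustration using ``plot``)::

    plt.plot([0, 1], color=(0.1, 0.2, 0.5))    # RGB tuple
    plt.plot([0, 1], color='#0f0f0f80')        # hex RGBA string
    plt.plot([0, 1], color='0.5')              # gray level
    plt.plot([0, 1], color='xkcd:sky blue')    # xkcd color name
    plt.plot([0, 1], color='tab:orange')       # Tableau palette
    plt.plot([0, 1], color='C2')               # index into the property cycle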
"Red", "Green", and "Blue" are the intensities of those colors, the combination
of which span the colorspace.
How "Alpha" behaves depends on the ``zorder`` of the Artist. Higher
``zorder`` Artists are drawn on top of lower Artists, and "Alpha" determines
whether the lower artist is covered by the higher.
If the old RGB of a pixel is ``RGBold`` and the RGB of the
pixel of the Artist being added is ``RGBnew`` with Alpha ``alpha``,
then the RGB of the pixel is updated to:
``RGB = RGBold * (1 - Alpha) + RGBnew * Alpha``. Alpha
of 1 means the old color is completely covered by the new Artist, Alpha of 0
means that pixel of the Artist is transparent.
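As a quick worked example, drawing ``RGBnew = 0.0`` (black) with ``Alpha = 0.3`` over an
old pixel value ``RGBold = 1.0`` (white) yields ``1.0 * 0.7 + 0.0 * 0.3 = 0.7``, a light
gray.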
For more information on colors in matplotlib see
* the :doc:`/gallery/color/color_demo` example;
* the `matplotlib.colors` API;
* the :doc:`/gallery/color/named_colors` example.
"CN" color selection
--------------------
"CN" colors are converted to RGBA as soon as the artist is created. For
example,
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
th = np.linspace(0, 2*np.pi, 128)
def demo(sty):
mpl.style.use(sty)
fig, ax = plt.subplots(figsize=(3, 3))
ax.set_title('style: {!r}'.format(sty), color='C0')
ax.plot(th, np.cos(th), 'C1', label='C1')
ax.plot(th, np.sin(th), 'C2', label='C2')
ax.legend()
demo('default')
demo('seaborn')
###############################################################################
# will use the first color for the title and then plot using the second
# and third colors of each style's ``mpl.rcParams['axes.prop_cycle']``.
#
#
# .. _xkcd-colors:
#
# xkcd v X11/CSS4
# ---------------
#
# The xkcd colors are derived from a user survey conducted by the
# webcomic xkcd. `Details of the survey are available on the xkcd blog
# <https://blog.xkcd.com/2010/05/03/color-survey-results/>`__.
#
# Out of 148 colors in the CSS color list, there are 95 name collisions
# between the X11/CSS4 names and the xkcd names, all but 3 of which have
# different hex values. For example ``'blue'`` maps to ``'#0000FF'``
# where as ``'xkcd:blue'`` maps to ``'#0343DF'``. Due to these name
# collisions all of the xkcd colors have ``'xkcd:'`` prefixed. As noted in
# the blog post, while it might be interesting to re-define the X11/CSS4 names
# based on such a survey, we do not do so unilaterally.
#
# The name collisions are shown in the table below; the color names
# where the hex values agree are shown in bold.
import matplotlib._color_data as mcd
import matplotlib.patches as mpatch
overlap = {name for name in mcd.CSS4_COLORS
if "xkcd:" + name in mcd.XKCD_COLORS}
fig = plt.figure(figsize=[4.8, 16])
ax = fig.add_axes([0, 0, 1, 1])
for j, n in enumerate(sorted(overlap, reverse=True)):
weight = None
cn = mcd.CSS4_COLORS[n]
xkcd = mcd.XKCD_COLORS["xkcd:" + n].upper()
if cn == xkcd:
weight = 'bold'
r1 = mpatch.Rectangle((0, j), 1, 1, color=cn)
r2 = mpatch.Rectangle((1, j), 1, 1, color=xkcd)
txt = ax.text(2, j+.5, ' ' + n, va='center', fontsize=10,
weight=weight)
ax.add_patch(r1)
ax.add_patch(r2)
ax.axhline(j, color='k')
ax.text(.5, j + 1.5, 'X11', ha='center', va='center')
ax.text(1.5, j + 1.5, 'xkcd', ha='center', va='center')
ax.set_xlim(0, 3)
ax.set_ylim(0, j + 2)
ax.axis('off')
|
[
"matplotlib.patches.Rectangle",
"numpy.linspace",
"matplotlib.style.use",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.subplots"
] |
[((2661, 2691), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(128)'], {}), '(0, 2 * np.pi, 128)\n', (2672, 2691), True, 'import numpy as np\n'), ((4251, 4280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[4.8, 16]'}), '(figsize=[4.8, 16])\n', (4261, 4280), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2729), 'matplotlib.style.use', 'mpl.style.use', (['sty'], {}), '(sty)\n', (2724, 2729), True, 'import matplotlib as mpl\n'), ((2744, 2772), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (2756, 2772), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4555), 'matplotlib.patches.Rectangle', 'mpatch.Rectangle', (['(0, j)', '(1)', '(1)'], {'color': 'cn'}), '((0, j), 1, 1, color=cn)\n', (4531, 4555), True, 'import matplotlib.patches as mpatch\n'), ((4565, 4607), 'matplotlib.patches.Rectangle', 'mpatch.Rectangle', (['(1, j)', '(1)', '(1)'], {'color': 'xkcd'}), '((1, j), 1, 1, color=xkcd)\n', (4581, 4607), True, 'import matplotlib.patches as mpatch\n'), ((2847, 2857), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (2853, 2857), True, 'import numpy as np\n'), ((2893, 2903), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (2899, 2903), True, 'import numpy as np\n')]
|
import numpy as np
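# Quick tour of common NumPy array constructors: array from a list, zeros, full,
# random integers/floats, and linspace; the resulting arrays are printed at the end.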
nparr = np.array([i for i in range(10)])
a = np.zeros(10)
f = np.zeros(10,dtype=float)
n = np.full((3,5),44)
r = np.random.randint(0,100,size=(3,5))
r2 = np.random.random((3,5))
x = np.linspace(0,100,50)
print(nparr,a,f,n,r,r2,x)
|
[
"numpy.random.random",
"numpy.zeros",
"numpy.linspace",
"numpy.random.randint",
"numpy.full"
] |
[((66, 78), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (74, 78), True, 'import numpy as np\n'), ((83, 108), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'float'}), '(10, dtype=float)\n', (91, 108), True, 'import numpy as np\n'), ((112, 131), 'numpy.full', 'np.full', (['(3, 5)', '(44)'], {}), '((3, 5), 44)\n', (119, 131), True, 'import numpy as np\n'), ((134, 172), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(3, 5)'}), '(0, 100, size=(3, 5))\n', (151, 172), True, 'import numpy as np\n'), ((175, 199), 'numpy.random.random', 'np.random.random', (['(3, 5)'], {}), '((3, 5))\n', (191, 199), True, 'import numpy as np\n'), ((203, 226), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(50)'], {}), '(0, 100, 50)\n', (214, 226), True, 'import numpy as np\n')]
|
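A few properties of the creation routines in the snippet above, written as plain-numpy assertions (a minimal sketch; the variable names are new and only illustrate the calls already shown):

import numpy as np

assert np.zeros(10).dtype == np.float64                     # zeros defaults to float64
assert np.full((3, 5), 44).shape == (3, 5)                  # shape is given as a tuple
assert np.random.randint(0, 100, size=(3, 5)).max() < 100     # upper bound is exclusive
x = np.linspace(0, 100, 50)
assert x[0] == 0.0 and x[-1] == 100.0                       # linspace includes both endpoints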
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
import pytest
import vector.backends.numpy_
import vector.backends.object_
def test_xy():
vec = vector.backends.object_.VectorObject2D(
vector.backends.object_.AzimuthalObjectXY(1, 0)
)
assert vec.rotateZ(0.1).x == pytest.approx(0.9950041652780258)
assert vec.rotateZ(0.1).y == pytest.approx(0.09983341664682815)
array = vector.backends.numpy_.VectorNumpy2D(
[(0, 0), (1, 0), (0, 1)], dtype=[("x", numpy.float64), ("y", numpy.float64)]
)
assert isinstance(array.rotateZ(0.1), vector.backends.numpy_.VectorNumpy2D)
out = array.rotateZ(0.1)
assert out.dtype.names == ("x", "y")
assert numpy.allclose(out.x, [0, 0.9950041652780258, -0.09983341664682815])
assert numpy.allclose(out.y, [0, 0.09983341664682815, 0.9950041652780258])
def test_rhophi():
vec = vector.backends.object_.VectorObject2D(
vector.backends.object_.AzimuthalObjectRhoPhi(1, 0)
)
assert vec.rotateZ(0.1).rho == pytest.approx(1)
assert vec.rotateZ(0.1).phi == pytest.approx(0.1)
array = vector.backends.numpy_.VectorNumpy2D(
[(0, 0), (1, 0), (0, 1)], dtype=[("rho", numpy.float64), ("phi", numpy.float64)]
)
assert isinstance(array.rotateZ(0.1), vector.backends.numpy_.VectorNumpy2D)
out = array.rotateZ(0.1)
assert out.dtype.names == ("rho", "phi")
assert numpy.allclose(out.rho, [0, 1, 0])
assert numpy.allclose(out.phi, [0.1, 0.1, 1.1])
|
[
"pytest.approx",
"numpy.allclose"
] |
[((853, 921), 'numpy.allclose', 'numpy.allclose', (['out.x', '[0, 0.9950041652780258, -0.09983341664682815]'], {}), '(out.x, [0, 0.9950041652780258, -0.09983341664682815])\n', (867, 921), False, 'import numpy\n'), ((933, 1000), 'numpy.allclose', 'numpy.allclose', (['out.y', '[0, 0.09983341664682815, 0.9950041652780258]'], {}), '(out.y, [0, 0.09983341664682815, 0.9950041652780258])\n', (947, 1000), False, 'import numpy\n'), ((1555, 1589), 'numpy.allclose', 'numpy.allclose', (['out.rho', '[0, 1, 0]'], {}), '(out.rho, [0, 1, 0])\n', (1569, 1589), False, 'import numpy\n'), ((1601, 1641), 'numpy.allclose', 'numpy.allclose', (['out.phi', '[0.1, 0.1, 1.1]'], {}), '(out.phi, [0.1, 0.1, 1.1])\n', (1615, 1641), False, 'import numpy\n'), ((448, 481), 'pytest.approx', 'pytest.approx', (['(0.9950041652780258)'], {}), '(0.9950041652780258)\n', (461, 481), False, 'import pytest\n'), ((515, 549), 'pytest.approx', 'pytest.approx', (['(0.09983341664682815)'], {}), '(0.09983341664682815)\n', (528, 549), False, 'import pytest\n'), ((1173, 1189), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (1186, 1189), False, 'import pytest\n'), ((1225, 1243), 'pytest.approx', 'pytest.approx', (['(0.1)'], {}), '(0.1)\n', (1238, 1243), False, 'import pytest\n')]
|
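The rotateZ expectations in the tests above follow from the plane-rotation formulas x' = x*cos(t) - y*sin(t) and y' = x*sin(t) + y*cos(t). A minimal standalone sketch in plain numpy (no use of the vector package) reproduces the constants asserted above:

import numpy as np

theta = 0.1
x, y = 1.0, 0.0
x_rot = x * np.cos(theta) - y * np.sin(theta)   # counter-clockwise rotation about z
y_rot = x * np.sin(theta) + y * np.cos(theta)
assert np.isclose(x_rot, 0.9950041652780258)
assert np.isclose(y_rot, 0.09983341664682815)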
# Copyright 2022 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import numpy as np
from floris.tools import FlorisInterface
from floris.tools.visualization import visualize_cut_plane
"""
04_sweep_wind_directions
This example demonstrates vectorization of wind direction.
A vector of wind directions is passed to the reinitialize function
and the powers of the two simulated turbines are computed for all
wind directions in one call.
The power of both turbines for each wind direction is then plotted.
"""
# Instantiate FLORIS using either the GCH or CC model
fi = FlorisInterface("inputs/gch.yaml") # GCH model matched to the default "legacy_gauss" of V2
# fi = FlorisInterface("inputs/cc.yaml") # New CumulativeCurl model
# Define a two turbine farm
D = 126.
layout_x = np.array([0, D*6])
layout_y = [0, 0]
fi.reinitialize(layout = [layout_x, layout_y])
# Sweep wind directions but keep wind speed fixed
wd_array = np.arange(250,291,1.)
fi.reinitialize(wind_directions=wd_array)
# Define a matrix of yaw angles to be all 0
# Note that yaw angles are now specified as a matrix whose dimensions are
# wd/ws/turbine
num_wd = len(wd_array) # Number of wind directions
num_ws = 1 # Number of wind speeds
num_turbine = len(layout_x) # Number of turbines
yaw_angles = np.zeros((num_wd, num_ws, num_turbine))
# Calculate
fi.calculate_wake(yaw_angles=yaw_angles)
# Collect the turbine powers
turbine_powers = fi.get_turbine_powers() / 1E3 # In kW
# Pull out the power values per turbine
pow_t0 = turbine_powers[:,:,0].flatten()
pow_t1 = turbine_powers[:,:,1].flatten()
# Plot
fig, ax = plt.subplots()
ax.plot(wd_array,pow_t0,color='k',label='Upstream Turbine')
ax.plot(wd_array,pow_t1,color='r',label='Downstream Turbine')
ax.grid(True)
ax.legend()
ax.set_xlabel('Wind Direction (deg)')
ax.set_ylabel('Power (kW)')
plt.show()
|
[
"numpy.array",
"numpy.zeros",
"floris.tools.FlorisInterface",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1151, 1185), 'floris.tools.FlorisInterface', 'FlorisInterface', (['"""inputs/gch.yaml"""'], {}), "('inputs/gch.yaml')\n", (1166, 1185), False, 'from floris.tools import FlorisInterface\n'), ((1359, 1379), 'numpy.array', 'np.array', (['[0, D * 6]'], {}), '([0, D * 6])\n', (1367, 1379), True, 'import numpy as np\n'), ((1505, 1529), 'numpy.arange', 'np.arange', (['(250)', '(291)', '(1.0)'], {}), '(250, 291, 1.0)\n', (1514, 1529), True, 'import numpy as np\n'), ((1851, 1890), 'numpy.zeros', 'np.zeros', (['(num_wd, num_ws, num_turbine)'], {}), '((num_wd, num_ws, num_turbine))\n', (1859, 1890), True, 'import numpy as np\n'), ((2172, 2186), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2184, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2409, 2411), True, 'import matplotlib.pyplot as plt\n')]
|
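The example above relies on a fixed (wind direction, wind speed, turbine) axis ordering for yaw_angles and for the array returned by get_turbine_powers(). A shape-only sketch in plain numpy (no FLORIS call is made here; 41 is len(np.arange(250, 291, 1.0)) from the example):

import numpy as np

num_wd, num_ws, num_turbine = 41, 1, 2
yaw_angles = np.zeros((num_wd, num_ws, num_turbine))
turbine_powers = np.random.random((num_wd, num_ws, num_turbine))  # stand-in for the FLORIS output
pow_t0 = turbine_powers[:, :, 0].flatten()  # one turbine across all wind directions
assert yaw_angles.shape == (41, 1, 2)
assert pow_t0.shape == (num_wd * num_ws,)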
import copy
import numpy as np
from scipy.special import wofz
from scipy.integrate import quad
from typing import List, Tuple
import autoarray as aa
from autogalaxy.profiles.mass_profiles import MassProfile
from autogalaxy.profiles.mass_profiles.mass_profiles import (
MassProfileMGE,
MassProfileCSE,
)
from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from
class StellarProfile:
pass
class EllGaussian(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
sigma: float = 0.01,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Gaussian light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
sigma
The sigma value of the Gaussian.
"""
super(EllGaussian, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.sigma = sigma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
deflections = (
self.mass_to_light_ratio
* self.intensity
* self.sigma
* np.sqrt((2 * np.pi) / (1.0 - self.axis_ratio ** 2.0))
* self.zeta_from(grid=grid)
)
return self.rotate_grid_from_reference_frame(
np.multiply(
1.0, np.vstack((-1.0 * np.imag(deflections), np.real(deflections))).T
)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
Note: sigma is divided by sqrt(q) here.
"""
def calculate_deflection_component(npow, index):
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sigma / np.sqrt(self.axis_ratio),
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(u, y, x, npow, axis_ratio, sigma):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(-0.5 * np.square(np.divide(eta_u, sigma))) / (
(1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
Note: sigma is divided by sqrt(q) here.
"""
return np.multiply(
self.intensity,
np.exp(
-0.5
* np.square(
np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio))
)
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.9999 else 0.9999
def zeta_from(self, grid: aa.type.Grid2DLike):
q2 = self.axis_ratio ** 2.0
ind_pos_y = grid[:, 0] >= 0
shape_grid = np.shape(grid)
output_grid = np.zeros((shape_grid[0]), dtype=np.complex128)
scale_factor = self.axis_ratio / (self.sigma * np.sqrt(2.0 * (1.0 - q2)))
xs_0 = grid[:, 1][ind_pos_y] * scale_factor
ys_0 = grid[:, 0][ind_pos_y] * scale_factor
xs_1 = grid[:, 1][~ind_pos_y] * scale_factor
ys_1 = -grid[:, 0][~ind_pos_y] * scale_factor
output_grid[ind_pos_y] = -1j * (
wofz(xs_0 + 1j * ys_0)
- np.exp(-(xs_0 ** 2.0) * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_0 + 1j * ys_0 / self.axis_ratio)
)
output_grid[~ind_pos_y] = np.conj(
-1j
* (
wofz(xs_1 + 1j * ys_1)
- np.exp(-(xs_1 ** 2.0) * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_1 + 1j * ys_1 / self.axis_ratio)
)
)
return output_grid
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
# noinspection PyAbstractClass
class AbstractEllSersic(MassProfile, MassProfileMGE, MassProfileCSE, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens \
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super(AbstractEllSersic, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfileMGE, self).__init__()
super(MassProfileCSE, self).__init__()
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.effective_radius = effective_radius
self.sersic_index = sersic_index
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the elliptical NFW mass
        profile (e.g. `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_mge_from(
grid=grid, sigmas_factor=np.sqrt(self.axis_ratio)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the elliptical NFW mass
        profile (e.g. `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
eccentric_radii = self.grid_to_eccentric_radii(grid=grid)
return self._convergence_2d_via_mge_from(grid_radii=eccentric_radii)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D convergence from a grid of (y,x) arc second coordinates, by computing and summing
the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the elliptical NFW mass
        profile (e.g. `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
elliptical_radii = self.grid_to_elliptical_radii(grid=grid)
return self._convergence_2d_via_cse_from(grid_radii=elliptical_radii)
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, radius: np.ndarray):
"""
Returns the intensity of the profile at a given radius.
Parameters
----------
radius
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant
* (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1)
)
def decompose_convergence_via_mge(self) -> Tuple[List, List]:
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_2d, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self,) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_cses
The number of CSEs used to approximate the input func.
sample_points: int (should be larger than 'total_cses')
The number of data points to fit
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=0.0,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_2d,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
@property
def sersic_constant(self):
"""A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's
total integrated light.
"""
return (
(2 * self.sersic_index)
- (1.0 / 3.0)
+ (4.0 / (405.0 * self.sersic_index))
+ (46.0 / (25515.0 * self.sersic_index ** 2))
+ (131.0 / (1148175.0 * self.sersic_index ** 3))
- (2194697.0 / (30690717750.0 * self.sersic_index ** 4))
)
@property
def ellipticity_rescale(self):
return 1.0 - ((1.0 - self.axis_ratio) / 2.0)
@property
def elliptical_effective_radius(self):
"""
The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \
radius within which a circular aperture contains half the profiles's total integrated light. For elliptical \
systems, this won't robustly capture the light profile's elliptical shape.
The elliptical effective radius instead describes the major-axis radius of the ellipse containing \
half the light, and may be more appropriate for highly flattened systems like disk galaxies.
"""
return self.effective_radius / np.sqrt(self.axis_ratio)
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE):
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
class SphSersic(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre
intensity
Overall flux intensity normalisation in the light profiles (electrons per second)
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllExponential(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllExponential mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=1.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphExponential(EllExponential):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Exponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllDevVaucouleurs(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllDevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=4.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphDevVaucouleurs(EllDevVaucouleurs):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The DevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the
lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllSersicRadialGradient(AbstractEllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.mass_to_light_gradient = mass_to_light_gradient
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
self.mass_to_light_gradient,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
sersic_index,
effective_radius,
mass_to_light_gradient,
sersic_constant,
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return (
(((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient)
* np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
)
/ ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return (
self.mass_to_light_ratio
* (
((self.axis_ratio * grid_radius) / self.effective_radius)
** -self.mass_to_light_gradient
)
* self.image_2d_via_radii_from(grid_radius)
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / self.effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into singular isothermal elliptical (sie) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_sies
The number of SIEs used to approximate the input func.
sample_points: int (should be larger than 'total_sies')
The number of data points to fit
Returns
-------
Tuple[List, List]
            A list of amplitudes and core radii of every singular isothermal ellipsoid (sie) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=self.mass_to_light_gradient,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / scaled_effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_radial_gradient_2D,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
class SphSersicRadialGradient(EllSersicRadialGradient):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
mass_to_light_gradient=mass_to_light_gradient,
)
class EllSersicCore(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity_break,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_mge_from(grid=grid)
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""
Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
"""
return np.multiply(
np.multiply(
self.intensity_prime,
np.power(
np.add(
1,
np.power(np.divide(self.radius_break, grid_radii), self.alpha),
),
(self.gamma / self.alpha),
),
),
np.exp(
np.multiply(
-self.sersic_constant,
(
np.power(
np.divide(
np.add(
np.power(grid_radii, self.alpha),
(self.radius_break ** self.alpha),
),
(self.effective_radius ** self.alpha),
),
(1.0 / (self.alpha * self.sersic_index)),
)
),
)
),
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 50.0
radii_max = self.effective_radius * 20.0
def core_sersic_2D(r):
return (
self.mass_to_light_ratio
* self.intensity_prime
* (1.0 + (self.radius_break / r) ** self.alpha)
** (self.gamma / self.alpha)
* np.exp(
-self.sersic_constant
* (
(r ** self.alpha + self.radius_break ** self.alpha)
/ self.effective_radius ** self.alpha
)
** (1.0 / (self.sersic_index * self.alpha))
)
)
return self._decompose_convergence_via_mge(
func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max
)
@property
def intensity_prime(self):
"""Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)"""
return (
self.intensity_break
* (2.0 ** (-self.gamma / self.alpha))
* np.exp(
self.sersic_constant
* (
((2.0 ** (1.0 / self.alpha)) * self.radius_break)
/ self.effective_radius
)
** (1.0 / self.sersic_index)
)
)
class SphSersicCore(EllSersicCore):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
):
"""
The elliptical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
effective_radius=effective_radius,
sersic_index=sersic_index,
radius_break=radius_break,
intensity_break=intensity_break,
gamma=gamma,
alpha=alpha,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
class EllChameleon(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
        The elliptical Chameleon mass profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
            We use core_radius_1 here to avoid negative values.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
"""
super(EllChameleon, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.core_radius_0 = core_radius_0
self.core_radius_1 = core_radius_1
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Following Eq. (15) and (16), but the parameters are slightly different.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
factor = (
2.0
* self.mass_to_light_ratio
* self.intensity
/ (1 + self.axis_ratio)
* self.axis_ratio
/ np.sqrt(1.0 - self.axis_ratio ** 2.0)
)
core_radius_0 = np.sqrt(
(4.0 * self.core_radius_0 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
core_radius_1 = np.sqrt(
(4.0 * self.core_radius_1 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
psi0 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0
)
psi1 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1
)
deflection_y0 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi0, self.axis_ratio ** 2.0 * core_radius_0),
)
)
deflection_x0 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi0, core_radius_0),
)
)
deflection_y1 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi1, self.axis_ratio ** 2.0 * core_radius_1),
)
)
deflection_x1 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi1, core_radius_1),
)
)
deflection_y = np.subtract(deflection_y0, deflection_y1)
deflection_x = np.subtract(deflection_x0, deflection_x1)
return self.rotate_grid_from_reference_frame(
np.multiply(factor, np.vstack((deflection_y, deflection_x)).T)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_elliptical_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Chamelon light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
"""
axis_ratio_factor = (1.0 + self.axis_ratio) ** 2.0
return np.multiply(
self.intensity / (1 + self.axis_ratio),
np.add(
np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_0 ** 2.0) / axis_ratio_factor,
)
),
),
-np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_1 ** 2.0) / axis_ratio_factor,
)
),
),
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.99999 else 0.99999
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
class SphChameleon(EllChameleon):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
        The spherical Chameleon mass profile.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
            We use core_radius_1 here to avoid negative values.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
core_radius_0=core_radius_0,
core_radius_1=core_radius_1,
mass_to_light_ratio=mass_to_light_ratio,
)
def cse_settings_from(
effective_radius, sersic_index, sersic_constant, mass_to_light_gradient
):
if mass_to_light_gradient > 0.5:
if effective_radius > 0.2:
lower_dex = 6.0
upper_dex = np.min(
[np.log10((18.0 / sersic_constant) ** sersic_index), 1.1]
)
if sersic_index <= 1.2:
total_cses = 50
sample_points = 80
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.5
else:
total_cses = 30
sample_points = 50
else:
if sersic_index <= 1.2:
upper_dex = 1.0
total_cses = 50
sample_points = 80
lower_dex = 4.5
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.0
upper_dex = 1.5
else:
upper_dex = 1.1
lower_dex = 6.0
total_cses = 30
sample_points = 50
else:
upper_dex = np.min(
[
np.log10((23.0 / sersic_constant) ** sersic_index),
0.85 - np.log10(effective_radius),
]
)
if (sersic_index <= 0.9) and (sersic_index > 0.8):
total_cses = 50
sample_points = 80
upper_dex = np.log10((18.0 / sersic_constant) ** sersic_index)
lower_dex = 4.3 + np.log10(effective_radius)
elif sersic_index <= 0.8:
total_cses = 50
sample_points = 80
upper_dex = np.log10((16.0 / sersic_constant) ** sersic_index)
lower_dex = 4.0 + np.log10(effective_radius)
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 4.5 + np.log10(effective_radius)
else:
lower_dex = 3.5 + np.log10(effective_radius)
total_cses = 30
sample_points = 50
return upper_dex, lower_dex, total_cses, sample_points
|
[
"numpy.log10",
"numpy.sqrt",
"autogalaxy.profiles.mass_profiles.mass_profiles.psi_from",
"numpy.add",
"numpy.power",
"scipy.integrate.quad",
"numpy.subtract",
"copy.copy",
"numpy.exp",
"numpy.real",
"numpy.zeros",
"numpy.square",
"numpy.vstack",
"scipy.special.wofz",
"numpy.shape",
"numpy.imag",
"numpy.divide"
] |
[((5818, 5847), 'numpy.zeros', 'np.zeros', ([], {'shape': 'grid.shape[0]'}), '(shape=grid.shape[0])\n', (5826, 5847), True, 'import numpy as np\n'), ((6793, 6807), 'numpy.shape', 'np.shape', (['grid'], {}), '(grid)\n', (6801, 6807), True, 'import numpy as np\n'), ((6831, 6875), 'numpy.zeros', 'np.zeros', (['shape_grid[0]'], {'dtype': 'np.complex128'}), '(shape_grid[0], dtype=np.complex128)\n', (6839, 6875), True, 'import numpy as np\n'), ((7851, 7866), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (7860, 7866), False, 'import copy\n'), ((14018, 14047), 'numpy.zeros', 'np.zeros', ([], {'shape': 'grid.shape[0]'}), '(shape=grid.shape[0])\n', (14026, 14047), True, 'import numpy as np\n'), ((18625, 18640), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (18634, 18640), False, 'import copy\n'), ((46683, 46754), 'numpy.sqrt', 'np.sqrt', (['(4.0 * self.core_radius_0 ** 2.0 / (1.0 + self.axis_ratio) ** 2)'], {}), '(4.0 * self.core_radius_0 ** 2.0 / (1.0 + self.axis_ratio) ** 2)\n', (46690, 46754), True, 'import numpy as np\n'), ((46806, 46877), 'numpy.sqrt', 'np.sqrt', (['(4.0 * self.core_radius_1 ** 2.0 / (1.0 + self.axis_ratio) ** 2)'], {}), '(4.0 * self.core_radius_1 ** 2.0 / (1.0 + self.axis_ratio) ** 2)\n', (46813, 46877), True, 'import numpy as np\n'), ((46922, 46996), 'autogalaxy.profiles.mass_profiles.mass_profiles.psi_from', 'psi_from', ([], {'grid': 'grid', 'axis_ratio': 'self.axis_ratio', 'core_radius': 'core_radius_0'}), '(grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0)\n', (46930, 46996), False, 'from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from\n'), ((47037, 47111), 'autogalaxy.profiles.mass_profiles.mass_profiles.psi_from', 'psi_from', ([], {'grid': 'grid', 'axis_ratio': 'self.axis_ratio', 'core_radius': 'core_radius_1'}), '(grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1)\n', (47045, 47111), False, 'from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from\n'), ((48074, 48115), 'numpy.subtract', 'np.subtract', (['deflection_y0', 'deflection_y1'], {}), '(deflection_y0, deflection_y1)\n', (48085, 48115), True, 'import numpy as np\n'), ((48140, 48181), 'numpy.subtract', 'np.subtract', (['deflection_x0', 'deflection_x1'], {}), '(deflection_x0, deflection_x1)\n', (48151, 48181), True, 'import numpy as np\n'), ((49094, 49123), 'numpy.zeros', 'np.zeros', ([], {'shape': 'grid.shape[0]'}), '(shape=grid.shape[0])\n', (49102, 49123), True, 'import numpy as np\n'), ((50536, 50551), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (50545, 50551), False, 'import copy\n'), ((4786, 4805), 'numpy.sqrt', 'np.sqrt', (['axis_ratio'], {}), '(axis_ratio)\n', (4793, 4805), True, 'import numpy as np\n'), ((4808, 4872), 'numpy.sqrt', 'np.sqrt', (['(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))'], {}), '(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))\n', (4815, 4872), True, 'import numpy as np\n'), ((14366, 14469), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((radius / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1))'], {}), '(-self.sersic_constant * ((radius / self.effective_radius) ** (1.0 /\n self.sersic_index) - 1))\n', (14372, 14469), True, 'import numpy as np\n'), ((16410, 16434), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (16417, 16434), True, 'import numpy as np\n'), ((18518, 18542), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (18525, 18542), True, 'import numpy as np\n'), ((20624, 20643), 'numpy.sqrt', 'np.sqrt', 
(['axis_ratio'], {}), '(axis_ratio)\n', (20631, 20643), True, 'import numpy as np\n'), ((20646, 20710), 'numpy.sqrt', 'np.sqrt', (['(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))'], {}), '(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))\n', (20653, 20710), True, 'import numpy as np\n'), ((20759, 20846), 'numpy.exp', 'np.exp', (['(-sersic_constant * ((eta_u / effective_radius) ** (1.0 / sersic_index) - 1))'], {}), '(-sersic_constant * ((eta_u / effective_radius) ** (1.0 /\n sersic_index) - 1))\n', (20765, 20846), True, 'import numpy as np\n'), ((30915, 30934), 'numpy.sqrt', 'np.sqrt', (['axis_ratio'], {}), '(axis_ratio)\n', (30922, 30934), True, 'import numpy as np\n'), ((30937, 31001), 'numpy.sqrt', 'np.sqrt', (['(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))'], {}), '(u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))\n', (30944, 31001), True, 'import numpy as np\n'), ((34328, 34352), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (34335, 34352), True, 'import numpy as np\n'), ((41647, 41783), 'numpy.exp', 'np.exp', (['(self.sersic_constant * (2.0 ** (1.0 / self.alpha) * self.radius_break /\n self.effective_radius) ** (1.0 / self.sersic_index))'], {}), '(self.sersic_constant * (2.0 ** (1.0 / self.alpha) * self.\n radius_break / self.effective_radius) ** (1.0 / self.sersic_index))\n', (41653, 41783), True, 'import numpy as np\n'), ((46607, 46644), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (46614, 46644), True, 'import numpy as np\n'), ((53870, 53920), 'numpy.log10', 'np.log10', (['((18.0 / sersic_constant) ** sersic_index)'], {}), '((18.0 / sersic_constant) ** sersic_index)\n', (53878, 53920), True, 'import numpy as np\n'), ((2796, 2847), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi / (1.0 - self.axis_ratio ** 2.0))'], {}), '(2 * np.pi / (1.0 - self.axis_ratio ** 2.0))\n', (2803, 2847), True, 'import numpy as np\n'), ((6934, 6959), 'numpy.sqrt', 'np.sqrt', (['(2.0 * (1.0 - q2))'], {}), '(2.0 * (1.0 - q2))\n', (6941, 6959), True, 'import numpy as np\n'), ((7235, 7259), 'scipy.special.wofz', 'wofz', (['(xs_0 + 1.0j * ys_0)'], {}), '(xs_0 + 1.0j * ys_0)\n', (7239, 7259), False, 'from scipy.special import wofz\n'), ((10836, 10860), 'numpy.sqrt', 'np.sqrt', (['self.axis_ratio'], {}), '(self.axis_ratio)\n', (10843, 10860), True, 'import numpy as np\n'), ((14821, 14922), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))\n', (14827, 14922), True, 'import numpy as np\n'), ((16711, 16813), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 /\n self.sersic_index) - 1.0))\n', (16717, 16813), True, 'import numpy as np\n'), ((31151, 31238), 'numpy.exp', 'np.exp', (['(-sersic_constant * ((eta_u / effective_radius) ** (1.0 / sersic_index) - 1))'], {}), '(-sersic_constant * ((eta_u / effective_radius) ** (1.0 /\n sersic_index) - 1))\n', (31157, 31238), True, 'import numpy as np\n'), ((32684, 32785), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / self.effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))\n', (32690, 32785), True, 'import numpy as np\n'), ((34809, 
34911), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 / self.\n sersic_index) - 1.0))'], {}), '(-self.sersic_constant * ((r / scaled_effective_radius) ** (1.0 /\n self.sersic_index) - 1.0))\n', (34815, 34911), True, 'import numpy as np\n'), ((40888, 41064), 'numpy.exp', 'np.exp', (['(-self.sersic_constant * ((r ** self.alpha + self.radius_break ** self.\n alpha) / self.effective_radius ** self.alpha) ** (1.0 / (self.\n sersic_index * self.alpha)))'], {}), '(-self.sersic_constant * ((r ** self.alpha + self.radius_break **\n self.alpha) / self.effective_radius ** self.alpha) ** (1.0 / (self.\n sersic_index * self.alpha)))\n', (40894, 41064), True, 'import numpy as np\n'), ((47297, 47349), 'numpy.add', 'np.add', (['psi0', '(self.axis_ratio ** 2.0 * core_radius_0)'], {}), '(psi0, self.axis_ratio ** 2.0 * core_radius_0)\n', (47303, 47349), True, 'import numpy as np\n'), ((47537, 47564), 'numpy.add', 'np.add', (['psi0', 'core_radius_0'], {}), '(psi0, core_radius_0)\n', (47543, 47564), True, 'import numpy as np\n'), ((47753, 47805), 'numpy.add', 'np.add', (['psi1', '(self.axis_ratio ** 2.0 * core_radius_1)'], {}), '(psi1, self.axis_ratio ** 2.0 * core_radius_1)\n', (47759, 47805), True, 'import numpy as np\n'), ((47993, 48020), 'numpy.add', 'np.add', (['psi1', 'core_radius_1'], {}), '(psi1, core_radius_1)\n', (47999, 48020), True, 'import numpy as np\n'), ((53592, 53642), 'numpy.log10', 'np.log10', (['((23.0 / sersic_constant) ** sersic_index)'], {}), '((23.0 / sersic_constant) ** sersic_index)\n', (53600, 53642), True, 'import numpy as np\n'), ((53952, 53978), 'numpy.log10', 'np.log10', (['effective_radius'], {}), '(effective_radius)\n', (53960, 53978), True, 'import numpy as np\n'), ((54100, 54150), 'numpy.log10', 'np.log10', (['((16.0 / sersic_constant) ** sersic_index)'], {}), '((16.0 / sersic_constant) ** sersic_index)\n', (54108, 54150), True, 'import numpy as np\n'), ((4634, 4673), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (4643, 4673), True, 'import numpy as np\n'), ((7273, 7339), 'numpy.exp', 'np.exp', (['(-xs_0 ** 2.0 * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))'], {}), '(-xs_0 ** 2.0 * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))\n', (7279, 7339), True, 'import numpy as np\n'), ((7357, 7417), 'scipy.special.wofz', 'wofz', (['(self.axis_ratio * xs_0 + 1.0j * ys_0 / self.axis_ratio)'], {}), '(self.axis_ratio * xs_0 + 1.0j * ys_0 / self.axis_ratio)\n', (7361, 7417), False, 'from scipy.special import wofz\n'), ((7524, 7548), 'scipy.special.wofz', 'wofz', (['(xs_1 + 1.0j * ys_1)'], {}), '(xs_1 + 1.0j * ys_1)\n', (7528, 7548), False, 'from scipy.special import wofz\n'), ((20414, 20453), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (20423, 20453), True, 'import numpy as np\n'), ((30608, 30647), 'numpy.vstack', 'np.vstack', (['(deflection_y, deflection_x)'], {}), '((deflection_y, deflection_x))\n', (30617, 30647), True, 'import numpy as np\n'), ((47228, 47265), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47235, 47265), True, 'import numpy as np\n'), ((47468, 47505), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47475, 47505), True, 'import numpy as np\n'), ((47684, 47721), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.axis_ratio ** 2.0)'], {}), '(1.0 - self.axis_ratio ** 2.0)\n', (47691, 47721), True, 'import numpy as 
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
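# Helper used throughout these tests: tolerance for approximate float
# comparisons. For float arrays it is the machine epsilon of the array's
# dtype, otherwise a fixed 1e-4.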
def _eps(arr):
if ia.is_np_array(arr) and arr.dtype.kind == "f":
return np.finfo(arr.dtype).eps
return 1e-4
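# Tests for iap.handle_continuous_param(). As exercised below, the helper
# normalizes user input into a StochasticParameter: plain numbers become
# Deterministic, (a, b) tuples become Uniform (when tuple_to_uniform=True),
# lists become Choice (when list_to_choice=True), and inputs are validated
# against value_range, which may be a (low, high) tuple (None meaning
# unbounded), a callable or None.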
class Test_handle_continuous_param(unittest.TestCase):
def test_value_range_is_none(self):
result = iap.handle_continuous_param(
1, "[test1]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
result = iap.handle_continuous_param(
1, "[test1b]",
value_range=(None, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_stochastic_parameter(self):
result = iap.handle_continuous_param(
iap.Deterministic(1), "[test2]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_integers(self):
result = iap.handle_continuous_param(
1, "[test3]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range(self):
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test4]",
value_range=(2, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test4]" in str(context.exception))
def test_param_is_inside_value_range_and_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_continuous_param(
1, "[test5]",
value_range=(None, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test6]",
value_range=(None, 0),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test6]" in str(context.exception))
def test_param_is_inside_value_range_and_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_continuous_param(
1, "[test7]",
value_range=(-1, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test8]",
value_range=(2, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test8]" in str(context.exception))
def test_tuple_as_value_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test9]",
value_range=None,
tuple_to_uniform=False,
list_to_choice=True)
self.assertTrue("[test9]" in str(context.exception))
def test_tuple_as_value_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_continuous_param(
(1, 2), "[test10]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_continuous_param(
(1, 2), "[test11]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_value_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test12]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test12]" in str(context.exception))
def test_tuple_value_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test13]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test13]" in str(context.exception))
def test_list_as_value_but_no_lists_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2, 3], "[test14]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=False)
self.assertTrue("[test14]" in str(context.exception))
def test_list_as_value_and_lists_allowed(self):
# list as value and list allowed
result = iap.handle_continuous_param(
[1, 2, 3], "[test15]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_list_value_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test16]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test16]" in str(context.exception))
def test_list_value_and_allowed_and_fully_outside_of_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test17]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_and_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_continuous_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test19]",
value_range=False,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_discrete_param(unittest.TestCase):
def test_float_value_inside_value_range_but_no_floats_allowed(self):
# float value without value range when no float value is allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1.5, "[test0]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.assertTrue("[test0]" in str(context.exception))
def test_value_range_is_none(self):
# value without value range
result = iap.handle_discrete_param(
1, "[test1]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
# value without value range as (None, None)
result = iap.handle_discrete_param(
1, "[test1b]", value_range=(None, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_is_stochastic_parameter(self):
# stochastic parameter
result = iap.handle_discrete_param(
iap.Deterministic(1), "[test2]", value_range=None,
tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_inside_value_range(self):
# value within value range
result = iap.handle_discrete_param(
1, "[test3]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range(self):
# value outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test4]", value_range=(2, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test4]" in str(context.exception))
def test_value_inside_value_range_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_discrete_param(
1, "[test5]", value_range=(None, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test6]", value_range=(None, 0), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test6]" in str(context.exception))
def test_value_inside_value_range_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_discrete_param(
1, "[test7]", value_range=(-1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test8]", value_range=(2, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test8]" in str(context.exception))
def test_value_is_tuple_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test9]", value_range=None, tuple_to_uniform=False,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test9]" in str(context.exception))
def test_value_is_tuple_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_discrete_param(
(1, 2), "[test10]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_discrete_param(
(1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self):
# tuple as value and tuple allowed and tuple within value range with
# allow_floats=False
result = iap.handle_discrete_param(
(1, 2), "[test11b]", value_range=(0, 10),
tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test12]" in str(context.exception))
def test_value_tuple_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test13]" in str(context.exception))
def test_value_list_but_not_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True,
list_to_choice=False, allow_floats=True)
self.assertTrue("[test14]" in str(context.exception))
def test_value_list_and_allowed(self):
# list as value and list allowed
result = iap.handle_discrete_param(
[1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_value_list_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test16]" in str(context.exception))
def test_value_list_and_allowed_and_fully_outside_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_discrete_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test19]", value_range=False, tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_categorical_string_param(unittest.TestCase):
def test_arg_is_all(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
ia.ALL, "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == valid_values
def test_arg_is_valid_str(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
"class1", "foo", valid_values)
assert isinstance(param, iap.Deterministic)
assert param.value == "class1"
def test_arg_is_invalid_str(self):
valid_values = ["class1", "class2"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
"class3", "foo", valid_values)
expected = (
"Expected parameter 'foo' to be one of: class1, class2. "
"Got: class3.")
assert expected == str(ctx.exception)
def test_arg_is_valid_list(self):
valid_values = ["class1", "class2", "class3"]
param = iap.handle_categorical_string_param(
["class1", "class3"], "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == ["class1", "class3"]
def test_arg_is_list_with_invalid_types(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", False], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"strings, got types: str, bool."
)
assert expected in str(ctx.exception)
def test_arg_is_invalid_list(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", "class4"], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"the following allowed strings: class1, class2, class3. "
"Got strings: class1, class4."
)
assert expected in str(ctx.exception)
def test_arg_is_stochastic_param(self):
param = iap.Deterministic("class1")
param_out = iap.handle_categorical_string_param(
param, "foo", ["class1"])
assert param_out is param
def test_arg_is_invalid_datatype(self):
with self.assertRaises(Exception) as ctx:
_ = iap.handle_categorical_string_param(
False, "foo", ["class1"])
expected = "Expected parameter 'foo' to be imgaug.ALL"
assert expected in str(ctx.exception)
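# Tests for iap.handle_probability_param(). Bool-like inputs (True/False,
# 0/1) become Deterministic, float probabilities strictly between 0 and 1
# become Binomial(p), existing StochasticParameters are passed through,
# and values outside [0, 1] or of the wrong type are rejected.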
class Test_handle_probability_param(unittest.TestCase):
def test_bool_like_values(self):
for val in [True, False, 0, 1, 0.0, 1.0]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test1]")
assert isinstance(p, iap.Deterministic)
assert p.value == int(val)
def test_float_probabilities(self):
for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test2]")
assert isinstance(p, iap.Binomial)
assert isinstance(p.p, iap.Deterministic)
assert val-1e-8 < p.p.value < val+1e-8
def test_probability_is_stochastic_parameter(self):
det = iap.Deterministic(1)
p = iap.handle_probability_param(det, "[test3]")
assert p == det
def test_probability_has_bad_datatype(self):
with self.assertRaises(Exception) as context:
_p = iap.handle_probability_param("test", "[test4]")
self.assertTrue("Expected " in str(context.exception))
def test_probability_is_negative(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(-0.01, "[test5]")
def test_probability_is_above_100_percent(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(1.01, "[test6]")
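# Tests for iap.force_np_float_dtype(): float arrays keep their dtype,
# while non-float arrays (e.g. uint8, int32) are converted to float64.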
class Test_force_np_float_dtype(unittest.TestCase):
def test_common_dtypes(self):
dtypes = [
("float16", "float16"),
("float32", "float32"),
("float64", "float64"),
("uint8", "float64"),
("int32", "float64")
]
for dtype_in, expected in dtypes:
with self.subTest(dtype_in=dtype_in):
arr = np.zeros((1,), dtype=dtype_in)
observed = iap.force_np_float_dtype(arr).dtype
assert observed.name == expected
class Test_both_np_float_if_one_is_float(unittest.TestCase):
def test_float16_float32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.float32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float32"
def test_float16_int32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.int32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float64"
def test_int32_float16(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.float16)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float16"
def test_int32_uint8(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.uint8)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float64"
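# Tests for iap.draw_distributions_grid(). The parameters and
# ia.draw_grid() are mocked, so the test only verifies that each
# parameter's draw_distribution_graph() is called with the expected
# size/title and that the resulting images are forwarded to draw_grid()
# with the requested rows/cols layout.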
class Test_draw_distributions_grid(unittest.TestCase):
def setUp(self):
reseed()
def test_basic_functionality(self):
params = [mock.Mock(), mock.Mock()]
params[0].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
params[1].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
draw_grid_mock = mock.Mock()
draw_grid_mock.return_value = np.zeros((4, 3, 2), dtype=np.uint8)
with mock.patch('imgaug.imgaug.draw_grid', draw_grid_mock):
grid_observed = iap.draw_distributions_grid(
params, rows=2, cols=3, graph_sizes=(20, 21),
sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"])
assert grid_observed.shape == (4, 3, 2)
assert params[0].draw_distribution_graph.call_count == 1
assert params[1].draw_distribution_graph.call_count == 1
assert params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2)
assert params[0].draw_distribution_graph.call_args[1]["title"] == "A"
assert params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4)
assert params[1].draw_distribution_graph.call_args[1]["title"] == "B"
assert draw_grid_mock.call_count == 1
assert draw_grid_mock.call_args[0][0][0].shape == (20, 21, 3)
assert draw_grid_mock.call_args[0][0][1].shape == (20, 21, 3)
assert draw_grid_mock.call_args[1]["rows"] == 2
assert draw_grid_mock.call_args[1]["cols"] == 3
class Test_draw_distributions_graph(unittest.TestCase):
def test_basic_functionality(self):
# this test is very rough as we get a not-very-well-defined image out
# of the function
param = iap.Uniform(0.0, 1.0)
graph_img = param.draw_distribution_graph(title=None, size=(10000,),
bins=100)
# at least 10% of the image should be white-ish (background)
nb_white = np.sum(graph_img[..., :] > [200, 200, 200])
nb_all = np.prod(graph_img.shape)
graph_img_title = param.draw_distribution_graph(title="test",
size=(10000,),
bins=100)
assert graph_img.ndim == 3
assert graph_img.shape[2] == 3
assert nb_white > 0.1 * nb_all
assert graph_img_title.ndim == 3
assert graph_img_title.shape[2] == 3
assert not np.array_equal(graph_img_title, graph_img)
class TestStochasticParameter(unittest.TestCase):
def setUp(self):
reseed()
def test_copy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.copy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] == param.other_param.a[0]
def test_deepcopy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.deepcopy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] != param.other_param.a[0]
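# Tests for the arithmetic operator overloads on StochasticParameter:
# combining two parameters (or a parameter and a number) via *, /, //, +,
# - and ** wraps the operands in Multiply, Divide, Discretize(Divide),
# Add, Subtract and Power respectively. Plain numbers are wrapped in
# Deterministic first; strings are rejected with "Invalid datatypes".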
class TestStochasticParameterOperators(unittest.TestCase):
def setUp(self):
reseed()
def test_multiply_stochasic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 * param2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert param3.val == param2
def test_multiply_stochastic_param_with_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 * 2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_multiply_integer_with_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 * param1
assert isinstance(param3, iap.Multiply)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_multiply_string_with_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" * param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_multiply_stochastic_param_with_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 * "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_params(self):
# Divide (__truediv__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 / param2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_divide_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 / 2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_divide_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 / param1
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_divide_string_by_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" / param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 / "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_div_stochastic_params(self):
# Divide (__div__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1.__div__(param2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_div_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1.__div__(2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_div_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__div__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_rdiv_stochastic_param_by_integer(self):
# Divide (__rdiv__)
param1 = iap.Normal(0, 1)
param3 = param1.__rdiv__(2)
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_rdiv_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__rdiv__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_stochastic_params(self):
# Divide (__floordiv__)
param1_int = iap.DiscreteUniform(0, 10)
param2_int = iap.Choice([1, 2])
param3 = param1_int // param2_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert param3.other_param.val == param2_int
def test_floordiv_symbol_stochastic_param_by_integer(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = param1_int // 2
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert isinstance(param3.other_param.val, iap.Deterministic)
assert param3.other_param.val.value == 2
def test_floordiv_symbol_integer_by_stochastic_param(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = 2 // param1_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert isinstance(param3.other_param.other_param, iap.Deterministic)
assert param3.other_param.other_param.value == 2
assert param3.other_param.val == param1_int
def test_floordiv_symbol_string_by_stochastic_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = "test" // param1_int
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_symbol_stochastic_param_by_string_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = param1_int // "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 + param2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert param3.val == param2
def test_add_integer_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 + 2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_add_stochastic_param_to_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 + param1
assert isinstance(param3, iap.Add)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_add_stochastic_param_to_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" + param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_string_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 + "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 - param2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert param3.val == param2
def test_subtract_integer_from_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 - 2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_subtract_stochastic_param_from_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 - param1
assert isinstance(param3, iap.Subtract)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_subtract_stochastic_param_from_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" - param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_string_from_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 - "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 ** param2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert param3.val == param2
def test_exponentiate_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 ** 2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_exponentiate_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 ** param1
assert isinstance(param3, iap.Power)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_exponentiate_string_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" ** param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_param_by_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 ** "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
class TestBinomial(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p_is_zero(self):
param = iap.Binomial(0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(int 0))"
)
def test___init___p_is_one(self):
param = iap.Binomial(1.0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(float 1.00000000))"
)
def test_p_is_zero(self):
param = iap.Binomial(0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_p_is_one(self):
param = iap.Binomial(1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_p_is_50_percent(self):
param = iap.Binomial(0.5)
sample = param.draw_sample()
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert sample.shape == tuple()
assert samples.shape == (10000,)
assert sample in [0, 1]
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 5000 - 500 < count < 5000 + 500
elif val == 1:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_p_is_list(self):
param = iap.Binomial(iap.Choice([0.25, 0.75]))
for _ in sm.xrange(10):
samples = param.draw_samples((1000,))
p = np.sum(samples) / samples.size
assert (
(0.25 - 0.05 < p < 0.25 + 0.05)
or (0.75 - 0.05 < p < 0.75 + 0.05)
)
def test_p_is_tuple(self):
param = iap.Binomial((0.0, 1.0))
last_p = 0.5
diffs = []
for _ in sm.xrange(30):
samples = param.draw_samples((1000,))
p = np.sum(samples).astype(np.float32) / samples.size
diffs.append(abs(p - last_p))
last_p = p
nb_p_changed = sum([diff > 0.05 for diff in diffs])
assert nb_p_changed > 15
def test_samples_same_values_for_same_seeds(self):
param = iap.Binomial(0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
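# Tests for iap.Choice(a, replace=True, p=None): samples are drawn from
# the list `a` (numbers, strings or nested StochasticParameters),
# optionally without replacement and optionally weighted by the
# probabilities p.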
class TestChoice(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Choice([0, 1, 2])
assert (
param.__str__()
== param.__repr__()
== "Choice(a=[0, 1, 2], replace=True, p=None)"
)
def test_value_is_list(self):
param = iap.Choice([0, 1, 2])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_sampled_values_match_expected_counts(self):
param = iap.Choice([0, 1, 2])
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_value_is_list_containing_negative_number(self):
param = iap.Choice([-1, 1])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 1]
assert np.all(np.logical_or(samples == -1, samples == 1))
def test_value_is_list_of_floats(self):
param = iap.Choice([-1.2, 1.7])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert (
(
-1.2 - _eps(sample)
< sample <
-1.2 + _eps(sample)
)
or
(
1.7 - _eps(sample)
< sample <
1.7 + _eps(sample)
)
)
assert np.all(
np.logical_or(
np.logical_and(
-1.2 - _eps(sample) < samples,
samples < -1.2 + _eps(sample)
),
np.logical_and(
1.7 - _eps(sample) < samples,
samples < 1.7 + _eps(sample)
)
)
)
def test_value_is_list_of_strings(self):
param = iap.Choice(["first", "second", "third"])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in ["first", "second", "third"]
assert np.all(
np.logical_or(
np.logical_or(
samples == "first",
samples == "second"
),
samples == "third"
)
)
def test_sample_without_replacing(self):
param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
samples = param.draw_samples((50,))
seen = [0 for _ in sm.xrange(100)]
for sample in samples:
seen[sample-1] += 1
assert all([count in [0, 1] for count in seen])
def test_non_uniform_probabilities_over_elements(self):
param = iap.Choice([0, 1], p=[0.25, 0.75])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 2500 - 500 < count < 2500 + 500
elif val == 1:
assert 7500 - 500 < count < 7500 + 500
else:
assert False
def test_list_contains_stochastic_parameter(self):
param = iap.Choice([iap.Choice([0, 1]), 2])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 3
for val, count in zip(unique, counts):
if val in [0, 1]:
assert 2500 - 500 < count < 2500 + 500
elif val == 2:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_samples_same_values_for_same_seeds(self):
param = iap.Choice([-1, 0, 1, 2, 3])
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_value_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice(123)
self.assertTrue(
"Expected a to be an iterable" in str(context.exception))
def test_p_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=123)
self.assertTrue("Expected p to be" in str(context.exception))
def test_value_and_p_have_unequal_lengths(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=[1])
self.assertTrue("Expected lengths of" in str(context.exception))
class TestDiscreteUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.DiscreteUniform(0, 2)
assert (
param.__str__()
== param.__repr__()
== "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
)
def test_bounds_are_ints(self):
param = iap.DiscreteUniform(0, 2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_samples_match_expected_counts(self):
param = iap.DiscreteUniform(0, 2)
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_lower_bound_is_negative(self):
param = iap.DiscreteUniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(samples == -1, samples == 0),
samples == 1
)
)
def test_bounds_are_floats(self):
param = iap.DiscreteUniform(-1.2, 1.2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_have_wrong_order(self):
param = iap.DiscreteUniform(1, -1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_are_the_same(self):
param = iap.DiscreteUniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((100,))
assert sample == 1
assert np.all(samples == 1)
def test_samples_same_values_for_same_seeds(self):
        param = iap.DiscreteUniform(-1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Poisson(1)
assert (
param.__str__()
== param.__repr__()
== "Poisson(Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Poisson(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_poisson(self):
param = iap.Poisson(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).poisson(
lam=1, size=(100, 1000))
assert samples.shape == (100, 1000)
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
count_direct = int(np.sum(samples_direct == i))
count = np.sum(samples == i)
tolerance = max(count_direct * 0.1, 250)
assert count_direct - tolerance < count < count_direct + tolerance
def test_samples_same_values_for_same_seeds(self):
param = iap.Poisson(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Normal(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Normal(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_normal(self):
param = iap.Normal(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
size=(100, 1000))
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Normal(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Normal(0, 1)
param2 = iap.Normal(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert 100 - 10 < np.std(samples2) < 100 + 10
def test_samples_same_values_for_same_seeds(self):
param = iap.Normal(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
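# Tests for iap.TruncatedNormal(loc, scale, low=-inf, high=inf): a normal
# distribution whose samples are restricted to the interval [low, high],
# as verified by the bounds checks below.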
class TestTruncatedNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.TruncatedNormal(0, 1)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(float -inf), "
"high=Deterministic(float inf)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test___init___custom_range(self):
param = iap.TruncatedNormal(0, 1, low=-100, high=50.0)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(int -100), "
"high=Deterministic(float 50.00000000)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_scale_is_zero(self):
param = iap.TruncatedNormal(0.5, 0, low=-10, high=10)
samples = param.draw_samples((100,))
assert np.allclose(samples, 0.5)
def test_scale(self):
param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100)
param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20)
assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40)
def test_loc_is_stochastic_parameter(self):
param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01,
low=-1000, high=1000)
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((5,))
observed = np.mean(samples)
dist1 = np.abs(-100 - observed)
dist2 = np.abs(100 - observed)
if dist1 < 1:
seen[0] += 1
elif dist2 < 1:
seen[1] += 1
else:
assert False
assert np.isclose(seen[0], 100, rtol=0, atol=20)
assert np.isclose(seen[1], 100, rtol=0, atol=20)
def test_samples_are_within_bounds(self):
param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5)
samples = param.draw_samples((1000,))
# are all within bounds
assert np.all(samples >= -5.0 - 1e-4)
assert np.all(samples <= 7.5 + 1e-4)
# at least some samples close to bounds
assert np.any(samples <= -4.5)
assert np.any(samples >= 7.0)
# at least some samples close to loc
assert np.any(np.abs(samples) < 0.5)
def test_samples_same_values_for_same_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=1234)
assert np.allclose(samples1, samples2)
def test_samples_different_values_for_different_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=2345)
assert not np.allclose(samples1, samples2)
class TestLaplace(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Laplace(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Laplace(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_laplace(self):
param = iap.Laplace(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Laplace(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Laplace(0, 1)
param2 = iap.Laplace(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
def test_scale_is_zero(self):
param1 = iap.Laplace(1, 0)
samples = param1.draw_samples((100,))
assert np.all(np.logical_and(
samples > 1 - _eps(samples),
samples < 1 + _eps(samples)
))
def test_samples_same_values_for_same_seeds(self):
param = iap.Laplace(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestChiSquare(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.ChiSquare(1)
assert (
param.__str__()
== param.__repr__()
== "ChiSquare(df=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.ChiSquare(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_chisquare(self):
param = iap.ChiSquare(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).chisquare(df=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 3)
samples_direct = np.clip(samples_direct, 0, 3)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 3.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_df_is_stochastic_parameter(self):
param = iap.ChiSquare(iap.Choice([1, 10]))
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if 1 - 1.0 < exp < 1 + 1.0:
seen[0] += 1
elif 10 - 4.0 < exp < 10 + 4.0:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_larger_df_leads_to_more_variance(self):
param1 = iap.ChiSquare(1)
param2 = iap.ChiSquare(10)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
def test_samples_same_values_for_same_seeds(self):
param = iap.ChiSquare(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestWeibull(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Weibull(1)
assert (
param.__str__()
== param.__repr__()
== "Weibull(a=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Weibull(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_weibull(self):
param = iap.Weibull(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).weibull(a=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 2)
samples_direct = np.clip(samples_direct, 0, 2)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 2.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Weibull(iap.Choice([1, 0.5]))
expected_first = scipy.special.gamma(1 + 1/1)
expected_second = scipy.special.gamma(1 + 1/0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((50000,))
observed = np.mean(samples)
matches_first = (
expected_first - 0.2 * expected_first
< observed <
expected_first + 0.2 * expected_first
)
matches_second = (
expected_second - 0.2 * expected_second
< observed <
expected_second + 0.2 * expected_second
)
if matches_first:
seen[0] += 1
elif matches_second:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_different_strengths(self):
param1 = iap.Weibull(1)
param2 = iap.Weibull(0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
expected_first = (
scipy.special.gamma(1 + 2/1)
- (scipy.special.gamma(1 + 1/1))**2
)
expected_second = (
scipy.special.gamma(1 + 2/0.5)
- (scipy.special.gamma(1 + 1/0.5))**2
)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.2 * expected_first
< np.var(samples1) <
expected_first + 0.2 * expected_first
)
assert (
expected_second - 0.2 * expected_second
< np.var(samples2) <
expected_second + 0.2 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Weibull(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Uniform(0, 1.0)
assert (
param.__str__()
== param.__repr__()
== "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
)
def test_draw_sample(self):
param = iap.Uniform(0, 1.0)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10, 5))
assert samples.shape == (10, 5)
assert np.all(
np.logical_and(
0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_via_density_histogram(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10000,))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0),
density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / samples.size
assert (
density_expected - density_tolerance
< density <
density_expected + density_tolerance
)
def test_negative_value(self):
param = iap.Uniform(-1.0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_wrong_argument_order(self):
param = iap.Uniform(1.0, -1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_integers(self):
param = iap.Uniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_identical(self):
param = iap.Uniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Uniform(-1.0, 1.0)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestBeta(unittest.TestCase):
@classmethod
def _mean(cls, alpha, beta):
return alpha / (alpha + beta)
@classmethod
def _var(cls, alpha, beta):
return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
def setUp(self):
reseed()
def test___init__(self):
param = iap.Beta(0.5, 0.5)
assert (
param.__str__()
== param.__repr__()
== "Beta("
"Deterministic(float 0.50000000), "
"Deterministic(float 0.50000000)"
")"
)
def test_draw_sample(self):
param = iap.Beta(0.5, 0.5)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
assert samples.shape == (100, 1000)
assert np.all(
np.logical_and(
0 - _eps(samples) <= samples,
samples <= 1.0 + _eps(samples)
)
)
def test_via_comparison_to_np_beta(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).beta(
a=0.5, b=0.5, size=(100, 1000))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
expected_first = self._mean(0.5, 0.5)
expected_second = self._mean(2, 0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((10000,))
observed = np.mean(samples)
if expected_first - 0.05 < observed < expected_first + 0.05:
seen[0] += 1
elif expected_second - 0.05 < observed < expected_second + 0.05:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_compare_curves_of_different_arguments(self):
param1 = iap.Beta(2, 2)
param2 = iap.Beta(0.5, 0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
expected_first = self._var(2, 2)
expected_second = self._var(0.5, 0.5)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.1 * expected_first
< np.var(samples1) <
expected_first + 0.1 * expected_first
)
assert (
expected_second - 0.1 * expected_second
< np.var(samples2) <
expected_second + 0.1 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Beta(0.5, 0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
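# Tests for iap.Deterministic(value): every sample equals the fixed value
# (int, float or string). When given a StochasticParameter, the constant
# is sampled once at construction time; other argument types raise an
# exception.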
class TestDeterministic(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
pairs = [
(0, "Deterministic(int 0)"),
(1.0, "Deterministic(float 1.00000000)"),
("test", "Deterministic(test)")
]
for value, expected in pairs:
with self.subTest(value=value):
param = iap.Deterministic(value)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_samples_same_values_for_same_seeds(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0
]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
rs1 = iarandom.RNG(123456)
rs2 = iarandom.RNG(123456)
samples1 = param.draw_samples(20, random_state=rs1)
samples2 = param.draw_samples(20, random_state=rs2)
assert np.array_equal(samples1, samples2)
def test_draw_sample_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert sample1 == sample2
def test_draw_sample_float(self):
values = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert np.isclose(
sample1, sample2, rtol=0, atol=_eps(sample1))
def test_draw_samples_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.all(samples == value)
def test_draw_samples_float(self):
values = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.allclose(samples, value, rtol=0, atol=_eps(samples))
def test_argument_is_stochastic_parameter(self):
seen = [0, 0]
for _ in sm.xrange(200):
param = iap.Deterministic(iap.Choice([0, 1]))
seen[param.value] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_argument_has_invalid_type(self):
with self.assertRaises(Exception) as context:
_ = iap.Deterministic([1, 2, 3])
self.assertTrue(
"Expected StochasticParameter object or number or string"
in str(context.exception))
class TestFromLowerResolution(unittest.TestCase):
def setUp(self):
reseed()
def test___init___size_percent(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_percent=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_percent=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test___init___size_px(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_px=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_px=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test_binomial_hwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 1))
uq = np.unique(samples)
assert samples.shape == (8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_binomial_nhwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples_nhwc = param.draw_samples((1, 8, 8, 1))
uq = np.unique(samples_nhwc)
assert samples_nhwc.shape == (1, 8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_draw_samples_with_too_many_dimensions(self):
# (N, H, W, C, something) causing error
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
with self.assertRaises(Exception) as context:
_ = param.draw_samples((1, 8, 8, 1, 1))
self.assertTrue(
"FromLowerResolution can only generate samples of shape"
in str(context.exception)
)
def test_binomial_hw3(self):
# C=3
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 3))
uq = np.unique(samples)
assert samples.shape == (8, 8, 3)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_different_size_px_arguments(self):
# different sizes in px
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
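        # A smaller size_px samples the noise on a coarser grid before
        # upscaling, which produces fewer but larger connected components;
        # skimage.morphology.label counts those components per sample.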
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_arguments_with_tuple(self):
# different sizes in px, one given as tuple (a, b)
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(400):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_argument_with_stochastic_parameters(self):
# different sizes in px, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Deterministic(1))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Choice([8, 16]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_px_has_invalid_datatype(self):
# bad datatype for size_px
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
self.assertTrue("Expected " in str(context.exception))
def test_min_size(self):
# min_size
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1,
min_size=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent(self):
# different sizes in percent
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_as_stochastic_parameters(self):
# different sizes in percent, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Deterministic(0.01))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Choice([0.4, 0.8]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_has_invalid_datatype(self):
# bad datatype for size_percent
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False)
self.assertTrue("Expected " in str(context.exception))
def test_method(self):
# method given as StochasticParameter
param = iap.FromLowerResolution(
iap.Binomial(0.5), size_px=4,
method=iap.Choice(["nearest", "linear"]))
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((16, 16, 1))
nb_in_between = np.sum(
np.logical_and(0.05 < samples, samples < 0.95))
if nb_in_between == 0:
seen[0] += 1
else:
seen[1] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_method_has_invalid_datatype(self):
# bad datatype for method
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4,
method=False)
self.assertTrue("Expected " in str(context.exception))
def test_samples_same_values_for_same_seeds(self):
# multiple calls with same random_state
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
samples1 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestClip(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Clip(iap.Deterministic(0), -1, 1)
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), -1.000000, 1.000000)"
)
def test_value_within_bounds(self):
param = iap.Clip(iap.Deterministic(0), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_value_exactly_at_upper_bound(self):
param = iap.Clip(iap.Deterministic(1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_value_exactly_at_lower_bound(self):
param = iap.Clip(iap.Deterministic(-1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
def test_value_is_within_bounds_and_float(self):
param = iap.Clip(iap.Deterministic(0.5), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 0.5 - _eps(sample) < sample < 0.5 + _eps(sample)
assert np.all(
np.logical_and(
0.5 - _eps(sample) <= samples,
samples <= 0.5 + _eps(sample)
)
)
def test_value_is_above_upper_bound(self):
param = iap.Clip(iap.Deterministic(2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_value_is_below_lower_bound(self):
param = iap.Clip(iap.Deterministic(-2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
def test_value_is_sometimes_without_bounds_sometimes_beyond(self):
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
def test_samples_same_values_for_same_seeds(self):
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_lower_bound_is_none(self):
param = iap.Clip(iap.Deterministic(0), None, 1)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), None, 1.000000)"
)
def test_upper_bound_is_none(self):
param = iap.Clip(iap.Deterministic(0), 0, None)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), 0.000000, None)"
)
def test_both_bounds_are_none(self):
param = iap.Clip(iap.Deterministic(0), None, None)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), None, None)"
)
class TestDiscretize(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Discretize(iap.Deterministic(0))
assert (
param.__str__()
== param.__repr__()
== "Discretize(Deterministic(int 0))"
)
def test_applied_to_deterministic(self):
values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043,
0,
0.00043, 0.7, 1.0, 1, 54.3, 100.2]
for value in values:
with self.subTest(value=value):
param = iap.Discretize(iap.Deterministic(value))
value_expected = np.round(
np.float64([value])
).astype(np.int32)[0]
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == value_expected
assert np.all(samples == value_expected)
# TODO why are these tests applied to DiscreteUniform instead of Uniform?
def test_applied_to_discrete_uniform(self):
param_orig = iap.DiscreteUniform(0, 1)
param = iap.Discretize(param_orig)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
def test_applied_to_discrete_uniform_with_wider_range(self):
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param_orig.draw_samples((10000,))
samples2 = param.draw_samples((10000,))
assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3))
def test_samples_same_values_for_same_seeds(self):
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Multiply(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_multiply_example_integer_values(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), v2)
samples = p.draw_samples((2, 3))
assert p.draw_sample() == v1 * v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 * v2
)
def test_multiply_example_integer_values_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
samples = p.draw_samples((2, 3))
assert p.draw_sample() == v1 * v2
assert samples.dtype.name == "int32"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 * v2
)
def test_multiply_example_float_values(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 * v2
)
def test_multiply_example_float_values_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 * v2
)
def test_multiply_by_stochastic_parameter(self):
param = iap.Multiply(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
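        # With elementwise=False a single multiplier is drawn from (1.0, 2.0)
        # and applied to all samples, so the sorted samples should collapse to
        # (nearly) one value; the elementwise=True variant below is expected to
        # produce a spread of values instead.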
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 1.0 * 2.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_by_stochastic_parameter_elementwise(self):
param = iap.Multiply(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 1.0 * 2.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_stochastic_parameter_by_fixed_value(self):
param = iap.Multiply(iap.Uniform(1.0, 2.0),
1.0,
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 2.0 * 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_stochastic_parameter_by_fixed_value_elementwise(self):
param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 2.0 * 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestDivide(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Divide(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Divide(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_divide_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == (v1 / v2)
assert samples.dtype.kind == "f"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == (v1 / v2)
assert samples.dtype.kind == "f"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_floats(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
(v1 / v2) - _eps(sample)
<= sample <=
(v1 / v2) + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
(v1 / v2) - _eps(sample)
<= sample <=
(v1 / v2) + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_by_stochastic_parameter(self):
param = iap.Divide(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 2.0) - _eps(samples))
assert np.all(samples < (1.0 / 1.0) + _eps(samples))
assert (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_by_stochastic_parameter_elementwise(self):
param = iap.Divide(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 2.0) - _eps(samples))
assert np.all(samples < (1.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_stochastic_parameter_by_float(self):
param = iap.Divide(iap.Uniform(1.0, 2.0),
1.0,
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 1.0) - _eps(samples))
assert np.all(samples < (2.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_stochastic_parameter_by_float_elementwise(self):
param = iap.Divide(iap.Uniform(1.0, 2.0),
1.0,
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 1.0) - _eps(samples))
assert np.all(samples < (2.0 / 1.0) + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted)
        )
    def test_divide_by_stochastic_parameter_that_can_be_zero(self):
# test division by zero automatically being converted to division by 1
param = iap.Divide(2,
iap.Choice([0, 2]),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_unique = np.sort(np.unique(samples.flatten()))
assert samples_unique[0] == 1 and samples_unique[1] == 2
def test_divide_by_zero(self):
param = iap.Divide(iap.Deterministic(1), 0, elementwise=False)
sample = param.draw_sample()
assert sample == 1
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Add(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Add(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_add_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 + v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 + v2
)
def test_add_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 + v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 + v2
)
def test_add_floats(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 + v2
)
def test_add_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 + v2
)
def test_add_stochastic_parameter(self):
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_stochastic_parameter_elementwise(self):
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_to_stochastic_parameter(self):
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_to_stochastic_parameter_elementwise(self):
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
class TestSubtract(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Subtract(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_subtract_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 - v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 - v2
)
def test_subtract_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 - v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 - v2
)
def test_subtract_floats(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + v1 - v2
)
def test_subtract_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + v1 - v2
)
def test_subtract_stochastic_parameter(self):
param = iap.Subtract(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - _eps(samples))
assert np.all(samples < 1.0 - 1.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_stochastic_parameter_elementwise(self):
param = iap.Subtract(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - _eps(samples))
assert np.all(samples < 1.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_from_stochastic_parameter(self):
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - _eps(samples))
assert np.all(samples < 2.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_from_stochastic_parameter_elementwise(self):
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - _eps(samples))
assert np.all(samples < 2.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestPower(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Power(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Power(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_pairs(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
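        # Skip combinations that are not defined over the reals: negative bases
        # with fractional exponents, and base 0 with negative exponents.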
for base, exponent in itertools.product(values, exponents):
if base < 0 and ia.is_single_float(exponent):
continue
if base == 0 and exponent < 0:
continue
with self.subTest(base=base, exponent=exponent):
p = iap.Power(iap.Deterministic(base), exponent)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
base ** exponent - _eps(sample)
< sample <
base ** exponent + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + base ** exponent
)
def test_pairs_both_deterministic(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
for base, exponent in itertools.product(values, exponents):
if base < 0 and ia.is_single_float(exponent):
continue
if base == 0 and exponent < 0:
continue
with self.subTest(base=base, exponent=exponent):
p = iap.Power(iap.Deterministic(base), iap.Deterministic(exponent))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
base ** exponent - _eps(sample)
< sample <
base ** exponent + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + base ** exponent
)
def test_exponent_is_stochastic_parameter(self):
param = iap.Power(iap.Deterministic(1.5),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_exponent_is_stochastic_parameter_elementwise(self):
param = iap.Power(iap.Deterministic(1.5),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_value_is_uniform(self):
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_value_is_uniform_elementwise(self):
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestAbsolute(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Absolute(iap.Deterministic(0))
assert (
param.__str__()
== param.__repr__()
== "Absolute(Deterministic(int 0))"
)
def test_fixed_values(self):
simple_values = [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5]
for value in simple_values:
with self.subTest(value=value):
param = iap.Absolute(iap.Deterministic(value))
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
if ia.is_single_float(value):
assert (
abs(value) - _eps(sample)
< sample <
abs(value) + _eps(sample)
)
assert np.all(abs(value) - _eps(samples) < samples)
assert np.all(samples < abs(value) + _eps(samples))
else:
assert sample == abs(value)
assert np.all(samples == abs(value))
def test_value_is_stochastic_parameter(self):
param = iap.Absolute(iap.Choice([-3, -1, 1, 3]))
sample = param.draw_sample()
samples = param.draw_samples((10, 10))
samples_uq = np.sort(np.unique(samples))
assert sample.shape == tuple()
assert sample in [3, 1]
assert samples.shape == (10, 10)
assert len(samples_uq) == 2
assert samples_uq[0] == 1 and samples_uq[1] == 3
class TestRandomSign(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.RandomSign(iap.Deterministic(0), 0.5)
assert (
param.__str__()
== param.__repr__()
== "RandomSign(Deterministic(int 0), 0.50)"
)
def test_value_is_deterministic(self):
param = iap.RandomSign(iap.Deterministic(1))
samples = param.draw_samples((1000,))
n_positive = np.sum(samples == 1)
n_negative = np.sum(samples == -1)
assert samples.shape == (1000,)
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
def test_value_is_deterministic_many_samples(self):
param = iap.RandomSign(iap.Deterministic(1))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
assert sample.shape == tuple()
if sample == 1:
seen[1] += 1
else:
seen[0] += 1
n_negative, n_positive = seen
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
def test_value_is_stochastic_parameter(self):
param = iap.RandomSign(iap.Choice([1, 2]))
samples = param.draw_samples((4000,))
seen = [0, 0, 0, 0]
seen[0] = np.sum(samples == -2)
seen[1] = np.sum(samples == -1)
seen[2] = np.sum(samples == 1)
seen[3] = np.sum(samples == 2)
assert np.sum(seen) == 4000
assert all([700 < v < 1300 for v in seen])
def test_samples_same_values_for_same_seeds(self):
param = iap.RandomSign(iap.Choice([1, 2]))
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
assert np.sum(samples1 == -2) > 50
assert np.sum(samples1 == -1) > 50
assert np.sum(samples1 == 1) > 50
assert np.sum(samples1 == 2) > 50
class TestForceSign(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1)
assert (
param.__str__()
== param.__repr__()
== "ForceSign(Deterministic(int 0), True, invert, 1)"
)
def test_single_sample_positive(self):
param = iap.ForceSign(iap.Deterministic(1), positive=True,
mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == 1
def test_single_sample_negative(self):
param = iap.ForceSign(iap.Deterministic(1), positive=False,
mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == -1
def test_many_samples_positive(self):
param = iap.ForceSign(iap.Deterministic(1), positive=True,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_many_samples_negative(self):
param = iap.ForceSign(iap.Deterministic(1), positive=False,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_many_samples_negative_value_to_positive(self):
param = iap.ForceSign(iap.Deterministic(-1), positive=True,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_many_samples_negative_value_to_negative(self):
param = iap.ForceSign(iap.Deterministic(-1), positive=False,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_many_samples_stochastic_value_to_positive(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="invert")
samples = param.draw_samples(1000)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (1000,)
assert n_twos + n_ones == 1000
assert 200 < n_twos < 700
assert 200 < n_ones < 700
def test_many_samples_stochastic_value_to_positive_reroll(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="reroll")
samples = param.draw_samples(1000)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (1000,)
assert n_twos + n_ones == 1000
assert n_twos > 0
assert n_ones > 0
def test_many_samples_stochastic_value_to_positive_reroll_max_count(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="reroll", reroll_count_max=100)
samples = param.draw_samples(100)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (100,)
assert n_twos + n_ones == 100
assert n_twos < 5
def test_samples_same_values_for_same_seeds(self):
param = iap.ForceSign(iap.Choice([-2, 1]),
positive=True,
mode="invert")
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
class TestPositive(unittest.TestCase):
def setUp(self):
reseed()
def test_many_samples_reroll(self):
param = iap.Positive(iap.Deterministic(-1),
mode="reroll",
reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == 1)
class TestNegative(unittest.TestCase):
def setUp(self):
reseed()
def test_many_samples_reroll(self):
param = iap.Negative(iap.Deterministic(1),
mode="reroll",
reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == -1)
class TestIterativeNoiseAggregator(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.IterativeNoiseAggregator(iap.Deterministic(0),
iterations=(1, 3),
aggregation_method="max")
assert (
param.__str__()
== param.__repr__()
== (
"IterativeNoiseAggregator("
"Deterministic(int 0), "
"DiscreteUniform(Deterministic(int 1), "
"Deterministic(int 3)"
"), "
"Deterministic(max)"
")"
)
)
def test_value_is_deterministic_max_1_iter(self):
param = iap.IterativeNoiseAggregator(iap.Deterministic(1),
iterations=1,
aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 1
assert np.all(samples == 1)
def test_value_is_stochastic_avg_200_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=200,
aggregation_method="avg")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert 25 - 10 < sample < 25 + 10
assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10))
def test_value_is_stochastic_max_100_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=100,
aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 50
assert np.all(samples == 50)
def test_value_is_stochastic_min_100_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=100,
aggregation_method="min")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 0
assert np.all(samples == 0)
def test_value_is_stochastic_avg_or_max_100_iter_evaluate_counts(self):
seen = [0, 0, 0, 0]
for _ in sm.xrange(100):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=100,
aggregation_method=["avg", "max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_25 = abs(25 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_25 < 10.0:
seen[0] += 1
elif diff_50 < _eps(samples):
seen[1] += 1
elif diff_0 < _eps(samples):
seen[2] += 1
else:
seen[3] += 1
assert seen[2] <= 2 # around 0.0
assert seen[3] <= 2 # 0.0+eps <= x < 15.0 or 35.0 < x < 50.0 or >50.0
assert 50 - 20 < seen[0] < 50 + 20
assert 50 - 20 < seen[1] < 50 + 20
def test_value_is_stochastic_avg_tuple_as_iter_evaluate_histograms(self):
# iterations as tuple
param = iap.IterativeNoiseAggregator(
iap.Uniform(-1.0, 1.0),
iterations=(1, 100),
aggregation_method="avg")
diffs = []
for _ in sm.xrange(100):
samples = param.draw_samples((1, 1))
diff = abs(samples[0, 0] - 0.0)
diffs.append(diff)
nb_bins = 3
hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0),
density=False)
assert hist[1] > hist[0]
assert hist[1] > hist[2]
def test_value_is_stochastic_max_list_as_iter_evaluate_counts(self):
# iterations as list
seen = [0, 0]
for _ in sm.xrange(400):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=[1, 100],
aggregation_method=["max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_50 < _eps(samples):
seen[0] += 1
elif diff_0 < _eps(samples):
seen[1] += 1
else:
assert False
assert 300 - 50 < seen[0] < 300 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_value_is_stochastic_all_100_iter(self):
# test ia.ALL as aggregation_method
# note that each method individually and list of methods are already
# tested, so no in depth test is needed here
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL)
assert isinstance(param.aggregation_method, iap.Choice)
assert len(param.aggregation_method.a) == 3
assert [v in param.aggregation_method.a for v in ["min", "avg", "max"]]
def test_value_is_stochastic_max_2_iter(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=2, aggregation_method="max")
samples = param.draw_samples((2, 1000))
nb_0 = np.sum(samples == 0)
nb_50 = np.sum(samples == 50)
assert nb_0 + nb_50 == 2 * 1000
assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05
def test_samples_same_values_for_same_seeds(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=5, aggregation_method="avg")
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.allclose(samples1, samples2)
def test_stochastic_param_as_aggregation_method(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=5,
aggregation_method=iap.Deterministic("max"))
assert isinstance(param.aggregation_method, iap.Deterministic)
assert param.aggregation_method.value == "max"
def test_bad_datatype_for_aggregation_method(self):
with self.assertRaises(Exception) as context:
_ = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=5, aggregation_method=False)
self.assertTrue(
"Expected aggregation_method to be" in str(context.exception))
def test_bad_datatype_for_iterations(self):
with self.assertRaises(Exception) as context:
_ = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=False,
aggregation_method="max")
self.assertTrue("Expected iterations to be" in str(context.exception))
class TestSigmoid(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Sigmoid(
iap.Deterministic(0),
threshold=(-10, 10),
activated=True,
mul=1,
add=0)
assert (
param.__str__()
== param.__repr__()
== (
"Sigmoid("
"Deterministic(int 0), "
"Uniform("
"Deterministic(int -10), "
"Deterministic(int 10)"
"), "
"Deterministic(int 1), "
"1, "
"0)"
)
)
def test_activated_is_true(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=True)
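        # In activated mode the sigmoid maps a value v to
        # 1 / (1 + exp(-(v * mul + add - threshold))); here v=5, mul=1, add=0
        # and threshold=0.5.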
expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - _eps(sample) < sample < expected + _eps(sample)
assert np.all(
np.logical_and(
expected - _eps(samples) < samples,
samples < expected + _eps(samples)
)
)
def test_activated_is_false(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=False)
expected = 5
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - _eps(sample) < sample < expected + _eps(sample)
assert np.all(
np.logical_and(
expected - _eps(sample) < samples,
samples < expected + _eps(sample)
)
)
def test_activated_is_probabilistic(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=0.5)
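        # With activated=0.5 the sigmoid is applied to roughly half of the
        # draws and the raw value is passed through otherwise, so both outcomes
        # should be observed about equally often.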
expected_first = 5
expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < _eps(sample):
seen[0] += 1
elif diff_second < _eps(sample):
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
def test_value_is_stochastic_param(self):
param = iap.Sigmoid(
iap.Choice([1, 10]),
add=0,
mul=1,
threshold=0.5,
activated=True)
expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5)))
expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < _eps(sample):
seen[0] += 1
elif diff_second < _eps(sample):
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
def test_mul_add_threshold_with_various_fixed_values(self):
muls = [0.1, 1, 10.3]
adds = [-5.7, -0.0734, 0, 0.0734, 5.7]
vals = [-1, -0.7, 0, 0.7, 1]
threshs = [-5.7, -0.0734, 0, 0.0734, 5.7]
for mul, add, val, thresh in itertools.product(muls, adds, vals,
threshs):
with self.subTest(mul=mul, add=add, val=val, threshold=thresh):
param = iap.Sigmoid(
iap.Deterministic(val),
add=add,
mul=mul,
threshold=thresh)
sample = param.draw_sample()
samples = param.draw_samples((2, 3))
dt = sample.dtype
val_ = np.array([val], dtype=dt)
mul_ = np.array([mul], dtype=dt)
add_ = np.array([add], dtype=dt)
thresh_ = np.array([thresh], dtype=dt)
expected = (
1 / (
1 + np.exp(
-(val_ * mul_ + add_ - thresh_)
)
)
)
assert sample.shape == tuple()
assert samples.shape == (2, 3)
assert (
expected - 5*_eps(sample)
< sample <
expected + 5*_eps(sample)
)
assert np.all(
np.logical_and(
expected - 5*_eps(sample) < samples,
samples < expected + 5*_eps(sample)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Sigmoid(
iap.Choice([1, 10]),
add=0,
mul=1,
threshold=0.5,
activated=True)
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
|
[
"numpy.clip",
"numpy.prod",
"imgaug.random.RNG",
"imgaug.parameters.Choice",
"mock.Mock",
"imgaug.parameters.Uniform",
"imgaug.parameters.draw_distributions_grid",
"imgaug.parameters.handle_discrete_param",
"numpy.array",
"six.moves.xrange",
"imgaug.parameters.handle_continuous_param",
"imgaug.parameters.Weibull",
"imgaug.parameters.Discretize",
"numpy.mean",
"numpy.histogram",
"mock.patch",
"imgaug.parameters.TruncatedNormal",
"numpy.float64",
"itertools.product",
"skimage.morphology.label",
"imgaug.parameters.Normal",
"imgaug.is_single_float",
"numpy.exp",
"imgaug.parameters.Poisson",
"imgaug.parameters.Deterministic",
"imgaug.is_np_array",
"imgaug.parameters.ChiSquare",
"numpy.abs",
"numpy.allclose",
"imgaug.parameters.force_np_float_dtype",
"matplotlib.use",
"imgaug.testutils.reseed",
"numpy.any",
"imgaug.parameters.Laplace",
"scipy.special.gamma",
"numpy.std",
"numpy.finfo",
"imgaug.parameters.handle_categorical_string_param",
"imgaug.parameters.DiscreteUniform",
"numpy.unique",
"numpy.isclose",
"numpy.logical_and",
"imgaug.parameters.Binomial",
"imgaug.parameters.both_np_float_if_one_is_float",
"numpy.logical_or",
"numpy.sum",
"numpy.zeros",
"numpy.array_equal",
"imgaug.parameters.Beta",
"numpy.all",
"numpy.var",
"imgaug.parameters.handle_probability_param"
] |
[((422, 443), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (436, 443), False, 'import matplotlib\n'), ((789, 808), 'imgaug.is_np_array', 'ia.is_np_array', (['arr'], {}), '(arr)\n', (803, 808), True, 'import imgaug as ia\n'), ((1005, 1112), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test1]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test1]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True)\n", (1032, 1112), True, 'from imgaug import parameters as iap\n'), ((1265, 1381), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test1b]"""'], {'value_range': '(None, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test1b]', value_range=(None, None),\n tuple_to_uniform=True, list_to_choice=True)\n", (1292, 1381), True, 'from imgaug import parameters as iap\n'), ((1840, 1950), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test3]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test3]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True)\n", (1867, 1950), True, 'from imgaug import parameters as iap\n'), ((2557, 2670), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test5]"""'], {'value_range': '(None, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test5]', value_range=(None, 12),\n tuple_to_uniform=True, list_to_choice=True)\n", (2584, 2670), True, 'from imgaug import parameters as iap\n'), ((3359, 3472), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test7]"""'], {'value_range': '(-1, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test7]', value_range=(-1, None),\n tuple_to_uniform=True, list_to_choice=True)\n", (3386, 3472), True, 'from imgaug import parameters as iap\n'), ((4546, 4659), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test10]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test10]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True)\n", (4573, 4659), True, 'from imgaug import parameters as iap\n'), ((4929, 5045), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test11]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test11]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True)\n", (4956, 5045), True, 'from imgaug import parameters as iap\n'), ((6635, 6751), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2, 3]', '"""[test15]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "([1, 2, 3], '[test15]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True)\n", (6662, 6751), True, 'from imgaug import parameters as iap\n'), ((8019, 8135), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test18]"""'], {'value_range': '_value_range', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test18]', value_range=_value_range,\n tuple_to_uniform=True, list_to_choice=True)\n", (8046, 8135), True, 'from imgaug import parameters as iap\n'), ((9287, 9412), 
'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test1]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test1]', value_range=None, tuple_to_uniform=\n True, list_to_choice=True, allow_floats=True)\n", (9312, 9412), True, 'from imgaug import parameters as iap\n'), ((9616, 9749), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test1b]"""'], {'value_range': '(None, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test1b]', value_range=(None, None),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (9641, 9749), True, 'from imgaug import parameters as iap\n'), ((10259, 10386), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test3]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test3]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (10284, 10386), True, 'from imgaug import parameters as iap\n'), ((10980, 11110), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test5]"""'], {'value_range': '(None, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test5]', value_range=(None, 12),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (11005, 11110), True, 'from imgaug import parameters as iap\n'), ((11743, 11873), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test7]"""'], {'value_range': '(-1, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test7]', value_range=(-1, None),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (11768, 11873), True, 'from imgaug import parameters as iap\n'), ((12883, 13013), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test10]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test10]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (12908, 13013), True, 'from imgaug import parameters as iap\n'), ((13257, 13390), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test11]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test11]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (13282, 13390), True, 'from imgaug import parameters as iap\n'), ((13678, 13813), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test11b]"""'], {'value_range': '(0, 10)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(False)'}), "((1, 2), '[test11b]', value_range=(0, 10),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=False)\n", (13703, 13813), True, 'from imgaug import parameters as iap\n'), ((15323, 15456), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 2, 3]', '"""[test15]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "([1, 2, 3], '[test15]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True, 
allow_floats=True)\n", (15348, 15456), True, 'from imgaug import parameters as iap\n'), ((16639, 16753), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test18]"""'], {'value_range': '_value_range', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test18]', value_range=_value_range,\n tuple_to_uniform=True, list_to_choice=True)\n", (16664, 16753), True, 'from imgaug import parameters as iap\n'), ((17413, 17477), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['ia.ALL', '"""foo"""', 'valid_values'], {}), "(ia.ALL, 'foo', valid_values)\n", (17448, 17477), True, 'from imgaug import parameters as iap\n'), ((17675, 17741), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['"""class1"""', '"""foo"""', 'valid_values'], {}), "('class1', 'foo', valid_values)\n", (17710, 17741), True, 'from imgaug import parameters as iap\n'), ((18368, 18446), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (["['class1', 'class3']", '"""foo"""', 'valid_values'], {}), "(['class1', 'class3'], 'foo', valid_values)\n", (18403, 18446), True, 'from imgaug import parameters as iap\n'), ((19620, 19647), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['"""class1"""'], {}), "('class1')\n", (19637, 19647), True, 'from imgaug import parameters as iap\n'), ((19669, 19730), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['param', '"""foo"""', "['class1']"], {}), "(param, 'foo', ['class1'])\n", (19704, 19730), True, 'from imgaug import parameters as iap\n'), ((20885, 20905), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (20902, 20905), True, 'from imgaug import parameters as iap\n'), ((20918, 20962), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['det', '"""[test3]"""'], {}), "(det, '[test3]')\n", (20946, 20962), True, 'from imgaug import parameters as iap\n'), ((22202, 22234), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float16'}), '((1,), dtype=np.float16)\n', (22210, 22234), True, 'import numpy as np\n'), ((22248, 22280), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float32'}), '((1,), dtype=np.float32)\n', (22256, 22280), True, 'import numpy as np\n'), ((22298, 22339), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (22331, 22339), True, 'from imgaug import parameters as iap\n'), ((22472, 22504), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float16'}), '((1,), dtype=np.float16)\n', (22480, 22504), True, 'import numpy as np\n'), ((22518, 22548), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (22526, 22548), True, 'import numpy as np\n'), ((22566, 22607), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (22599, 22607), True, 'from imgaug import parameters as iap\n'), ((22740, 22770), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (22748, 22770), True, 'import numpy as np\n'), ((22784, 22816), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float16'}), '((1,), dtype=np.float16)\n', (22792, 22816), True, 'import numpy as np\n'), ((22834, 22875), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', 
(22867, 22875), True, 'from imgaug import parameters as iap\n'), ((23006, 23036), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (23014, 23036), True, 'import numpy as np\n'), ((23050, 23080), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.uint8'}), '((1,), dtype=np.uint8)\n', (23058, 23080), True, 'import numpy as np\n'), ((23098, 23139), 'imgaug.parameters.both_np_float_if_one_is_float', 'iap.both_np_float_if_one_is_float', (['a1', 'b1'], {}), '(a1, b1)\n', (23131, 23139), True, 'from imgaug import parameters as iap\n'), ((23310, 23318), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (23316, 23318), False, 'from imgaug.testutils import reseed\n'), ((23475, 23510), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), dtype=np.uint8)\n', (23483, 23510), True, 'import numpy as np\n'), ((23582, 23617), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), dtype=np.uint8)\n', (23590, 23617), True, 'import numpy as np\n'), ((23644, 23655), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (23653, 23655), False, 'import mock\n'), ((23694, 23729), 'numpy.zeros', 'np.zeros', (['(4, 3, 2)'], {'dtype': 'np.uint8'}), '((4, 3, 2), dtype=np.uint8)\n', (23702, 23729), True, 'import numpy as np\n'), ((24994, 25015), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (25005, 25015), True, 'from imgaug import parameters as iap\n'), ((25243, 25286), 'numpy.sum', 'np.sum', (['(graph_img[..., :] > [200, 200, 200])'], {}), '(graph_img[..., :] > [200, 200, 200])\n', (25249, 25286), True, 'import numpy as np\n'), ((25304, 25328), 'numpy.prod', 'np.prod', (['graph_img.shape'], {}), '(graph_img.shape)\n', (25311, 25328), True, 'import numpy as np\n'), ((25880, 25888), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (25886, 25888), False, 'from imgaug.testutils import reseed\n'), ((25937, 25959), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (25948, 25959), True, 'from imgaug import parameters as iap\n'), ((25976, 26003), 'imgaug.parameters.Discretize', 'iap.Discretize', (['other_param'], {}), '(other_param)\n', (25990, 26003), True, 'from imgaug import parameters as iap\n'), ((26344, 26366), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26355, 26366), True, 'from imgaug import parameters as iap\n'), ((26383, 26410), 'imgaug.parameters.Discretize', 'iap.Discretize', (['other_param'], {}), '(other_param)\n', (26397, 26410), True, 'from imgaug import parameters as iap\n'), ((26793, 26801), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (26799, 26801), False, 'from imgaug.testutils import reseed\n'), ((26866, 26882), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (26876, 26882), True, 'from imgaug import parameters as iap\n'), ((26900, 26922), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (26911, 26922), True, 'from imgaug import parameters as iap\n'), ((27163, 27179), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (27173, 27179), True, 'from imgaug import parameters as iap\n'), ((27473, 27489), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (27483, 27489), True, 'from imgaug import parameters as iap\n'), ((27802, 27818), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (27812, 27818), True, 'from imgaug import parameters as 
iap\n'), ((28066, 28082), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28076, 28082), True, 'from imgaug import parameters as iap\n'), ((28336, 28352), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28346, 28352), True, 'from imgaug import parameters as iap\n'), ((28370, 28392), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (28381, 28392), True, 'from imgaug import parameters as iap\n'), ((28627, 28643), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28637, 28643), True, 'from imgaug import parameters as iap\n'), ((28931, 28947), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (28941, 28947), True, 'from imgaug import parameters as iap\n'), ((29254, 29270), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (29264, 29270), True, 'from imgaug import parameters as iap\n'), ((29514, 29530), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (29524, 29530), True, 'from imgaug import parameters as iap\n'), ((29777, 29793), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (29787, 29793), True, 'from imgaug import parameters as iap\n'), ((29811, 29833), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (29822, 29833), True, 'from imgaug import parameters as iap\n'), ((30072, 30088), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (30082, 30088), True, 'from imgaug import parameters as iap\n'), ((30391, 30407), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (30401, 30407), True, 'from imgaug import parameters as iap\n'), ((30673, 30689), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (30683, 30689), True, 'from imgaug import parameters as iap\n'), ((31002, 31018), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (31012, 31018), True, 'from imgaug import parameters as iap\n'), ((31287, 31313), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (31306, 31313), True, 'from imgaug import parameters as iap\n'), ((31335, 31353), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {}), '([1, 2])\n', (31345, 31353), True, 'from imgaug import parameters as iap\n'), ((31704, 31730), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (31723, 31730), True, 'from imgaug import parameters as iap\n'), ((32138, 32164), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (32157, 32164), True, 'from imgaug import parameters as iap\n'), ((32585, 32611), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (32604, 32611), True, 'from imgaug import parameters as iap\n'), ((32873, 32899), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(10)'], {}), '(0, 10)\n', (32892, 32899), True, 'from imgaug import parameters as iap\n'), ((33124, 33140), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (33134, 33140), True, 'from imgaug import parameters as iap\n'), ((33158, 33180), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (33169, 33180), True, 'from imgaug import parameters as iap\n'), ((33409, 33425), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', 
(33419, 33425), True, 'from imgaug import parameters as iap\n'), ((33707, 33723), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (33717, 33723), True, 'from imgaug import parameters as iap\n'), ((34012, 34028), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34022, 34028), True, 'from imgaug import parameters as iap\n'), ((34257, 34273), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34267, 34273), True, 'from imgaug import parameters as iap\n'), ((34498, 34514), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34508, 34514), True, 'from imgaug import parameters as iap\n'), ((34532, 34554), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (34543, 34554), True, 'from imgaug import parameters as iap\n'), ((34795, 34811), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (34805, 34811), True, 'from imgaug import parameters as iap\n'), ((35105, 35121), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35115, 35121), True, 'from imgaug import parameters as iap\n'), ((35434, 35450), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35444, 35450), True, 'from imgaug import parameters as iap\n'), ((35698, 35714), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35708, 35714), True, 'from imgaug import parameters as iap\n'), ((35943, 35959), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (35953, 35959), True, 'from imgaug import parameters as iap\n'), ((35977, 35999), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (35988, 35999), True, 'from imgaug import parameters as iap\n'), ((36240, 36256), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (36250, 36256), True, 'from imgaug import parameters as iap\n'), ((36550, 36566), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (36560, 36566), True, 'from imgaug import parameters as iap\n'), ((36867, 36883), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (36877, 36883), True, 'from imgaug import parameters as iap\n'), ((37122, 37138), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (37132, 37138), True, 'from imgaug import parameters as iap\n'), ((37369, 37377), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (37375, 37377), False, 'from imgaug.testutils import reseed\n'), ((37434, 37449), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0)'], {}), '(0)\n', (37446, 37449), True, 'from imgaug import parameters as iap\n'), ((37640, 37657), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(1.0)'], {}), '(1.0)\n', (37652, 37657), True, 'from imgaug import parameters as iap\n'), ((37851, 37866), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0)'], {}), '(0)\n', (37863, 37866), True, 'from imgaug import parameters as iap\n'), ((38073, 38093), 'numpy.all', 'np.all', (['(samples == 0)'], {}), '(samples == 0)\n', (38079, 38093), True, 'import numpy as np\n'), ((38140, 38157), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(1.0)'], {}), '(1.0)\n', (38152, 38157), True, 'from imgaug import parameters as iap\n'), ((38364, 38384), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (38370, 38384), True, 'import numpy as np\n'), ((38438, 38455), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], 
{}), '(0.5)\n', (38450, 38455), True, 'from imgaug import parameters as iap\n'), ((38566, 38604), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (38575, 38604), True, 'import numpy as np\n'), ((39109, 39122), 'six.moves.xrange', 'sm.xrange', (['(10)'], {}), '(10)\n', (39118, 39122), True, 'import six.moves as sm\n'), ((39403, 39427), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.0, 1.0)'], {}), '((0.0, 1.0))\n', (39415, 39427), True, 'from imgaug import parameters as iap\n'), ((39486, 39499), 'six.moves.xrange', 'sm.xrange', (['(30)'], {}), '(30)\n', (39495, 39499), True, 'import six.moves as sm\n'), ((39848, 39865), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (39860, 39865), True, 'from imgaug import parameters as iap\n'), ((40119, 40153), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (40133, 40153), True, 'import numpy as np\n'), ((40222, 40230), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (40228, 40230), False, 'from imgaug.testutils import reseed\n'), ((40277, 40298), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40287, 40298), True, 'from imgaug import parameters as iap\n'), ((40496, 40517), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40506, 40517), True, 'from imgaug import parameters as iap\n'), ((40953, 40974), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (40963, 40974), True, 'from imgaug import parameters as iap\n'), ((41395, 41414), 'imgaug.parameters.Choice', 'iap.Choice', (['[-1, 1]'], {}), '([-1, 1])\n', (41405, 41414), True, 'from imgaug import parameters as iap\n'), ((41739, 41762), 'imgaug.parameters.Choice', 'iap.Choice', (['[-1.2, 1.7]'], {}), '([-1.2, 1.7])\n', (41749, 41762), True, 'from imgaug import parameters as iap\n'), ((42658, 42698), 'imgaug.parameters.Choice', 'iap.Choice', (["['first', 'second', 'third']"], {}), "(['first', 'second', 'third'])\n", (42668, 42698), True, 'from imgaug import parameters as iap\n'), ((43560, 43594), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1]'], {'p': '[0.25, 0.75]'}), '([0, 1], p=[0.25, 0.75])\n', (43570, 43594), True, 'from imgaug import parameters as iap\n'), ((43668, 43706), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (43677, 43706), True, 'import numpy as np\n'), ((44177, 44215), 'numpy.unique', 'np.unique', (['samples'], {'return_counts': '(True)'}), '(samples, return_counts=True)\n', (44186, 44215), True, 'import numpy as np\n'), ((44582, 44610), 'imgaug.parameters.Choice', 'iap.Choice', (['[-1, 0, 1, 2, 3]'], {}), '([-1, 0, 1, 2, 3])\n', (44592, 44610), True, 'from imgaug import parameters as iap\n'), ((44864, 44898), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (44878, 44898), True, 'import numpy as np\n'), ((45631, 45639), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (45637, 45639), False, 'from imgaug.testutils import reseed\n'), ((45686, 45711), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (45705, 45711), True, 'from imgaug import parameters as iap\n'), ((45929, 45954), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (45948, 45954), True, 'from imgaug import parameters as iap\n'), ((46383, 46408), 'imgaug.parameters.DiscreteUniform', 
'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (46402, 46408), True, 'from imgaug import parameters as iap\n'), ((46812, 46838), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (46831, 46838), True, 'from imgaug import parameters as iap\n'), ((47257, 47287), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(-1.2)', '(1.2)'], {}), '(-1.2, 1.2)\n', (47276, 47287), True, 'from imgaug import parameters as iap\n'), ((47765, 47791), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(1)', '(-1)'], {}), '(1, -1)\n', (47784, 47791), True, 'from imgaug import parameters as iap\n'), ((48265, 48290), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(1)', '(1)'], {}), '(1, 1)\n', (48284, 48290), True, 'from imgaug import parameters as iap\n'), ((48417, 48437), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (48423, 48437), True, 'import numpy as np\n'), ((48510, 48528), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (48521, 48528), True, 'from imgaug import parameters as iap\n'), ((48782, 48816), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (48796, 48816), True, 'import numpy as np\n'), ((48886, 48894), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (48892, 48894), False, 'from imgaug.testutils import reseed\n'), ((48941, 48955), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (48952, 48955), True, 'from imgaug import parameters as iap\n'), ((49139, 49153), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (49150, 49153), True, 'from imgaug import parameters as iap\n'), ((49325, 49339), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (49336, 49339), True, 'from imgaug import parameters as iap\n'), ((49880, 49894), 'imgaug.parameters.Poisson', 'iap.Poisson', (['(1)'], {}), '(1)\n', (49891, 49894), True, 'from imgaug import parameters as iap\n'), ((50148, 50182), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (50162, 50182), True, 'import numpy as np\n'), ((50251, 50259), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (50257, 50259), False, 'from imgaug.testutils import reseed\n'), ((50306, 50322), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (50316, 50322), True, 'from imgaug import parameters as iap\n'), ((50537, 50553), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (50547, 50553), True, 'from imgaug import parameters as iap\n'), ((50697, 50713), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (50707, 50713), True, 'from imgaug import parameters as iap\n'), ((50928, 50951), 'numpy.clip', 'np.clip', (['samples', '(-1)', '(1)'], {}), '(samples, -1, 1)\n', (50935, 50951), True, 'import numpy as np\n'), ((50977, 51007), 'numpy.clip', 'np.clip', (['samples_direct', '(-1)', '(1)'], {}), '(samples_direct, -1, 1)\n', (50984, 51007), True, 'import numpy as np\n'), ((51047, 51116), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (51059, 51116), True, 'import numpy as np\n'), ((51173, 51249), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(-1.0, 1.0), 
density=False)\n', (51185, 51249), True, 'import numpy as np\n'), ((51792, 51807), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (51801, 51807), True, 'import six.moves as sm\n'), ((52225, 52241), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (52235, 52241), True, 'from imgaug import parameters as iap\n'), ((52259, 52277), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(100)'], {}), '(0, 100)\n', (52269, 52277), True, 'from imgaug import parameters as iap\n'), ((52553, 52569), 'imgaug.parameters.Normal', 'iap.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (52563, 52569), True, 'from imgaug import parameters as iap\n'), ((52823, 52854), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (52834, 52854), True, 'import numpy as np\n'), ((52932, 52940), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (52938, 52940), False, 'from imgaug.testutils import reseed\n'), ((52987, 53012), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (53006, 53012), True, 'from imgaug import parameters as iap\n'), ((53435, 53481), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {'low': '(-100)', 'high': '(50.0)'}), '(0, 1, low=-100, high=50.0)\n', (53454, 53481), True, 'from imgaug import parameters as iap\n'), ((53902, 53947), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0.5)', '(0)'], {'low': '(-10)', 'high': '(10)'}), '(0.5, 0, low=-10, high=10)\n', (53921, 53947), True, 'from imgaug import parameters as iap\n'), ((54008, 54033), 'numpy.allclose', 'np.allclose', (['samples', '(0.5)'], {}), '(samples, 0.5)\n', (54019, 54033), True, 'import numpy as np\n'), ((54078, 54127), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0.0)', '(0.1)'], {'low': '(-100)', 'high': '(100)'}), '(0.0, 0.1, low=-100, high=100)\n', (54097, 54127), True, 'from imgaug import parameters as iap\n'), ((54145, 54194), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0.0)', '(5.0)'], {'low': '(-100)', 'high': '(100)'}), '(0.0, 5.0, low=-100, high=100)\n', (54164, 54194), True, 'from imgaug import parameters as iap\n'), ((54692, 54706), 'six.moves.xrange', 'sm.xrange', (['(200)'], {}), '(200)\n', (54701, 54706), True, 'import six.moves as sm\n'), ((55058, 55099), 'numpy.isclose', 'np.isclose', (['seen[0]', '(100)'], {'rtol': '(0)', 'atol': '(20)'}), '(seen[0], 100, rtol=0, atol=20)\n', (55068, 55099), True, 'import numpy as np\n'), ((55115, 55156), 'numpy.isclose', 'np.isclose', (['seen[1]', '(100)'], {'rtol': '(0)', 'atol': '(20)'}), '(seen[1], 100, rtol=0, atol=20)\n', (55125, 55156), True, 'import numpy as np\n'), ((55220, 55266), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(10.0)'], {'low': '(-5)', 'high': '(7.5)'}), '(0, 10.0, low=-5, high=7.5)\n', (55239, 55266), True, 'from imgaug import parameters as iap\n'), ((55362, 55394), 'numpy.all', 'np.all', (['(samples >= -5.0 - 0.0001)'], {}), '(samples >= -5.0 - 0.0001)\n', (55368, 55394), True, 'import numpy as np\n'), ((55408, 55439), 'numpy.all', 'np.all', (['(samples <= 7.5 + 0.0001)'], {}), '(samples <= 7.5 + 0.0001)\n', (55414, 55439), True, 'import numpy as np\n'), ((55502, 55525), 'numpy.any', 'np.any', (['(samples <= -4.5)'], {}), '(samples <= -4.5)\n', (55508, 55525), True, 'import numpy as np\n'), ((55541, 55563), 'numpy.any', 'np.any', (['(samples >= 7.0)'], {}), '(samples >= 7.0)\n', (55547, 55563), True, 'import numpy as np\n'), ((55727, 
55752), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (55746, 55752), True, 'from imgaug import parameters as iap\n'), ((55902, 55933), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (55913, 55933), True, 'import numpy as np\n'), ((56016, 56041), 'imgaug.parameters.TruncatedNormal', 'iap.TruncatedNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (56035, 56041), True, 'from imgaug import parameters as iap\n'), ((56296, 56304), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (56302, 56304), False, 'from imgaug.testutils import reseed\n'), ((56351, 56368), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (56362, 56368), True, 'from imgaug import parameters as iap\n'), ((56584, 56601), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (56595, 56601), True, 'from imgaug import parameters as iap\n'), ((56746, 56763), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (56757, 56763), True, 'from imgaug import parameters as iap\n'), ((57026, 57049), 'numpy.clip', 'np.clip', (['samples', '(-1)', '(1)'], {}), '(samples, -1, 1)\n', (57033, 57049), True, 'import numpy as np\n'), ((57075, 57105), 'numpy.clip', 'np.clip', (['samples_direct', '(-1)', '(1)'], {}), '(samples_direct, -1, 1)\n', (57082, 57105), True, 'import numpy as np\n'), ((57145, 57214), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (57157, 57214), True, 'import numpy as np\n'), ((57271, 57347), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (57283, 57347), True, 'import numpy as np\n'), ((57891, 57906), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (57900, 57906), True, 'import six.moves as sm\n'), ((58325, 58342), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (58336, 58342), True, 'from imgaug import parameters as iap\n'), ((58360, 58379), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(100)'], {}), '(0, 100)\n', (58371, 58379), True, 'from imgaug import parameters as iap\n'), ((58581, 58598), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(1)', '(0)'], {}), '(1, 0)\n', (58592, 58598), True, 'from imgaug import parameters as iap\n'), ((58849, 58866), 'imgaug.parameters.Laplace', 'iap.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (58860, 58866), True, 'from imgaug import parameters as iap\n'), ((59120, 59151), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (59131, 59151), True, 'import numpy as np\n'), ((59223, 59231), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (59229, 59231), False, 'from imgaug.testutils import reseed\n'), ((59278, 59294), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (59291, 59294), True, 'from imgaug import parameters as iap\n'), ((59484, 59500), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (59497, 59500), True, 'from imgaug import parameters as iap\n'), ((59674, 59690), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (59687, 59690), True, 'from imgaug import parameters as iap\n'), ((59943, 59963), 'numpy.all', 'np.all', (['(0 <= samples)'], {}), '(0 <= samples)\n', (59949, 59963), 
True, 'import numpy as np\n'), ((59983, 60005), 'numpy.clip', 'np.clip', (['samples', '(0)', '(3)'], {}), '(samples, 0, 3)\n', (59990, 60005), True, 'import numpy as np\n'), ((60031, 60060), 'numpy.clip', 'np.clip', (['samples_direct', '(0)', '(3)'], {}), '(samples_direct, 0, 3)\n', (60038, 60060), True, 'import numpy as np\n'), ((60100, 60166), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0, 3.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0, 3.0), density=False)\n', (60112, 60166), True, 'import numpy as np\n'), ((60223, 60296), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(0, 3.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(0, 3.0), density=False)\n', (60235, 60296), True, 'import numpy as np\n'), ((60834, 60849), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (60843, 60849), True, 'import six.moves as sm\n'), ((61291, 61307), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (61304, 61307), True, 'from imgaug import parameters as iap\n'), ((61325, 61342), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(10)'], {}), '(10)\n', (61338, 61342), True, 'from imgaug import parameters as iap\n'), ((61678, 61694), 'imgaug.parameters.ChiSquare', 'iap.ChiSquare', (['(1)'], {}), '(1)\n', (61691, 61694), True, 'from imgaug import parameters as iap\n'), ((61948, 61979), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (61959, 61979), True, 'import numpy as np\n'), ((62049, 62057), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (62055, 62057), False, 'from imgaug.testutils import reseed\n'), ((62104, 62118), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (62115, 62118), True, 'from imgaug import parameters as iap\n'), ((62305, 62319), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (62316, 62319), True, 'from imgaug import parameters as iap\n'), ((62491, 62505), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (62502, 62505), True, 'from imgaug import parameters as iap\n'), ((62753, 62773), 'numpy.all', 'np.all', (['(0 <= samples)'], {}), '(0 <= samples)\n', (62759, 62773), True, 'import numpy as np\n'), ((62793, 62815), 'numpy.clip', 'np.clip', (['samples', '(0)', '(2)'], {}), '(samples, 0, 2)\n', (62800, 62815), True, 'import numpy as np\n'), ((62841, 62870), 'numpy.clip', 'np.clip', (['samples_direct', '(0)', '(2)'], {}), '(samples_direct, 0, 2)\n', (62848, 62870), True, 'import numpy as np\n'), ((62910, 62976), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0, 2.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0, 2.0), density=False)\n', (62922, 62976), True, 'import numpy as np\n'), ((63033, 63106), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(0, 2.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(0, 2.0), density=False)\n', (63045, 63106), True, 'import numpy as np\n'), ((63635, 63665), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 1)'], {}), '(1 + 1 / 1)\n', (63654, 63665), False, 'import scipy\n'), ((63690, 63722), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 0.5)'], {}), '(1 + 1 / 0.5)\n', (63709, 63722), False, 'import scipy\n'), ((63760, 63774), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (63769, 63774), True, 'import six.moves as sm\n'), ((64549, 64563), 'imgaug.parameters.Weibull', 
'iap.Weibull', (['(1)'], {}), '(1)\n', (64560, 64563), True, 'from imgaug import parameters as iap\n'), ((64581, 64597), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(0.5)'], {}), '(0.5)\n', (64592, 64597), True, 'from imgaug import parameters as iap\n'), ((65402, 65416), 'imgaug.parameters.Weibull', 'iap.Weibull', (['(1)'], {}), '(1)\n', (65413, 65416), True, 'from imgaug import parameters as iap\n'), ((65670, 65701), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (65681, 65701), True, 'import numpy as np\n'), ((65771, 65779), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (65777, 65779), False, 'from imgaug.testutils import reseed\n'), ((65826, 65845), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (65837, 65845), True, 'from imgaug import parameters as iap\n'), ((66062, 66081), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (66073, 66081), True, 'from imgaug import parameters as iap\n'), ((66272, 66291), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (66283, 66291), True, 'from imgaug import parameters as iap\n'), ((66605, 66624), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (66616, 66624), True, 'from imgaug import parameters as iap\n'), ((66712, 66780), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0.0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0.0, 1.0), density=False)\n', (66724, 66780), True, 'import numpy as np\n'), ((67185, 67207), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (67196, 67207), True, 'from imgaug import parameters as iap\n'), ((67664, 67686), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(-1.0)'], {}), '(1.0, -1.0)\n', (67675, 67686), True, 'from imgaug import parameters as iap\n'), ((68145, 68163), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (68156, 68163), True, 'from imgaug import parameters as iap\n'), ((68623, 68640), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1)', '(1)'], {}), '(1, 1)\n', (68634, 68640), True, 'from imgaug import parameters as iap\n'), ((69109, 69131), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (69120, 69131), True, 'from imgaug import parameters as iap\n'), ((69385, 69416), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (69396, 69416), True, 'import numpy as np\n'), ((69695, 69703), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (69701, 69703), False, 'from imgaug.testutils import reseed\n'), ((69750, 69768), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (69758, 69768), True, 'from imgaug import parameters as iap\n'), ((70047, 70065), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (70055, 70065), True, 'from imgaug import parameters as iap\n'), ((70256, 70274), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (70264, 70274), True, 'from imgaug import parameters as iap\n'), ((70602, 70620), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (70610, 70620), True, 'from imgaug import parameters as iap\n'), ((70806, 70872), 'numpy.histogram', 'np.histogram', (['samples'], {'bins': 'nb_bins', 'range': '(0, 1.0)', 'density': '(False)'}), '(samples, bins=nb_bins, range=(0, 1.0), 
density=False)\n', (70818, 70872), True, 'import numpy as np\n'), ((70929, 71002), 'numpy.histogram', 'np.histogram', (['samples_direct'], {'bins': 'nb_bins', 'range': '(0, 1.0)', 'density': '(False)'}), '(samples_direct, bins=nb_bins, range=(0, 1.0), density=False)\n', (70941, 71002), True, 'import numpy as np\n'), ((71638, 71652), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (71647, 71652), True, 'import six.moves as sm\n'), ((72164, 72178), 'imgaug.parameters.Beta', 'iap.Beta', (['(2)', '(2)'], {}), '(2, 2)\n', (72172, 72178), True, 'from imgaug import parameters as iap\n'), ((72196, 72214), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (72204, 72214), True, 'from imgaug import parameters as iap\n'), ((72850, 72868), 'imgaug.parameters.Beta', 'iap.Beta', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (72858, 72868), True, 'from imgaug import parameters as iap\n'), ((73122, 73153), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (73133, 73153), True, 'import numpy as np\n'), ((73229, 73237), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (73235, 73237), False, 'from imgaug.testutils import reseed\n'), ((75359, 75392), 'itertools.product', 'itertools.product', (['values', 'shapes'], {}), '(values, shapes)\n', (75376, 75392), False, 'import itertools\n'), ((76011, 76044), 'itertools.product', 'itertools.product', (['values', 'shapes'], {}), '(values, shapes)\n', (76028, 76044), False, 'import itertools\n'), ((76583, 76597), 'six.moves.xrange', 'sm.xrange', (['(200)'], {}), '(200)\n', (76592, 76597), True, 'import six.moves as sm\n'), ((77145, 77153), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (77151, 77153), False, 'from imgaug.testutils import reseed\n'), ((78281, 78299), 'numpy.unique', 'np.unique', (['samples'], {}), '(samples)\n', (78290, 78299), True, 'import numpy as np\n'), ((78592, 78615), 'numpy.unique', 'np.unique', (['samples_nhwc'], {}), '(samples_nhwc)\n', (78601, 78615), True, 'import numpy as np\n'), ((79348, 79366), 'numpy.unique', 'np.unique', (['samples'], {}), '(samples)\n', (79357, 79366), True, 'import numpy as np\n'), ((79788, 79802), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (79797, 79802), True, 'import six.moves as sm\n'), ((80928, 80942), 'six.moves.xrange', 'sm.xrange', (['(400)'], {}), '(400)\n', (80937, 80942), True, 'import six.moves as sm\n'), ((82199, 82213), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (82208, 82213), True, 'import six.moves as sm\n'), ((83594, 83608), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (83603, 83608), True, 'import six.moves as sm\n'), ((84695, 84709), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (84704, 84709), True, 'import six.moves as sm\n'), ((85971, 85985), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (85980, 85985), True, 'import six.moves as sm\n'), ((87307, 87321), 'six.moves.xrange', 'sm.xrange', (['(200)'], {}), '(200)\n', (87316, 87321), True, 'import six.moves as sm\n'), ((88437, 88468), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (88448, 88468), True, 'import numpy as np\n'), ((88535, 88543), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (88541, 88543), False, 'from imgaug.testutils import reseed\n'), ((89081, 89101), 'numpy.all', 'np.all', (['(samples == 0)'], {}), '(samples == 0)\n', (89087, 89101), True, 'import numpy as np\n'), ((89412, 89432), 'numpy.all', 'np.all', 
(['(samples == 1)'], {}), '(samples == 1)\n', (89418, 89432), True, 'import numpy as np\n'), ((89745, 89766), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (89751, 89766), True, 'import numpy as np\n'), ((90581, 90601), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (90587, 90601), True, 'import numpy as np\n'), ((90912, 90933), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (90918, 90933), True, 'import numpy as np\n'), ((91680, 91714), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (91694, 91714), True, 'import numpy as np\n'), ((92717, 92725), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (92723, 92725), False, 'from imgaug.testutils import reseed\n'), ((93834, 93859), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(1)'], {}), '(0, 1)\n', (93853, 93859), True, 'from imgaug import parameters as iap\n'), ((93876, 93902), 'imgaug.parameters.Discretize', 'iap.Discretize', (['param_orig'], {}), '(param_orig)\n', (93890, 93902), True, 'from imgaug import parameters as iap\n'), ((94251, 94276), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (94270, 94276), True, 'from imgaug import parameters as iap\n'), ((94293, 94319), 'imgaug.parameters.Discretize', 'iap.Discretize', (['param_orig'], {}), '(param_orig)\n', (94307, 94319), True, 'from imgaug import parameters as iap\n'), ((94567, 94592), 'imgaug.parameters.DiscreteUniform', 'iap.DiscreteUniform', (['(0)', '(2)'], {}), '(0, 2)\n', (94586, 94592), True, 'from imgaug import parameters as iap\n'), ((94609, 94635), 'imgaug.parameters.Discretize', 'iap.Discretize', (['param_orig'], {}), '(param_orig)\n', (94623, 94635), True, 'from imgaug import parameters as iap\n'), ((94889, 94923), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (94903, 94923), True, 'import numpy as np\n'), ((94994, 95002), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (95000, 95002), False, 'from imgaug.testutils import reseed\n'), ((95398, 95439), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (95415, 95439), False, 'import itertools\n'), ((95997, 96038), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (96014, 96038), False, 'import itertools\n'), ((96623, 96668), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (96640, 96668), False, 'import itertools\n'), ((97309, 97354), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (97326, 97354), False, 'import itertools\n'), ((100415, 100423), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (100421, 100423), False, 'from imgaug.testutils import reseed\n'), ((100799, 100840), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (100816, 100840), False, 'import itertools\n'), ((101466, 101507), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (101483, 101507), False, 'import itertools\n'), ((102157, 102202), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (102174, 102202), False, 'import itertools\n'), ((102972, 103017), 'itertools.product', 
'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (102989, 103017), False, 'import itertools\n'), ((106837, 106845), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (106843, 106845), False, 'from imgaug.testutils import reseed\n'), ((107212, 107253), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (107229, 107253), False, 'import itertools\n'), ((107819, 107860), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (107836, 107860), False, 'import itertools\n'), ((108449, 108494), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (108466, 108494), False, 'import itertools\n'), ((109111, 109156), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (109128, 109156), False, 'import itertools\n'), ((111979, 111987), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (111985, 111987), False, 'from imgaug.testutils import reseed\n'), ((112369, 112410), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (112386, 112410), False, 'import itertools\n'), ((112986, 113027), 'itertools.product', 'itertools.product', (['values_int', 'values_int'], {}), '(values_int, values_int)\n', (113003, 113027), False, 'import itertools\n'), ((113626, 113671), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (113643, 113671), False, 'import itertools\n'), ((114308, 114353), 'itertools.product', 'itertools.product', (['values_float', 'values_float'], {}), '(values_float, values_float)\n', (114325, 114353), False, 'import itertools\n'), ((117340, 117348), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (117346, 117348), False, 'from imgaug.testutils import reseed\n'), ((117854, 117890), 'itertools.product', 'itertools.product', (['values', 'exponents'], {}), '(values, exponents)\n', (117871, 117890), False, 'import itertools\n'), ((118909, 118945), 'itertools.product', 'itertools.product', (['values', 'exponents'], {}), '(values, exponents)\n', (118926, 118945), False, 'import itertools\n'), ((122196, 122204), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (122202, 122204), False, 'from imgaug.testutils import reseed\n'), ((123891, 123899), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (123897, 123899), False, 'from imgaug.testutils import reseed\n'), ((124296, 124316), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (124302, 124316), True, 'import numpy as np\n'), ((124338, 124359), 'numpy.sum', 'np.sum', (['(samples == -1)'], {}), '(samples == -1)\n', (124344, 124359), True, 'import numpy as np\n'), ((124636, 124651), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (124645, 124651), True, 'import six.moves as sm\n'), ((125160, 125181), 'numpy.sum', 'np.sum', (['(samples == -2)'], {}), '(samples == -2)\n', (125166, 125181), True, 'import numpy as np\n'), ((125200, 125221), 'numpy.sum', 'np.sum', (['(samples == -1)'], {}), '(samples == -1)\n', (125206, 125221), True, 'import numpy as np\n'), ((125240, 125260), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (125246, 125260), True, 'import numpy as np\n'), ((125279, 125299), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (125285, 125299), True, 
'import numpy as np\n'), ((125838, 125872), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (125852, 125872), True, 'import numpy as np\n'), ((126114, 126122), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (126120, 126122), False, 'from imgaug.testutils import reseed\n'), ((127154, 127174), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (127160, 127174), True, 'import numpy as np\n'), ((127429, 127450), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (127435, 127450), True, 'import numpy as np\n'), ((127723, 127743), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (127729, 127743), True, 'import numpy as np\n'), ((128017, 128038), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (128023, 128038), True, 'import numpy as np\n'), ((128274, 128294), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (128280, 128294), True, 'import numpy as np\n'), ((128312, 128332), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (128318, 128332), True, 'import numpy as np\n'), ((128723, 128743), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (128729, 128743), True, 'import numpy as np\n'), ((128761, 128781), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (128767, 128781), True, 'import numpy as np\n'), ((129187, 129207), 'numpy.sum', 'np.sum', (['(samples == 2)'], {}), '(samples == 2)\n', (129193, 129207), True, 'import numpy as np\n'), ((129225, 129245), 'numpy.sum', 'np.sum', (['(samples == 1)'], {}), '(samples == 1)\n', (129231, 129245), True, 'import numpy as np\n'), ((129890, 129924), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (129904, 129924), True, 'import numpy as np\n'), ((129995, 130003), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (130001, 130003), False, 'from imgaug.testutils import reseed\n'), ((130291, 130311), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (130297, 130311), True, 'import numpy as np\n'), ((130382, 130390), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (130388, 130390), False, 'from imgaug.testutils import reseed\n'), ((130677, 130698), 'numpy.all', 'np.all', (['(samples == -1)'], {}), '(samples == -1)\n', (130683, 130698), True, 'import numpy as np\n'), ((130785, 130793), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (130791, 130793), False, 'from imgaug.testutils import reseed\n'), ((131860, 131880), 'numpy.all', 'np.all', (['(samples == 1)'], {}), '(samples == 1)\n', (131866, 131880), True, 'import numpy as np\n'), ((132870, 132891), 'numpy.all', 'np.all', (['(samples == 50)'], {}), '(samples == 50)\n', (132876, 132891), True, 'import numpy as np\n'), ((133348, 133368), 'numpy.all', 'np.all', (['(samples == 0)'], {}), '(samples == 0)\n', (133354, 133368), True, 'import numpy as np\n'), ((133491, 133505), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (133500, 133505), True, 'import six.moves as sm\n'), ((134617, 134631), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (134626, 134631), True, 'import six.moves as sm\n'), ((134796, 134863), 'numpy.histogram', 'np.histogram', (['diffs'], {'bins': 'nb_bins', 'range': '(-1.0, 1.0)', 'density': '(False)'}), '(diffs, bins=nb_bins, range=(-1.0, 1.0), density=False)\n', (134808, 134863), True, 'import numpy as np\n'), ((135104, 135118), 'six.moves.xrange', 'sm.xrange', 
(['(400)'], {}), '(400)\n', (135113, 135118), True, 'import six.moves as sm\n'), ((136486, 136506), 'numpy.sum', 'np.sum', (['(samples == 0)'], {}), '(samples == 0)\n', (136492, 136506), True, 'import numpy as np\n'), ((136523, 136544), 'numpy.sum', 'np.sum', (['(samples == 50)'], {}), '(samples == 50)\n', (136529, 136544), True, 'import numpy as np\n'), ((137165, 137196), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (137176, 137196), True, 'import numpy as np\n'), ((138292, 138300), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (138298, 138300), False, 'from imgaug.testutils import reseed\n'), ((140527, 140542), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (140536, 140542), True, 'import six.moves as sm\n'), ((141350, 141365), 'six.moves.xrange', 'sm.xrange', (['(1000)'], {}), '(1000)\n', (141359, 141365), True, 'import six.moves as sm\n'), ((142071, 142115), 'itertools.product', 'itertools.product', (['muls', 'adds', 'vals', 'threshs'], {}), '(muls, adds, vals, threshs)\n', (142088, 142115), False, 'import itertools\n'), ((144020, 144054), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (144034, 144054), True, 'import numpy as np\n'), ((851, 870), 'numpy.finfo', 'np.finfo', (['arr.dtype'], {}), '(arr.dtype)\n', (859, 870), True, 'import numpy as np\n'), ((1599, 1619), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (1616, 1619), True, 'from imgaug import parameters as iap\n'), ((2182, 2292), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test4]"""'], {'value_range': '(2, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test4]', value_range=(2, 12),\n tuple_to_uniform=True, list_to_choice=True)\n", (2209, 2292), True, 'from imgaug import parameters as iap\n'), ((2982, 3094), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test6]"""'], {'value_range': '(None, 0)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test6]', value_range=(None, 0),\n tuple_to_uniform=True, list_to_choice=True)\n", (3009, 3094), True, 'from imgaug import parameters as iap\n'), ((3784, 3896), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test8]"""'], {'value_range': '(2, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test8]', value_range=(2, None),\n tuple_to_uniform=True, list_to_choice=True)\n", (3811, 3896), True, 'from imgaug import parameters as iap\n'), ((4195, 4308), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test9]"""'], {'value_range': 'None', 'tuple_to_uniform': '(False)', 'list_to_choice': '(True)'}), "((1, 2), '[test9]', value_range=None,\n tuple_to_uniform=False, list_to_choice=True)\n", (4222, 4308), True, 'from imgaug import parameters as iap\n'), ((5393, 5511), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test12]"""'], {'value_range': '(1.5, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "((1, 2), '[test12]', value_range=(1.5, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (5420, 5511), True, 'from imgaug import parameters as iap\n'), ((5872, 5988), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1, 2)', '"""[test13]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': 
'(True)'}), "((1, 2), '[test13]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (5899, 5988), True, 'from imgaug import parameters as iap\n'), ((6283, 6400), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2, 3]', '"""[test14]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(False)'}), "([1, 2, 3], '[test14]', value_range=None,\n tuple_to_uniform=True, list_to_choice=False)\n", (6310, 6400), True, 'from imgaug import parameters as iap\n'), ((7094, 7212), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2]', '"""[test16]"""'], {'value_range': '(1.5, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "([1, 2], '[test16]', value_range=(1.5, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (7121, 7212), True, 'from imgaug import parameters as iap\n'), ((7562, 7678), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['[1, 2]', '"""[test17]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "([1, 2], '[test17]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True)\n", (7589, 7678), True, 'from imgaug import parameters as iap\n'), ((8402, 8511), 'imgaug.parameters.handle_continuous_param', 'iap.handle_continuous_param', (['(1)', '"""[test19]"""'], {'value_range': '(False)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test19]', value_range=False,\n tuple_to_uniform=True, list_to_choice=True)\n", (8429, 8511), True, 'from imgaug import parameters as iap\n'), ((8943, 9070), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1.5)', '"""[test0]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(False)'}), "(1.5, '[test0]', value_range=None,\n tuple_to_uniform=True, list_to_choice=True, allow_floats=False)\n", (8968, 9070), True, 'from imgaug import parameters as iap\n'), ((9972, 9992), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (9989, 9992), True, 'from imgaug import parameters as iap\n'), ((10627, 10754), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test4]"""'], {'value_range': '(2, 12)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test4]', value_range=(2, 12),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (10652, 10754), True, 'from imgaug import parameters as iap\n'), ((11388, 11517), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test6]"""'], {'value_range': '(None, 0)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test6]', value_range=(None, 0),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (11413, 11517), True, 'from imgaug import parameters as iap\n'), ((12151, 12280), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test8]"""'], {'value_range': '(2, None)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "(1, '[test8]', value_range=(2, None),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (12176, 12280), True, 'from imgaug import parameters as iap\n'), ((12547, 12677), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test9]"""'], {'value_range': 'None', 'tuple_to_uniform': 
'(False)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test9]', value_range=None,\n tuple_to_uniform=False, list_to_choice=True, allow_floats=True)\n", (12572, 12677), True, 'from imgaug import parameters as iap\n'), ((14145, 14278), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 3)', '"""[test12]"""'], {'value_range': '(2, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 3), '[test12]', value_range=(2, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (14170, 14278), True, 'from imgaug import parameters as iap\n'), ((14607, 14740), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1, 2)', '"""[test13]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "((1, 2), '[test13]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (14632, 14740), True, 'from imgaug import parameters as iap\n'), ((14995, 15129), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 2, 3]', '"""[test14]"""'], {'value_range': 'None', 'tuple_to_uniform': '(True)', 'list_to_choice': '(False)', 'allow_floats': '(True)'}), "([1, 2, 3], '[test14]', value_range=None,\n tuple_to_uniform=True, list_to_choice=False, allow_floats=True)\n", (15020, 15129), True, 'from imgaug import parameters as iap\n'), ((15765, 15898), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 3]', '"""[test16]"""'], {'value_range': '(2, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "([1, 3], '[test16]', value_range=(2, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (15790, 15898), True, 'from imgaug import parameters as iap\n'), ((16213, 16346), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['[1, 2]', '"""[test17]"""'], {'value_range': '(3, 13)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)', 'allow_floats': '(True)'}), "([1, 2], '[test17]', value_range=(3, 13),\n tuple_to_uniform=True, list_to_choice=True, allow_floats=True)\n", (16238, 16346), True, 'from imgaug import parameters as iap\n'), ((17020, 17127), 'imgaug.parameters.handle_discrete_param', 'iap.handle_discrete_param', (['(1)', '"""[test19]"""'], {'value_range': '(False)', 'tuple_to_uniform': '(True)', 'list_to_choice': '(True)'}), "(1, '[test19]', value_range=False,\n tuple_to_uniform=True, list_to_choice=True)\n", (17045, 17127), True, 'from imgaug import parameters as iap\n'), ((18008, 18074), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (['"""class3"""', '"""foo"""', 'valid_values'], {}), "('class3', 'foo', valid_values)\n", (18043, 18074), True, 'from imgaug import parameters as iap\n'), ((18736, 18811), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (["['class1', False]", '"""foo"""', 'valid_values'], {}), "(['class1', False], 'foo', valid_values)\n", (18771, 18811), True, 'from imgaug import parameters as iap\n'), ((19198, 19276), 'imgaug.parameters.handle_categorical_string_param', 'iap.handle_categorical_string_param', (["['class1', 'class4']", '"""foo"""', 'valid_values'], {}), "(['class1', 'class4'], 'foo', valid_values)\n", (19233, 19276), True, 'from imgaug import parameters as iap\n'), ((19890, 19951), 'imgaug.parameters.handle_categorical_string_param', 
'iap.handle_categorical_string_param', (['(False)', '"""foo"""', "['class1']"], {}), "(False, 'foo', ['class1'])\n", (19925, 19951), True, 'from imgaug import parameters as iap\n'), ((21108, 21155), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['"""test"""', '"""[test4]"""'], {}), "('test', '[test4]')\n", (21136, 21155), True, 'from imgaug import parameters as iap\n'), ((21329, 21375), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['(-0.01)', '"""[test5]"""'], {}), "(-0.01, '[test5]')\n", (21357, 21375), True, 'from imgaug import parameters as iap\n'), ((21495, 21540), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['(1.01)', '"""[test6]"""'], {}), "(1.01, '[test6]')\n", (21523, 21540), True, 'from imgaug import parameters as iap\n'), ((23378, 23389), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (23387, 23389), False, 'import mock\n'), ((23391, 23402), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (23400, 23402), False, 'import mock\n'), ((23743, 23796), 'mock.patch', 'mock.patch', (['"""imgaug.imgaug.draw_grid"""', 'draw_grid_mock'], {}), "('imgaug.imgaug.draw_grid', draw_grid_mock)\n", (23753, 23796), False, 'import mock\n'), ((23826, 23953), 'imgaug.parameters.draw_distributions_grid', 'iap.draw_distributions_grid', (['params'], {'rows': '(2)', 'cols': '(3)', 'graph_sizes': '(20, 21)', 'sample_sizes': '[(1, 2), (3, 4)]', 'titles': "['A', 'B']"}), "(params, rows=2, cols=3, graph_sizes=(20, 21),\n sample_sizes=[(1, 2), (3, 4)], titles=['A', 'B'])\n", (23853, 23953), True, 'from imgaug import parameters as iap\n'), ((25756, 25798), 'numpy.array_equal', 'np.array_equal', (['graph_img_title', 'graph_img'], {}), '(graph_img_title, graph_img)\n', (25770, 25798), True, 'import numpy as np\n'), ((39066, 39090), 'imgaug.parameters.Choice', 'iap.Choice', (['[0.25, 0.75]'], {}), '([0.25, 0.75])\n', (39076, 39090), True, 'from imgaug import parameters as iap\n'), ((41143, 41163), 'numpy.sum', 'np.sum', (['(samples == v)'], {}), '(samples == v)\n', (41149, 41163), True, 'import numpy as np\n'), ((41634, 41676), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 1)'], {}), '(samples == -1, samples == 1)\n', (41647, 41676), True, 'import numpy as np\n'), ((45012, 45027), 'imgaug.parameters.Choice', 'iap.Choice', (['(123)'], {}), '(123)\n', (45022, 45027), True, 'from imgaug import parameters as iap\n'), ((45233, 45258), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {'p': '(123)'}), '([1, 2], p=123)\n', (45243, 45258), True, 'from imgaug import parameters as iap\n'), ((45454, 45479), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {'p': '[1]'}), '([1, 2], p=[1])\n', (45464, 45479), True, 'from imgaug import parameters as iap\n'), ((46577, 46597), 'numpy.sum', 'np.sum', (['(samples == v)'], {}), '(samples == v)\n', (46583, 46597), True, 'import numpy as np\n'), ((49655, 49675), 'numpy.sum', 'np.sum', (['(samples == i)'], {}), '(samples == i)\n', (49661, 49675), True, 'import numpy as np\n'), ((51724, 51747), 'imgaug.parameters.Choice', 'iap.Choice', (['[-100, 100]'], {}), '([-100, 100])\n', (51734, 51747), True, 'from imgaug import parameters as iap\n'), ((51876, 51892), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (51883, 51892), True, 'import numpy as np\n'), ((52391, 52407), 'numpy.std', 'np.std', (['samples1'], {}), '(samples1)\n', (52397, 52407), True, 'import numpy as np\n'), ((52410, 52426), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', 
(52416, 52426), True, 'import numpy as np\n'), ((52453, 52469), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (52459, 52469), True, 'import numpy as np\n'), ((54306, 54322), 'numpy.std', 'np.std', (['samples1'], {}), '(samples1)\n', (54312, 54322), True, 'import numpy as np\n'), ((54325, 54341), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (54331, 54341), True, 'import numpy as np\n'), ((54368, 54384), 'numpy.std', 'np.std', (['samples1'], {}), '(samples1)\n', (54374, 54384), True, 'import numpy as np\n'), ((54436, 54452), 'numpy.std', 'np.std', (['samples2'], {}), '(samples2)\n', (54442, 54452), True, 'import numpy as np\n'), ((54563, 54586), 'imgaug.parameters.Choice', 'iap.Choice', (['[-100, 100]'], {}), '([-100, 100])\n', (54573, 54586), True, 'from imgaug import parameters as iap\n'), ((54778, 54794), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (54785, 54794), True, 'import numpy as np\n'), ((54816, 54839), 'numpy.abs', 'np.abs', (['(-100 - observed)'], {}), '(-100 - observed)\n', (54822, 54839), True, 'import numpy as np\n'), ((54860, 54882), 'numpy.abs', 'np.abs', (['(100 - observed)'], {}), '(100 - observed)\n', (54866, 54882), True, 'import numpy as np\n'), ((56195, 56226), 'numpy.allclose', 'np.allclose', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (56206, 56226), True, 'import numpy as np\n'), ((57823, 57846), 'imgaug.parameters.Choice', 'iap.Choice', (['[-100, 100]'], {}), '([-100, 100])\n', (57833, 57846), True, 'from imgaug import parameters as iap\n'), ((57975, 57991), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (57982, 57991), True, 'import numpy as np\n'), ((58493, 58509), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (58499, 58509), True, 'import numpy as np\n'), ((58512, 58528), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (58518, 58528), True, 'import numpy as np\n'), ((60773, 60792), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 10]'], {}), '([1, 10])\n', (60783, 60792), True, 'from imgaug import parameters as iap\n'), ((60918, 60934), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (60925, 60934), True, 'import numpy as np\n'), ((61456, 61472), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (61462, 61472), True, 'import numpy as np\n'), ((61475, 61491), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (61481, 61491), True, 'import numpy as np\n'), ((61519, 61535), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (61525, 61535), True, 'import numpy as np\n'), ((61576, 61592), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (61582, 61592), True, 'import numpy as np\n'), ((63587, 63607), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 0.5]'], {}), '([1, 0.5])\n', (63597, 63607), True, 'from imgaug import parameters as iap\n'), ((63850, 63866), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (63857, 63866), True, 'import numpy as np\n'), ((64736, 64766), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 2 / 1)'], {}), '(1 + 2 / 1)\n', (64755, 64766), False, 'import scipy\n'), ((64863, 64895), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 2 / 0.5)'], {}), '(1 + 2 / 0.5)\n', (64882, 64895), False, 'import scipy\n'), ((64970, 64986), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (64976, 64986), True, 'import numpy as np\n'), ((64989, 65005), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (64995, 65005), True, 'import numpy as np\n'), ((65087, 65103), 'numpy.var', 'np.var', 
(['samples1'], {}), '(samples1)\n', (65093, 65103), True, 'import numpy as np\n'), ((65249, 65265), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (65255, 65265), True, 'import numpy as np\n'), ((71480, 71500), 'imgaug.parameters.Choice', 'iap.Choice', (['[0.5, 2]'], {}), '([0.5, 2])\n', (71490, 71500), True, 'from imgaug import parameters as iap\n'), ((71728, 71744), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (71735, 71744), True, 'import numpy as np\n'), ((72418, 72434), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (72424, 72434), True, 'import numpy as np\n'), ((72437, 72453), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (72443, 72453), True, 'import numpy as np\n'), ((72535, 72551), 'numpy.var', 'np.var', (['samples1'], {}), '(samples1)\n', (72541, 72551), True, 'import numpy as np\n'), ((72697, 72713), 'numpy.var', 'np.var', (['samples2'], {}), '(samples2)\n', (72703, 72713), True, 'import numpy as np\n'), ((76900, 76928), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (76917, 76928), True, 'from imgaug import parameters as iap\n'), ((78189, 78206), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (78201, 78206), True, 'from imgaug import parameters as iap\n'), ((78492, 78509), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (78504, 78509), True, 'from imgaug import parameters as iap\n'), ((78888, 78905), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (78900, 78905), True, 'from imgaug import parameters as iap\n'), ((79256, 79273), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (79268, 79273), True, 'from imgaug import parameters as iap\n'), ((79606, 79623), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (79618, 79623), True, 'from imgaug import parameters as iap\n'), ((79677, 79694), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (79689, 79694), True, 'from imgaug import parameters as iap\n'), ((79938, 80024), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (79962, 80024), False, 'import skimage\n'), ((80089, 80175), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (80113, 80175), False, 'import skimage\n'), ((80326, 80347), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (80332, 80347), True, 'import numpy as np\n'), ((80378, 80399), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (80384, 80399), True, 'import numpy as np\n'), ((80741, 80758), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (80753, 80758), True, 'from imgaug import parameters as iap\n'), ((80812, 80829), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (80824, 80829), True, 'from imgaug import parameters as iap\n'), ((81078, 81164), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (81102, 81164), False, 'import skimage\n'), ((81229, 81315), 'skimage.morphology.label', 'skimage.morphology.label', 
(['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (81253, 81315), False, 'import skimage\n'), ((81466, 81487), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (81472, 81487), True, 'import numpy as np\n'), ((81518, 81539), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (81524, 81539), True, 'import numpy as np\n'), ((81899, 81916), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (81911, 81916), True, 'from imgaug import parameters as iap\n'), ((82030, 82047), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (82042, 82047), True, 'from imgaug import parameters as iap\n'), ((82349, 82435), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (82373, 82435), False, 'import skimage\n'), ((82500, 82586), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (82524, 82586), False, 'import skimage\n'), ((82737, 82758), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (82743, 82758), True, 'import numpy as np\n'), ((82789, 82810), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (82795, 82810), True, 'import numpy as np\n'), ((83359, 83376), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (83371, 83376), True, 'from imgaug import parameters as iap\n'), ((83430, 83447), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (83442, 83447), True, 'from imgaug import parameters as iap\n'), ((83744, 83830), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (83768, 83830), False, 'import skimage\n'), ((83895, 83981), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (83919, 83981), False, 'import skimage\n'), ((84132, 84153), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (84138, 84153), True, 'import numpy as np\n'), ((84184, 84205), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (84190, 84205), True, 'import numpy as np\n'), ((84499, 84516), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (84511, 84516), True, 'from imgaug import parameters as iap\n'), ((84578, 84595), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (84590, 84595), True, 'from imgaug import parameters as iap\n'), ((84845, 84931), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (84869, 84931), False, 'import skimage\n'), ((84996, 85082), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (85020, 85082), False, 'import 
skimage\n'), ((85233, 85254), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (85239, 85254), True, 'import numpy as np\n'), ((85285, 85306), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (85291, 85306), True, 'import numpy as np\n'), ((85655, 85672), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (85667, 85672), True, 'from imgaug import parameters as iap\n'), ((85794, 85811), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (85806, 85811), True, 'from imgaug import parameters as iap\n'), ((86121, 86207), 'skimage.morphology.label', 'skimage.morphology.label', (['samples1'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples1, connectivity=1, background=0, return_num\n =True)\n', (86145, 86207), False, 'import skimage\n'), ((86272, 86358), 'skimage.morphology.label', 'skimage.morphology.label', (['samples2'], {'connectivity': '(1)', 'background': '(0)', 'return_num': '(True)'}), '(samples2, connectivity=1, background=0, return_num\n =True)\n', (86296, 86358), False, 'import skimage\n'), ((86509, 86530), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (86515, 86530), True, 'import numpy as np\n'), ((86561, 86582), 'numpy.sum', 'np.sum', (['(samples2 == 1)'], {}), '(samples2 == 1)\n', (86567, 86582), True, 'import numpy as np\n'), ((87183, 87200), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (87195, 87200), True, 'from imgaug import parameters as iap\n'), ((88148, 88165), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (88160, 88165), True, 'from imgaug import parameters as iap\n'), ((88599, 88619), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (88616, 88619), True, 'from imgaug import parameters as iap\n'), ((88846, 88866), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (88863, 88866), True, 'from imgaug import parameters as iap\n'), ((89177, 89197), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (89194, 89197), True, 'from imgaug import parameters as iap\n'), ((89508, 89529), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (89525, 89529), True, 'from imgaug import parameters as iap\n'), ((89846, 89868), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0.5)'], {}), '(0.5)\n', (89863, 89868), True, 'from imgaug import parameters as iap\n'), ((90346, 90366), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(2)'], {}), '(2)\n', (90363, 90366), True, 'from imgaug import parameters as iap\n'), ((90675, 90696), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-2)'], {}), '(-2)\n', (90692, 90696), True, 'from imgaug import parameters as iap\n'), ((91031, 91049), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 2]'], {}), '([0, 2])\n', (91041, 91049), True, 'from imgaug import parameters as iap\n'), ((91276, 91317), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (91289, 91317), True, 'import numpy as np\n'), ((91400, 91418), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 2]'], {}), '([0, 2])\n', (91410, 91418), True, 'from imgaug import parameters as iap\n'), ((91781, 91801), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (91798, 91801), True, 'from imgaug import parameters as iap\n'), ((92091, 92111), 
'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (92108, 92111), True, 'from imgaug import parameters as iap\n'), ((92402, 92422), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (92419, 92422), True, 'from imgaug import parameters as iap\n'), ((92787, 92807), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (92804, 92807), True, 'from imgaug import parameters as iap\n'), ((94121, 94162), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (94134, 94162), True, 'import numpy as np\n'), ((95062, 95082), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (95079, 95082), True, 'from imgaug import parameters as iap\n'), ((97927, 97949), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (97944, 97949), True, 'from imgaug import parameters as iap\n'), ((98569, 98591), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (98586, 98591), True, 'from imgaug import parameters as iap\n'), ((99214, 99235), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (99225, 99235), True, 'from imgaug import parameters as iap\n'), ((99863, 99884), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (99874, 99884), True, 'from imgaug import parameters as iap\n'), ((100481, 100501), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (100498, 100501), True, 'from imgaug import parameters as iap\n'), ((103730, 103752), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (103747, 103752), True, 'from imgaug import parameters as iap\n'), ((104348, 104370), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (104365, 104370), True, 'from imgaug import parameters as iap\n'), ((104963, 104984), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (104974, 104984), True, 'from imgaug import parameters as iap\n'), ((105583, 105604), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (105594, 105604), True, 'from imgaug import parameters as iap\n'), ((106357, 106375), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 2]'], {}), '([0, 2])\n', (106367, 106375), True, 'from imgaug import parameters as iap\n'), ((106662, 106682), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (106679, 106682), True, 'from imgaug import parameters as iap\n'), ((106900, 106920), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (106917, 106920), True, 'from imgaug import parameters as iap\n'), ((109711, 109733), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (109728, 109733), True, 'from imgaug import parameters as iap\n'), ((110284, 110306), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (110301, 110306), True, 'from imgaug import parameters as iap\n'), ((110851, 110872), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (110862, 110872), True, 'from imgaug import parameters as iap\n'), ((111423, 111444), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (111434, 111444), True, 'from imgaug import parameters as iap\n'), ((112047, 112067), 
'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (112064, 112067), True, 'from imgaug import parameters as iap\n'), ((114933, 114955), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (114950, 114955), True, 'from imgaug import parameters as iap\n'), ((115572, 115594), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.0)'], {}), '(1.0)\n', (115589, 115594), True, 'from imgaug import parameters as iap\n'), ((116207, 116228), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (116218, 116228), True, 'from imgaug import parameters as iap\n'), ((116789, 116810), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (116800, 116810), True, 'from imgaug import parameters as iap\n'), ((117405, 117425), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (117422, 117425), True, 'from imgaug import parameters as iap\n'), ((119800, 119822), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.5)'], {}), '(1.5)\n', (119817, 119822), True, 'from imgaug import parameters as iap\n'), ((120443, 120465), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1.5)'], {}), '(1.5)\n', (120460, 120465), True, 'from imgaug import parameters as iap\n'), ((121061, 121082), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (121072, 121082), True, 'from imgaug import parameters as iap\n'), ((121632, 121653), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (121643, 121653), True, 'from imgaug import parameters as iap\n'), ((122264, 122284), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (122281, 122284), True, 'from imgaug import parameters as iap\n'), ((123451, 123477), 'imgaug.parameters.Choice', 'iap.Choice', (['[-3, -1, 1, 3]'], {}), '([-3, -1, 1, 3])\n', (123461, 123477), True, 'from imgaug import parameters as iap\n'), ((123593, 123611), 'numpy.unique', 'np.unique', (['samples'], {}), '(samples)\n', (123602, 123611), True, 'import numpy as np\n'), ((123961, 123981), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (123978, 123981), True, 'from imgaug import parameters as iap\n'), ((124206, 124226), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (124223, 124226), True, 'from imgaug import parameters as iap\n'), ((124574, 124594), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (124591, 124594), True, 'from imgaug import parameters as iap\n'), ((125047, 125065), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {}), '([1, 2])\n', (125057, 125065), True, 'from imgaug import parameters as iap\n'), ((125316, 125328), 'numpy.sum', 'np.sum', (['seen'], {}), '(seen)\n', (125322, 125328), True, 'import numpy as np\n'), ((125475, 125493), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 2]'], {}), '([1, 2])\n', (125485, 125493), True, 'from imgaug import parameters as iap\n'), ((125888, 125910), 'numpy.sum', 'np.sum', (['(samples1 == -2)'], {}), '(samples1 == -2)\n', (125894, 125910), True, 'import numpy as np\n'), ((125931, 125953), 'numpy.sum', 'np.sum', (['(samples1 == -1)'], {}), '(samples1 == -1)\n', (125937, 125953), True, 'import numpy as np\n'), ((125974, 125995), 'numpy.sum', 'np.sum', (['(samples1 == 1)'], {}), '(samples1 == 1)\n', (125980, 125995), True, 'import numpy as np\n'), ((126016, 126037), 'numpy.sum', 'np.sum', 
(['(samples1 == 2)'], {}), '(samples1 == 2)\n', (126022, 126037), True, 'import numpy as np\n'), ((126183, 126203), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (126200, 126203), True, 'from imgaug import parameters as iap\n'), ((126451, 126471), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (126468, 126471), True, 'from imgaug import parameters as iap\n'), ((126712, 126732), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (126729, 126732), True, 'from imgaug import parameters as iap\n'), ((126974, 126994), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (126991, 126994), True, 'from imgaug import parameters as iap\n'), ((127248, 127268), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (127265, 127268), True, 'from imgaug import parameters as iap\n'), ((127542, 127563), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (127559, 127563), True, 'from imgaug import parameters as iap\n'), ((127835, 127856), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (127852, 127856), True, 'from imgaug import parameters as iap\n'), ((128132, 128151), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (128142, 128151), True, 'from imgaug import parameters as iap\n'), ((128581, 128600), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (128591, 128600), True, 'from imgaug import parameters as iap\n'), ((129024, 129043), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (129034, 129043), True, 'from imgaug import parameters as iap\n'), ((129436, 129455), 'imgaug.parameters.Choice', 'iap.Choice', (['[-2, 1]'], {}), '([-2, 1])\n', (129446, 129455), True, 'from imgaug import parameters as iap\n'), ((130074, 130095), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(-1)'], {}), '(-1)\n', (130091, 130095), True, 'from imgaug import parameters as iap\n'), ((130461, 130481), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (130478, 130481), True, 'from imgaug import parameters as iap\n'), ((130869, 130889), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (130886, 130889), True, 'from imgaug import parameters as iap\n'), ((131504, 131524), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (131521, 131524), True, 'from imgaug import parameters as iap\n'), ((131980, 131999), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (131990, 131999), True, 'from imgaug import parameters as iap\n'), ((132359, 132411), 'numpy.logical_and', 'np.logical_and', (['(25 - 10 < samples)', '(samples < 25 + 10)'], {}), '(25 - 10 < samples, samples < 25 + 10)\n', (132373, 132411), True, 'import numpy as np\n'), ((132512, 132531), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (132522, 132531), True, 'from imgaug import parameters as iap\n'), ((132991, 133010), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (133001, 133010), True, 'from imgaug import parameters as iap\n'), ((134485, 134507), 'imgaug.parameters.Uniform', 'iap.Uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (134496, 134507), True, 'from imgaug import parameters as iap\n'), ((135990, 136009), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (136000, 136009), 
True, 'from imgaug import parameters as iap\n'), ((136361, 136380), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (136371, 136380), True, 'from imgaug import parameters as iap\n'), ((136761, 136780), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (136771, 136780), True, 'from imgaug import parameters as iap\n'), ((137315, 137334), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (137325, 137334), True, 'from imgaug import parameters as iap\n'), ((138372, 138392), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (138389, 138392), True, 'from imgaug import parameters as iap\n'), ((138975, 138995), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(5)'], {}), '(5)\n', (138992, 138995), True, 'from imgaug import parameters as iap\n'), ((139643, 139663), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(5)'], {}), '(5)\n', (139660, 139663), True, 'from imgaug import parameters as iap\n'), ((140283, 140303), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(5)'], {}), '(5)\n', (140300, 140303), True, 'from imgaug import parameters as iap\n'), ((141070, 141089), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 10]'], {}), '([1, 10])\n', (141080, 141089), True, 'from imgaug import parameters as iap\n'), ((143563, 143582), 'imgaug.parameters.Choice', 'iap.Choice', (['[1, 10]'], {}), '([1, 10])\n', (143573, 143582), True, 'from imgaug import parameters as iap\n'), ((20285, 20329), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['val', '"""[test1]"""'], {}), "(val, '[test1]')\n", (20313, 20329), True, 'from imgaug import parameters as iap\n'), ((20605, 20649), 'imgaug.parameters.handle_probability_param', 'iap.handle_probability_param', (['val', '"""[test2]"""'], {}), "(val, '[test2]')\n", (20633, 20649), True, 'from imgaug import parameters as iap\n'), ((21947, 21977), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'dtype_in'}), '((1,), dtype=dtype_in)\n', (21955, 21977), True, 'import numpy as np\n'), ((39190, 39205), 'numpy.sum', 'np.sum', (['samples'], {}), '(samples)\n', (39196, 39205), True, 'import numpy as np\n'), ((39965, 39983), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (39977, 39983), True, 'import imgaug.random as iarandom\n'), ((40083, 40101), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (40095, 40101), True, 'import imgaug.random as iarandom\n'), ((40783, 40824), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', '(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (40796, 40824), True, 'import numpy as np\n'), ((42983, 43037), 'numpy.logical_or', 'np.logical_or', (["(samples == 'first')", "(samples == 'second')"], {}), "(samples == 'first', samples == 'second')\n", (42996, 43037), True, 'import numpy as np\n'), ((43347, 43361), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (43356, 43361), True, 'import six.moves as sm\n'), ((44080, 44098), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1]'], {}), '([0, 1])\n', (44090, 44098), True, 'from imgaug import parameters as iap\n'), ((44710, 44728), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (44722, 44728), True, 'import imgaug.random as iarandom\n'), ((44828, 44846), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (44840, 44846), True, 'import imgaug.random as iarandom\n'), ((46220, 46261), 'numpy.logical_or', 'np.logical_or', (['(samples == 0)', 
'(samples == 1)'], {}), '(samples == 0, samples == 1)\n', (46233, 46261), True, 'import numpy as np\n'), ((47105, 47147), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 0)'], {}), '(samples == -1, samples == 0)\n', (47118, 47147), True, 'import numpy as np\n'), ((47554, 47596), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 0)'], {}), '(samples == -1, samples == 0)\n', (47567, 47596), True, 'import numpy as np\n'), ((48058, 48100), 'numpy.logical_or', 'np.logical_or', (['(samples == -1)', '(samples == 0)'], {}), '(samples == -1, samples == 0)\n', (48071, 48100), True, 'import numpy as np\n'), ((48628, 48646), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (48640, 48646), True, 'import imgaug.random as iarandom\n'), ((48746, 48764), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (48758, 48764), True, 'import imgaug.random as iarandom\n'), ((49416, 49434), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (49428, 49434), True, 'import imgaug.random as iarandom\n'), ((49606, 49633), 'numpy.sum', 'np.sum', (['(samples_direct == i)'], {}), '(samples_direct == i)\n', (49612, 49633), True, 'import numpy as np\n'), ((49994, 50012), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (50006, 50012), True, 'import imgaug.random as iarandom\n'), ((50112, 50130), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (50124, 50130), True, 'import imgaug.random as iarandom\n'), ((50790, 50808), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (50802, 50808), True, 'import imgaug.random as iarandom\n'), ((52669, 52687), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (52681, 52687), True, 'import imgaug.random as iarandom\n'), ((52787, 52805), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (52799, 52805), True, 'import imgaug.random as iarandom\n'), ((55632, 55647), 'numpy.abs', 'np.abs', (['samples'], {}), '(samples)\n', (55638, 55647), True, 'import numpy as np\n'), ((56840, 56858), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (56852, 56858), True, 'import imgaug.random as iarandom\n'), ((58966, 58984), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (58978, 58984), True, 'import imgaug.random as iarandom\n'), ((59084, 59102), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (59096, 59102), True, 'import imgaug.random as iarandom\n'), ((59767, 59785), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (59779, 59785), True, 'import imgaug.random as iarandom\n'), ((61794, 61812), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (61806, 61812), True, 'import imgaug.random as iarandom\n'), ((61912, 61930), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (61924, 61930), True, 'import imgaug.random as iarandom\n'), ((62582, 62600), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (62594, 62600), True, 'import imgaug.random as iarandom\n'), ((64780, 64810), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 1)'], {}), '(1 + 1 / 1)\n', (64799, 64810), False, 'import scipy\n'), ((64909, 64941), 'scipy.special.gamma', 'scipy.special.gamma', (['(1 + 1 / 0.5)'], {}), '(1 + 1 / 0.5)\n', (64928, 64941), False, 'import scipy\n'), ((65516, 65534), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (65528, 65534), True, 'import imgaug.random as iarandom\n'), ((65634, 65652), 
'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (65646, 65652), True, 'import imgaug.random as iarandom\n'), ((69231, 69249), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (69243, 69249), True, 'import imgaug.random as iarandom\n'), ((69349, 69367), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (69361, 69367), True, 'import imgaug.random as iarandom\n'), ((70697, 70715), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (70709, 70715), True, 'import imgaug.random as iarandom\n'), ((72968, 72986), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (72980, 72986), True, 'import imgaug.random as iarandom\n'), ((73086, 73104), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (73098, 73104), True, 'import imgaug.random as iarandom\n'), ((73541, 73565), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (73558, 73565), True, 'from imgaug import parameters as iap\n'), ((74006, 74030), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (74023, 74030), True, 'from imgaug import parameters as iap\n'), ((74054, 74074), 'imgaug.random.RNG', 'iarandom.RNG', (['(123456)'], {}), '(123456)\n', (74066, 74074), True, 'import imgaug.random as iarandom\n'), ((74097, 74117), 'imgaug.random.RNG', 'iarandom.RNG', (['(123456)'], {}), '(123456)\n', (74109, 74117), True, 'import imgaug.random as iarandom\n'), ((74279, 74313), 'numpy.array_equal', 'np.array_equal', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (74293, 74313), True, 'import numpy as np\n'), ((74496, 74520), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (74513, 74520), True, 'from imgaug import parameters as iap\n'), ((74913, 74937), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (74930, 74937), True, 'from imgaug import parameters as iap\n'), ((75475, 75499), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (75492, 75499), True, 'from imgaug import parameters as iap\n'), ((75782, 75806), 'numpy.all', 'np.all', (['(samples == value)'], {}), '(samples == value)\n', (75788, 75806), True, 'import numpy as np\n'), ((76127, 76151), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (76144, 76151), True, 'from imgaug import parameters as iap\n'), ((76637, 76655), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 1]'], {}), '([0, 1])\n', (76647, 76655), True, 'from imgaug import parameters as iap\n'), ((77249, 77269), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (77266, 77269), True, 'from imgaug import parameters as iap\n'), ((77732, 77752), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0)'], {}), '(0)\n', (77749, 77752), True, 'from imgaug import parameters as iap\n'), ((81967, 81987), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(1)'], {}), '(1)\n', (81984, 81987), True, 'from imgaug import parameters as iap\n'), ((82098, 82117), 'imgaug.parameters.Choice', 'iap.Choice', (['[8, 16]'], {}), '([8, 16])\n', (82108, 82117), True, 'from imgaug import parameters as iap\n'), ((83171, 83188), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (83183, 83188), True, 'from imgaug import parameters as iap\n'), ((85728, 85751), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['(0.01)'], {}), '(0.01)\n', (85745, 85751), True, 'from imgaug 
import parameters as iap\n'), ((85867, 85889), 'imgaug.parameters.Choice', 'iap.Choice', (['[0.4, 0.8]'], {}), '([0.4, 0.8])\n', (85877, 85889), True, 'from imgaug import parameters as iap\n'), ((86953, 86970), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (86965, 86970), True, 'from imgaug import parameters as iap\n'), ((87232, 87265), 'imgaug.parameters.Choice', 'iap.Choice', (["['nearest', 'linear']"], {}), "(['nearest', 'linear'])\n", (87242, 87265), True, 'from imgaug import parameters as iap\n'), ((87429, 87475), 'numpy.logical_and', 'np.logical_and', (['(0.05 < samples)', '(samples < 0.95)'], {}), '(0.05 < samples, samples < 0.95)\n', (87443, 87475), True, 'import numpy as np\n'), ((87856, 87873), 'imgaug.parameters.Binomial', 'iap.Binomial', (['(0.5)'], {}), '(0.5)\n', (87868, 87873), True, 'from imgaug import parameters as iap\n'), ((88280, 88298), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (88292, 88298), True, 'import imgaug.random as iarandom\n'), ((88401, 88419), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (88413, 88419), True, 'import imgaug.random as iarandom\n'), ((91526, 91544), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (91538, 91544), True, 'import imgaug.random as iarandom\n'), ((91644, 91662), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (91656, 91662), True, 'import imgaug.random as iarandom\n'), ((93652, 93685), 'numpy.all', 'np.all', (['(samples == value_expected)'], {}), '(samples == value_expected)\n', (93658, 93685), True, 'import numpy as np\n'), ((94445, 94472), 'numpy.abs', 'np.abs', (['(samples1 - samples2)'], {}), '(samples1 - samples2)\n', (94451, 94472), True, 'import numpy as np\n'), ((94735, 94753), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (94747, 94753), True, 'import imgaug.random as iarandom\n'), ((94853, 94871), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (94865, 94871), True, 'import imgaug.random as iarandom\n'), ((96895, 96942), 'numpy.isclose', 'np.isclose', (['sample', '(v1 * v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 * v2, atol=0.001, rtol=0)\n', (96905, 96942), True, 'import numpy as np\n'), ((97600, 97647), 'numpy.isclose', 'np.isclose', (['sample', '(v1 * v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 * v2, atol=0.001, rtol=0)\n', (97610, 97647), True, 'import numpy as np\n'), ((108716, 108763), 'numpy.isclose', 'np.isclose', (['sample', '(v1 + v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 + v2, atol=0.001, rtol=0)\n', (108726, 108763), True, 'import numpy as np\n'), ((109397, 109444), 'numpy.isclose', 'np.isclose', (['sample', '(v1 + v2)'], {'atol': '(0.001)', 'rtol': '(0)'}), '(sample, v1 + v2, atol=0.001, rtol=0)\n', (109407, 109444), True, 'import numpy as np\n'), ((117920, 117948), 'imgaug.is_single_float', 'ia.is_single_float', (['exponent'], {}), '(exponent)\n', (117938, 117948), True, 'import imgaug as ia\n'), ((118975, 119003), 'imgaug.is_single_float', 'ia.is_single_float', (['exponent'], {}), '(exponent)\n', (118993, 119003), True, 'import imgaug as ia\n'), ((122887, 122912), 'imgaug.is_single_float', 'ia.is_single_float', (['value'], {}), '(value)\n', (122905, 122912), True, 'import imgaug as ia\n'), ((125596, 125614), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (125608, 125614), True, 'import imgaug.random as iarandom\n'), ((125716, 125734), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), 
'(1234)\n', (125728, 125734), True, 'import imgaug.random as iarandom\n'), ((129648, 129666), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (129660, 129666), True, 'import imgaug.random as iarandom\n'), ((129768, 129786), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (129780, 129786), True, 'import imgaug.random as iarandom\n'), ((133573, 133592), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (133583, 133592), True, 'from imgaug import parameters as iap\n'), ((135186, 135205), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (135196, 135205), True, 'from imgaug import parameters as iap\n'), ((136923, 136941), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (136935, 136941), True, 'import imgaug.random as iarandom\n'), ((137043, 137061), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (137055, 137061), True, 'import imgaug.random as iarandom\n'), ((137393, 137417), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['"""max"""'], {}), "('max')\n", (137410, 137417), True, 'from imgaug import parameters as iap\n'), ((137719, 137738), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (137729, 137738), True, 'from imgaug import parameters as iap\n'), ((138046, 138065), 'imgaug.parameters.Choice', 'iap.Choice', (['[0, 50]'], {}), '([0, 50])\n', (138056, 138065), True, 'from imgaug import parameters as iap\n'), ((139119, 139145), 'numpy.exp', 'np.exp', (['(-(5 * 1 + 0 - 0.5))'], {}), '(-(5 * 1 + 0 - 0.5))\n', (139125, 139145), True, 'import numpy as np\n'), ((140460, 140486), 'numpy.exp', 'np.exp', (['(-(5 * 1 + 0 - 0.5))'], {}), '(-(5 * 1 + 0 - 0.5))\n', (140466, 140486), True, 'import numpy as np\n'), ((141219, 141245), 'numpy.exp', 'np.exp', (['(-(1 * 1 + 0 - 0.5))'], {}), '(-(1 * 1 + 0 - 0.5))\n', (141225, 141245), True, 'import numpy as np\n'), ((141282, 141309), 'numpy.exp', 'np.exp', (['(-(10 * 1 + 0 - 0.5))'], {}), '(-(10 * 1 + 0 - 0.5))\n', (141288, 141309), True, 'import numpy as np\n'), ((142581, 142606), 'numpy.array', 'np.array', (['[val]'], {'dtype': 'dt'}), '([val], dtype=dt)\n', (142589, 142606), True, 'import numpy as np\n'), ((142630, 142655), 'numpy.array', 'np.array', (['[mul]'], {'dtype': 'dt'}), '([mul], dtype=dt)\n', (142638, 142655), True, 'import numpy as np\n'), ((142679, 142704), 'numpy.array', 'np.array', (['[add]'], {'dtype': 'dt'}), '([add], dtype=dt)\n', (142687, 142704), True, 'import numpy as np\n'), ((142731, 142759), 'numpy.array', 'np.array', (['[thresh]'], {'dtype': 'dt'}), '([thresh], dtype=dt)\n', (142739, 142759), True, 'import numpy as np\n'), ((143778, 143796), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (143790, 143796), True, 'import imgaug.random as iarandom\n'), ((143898, 143916), 'imgaug.random.RNG', 'iarandom.RNG', (['(1234)'], {}), '(1234)\n', (143910, 143916), True, 'import imgaug.random as iarandom\n'), ((22005, 22034), 'imgaug.parameters.force_np_float_dtype', 'iap.force_np_float_dtype', (['arr'], {}), '(arr)\n', (22029, 22034), True, 'from imgaug import parameters as iap\n'), ((43243, 43257), 'six.moves.xrange', 'sm.xrange', (['(100)'], {}), '(100)\n', (43252, 43257), True, 'import six.moves as sm\n'), ((93238, 93262), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (93255, 93262), True, 'from imgaug import parameters as iap\n'), ((95524, 95545), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), 
'(v1)\n', (95541, 95545), True, 'from imgaug import parameters as iap\n'), ((96123, 96144), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (96140, 96144), True, 'from imgaug import parameters as iap\n'), ((96146, 96167), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (96163, 96167), True, 'from imgaug import parameters as iap\n'), ((96753, 96774), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (96770, 96774), True, 'from imgaug import parameters as iap\n'), ((97439, 97460), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (97456, 97460), True, 'from imgaug import parameters as iap\n'), ((97462, 97483), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (97479, 97483), True, 'from imgaug import parameters as iap\n'), ((100971, 100992), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (100988, 100992), True, 'from imgaug import parameters as iap\n'), ((101638, 101659), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (101655, 101659), True, 'from imgaug import parameters as iap\n'), ((101661, 101682), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (101678, 101682), True, 'from imgaug import parameters as iap\n'), ((102333, 102354), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (102350, 102354), True, 'from imgaug import parameters as iap\n'), ((103148, 103169), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (103165, 103169), True, 'from imgaug import parameters as iap\n'), ((103171, 103192), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (103188, 103192), True, 'from imgaug import parameters as iap\n'), ((107333, 107354), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (107350, 107354), True, 'from imgaug import parameters as iap\n'), ((107940, 107961), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (107957, 107961), True, 'from imgaug import parameters as iap\n'), ((107963, 107984), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (107980, 107984), True, 'from imgaug import parameters as iap\n'), ((108574, 108595), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (108591, 108595), True, 'from imgaug import parameters as iap\n'), ((109236, 109257), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (109253, 109257), True, 'from imgaug import parameters as iap\n'), ((109259, 109280), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (109276, 109280), True, 'from imgaug import parameters as iap\n'), ((112495, 112516), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (112512, 112516), True, 'from imgaug import parameters as iap\n'), ((113112, 113133), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (113129, 113133), True, 'from imgaug import parameters as iap\n'), ((113135, 113156), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (113152, 113156), True, 'from imgaug import parameters as iap\n'), ((113756, 113777), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (113773, 113777), True, 'from imgaug import parameters as iap\n'), ((114438, 
114459), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v1'], {}), '(v1)\n', (114455, 114459), True, 'from imgaug import parameters as iap\n'), ((114461, 114482), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['v2'], {}), '(v2)\n', (114478, 114482), True, 'from imgaug import parameters as iap\n'), ((118135, 118158), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['base'], {}), '(base)\n', (118152, 118158), True, 'from imgaug import parameters as iap\n'), ((119190, 119213), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['base'], {}), '(base)\n', (119207, 119213), True, 'from imgaug import parameters as iap\n'), ((119215, 119242), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['exponent'], {}), '(exponent)\n', (119232, 119242), True, 'from imgaug import parameters as iap\n'), ((122646, 122670), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['value'], {}), '(value)\n', (122663, 122670), True, 'from imgaug import parameters as iap\n'), ((142305, 142327), 'imgaug.parameters.Deterministic', 'iap.Deterministic', (['val'], {}), '(val)\n', (142322, 142327), True, 'from imgaug import parameters as iap\n'), ((39567, 39582), 'numpy.sum', 'np.sum', (['samples'], {}), '(samples)\n', (39573, 39582), True, 'import numpy as np\n'), ((95789, 95821), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int64'}), '((2, 3), dtype=np.int64)\n', (95797, 95821), True, 'import numpy as np\n'), ((96411, 96443), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int32'}), '((2, 3), dtype=np.int32)\n', (96419, 96443), True, 'import numpy as np\n'), ((97076, 97110), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (97084, 97110), True, 'import numpy as np\n'), ((97781, 97815), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (97789, 97815), True, 'import numpy as np\n'), ((101270, 101304), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (101278, 101304), True, 'import numpy as np\n'), ((101956, 101990), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (101964, 101990), True, 'import numpy as np\n'), ((102752, 102786), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (102760, 102786), True, 'import numpy as np\n'), ((103586, 103620), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (103594, 103620), True, 'import numpy as np\n'), ((118574, 118608), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (118582, 118608), True, 'import numpy as np\n'), ((119648, 119682), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (119656, 119682), True, 'import numpy as np\n'), ((142843, 142882), 'numpy.exp', 'np.exp', (['(-(val_ * mul_ + add_ - thresh_))'], {}), '(-(val_ * mul_ + add_ - thresh_))\n', (142849, 142882), True, 'import numpy as np\n'), ((107630, 107662), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int32'}), '((2, 3), dtype=np.int32)\n', (107638, 107662), True, 'import numpy as np\n'), ((108256, 108288), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int32'}), '((2, 3), dtype=np.int32)\n', (108264, 108288), True, 'import numpy as np\n'), ((108897, 108931), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', 
(108905, 108931), True, 'import numpy as np\n'), ((109578, 109612), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (109586, 109612), True, 'import numpy as np\n'), ((112792, 112824), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int64'}), '((2, 3), dtype=np.int64)\n', (112800, 112824), True, 'import numpy as np\n'), ((113428, 113460), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.int64'}), '((2, 3), dtype=np.int64)\n', (113436, 113460), True, 'import numpy as np\n'), ((114089, 114123), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (114097, 114123), True, 'import numpy as np\n'), ((114790, 114824), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float64'}), '((2, 3), dtype=np.float64)\n', (114798, 114824), True, 'import numpy as np\n'), ((93327, 93346), 'numpy.float64', 'np.float64', (['[value]'], {}), '([value])\n', (93337, 93346), True, 'import numpy as np\n')]
|
import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
class GoalSampler:
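    """Maintain the memory of discovered and imagined goal descriptions.

    Every goal string ever encountered is stored in ``feedback_memory`` with a
    memory id, a one-hot encoding, encodings by the policy and reward language
    models, and bookkeeping counters (iteration of discovery, target/reached
    counters, f1 score, imagined flag). New sentences can also be imagined by a
    heuristic sentence generator fitted on the discovered descriptions.

    Construction sketch (argument values are hypothetical; ``params`` must
    provide at least the keys read in ``__init__``, e.g. ``train_descriptions``,
    ``conditions`` and ``experiment_params``)::

        sampler = GoalSampler(policy_language_model=policy_lm,
                              reward_language_model=reward_lm,
                              goal_dim=100,
                              one_hot_encoder=encoder,
                              params=params)
    """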
def __init__(self,
policy_language_model,
reward_language_model,
goal_dim,
one_hot_encoder,
params):
self.policy_language_model = policy_language_model
self.reward_language_model = reward_language_model
self.goal_dim = goal_dim
self.params = params
self.nb_feedbacks = 0
self.nb_positive_feedbacks = 0
self.nb_negative_feedbacks = 0
self.feedback2id = dict()
self.id2feedback = dict()
self.id2oracleid = dict()
self.feedback2one_hot = dict()
self.id2one_hot = dict()
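        # Parallel lists describing every known goal; all are indexed by memory_id.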
self.feedback_memory = dict(memory_id=[],
string=[],
iter_discovery=[],
target_counter=[],
reached_counter=[],
oracle_id=[],
f1_score=[],
policy_encoding=[],
reward_encoding=[],
imagined=[],
)
self.imagined_goals = dict(string=[],
competence=[],
lp=[])
self.one_hot_encoder = one_hot_encoder
self.goal_generator = SentenceGeneratorHeuristic(params['train_descriptions'],
params['test_descriptions'],
sentences=None,
method=params['conditions']['imagination_method'])
self.nb_discovered_goals = 0
self.score_target_goals = None
self.perceived_learning_progress = None
self.perceived_competence = None
self.feedback_stats = None
self.rank = MPI.COMM_WORLD.Get_rank()
self.num_cpus = params['experiment_params']['n_cpus']
self.rollout_batch_size = params['experiment_params']['rollout_batch_size']
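        # Memory ids of goals discovered through interaction vs. imagined by the
        # generator; both arrays are refreshed at each call to update().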
self.not_imagined_goal_ids = np.array([])
self.imagined_goal_ids = np.array([])
def store_reward_function(self, reward_function):
self.reward_function = reward_function
def update_embeddings(self):
        # Embeddings must be recomputed whenever the language models are updated.
for i, goal_str in enumerate(self.feedback_memory['string']):
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'][i] = reward_encoding.copy()
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback_memory['policy_encoding'][i] = policy_encoding.copy()
def add_entries_to_feedback_memory(self, str_list, episode_count, imagined):
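        """Register each new goal string in the feedback memory.

        Unseen strings receive a fresh memory id, a one-hot encoding, language
        model encodings and zero-initialized counters. A goal that had only
        been imagined so far and is now discovered through interaction
        (imagined=0) gets its 'imagined' flag reset to 0.
        """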
for goal_str in str_list:
if goal_str not in self.feedback2id.keys():
memory_id = self.nb_discovered_goals
if goal_str in self.params['train_descriptions']:
oracle_id = self.params['train_descriptions'].index(goal_str)
else:
oracle_id = None
one_hot = self.one_hot_encoder.encode(goal_str.lower().split(" "))
self.feedback2one_hot[goal_str] = one_hot
self.id2one_hot[memory_id] = one_hot
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'].append(reward_encoding.copy())
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback2id[goal_str] = memory_id
self.id2oracleid[memory_id] = oracle_id
self.id2feedback[memory_id] = goal_str
self.feedback_memory['memory_id'].append(memory_id)
self.feedback_memory['oracle_id'].append(oracle_id)
self.feedback_memory['string'].append(goal_str)
self.feedback_memory['target_counter'].append(0)
self.feedback_memory['reached_counter'].append(0)
self.feedback_memory['iter_discovery'].append(episode_count)
self.feedback_memory['f1_score'].append(0)
self.feedback_memory['policy_encoding'].append(policy_encoding.copy())
self.feedback_memory['imagined'].append(imagined)
self.nb_discovered_goals += 1
elif goal_str in self.feedback2id.keys() and not imagined: # if goal previously imagined is discovered later, change its status
ind = self.feedback_memory['string'].index(goal_str)
if self.feedback_memory['imagined'][ind] == 1:
self.feedback_memory['imagined'][ind] = 0
logger.info('Goal already imagined:', goal_str)
def update_discovered_goals(self,
new_goals_str,
episode_count,
epoch):
# only done in cpu 0
self.add_entries_to_feedback_memory(str_list=new_goals_str,
episode_count=episode_count,
imagined=0)
# Decide whether to generate new goals
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = True
if len(new_goals_str) > 0 and imagined:
new_imagined_goals = []
inds_not_imagined = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.goal_generator.update_model(np.array(self.feedback_memory['string'])[inds_not_imagined])
generated_goals = self.goal_generator.generate_sentences(n='all')
for gen_g in generated_goals:
if gen_g not in self.imagined_goals['string']:
self.imagined_goals['string'].append(gen_g)
self.imagined_goals['competence'].append(0)
self.imagined_goals['lp'].append(0)
new_imagined_goals.append(gen_g)
self.add_entries_to_feedback_memory(str_list=new_imagined_goals,
episode_count=episode_count,
imagined=1)
def update(self,
current_episode,
all_episodes,
partner_available,
goals_reached_str,
goals_not_reached_str):
imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 1).flatten()
not_imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.not_imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[not_imagined_inds]
self.imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[imagined_inds]
# only done in cpu 0
n_episodes = len(all_episodes)
attempted_goals_ids = []
exploit = []
for ep in all_episodes:
exploit.append(ep['exploit'])
attempted_goals_ids.append(ep['g_id'])
if partner_available:
# if partner is available, simply encodes what it said
assert n_episodes == len(goals_reached_str) == len(goals_not_reached_str) == len(exploit) == len(attempted_goals_ids)
# Get indexes in the order of discovery of the attempted goals, reached_goals, not reached_goals
goals_reached_ids = []
goals_not_reached_ids = []
for i in range(n_episodes):
goals_reached_ids.append([])
goals_not_reached_ids.append([])
for goal_str in goals_reached_str[i]:
goals_reached_ids[-1].append(self.feedback2id[goal_str])
for goal_str in goals_not_reached_str[i]:
goals_not_reached_ids[-1].append(self.feedback2id[goal_str])
else:
goals_reached_ids = []
goals_not_reached_ids = []
final_obs = np.array([ep['obs'][-1] for ep in all_episodes])
# test 50 goals for each episode
discovered_goal_ids = np.array(self.feedback_memory['memory_id'])
not_imagined_ind = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
discovered_goal_ids = discovered_goal_ids[not_imagined_ind]
n_attempts = min(50, len(discovered_goal_ids))
goals_to_try = np.random.choice(discovered_goal_ids, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(all_episodes)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids.append(pos_goals)
neg_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == -1)].tolist()
goals_not_reached_ids.append(neg_goals)
return goals_reached_ids, goals_not_reached_ids
def share_info_to_all_cpus(self):
# share data across cpus
self.feedback_memory = MPI.COMM_WORLD.bcast(self.feedback_memory, root=0)
self.feedback2id = MPI.COMM_WORLD.bcast(self.feedback2id, root=0)
self.id2oracleid = MPI.COMM_WORLD.bcast(self.id2oracleid, root=0)
self.id2feedback = MPI.COMM_WORLD.bcast(self.id2feedback, root=0)
self.feedback2one_hot = MPI.COMM_WORLD.bcast(self.feedback2one_hot, root=0)
self.nb_discovered_goals = MPI.COMM_WORLD.bcast(self.nb_discovered_goals, root=0)
self.imagined_goals = MPI.COMM_WORLD.bcast(self.imagined_goals, root=0)
self.one_hot_encoder = MPI.COMM_WORLD.bcast(self.one_hot_encoder, root=0)
def sample_targets(self, epoch):
"""
        Sample targets for all CPUs and all rollouts in the batch, then scatter them to the different CPUs.
"""
# Decide whether to exploit or not
exploit = True if np.random.random() < 0.1 else False
strategy = 'random'
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = np.random.random() < self.params['conditions']['p_imagined']
if self.rank == 0:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
for i in range(self.num_cpus):
goals_str = []
goals_encodings = []
goals_ids = []
for j in range(self.rollout_batch_size):
# when there is no goal in memory, sample random goal from standard normal distribution
if len(self.feedback_memory['memory_id']) == 0:
goals_encodings.append(np.random.normal(size=self.goal_dim))
goals_str.append('Random Goal')
goals_ids.append(-1)
else:
if strategy == 'random':
if imagined and self.imagined_goal_ids.size > 0:
ind = np.random.choice(self.imagined_goal_ids)
else:
ind = np.random.choice(self.not_imagined_goal_ids)
else:
raise NotImplementedError
goals_encodings.append(self.feedback_memory['policy_encoding'][ind])
goals_str.append(self.id2feedback[ind])
goals_ids.append(ind)
all_goals_str.append(goals_str)
all_goals_encodings.append(goals_encodings)
all_goals_ids.append(goals_ids)
else:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
goals_str = MPI.COMM_WORLD.scatter(all_goals_str, root=0)
goals_encodings = MPI.COMM_WORLD.scatter(all_goals_encodings, root=0)
goals_ids = MPI.COMM_WORLD.scatter(all_goals_ids, root=0)
return exploit, goals_str, goals_encodings, goals_ids, imagined
class EvalGoalSampler:
def __init__(self, policy_language_model, one_hot_encoder, params):
self.descriptions = params['train_descriptions']
self.nb_descriptions = len(self.descriptions)
self.count = 0
self.policy_language_model = policy_language_model
self.rollout_batch_size = params['evaluation_rollout_params']['rollout_batch_size']
self.params = params
def reset(self):
self.count = 0
def sample(self, method='robin'):
# print(self.descriptions[self.count])
goals_str = []
goals_encodings = []
goals_ids = []
if method == 'robin':
ind = self.count
elif method == 'random':
ind = np.random.randint(self.nb_descriptions)
else:
raise NotImplementedError
for _ in range(self.rollout_batch_size):
g_str = self.descriptions[ind]
goals_str.append(g_str)
policy_encoding = self.policy_language_model.encode(g_str).flatten()
goals_encodings.append(policy_encoding)
goals_ids.append(ind)
self.count += 1
return True, goals_str, goals_encodings, goals_ids
|
[
"numpy.random.normal",
"numpy.tile",
"src.logger.info",
"numpy.repeat",
"mpi4py.MPI.COMM_WORLD.bcast",
"numpy.random.choice",
"numpy.random.random",
"numpy.where",
"src.imagine.goal_generator.simple_sentence_generator.SentenceGeneratorHeuristic",
"numpy.array",
"mpi4py.MPI.COMM_WORLD.scatter",
"numpy.random.randint",
"mpi4py.MPI.COMM_WORLD.Get_rank"
] |
[((1608, 1770), 'src.imagine.goal_generator.simple_sentence_generator.SentenceGeneratorHeuristic', 'SentenceGeneratorHeuristic', (["params['train_descriptions']", "params['test_descriptions']"], {'sentences': 'None', 'method': "params['conditions']['imagination_method']"}), "(params['train_descriptions'], params[\n 'test_descriptions'], sentences=None, method=params['conditions'][\n 'imagination_method'])\n", (1634, 1770), False, 'from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic\n'), ((2152, 2177), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (2175, 2177), False, 'from mpi4py import MPI\n'), ((2362, 2374), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2370, 2374), True, 'import numpy as np\n'), ((2408, 2420), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2416, 2420), True, 'import numpy as np\n'), ((9881, 9931), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.feedback_memory'], {'root': '(0)'}), '(self.feedback_memory, root=0)\n', (9901, 9931), False, 'from mpi4py import MPI\n'), ((9959, 10005), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.feedback2id'], {'root': '(0)'}), '(self.feedback2id, root=0)\n', (9979, 10005), False, 'from mpi4py import MPI\n'), ((10033, 10079), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.id2oracleid'], {'root': '(0)'}), '(self.id2oracleid, root=0)\n', (10053, 10079), False, 'from mpi4py import MPI\n'), ((10107, 10153), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.id2feedback'], {'root': '(0)'}), '(self.id2feedback, root=0)\n', (10127, 10153), False, 'from mpi4py import MPI\n'), ((10186, 10237), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.feedback2one_hot'], {'root': '(0)'}), '(self.feedback2one_hot, root=0)\n', (10206, 10237), False, 'from mpi4py import MPI\n'), ((10273, 10327), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.nb_discovered_goals'], {'root': '(0)'}), '(self.nb_discovered_goals, root=0)\n', (10293, 10327), False, 'from mpi4py import MPI\n'), ((10358, 10407), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.imagined_goals'], {'root': '(0)'}), '(self.imagined_goals, root=0)\n', (10378, 10407), False, 'from mpi4py import MPI\n'), ((10439, 10489), 'mpi4py.MPI.COMM_WORLD.bcast', 'MPI.COMM_WORLD.bcast', (['self.one_hot_encoder'], {'root': '(0)'}), '(self.one_hot_encoder, root=0)\n', (10459, 10489), False, 'from mpi4py import MPI\n'), ((12720, 12765), 'mpi4py.MPI.COMM_WORLD.scatter', 'MPI.COMM_WORLD.scatter', (['all_goals_str'], {'root': '(0)'}), '(all_goals_str, root=0)\n', (12742, 12765), False, 'from mpi4py import MPI\n'), ((12792, 12843), 'mpi4py.MPI.COMM_WORLD.scatter', 'MPI.COMM_WORLD.scatter', (['all_goals_encodings'], {'root': '(0)'}), '(all_goals_encodings, root=0)\n', (12814, 12843), False, 'from mpi4py import MPI\n'), ((12864, 12909), 'mpi4py.MPI.COMM_WORLD.scatter', 'MPI.COMM_WORLD.scatter', (['all_goals_ids'], {'root': '(0)'}), '(all_goals_ids, root=0)\n', (12886, 12909), False, 'from mpi4py import MPI\n'), ((7304, 7347), 'numpy.array', 'np.array', (["self.feedback_memory['memory_id']"], {}), "(self.feedback_memory['memory_id'])\n", (7312, 7347), True, 'import numpy as np\n'), ((7400, 7443), 'numpy.array', 'np.array', (["self.feedback_memory['memory_id']"], {}), "(self.feedback_memory['memory_id'])\n", (7408, 7443), True, 'import numpy as np\n'), ((8634, 8682), 'numpy.array', 'np.array', (["[ep['obs'][-1] for ep in all_episodes]"], {}), 
"([ep['obs'][-1] for ep in all_episodes])\n", (8642, 8682), True, 'import numpy as np\n'), ((8762, 8805), 'numpy.array', 'np.array', (["self.feedback_memory['memory_id']"], {}), "(self.feedback_memory['memory_id'])\n", (8770, 8805), True, 'import numpy as np\n'), ((9066, 9135), 'numpy.random.choice', 'np.random.choice', (['discovered_goal_ids'], {'size': 'n_attempts', 'replace': '(False)'}), '(discovered_goal_ids, size=n_attempts, replace=False)\n', (9082, 9135), True, 'import numpy as np\n'), ((9154, 9194), 'numpy.repeat', 'np.repeat', (['final_obs', 'n_attempts'], {'axis': '(0)'}), '(final_obs, n_attempts, axis=0)\n', (9163, 9194), True, 'import numpy as np\n'), ((9215, 9256), 'numpy.tile', 'np.tile', (['goals_to_try', 'final_obs.shape[0]'], {}), '(goals_to_try, final_obs.shape[0])\n', (9222, 9256), True, 'import numpy as np\n'), ((10709, 10727), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10725, 10727), True, 'import numpy as np\n'), ((13712, 13751), 'numpy.random.randint', 'np.random.randint', (['self.nb_descriptions'], {}), '(self.nb_descriptions)\n', (13729, 13751), True, 'import numpy as np\n'), ((6186, 6226), 'numpy.array', 'np.array', (["self.feedback_memory['string']"], {}), "(self.feedback_memory['string'])\n", (6194, 6226), True, 'import numpy as np\n'), ((11033, 11051), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (11049, 11051), True, 'import numpy as np\n'), ((5194, 5241), 'src.logger.info', 'logger.info', (['"""Goal already imagined:"""', 'goal_str'], {}), "('Goal already imagined:', goal_str)\n", (5205, 5241), False, 'from src import logger\n'), ((7109, 7151), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (7117, 7151), True, 'import numpy as np\n'), ((7208, 7250), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (7216, 7250), True, 'import numpy as np\n'), ((6082, 6124), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (6090, 6124), True, 'import numpy as np\n'), ((8849, 8891), 'numpy.array', 'np.array', (["self.feedback_memory['imagined']"], {}), "(self.feedback_memory['imagined'])\n", (8857, 8891), True, 'import numpy as np\n'), ((9427, 9486), 'numpy.where', 'np.where', (['(rewards[i * n_attempts:(i + 1) * n_attempts] == 0)'], {}), '(rewards[i * n_attempts:(i + 1) * n_attempts] == 0)\n', (9435, 9486), True, 'import numpy as np\n'), ((9591, 9651), 'numpy.where', 'np.where', (['(rewards[i * n_attempts:(i + 1) * n_attempts] == -1)'], {}), '(rewards[i * n_attempts:(i + 1) * n_attempts] == -1)\n', (9599, 9651), True, 'import numpy as np\n'), ((11648, 11684), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.goal_dim'}), '(size=self.goal_dim)\n', (11664, 11684), True, 'import numpy as np\n'), ((11985, 12025), 'numpy.random.choice', 'np.random.choice', (['self.imagined_goal_ids'], {}), '(self.imagined_goal_ids)\n', (12001, 12025), True, 'import numpy as np\n'), ((12098, 12142), 'numpy.random.choice', 'np.random.choice', (['self.not_imagined_goal_ids'], {}), '(self.not_imagined_goal_ids)\n', (12114, 12142), True, 'import numpy as np\n')]
|
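The GoalSampler above prepares goal batches on rank 0 and distributes them with MPI.COMM_WORLD.scatter. The following is a minimal, hypothetical sketch of that prepare-and-scatter pattern, separate from the dataset row itself; the goal ids are placeholders.

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    # one entry per process; each entry is the goal-id batch that process will receive
    all_goal_ids = [list(np.random.randint(0, 10, size=2)) for _ in range(comm.Get_size())]
else:
    all_goal_ids = None

my_goal_ids = comm.scatter(all_goal_ids, root=0)
print(f"rank {rank} received goal ids {my_goal_ids}")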
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
"""
Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
    method for querying the size of an image
"""
def __init__(self, datasets, uniform_datasets):
_ConcatDataset.__init__(self, datasets)
self.uniform_datasets = uniform_datasets
def get_idxs(self, idx):
if self.uniform_datasets:
dataset_idx = np.random.randint(len(self.cumulative_sizes))
if dataset_idx == 0:
low = 0
else:
low = self.cumulative_sizes[dataset_idx - 1]
sample_idx = np.random.randint(0, self.cumulative_sizes[dataset_idx] - low)
else:
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
def get_img_info(self, idx):
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx].get_img_info(sample_idx)
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx][sample_idx]
|
[
"torch.utils.data.dataset.ConcatDataset.__init__",
"numpy.random.randint",
"bisect.bisect_right"
] |
[((411, 450), 'torch.utils.data.dataset.ConcatDataset.__init__', '_ConcatDataset.__init__', (['self', 'datasets'], {}), '(self, datasets)\n', (434, 450), True, 'from torch.utils.data.dataset import ConcatDataset as _ConcatDataset\n'), ((798, 860), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.cumulative_sizes[dataset_idx] - low)'], {}), '(0, self.cumulative_sizes[dataset_idx] - low)\n', (815, 860), True, 'import numpy as np\n'), ((901, 948), 'bisect.bisect_right', 'bisect.bisect_right', (['self.cumulative_sizes', 'idx'], {}), '(self.cumulative_sizes, idx)\n', (920, 948), False, 'import bisect\n')]
|
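The ConcatDataset above maps a flat index to a (dataset, sample) pair via cumulative sizes and bisect. A small self-contained sketch of that mapping, with hypothetical sizes, is:

import bisect

cumulative_sizes = [3, 8, 10]  # dataset 0 has 3 items, dataset 1 has 5, dataset 2 has 2

def get_idxs(idx):
    dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
    sample_idx = idx if dataset_idx == 0 else idx - cumulative_sizes[dataset_idx - 1]
    return dataset_idx, sample_idx

assert get_idxs(0) == (0, 0)
assert get_idxs(3) == (1, 0)  # first item of the second dataset
assert get_idxs(9) == (2, 1)  # last item overall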
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import copy
import numpy as np
from mmdet.datasets.builder import PIPELINES
from ..datasets import get_annotation_mmdet_format
@PIPELINES.register_module()
class LoadImageFromOTEDataset:
"""
    Pipeline element that loads an image from an OTE Dataset on the fly. Can convert the image to float32 if needed.
Expected entries in the 'results' dict that should be passed to this pipeline element are:
results['dataset_item']: dataset_item from which to load the image
results['dataset_id']: id of the dataset to which the item belongs
results['index']: index of the item in the dataset
    :param to_float32: optional bool, True to convert images to fp32. Defaults to False.
"""
def __init__(self, to_float32: bool = False):
self.to_float32 = to_float32
def __call__(self, results):
dataset_item = results['dataset_item']
img = dataset_item.numpy
shape = img.shape
assert img.shape[0] == results['height'], f"{img.shape[0]} != {results['height']}"
assert img.shape[1] == results['width'], f"{img.shape[1]} != {results['width']}"
filename = f"Dataset item index {results['index']}"
results['filename'] = filename
results['ori_filename'] = filename
results['img'] = img
results['img_shape'] = shape
results['ori_shape'] = shape
# Set initial values for default meta_keys
results['pad_shape'] = shape
num_channels = 1 if len(shape) < 3 else shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
results['img_fields'] = ['img']
if self.to_float32:
results['img'] = results['img'].astype(np.float32)
return results
@PIPELINES.register_module()
class LoadAnnotationFromOTEDataset:
"""
    Pipeline element that loads an annotation from an OTE Dataset on the fly.
Expected entries in the 'results' dict that should be passed to this pipeline element are:
results['dataset_item']: dataset_item from which to load the annotation
results['ann_info']['label_list']: list of all labels in the project
"""
def __init__(self, min_size : int, with_bbox: bool = True, with_label: bool = True, with_mask: bool = False, with_seg: bool = False,
poly2mask: bool = True, with_text: bool = False, domain=None):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.with_text = with_text
self.domain = domain
self.min_size = min_size
@staticmethod
def _load_bboxes(results, ann_info):
results['bbox_fields'].append('gt_bboxes')
results['gt_bboxes'] = copy.deepcopy(ann_info['bboxes'])
return results
@staticmethod
def _load_labels(results, ann_info):
results['gt_labels'] = copy.deepcopy(ann_info['labels'])
return results
@staticmethod
def _load_masks(results, ann_info):
results['mask_fields'].append('gt_masks')
results['gt_masks'] = copy.deepcopy(ann_info['masks'])
return results
def __call__(self, results):
dataset_item = results['dataset_item']
label_list = results['ann_info']['label_list']
ann_info = get_annotation_mmdet_format(dataset_item, label_list, self.domain, self.min_size)
if self.with_bbox:
results = self._load_bboxes(results, ann_info)
if results is None or len(results['gt_bboxes']) == 0:
return None
if self.with_label:
results = self._load_labels(results, ann_info)
if self.with_mask:
results = self._load_masks(results, ann_info)
return results
|
[
"numpy.zeros",
"numpy.ones",
"mmdet.datasets.builder.PIPELINES.register_module",
"copy.deepcopy"
] |
[((715, 742), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (740, 742), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((2438, 2465), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (2463, 2465), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((3481, 3514), 'copy.deepcopy', 'copy.deepcopy', (["ann_info['bboxes']"], {}), "(ann_info['bboxes'])\n", (3494, 3514), False, 'import copy\n'), ((3629, 3662), 'copy.deepcopy', 'copy.deepcopy', (["ann_info['labels']"], {}), "(ann_info['labels'])\n", (3642, 3662), False, 'import copy\n'), ((3825, 3857), 'copy.deepcopy', 'copy.deepcopy', (["ann_info['masks']"], {}), "(ann_info['masks'])\n", (3838, 3857), False, 'import copy\n'), ((2154, 2194), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2162, 2194), True, 'import numpy as np\n'), ((2212, 2251), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2219, 2251), True, 'import numpy as np\n')]
|
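The two pipeline elements above read everything they need from a shared `results` dict. A hypothetical sketch of the minimal dict LoadImageFromOTEDataset expects follows; FakeDatasetItem only mimics the `.numpy` attribute the loader reads, and in a real pipeline this dict comes from the OTE dataset wrapper.

import numpy as np

class FakeDatasetItem:
    def __init__(self, image):
        self.numpy = image  # the loader reads dataset_item.numpy

image = np.zeros((4, 6, 3), dtype=np.uint8)
results = {
    "dataset_item": FakeDatasetItem(image),
    "height": 4,   # must equal image.shape[0]
    "width": 6,    # must equal image.shape[1]
    "index": 0,
    "ann_info": {"label_list": []},  # used by LoadAnnotationFromOTEDataset
}
print(sorted(results.keys()))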
import numpy as np
import argparse
from sklearn.svm import LinearSVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
y = np.load(args.labels, allow_pickle=True)
# http://scikit-learn.sourceforge.net/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC
regr = make_pipeline(StandardScaler(),
LinearSVR(verbose=args.verbose, tol = 1e-5, max_iter = 30))
regr.fit(X,y)
np.savetxt(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=",")
|
[
"argparse.ArgumentParser",
"sklearn.svm.LinearSVR",
"sklearn.preprocessing.StandardScaler",
"numpy.savetxt",
"numpy.load"
] |
[((217, 242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (240, 242), False, 'import argparse\n'), ((538, 579), 'numpy.load', 'np.load', (['args.datapath'], {'allow_pickle': '(True)'}), '(args.datapath, allow_pickle=True)\n', (545, 579), True, 'import numpy as np\n'), ((584, 623), 'numpy.load', 'np.load', (['args.labels'], {'allow_pickle': '(True)'}), '(args.labels, allow_pickle=True)\n', (591, 623), True, 'import numpy as np\n'), ((855, 934), 'numpy.savetxt', 'np.savetxt', (['args.outputpath', "regr.named_steps['linearsvr'].coef_"], {'delimiter': '""","""'}), "(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=',')\n", (865, 934), True, 'import numpy as np\n'), ((757, 773), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (771, 773), False, 'from sklearn.preprocessing import StandardScaler\n'), ((779, 834), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {'verbose': 'args.verbose', 'tol': '(1e-05)', 'max_iter': '(30)'}), '(verbose=args.verbose, tol=1e-05, max_iter=30)\n', (788, 834), False, 'from sklearn.svm import LinearSVR\n')]
|
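The script above fits a StandardScaler + LinearSVR pipeline on arrays loaded from .npy files. A self-contained sketch of the same pipeline on synthetic data (assumed parameters, for illustration only):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR

X, y = make_regression(n_samples=200, n_features=5, noise=0.1, random_state=0)
regr = make_pipeline(StandardScaler(), LinearSVR(tol=1e-5, max_iter=10000))
regr.fit(X, y)
print(regr.named_steps["linearsvr"].coef_.shape)  # (5,)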
#!/usr/bin/env python3
# coding: utf-8
# Adapted from: https://github.com/zpincus/celltool/blob/master/celltool/numerics/image_warp.py
from scipy import ndimage
import numpy as np
from probreg import bcpd
import tifffile
import matplotlib.pyplot as plt
import napari
from magicgui import magic_factory, widgets
from napari.types import PointsData, ImageData
from typing_extensions import Annotated
def _make_inverse_warp(from_points, to_points, output_region, approximate_grid):
x_min, y_min, z_min, x_max, y_max, z_max = output_region
if approximate_grid is None: approximate_grid = 1
x_steps = (x_max - x_min) // approximate_grid
y_steps = (y_max - y_min) // approximate_grid
z_steps = (z_max - z_min) // approximate_grid
x, y, z = np.mgrid[x_min:x_max:x_steps*1j, y_min:y_max:y_steps*1j, z_min:z_max:z_steps*1j]
transform = _make_warp(to_points, from_points, x, y, z)
if approximate_grid != 1:
# linearly interpolate the zoomed transform grid
new_x, new_y, new_z = np.mgrid[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1]
x_fracs, x_indices = np.modf((x_steps-1)*(new_x-x_min)/float(x_max-x_min))
y_fracs, y_indices = np.modf((y_steps-1)*(new_y-y_min)/float(y_max-y_min))
z_fracs, z_indices = np.modf((z_steps-1)*(new_z-z_min)/float(z_max-z_min))
x_indices = x_indices.astype(int)
y_indices = y_indices.astype(int)
z_indices = z_indices.astype(int)
x1 = 1 - x_fracs
y1 = 1 - y_fracs
z1 = 1 - z_fracs
ix1 = (x_indices+1).clip(0, x_steps-1)
iy1 = (y_indices+1).clip(0, y_steps-1)
iz1 = (z_indices+1).clip(0, z_steps-1)
transform_x = _trilinear_interpolation(0, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform_y = _trilinear_interpolation(1, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform_z = _trilinear_interpolation(2, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform = [transform_x, transform_y, transform_z]
return transform
def _trilinear_interpolation(d, t, x0, y0, z0, x1, y1, z1, ix0, iy0, iz0, ix1, iy1, iz1):
t000 = t[d][(ix0, iy0, iz0)]
t001 = t[d][(ix0, iy0, iz1)]
t010 = t[d][(ix0, iy1, iz0)]
t100 = t[d][(ix1, iy0, iz0)]
t011 = t[d][(ix0, iy1, iz1)]
t101 = t[d][(ix1, iy0, iz1)]
t110 = t[d][(ix1, iy1, iz0)]
t111 = t[d][(ix1, iy1, iz1)]
return t000*x0*y0*z0 + t001*x0*y0*z1 + t010*x0*y1*z0 + t100*x1*y0*z0 + t011*x0*y1*z1 + t101*x1*y0*z1 + t110*x1*y1*z0 + t111*x1*y1*z1
def _U(x):
_small = 1e-100
return (x**2) * np.where(x<_small, 0, np.log(x))
def _interpoint_distances(points):
xd = np.subtract.outer(points[:,0], points[:,0])
yd = np.subtract.outer(points[:,1], points[:,1])
zd = np.subtract.outer(points[:,2], points[:,2])
return np.sqrt(xd**2 + yd**2 + zd**2)
def _make_L_matrix(points):
n = len(points)
K = _U(_interpoint_distances(points))
P = np.ones((n, 4))
P[:,1:] = points
O = np.zeros((4, 4))
L = np.asarray(np.bmat([[K, P],[P.transpose(), O]]))
return L
def _calculate_f(coeffs, points, x, y, z):
w = coeffs[:-3]
a1, ax, ay, az = coeffs[-4:]
summation = np.zeros(x.shape)
for wi, Pi in zip(w, points):
summation += wi * _U(np.sqrt((x-Pi[0])**2 + (y-Pi[1])**2 + (z-Pi[2])**2))
return a1 + ax*x + ay*y +az*z + summation
def _make_warp(from_points, to_points, x_vals, y_vals, z_vals):
from_points, to_points = np.asarray(from_points), np.asarray(to_points)
err = np.seterr(divide='ignore')
L = _make_L_matrix(from_points)
V = np.resize(to_points, (len(to_points)+4, 3))
V[-3:, :] = 0
coeffs = np.dot(np.linalg.pinv(L), V)
print('L, V, coeffs', L.shape, V.shape, coeffs.shape)
x_warp = _calculate_f(coeffs[:,0], from_points, x_vals, y_vals, z_vals)
y_warp = _calculate_f(coeffs[:,1], from_points, x_vals, y_vals, z_vals)
z_warp = _calculate_f(coeffs[:,2], from_points, x_vals, y_vals, z_vals)
np.seterr(**err)
return [x_warp, y_warp, z_warp]
@magic_factory
def make_image_warping(
viewer: "napari.viewer.Viewer",
moving_image: ImageData,
fixed_image: ImageData,
moving_points: PointsData,
transformed_points: PointsData,
interpolation_order: Annotated[int, {"min": 0, "max": 10, "step": 1}]=1,
approximate_grid: Annotated[int, {"min": 1, "max": 10, "step": 1}]=1
):
from napari.qt import thread_worker
pbar = widgets.ProgressBar()
pbar.range = (0, 0) # unknown duration
make_image_warping.insert(0, pbar) # add progress bar to the top of widget
# this function will be called after we return
def _add_data(return_value, self=make_image_warping):
data, kwargs = return_value
viewer.add_image(data, **kwargs)
self.pop(0).hide() # remove the progress bar
@thread_worker(connect={"returned": _add_data})
def _warp_images(from_points, to_points, image, output_region, interpolation_order=5, approximate_grid=10):
print('Entered warp_images')
transform = _make_inverse_warp(from_points, to_points, output_region, approximate_grid)
warped_image = ndimage.map_coordinates(np.asarray(image), transform, order=interpolation_order)
kwargs = dict(
name='warped_image'
)
return (warped_image, kwargs)
print('Warping image volume')
assert len(moving_points) == len(transformed_points), 'Moving and transformed points must be of same length.'
output_region = (0, 0, 0, int(fixed_image.shape[0] / 1), int(fixed_image.shape[1] / 1), int(fixed_image.shape[2] / 1))
print(output_region)
_warp_images(from_points=moving_points,
to_points=transformed_points,
image=moving_image,
output_region=output_region,
interpolation_order=interpolation_order,
approximate_grid=approximate_grid)
|
[
"numpy.sqrt",
"numpy.subtract.outer",
"numpy.ones",
"napari.qt.thread_worker",
"numpy.linalg.pinv",
"numpy.log",
"numpy.asarray",
"numpy.zeros",
"magicgui.widgets.ProgressBar",
"numpy.seterr"
] |
[((2818, 2863), 'numpy.subtract.outer', 'np.subtract.outer', (['points[:, 0]', 'points[:, 0]'], {}), '(points[:, 0], points[:, 0])\n', (2835, 2863), True, 'import numpy as np\n'), ((2871, 2916), 'numpy.subtract.outer', 'np.subtract.outer', (['points[:, 1]', 'points[:, 1]'], {}), '(points[:, 1], points[:, 1])\n', (2888, 2916), True, 'import numpy as np\n'), ((2924, 2969), 'numpy.subtract.outer', 'np.subtract.outer', (['points[:, 2]', 'points[:, 2]'], {}), '(points[:, 2], points[:, 2])\n', (2941, 2969), True, 'import numpy as np\n'), ((2979, 3015), 'numpy.sqrt', 'np.sqrt', (['(xd ** 2 + yd ** 2 + zd ** 2)'], {}), '(xd ** 2 + yd ** 2 + zd ** 2)\n', (2986, 3015), True, 'import numpy as np\n'), ((3109, 3124), 'numpy.ones', 'np.ones', (['(n, 4)'], {}), '((n, 4))\n', (3116, 3124), True, 'import numpy as np\n'), ((3154, 3170), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (3162, 3170), True, 'import numpy as np\n'), ((3354, 3371), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (3362, 3371), True, 'import numpy as np\n'), ((3685, 3711), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (3694, 3711), True, 'import numpy as np\n'), ((4150, 4166), 'numpy.seterr', 'np.seterr', ([], {}), '(**err)\n', (4159, 4166), True, 'import numpy as np\n'), ((4608, 4629), 'magicgui.widgets.ProgressBar', 'widgets.ProgressBar', ([], {}), '()\n', (4627, 4629), False, 'from magicgui import magic_factory, widgets\n'), ((5001, 5047), 'napari.qt.thread_worker', 'thread_worker', ([], {'connect': "{'returned': _add_data}"}), "(connect={'returned': _add_data})\n", (5014, 5047), False, 'from napari.qt import thread_worker\n'), ((3628, 3651), 'numpy.asarray', 'np.asarray', (['from_points'], {}), '(from_points)\n', (3638, 3651), True, 'import numpy as np\n'), ((3653, 3674), 'numpy.asarray', 'np.asarray', (['to_points'], {}), '(to_points)\n', (3663, 3674), True, 'import numpy as np\n'), ((3838, 3855), 'numpy.linalg.pinv', 'np.linalg.pinv', (['L'], {}), '(L)\n', (3852, 3855), True, 'import numpy as np\n'), ((2762, 2771), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (2768, 2771), True, 'import numpy as np\n'), ((5340, 5357), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (5350, 5357), True, 'import numpy as np\n'), ((3435, 3498), 'numpy.sqrt', 'np.sqrt', (['((x - Pi[0]) ** 2 + (y - Pi[1]) ** 2 + (z - Pi[2]) ** 2)'], {}), '((x - Pi[0]) ** 2 + (y - Pi[1]) ** 2 + (z - Pi[2]) ** 2)\n', (3442, 3498), True, 'import numpy as np\n')]
|
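The warp above is a 3D thin-plate spline: _U is the radial kernel and _make_L_matrix assembles the system matrix solved for the warp coefficients. A self-contained sketch of those two building blocks, with four arbitrary control points:

import numpy as np

def U(r):
    # guard the log so r == 0 contributes exactly 0, mirroring _U above
    return (r ** 2) * np.where(r < 1e-100, 0, np.log(np.where(r <= 0, 1.0, r)))

points = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
dists = np.sqrt(((points[:, None, :] - points[None, :, :]) ** 2).sum(axis=-1))
K = U(dists)
P = np.ones((len(points), 4))
P[:, 1:] = points
L = np.block([[K, P], [P.T, np.zeros((4, 4))]])
print(L.shape)  # (n + 4, n + 4) = (8, 8)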
#!/usr/bin/env python
import argparse
from eva import EvaProgram, Input, Output
from eva.ckks import CKKSCompiler
from eva.seal import generate_keys
import numpy as np
import time
from eva.std.numeric import horizontal_sum
def dot(x, y):
return np.dot(x, y)
def generate_inputs_naive(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
for n in range(size):
# each element is a list (i.e. a vector of size 1)
inputs[f"{label}_{n}"] = [i]
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot_naive(size):
"""Vector dot product with vector size of 1"""
fhe_dot = EvaProgram("fhe_dot", vec_size=1)
with fhe_dot:
a = np.array([Input(f"x_{n}") for n in range(size)]).reshape(1, size)
b = np.array([Input(f"w_{k}") for k in range(size)]).reshape(size, 1)
out = dot(a, b)
Output("y", out[0][0])
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def generate_inputs(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
# all data is stored in a single list of size `size`
inputs[label] = list(range(size))
for n in range(size):
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot(size):
"""Vector dot product with CKKS vector size equal to the size"""
fhe_dot = EvaProgram("fhe_dot", vec_size=size)
with fhe_dot:
a = np.array([Input("x")])
b = np.array([Input(f"w")])
out = dot(a, b)
Output("y", horizontal_sum(out))
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def benchmark_vector_dot(size, mode="SIMD"):
if mode == "SIMD":
# generate program with SIMD-style
inputs, inputs_np = generate_inputs(size, label="x")
weights, weights_np = generate_inputs(size, label="w")
fhe_dot = generate_vector_dot(size)
else:
# generate program with vector size = 1
inputs, inputs_np = generate_inputs_naive(size, label="x")
weights, weights_np = generate_inputs_naive(size, label="w")
fhe_dot = generate_vector_dot_naive(size)
# compiling program
data = {**weights, **inputs}
compiler = CKKSCompiler(config={"security_level": "128", "warn_vec_size": "false"})
compiled, params, signature = compiler.compile(fhe_dot)
public_ctx, secret_ctx = generate_keys(params)
enc_inputs = public_ctx.encrypt(data, signature)
# Running program
start = time.time()
enc_outputs = public_ctx.execute(compiled, enc_inputs)
end = time.time()
run_time = end - start
# decrypt the output
outputs = secret_ctx.decrypt(enc_outputs, signature)
y = np.array(outputs["y"])
# get time for plaintext dot product
start = time.time()
true_y = inputs_np.dot(weights_np)
end = time.time()
plain_run_time = end - start
# verifying correctness of output
np.testing.assert_allclose(y, true_y)
return run_time, plain_run_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a dot product program")
parser.add_argument(
"--mode",
default="SIMD",
choices=["SIMD", "naive"],
)
args = parser.parse_args()
results_cipher = dict()
results_plain = dict()
if args.mode == "SIMD":
print("Generating code in SIMD style")
else:
print("Generating code in naive style")
for size in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:
time_cipher, time_plain = benchmark_vector_dot(size, args.mode)
results_cipher[f"{size}"] = time_cipher
results_plain[f"{size}"] = time_plain
print(f"Done vector size {size}, CKKS time: {time_cipher}")
print("Done")
print("CKKS times:", results_cipher)
print("Plain text times:", results_plain)
|
[
"eva.EvaProgram",
"eva.Output",
"argparse.ArgumentParser",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.dot",
"eva.ckks.CKKSCompiler",
"numpy.zeros",
"eva.std.numeric.horizontal_sum",
"eva.Input",
"time.time",
"eva.seal.generate_keys"
] |
[((251, 263), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (257, 263), True, 'import numpy as np\n'), ((346, 360), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (354, 360), True, 'import numpy as np\n'), ((668, 701), 'eva.EvaProgram', 'EvaProgram', (['"""fhe_dot"""'], {'vec_size': '(1)'}), "('fhe_dot', vec_size=1)\n", (678, 701), False, 'from eva import EvaProgram, Input, Output\n'), ((1096, 1110), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1104, 1110), True, 'import numpy as np\n'), ((1429, 1465), 'eva.EvaProgram', 'EvaProgram', (['"""fhe_dot"""'], {'vec_size': 'size'}), "('fhe_dot', vec_size=size)\n", (1439, 1465), False, 'from eva import EvaProgram, Input, Output\n'), ((2307, 2379), 'eva.ckks.CKKSCompiler', 'CKKSCompiler', ([], {'config': "{'security_level': '128', 'warn_vec_size': 'false'}"}), "(config={'security_level': '128', 'warn_vec_size': 'false'})\n", (2319, 2379), False, 'from eva.ckks import CKKSCompiler\n'), ((2469, 2490), 'eva.seal.generate_keys', 'generate_keys', (['params'], {}), '(params)\n', (2482, 2490), False, 'from eva.seal import generate_keys\n'), ((2579, 2590), 'time.time', 'time.time', ([], {}), '()\n', (2588, 2590), False, 'import time\n'), ((2660, 2671), 'time.time', 'time.time', ([], {}), '()\n', (2669, 2671), False, 'import time\n'), ((2790, 2812), 'numpy.array', 'np.array', (["outputs['y']"], {}), "(outputs['y'])\n", (2798, 2812), True, 'import numpy as np\n'), ((2867, 2878), 'time.time', 'time.time', ([], {}), '()\n', (2876, 2878), False, 'import time\n'), ((2928, 2939), 'time.time', 'time.time', ([], {}), '()\n', (2937, 2939), False, 'import time\n'), ((3016, 3053), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'true_y'], {}), '(y, true_y)\n', (3042, 3053), True, 'import numpy as np\n'), ((3133, 3197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run a dot product program"""'}), "(description='Run a dot product program')\n", (3156, 3197), False, 'import argparse\n'), ((910, 932), 'eva.Output', 'Output', (['"""y"""', 'out[0][0]'], {}), "('y', out[0][0])\n", (916, 932), False, 'from eva import EvaProgram, Input, Output\n'), ((1601, 1620), 'eva.std.numeric.horizontal_sum', 'horizontal_sum', (['out'], {}), '(out)\n', (1615, 1620), False, 'from eva.std.numeric import horizontal_sum\n'), ((1506, 1516), 'eva.Input', 'Input', (['"""x"""'], {}), "('x')\n", (1511, 1516), False, 'from eva import EvaProgram, Input, Output\n'), ((1541, 1552), 'eva.Input', 'Input', (['f"""w"""'], {}), "(f'w')\n", (1546, 1552), False, 'from eva import EvaProgram, Input, Output\n'), ((742, 757), 'eva.Input', 'Input', (['f"""x_{n}"""'], {}), "(f'x_{n}')\n", (747, 757), False, 'from eva import EvaProgram, Input, Output\n'), ((820, 835), 'eva.Input', 'Input', (['f"""w_{k}"""'], {}), "(f'w_{k}')\n", (825, 835), False, 'from eva import EvaProgram, Input, Output\n')]
|
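The benchmark above contrasts two ways of packing the same dot product into CKKS vectors. A plain-NumPy sketch of the difference (no encryption, only the data layout): "naive" keeps one scalar per length-1 vector, while "SIMD" packs the whole vector so the dot product is one elementwise multiply plus a horizontal sum.

import numpy as np

size = 8
x = np.arange(size, dtype=float)
w = np.arange(size, dtype=float)

naive = sum(x[i] * w[i] for i in range(size))  # one multiply per slot-1 "ciphertext"
simd = np.sum(x * w)                           # vectorised multiply + horizontal sum
assert naive == simd
print(naive)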
import random
import cv2
import numpy as np
from augraphy.base.augmentation import Augmentation
class NoiseTexturize(Augmentation):
"""Creates a random noise based texture pattern to emulate paper textures.
Consequently applies noise patterns to the original image from big to small.
:param sigma_range: Defines bounds of noise fluctuations.
:type sigma_range: tuple, optional
:param turbulence_range: Defines how quickly big patterns will be
        replaced with the small ones. The lower the value, the more iterations
        will be performed during texture generation.
:type turbulence_range: tuple, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
sigma_range=(3, 10),
turbulence_range=(2, 5),
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.sigma_range = sigma_range
self.turbulence_range = turbulence_range
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"NoiseTexturize(sigma_range={self.sigma_range}, turbulence_range={self.turbulence_range}, p={self.p})"
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
sigma = random.randint(self.sigma_range[0], self.sigma_range[1])
turbulence = random.randint(
self.turbulence_range[0],
self.turbulence_range[1],
)
result = image.astype(float)
rows, cols = image.shape[:2]
if len(image.shape) > 2:
channel = image.shape[2]
else:
channel = 0
ratio = cols
while not ratio == 1:
result += self.noise(cols, rows, channel, ratio, sigma=sigma)
ratio = (ratio // turbulence) or 1
cut = np.clip(result, 0, 255)
cut = cut.astype(np.uint8)
return cut
def noise(self, width, height, channel, ratio, sigma):
"""The function generates an image, filled with gaussian nose. If ratio
parameter is specified, noise will be generated for a lesser image and
then it will be upscaled to the original size. In that case noise will
generate larger square patterns. To avoid multiple lines, the upscale
uses interpolation.
:param ratio: the size of generated noise "pixels"
:param sigma: defines bounds of noise fluctuations
"""
mean = 0
        # assert width % ratio == 0, "Can't scale image of size {} and ratio {}".format(width, ratio)
        # assert height % ratio == 0, "Can't scale image of size {} and ratio {}".format(height, ratio)
h = int(height / ratio)
w = int(width / ratio)
if h == 0:
h = 1
if w == 0:
w = 1
gaussian = np.vectorize(lambda x: random.gauss(mean, sigma))
result = gaussian(np.array((w, h)))
result = cv2.resize(
result,
dsize=(width, height),
interpolation=cv2.INTER_LINEAR,
)
# for multiple channels input, convert result to multiple channels
if channel:
result = np.stack([result, result, result], axis=2)
return result
|
[
"numpy.clip",
"numpy.array",
"numpy.stack",
"cv2.resize",
"random.randint",
"random.gauss"
] |
[((3150, 3223), 'cv2.resize', 'cv2.resize', (['result'], {'dsize': '(width, height)', 'interpolation': 'cv2.INTER_LINEAR'}), '(result, dsize=(width, height), interpolation=cv2.INTER_LINEAR)\n', (3160, 3223), False, 'import cv2\n'), ((1413, 1469), 'random.randint', 'random.randint', (['self.sigma_range[0]', 'self.sigma_range[1]'], {}), '(self.sigma_range[0], self.sigma_range[1])\n', (1427, 1469), False, 'import random\n'), ((1495, 1561), 'random.randint', 'random.randint', (['self.turbulence_range[0]', 'self.turbulence_range[1]'], {}), '(self.turbulence_range[0], self.turbulence_range[1])\n', (1509, 1561), False, 'import random\n'), ((2023, 2046), 'numpy.clip', 'np.clip', (['result', '(0)', '(255)'], {}), '(result, 0, 255)\n', (2030, 2046), True, 'import numpy as np\n'), ((3114, 3130), 'numpy.array', 'np.array', (['(w, h)'], {}), '((w, h))\n', (3122, 3130), True, 'import numpy as np\n'), ((3388, 3430), 'numpy.stack', 'np.stack', (['[result, result, result]'], {'axis': '(2)'}), '([result, result, result], axis=2)\n', (3396, 3430), True, 'import numpy as np\n'), ((3060, 3085), 'random.gauss', 'random.gauss', (['mean', 'sigma'], {}), '(mean, sigma)\n', (3072, 3085), False, 'import random\n')]
|
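NoiseTexturize builds its texture by adding noise from coarse to fine. The following is a simplified NumPy/OpenCV re-expression of that loop (illustrative sizes, not the exact internal calls): noise drawn at a reduced resolution is upscaled with linear interpolation into large soft patterns, and the ratio shrinks by `turbulence` until it reaches 1.

import cv2
import numpy as np

rows, cols, sigma, turbulence = 64, 96, 5, 2
result = np.zeros((rows, cols), dtype=float)
ratio = cols
while ratio != 1:
    h, w = max(rows // ratio, 1), max(cols // ratio, 1)
    coarse = np.random.normal(0, sigma, size=(h, w))
    result += cv2.resize(coarse, dsize=(cols, rows), interpolation=cv2.INTER_LINEAR)
    ratio = (ratio // turbulence) or 1
textured = np.clip(result + 128, 0, 255).astype(np.uint8)
print(textured.shape)  # (64, 96)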
"""
Expression Dataset for analysis of matrix (RNASeq/microarray) data with annotations
"""
import pandas as PD
import numpy as N
from matplotlib import pylab as P
from collections import OrderedDict
from ast import literal_eval
# from ..plot.matrix import matshow_clustered
class ExpressionSet(object):
def __init__(self, eData, gData=None, sData=None):
"""
eData: expression data (gene x samples) header: MultiIndex (samplename, group)
        gData: gene annotation (gene x gene annotations)
        sData: sample annotation (sample x sample annotations)
"""
self.eData = eData
self.gData = gData
self.sData = sData
def read(self, eFile, gFile=None, sFile=None):
pass
def write(self, eFile, gFile=None, sFile=None):
self.eData.to_csv(eFile, tupleize_cols=False, sep="\t")
if gFile is not None:
self.gData.to_csv(gFile, tupleize_cols=False, sep="\t")
if sFile is not None:
self.sData.to_csv(sFile, tupleize_cols=False, sep="\t")
def find(self, field, pat):
pass
def read_bioinfo3_data(fname):
""" read bioinfo3.table.dataset type of data """
fobj = open(fname)
groups = OrderedDict()
cnt = 0
for line in fobj:
cnt += 1
if line[:2]=='#%':
if line.startswith('#%groups:'):
gname, members = line[len('#%groups:'):].split('=')
gname = gname.strip()
members = members.strip().split(',')
groups[gname] = members
datafields = line.strip().split('=')[1].strip().split(',')
elif line.startswith('#%fields'):
fields = line.strip().split('=')[1].strip().split(',')
elif not line.strip():
continue # empty line
else:
break
df = PD.read_table(fname, skiprows=cnt-1)
f2g = {}
for g,m in groups.items():
for f in m:
f2g[f] = g
df.columns = PD.MultiIndex.from_tuples([(x, f2g.get(x,'')) for x in df.columns], names=['samplename','group'])
e = ExpressionSet(df)
return e
def read_multiindex_data(fname, tupleize=True, index_names = ['samplename','group']):
""" read dataset table with MultiIndex in the header """
if not tupleize:
df = PD.read_table(fname, header=range(len(index_names)), index_col=[0], tupleize_cols=False)
e = ExpressionSet(df)
return e
df = PD.read_table(fname, index_col=0)
df.columns = PD.MultiIndex.from_tuples(df.columns.map(literal_eval).tolist(), names=index_names)
e = ExpressionSet(df)
return e
def read_grouped_table(fname, groupfn=lambda x: '_'.join(x.split('_')[:-1])):
""" Read dataset whose group is encoded in the colname. Column 0 is index. """
df = PD.read_table(fname)
f2g = {x:groupfn(x) for x in df.columns}
df.columns = PD.MultiIndex.from_tuples([(x, f2g[x]) for x in df.columns], names=['samplename','group'])
e = ExpressionSet(df)
return e
def concatenate(dic):
""" dic: dict of DataFrames
merge all using index and outer join
"""
keys = list(dic)
d = dic[keys[0]].merge(dic[keys[1]], left_index=True, right_index=True, how='outer', suffixes=('.'+keys[0],'.'+keys[1]))
for k in keys[2:]:
d = d.merge(dic[k], left_index=True, right_index=True, how='outer', suffixes=('','.'+k))
return d
def calc_mergesortkey(dic, pos_neg_flds):
conc = concatenate(dic)
selected = ~N.isnan(conc[pos_neg_flds])
pos = conc[pos_neg_flds]>0
neg = conc[pos_neg_flds]<=0
num_pos = pos.sum(axis=1)
num_neg = neg.sum(axis=1)
pos_neg_mix = -1*(num_neg==0) + 1*(num_pos==0) # pos(-1), mix(0), neg(1)
#num_hit = num_pos - num_neg
num_hit = num_pos + num_neg
n = len(pos_neg_flds)
#position = (N.arange(1,n+1)*pos + N.arange(-1,-n-1,-1)*neg).sum(axis=1)
position = (N.arange(1,n+1)*pos + N.arange(-n,0)*neg).sum(axis=1)
strength = (conc[pos_neg_flds]*pos).sum(axis=1) + (conc[pos_neg_flds]*neg).sum(axis=1)
#msk = PD.Series(list(zip(pos_neg_mix, num_hit, position, strength)), index=conc.index)
#msk.sort()
conc['mergesortkey'] = list(zip(pos_neg_mix, num_hit, position, strength))
conc.sort('mergesortkey', inplace=True)
return conc
|
[
"collections.OrderedDict",
"numpy.isnan",
"pandas.read_table",
"pandas.MultiIndex.from_tuples",
"numpy.arange"
] |
[((1227, 1240), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1238, 1240), False, 'from collections import OrderedDict\n'), ((1862, 1900), 'pandas.read_table', 'PD.read_table', (['fname'], {'skiprows': '(cnt - 1)'}), '(fname, skiprows=cnt - 1)\n', (1875, 1900), True, 'import pandas as PD\n'), ((2481, 2514), 'pandas.read_table', 'PD.read_table', (['fname'], {'index_col': '(0)'}), '(fname, index_col=0)\n', (2494, 2514), True, 'import pandas as PD\n'), ((2826, 2846), 'pandas.read_table', 'PD.read_table', (['fname'], {}), '(fname)\n', (2839, 2846), True, 'import pandas as PD\n'), ((2909, 3005), 'pandas.MultiIndex.from_tuples', 'PD.MultiIndex.from_tuples', (['[(x, f2g[x]) for x in df.columns]'], {'names': "['samplename', 'group']"}), "([(x, f2g[x]) for x in df.columns], names=[\n 'samplename', 'group'])\n", (2934, 3005), True, 'import pandas as PD\n'), ((3515, 3542), 'numpy.isnan', 'N.isnan', (['conc[pos_neg_flds]'], {}), '(conc[pos_neg_flds])\n', (3522, 3542), True, 'import numpy as N\n'), ((3929, 3947), 'numpy.arange', 'N.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (3937, 3947), True, 'import numpy as N\n'), ((3951, 3966), 'numpy.arange', 'N.arange', (['(-n)', '(0)'], {}), '(-n, 0)\n', (3959, 3966), True, 'import numpy as N\n')]
|
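read_grouped_table above derives each sample's group from its column name and builds a (samplename, group) MultiIndex. A minimal pandas sketch of that convention, with made-up sample names:

import pandas as PD

df = PD.DataFrame({"liver_1": [1.0, 2.0], "liver_2": [1.5, 2.5], "brain_1": [0.2, 0.4]})
groupfn = lambda x: '_'.join(x.split('_')[:-1])
df.columns = PD.MultiIndex.from_tuples([(c, groupfn(c)) for c in df.columns],
                                      names=['samplename', 'group'])
print(df.columns.get_level_values('group').tolist())  # ['liver', 'liver', 'brain']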