import numpy as np
import math
from scipy.optimize import minimize
class Optimize:
def __init__(self):
self.c_rad2deg = 180.0 / np.pi
self.c_deg2rad = np.pi / 180.0
def isRotationMatrix(self, R) :
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
# print('n: ' + str(n))
return n < 1e-6
def Rot_Matrix_2_Euler_Angles(self, R):
assert(self.isRotationMatrix(R))
pitch = -math.asin(R[1, 2])
roll = -math.atan2(R[1, 0], R[1, 1])
yaw = -math.atan2(R[0, 2], R[2, 2])
return np.array([roll, pitch, yaw])
    def Get_Init_Guess(self, l_vec, b_vec, f_vec):
        # Re-orthogonalise the triad: rebuild f from b x l, then l from f x b,
        # so all three vectors are mutually perpendicular before normalising.
        f_vec = np.cross(b_vec, l_vec)
        l_vec = np.cross(f_vec, b_vec)
l_norm = np.linalg.norm(l_vec)
l_vec /= l_norm
b_norm = np.linalg.norm(b_vec)
b_vec /= b_norm
f_norm = np.linalg.norm(f_vec)
f_vec /= f_norm
l_vec = l_vec.reshape(3, 1)
b_vec = b_vec.reshape(3, 1)
f_vec = f_vec.reshape(3, 1)
l = np.array([1, 0, 0]).reshape(1, 3)
b = np.array([0, 1, 0]).reshape(1, 3)
f = np.array([0, 0, 1]).reshape(1, 3)
R = l_vec @ l + b_vec @ b + f_vec @ f
assert (R.shape == (3, 3))
        roll, pitch, yaw = self.Rot_Matrix_2_Euler_Angles(R)
        # return in (pitch, yaw, roll) order to match the (rx, ry, rz)
        # convention used by Objective and Euler_Angles_2_Vectors
        return np.array([pitch, yaw, roll])
def Euler_Angles_2_Vectors(self, rx, ry, rz):
'''
rx: pitch
ry: yaw
rz: roll
'''
ry *= -1
rz *= -1
R_x = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(rx), -np.sin(rx)],
[0.0, np.sin(rx), np.cos(rx)]])
R_y = np.array([[np.cos(ry), 0.0, np.sin(ry)],
[0.0, 1.0, 0.0],
[-np.sin(ry), 0.0, np.cos(ry)]])
R_z = np.array([[np.cos(rz), -np.sin(rz), 0.0],
[np.sin(rz), np.cos(rz), 0.0],
[0.0, 0.0, 1.0]])
R = R_y @ R_x @ R_z
l_vec = R @ np.array([1, 0, 0])
b_vec = R @ np.array([0, 1, 0])
f_vec = R @ np.array([0, 0, 1])
return np.array([l_vec, b_vec, f_vec])
def Objective(self, x, l_vec, b_vec, f_vec):
rx = x[0]
ry = x[1]
rz = x[2]
l_hat, b_hat, f_hat = self.Euler_Angles_2_Vectors(rx, ry, rz)
l_vec_dot = np.clip(l_hat[0] * l_vec[0] + l_hat[1] * l_vec[1] + l_hat[2] * l_vec[2], -1, 1)
b_vec_dot = np.clip(b_hat[0] * b_vec[0] + b_hat[1] * b_vec[1] + b_hat[2] * b_vec[2], -1, 1)
f_vec_dot = np.clip(f_hat[0] * f_vec[0] + f_hat[1] * f_vec[1] + f_hat[2] * f_vec[2], -1, 1)
return math.acos(l_vec_dot) ** 2 + math.acos(b_vec_dot) ** 2 + math.acos(f_vec_dot) ** 2
def Get_Ortho_Vectors(self, l_vec, b_vec, f_vec):
x0 = self.Get_Init_Guess(l_vec, b_vec, f_vec)
sol = minimize(self.Objective, x0, args=(l_vec, b_vec, f_vec), method='nelder-mead', options={'xatol': 1e-7, 'disp': False})
pitch_rad, yaw_rad, roll_rad = sol.x
v1, v2, v3 = self.Euler_Angles_2_Vectors(pitch_rad, yaw_rad, roll_rad)
return np.array([v1, v2, v3])
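A minimal usage sketch for the class above (the input vectors are illustrative, not from the original source): three roughly orthogonal direction vectors go in, an orthonormalised triad comes out.

opt = Optimize()
l_in = np.array([1.0, 0.1, 0.0])  # roughly "left"
b_in = np.array([0.0, 1.0, 0.1])  # roughly "back"
f_in = np.array([0.1, 0.0, 1.0])  # roughly "front"
l_vec, b_vec, f_vec = opt.Get_Ortho_Vectors(l_in, b_in, f_in)
# the pairwise dot products of the returned vectors should be close to zero
print(np.dot(l_vec, b_vec), np.dot(b_vec, f_vec), np.dot(l_vec, f_vec))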
import matplotlib.pyplot as plt
import numpy as np
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.agent_utils import TrainState
from flatland.utils.rendertools import RenderTool, AgentRenderVariant
from utils.fast_methods import fast_count_nonzero, fast_argmax
class AgentCanChooseHelper:
def __init__(self):
self.render_debug_information = False
def reset(self, env):
self.env = env
if self.env is not None:
self.env.dev_obs_dict = {}
self.switches = {}
self.switches_neighbours = {}
self.switch_cluster = {}
self.switch_cluster_occupied = {}
self.switch_cluster_lock = {}
self.switch_cluster_grid = None
self.agent_positions = None
        self.reset_switch_cluster_lock()
self.reset_switch_cluster_occupied()
if self.env is not None:
self.find_all_cell_where_agent_can_choose()
self.calculate_agent_positions()
def get_agent_positions(self):
return self.agent_positions
def calculate_agent_positions(self):
self.agent_positions: np.ndarray = np.full((self.env.height, self.env.width), -1)
for agent_handle in self.env.get_agent_handles():
agent = self.env.agents[agent_handle]
if agent.state in [TrainState.MOVING, TrainState.STOPPED, TrainState.MALFUNCTION]:
position = agent.position
if position is None:
position = agent.initial_position
self.agent_positions[position] = agent_handle
def clear_switch_cluster_lock(self):
'''
clean up switch cluster lock
'''
self.switch_cluster_lock = {}
def clear_switch_cluster_occupied(self):
'''
clean up switch cluster occupied
'''
self.switch_cluster_occupied = {}
def lock_switch_cluster(self, handle, agent_pos, agent_dir):
'''
Lock the switch cluster if possible
:param handle: Agent handle
:param agent_pos: position to lock
:param agent_dir: direction
:return: True if lock is successfully done otherwise false (it might still have a lock)
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return True
lock_handle = self.switch_cluster_lock.get(cluster_id, None)
if lock_handle is None:
self.switch_cluster_lock.update({cluster_id: handle})
return True
if lock_handle == handle:
return True
return False
def unlock_switch_cluster(self, handle, agent_pos, agent_dir):
'''
        Unlock the switch cluster if possible
:param handle: Agent handle
:param agent_pos: position to lock
:param agent_dir: direction
        :return: True if the unlock succeeded, otherwise False (the lock might be owned by another agent)
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return True
lock_handle = self.switch_cluster_lock.get(cluster_id, None)
if lock_handle == handle:
            self.switch_cluster_lock.update({cluster_id: None})
return True
return False
def get_agent_position_and_direction(self, handle):
'''
        Returns the agent position and direction - if the agent has not yet started, the initial
        position and direction are returned
        :param handle: agent reference (handle)
        :return: agent_pos, agent_dir, agent_state, agent_target
'''
agent = self.env.agents[handle]
agent_pos = agent.position
agent_dir = agent.direction
if agent_pos is None:
agent_pos = agent.initial_position
agent_dir = agent.initial_direction
return agent_pos, agent_dir, agent.state, agent.target
def has_agent_switch_cluster_lock(self, handle, agent_pos=None, agent_dir=None):
'''
Checks if the agent passed by the handle has the switch cluster lock
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return: True if handle owns the lock otherwise false
'''
if agent_pos is None or agent_dir is None:
agent_pos, agent_dir, agent_state, agent_target = self.get_agent_position_and_direction(handle)
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return False
lock_handle = self.switch_cluster_lock.get(cluster_id, None)
return lock_handle == handle
def get_switch_cluster_occupiers_next_cell(self, handle, agent_pos, agent_dir):
'''
Returns all occupiers for the next cell
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return: a list of all agents (handles) which occupied the next cell switch cluster
'''
possible_transitions = self.env.rail.get_transitions(*agent_pos, agent_dir)
occupiers = []
for new_direction in range(4):
if possible_transitions[new_direction] == 1:
new_position = get_new_position(agent_pos, new_direction)
occupiers += self.get_switch_cluster_occupiers(handle,
new_position,
new_direction)
return occupiers
def mark_switch_next_cluster_occupied(self, handle):
        agent_position, agent_direction, agent_state, agent_target = \
            self.get_agent_position_and_direction(handle)
        possible_transitions = self.env.rail.get_transitions(*agent_position, agent_direction)
for new_direction in range(4):
if possible_transitions[new_direction] == 1:
new_position = get_new_position(agent_position, new_direction)
self.mark_switch_cluster_occupied(handle, new_position, new_direction)
def can_agent_enter_next_cluster(self, handle):
        agent_position, agent_direction, agent_state, agent_target = \
            self.get_agent_position_and_direction(handle)
        occupiers = self.get_switch_cluster_occupiers_next_cell(handle,
                                                                agent_position,
                                                                agent_direction)
if len(occupiers) > 0 and handle not in occupiers:
return False
return True
def get_switch_cluster_occupiers(self, handle, agent_pos, agent_dir):
'''
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return: a list of all agents (handles) which occupied the switch cluster
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return []
return self.switch_cluster_occupied.get(cluster_id, [])
def mark_switch_cluster_occupied(self, handle, agent_pos, agent_dir):
'''
Add the agent handle to the switch cluster occupied data. Set the agent (handle) as occupier
:param handle: agent reference (handle)
:param agent_pos: position to check
:param agent_dir: direction property
:return:
'''
cluster_id, grid_cell_members = self.get_switch_cluster(agent_pos)
if cluster_id < 1:
return
agent_handles = self.switch_cluster_occupied.get(cluster_id, [])
agent_handles.append(handle)
self.switch_cluster_occupied.update({cluster_id: agent_handles})
    def reset_switch_cluster_lock(self):
'''
Reset the explicit lock data switch_cluster_lock
'''
self.clear_switch_cluster_lock()
def reset_switch_cluster_occupied(self, handle_only_active_agents=False):
'''
Reset the occupied flag by recomputing the switch_cluster_occupied map
        :param handle_only_active_agents: if True, only active agents (MOVING, STOPPED or MALFUNCTION) will be mapped
'''
self.clear_switch_cluster_occupied()
for handle in range(self.env.get_num_agents()):
agent_pos, agent_dir, agent_state, agent_target = self.get_agent_position_and_direction(handle)
if handle_only_active_agents:
if agent_state in [TrainState.MOVING, TrainState.STOPPED, TrainState.MALFUNCTION]:
self.mark_switch_cluster_occupied(handle, agent_pos, agent_dir)
else:
if agent_state < TrainState.DONE:
self.mark_switch_cluster_occupied(handle, agent_pos, agent_dir)
def get_switch_cluster(self, pos):
'''
Returns the switch cluster at position pos
:param pos: the position for which the switch cluster must be returned
        :return: if the position is not None and the switch clusters have been computed, the
                 cluster_id and its grid cell members; otherwise -1 and an empty list
'''
if pos is None:
return -1, []
if self.switch_cluster_grid is None:
return -1, []
cluster_id = self.switch_cluster_grid[pos]
grid_cell_members = self.switch_cluster.get(cluster_id, [])
return cluster_id, grid_cell_members
def find_all_switches(self):
'''
        Search the environment (rail grid) for all switch cells. A switch is a cell where more than one
        transition exists; for each switch, all directions in which it acts as a switch are collected.
'''
self.switches = {}
for h in range(self.env.height):
for w in range(self.env.width):
pos = (h, w)
for dir in range(4):
possible_transitions = self.env.rail.get_transitions(*pos, dir)
num_transitions = fast_count_nonzero(possible_transitions)
if num_transitions > 1:
directions = self.switches.get(pos, [])
directions.append(dir)
self.switches.update({pos: directions})
def find_all_switch_neighbours(self):
'''
        Collect all cells that are neighbours of a switch cell: cells from which the agent can reach a
        switch in a single step. A switch is a cell where the agent has more than one transition.
'''
self.switches_neighbours = {}
for h in range(self.env.height):
for w in range(self.env.width):
# look one step forward
for dir in range(4):
pos = (h, w)
possible_transitions = self.env.rail.get_transitions(*pos, dir)
for d in range(4):
if possible_transitions[d] == 1:
new_cell = get_new_position(pos, d)
if new_cell in self.switches.keys():
directions = self.switches_neighbours.get(pos, [])
directions.append(dir)
self.switches_neighbours.update({pos: directions})
    def find_cluster_label(self, in_label) -> int:
        # follow the label chain to its root (union-find style "find")
        label = int(in_label)
while 0 != self.label_dict[label]:
label = self.label_dict[label]
return label
def union_cluster_label(self, root, slave) -> None:
root_label = self.find_cluster_label(root)
slave_label = self.find_cluster_label(slave)
if slave_label != root_label:
self.label_dict[slave_label] = root_label
def find_connected_clusters_and_label(self, binary_image):
padded_binary_image = np.pad(binary_image, ((1, 0), (1, 0)), 'constant', constant_values=(0, 0))
w = np.size(binary_image, 1)
h = np.size(binary_image, 0)
self.label_dict = [int(i) for i in np.zeros(w * h)]
label = 1
# first pass
        for row in range(1, h + 1):
            for col in range(1, w + 1):
                working_position = (row, col)
                working_pixel = padded_binary_image[working_position]
                if working_pixel != 0:
                    left_pixel_pos = (row, col - 1)
                    up_pixel_pos = (row - 1, col)
                    left_pixel = padded_binary_image[left_pixel_pos]
                    up_pixel = padded_binary_image[up_pixel_pos]
                    # Use connections (rails) for clustering (only genuinely connected pixels build a cluster)
if (cow < self.env.height) and (col < self.env.width):
left_ok = 0
up_ok = 0
# correct padded image position (railenv)
t_working_position = (working_position[0] - 1, working_position[1] - 1)
t_left_pixel_pos = (left_pixel_pos[0] - 1, left_pixel_pos[1] - 1)
t_up_pixel_pos = (up_pixel_pos[0] - 1, up_pixel_pos[1] - 1)
for direction_loop in range(4):
possible_transitions = self.env.rail.get_transitions(*t_working_position, direction_loop)
orientation = direction_loop
if fast_count_nonzero(possible_transitions) == 1:
orientation = fast_argmax(possible_transitions)
for dir_loop, new_direction in enumerate(
[(orientation + dir_loop) % 4 for dir_loop in range(-1, 3)]):
if possible_transitions[new_direction] == 1:
new_pos = get_new_position(t_working_position, new_direction)
if new_pos == t_left_pixel_pos:
left_ok = 1
if new_pos == t_up_pixel_pos:
up_ok = 1
left_pixel *= left_ok
up_pixel *= up_ok
# build clusters
if left_pixel == 0 and up_pixel == 0:
padded_binary_image[working_position] = label
label += 1
if left_pixel != 0 and up_pixel != 0:
smaller = left_pixel if left_pixel < up_pixel else up_pixel
bigger = left_pixel if left_pixel > up_pixel else up_pixel
padded_binary_image[working_position] = smaller
self.union_cluster_label(smaller, bigger)
if up_pixel != 0 and left_pixel == 0:
padded_binary_image[working_position] = up_pixel
if up_pixel == 0 and left_pixel != 0:
padded_binary_image[working_position] = left_pixel
        for row in range(1, h + 1):
            for col in range(1, w + 1):
                root = self.find_cluster_label(padded_binary_image[row][col])
                padded_binary_image[row][col] = root
self.switch_cluster_grid = padded_binary_image[1:, 1:]
for h in range(self.env.height):
for w in range(self.env.width):
working_position = (h, w)
root = self.switch_cluster_grid[working_position]
if root > 0:
pos_data = self.switch_cluster.get(root, [])
pos_data.append(working_position)
self.switch_cluster.update({root: pos_data})
def cluster_all_switches(self):
info_image = np.zeros((self.env.height, self.env.width))
# for h in range(self.env.height):
# for w in range(self.env.width):
# # look one step forward
# if self.env.rail.grid[h][w] > 0:
# info_image[(h,w)] = -1
for key in self.switches.keys():
info_image[key] = 1
# build clusters
self.find_connected_clusters_and_label(info_image)
if self.render_debug_information:
# Setup renderer
env_renderer = RenderTool(self.env, gl="PGL",
agent_render_variant=AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX)
env_renderer.set_new_rail()
env_renderer.render_env(
show=True,
frames=False,
show_observations=True,
show_predictions=False
)
plt.subplot(1, 2, 1)
plt.imshow(info_image)
plt.subplot(1, 2, 2)
plt.imshow(self.switch_cluster_grid)
plt.show()
plt.pause(0.01)
def find_all_cell_where_agent_can_choose(self):
'''
prepare the memory - collect all cells where the agent can choose more than FORWARD/STOP.
'''
self.find_all_switches()
self.find_all_switch_neighbours()
self.cluster_all_switches()
def check_agent_decision(self, position, direction):
'''
Decide whether the agent is
- on a switch
        - at a switch neighbour (near to a switch). The switch must be one where the agent has more
          options than FORWARD/STOP
        - on any switch: regardless of whether the agent has more options than FORWARD/STOP
        - at any switch neighbour: regardless of whether the agent has more than one option
          (transition) when it reaches the switch
:param position: (x,y) cell coordinate
:param direction: Flatland direction
:return: agents_on_switch, agents_near_to_switch, agents_near_to_switch_all, agents_on_switch_all
'''
agents_on_switch = False
agents_on_switch_all = False
agents_near_to_switch = False
agents_near_to_switch_all = False
if position in self.switches.keys():
agents_on_switch = direction in self.switches[position]
agents_on_switch_all = True
if position in self.switches_neighbours.keys():
new_cell = get_new_position(position, direction)
if new_cell in self.switches.keys():
                if direction not in self.switches[new_cell]:
agents_near_to_switch = direction in self.switches_neighbours[position]
else:
agents_near_to_switch = direction in self.switches_neighbours[position]
agents_near_to_switch_all = direction in self.switches_neighbours[position]
return agents_on_switch, agents_near_to_switch, agents_near_to_switch_all, agents_on_switch_all
def requires_agent_decision(self):
'''
Returns for all agents its check_agent_decision values
:return: dicts with check_agent_decision values stored (each agents)
'''
agents_can_choose = {}
agents_on_switch = {}
agents_on_switch_all = {}
agents_near_to_switch = {}
agents_near_to_switch_all = {}
for a in range(self.env.get_num_agents()):
ret_agents_on_switch, ret_agents_near_to_switch, ret_agents_near_to_switch_all, ret_agents_on_switch_all = \
self.check_agent_decision(
self.env.agents[a].position,
self.env.agents[a].direction)
agents_on_switch.update({a: ret_agents_on_switch})
agents_on_switch_all.update({a: ret_agents_on_switch_all})
ready_to_depart = self.env.agents[a].state == TrainState.READY_TO_DEPART
agents_near_to_switch.update({a: (ret_agents_near_to_switch and not ready_to_depart)})
agents_can_choose.update({a: agents_on_switch[a] or agents_near_to_switch[a]})
agents_near_to_switch_all.update({a: (ret_agents_near_to_switch_all and not ready_to_depart)})
return agents_can_choose, agents_on_switch, agents_near_to_switch, agents_near_to_switch_all, agents_on_switch_all
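A minimal usage sketch, assuming a Flatland RailEnv named env has been built and reset elsewhere; the helper itself only needs reset(env) before any queries.

helper = AgentCanChooseHelper()
helper.reset(env)  # env: an already constructed and reset flatland RailEnv (assumed)
for handle in range(env.get_num_agents()):
    pos, direction, state, target = helper.get_agent_position_and_direction(handle)
    on_switch, near_switch, near_switch_all, on_switch_all = \
        helper.check_agent_decision(pos, direction)
    if near_switch and helper.can_agent_enter_next_cluster(handle):
        pass  # the agent approaches a decision cell and the next cluster is free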
import logging
import numpy as np
from gunpowder.nodes.batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class TanhSaturate(BatchFilter):
'''Saturate the values of an array to be floats between -1 and 1 by applying the tanh function.
Args:
array (:class:`ArrayKey`):
The key of the array to modify.
        scale (scalar, optional):
            The factor to divide by before applying the tanh; controls how quickly the values saturate towards -1 and 1. Default is 1.
'''
def __init__(self, array, scale=None):
self.array = array
if scale is not None:
self.scale = scale
else:
self.scale = 1.
def process(self, batch, request):
if self.array not in batch.arrays:
return
array = batch.arrays[self.array]
array.data = np.tanh(array.data/self.scale)
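A quick numpy-only illustration of the transform this node applies (not part of the original module): the larger the scale, the later the values saturate.

data = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
print(np.tanh(data / 1.0))   # ~ [-1.00, -0.76,  0.00,  0.76,  1.00]
print(np.tanh(data / 10.0))  # ~ [-0.76, -0.10,  0.00,  0.10,  0.76]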
"""
Core functionality for feature computation
<NAME>
Copyright (c) 2021. Pfizer Inc. All rights reserved.
"""
from abc import ABC, abstractmethod
from collections.abc import Iterator, Sequence
import json
from warnings import warn
from pandas import DataFrame
from numpy import float_, asarray, zeros, sum, moveaxis
__all__ = ["Bank"]
class ArrayConversionError(Exception):
pass
def get_n_feats(size, index):
if isinstance(index, int):
return 1
elif isinstance(index, (Iterator, Sequence)):
return len(index)
elif isinstance(index, slice):
return len(range(*index.indices(size)))
elif isinstance(index, type(Ellipsis)):
return size
def partial_index_check(index):
if index is None:
index = ...
if not isinstance(index, (int, Iterator, Sequence, type(...), slice)):
raise IndexError(f"Index type ({type(index)}) not understood.")
    # str is a Sequence, so it must be rejected explicitly
    if isinstance(index, str):
        raise IndexError("Index type (str) not understood.")
return index
def normalize_indices(nfeat, index):
if index is None:
return [...] * nfeat
elif not isinstance(index, (Iterator, Sequence)): # slice, single integer, etc
return [partial_index_check(index)] * nfeat
elif all([isinstance(i, int) for i in index]): # iterable of ints
return [index] * nfeat
elif isinstance(index, Sequence): # able to be indexed
return [partial_index_check(i) for i in index]
    else:  # pragma: no cover
        raise IndexError(f"Index type ({type(index)}) not understood.")
def normalize_axes(ndim, axis, ind_axis):
"""
Normalize input axes to be positive/correct for how the swapping has to work
"""
if axis == ind_axis:
raise ValueError("axis and index_axis cannot be the same")
if ndim == 1:
return 0, None
elif ndim >= 2:
"""
| shape | ax | ia | move1 | ax | ia | res | ax | ia | res move |
|--------|----|----|--------|----|----|-------|----|----|----------|
| (a, b) | 0 | 1 | (b, a) | 0 | 0 | (bf,) | | | |
| (a, b) | 0 | N | (b, a) | 0 | N | (f, b)| | | |
        | (a, b) | 1  | 0  |        |    |    | (af,) |    |    |          |
| (a, b) | 1 | N | | | | (f, a)| | | |
| shape | ax| ia | move1 | ax| ia| move2 | res | | ia| res move |
|----------|---|------|----------|---|---|----------|----------|----|---|----------|
| (a, b, c)| 0 | 1(0) | (b, c, a)| | | | (bf, c) | 0 | 0 | |
| (a, b, c)| 0 | 2(1) | (b, c, a)| | 1 | (c, b, a)| (cf, b) | 0 | 1 | (b, cf) |
| (a, b, c)| 0 | N | (b, c, a)| | | | (f, b, c)| | | |
| (a, b, c)| 1 | 0 | (a, c, b)| | | | (af, c) | 0 | 0 | |
| (a, b, c)| 1 | 2(1) | (a, c, b)| | 1 | (c, a, b)| (cf, a) | 0 | 1 | (a, cf) |
| (a, b, c)| 1 | N | (a, c, b)| | | | (f, a, c)| | | |
| (a, b, c)| 2 | 0 | (a, b, c)| | | | (af, b) | 0 | 0 | |
| (a, b, c)| 2 | 1 | (a, b, c)| | 1 | (b, a, c)| (bf, a) | 0 | 1 | (a, bf) |
| (a, b, c)| 2 | N | (a, b, c)| | | | (f, a, b)| | | |
| shape | ax| ia | move1 | ia| move2 | res | | ia| res move |
|------------|---|------|-------------|---|-------------|-------------|---|---|-----------|
|(a, b, c, d)| 0 | 1(0) | (b, c, d, a)| | | (bf, c, d) | 0 | 0 | |
|(a, b, c, d)| 0 | 2(1) | (b, c, d, a)| 1 | (c, b, d, a)| (cf, b, d) | 0 | 1 | (b, cf, d)|
|(a, b, c, d)| 0 | 3(2) | (b, c, d, a)| 2 | (d, b, c, a)| (df, b, c) | 0 | 2 | (d, c, df)|
|(a, b, c, d)| 0 | N | (b, c, d, a)| | | (f, b, c, d)| | | |
|(a, b, c, d)| 1 | 0 | (a, c, d, b)| | | (af, c, d) | | | |
|(a, b, c, d)| 1 | 2(1) | (a, c, d, b)| 1 | (c, a, d, b)| (cf, a, d) | 0 | 1 | (a, cf, d)|
|(a, b, c, d)| 1 | 3(2) | (a, c, d, b)| 2 | (d, a, c, b)| (df, a, c) | 0 | 2 | (a, c, df)|
|(a, b, c, d)| 1 | N | (a, c, d, b)| | | (f, a, c, d)| | | |
|(a, b, c, d)| 2 | 0 | (a, b, d, c)| | | (af, b, d) | | | |
|(a, b, c, d)| 2 | 1 | (a, b, d, c)| 1 | (b, a, d, c)| (bf, a, d) | 0 | 1 | (a, bf, d)|
|(a, b, c, d)| 2 | 3(2) | (a, b, d, c)| 2 | (d, a, b, c)| (df, a, b) | 0 | 2 | (a, b, df)|
|(a, b, c, d)| 2 | N | (a, b, d, c)| | | (f, a, b, d)| | | |
|(a, b, c, d)| 3 | 0 | (a, b, c, d)| | | (af, b, c) | | | |
|(a, b, c, d)| 3 | 1 | (a, b, c, d)| 1 | (b, a, c, d)| (bf, a, c) | 0 | 1 | (a, bf, c)|
|(a, b, c, d)| 3 | 2 | (a, b, c, d)| 2 | (c, a, b, d)| (cf, a, b) | 0 | 2 | (a, b, cf)|
|(a, b, c, d)| 3 | N | (a, b, c, d)| | | (f, a, b, c)| | | |
"""
ax = axis if axis >= 0 else ndim + axis
if ind_axis is None:
return ax, None
ia = ind_axis if ind_axis >= 0 else ndim + ind_axis
if ia > ax:
ia -= 1
return ax, ia
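# Illustrative check against the tables above: normalize_axes(3, 0, 2)
# returns (0, 1) -- once the computation axis is moved to the end, the
# index axis shifts down by one (the "(a, b, c) | ax=0 | ia=2(1)" row).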
class Bank:
"""
A feature bank object for ease in creating a table or pipeline of features to be computed.
Parameters
----------
bank_file : {None, path-like}, optional
Path to a saved bank file to load. Optional
Examples
--------
"""
__slots__ = ("_feats", "_indices")
def __str__(self):
return "Bank"
def __repr__(self):
s = "Bank["
for f in self._feats:
s += f"\n\t{f!r},"
s += "\n]"
return s
def __contains__(self, item):
return item in self._feats
def __len__(self):
return len(self._feats)
def __init__(self, bank_file=None):
# initialize some variables
self._feats = []
self._indices = []
if bank_file is not None:
self.load(bank_file)
def add(self, features, index=None):
"""
Add a feature or features to the pipeline.
Parameters
----------
features : {Feature, list}
Single signal Feature, or list of signal Features to add to the feature Bank
index : {int, slice, list}, optional
Index to be applied to data input to each features. Either a index that will
apply to every feature, or a list of features corresponding to each feature being
added.
"""
if isinstance(features, Feature):
if features in self:
warn(
f"Feature {features!s} already in the Bank, will be duplicated.",
UserWarning,
)
self._indices.append(partial_index_check(index))
self._feats.append(features)
elif all([isinstance(i, Feature) for i in features]):
if any([ft in self for ft in features]):
warn("Feature already in the Bank, will be duplicated.", UserWarning)
self._indices.extend(normalize_indices(len(features), index))
self._feats.extend(features)
def save(self, file):
"""
Save the feature Bank to a file for a persistent object that can be loaded later to create
the same Bank as before
Parameters
----------
file : path-like
File to be saved to. Creates a new file or overwrites an existing file.
"""
out = []
for i, ft in enumerate(self._feats):
idx = "Ellipsis" if self._indices[i] is Ellipsis else self._indices[i]
out.append(
{ft.__class__.__name__: {"Parameters": ft._params, "Index": idx}}
)
with open(file, "w") as f:
json.dump(out, f)
def load(self, file):
"""
Load a previously saved feature Bank from a json file.
Parameters
----------
file : path-like
File to be read to create the feature Bank.
"""
# the import must be here, otherwise a circular import error occurs
from skdh.features import lib
with open(file, "r") as f:
feats = json.load(f)
for ft in feats:
name = list(ft.keys())[0]
params = ft[name]["Parameters"]
index = ft[name]["Index"]
if index == "Ellipsis":
index = Ellipsis
# add it to the feature bank
self.add(getattr(lib, name)(**params), index=index)
def compute(
self, signal, fs=1.0, *, axis=-1, index_axis=None, indices=None, columns=None
):
"""
Compute the specified features for the given signal
Parameters
----------
signal : {array-like}
Array-like signal to have features computed for.
fs : float, optional
Sampling frequency in Hz. Default is 1Hz
axis : int, optional
Axis along which to compute the features. Default is -1.
index_axis : {None, int}, optional
Axis corresponding to the indices specified in `Bank.add` or `indices`. Default is
None, which assumes that this axis is not part of the signal. Note that setting this to
None means values for `indices` or the indices set in `Bank.add` will be ignored.
indices : {None, int, list-like, slice, ellipsis}, optional
Indices to apply to the input signal. Either None, a integer, list-like, slice to apply
to each feature, or a list-like of lists/objects with a 1:1 correspondence to the
features present in the Bank. If provided, takes precedence over any values given in
`Bank.add`. Default is None, which will use indices from `Bank.add`.
columns : {None, list}, optional
Columns to use if providing a dataframe. Default is None (uses all columns).
Returns
-------
feats : numpy.ndarray
Computed features.
"""
# standardize the input signal
if isinstance(signal, DataFrame):
columns = columns if columns is not None else signal.columns
x = signal[columns].values.astype(float_)
else:
try:
x = asarray(signal, dtype=float_)
except ValueError as e:
raise ArrayConversionError("Error converting signal to ndarray") from e
axis, index_axis = normalize_axes(x.ndim, axis, index_axis)
if index_axis is None:
indices = [...] * len(self)
else:
if indices is None:
indices = self._indices
else:
indices = normalize_indices(len(self), indices)
# get the number of features that will results. Needed to allocate the feature array
if index_axis is None:
# don't have to move any other axes than the computation axis
x = moveaxis(x, axis, -1)
            # each feature contributes exactly one entry along the feature axis
            n_feats = [1] * len(self)
feats = zeros((sum(n_feats),) + x.shape[:-1], dtype=float_)
else:
# move both the computation and index axis. do this in two steps to allow for undoing
# just the index axis swap later. The index_axis has been adjusted appropriately
# to match this axis move in 2 steps
x = moveaxis(x, axis, -1)
x = moveaxis(x, index_axis, 0)
n_feats = []
for ind in indices:
n_feats.append(get_n_feats(x.shape[0], ind))
feats = zeros((sum(n_feats),) + x.shape[1:-1], dtype=float_)
feat_i = 0 # keep track of where in the feature array we are
for i, ft in enumerate(self._feats):
feats[feat_i : feat_i + n_feats[i]] = ft.compute(
x[indices[i]], fs=fs, axis=-1
)
feat_i += n_feats[i]
# Move the shape back to the correct one.
# only have to do this if there is an index axis, because otherwise the array is still in
# the same order as originally
if index_axis is not None:
feats = moveaxis(feats, 0, index_axis) # undo the previous swap/move
return feats
class Feature(ABC):
"""
Base feature class
"""
def __str__(self):
return self.__class__.__name__
def __repr__(self):
s = self.__class__.__name__ + "("
for p in self._params:
s += f"{p}={self._params[p]!r}, "
if len(self._params) > 0:
s = s[:-2]
return s + ")"
def __eq__(self, other):
if isinstance(other, type(self)):
# double check the name
eq = str(other) == str(self)
# check the parameters
eq &= other._params == self._params
return eq
else:
return False
__slots__ = ("_params",)
def __init__(self, **params):
self._params = params
@abstractmethod
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the signal feature.
Parameters
----------
signal : array-like
Signal to compute the feature over.
fs : float, optional
Sampling frequency in Hz. Default is 1.0
axis : int, optional
Axis over which to compute the feature. Default is -1 (last dimension)
Returns
-------
feat : numpy.ndarray
ndarray of the computed feature
"""
# move the computation axis to the end
return moveaxis(asarray(signal, dtype=float_), axis, -1)
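A minimal end-to-end sketch; the Mean feature below is hypothetical (the real features live in skdh.features.lib) and exists only to show the Bank workflow.

import numpy as np

class Mean(Feature):
    """Hypothetical feature: mean of the signal along the computation axis."""
    def __init__(self):
        super().__init__()

    def compute(self, signal, fs=1.0, *, axis=-1):
        # the base class coerces to float and moves `axis` to the end
        return super().compute(signal, fs, axis=axis).mean(axis=-1)

bank = Bank()
bank.add([Mean()])
signal = np.random.random((5, 3, 100))  # e.g. 5 windows, 3 axes, 100 samples
feats = bank.compute(signal, fs=50.0, axis=-1, index_axis=1)
print(feats.shape)  # (5, 3): one Mean value per window and axis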
from __future__ import division
import io
import matplotlib.pyplot as plt
import networkx as nx
import numpy
def figure_to_buff(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
    figure.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
return buf
def generate_edge_weight_buffer(nodes):
    b_nodes = list(nodes.values())
G = nx.DiGraph()
total_stake = sum([node.stake for node in b_nodes])
# Build node sizes in proportion to stake held within the graph.
node_sizes = []
node_labels = {}
for node in b_nodes:
G.add_node(node.identity)
node_sizes.append(25 + 500 * (node.stake / total_stake))
node_labels[node.identity] = str(node.identity)
    # Edge colors (alpha and weight) reflect the attribution weights of each
    # connection.
edge_colors = {}
edge_labels = {}
for node in b_nodes:
for edge in node.edges:
if (node.identity, edge['first']) not in edge_labels:
G.add_edge(node.identity, edge['first'])
edge_colors[(node.identity,
edge['first'])] = float(edge['second'])
if node.identity != edge['first']:
edge_labels[(
node.identity,
edge['first'])] = "%.3f" % float(edge['second'])
else:
edge_labels[(node.identity, edge['first'])] = ""
# Set edge weights.
for u, v, d in G.edges(data=True):
d['weight'] = edge_colors[(u, v)]
edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
# Clear Matplot lib buffer and create new figure.
plt.cla()
plt.clf()
figure = plt.figure(figsize=(20, 15))
pos = nx.layout.circular_layout(G)
nodes = nx.draw_networkx_nodes(G,
pos,
node_size=node_sizes,
node_color='blue')
edges = nx.draw_networkx_edges(G,
pos,
arrowstyle='->',
arrowsize=15,
edge_color=weights,
edge_cmap=plt.cm.Blues,
width=5)
    edge_labels = nx.draw_networkx_edge_labels(G,
                                               pos,
                                               edge_labels=edge_labels,
                                               label_pos=0.3)
for node in b_nodes:
pos[node.identity] = pos[node.identity] + numpy.array([0, 0.1])
labels = nx.draw_networkx_labels(G, pos, node_labels)
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
return buf
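A minimal sketch of the input shape these functions appear to expect (inferred from the code above, not part of the original module): each node exposes identity, stake and edges, where edges is a list of {'first': peer, 'second': weight} dicts.

from types import SimpleNamespace

fake_nodes = {
    'A': SimpleNamespace(identity='A', stake=60.0,
                         edges=[{'first': 'B', 'second': 0.7},
                                {'first': 'A', 'second': 0.3}]),
    'B': SimpleNamespace(identity='B', stake=40.0,
                         edges=[{'first': 'A', 'second': 1.0}]),
}
buf = generate_edge_weight_buffer(fake_nodes)
with open('graph.png', 'wb') as out:
    out.write(buf.read())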
#!/usr/bin/env python
'''
Calculating the emissions from deposits in Platypus stable accounts
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, EngFormatter, PercentFormatter
from strategy_const import *
from const import *
def boosted_pool_emission_rate(your_stable_deposit, vePTP_held, other_deposit_weights):
''' proportion of boosted pool emissions your deposits and vePTP earn
'''
your_boosted_pool_weight = np.sqrt(your_stable_deposit * vePTP_held)
return your_boosted_pool_weight / other_deposit_weights
def base_pool_emission_rate(your_stable_deposit, other_stable_deposits):
''' proportion of base pool emissions your deposits earn
'''
total_deposits = other_stable_deposits + your_stable_deposit
return your_stable_deposit / total_deposits
# define function with vectorize decorator for extensibility
@np.vectorize
def total_emissions_rate(stable_bankroll,
ptp_marketbuy_proportion):
'''
:stable_bankroll: total USD value of the stables you'd invest in the Platypus protocol
:ptp_marketbuy_proportion: proportion of stable_bankroll you'd use to marketbuy PTP for staking to vePTP
    returns the proportion of total PTP emissions your strategy would receive, given the constants
    defined earlier.
'''
n_PTP = (stable_bankroll * ptp_marketbuy_proportion) / PTP_PRICE
n_vePTP = HOURS_SPENT_STAKING * HOURLY_STAKED_PTP_vePTP_YIELD * n_PTP
stable_deposit = stable_bankroll * (1 - ptp_marketbuy_proportion)
# calculating lower bound on total deposit weights:
# assume all other deposits are from one wallet with all other staked PTP
# and it's been staking as long as you have
total_deposit_weights = GLOBAL_PTP_STAKED * HOURLY_STAKED_PTP_vePTP_YIELD * HOURS_SPENT_STAKING
boosted = boosted_pool_emission_rate(stable_deposit, n_vePTP, total_deposit_weights)
base = base_pool_emission_rate(stable_deposit, TVL - stable_deposit)
return (BOOSTING_POOL_ALLOCATION * boosted) + (BASE_POOL_ALLOCATION * base)
def plot_2d_returns(stable_bankroll, ptp_proportion, returns_array):
"""Use matplotlib to plot the slope of returns across different bankroll strategies
"""
fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(18,9))
manifold = ax.plot_surface(stable_bankroll, ptp_proportion, returns_array,
cmap=cm.plasma, linewidth=0.5, antialiased=False)
# labels, titles, and axes
ax.set_title(f"Monthly Strategy Emissions given PTP staking for {round(HOURS_SPENT_STAKING / 24)} Days")
ax.xaxis.set_major_formatter(EngFormatter(unit="$", places=1, sep="\N{THIN SPACE}"))
ax.set_xlabel("Strategy Bankroll")
ax.yaxis.set_major_formatter(PercentFormatter(xmax=1, decimals=1))
ax.set_ylabel("Percent Market-Bought and Staked")
ax.zaxis.set_major_locator(LinearLocator(9))
ax.zaxis.set_major_formatter(PercentFormatter(xmax=1, decimals=4))
ax.set_zlabel("Percent of Emissions for Strategy")
# colorbar for scale
fig.colorbar(manifold, shrink=0.5, aspect=5, format=PercentFormatter(xmax=1, decimals=4))
plt.show()
def main():
print(f"Emissions calculations consider PTP/USD: ${round(PTP_PRICE, 3)}\n" +
f"Reflecting a FDMC of \t${round(FDMC / 10**6)}MM " +
f"({round(PERCENT_COINS_CIRCULATING * 100)}% of coins available)\n" +
f"and implying TVL of \t${round(TVL / 10**6)}MM " +
f"(Mcap/TVL: {round(1 / TVL_TO_CMC_RATIO, 4)})\n" +
f"with {round(GLOBAL_PTP_STAKED / 10**6, 2)}MM PTP staked for vePTP ({round(PERCENT_PTP_STAKED * 100)}%)")
# Create the mesh and calculate return rates
stable_bankroll, ptp_proportion = np.meshgrid(stable_deposit_range, ptp_market_buy_bankroll_proportion)
returns = total_emissions_rate(stable_bankroll, ptp_proportion)
# plotting time
plot_2d_returns(stable_bankroll, ptp_proportion, returns)
if __name__ == '__main__':
main()
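As a sanity check on the two share formulas above, a worked example with made-up numbers (the real constants come from strategy_const and const):

# illustrative figures only: a 9,000 USD deposit with 400 vePTP, against
# 1,000,000 total boosted deposit weights and 1,000,000 USD of other deposits
print(boosted_pool_emission_rate(9000, 400, 1000000))  # sqrt(9000 * 400) / 1e6 ~= 0.0019
print(base_pool_emission_rate(9000, 1000000))          # 9000 / 1009000 ~= 0.0089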
from types import FunctionType
import numpy as np
import pandas as pd
from functools import partial
from multiprocessing import Pool, cpu_count
def get_levenshtein_distance(str1: str, str2: str) -> float:
"""
Computes the Levenshtein distance between two strings
:param str1: first string
:param str2: second string
:return: the distance between the two params
"""
size_x = len(str1) + 1
size_y = len(str2) + 1
matrix = np.zeros((size_x, size_y))
for x in range(size_x):
matrix[x, 0] = x
for y in range(size_y):
matrix[0, y] = y
for x in range(1, size_x):
for y in range(1, size_y):
            if str1[x - 1] == str2[y - 1]:
                # characters match: carry over the diagonal at no edit cost
                matrix[x, y] = matrix[x - 1, y - 1]
else:
matrix[x, y] = min(
matrix[x - 1, y] + 1,
matrix[x - 1, y - 1] + 1,
matrix[x, y - 1] + 1
)
return matrix[size_x - 1, size_y - 1]
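# Example: get_levenshtein_distance("kitten", "sitting") -> 3.0
# (substitute k->s, substitute e->i, insert g).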
def add_distance_column(filename: str, df: pd.DataFrame) -> pd.DataFrame:
"""
Add new column to df which contains distance computed using filename
:param filename: filename to compare to df
:param df: df with artist or tracks names
:return: df with new column
"""
df['distances'] = df.applymap(lambda x: get_levenshtein_distance(filename, x))
return df
def parallelize_dataframe(df: pd.DataFrame, func: FunctionType, word: str, n_cores: int = cpu_count() - 1) -> pd.DataFrame:
"""
Apply certain func against dataframe parallelling the application
:param df: DataFrame which contains the required by func
:param func: func that will be parallelize through df
:param word: to compute the distance using
:param n_cores: thread to parallelize the function
:return: DataFrame after func applied
"""
df_split = np.array_split(df, n_cores) # TODO: add df length check to get n_cores
pool = Pool(n_cores)
f = partial(func, word)
df = pd.concat(pool.map(f, df_split))
pool.close()
pool.join()
return df
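A minimal usage sketch with hypothetical data; the __main__ guard keeps multiprocessing safe on spawn-based platforms.

if __name__ == '__main__':
    names = pd.DataFrame({'artist': ['Radiohead', 'Daft Punk', 'Portishead']})
    result = parallelize_dataframe(names, add_distance_column, 'Radiohead', n_cores=2)
    print(result)  # the 'distances' column holds each name's edit distance to 'Radiohead'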
# -*- coding: utf-8 -*-
import os
import datetime
import logging
import requests
import numpy
import cv2
import zbar
from Queue import Queue
from threading import Thread
from PIL import Image
logger = logging.getLogger(__name__)
TEMP_DIR = os.path.join(os.getcwd(), 'temp')
def get_temp_dir():
"""Create TEMP_DIR if it doesn't exist"""
if not os.path.exists(TEMP_DIR):
os.mkdir(TEMP_DIR)
return TEMP_DIR
def thumbnail(picture, size=0.50):
"""Thumbnail the picture"""
width, height = picture.size
w, h = int(width * size), int(height * size)
picture.thumbnail((w, h), Image.ANTIALIAS)
return picture
def save_picture(picture, path, filename):
"""Save picture to filesystem, return the path"""
# Unfortunately, StringIO was unsatisfactory
# StringIO size exceeds size of filesystem save. Why??
storage = os.path.join(path, filename)
picture.save(storage, optimize=True, format='JPEG')
return storage
def delete_picture(path):
"""Delete the file, with a try except clause"""
    try:
        os.remove(path)
    # Gee! Thanks Windows
    except OSError:
        pass
def prepare_msg(qrcode, picture, timestamp):
"""Prepare message to send to server"""
timestamp = datetime.datetime.strftime(timestamp, '%Y%m%d%H%M%S%f')
filename = '{}.jpeg'.format(timestamp)
temp_storage = save_picture(picture, get_temp_dir(), filename)
data = dict(qrcode=qrcode, timestamp=timestamp)
files = {'picture': temp_storage}
return filename, data, files
def server_auth(queue, url, qrcode, picture, timestamp, timeout=5):
"""Send message to server for auth"""
filename, data, files = prepare_msg(qrcode, picture, timestamp)
try:
        if logger.getEffectiveLevel() <= logging.INFO:
            # Profile the request (INFO messages are only emitted when the
            # effective level is INFO or lower)
            start = datetime.datetime.now()
        # requests expects a file object (or bytes) as the value in `files`;
        # a bare path string would be sent as the literal file content
        with open(files['picture'], 'rb') as picture_file:
            r = requests.post(url, data=data, files={'picture': picture_file},
                              timeout=timeout)
        if logger.getEffectiveLevel() <= logging.INFO:
            # Profile the request
            end = datetime.datetime.now()
            elapsed_time = (end - start).total_seconds()
            logger.info('Elapsed time was {} seconds'.format(elapsed_time))
except Exception as e:
response = None
# Did the request timeout?
if isinstance(e, requests.exceptions.Timeout):
response = dict(network_timeout=True)
else:
response = r.json()
finally:
delete_picture(os.path.join(get_temp_dir(), filename))
queue.put(response)
class QRCodeScanner(object):
def __init__(
self,
url=None,
max_responses=2,
timeout=5,
ok_color=(0, 0, 255),
not_ok_color=(255, 0, 0),
box_width=1,
debug=False
):
self.url = url
self.timeout = timeout
        self.max_responses = max_responses
self.thread = None
self.queue = Queue()
# Init zbar.
self.scanner = zbar.ImageScanner()
# Disable all zbar symbols.
self.scanner.set_config(0, zbar.Config.ENABLE, 0)
# Enable QRCodes.
self.scanner.set_config(zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1)
# Highlight scanned QR Codes.
self.ok_color = ok_color
self.not_ok_color = not_ok_color
self.box_width = box_width
        self.successes = 0
        self.debug = debug
        # State used throughout the class but never initialized in the
        # original constructor (the values below are assumed defaults):
        self.responses = []             # auth responses collected so far
        self.active_qrcode = None       # QR code currently being verified
        self.ok_throttle_dict = {}      # qrcode -> expiry time, OK results
        self.not_ok_throttle_dict = {}  # qrcode -> expiry time, pending/NOK
        self.ok_throttle = 30           # assumed throttle window in seconds
        self.not_ok_throttle = 5        # assumed throttle window in seconds
        self.max_qrcode_size = 0        # 0 disables the maximum-size check
        self.max_size_exceeded = False
def main(self, frame, timestamp):
"""Main function"""
self.before_zbar(timestamp)
frame, qrcodes = self.zbar(frame)
if len(qrcodes) > 0:
self.auth(frame, qrcodes, timestamp)
frame = self.after_zbar(frame, qrcodes, timestamp)
self.process_results_from_queue(timestamp)
return frame
def auth(self, frame, qrcodes, timestamp):
"""Auth with server"""
if self.url is not None:
qrcode = self.get_next_qrcode(frame, qrcodes)
            if qrcode is not None:
                # Only launch a new auth request while the number of
                # collected responses is below the cap and no auth thread
                # is already in flight; launch_thread converts the frame
                # to a PIL image itself.
                if (len(self.responses) < self.max_responses
                        and not self.is_thread_running()):
                    self.launch_thread(self.url, qrcode, frame, timestamp)
def get_next_qrcode(self, frame, qrcodes):
"""Returns the largest valid QR code, which is neither the
active QR code nor throttled"""
height, width = frame.shape[:2]
frame_size = width * height
target = None
targets = [
dict(
qrcode=qrcode,
                size=self.get_qrcode_size(qrcodes[qrcode])
)
for qrcode in qrcodes
]
        targets = sorted(targets, key=lambda k: k['size'], reverse=True)  # largest first, as the docstring promises
for target in targets:
qrcode = target['qrcode']
qrcode_size = target['size'] / frame_size
qrcode_size = round(qrcode_size, 4)
if self.debug:
logger.info('QRcode percent of frame: {}%'.format(
qrcode_size
))
# Throttle requests for the same QR code.
if self.active_qrcode != qrcode:
# Throttle requests for cached QR codes.
if not self.is_qrcode_throttled(qrcode):
# Ensure the QR code is valid.
is_valid = self.is_valid_qrcode(qrcode)
if self.debug:
logger.info('QRcode is valid: {}'.format(is_valid))
if is_valid:
if self.max_qrcode_size > 0:
if qrcode_size > self.max_qrcode_size:
self.max_size_exceeded = True
break
if not self.max_size_exceeded:
return qrcode
def is_valid_qrcode(self, qrcode):
"""Intended to be overriden by subclass."""
        return qrcode is not None
def is_qrcode_throttled(self, qrcode):
for throttle in (self.ok_throttle_dict, self.not_ok_throttle_dict):
if qrcode in throttle:
return True
def get_qrcode_size(self, qrcode):
contour = numpy.array(qrcode, dtype=numpy.int32)
return cv2.contourArea(contour)
def before_zbar(self, timestamp):
"""Remove expired QR codes from throttle dict"""
for throttle in (self.ok_throttle_dict, self.not_ok_throttle_dict):
delete = []
for qrcode in throttle:
expired = (throttle[qrcode] <= datetime.datetime.now())
if expired:
delete.append(qrcode)
for qrcode in delete:
del throttle[qrcode]
def zbar(self, frame):
"""Scan frame using ZBar"""
qrcodes = {}
# Convert to grayscale, as binarization requires
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# Apply Otsu Binarization
_, threshold = cv2.threshold(
gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
)
try:
# Convert to string, as ZBar requires
pil_image = Image.fromarray(threshold)
width, height = pil_image.size
raw = pil_image.tostring()
except:
logger.error('Error converting to PIL image')
else:
try:
image = zbar.Image(width, height, 'Y800', raw)
except:
logger.error('Error converting to ZBar image')
else:
self.scanner.scan(image)
for qrcode in image:
location = []
for point in qrcode.location:
location.append(point)
qrcodes[qrcode.data] = location
if self.debug:
self.successes += 1
if self.debug:
frame = cv2.cvtColor(threshold, cv2.COLOR_GRAY2RGB)
return frame, qrcodes
def after_zbar(self, frame, qrcodes, timestamp):
"""Intended to be overridden by subclass. Currently, draws boxes
around QR codes"""
frame = self.draw_boxes(qrcodes, frame)
return frame
def draw_box(self, frame, location, color, width):
"""Draw a box around around QR code"""
for index in range(len(location)):
if (index + 1) == len(location):
next_index = 0
else:
next_index = index + 1
            # OpenCV 3 renamed cv2.CV_AA to cv2.LINE_AA
if cv2.__version__ >= '3.0.0':
cv2.line(
frame,
location[index], location[next_index],
color,
width,
lineType=cv2.LINE_AA
)
else:
cv2.line(
frame,
location[index], location[next_index],
color,
width,
cv2.CV_AA
)
return frame
def is_thread_running(self):
"""Check if the thread is running"""
# Is a thread active?
if self.thread is not None:
if self.thread.is_alive():
return True
def launch_thread(self, url, qrcode, frame, timestamp):
"""Launch a thread to auth against server with requests library"""
try:
            # Thread.start() returns None, so keep a reference to the Thread
            # object and start it separately (otherwise is_thread_running()
            # can never see a live thread).
            self.thread = Thread(
                target=server_auth,
                args=(
                    self.queue,
                    url,
                    qrcode,
                    Image.fromarray(frame),
                    timestamp
                )
            )
            self.thread.start()
except:
logger.error('Thread failed to start')
else:
self.after_thread_started(qrcode, timestamp)
def after_thread_started(self, qrcode, timestamp):
"""Runs after thread is started. Throttles not OK results"""
# Throttle requests
self.not_ok_throttle_dict[qrcode] = (
timestamp + datetime.timedelta(seconds=self.not_ok_throttle)
)
self.active_qrcode = qrcode
logger.info('Sent QRcode to server {}'.format(self.active_qrcode))
def process_results_from_queue(self, timestamp):
"""Throttles OK results. Prepares response for GUI"""
if not self.queue.empty():
# Clear active qrcode
self.active_qrcode = None
response = self.queue.get()
if response is not None:
# Response is OK. Flag the QR code as OK, and throttle it
if 'qrcode' in response:
qrcode = response['qrcode']
ok_throttle = datetime.timedelta(seconds=self.ok_throttle)
self.ok_throttle_dict[qrcode] = timestamp + ok_throttle
self.responses.append(response)
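

if __name__ == '__main__':
    # Minimal usage sketch (assumes a webcam at index 0; the auth URL is a
    # placeholder, and frames are fed to the scanner one by one).
    scanner = QRCodeScanner(url='http://example.com/auth', debug=True)
    capture = cv2.VideoCapture(0)
    while True:
        grabbed, frame = capture.read()
        if not grabbed:
            break
        frame = scanner.main(frame, datetime.datetime.now())
        cv2.imshow('scanner', frame)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()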
|
[
"logging.getLogger",
"requests.post",
"numpy.array",
"zbar.Image",
"datetime.timedelta",
"os.remove",
"os.path.exists",
"cv2.threshold",
"cv2.line",
"cv2.contourArea",
"os.mkdir",
"cv2.cvtColor",
"PIL.Image.fromarray",
"os.path.join",
"os.getcwd",
"zbar.ImageScanner",
"datetime.datetime.now",
"datetime.datetime.strftime",
"Queue.Queue"
] |
[((202, 229), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (219, 229), False, 'import logging\n'), ((254, 265), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (263, 265), False, 'import os\n'), ((865, 893), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (877, 893), False, 'import os\n'), ((1240, 1295), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['timestamp', '"""%Y%m%d%H%M%S%f"""'], {}), "(timestamp, '%Y%m%d%H%M%S%f')\n", (1266, 1295), False, 'import datetime\n'), ((354, 378), 'os.path.exists', 'os.path.exists', (['TEMP_DIR'], {}), '(TEMP_DIR)\n', (368, 378), False, 'import os\n'), ((388, 406), 'os.mkdir', 'os.mkdir', (['TEMP_DIR'], {}), '(TEMP_DIR)\n', (396, 406), False, 'import os\n'), ((1066, 1081), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1075, 1081), False, 'import os\n'), ((1863, 1922), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'files': 'files', 'timeout': 'timeout'}), '(url, data=data, files=files, timeout=timeout)\n', (1876, 1922), False, 'import requests\n'), ((2880, 2887), 'Queue.Queue', 'Queue', ([], {}), '()\n', (2885, 2887), False, 'from Queue import Queue\n'), ((2932, 2951), 'zbar.ImageScanner', 'zbar.ImageScanner', ([], {}), '()\n', (2949, 2951), False, 'import zbar\n'), ((6076, 6114), 'numpy.array', 'numpy.array', (['qrcode'], {'dtype': 'numpy.int32'}), '(qrcode, dtype=numpy.int32)\n', (6087, 6114), False, 'import numpy\n'), ((6130, 6154), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (6145, 6154), False, 'import cv2\n'), ((6757, 6796), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (6769, 6796), False, 'import cv2\n'), ((6854, 6918), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (6867, 6918), False, 'import cv2\n'), ((1827, 1850), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1848, 1850), False, 'import datetime\n'), ((2028, 2051), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2049, 2051), False, 'import datetime\n'), ((7028, 7054), 'PIL.Image.fromarray', 'Image.fromarray', (['threshold'], {}), '(threshold)\n', (7043, 7054), False, 'from PIL import Image\n'), ((7790, 7833), 'cv2.cvtColor', 'cv2.cvtColor', (['threshold', 'cv2.COLOR_GRAY2RGB'], {}), '(threshold, cv2.COLOR_GRAY2RGB)\n', (7802, 7833), False, 'import cv2\n'), ((9968, 10016), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'self.not_ok_throttle'}), '(seconds=self.not_ok_throttle)\n', (9986, 10016), False, 'import datetime\n'), ((7266, 7304), 'zbar.Image', 'zbar.Image', (['width', 'height', '"""Y800"""', 'raw'], {}), "(width, height, 'Y800', raw)\n", (7276, 7304), False, 'import zbar\n'), ((8492, 8586), 'cv2.line', 'cv2.line', (['frame', 'location[index]', 'location[next_index]', 'color', 'width'], {'lineType': 'cv2.LINE_AA'}), '(frame, location[index], location[next_index], color, width,\n lineType=cv2.LINE_AA)\n', (8500, 8586), False, 'import cv2\n'), ((8735, 8814), 'cv2.line', 'cv2.line', (['frame', 'location[index]', 'location[next_index]', 'color', 'width', 'cv2.CV_AA'], {}), '(frame, location[index], location[next_index], color, width, cv2.CV_AA)\n', (8743, 8814), False, 'import cv2\n'), ((3996, 4018), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (4011, 4018), False, 'from PIL import Image\n'), ((6434, 6457), 
'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6455, 6457), False, 'import datetime\n'), ((10635, 10679), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'self.ok_throttle'}), '(seconds=self.ok_throttle)\n', (10653, 10679), False, 'import datetime\n'), ((9513, 9535), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (9528, 9535), False, 'from PIL import Image\n')]
|
import tensorflow as tf
import numpy as np
import os
import time
from utils import random_batch, normalize, similarity, loss_cal, optim
from configuration import get_config
from tensorflow.contrib import rnn
config = get_config()
def train(path):
tf.reset_default_graph() # reset graph
# draw graph
batch = tf.placeholder(shape= [None, config.N*config.M, 40], dtype=tf.float32) # input batch (time x batch x n_mel)
lr = tf.placeholder(dtype= tf.float32) # learning rate
global_step = tf.Variable(0, name='global_step', trainable=False)
w = tf.get_variable("w", initializer= np.array([10], dtype=np.float32))
b = tf.get_variable("b", initializer= np.array([-5], dtype=np.float32))
# embedding lstm (3-layer default)
with tf.variable_scope("lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # define lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
    embedded = outputs[-1]                            # the last output is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
# loss
sim_matrix = similarity(embedded, w, b)
print("similarity matrix size: ", sim_matrix.shape)
loss = loss_cal(sim_matrix, type=config.loss)
# optimizer operation
trainable_vars= tf.trainable_variables() # get variable list
optimizer= optim(lr) # get optimizer (type is determined by configuration)
grads, vars= zip(*optimizer.compute_gradients(loss)) # compute gradients of variables with respect to loss
grads_clip, _ = tf.clip_by_global_norm(grads, 3.0) # l2 norm clipping by 3
grads_rescale= [0.01*grad for grad in grads_clip[:2]] + grads_clip[2:] # smaller gradient scale for w, b
train_op= optimizer.apply_gradients(zip(grads_rescale, vars), global_step= global_step) # gradient update operation
# check variables memory
variable_count = np.sum(np.array([np.prod(np.array(v.get_shape().as_list())) for v in trainable_vars]))
print("total variables :", variable_count)
# record loss
loss_summary = tf.summary.scalar("loss", loss)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
# training session
with tf.Session() as sess:
tf.global_variables_initializer().run()
os.makedirs(os.path.join(path, "Check_Point"), exist_ok=True) # make folder to save model
os.makedirs(os.path.join(path, "logs"), exist_ok=True) # make folder to save log
writer = tf.summary.FileWriter(os.path.join(path, "logs"), sess.graph)
epoch = 0
lr_factor = 1 # lr decay factor ( 1/2 per 10000 iteration)
loss_acc = 0 # accumulated loss ( for running average of loss)
for iter in range(config.iteration):
# run forward and backward propagation and update parameters
_, loss_cur, summary = sess.run([train_op, loss, merged],
feed_dict={batch: random_batch(), lr: config.lr*lr_factor})
loss_acc += loss_cur # accumulated loss for each 100 iteration
if iter % 10 == 0:
writer.add_summary(summary, iter) # write at tensorboard
if (iter+1) % 100 == 0:
print("(iter : %d) loss: %.4f" % ((iter+1),loss_acc/100))
loss_acc = 0 # reset accumulated loss
if (iter+1) % 10000 == 0:
lr_factor /= 2 # lr decay
print("learning rate is decayed! current lr : ", config.lr*lr_factor)
if (iter+1) % 10000 == 0:
saver.save(sess, os.path.join(path, "./Check_Point/model.ckpt"), global_step=iter//10000)
print("model is saved!")
# Test Session
def test(path):
tf.reset_default_graph()
# draw graph
enroll = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # enrollment batch (time x batch x n_mel)
verif = tf.placeholder(shape=[None, config.N*config.M, 40], dtype=tf.float32) # verification batch (time x batch x n_mel)
batch = tf.concat([enroll, verif], axis=1)
# embedding lstm (3-layer default)
with tf.variable_scope("lstm"):
lstm_cells = [tf.contrib.rnn.LSTMCell(num_units=config.hidden, num_proj=config.proj) for i in range(config.num_layer)]
lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells) # make lstm op and variables
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True) # for TI-VS must use dynamic rnn
    embedded = outputs[-1]                            # the last output is the embedded d-vector
embedded = normalize(embedded) # normalize
print("embedded size: ", embedded.shape)
# enrollment embedded vectors (speaker model)
enroll_embed = normalize(tf.reduce_mean(tf.reshape(embedded[:config.N*config.M, :], shape= [config.N, config.M, -1]), axis=1))
# verification embedded vectors
verif_embed = embedded[config.N*config.M:, :]
similarity_matrix = similarity(embedded=verif_embed, w=1., b=0., center=enroll_embed)
saver = tf.train.Saver(var_list=tf.global_variables())
with tf.Session() as sess:
tf.global_variables_initializer().run()
# load model
print("model path :", path)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=os.path.join(path, "Check_Point"))
ckpt_list = ckpt.all_model_checkpoint_paths
loaded = 0
for model in ckpt_list:
if config.model_num == int(model.split('-')[-1]): # find ckpt file which matches configuration model number
print("ckpt file is loaded !", model)
loaded = 1
saver.restore(sess, model) # restore variables from selected ckpt file
break
if loaded == 0:
raise AssertionError("ckpt file does not exist! Check config.model_num or config.model_path.")
print("test file path : ", config.test_path)
# return similarity matrix after enrollment and verification
time1 = time.time() # for check inference time
if config.tdsv:
S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False, noise_filenum=1),
verif:random_batch(shuffle=False, noise_filenum=2)})
else:
S = sess.run(similarity_matrix, feed_dict={enroll:random_batch(shuffle=False),
verif:random_batch(shuffle=False, utter_start=config.M)})
S = S.reshape([config.N, config.M, -1])
time2 = time.time()
np.set_printoptions(precision=2)
print("inference time for %d utterences : %0.2fs"%(2*config.M*config.N, time2-time1))
print(S) # print similarity matrix
# calculating EER
diff = 1; EER=0; EER_thres = 0; EER_FAR=0; EER_FRR=0
# through thresholds calculate false acceptance ratio (FAR) and false reject ratio (FRR)
for thres in [0.01*i+0.5 for i in range(50)]:
S_thres = S>thres
# False acceptance ratio = false acceptance / mismatched population (enroll speaker != verification speaker)
FAR = sum([np.sum(S_thres[i])-np.sum(S_thres[i,:,i]) for i in range(config.N)])/(config.N-1)/config.M/config.N
# False reject ratio = false reject / matched population (enroll speaker = verification speaker)
FRR = sum([config.M-np.sum(S_thres[i][:,i]) for i in range(config.N)])/config.M/config.N
# Save threshold when FAR = FRR (=EER)
if diff> abs(FAR-FRR):
diff = abs(FAR-FRR)
EER = (FAR+FRR)/2
EER_thres = thres
EER_FAR = FAR
EER_FRR = FRR
print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)"%(EER,EER_thres,EER_FAR,EER_FRR))
|
[
"utils.random_batch",
"numpy.array",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.clip_by_global_norm",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"utils.similarity",
"tensorflow.summary.scalar",
"tensorflow.trainable_variables",
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.summary.merge_all",
"tensorflow.variable_scope",
"tensorflow.Variable",
"tensorflow.global_variables",
"configuration.get_config",
"tensorflow.reshape",
"time.time",
"numpy.set_printoptions",
"utils.loss_cal",
"tensorflow.reset_default_graph",
"utils.normalize",
"tensorflow.train.Saver",
"os.path.join",
"tensorflow.global_variables_initializer",
"numpy.sum",
"utils.optim"
] |
[((226, 238), 'configuration.get_config', 'get_config', ([], {}), '()\n', (236, 238), False, 'from configuration import get_config\n'), ((266, 290), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (288, 290), True, 'import tensorflow as tf\n'), ((341, 412), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, config.N * config.M, 40]', 'dtype': 'tf.float32'}), '(shape=[None, config.N * config.M, 40], dtype=tf.float32)\n', (355, 412), True, 'import tensorflow as tf\n'), ((460, 492), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (474, 492), True, 'import tensorflow as tf\n'), ((530, 581), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (541, 581), True, 'import tensorflow as tf\n'), ((1418, 1444), 'utils.similarity', 'similarity', (['embedded', 'w', 'b'], {}), '(embedded, w, b)\n', (1428, 1444), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((1514, 1552), 'utils.loss_cal', 'loss_cal', (['sim_matrix'], {'type': 'config.loss'}), '(sim_matrix, type=config.loss)\n', (1522, 1552), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((1603, 1627), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1625, 1627), True, 'import tensorflow as tf\n'), ((1679, 1688), 'utils.optim', 'optim', (['lr'], {}), '(lr)\n', (1684, 1688), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((1914, 1948), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', '(3.0)'], {}), '(grads, 3.0)\n', (1936, 1948), True, 'import tensorflow as tf\n'), ((2443, 2474), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (2460, 2474), True, 'import tensorflow as tf\n'), ((2489, 2511), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2509, 2511), True, 'import tensorflow as tf\n'), ((2525, 2541), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2539, 2541), True, 'import tensorflow as tf\n'), ((4181, 4205), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4203, 4205), True, 'import tensorflow as tf\n'), ((4240, 4311), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, config.N * config.M, 40]', 'dtype': 'tf.float32'}), '(shape=[None, config.N * config.M, 40], dtype=tf.float32)\n', (4254, 4311), True, 'import tensorflow as tf\n'), ((4365, 4436), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, config.N * config.M, 40]', 'dtype': 'tf.float32'}), '(shape=[None, config.N * config.M, 40], dtype=tf.float32)\n', (4379, 4436), True, 'import tensorflow as tf\n'), ((4493, 4527), 'tensorflow.concat', 'tf.concat', (['[enroll, verif]'], {'axis': '(1)'}), '([enroll, verif], axis=1)\n', (4502, 4527), True, 'import tensorflow as tf\n'), ((5478, 5545), 'utils.similarity', 'similarity', ([], {'embedded': 'verif_embed', 'w': '(1.0)', 'b': '(0.0)', 'center': 'enroll_embed'}), '(embedded=verif_embed, w=1.0, b=0.0, center=enroll_embed)\n', (5488, 5545), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((788, 813), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm"""'], {}), "('lstm')\n", (805, 813), True, 'import tensorflow as tf\n'), ((959, 998), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', 
(['lstm_cells'], {}), '(lstm_cells)\n', (986, 998), True, 'import tensorflow as tf\n'), ((1055, 1132), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'lstm', 'inputs': 'batch', 'dtype': 'tf.float32', 'time_major': '(True)'}), '(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True)\n', (1072, 1132), True, 'import tensorflow as tf\n'), ((1289, 1308), 'utils.normalize', 'normalize', (['embedded'], {}), '(embedded)\n', (1298, 1308), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((2578, 2590), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2588, 2590), True, 'import tensorflow as tf\n'), ((4580, 4605), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lstm"""'], {}), "('lstm')\n", (4597, 4605), True, 'import tensorflow as tf\n'), ((4751, 4790), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['lstm_cells'], {}), '(lstm_cells)\n', (4778, 4790), True, 'import tensorflow as tf\n'), ((4845, 4922), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'lstm', 'inputs': 'batch', 'dtype': 'tf.float32', 'time_major': '(True)'}), '(cell=lstm, inputs=batch, dtype=tf.float32, time_major=True)\n', (4862, 4922), True, 'import tensorflow as tf\n'), ((5079, 5098), 'utils.normalize', 'normalize', (['embedded'], {}), '(embedded)\n', (5088, 5098), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((5616, 5628), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5626, 5628), True, 'import tensorflow as tf\n'), ((6549, 6560), 'time.time', 'time.time', ([], {}), '()\n', (6558, 6560), False, 'import time\n'), ((7118, 7129), 'time.time', 'time.time', ([], {}), '()\n', (7127, 7129), False, 'import time\n'), ((7141, 7173), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (7160, 7173), True, 'import numpy as np\n'), ((625, 657), 'numpy.array', 'np.array', (['[10]'], {'dtype': 'np.float32'}), '([10], dtype=np.float32)\n', (633, 657), True, 'import numpy as np\n'), ((702, 734), 'numpy.array', 'np.array', (['[-5]'], {'dtype': 'np.float32'}), '([-5], dtype=np.float32)\n', (710, 734), True, 'import numpy as np\n'), ((838, 908), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'config.hidden', 'num_proj': 'config.proj'}), '(num_units=config.hidden, num_proj=config.proj)\n', (861, 908), True, 'import tensorflow as tf\n'), ((2670, 2703), 'os.path.join', 'os.path.join', (['path', '"""Check_Point"""'], {}), "(path, 'Check_Point')\n", (2682, 2703), False, 'import os\n'), ((2770, 2796), 'os.path.join', 'os.path.join', (['path', '"""logs"""'], {}), "(path, 'logs')\n", (2782, 2796), False, 'import os\n'), ((2888, 2914), 'os.path.join', 'os.path.join', (['path', '"""logs"""'], {}), "(path, 'logs')\n", (2900, 2914), False, 'import os\n'), ((4630, 4700), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'config.hidden', 'num_proj': 'config.proj'}), '(num_units=config.hidden, num_proj=config.proj)\n', (4653, 4700), True, 'import tensorflow as tf\n'), ((5276, 5353), 'tensorflow.reshape', 'tf.reshape', (['embedded[:config.N * config.M, :]'], {'shape': '[config.N, config.M, -1]'}), '(embedded[:config.N * config.M, :], shape=[config.N, config.M, -1])\n', (5286, 5353), True, 'import tensorflow as tf\n'), ((5583, 5604), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5602, 5604), True, 'import tensorflow as tf\n'), ((2609, 2642), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2640, 2642), True, 'import tensorflow as tf\n'), ((5647, 5680), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5678, 5680), True, 'import tensorflow as tf\n'), ((5809, 5842), 'os.path.join', 'os.path.join', (['path', '"""Check_Point"""'], {}), "(path, 'Check_Point')\n", (5821, 5842), False, 'import os\n'), ((4024, 4070), 'os.path.join', 'os.path.join', (['path', '"""./Check_Point/model.ckpt"""'], {}), "(path, './Check_Point/model.ckpt')\n", (4036, 4070), False, 'import os\n'), ((3338, 3352), 'utils.random_batch', 'random_batch', ([], {}), '()\n', (3350, 3352), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((6676, 6720), 'utils.random_batch', 'random_batch', ([], {'shuffle': '(False)', 'noise_filenum': '(1)'}), '(shuffle=False, noise_filenum=1)\n', (6688, 6720), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((6784, 6828), 'utils.random_batch', 'random_batch', ([], {'shuffle': '(False)', 'noise_filenum': '(2)'}), '(shuffle=False, noise_filenum=2)\n', (6796, 6828), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((6909, 6936), 'utils.random_batch', 'random_batch', ([], {'shuffle': '(False)'}), '(shuffle=False)\n', (6921, 6936), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((7000, 7049), 'utils.random_batch', 'random_batch', ([], {'shuffle': '(False)', 'utter_start': 'config.M'}), '(shuffle=False, utter_start=config.M)\n', (7012, 7049), False, 'from utils import random_batch, normalize, similarity, loss_cal, optim\n'), ((7986, 8010), 'numpy.sum', 'np.sum', (['S_thres[i][:, i]'], {}), '(S_thres[i][:, i])\n', (7992, 8010), True, 'import numpy as np\n'), ((7741, 7759), 'numpy.sum', 'np.sum', (['S_thres[i]'], {}), '(S_thres[i])\n', (7747, 7759), True, 'import numpy as np\n'), ((7760, 7784), 'numpy.sum', 'np.sum', (['S_thres[i, :, i]'], {}), '(S_thres[i, :, i])\n', (7766, 7784), True, 'import numpy as np\n')]
|
'''
<NAME>
2021
'''
import numpy as np
import cv2
from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift
from numpy import conj, real
from utils import gaussian2d_rolled_labels, cos_window
from hog_cpp.fhog.get_hog import get_hog
vgg_path = 'model/imagenet-vgg-verydeep-19.mat'
def create_model():
from scipy import io
from keras.applications.vgg19 import VGG19
from keras.models import Model
mat = io.loadmat(vgg_path)
model = VGG19(mat)
ixs = [2, 5, 10, 15, 20]
outputs = [model.layers[i].output for i in ixs]
model = Model(inputs=model.inputs, outputs=outputs)
# model.summary()
return model
vgg_model = create_model()
class KernelizedCorrelationFilter:
def __init__(self, correlation_type='gaussian', feature='hog'):
        self.padding = 1.5  # extra area surrounding the target
self.lambda_ = 1e-4 # regularization
self.output_sigma_factor = 0.1 # spatial bandwidth (proportional to target)
self.correlation_type = correlation_type
self.feature = feature
self.resize = False
# GRAY
if feature == 'gray':
self.interp_factor = 0.075 # linear interpolation factor for adaptation
self.sigma = 0.2 # gaussian kernel bandwidth
self.poly_a = 1 # polynomial kernel additive term
self.poly_b = 7 # polynomial kernel exponent
self.gray = True
self.cell_size = 1
# HOG
elif feature == 'hog':
self.interp_factor = 0.02 # linear interpolation factor for adaptation
self.sigma = 0.5 # gaussian kernel bandwidth
self.poly_a = 1 # polynomial kernel additive term
self.poly_b = 9 # polynomial kernel exponent
self.hog = True
self.hog_orientations = 9
self.cell_size = 4
# DEEP
elif feature == 'deep':
self.interp_factor = 0.02 # linear interpolation factor for adaptation
self.sigma = 0.5 # gaussian kernel bandwidth
self.poly_a = 1 # polynomial kernel additive term
self.poly_b = 9 # polynomial kernel exponent
self.deep = True
self.cell_size = 4 # 8
def start(self, init_gt, show, frame_list):
poses = []
poses.append(init_gt)
init_frame = cv2.imread(frame_list[0])
x1, y1, w, h = init_gt
init_gt = tuple(init_gt)
self.init(init_frame, init_gt)
for idx in range(len(frame_list)):
if idx != 0:
current_frame = cv2.imread(frame_list[idx])
bbox = self.update(current_frame)
if bbox is not None:
x1, y1, w, h = bbox
if show is True:
if len(current_frame.shape) == 2:
current_frame = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2BGR)
show_frame = cv2.rectangle(current_frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)),
(255, 0, 0), 1)
cv2.imshow('demo', show_frame)
cv2.waitKey(1)
else:
print('bbox is None')
poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
return np.array(poses)
def init(self, image, roi):
# Get image size and search window size
x, y, w, h = roi
self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.target_sz = np.array([h, w])
self.target_sz_real = np.array([h, w])
self.pos = np.array([y + np.floor(h/2), x + np.floor(w/2)])
        if np.sqrt(h * w) >= 100:  # geometric mean of the target size >= threshold
self.resize = True
self.pos = np.floor(self.pos / 2)
self.target_sz = np.floor(self.target_sz / 2)
if self.resize:
self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
# window size, taking padding into account
self.window_sz = np.floor(np.multiply(self.target_sz, (1 + self.padding)))
self.output_sigma = round(round(np.sqrt(self.target_sz[0]*self.target_sz[1]), 4) * self.output_sigma_factor / self.cell_size, 4)
        # label grid size in (width, height) order
        yf_sz = np.floor(self.window_sz / self.cell_size)[::-1]
gauss = gaussian2d_rolled_labels(yf_sz, self.output_sigma)
self.yf = fft2(gauss)
#store pre-computed cosine window
self.cos_window = cos_window([self.yf.shape[1], self.yf.shape[0]])
# obtain a subwindow for training at newly estimated target position
patch = self.get_subwindow(self.image, self.pos, self.window_sz)
feat = self.get_features(patch)
xf = fftn(feat, axes=(0, 1))
kf = []
if self.correlation_type == 'gaussian':
kf = self.gaussian_correlation(xf, xf)
alphaf = np.divide(self.yf, (kf + self.lambda_))
self.model_alphaf = alphaf
self.model_xf = xf
def update(self, image):
self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.resize:
self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
patch = self.get_subwindow(self.image, self.pos, self.window_sz)
zf = fftn(self.get_features(patch), axes=(0, 1))
if self.correlation_type == 'gaussian':
kzf = self.gaussian_correlation(zf, self.model_xf)
response = real(ifftn(self.model_alphaf * kzf, axes=(0, 1))) # equation for fast detection
        # locate the peak of the response map
        delta = np.unravel_index(np.argmax(response, axis=None), response.shape)
vert_delta, horiz_delta = delta[0], delta[1]
if vert_delta > np.size(zf, 0) / 2: # wrap around to negative half-space of vertical axis
vert_delta = vert_delta - np.size(zf, 0)
if horiz_delta > np.size(zf, 1) / 2: # same for horizontal axis
horiz_delta = horiz_delta - np.size(zf, 1)
self.pos = self.pos + self.cell_size * np.array([vert_delta, horiz_delta])
# obtain a subwindow for training at newly estimated target position
patch = self.get_subwindow(self.image, self.pos, self.window_sz)
feat = self.get_features(patch)
xf = fftn(feat, axes=(0, 1))
# Kernel Ridge Regression, calculate alphas (in Fourier domain)
if self.correlation_type == 'gaussian':
kf = self.gaussian_correlation(xf, xf)
alphaf = np.divide(self.yf, (kf + self.lambda_))
# subsequent frames, interpolate model
self.model_alphaf = (1 - self.interp_factor) * self.model_alphaf + self.interp_factor * alphaf
self.model_xf = (1 - self.interp_factor) * self.model_xf + self.interp_factor * xf
if self.resize:
pos_real = np.multiply(self.pos, 2)
else:
pos_real = self.pos
box = [pos_real[1] - self.target_sz_real[1] / 2,
pos_real[0] - self.target_sz_real[0] / 2,
self.target_sz_real[1],
self.target_sz_real[0]]
return box[0], box[1], box[2], box[3]
def get_subwindow(self, im, pos, sz):
_p1 = np.array(range(0, int(sz[0]))).reshape([1, int(sz[0])])
_p2 = np.array(range(0, int(sz[1]))).reshape([1, int(sz[1])])
ys = np.floor(pos[0]) + _p1 - np.floor(sz[0]/2)
xs = np.floor(pos[1]) + _p2 - np.floor(sz[1]/2)
# Check for out-of-bounds coordinates, and set them to the values at the borders
xs[xs < 0] = 0
ys[ys < 0] = 0
xs[xs > np.size(im, 1) - 1] = np.size(im, 1) - 1
ys[ys > np.size(im, 0) - 1] = np.size(im, 0) - 1
xs = xs.astype(int)
ys = ys.astype(int)
# extract image
out1 = im[list(ys[0, :]), :, :]
out = out1[:, list(xs[0, :]), :]
return out
def get_features(self, im):
if self.feature == 'hog':
# HOG features, from Piotr's Toolbox
x = np.double(self.get_fhog(im))
return x * self.cos_window[:, :, None]
if self.feature == 'gray':
x = np.double(im) / 255
x = x - np.mean(x)
return x * self.cos_window[:, :, None]
if self.feature == 'deep':
x = self.get_deep_feature(im)
x = x / np.max(x)
return x * self.cos_window[:, :, None]
def get_fhog(self, im_patch):
H = get_hog(im_patch/255)
return H
def gaussian_correlation(self, xf, yf):
N = xf.shape[0] * xf.shape[1]
xff = xf.reshape([xf.shape[0] * xf.shape[1] * xf.shape[2], 1], order='F')
xff_T = xff.conj().T
yff = yf.reshape([yf.shape[0] * yf.shape[1] * yf.shape[2], 1], order='F')
yff_T = yff.conj().T
xx = np.dot(xff_T, xff).real / N # squared norm of x
yy = np.dot(yff_T, yff).real / N # squared norm of y
# cross-correlation term in Fourier domain
xyf = xf * conj(yf)
ixyf = ifftn(xyf, axes=(0, 1))
rxyf = real(ixyf)
xy = np.sum(rxyf, 2) # to spatial domain
# calculate gaussian response for all positions, then go back to the Fourier domain
sz = xf.shape[0] * xf.shape[1] * xf.shape[2]
mltp = (xx + yy - 2 * xy) / sz
crpm = -1 / (self.sigma * self.sigma)
expe = crpm * np.maximum(0, mltp)
expx = np.exp(expe)
kf = fftn(expx, axes=(0, 1))
return kf
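
    # Note: gaussian_correlation evaluates the Gaussian kernel between x and
    # every cyclic shift of y entirely in the Fourier domain,
    #   k = exp(-max(0, (||x||^2 + ||y||^2 - 2 * ifft(X . conj(Y))) / n) / sigma^2),
    # which is the kernel trick used by KCF; returning fftn(k) keeps the
    # ridge-regression update in frequency space as well.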
def get_deep_feature(self, im):
# Preprocessing
from numpy import expand_dims
#img = im.astype('float32') # note: [0, 255] range
img = im # note: [0, 255] range
img = cv2.resize(img, (224, 224))
img = expand_dims(img, axis=0)
feature_maps = vgg_model.predict(img)
f_map = feature_maps[3][0][:][:][:]
feature_map_n = cv2.resize(f_map, (self.cos_window.shape[1], self.cos_window.shape[0]),
interpolation=cv2.INTER_LINEAR)
return feature_map_n
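

if __name__ == '__main__':
    # Minimal usage sketch (the sequence path and initial box are
    # placeholders; frame_list must be an ordered list of image paths).
    import glob
    frames = sorted(glob.glob('data/sequence/*.jpg'))
    tracker = KernelizedCorrelationFilter(correlation_type='gaussian', feature='hog')
    boxes = tracker.start(init_gt=(120, 80, 64, 48), show=True, frame_list=frames)
    print(boxes.shape)  # (n_frames + 1, 4): the initial box plus one per frame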
|
[
"numpy.sqrt",
"scipy.io.loadmat",
"cv2.imshow",
"numpy.array",
"numpy.divide",
"numpy.mean",
"numpy.multiply",
"numpy.fft.fftn",
"numpy.fft.fft2",
"numpy.exp",
"numpy.real",
"numpy.max",
"numpy.dot",
"keras.models.Model",
"keras.applications.vgg19.VGG19",
"numpy.maximum",
"cv2.waitKey",
"numpy.conj",
"numpy.size",
"numpy.floor",
"utils.cos_window",
"numpy.argmax",
"utils.gaussian2d_rolled_labels",
"cv2.cvtColor",
"cv2.resize",
"numpy.fft.ifftn",
"cv2.imread",
"hog_cpp.fhog.get_hog.get_hog",
"numpy.double",
"numpy.sum",
"numpy.expand_dims"
] |
[((423, 443), 'scipy.io.loadmat', 'io.loadmat', (['vgg_path'], {}), '(vgg_path)\n', (433, 443), False, 'from scipy import io\n'), ((456, 466), 'keras.applications.vgg19.VGG19', 'VGG19', (['mat'], {}), '(mat)\n', (461, 466), False, 'from keras.applications.vgg19 import VGG19\n'), ((560, 603), 'keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'outputs'}), '(inputs=model.inputs, outputs=outputs)\n', (565, 603), False, 'from keras.models import Model\n'), ((2396, 2421), 'cv2.imread', 'cv2.imread', (['frame_list[0]'], {}), '(frame_list[0])\n', (2406, 2421), False, 'import cv2\n'), ((3398, 3413), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (3406, 3413), True, 'import numpy as np\n'), ((3541, 3579), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3553, 3579), False, 'import cv2\n'), ((3605, 3621), 'numpy.array', 'np.array', (['[h, w]'], {}), '([h, w])\n', (3613, 3621), True, 'import numpy as np\n'), ((3652, 3668), 'numpy.array', 'np.array', (['[h, w]'], {}), '([h, w])\n', (3660, 3668), True, 'import numpy as np\n'), ((4349, 4390), 'numpy.floor', 'np.floor', (['(self.window_sz / self.cell_size)'], {}), '(self.window_sz / self.cell_size)\n', (4357, 4390), True, 'import numpy as np\n'), ((4535, 4585), 'utils.gaussian2d_rolled_labels', 'gaussian2d_rolled_labels', (['yf_sz', 'self.output_sigma'], {}), '(yf_sz, self.output_sigma)\n', (4559, 4585), False, 'from utils import gaussian2d_rolled_labels, cos_window\n'), ((4604, 4615), 'numpy.fft.fft2', 'fft2', (['gauss'], {}), '(gauss)\n', (4608, 4615), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((4684, 4732), 'utils.cos_window', 'cos_window', (['[self.yf.shape[1], self.yf.shape[0]]'], {}), '([self.yf.shape[1], self.yf.shape[0]])\n', (4694, 4732), False, 'from utils import gaussian2d_rolled_labels, cos_window\n'), ((4936, 4959), 'numpy.fft.fftn', 'fftn', (['feat'], {'axes': '(0, 1)'}), '(feat, axes=(0, 1))\n', (4940, 4959), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((5092, 5129), 'numpy.divide', 'np.divide', (['self.yf', '(kf + self.lambda_)'], {}), '(self.yf, kf + self.lambda_)\n', (5101, 5129), True, 'import numpy as np\n'), ((5245, 5283), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (5257, 5283), False, 'import cv2\n'), ((6569, 6592), 'numpy.fft.fftn', 'fftn', (['feat'], {'axes': '(0, 1)'}), '(feat, axes=(0, 1))\n', (6573, 6592), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((6783, 6820), 'numpy.divide', 'np.divide', (['self.yf', '(kf + self.lambda_)'], {}), '(self.yf, kf + self.lambda_)\n', (6792, 6820), True, 'import numpy as np\n'), ((8719, 8742), 'hog_cpp.fhog.get_hog.get_hog', 'get_hog', (['(im_patch / 255)'], {}), '(im_patch / 255)\n', (8726, 8742), False, 'from hog_cpp.fhog.get_hog import get_hog\n'), ((9281, 9304), 'numpy.fft.ifftn', 'ifftn', (['xyf'], {'axes': '(0, 1)'}), '(xyf, axes=(0, 1))\n', (9286, 9304), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((9320, 9330), 'numpy.real', 'real', (['ixyf'], {}), '(ixyf)\n', (9324, 9330), False, 'from numpy import conj, real\n'), ((9344, 9359), 'numpy.sum', 'np.sum', (['rxyf', '(2)'], {}), '(rxyf, 2)\n', (9350, 9359), True, 'import numpy as np\n'), ((9669, 9681), 'numpy.exp', 'np.exp', (['expe'], {}), '(expe)\n', (9675, 9681), True, 'import numpy as np\n'), ((9695, 9718), 'numpy.fft.fftn', 'fftn', (['expx'], {'axes': '(0, 1)'}), '(expx, axes=(0, 1))\n', 
(9699, 9718), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((9952, 9979), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (9962, 9979), False, 'import cv2\n'), ((9995, 10019), 'numpy.expand_dims', 'expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (10006, 10019), False, 'from numpy import expand_dims\n'), ((10134, 10241), 'cv2.resize', 'cv2.resize', (['f_map', '(self.cos_window.shape[1], self.cos_window.shape[0])'], {'interpolation': 'cv2.INTER_LINEAR'}), '(f_map, (self.cos_window.shape[1], self.cos_window.shape[0]),\n interpolation=cv2.INTER_LINEAR)\n', (10144, 10241), False, 'import cv2\n'), ((3748, 3762), 'numpy.sqrt', 'np.sqrt', (['(h * w)'], {}), '(h * w)\n', (3755, 3762), True, 'import numpy as np\n'), ((3855, 3877), 'numpy.floor', 'np.floor', (['(self.pos / 2)'], {}), '(self.pos / 2)\n', (3863, 3877), True, 'import numpy as np\n'), ((3907, 3935), 'numpy.floor', 'np.floor', (['(self.target_sz / 2)'], {}), '(self.target_sz / 2)\n', (3915, 3935), True, 'import numpy as np\n'), ((3985, 4061), 'cv2.resize', 'cv2.resize', (['self.image', '(self.image.shape[1] // 2, self.image.shape[0] // 2)'], {}), '(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))\n', (3995, 4061), False, 'import cv2\n'), ((4147, 4192), 'numpy.multiply', 'np.multiply', (['self.target_sz', '(1 + self.padding)'], {}), '(self.target_sz, 1 + self.padding)\n', (4158, 4192), True, 'import numpy as np\n'), ((4410, 4451), 'numpy.floor', 'np.floor', (['(self.window_sz / self.cell_size)'], {}), '(self.window_sz / self.cell_size)\n', (4418, 4451), True, 'import numpy as np\n'), ((4474, 4515), 'numpy.floor', 'np.floor', (['(self.window_sz / self.cell_size)'], {}), '(self.window_sz / self.cell_size)\n', (4482, 4515), True, 'import numpy as np\n'), ((5333, 5409), 'cv2.resize', 'cv2.resize', (['self.image', '(self.image.shape[1] // 2, self.image.shape[0] // 2)'], {}), '(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))\n', (5343, 5409), False, 'import cv2\n'), ((5678, 5721), 'numpy.fft.ifftn', 'ifftn', (['(self.model_alphaf * kzf)'], {'axes': '(0, 1)'}), '(self.model_alphaf * kzf, axes=(0, 1))\n', (5683, 5721), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((5901, 5931), 'numpy.argmax', 'np.argmax', (['response'], {'axis': 'None'}), '(response, axis=None)\n', (5910, 5931), True, 'import numpy as np\n'), ((7112, 7136), 'numpy.multiply', 'np.multiply', (['self.pos', '(2)'], {}), '(self.pos, 2)\n', (7123, 7136), True, 'import numpy as np\n'), ((7644, 7663), 'numpy.floor', 'np.floor', (['(sz[0] / 2)'], {}), '(sz[0] / 2)\n', (7652, 7663), True, 'import numpy as np\n'), ((7700, 7719), 'numpy.floor', 'np.floor', (['(sz[1] / 2)'], {}), '(sz[1] / 2)\n', (7708, 7719), True, 'import numpy as np\n'), ((7892, 7906), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (7899, 7906), True, 'import numpy as np\n'), ((7949, 7963), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (7956, 7963), True, 'import numpy as np\n'), ((9257, 9265), 'numpy.conj', 'conj', (['yf'], {}), '(yf)\n', (9261, 9265), False, 'from numpy import conj, real\n'), ((9634, 9653), 'numpy.maximum', 'np.maximum', (['(0)', 'mltp'], {}), '(0, mltp)\n', (9644, 9653), True, 'import numpy as np\n'), ((2626, 2653), 'cv2.imread', 'cv2.imread', (['frame_list[idx]'], {}), '(frame_list[idx])\n', (2636, 2653), False, 'import cv2\n'), ((6026, 6040), 'numpy.size', 'np.size', (['zf', '(0)'], {}), '(zf, 0)\n', (6033, 6040), True, 'import numpy as np\n'), 
((6139, 6153), 'numpy.size', 'np.size', (['zf', '(0)'], {}), '(zf, 0)\n', (6146, 6153), True, 'import numpy as np\n'), ((6179, 6193), 'numpy.size', 'np.size', (['zf', '(1)'], {}), '(zf, 1)\n', (6186, 6193), True, 'import numpy as np\n'), ((6267, 6281), 'numpy.size', 'np.size', (['zf', '(1)'], {}), '(zf, 1)\n', (6274, 6281), True, 'import numpy as np\n'), ((6329, 6364), 'numpy.array', 'np.array', (['[vert_delta, horiz_delta]'], {}), '([vert_delta, horiz_delta])\n', (6337, 6364), True, 'import numpy as np\n'), ((7619, 7635), 'numpy.floor', 'np.floor', (['pos[0]'], {}), '(pos[0])\n', (7627, 7635), True, 'import numpy as np\n'), ((7675, 7691), 'numpy.floor', 'np.floor', (['pos[1]'], {}), '(pos[1])\n', (7683, 7691), True, 'import numpy as np\n'), ((8412, 8425), 'numpy.double', 'np.double', (['im'], {}), '(im)\n', (8421, 8425), True, 'import numpy as np\n'), ((8452, 8462), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (8459, 8462), True, 'import numpy as np\n'), ((8611, 8620), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (8617, 8620), True, 'import numpy as np\n'), ((9076, 9094), 'numpy.dot', 'np.dot', (['xff_T', 'xff'], {}), '(xff_T, xff)\n', (9082, 9094), True, 'import numpy as np\n'), ((9138, 9156), 'numpy.dot', 'np.dot', (['yff_T', 'yff'], {}), '(yff_T, yff)\n', (9144, 9156), True, 'import numpy as np\n'), ((3702, 3717), 'numpy.floor', 'np.floor', (['(h / 2)'], {}), '(h / 2)\n', (3710, 3717), True, 'import numpy as np\n'), ((3721, 3736), 'numpy.floor', 'np.floor', (['(w / 2)'], {}), '(w / 2)\n', (3729, 3736), True, 'import numpy as np\n'), ((7870, 7884), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (7877, 7884), True, 'import numpy as np\n'), ((7927, 7941), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (7934, 7941), True, 'import numpy as np\n'), ((3173, 3203), 'cv2.imshow', 'cv2.imshow', (['"""demo"""', 'show_frame'], {}), "('demo', show_frame)\n", (3183, 3203), False, 'import cv2\n'), ((3228, 3242), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3239, 3242), False, 'import cv2\n'), ((4236, 4282), 'numpy.sqrt', 'np.sqrt', (['(self.target_sz[0] * self.target_sz[1])'], {}), '(self.target_sz[0] * self.target_sz[1])\n', (4243, 4282), True, 'import numpy as np\n'), ((2920, 2967), 'cv2.cvtColor', 'cv2.cvtColor', (['current_frame', 'cv2.COLOR_GRAY2BGR'], {}), '(current_frame, cv2.COLOR_GRAY2BGR)\n', (2932, 2967), False, 'import cv2\n')]
|
"""Utility functions used in Activity 7."""
import random
import numpy as np
from matplotlib import pyplot as plt
from keras.callbacks import TensorBoard
def create_groups(data, group_size=7):
"""Create distinct groups from a continuous series.
Parameters
----------
data: np.array
Series of continious observations.
group_size: int, default 7
Determines how large the groups are. That is,
how many observations each group contains.
Returns
-------
A Numpy array object.
"""
samples = list()
for i in range(0, len(data), group_size):
sample = list(data[i:i + group_size])
if len(sample) == group_size:
samples.append(np.array(sample).reshape(1, group_size).tolist())
a = np.array(samples)
return a.reshape(1, a.shape[0], group_size)
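
# Example (illustrative): a 21-observation series yields three weekly groups,
# i.e. create_groups(np.arange(21)).shape == (1, 3, 7).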
def split_lstm_input(groups):
"""Split groups in a format expected by the LSTM layer.
Parameters
----------
groups: np.array
Numpy array with the organized sequences.
Returns
-------
X, Y: np.array
Numpy arrays with the shapes required by
the LSTM layer. X with (1, a - 1, b)
and Y with (1, b). Where a is the total
number of groups in `group` and b the
number of observations per group.
"""
X = groups[0:, :-1].reshape(1, groups.shape[1] - 1, groups.shape[2])
Y = groups[0:, -1:][0]
return X, Y
def mape(A, B):
"""Calculate the mean absolute percentage error from two series."""
return np.mean(np.abs((A - B) / A)) * 100
def rmse(A, B):
"""Calculate the root mean square error from two series."""
return np.sqrt(np.square(np.subtract(A, B)).mean())
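
# Example (illustrative): for A = np.array([100., 200.]) and
# B = np.array([110., 190.]), mape(A, B) == 7.5 and rmse(A, B) == 10.0.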
def train_model(model, X, Y, epochs=100, version=0, run_number=0):
"""Shorthand function for training a new model.
This function names each run of the model
using the TensorBoard naming conventions.
Parameters
----------
model: Keras model instance
Compiled Keras model.
X, Y: np.array
Series of observations to be used in
the training process.
version: int
Version of the model to run.
run_number: int
The number of the run. Used in case
the same model version is run again.
"""
    run_id = random.getrandbits(128)  # avoid shadowing the builtin `hash`
    hex_code = '%032x' % run_id
model_name = f'bitcoin_lstm_v{version}_run_{run_number}_{hex_code[:6]}'
tensorboard = TensorBoard(log_dir=f'./logs/{model_name}')
model_history = model.fit(
x=X, y=Y,
batch_size=1, epochs=epochs,
callbacks=[tensorboard],
shuffle=False)
return model_history
def plot_two_series(A, B, variable, title):
"""Plot two series using the same `date` index.
Parameters
----------
A, B: pd.DataFrame
Dataframe with a `date` key and a variable
passed in the `variable` parameter. Parameter A
represents the "Observed" series and B the "Predicted"
        series. These will be labelled respectively.
variable: str
Variable to use in plot.
title: str
Plot title.
"""
plt.figure(figsize=(14, 4))
plt.xlabel('Observed and predicted')
ax1 = A.set_index('date')[variable].plot(
color='#d35400', grid=True, label='Observed', title=title)
ax2 = B.set_index('date')[variable].plot(
color='grey', grid=True, label='Predicted')
ax1.set_xlabel("Predicted Week")
ax1.set_ylabel("Predicted Values")
plt.legend()
plt.show()
def denormalize(reference, series,
normalized_variable='close_point_relative_normalization',
denormalized_variable='close'):
"""Denormalize the values for a given series.
Parameters
----------
reference: pd.DataFrame
DataFrame to use as reference. This dataframe
contains both a week index and the USD price
reference that we are interested on.
series: pd.DataFrame
DataFrame with the predicted series. The
DataFrame must have the same columns as the
`reference` dataset.
normalized_variable: str, default 'close_point_relative_normalization'
Variable to use in normalization.
denormalized_variable: str, default `close`
Variable to use in de-normalization.
Returns
-------
A modified DataFrame with the new variable provided
in `denormalized_variable` parameter.
"""
week_values = reference[reference['iso_week'] == series['iso_week'].values[0]]
last_value = week_values[denormalized_variable].values[0]
series[denormalized_variable] = last_value * (series[normalized_variable] + 1)
return series
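

if __name__ == '__main__':
    # Minimal end-to-end sketch on synthetic data (made up for illustration):
    # group a sine series into weeks, split it for the LSTM layer, and
    # inspect the resulting shapes.
    series = np.sin(np.linspace(0, 10, 77))
    groups = create_groups(series)   # shape (1, 11, 7)
    X, Y = split_lstm_input(groups)  # shapes (1, 10, 7) and (1, 7)
    print(X.shape, Y.shape)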
|
[
"numpy.abs",
"matplotlib.pyplot.xlabel",
"numpy.subtract",
"keras.callbacks.TensorBoard",
"numpy.array",
"matplotlib.pyplot.figure",
"random.getrandbits",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((779, 796), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (787, 796), True, 'import numpy as np\n'), ((2298, 2321), 'random.getrandbits', 'random.getrandbits', (['(128)'], {}), '(128)\n', (2316, 2321), False, 'import random\n'), ((2447, 2490), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'f"""./logs/{model_name}"""'}), "(log_dir=f'./logs/{model_name}')\n", (2458, 2490), False, 'from keras.callbacks import TensorBoard\n'), ((3136, 3163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 4)'}), '(figsize=(14, 4))\n', (3146, 3163), True, 'from matplotlib import pyplot as plt\n'), ((3168, 3204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observed and predicted"""'], {}), "('Observed and predicted')\n", (3178, 3204), True, 'from matplotlib import pyplot as plt\n'), ((3500, 3512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3510, 3512), True, 'from matplotlib import pyplot as plt\n'), ((3517, 3527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3525, 3527), True, 'from matplotlib import pyplot as plt\n'), ((1547, 1566), 'numpy.abs', 'np.abs', (['((A - B) / A)'], {}), '((A - B) / A)\n', (1553, 1566), True, 'import numpy as np\n'), ((1685, 1702), 'numpy.subtract', 'np.subtract', (['A', 'B'], {}), '(A, B)\n', (1696, 1702), True, 'import numpy as np\n'), ((720, 736), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (728, 736), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import itertools
__metaclass__ = type
def prob_incr(species, proj_compressed_data, min_occurences = 10):
p = proj_compressed_data['count_incr']/ proj_compressed_data['count']
p[proj_compressed_data['count'] < min_occurences] = -1
return p
def score(species,IV, G, exp_digitized, bins, thresholds): # G: control species set
(v_f, v_a, v_n) = (0, 0, 0)
IV[IV.isnull()] = 0
if (IV == 0).all(): return thresholds['Ti']
n_repressors = IV[IV == -1].count()
n_activators = IV[IV == 1].count()
# G.extend(parents(IV) )
GG = G[:]
GG.extend(parents(IV))
GG = np.unique(GG)
pcd = exp_digitized.project(species,GG)
pcd['prob_incr'] = prob_incr(species, pcd, min_occurences = 1)
# if (GG == ['CI','LacI']).all(): print pcd
query_parents= ""
if n_repressors > n_activators:
query_parents = " & ".join(['%s == %s' %(sp, bins[sp][0] ) for sp in IV[IV == -1].index ] ) # lowest level for repressors
query_act = " & ".join(['%s == %s' %(sp, bins[sp][-2]) for sp in IV[IV == 1].index ] ) # highest level for activators
if query_act != "": query_parents += (" & " + query_act )
else:
query_parents = " & ".join(['%s == %s' %(sp, bins[sp][0] ) for sp in IV[IV == 1].index ] ) # lowest level for activators
query_rep = " & ".join(['%s == %s' %(sp, bins[sp][-1]) for sp in IV[IV == -1].index ] ) # highest level for repressors
if query_rep != "": query_parents += (" & " + query_rep)
for g in G:
        if len(parents(IV)) == 1 and g == parents(IV)[0]: # single-influence and self-regulating
idx_base = pcd.query(query_parents).index
p_base = pcd.at[idx_base[0], 'prob_incr']
idx_test = np.setdiff1d(pcd.index, idx_base)
if p_base != -1:
for i in idx_test:
p_a = pcd.loc[i,'prob_incr']
# print "p_a / p_base = %s / %s" % (p_a, p_base)
if p_a != -1 :
if n_repressors < n_activators:
if (p_a / p_base) > thresholds['Ta']: v_f += 1; # print "Voted for"
elif (p_a / p_base) < thresholds['Tr']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
else:
if (p_a / p_base) < thresholds['Tr']: v_f += 1; # print "Voted for"
elif (p_a / p_base) > thresholds['Ta']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
else:
for b in bins[g]:
query_cntl = '%s == %s' % (g,b)
if ( g in parents(IV)):
query_str = query_cntl
else:
#p_base = float(pcd.query(query_parents+ ' & ' + query_cntl )['prob_incr'])
query_str = (query_parents+ ' & ' + query_cntl, query_cntl)[query_parents == ""]
idx_base = pcd.query(query_str).index
p_base = pcd.at[idx_base[0], 'prob_incr']
if p_base != -1:
# if p_base == 0: p_base += pseudo_count
idx_test = np.setdiff1d(pcd.query(query_cntl).index, idx_base)
for i in idx_test:
# pcd.loc[i, 'ratio'] = pcd.loc[i,'prob_incr'] / p_base
p_a = pcd.loc[i,'prob_incr']
# print "p_a / p_base = %s / %s" % (p_a, p_base)
if p_a != -1 :
# print pcd.loc[idx, 'prob_incr']/ p_base
if n_repressors < n_activators:
if (p_a / p_base) > thresholds['Ta']: v_f += 1; # print "Voted for"
elif (p_a / p_base) < thresholds['Tr']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
else:
if (p_a / p_base) < thresholds['Tr']: v_f += 1; # print "Voted for"
elif (p_a / p_base) > thresholds['Ta']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
# print "IV: %s" % IV
# print (v_f, v_a, v_n)
if (v_f + v_a + v_n == 0): return 0.
score = (v_f - v_a + 0.) / (v_f + v_a + v_n )
    if len(parents(IV)) == 1 and g == parents(IV)[0]: score *= 0.75 # down-weight single-influence and self-regulating
return score
def parents(infl):
return infl[(infl.notnull()) & (infl != 0 )].index
def createIVSet(species, exp_digitized,IV0, bins, thresholds):
I = []
scores = []
idx_unknown = IV0[IV0.isnull()].index # species name
iv = IV0.copy()
iv[idx_unknown] = 0
G = [species]
score_zero = score(species,iv, G, exp_digitized, bins, thresholds)
# print "%s \t Background score: %s" % (list(iv), score_zero)
for u in idx_unknown:
iv1 = iv.copy()
iv1.loc[u] = 1 # set activators
# print "scoring %s" % iv1
score_a = score(species ,iv1, G, exp_digitized, bins, thresholds)
# print "%s \t Activator score: %s" % (list(iv1), score_a)
if score_a >= score_zero:
I.append(iv1)
scores.append(score_a)
else:
iv1.loc[u] = -1
# print "scoring %s" % iv1
score_r = score(species ,iv1, G, exp_digitized, bins, thresholds)
# print "%s \t Repressor score: %s" % (list(iv1), score_r)
if score_r >= score_zero:
I.append(iv1)
scores.append(score_r)
return (I, scores)
def combineIVs(species, IVs, IVscores, IV0,exp_digitized, bins, thresholds):
'''score every possible combination of IV in input IVs'''
I = []
scores = []
to_remove = []
tj = len(IV0[IV0.notnull()])
bg_score = 0.
bg_iv = IV0.copy()
bg_iv[IV0.isnull()] = 0
bg_score = score(species, bg_iv, [species], exp_digitized, bins, thresholds)
for i in range(2, min(thresholds['Tj'], len(IV0)- tj+1)):
K = itertools.combinations(range(len(IVs)), i)
for k in K:
old_scores = np.zeros((len(k),))
added = IVs[0][IV0.isnull()]; added[:] = 0 # combined vector
for j in range(len(k)):
added += IVs[k[j]][IV0.isnull()]
old_scores[j] = IVscores[k[j]]
new_iv = pd.concat((added , IV0[IV0.notnull()]))
if (max(old_scores) - min(old_scores)) <= thresholds['Tm']:
new_score = score(species, new_iv, [species] ,exp_digitized, bins, thresholds)
if ((new_score >= old_scores).all() and (new_score > bg_score)):
I.append(new_iv)
scores.append(new_score)
to_remove.extend(k)
return (I, scores, set(to_remove))
def competeIVs(species, iv1, iv2, exp_digitized, bins, thresholds):
G = [species]; G.extend(np.setdiff1d(parents(iv2), parents(iv1)) )
s1 = score(species, iv1, G, exp_digitized, bins, thresholds)
G = [species]; G.extend(np.setdiff1d(parents(iv1), parents(iv2)) )
s2 = score(species, iv2, G, exp_digitized, bins, thresholds)
if s1 > s2: return (0, s1)
elif s1 < s2: return (1, s2)
else: return ([0, 1], [s1, s2] )
def learn(experiments, initialNetwork, thresholds = { 'Tr': 0.75, 'Ta': 1.15, 'Tj': 2, 'Ti': 0.5, 'Tm': 0.} , nbins=4, bin_assignment = 1):
    '''Learn a causal network from a set of time series data, each resulting from an independent experiment.
    The algorithm learns influence vectors for one gene at a time.
    For each gene, there are 3 main stages of learning:
    (1) Adding single influences to create a set of new influence vectors
    (2) Combining influence vectors from stage (1) to create new influence vectors (with more than one parent)
    (3) Competing between influence vectors to determine the best one
    '''
cnet = initialNetwork.copy()
    binned = experiments.digitize(nbins=nbins, bin_assignment=bin_assignment)
bins = { sp: np.unique(binned[sp]) for sp in initialNetwork }
for sp in initialNetwork:
# print "----------------------------\nLearning influence vector for %s" % sp
initial = initialNetwork.influences(sp)
(IVs, scores) = createIVSet(sp,binned, initial, bins, thresholds)
# if sp == 'LacI':
# print "Initial IVs"
# print IVs
# print scores
(cIVs, cScores, to_remove) = combineIVs(sp, IVs, scores, initial,binned, bins, thresholds)
# if sp == 'LacI':
# print "Combined IVs"
# print cIVs
# print cScores
for i in np.setdiff1d(range(len(IVs)), to_remove):
cIVs.append(IVs[i])
cScores.append(scores[i])
while len(cIVs) > 1:
            sorted_idx = np.argsort(-np.array(cScores)) # rank IVs from highest score (index order only; currently unused)
winnerId, wScore = competeIVs(sp, cIVs[0], cIVs[-1], binned, bins, thresholds)
if winnerId == 1:
cIVs[0] = cIVs[-1]
cScores[0] = cScores[-1]
cIVs = cIVs[:-1]
cScores = cScores[:-1]
if len(cIVs) > 0: cnet.loc[sp] = cIVs[0]
else:
cnet.loc[sp] = initial.copy()
cnet.loc[sp][initial.isnull()] = 0
return cnet
class CausalNetwork(pd.DataFrame):
def __init__(self, species):
        '''Store the influence vector of each gene in a row; each value indicates the
        relationship of the gene in the column --> the gene in the row. Example:
        n = CausalNetwork(...)
            A     B   C
        A   0    -1   0
        B   0     1   1
        C  -1  None   0
         0: no relationship
         1: activate
        -1: repress
        None: unknown
        '''
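        # Dividing a zero matrix by 0. below fills the frame with NaN, the "unknown" marker.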
super(CausalNetwork,self).__init__(np.zeros((len(species), len(species)), dtype=int)/ 0., columns = species, index=species)
def activators(self,i):
''' return the activators of i'''
pass
def repressors(self,i):
''' return the repressors of i'''
pass
def influences(self,i):
'''return the influence vector of i'''
return self.loc[i]
def __getitem__(self, i):
return self.loc[i]
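
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal wiring of the pipeline, assuming a hypothetical `experiments` object that
# exposes the `digitize(nbins=..., bin_assignment=...)` method used by `learn` above.
# The species names are illustrative only.
def _example_learn(experiments):
    net0 = CausalNetwork(['CI', 'LacI', 'TetR'])  # every relationship starts as NaN (unknown)
    net0.loc['CI', 'CI'] = 0                      # optionally pin relationships known a priori
    return learn(experiments, net0,
                 thresholds={'Tr': 0.75, 'Ta': 1.15, 'Tj': 2, 'Ti': 0.5, 'Tm': 0.},
                 nbins=4)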
|
[
"numpy.array",
"numpy.setdiff1d",
"numpy.unique"
] |
[((640, 653), 'numpy.unique', 'np.unique', (['GG'], {}), '(GG)\n', (649, 653), True, 'import numpy as np\n'), ((8443, 8464), 'numpy.unique', 'np.unique', (['binned[sp]'], {}), '(binned[sp])\n', (8452, 8464), True, 'import numpy as np\n'), ((1796, 1829), 'numpy.setdiff1d', 'np.setdiff1d', (['pcd.index', 'idx_base'], {}), '(pcd.index, idx_base)\n', (1808, 1829), True, 'import numpy as np\n'), ((9252, 9269), 'numpy.array', 'np.array', (['cScores'], {}), '(cScores)\n', (9260, 9269), True, 'import numpy as np\n')]
|
"""Tools for working with Cryptopunk NFTs; this includes utilities for data analysis and image preparation for training machine learning models using Cryptopunks as training data.
Functions:
get_punk(id)
pixel_to_img(pixel_str, dim)
flatten(img)
unflatten(img)
sort_dict_by_function_of_value(d, f)
add_index_to_colors(colors)
"""
import os
import time
import requests
from collections import OrderedDict
from bs4 import BeautifulSoup
from re import sub
import numpy as np
import pandas as pd
from matplotlib.colors import rgb2hex
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
__ROOT_DIR__ = os.path.dirname(os.path.abspath(__file__))
__PUNK_DIR__ = f"{__ROOT_DIR__}/images/training"
def camel_case(string):
'''
Convert string to camelCase
'''
string = string.strip("\n")
string = sub(r"(_|-)+", " ", string).title().replace(" ", "")
return string[0].lower() + string[1:]
def color_str_to_hex(s):
'''
Convert string representation of numpy pixel array
to a string hex value
'''
return rgb2hex([float(x) for x in s[1:-1].split(' ') if x != ''])
def get_punk(id):
'''
Returns a ndarray with loaded image
'''
return mpimg.imread(f'''{__PUNK_DIR__}/punk{"%04d" % id}.png''')
def pixel_to_img(pixel_str, dim = (24,24)):
'''
Take pixel of format "[r,g,b,b]"
and return an image of size `dim` containing
only the pixel's color.
'''
(x,y) = dim
c = np.fromstring(pixel_str[1:-1], float, sep=' ')
return np.full((x, y, 4), c)
def pixel_to_ximg(pixel_strs, dim = (24,24), n=3 ):
'''
Take pixel of format "[r,g,b,b]"
and return an image of size `dim` containing
a matrix of size n*n
'''
(x,y) = (dim[0]//n, dim[1]//n)
m = []
for i in range(0,n):
l=[]
for j in range(0,n):
img = np.full((x, y, 4),
np.fromstring(pixel_strs[i*n + j][1:-1], float, sep=' '))
l.append(img)
m.append(np.concatenate(l, axis=1))
return np.concatenate(m, axis=0)
def flatten(img):
'''
Convert (x,y,z) array containing a pixel in z-dimension
to an (x,y) array with str values for each (i,j)
the intention is to make this easier to work with in ML
training.
'''
return np.array([[str(c) for c in row]
for row in img])
def unflatten(img):
'''
Return a flattend image to valid .png format for display
'''
return np.array([[np.fromstring(c[1:-1], float, sep=' ')
for c in row] for row in img])
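
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Round-trip one punk through flatten/unflatten: `flatten` stringifies every pixel so a
# 24x24 image becomes a 2-D grid of "[r g b a]" strings, convenient as categorical ML
# features; `unflatten` parses the strings back (up to print precision, ~8 digits).
def _example_flatten_roundtrip(punk_id=0):
    img = get_punk(punk_id)       # (24, 24, 4) float RGBA array
    flat = flatten(img)            # (24, 24) array of pixel strings
    restored = unflatten(flat)     # back to floats
    assert np.allclose(img, restored)
    return flat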
def sort_dict_by_function_of_value(d, f = len):
    sorted_tuples = sorted(d.items(),
                           key=lambda item: f(item[1]))
return {k: v for k, v in sorted_tuples}
def add_index_to_colors(colors):
    '''
    Add a unique, sequential index to the entry for
    each color. The returned dictionary has the form
    {color_string: {"id": int, "punkIds": list[int]}}
    '''
i=0
d={}
for k in colors.keys():
d[k] = {
'id' : i,
'punkIds' : colors[k]
}
i=i+1
return d
def get_attr_dict():
'''
Read the attr csv and populate a default dict
'''
d=OrderedDict()
with open(f"{__ROOT_DIR__}/data/list_attr_punx.csv") as f:
for attr in f.read().strip('\n').split(','):
d[attr]=-1
    return d
# Assumed module-level constant: get_punk_dict() below reads __ATTR_DICT__, which is
# otherwise never defined in this module.
__ATTR_DICT__ = get_attr_dict()
def get_punk_attrs(id):
'''
Retrieve `id` cryptopunk from larvalabs.com,
parse HTML to extract type and attribute list
to return list of attributes
'''
typeClass="col-md-10 col-md-offset-1 col-xs-12"
punk_page=requests.get(f"https://www.larvalabs.com/cryptopunks/details/{id}")
if(punk_page.status_code != 200):
print(punk_page.status_code)
return {}
punk_html=punk_page.text
soup = BeautifulSoup(punk_html, 'html.parser')
details = soup.find(id="punkDetails")
punkType = camel_case(details.find(class_=typeClass).find('a').contents[0])
attrs=[punkType]
attrTags = details.find(class_ = "row detail-row")
for attrTag in attrTags.find_all('a'):
attrs.append(camel_case(attrTag.contents[0]))
return attrs
def get_punk_dict(id):
'''
Retrieve a punk page, pull type and attributes
from HTML and return a dictionary of attribute to
(-1,1) mapping where 1 is truthy for existence of
attribute
'''
od = {k:__ATTR_DICT__[k] for k in __ATTR_DICT__}
attrs = get_punk_attrs(id)
for attr in attrs:
od[attr]=1
return od
def get_punks(start, end):
'''
Retrieve punks in range `start` to `end`
'''
punks={}
for id in range(start, end):
print(id)
time.sleep(3.3)
punks[id] = get_punk_dict(id)
return punks
def plot_in_grid(n, images, predictions, labels):
'''
Plot `images` in an n*n grid with
prediction and labels as header
'''
(x,y) = (n,n)
fig = plt.figure(figsize=(9,14))
    for i in range(1, (x*y)+1):   # subplot indices are 1-based
        fig.add_subplot(x, y, i)
        plt.imshow(images[i])
        plt.title(f"{predictions[i][0]},{labels[i][0]}")
        plt.axis('off')
return fig
|
[
"matplotlib.pyplot.imshow",
"collections.OrderedDict",
"matplotlib.pyplot.title",
"matplotlib.image.imread",
"requests.get",
"time.sleep",
"bs4.BeautifulSoup",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"re.sub",
"numpy.concatenate",
"os.path.abspath",
"numpy.full",
"numpy.fromstring"
] |
[((655, 680), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (670, 680), False, 'import os\n'), ((1216, 1269), 'matplotlib.image.imread', 'mpimg.imread', (['f"""{__PUNK_DIR__}/punk{\'%04d\' % id}.png"""'], {}), '(f"{__PUNK_DIR__}/punk{\'%04d\' % id}.png")\n', (1228, 1269), True, 'import matplotlib.image as mpimg\n'), ((1484, 1530), 'numpy.fromstring', 'np.fromstring', (['pixel_str[1:-1]', 'float'], {'sep': '""" """'}), "(pixel_str[1:-1], float, sep=' ')\n", (1497, 1530), True, 'import numpy as np\n'), ((1542, 1563), 'numpy.full', 'np.full', (['(x, y, 4)', 'c'], {}), '((x, y, 4), c)\n', (1549, 1563), True, 'import numpy as np\n'), ((2069, 2094), 'numpy.concatenate', 'np.concatenate', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (2083, 2094), True, 'import numpy as np\n'), ((3209, 3222), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3220, 3222), False, 'from collections import OrderedDict\n'), ((3623, 3690), 'requests.get', 'requests.get', (['f"""https://www.larvalabs.com/cryptopunks/details/{id}"""'], {}), "(f'https://www.larvalabs.com/cryptopunks/details/{id}')\n", (3635, 3690), False, 'import requests\n'), ((3825, 3864), 'bs4.BeautifulSoup', 'BeautifulSoup', (['punk_html', '"""html.parser"""'], {}), "(punk_html, 'html.parser')\n", (3838, 3864), False, 'from bs4 import BeautifulSoup\n'), ((4930, 4957), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 14)'}), '(figsize=(9, 14))\n', (4940, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4698, 4713), 'time.sleep', 'time.sleep', (['(3.3)'], {}), '(3.3)\n', (4708, 4713), False, 'import time\n'), ((5025, 5046), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[i]'], {}), '(images[i])\n', (5035, 5046), True, 'import matplotlib.pyplot as plt\n'), ((5051, 5099), 'matplotlib.pyplot.title', 'plt.title', (['f"""{predictions[i][0]},{labels[i][0]}"""'], {}), "(f'{predictions[i][0]},{labels[i][0]}')\n", (5060, 5099), True, 'import matplotlib.pyplot as plt\n'), ((5104, 5119), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5112, 5119), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2056), 'numpy.concatenate', 'np.concatenate', (['l'], {'axis': '(1)'}), '(l, axis=1)\n', (2045, 2056), True, 'import numpy as np\n'), ((1930, 1988), 'numpy.fromstring', 'np.fromstring', (['pixel_strs[i * n + j][1:-1]', 'float'], {'sep': '""" """'}), "(pixel_strs[i * n + j][1:-1], float, sep=' ')\n", (1943, 1988), True, 'import numpy as np\n'), ((2514, 2552), 'numpy.fromstring', 'np.fromstring', (['c[1:-1]', 'float'], {'sep': '""" """'}), "(c[1:-1], float, sep=' ')\n", (2527, 2552), True, 'import numpy as np\n'), ((843, 869), 're.sub', 'sub', (['"""(_|-)+"""', '""" """', 'string'], {}), "('(_|-)+', ' ', string)\n", (846, 869), False, 'from re import sub\n')]
|
"""This module provides tools for assessing flood risk
"""
from datetime import timedelta
from floodsystem.datafetcher import fetch_measure_levels
import numpy as np
from floodsystem.analysis import polyfit
from matplotlib import dates as date
def stations_level_over_threshold(stations, tol):
"""For a list of MonitoringStation objects (stations) and a tolerance value (tol),
returns a list of tuples containing a MonitoringStation object and its corresponding relative water level.
The returned list is sorted by the relative level in descending order.
Note: "update_water_levels" function needs to be called at least once for this function to work."""
# Create the output list
output = []
for station in stations:
# Get the relative water level. Will be "None" if typical range is inconsistent or the latest level
# is not known
relative_level = station.relative_water_level()
# Check if the relative level is "None" and, if not "None", compare it with the tolerance value
if relative_level is not None and relative_level > tol:
# Append tuple of MonitoringStation object and relative level to the output list
output.append((station, relative_level))
# Sort the list in order of descending relative water levels
output.sort(key=lambda val: val[1], reverse=True)
# Return the output list
return output
def stations_highest_rel_level(stations, N):
"""For a list of MonitoringStaton objects (stations), returns a list of the N stations
at which the water level, relative to the typical range, is highest"""
#Filter list as to not include stations without relative water level
new_stations = list(filter(lambda station: station.relative_water_level() is not None, stations))
#Sorts stations in descending order of relative water level
new_stations.sort(key=lambda station: station.relative_water_level(), reverse = True)
#Return first N stations in lists (N stations with highest water level)
return new_stations[:N]
def get_station_flood_risk(station):
"""For a MonitoringStation object (station), returns flood a risk rating - a number between
0 and 4. Uses data for the relative water level and the rise in the """
flood_risk = 0
rel_level_threshold = 2
rise_threshold = 0.1
#First factor is the current relative water level of station - sets initial risk
rel_water_level = station.relative_water_level()
#If no data available for relative water level, cannot calculate score, so return None
if rel_water_level is None:
return None
if rel_water_level > rel_level_threshold:
flood_risk = 3 #If above threshold, set high risk
else:
flood_risk = 1 #If below threshold, set low risk
#Second factor is the rate of change of the water level (e.g., if rising rapidly, give a high score) - used to adjust risk
level_rise = get_level_rise(station)
#If no data available for level rise, cannot calculate score, so return None
if level_rise is None:
return None
#For decreasing level, reduce flood risk
if level_rise < 0:
flood_risk -= 1
#For increasing level above threshold, increase flood risk
if level_rise > rise_threshold:
flood_risk += 1
return flood_risk
def get_level_rise(station):
"""For a MonitoringStation object (station), returns a the rate of water level rise, specifically
the average value over the last 2 days"""
#Fetch data (if no data available, return None)
times, values = fetch_measure_levels(station.measure_id, timedelta(days=2))
#Only continue if data available, otherwise return None
    if times and values and None not in times and None not in values:
        #Get polynomial approximation of the water level
poly, d0 = polyfit(times, values, p=4)
#Find derivative polynomial
level_der = np.polyder(poly)
#Obtain list of gradients over last 2 days using the derivative polynomial
grads = []
for t in times:
grads.append(level_der(date.date2num(t) - d0))
#Return average of gradient values
return np.average(grads)
else:
return None
def get_town_flood_risk(town, stations_by_town):
"""Obtains the flood risk for a town, based on the flood risks for the towns
respective station, using the same rating system - returned value is the highest
flood risk of the towns stations"""
#Get stations for town
stations_in_town = stations_by_town[town]
flood_risk = get_station_flood_risk(stations_in_town[0])
#Find highest flood risk value from town's stations by iterating through stations
for i in range(1, len(stations_in_town)):
new_flood_risk = get_station_flood_risk(stations_in_town[i])
if new_flood_risk is None:
break
if flood_risk is None or new_flood_risk > flood_risk:
flood_risk = new_flood_risk
#Return highest value
return flood_risk
def get_flood_risk_rating(num):
"""Converts an integer value of a flood risk rating to the rating it
represents - low (0/1), moderate (2), high (3), severe (4)"""
if num == 0 or num == 1:
return "Low"
if num == 2:
return "Moderate"
if num == 3:
return "High"
if num == 4:
return "Severe"
return None #default (for None value or other)
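
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# End-to-end rating for one town, assuming `stations_by_town` maps a town name to its
# list of MonitoringStation objects (built elsewhere in floodsystem).
def _example_town_rating(town, stations_by_town):
    risk = get_town_flood_risk(town, stations_by_town)  # integer 0-4, or None
    return get_flood_risk_rating(risk)                   # "Low"/"Moderate"/"High"/"Severe"/None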
|
[
"matplotlib.dates.date2num",
"numpy.average",
"numpy.polyder",
"floodsystem.analysis.polyfit",
"datetime.timedelta"
] |
[((3640, 3657), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (3649, 3657), False, 'from datetime import timedelta\n'), ((3860, 3887), 'floodsystem.analysis.polyfit', 'polyfit', (['times', 'values'], {'p': '(4)'}), '(times, values, p=4)\n', (3867, 3887), False, 'from floodsystem.analysis import polyfit\n'), ((3945, 3961), 'numpy.polyder', 'np.polyder', (['poly'], {}), '(poly)\n', (3955, 3961), True, 'import numpy as np\n'), ((4215, 4232), 'numpy.average', 'np.average', (['grads'], {}), '(grads)\n', (4225, 4232), True, 'import numpy as np\n'), ((4132, 4148), 'matplotlib.dates.date2num', 'date.date2num', (['t'], {}), '(t)\n', (4145, 4148), True, 'from matplotlib import dates as date\n')]
|
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import pandas as pd
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from sklearn.linear_model import LinearRegression
import sys; import re
def AIC(data,model,model_type,k=2):
if model_type=='linear':
return len(data)* np.log(model.ssr/len(data)) + k * (model.df_model+1)
elif model_type=='logistic' :
return model.aic
def Cp(data,model,sigma2):
return model.ssr/sigma2 - (len(data) - 2.*model.df_model- 1)
def BIC(data,model,model_type='linear'):
if model_type=='linear':
return np.log(model.ssr/model.centered_tss) * len(data) + (model.df_model+1) * np.log(len(data))
    elif model_type=='logistic':
        return model.bic_llf  # log-likelihood-based BIC exposed by statsmodels GLMResults
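
# For a Gaussian linear model the criteria above reduce to (up to additive constants):
#   AIC = n*log(RSS/n) + k*(p+1)          with k=2 (k=log(n) gives the BIC penalty)
#   BIC = n*log(RSS/TSS) + (p+1)*log(n)   (the centered-TSS variant used here)
#   Cp  = RSS/sigma^2 - (n - 2*p - 1)     with sigma^2 estimated from the full model
# where n = len(data) and p = model.df_model (number of predictors, excluding intercept).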
def regressor(y,X, model_type):
if model_type =="linear":
regressor = sm.OLS(y, X)
regressor_fitted = regressor.fit()
elif model_type == 'logistic':
regressor = sm.GLM(y, X,family=sm.families.Binomial())
regressor_fitted = regressor.fit()
return regressor_fitted
def criterion_f(X,model,model_type,elimination_criterion):
if elimination_criterion=='aic':
return AIC(X,model,model_type)
elif elimination_criterion=='bic':
return AIC(X,model,model_type,k=np.log(len(X)))
def detect_dummies(X,variable):
'''
If no dummies simply returns the variable to remove (or add)
'''
cols = X.columns.tolist()
dummy_cols = []
if (len(X[variable].value_counts())==2) and (X[variable].min()==0) and (X[variable].max()==1):
cols.remove(variable)
dummy_cols.append(variable)
    if re.search(r'^([a-zA-Z0-9]+)[\[_]', variable):
        prefix = re.search(r'^([a-zA-Z0-9]+)[\[_]', variable).group(1)
for var in cols:
if prefix in var:
dummy_cols.append(var)
else :
dummy_cols.append(variable)
return dummy_cols
def forwardSelection(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
    '''
    Compute model selection based on elimination_criterion ('aic' or 'bic')
    Forward Selection : from the simple model with only an intercept to the complete model with all variables present in X
    X : predictors, pandas dataframe nxp or array nxp
    y : output, pandas series (DataFrame with one column), nx1, or 1d array of length n
    elimination_criterion : 'aic' or 'bic'
    ----
    returns final model fitted with selected variables
    '''
return __forwardSelectionRaw__(X, y, model_type = model_type,elimination_criterion = elimination_criterion,verbose=verbose)
def backwardSelection(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
    '''
    Compute model selection based on elimination_criterion ('aic' or 'bic')
    Backward Selection : from the complete model with all columns in X to the simple model with only an intercept
    X : predictors, pandas dataframe nxp or array nxp
    y : output, pandas series (DataFrame with one column), nx1, or 1d array of length n
    elimination_criterion : 'aic' or 'bic'
    ----
    returns final model fitted with selected variables
    '''
return __backwardSelectionRaw__(X, y, model_type = model_type,elimination_criterion = elimination_criterion,verbose=verbose )
def bothSelection(X, y, model_type ="linear",elimination_criterion = "aic",start='full',verbose=False):
return __bothSelectionRaw__(X, y, model_type = model_type,elimination_criterion = elimination_criterion,start=start,verbose=verbose)
def __forwardSelectionRaw__(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
cols = X.columns.tolist()
## Begin from a simple model with only intercept
selected_cols = ["Intercept"]
other_cols = cols.copy()
other_cols.remove("Intercept")
model = regressor(y, X[selected_cols],model_type)
criterion = criterion_f(X,model,model_type,elimination_criterion)
for i in range(X.shape[1]):
aicvals = pd.DataFrame(columns = ["Cols","aic"])
for j in other_cols:
cols_to_add = detect_dummies(X,j)
model = regressor(y, X[selected_cols+cols_to_add],model_type)
aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion)]],columns = ["Cols","aic"]),ignore_index=True)
aicvals = aicvals.sort_values(by = ["aic"]).reset_index(drop=True)
if verbose :
print(aicvals)
if aicvals.shape[0] > 0:
new_criterion = aicvals["aic"][0]
if new_criterion < criterion:
cols_to_add = detect_dummies(X,aicvals["Cols"][0])
print("Entered :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
for i in cols_to_add:
selected_cols.append(i)
other_cols.remove(i)
criterion = new_criterion
else:
print("break : criterion")
break
model = regressor(y,X[selected_cols],model_type)
print(model.summary())
print("Criterion: "+str(criterion_f(X,model,model_type,elimination_criterion)))
print("Final Variables:", selected_cols)
return model
def __backwardSelectionRaw__(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
selected_cols = X.columns.tolist()
selected_cols.remove('Intercept')
model = regressor(y,X,model_type)
criterion = criterion_f(X,model,model_type,elimination_criterion)
for i in range(X.shape[1]):
aicvals = pd.DataFrame(columns = ["Cols","aic"])
if len(selected_cols)==0:
print("break : Only Intercept left")
break
else :
for j in selected_cols:
temp_cols = selected_cols.copy()
### Detect dummies and remove several columns if necessary
cols_to_remove = detect_dummies(X,j)
for i in cols_to_remove:
temp_cols.remove(i)
model = regressor(y, X[['Intercept']+temp_cols],model_type)
aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion)]],columns = ["Cols","aic"]),ignore_index=True)
aicvals = aicvals.sort_values(by = ["aic"]).reset_index(drop=True)
if verbose :
print(aicvals)
new_criterion = aicvals["aic"][0]
if new_criterion < criterion:
print("Eliminated :" ,aicvals["Cols"][0],"\tCriterion :", aicvals["aic"][0])
cols_removed = detect_dummies(X,aicvals["Cols"][0])
for i in cols_removed:
selected_cols.remove(i)
criterion = new_criterion
else:
print("break : criterion")
break
model = regressor(y,X[['Intercept']+selected_cols],model_type)
print(str(model.summary())+"\nCriterion: "+ str(criterion_f(X,model,model_type,elimination_criterion)))
print("Final Variables:", selected_cols)
return model
def __bothSelectionRaw__(X, y, model_type ="linear",elimination_criterion = "aic",start='full',verbose=False):
    '''
    Compute model selection based on elimination_criterion ('aic' or 'bic')
    Both-direction Selection : from the complete (full) model with all columns in X to the simple model with only an intercept, trying to add or delete one variable at each step
    X : predictors, pandas dataframe nxp or array nxp
    y : output, pandas series (DataFrame with one column), nx1, or 1d array of length n
    elimination_criterion : 'aic' or 'bic'
    ----
    returns final model fitted with selected variables
    '''
cols = X.columns.tolist()
if start=='full':
removed_cols = []
selected_cols = cols.copy()
selected_cols.remove("Intercept")
else :
selected_cols = []
removed_cols = cols.copy()
removed_cols.remove("Intercept")
model = regressor(y,X[['Intercept']+selected_cols],model_type)
criterion = criterion_f(X,model,model_type,elimination_criterion)
while True :
aicvals = pd.DataFrame(columns = ["Cols","aic",'way'])
###### Try to remove variables still present in the model
        if len(selected_cols) > 0:  # skip deletion moves while nothing is selected yet
            for j in selected_cols:
temp_cols = selected_cols.copy()
### Detect dummies and remove several columns if necessary
cols_to_remove = detect_dummies(X,j)
for i in cols_to_remove:
temp_cols.remove(i)
model = regressor(y, X[['Intercept']+temp_cols],model_type)
aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion),'delete']],columns = ["Cols","aic",'way']),ignore_index=True)
###### Try to add previously removed variables
for j in removed_cols:
cols_to_add = detect_dummies(X,j)
model = regressor(y, X[['Intercept']+selected_cols+cols_to_add],model_type)
aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion),'add']],columns = ["Cols","aic",'way']),ignore_index=True)
aicvals = aicvals.sort_values(by = ["aic"]).reset_index(drop=True)
if verbose :
print(aicvals)
if aicvals.shape[0] > 0:
new_criterion = aicvals["aic"][0]
if new_criterion < criterion:
cols_concerned = detect_dummies(X,aicvals["Cols"][0])
if aicvals["way"][0]=='delete':
print("Eliminated :" ,aicvals["Cols"][0],"\tCriterion :", aicvals["aic"][0])
criterion = new_criterion
for i in cols_concerned:
selected_cols.remove(i)
removed_cols.append(i)
# removed_cols.append(aicvals["Cols"][0])
# selected_cols.remove(aicvals["Cols"][0])
elif aicvals["way"][0]=='add':
print("Entered :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
for i in cols_concerned:
selected_cols.append(i)
removed_cols.remove(i)
# selected_cols.append(aicvals["Cols"][0])
# removed_cols.remove(aicvals["Cols"][0])
criterion = new_criterion
else:
print("break : criterion")
break
model = regressor(y,X[['Intercept']+selected_cols],model_type)
print(str(model.summary())+"\nCriterion: "+ str(criterion_f(X,model,model_type,elimination_criterion)))
print("Final Variables:", selected_cols)
return model
def exhaustivesearch_selectionmodel(X,y,vmin=1,vmax=10):
    '''
    Function to compute an exhaustive search for LINEAR regression y ~ X : test all models with p features from X, with p between vmin and vmax.
    For each size p : select the best model based on MSE.
    Then compute R2, adj R2, Cp and BIC on the selected models.
    X : Dataframe of explanatory variables, WITHOUT intercept column, nxp
    y : Dataframe of output variable
    ---------
    Returns these different criteria in a DataFrame.
    '''
if ('const' in X.columns.tolist()) or ('Intercept' in X.columns.tolist()):
raise SystemExit('Delete Intercept column in X before to pass it to this function')
# sys.exit('Delete Intercept column in X before to pass it to this function')
### First, exhaustive search with LienarRegression() from sklearn and EFS() from mlxtend
### Returns a dictionnary with all estimated models for each model dimension
lm = LinearRegression(fit_intercept=True)
    efs1 = EFS(lm,min_features=vmin,max_features=vmax,scoring='neg_mean_squared_error',print_progress=True,cv=False)
efs1 = efs1.fit(X, y)
#### Find for each model size the best model in terms of (neg) MSE
best_idxs_all = []
    for k in range(vmin, vmax+1):
best_score = -np.infty
best_idx = 0
for i in efs1.subsets_:
if (len(efs1.subsets_[i]['feature_idx'])) == k:
if efs1.subsets_[i]['avg_score'] > best_score:
best_score = efs1.subsets_[i]['avg_score']
best_idx = i
best_idxs_all.append(best_idx)
df_subsets = pd.DataFrame(index=best_idxs_all,columns=['Variables','R2','R2_adj','Cp','BIC','Number of variables (except intercept)'])
X_copy = X.copy()
X_copy = sm.add_constant(X_copy)
full_model = sm.OLS(y,X_copy).fit()
sigma2 = (full_model.ssr)/(len(X_copy)-full_model.df_model-1)
for index in best_idxs_all:
df_subsets['Variables'] = df_subsets['Variables'].astype(object)
variables = (efs1.subsets_[index]['feature_names'])
variables = np.array(variables).tolist()
df_subsets.loc[index,'Number of variables (except intercept)'] = len(variables)
model = sm.OLS(y,X_copy[['const']+variables]).fit()
df_subsets.loc[index,'R2'] = model.rsquared
df_subsets.loc[index,'R2_adj'] = model.rsquared_adj
df_subsets.loc[index,'BIC'] = BIC(X_copy,model)
df_subsets.loc[index,'Cp'] = Cp(X_copy,model,sigma2)
df_subsets.loc[index,'Variables'] = variables
return df_subsets
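
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The stepwise routines expect an explicit 'Intercept' column in X, while the exhaustive
# search expects X *without* it. The variable names below are illustrative only.
def _example_selection(df, target):
    y = df[target]
    X = df.drop(columns=[target]).copy()
    best_subsets = exhaustivesearch_selectionmodel(X, y, vmax=min(10, X.shape[1]))
    X.insert(0, 'Intercept', 1.0)   # required by forward/backward/both selection
    final_model = forwardSelection(X, y, model_type='linear', elimination_criterion='aic')
    return final_model, best_subsets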
|
[
"mlxtend.feature_selection.ExhaustiveFeatureSelector",
"numpy.log",
"statsmodels.api.families.Binomial",
"numpy.array",
"statsmodels.api.add_constant",
"pandas.DataFrame",
"statsmodels.api.OLS",
"sklearn.linear_model.LinearRegression",
"re.search"
] |
[((10412, 10448), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (10428, 10448), False, 'from sklearn.linear_model import LinearRegression\n'), ((10457, 10568), 'mlxtend.feature_selection.ExhaustiveFeatureSelector', 'EFS', (['lm'], {'min_features': '(1)', 'max_features': 'vmax', 'scoring': '"""neg_mean_squared_error"""', 'print_progress': '(True)', 'cv': '(False)'}), "(lm, min_features=1, max_features=vmax, scoring='neg_mean_squared_error',\n print_progress=True, cv=False)\n", (10460, 10568), True, 'from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS\n'), ((10984, 11115), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'best_idxs_all', 'columns': "['Variables', 'R2', 'R2_adj', 'Cp', 'BIC',\n 'Number of variables (except intercept)']"}), "(index=best_idxs_all, columns=['Variables', 'R2', 'R2_adj',\n 'Cp', 'BIC', 'Number of variables (except intercept)'])\n", (10996, 11115), True, 'import pandas as pd\n'), ((11135, 11158), 'statsmodels.api.add_constant', 'sm.add_constant', (['X_copy'], {}), '(X_copy)\n', (11150, 11158), True, 'import statsmodels.api as sm\n'), ((819, 831), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (825, 831), True, 'import statsmodels.api as sm\n'), ((1544, 1588), 're.search', 're.search', (['"""^([a-zA-Z0-9]+)[\\\\[_]"""', 'variable'], {}), "('^([a-zA-Z0-9]+)[\\\\[_]', variable)\n", (1553, 1588), False, 'import re\n'), ((3767, 3804), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Cols', 'aic']"}), "(columns=['Cols', 'aic'])\n", (3779, 3804), True, 'import pandas as pd\n'), ((5110, 5147), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Cols', 'aic']"}), "(columns=['Cols', 'aic'])\n", (5122, 5147), True, 'import pandas as pd\n'), ((7344, 7388), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Cols', 'aic', 'way']"}), "(columns=['Cols', 'aic', 'way'])\n", (7356, 7388), True, 'import pandas as pd\n'), ((11173, 11190), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X_copy'], {}), '(y, X_copy)\n', (11179, 11190), True, 'import statsmodels.api as sm\n'), ((603, 641), 'numpy.log', 'np.log', (['(model.ssr / model.centered_tss)'], {}), '(model.ssr / model.centered_tss)\n', (609, 641), True, 'import numpy as np\n'), ((11426, 11445), 'numpy.array', 'np.array', (['variables'], {}), '(variables)\n', (11434, 11445), True, 'import numpy as np\n'), ((11547, 11587), 'statsmodels.api.OLS', 'sm.OLS', (['y', "X_copy[['const'] + variables]"], {}), "(y, X_copy[['const'] + variables])\n", (11553, 11587), True, 'import statsmodels.api as sm\n'), ((935, 957), 'statsmodels.api.families.Binomial', 'sm.families.Binomial', ([], {}), '()\n', (955, 957), True, 'import statsmodels.api as sm\n'), ((1601, 1645), 're.search', 're.search', (['"""^([a-zA-Z0-9]+)[\\\\[_]"""', 'variable'], {}), "('^([a-zA-Z0-9]+)[\\\\[_]', variable)\n", (1610, 1645), False, 'import re\n')]
|
import numpy as np
import math
import pickle

# `offset` (seconds to skip at the start of each trace) is used below but is not
# defined in this file; assuming a module-level constant here, defaulting to 0.
offset = 0
def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height):
"""
Read and return the viewport data
"""
VIEW_PATH = '../../Viewport/'
view_info = pickle.load(open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb'), encoding='latin1')
if dataset == 1:
max_frame = int(view_info[-1][0]*1.0*fps/milisec)
for i in range(len(view_info)-1):
frame = int(view_info[i][0]*1.0*fps/milisec)
frame += int(offset*1.0*fps/milisec)
frame_nos.append(frame)
if(frame > max_frame):
break
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
elif dataset == 2:
for k in range(len(view_info)-1):
if view_info[k][0]<=offset+60 and view_info[k+1][0]>offset+60:
max_frame = int(view_info[k][0]*1.0*fps/milisec)
break
for k in range(len(view_info)-1):
if view_info[k][0]<=offset and view_info[k+1][0]>offset:
min_index = k+1
break
prev_frame = 0
for i in range(min_index,len(view_info)-1):
frame = int((view_info[i][0])*1.0*fps/milisec)
if frame == prev_frame:
continue
if(frame > max_frame):
break
frame_nos.append(frame)
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
prev_frame = frame
return data, frame_nos, max_frame
def tiling(data, frame_nos, max_frame, width, height, nrow_tiles, ncol_tiles, fps, pred_nframe):
"""
Calculate the tiles corresponding to the viewport and segment them into different chunks
"""
count=0
i=0
act_tiles = []
chunk_frames = []
    # Leave out the first 5 seconds (to keep consistent with our model)
while True:
curr_frame = frame_nos[i]
if curr_frame<5*fps:
i=i+1
[inp_i,x,y]=data[curr_frame]
else:
break
    # Calculate the tiles and store them in chunks
while True:
curr_frame = frame_nos[i]
nframe = min(pred_nframe, max_frame - frame_nos[i])
if(nframe <= 0):
break
# Add the frames that will be in the current chunk
frames = {i}
for k in range(i+1, len(frame_nos)):
if(frame_nos[k] < curr_frame + nframe):
frames.add(k)
else:
i=k
break
if(i!=k):
i=k
if(i==(len(frame_nos)-1)):
break
frames = sorted(frames)
chunk_frames.append(frames)
# Get the actual tile
for k in range(len(frames)):
[inp_k, x_act, y_act] = data[frames[k]]
# print(x_act, y_act)
actual_tile_col = int(x_act * ncol_tiles / width)
actual_tile_row = int(y_act * nrow_tiles / height)
# print(actual_tile_col, actual_tile_row)
actual_tile_row = actual_tile_row-nrow_tiles if(actual_tile_row >= nrow_tiles) else actual_tile_row
actual_tile_col = actual_tile_col-ncol_tiles if(actual_tile_col >= ncol_tiles) else actual_tile_col
actual_tile_row = actual_tile_row+nrow_tiles if actual_tile_row < 0 else actual_tile_row
actual_tile_col = actual_tile_col+ncol_tiles if actual_tile_col < 0 else actual_tile_col
# print(actual_tile_col, actual_tile_row)
# print()
act_tiles.append((actual_tile_row, actual_tile_col))
return act_tiles, chunk_frames
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
"""
Allocates equal bitrate to all the tiles
"""
vid_bitrate = []
for i in range(len(chunk_frames)):
chunk = chunk_frames[i]
chunk_bitrate = [[-1 for x in range(ncol_tiles)] for y in range(nrow_tiles)]
chunk_weight = [[1. for x in range(ncol_tiles)] for y in range(nrow_tiles)]
total_weight = sum(sum(x) for x in chunk_weight)
for x in range(nrow_tiles):
for y in range(ncol_tiles):
                chunk_bitrate[x][y] = chunk_weight[x][y]*pref_bitrate/total_weight
vid_bitrate.append(chunk_bitrate)
return vid_bitrate
def calc_qoe(vid_bitrate, act_tiles, frame_nos, chunk_frames, width, height, nrow_tiles, ncol_tiles, player_width, player_height):
"""
Calculate QoE based on the video bitrates
"""
qoe = 0
prev_qoe_1 = 0
weight_1 = 1
weight_2 = 1
weight_3 = 1
tile_width = width/ncol_tiles
tile_height = height/nrow_tiles
for i in range(len(chunk_frames[:55])):
qoe_1, qoe_2, qoe_3, qoe_4 = 0, 0, 0, 0
tile_count = 0
rows, cols = set(), set()
rate = []
chunk = chunk_frames[i]
chunk_bitrate = vid_bitrate[i]
chunk_act = act_tiles[chunk[0]-chunk_frames[0][0] : chunk[-1]-chunk_frames[0][0]]
for j in range(len(chunk_act)):
if(chunk_act[j][0] not in rows or chunk_act[j][1] not in cols):
tile_count += 1
rows.add(chunk_act[j][0])
cols.add(chunk_act[j][1])
row, col = chunk_act[j][0], chunk_act[j][1]
            # Find the number of tiles that can be accommodated from the center of the viewport
n_tiles_width = math.ceil((player_width/2 - tile_width/2)/tile_width)
n_tiles_height = math.ceil((player_height/2 - tile_height/2)/tile_height)
tot_tiles = (2 * n_tiles_width+1) * (2 * n_tiles_height+1)
local_qoe = 0
local_rate = [] # a new metric to get the standard deviation of bitrate within the player view (qoe2)
for x in range(2*n_tiles_height+1):
for y in range(2*n_tiles_width+1):
sub_row = row - n_tiles_height + x
sub_col = col - n_tiles_width + y
sub_row = nrow_tiles+row+sub_row if sub_row < 0 else sub_row
sub_col = ncol_tiles+col+sub_col if sub_col < 0 else sub_col
sub_row = sub_row-nrow_tiles if sub_row >= nrow_tiles else sub_row
sub_col = sub_col-ncol_tiles if sub_col >= ncol_tiles else sub_col
local_qoe += chunk_bitrate[sub_row][sub_col]
local_rate.append(chunk_bitrate[sub_row][sub_col])
qoe_1 += local_qoe / tot_tiles
if(len(local_rate)>0):
qoe_2 += np.std(local_rate)
rate.append(local_qoe / tot_tiles)
tile_count = 1 if tile_count==0 else tile_count
qoe_1 /= tile_count
qoe_2 /= tile_count
if(len(rate)>0):
qoe_3 = np.std(rate)
qoe_3 /= tile_count
if(i>0):
qoe_4 = abs(prev_qoe_1 - qoe_1)
qoe += qoe_1 - weight_1*qoe_2 - weight_2*qoe_3 - weight_3*qoe_4
prev_qoe_1 = qoe_1
return qoe
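
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Wiring the helpers above together for one user trace; every numeric setting below is
# illustrative, not a value prescribed by the original code.
def _example_pipeline():
    data, frame_nos = [], []
    data, frame_nos, max_frame = get_data(data, frame_nos, dataset=1, topic=0, usernum=1,
                                           fps=30, milisec=1000., width=3840, height=2048,
                                           view_width=3840, view_height=2048)
    act_tiles, chunk_frames = tiling(data, frame_nos, max_frame, width=3840, height=2048,
                                     nrow_tiles=8, ncol_tiles=8, fps=30, pred_nframe=30)
    vid_bitrate = alloc_bitrate(frame_nos, chunk_frames, pref_bitrate=10000,
                                nrow_tiles=8, ncol_tiles=8)
    return calc_qoe(vid_bitrate, act_tiles, frame_nos, chunk_frames, 3840, 2048,
                    8, 8, player_width=1280, player_height=720)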
|
[
"math.ceil",
"numpy.std"
] |
[((5199, 5258), 'math.ceil', 'math.ceil', (['((player_width / 2 - tile_width / 2) / tile_width)'], {}), '((player_width / 2 - tile_width / 2) / tile_width)\n', (5208, 5258), False, 'import math\n'), ((5274, 5336), 'math.ceil', 'math.ceil', (['((player_height / 2 - tile_height / 2) / tile_height)'], {}), '((player_height / 2 - tile_height / 2) / tile_height)\n', (5283, 5336), False, 'import math\n'), ((6345, 6357), 'numpy.std', 'np.std', (['rate'], {}), '(rate)\n', (6351, 6357), True, 'import numpy as np\n'), ((6152, 6170), 'numpy.std', 'np.std', (['local_rate'], {}), '(local_rate)\n', (6158, 6170), True, 'import numpy as np\n')]
|
# Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test for augment contrast node
"""
import numpy as np
import pytest
from peekingduck.pipeline.nodes.augment.contrast import Node
@pytest.fixture
def contrast_same():
node = Node({"input": ["img"], "output": ["img"], "alpha": 1.0})
return node
@pytest.fixture
def contrast_increase():
node = Node({"input": ["img"], "output": ["img"], "alpha": 2.0})
return node
class TestContrast:
def test_no_change(self, contrast_same, create_image):
original_img = create_image((28, 28, 3))
input1 = {"img": original_img}
results = contrast_same.run(input1)
np.testing.assert_equal(original_img, results["img"])
def test_increase_contrast(self, contrast_increase):
original_img = np.ones(shape=(28, 28, 3), dtype=np.uint8)
input1 = {"img": original_img}
results = contrast_increase.run(input1)
assert original_img.shape == results["img"].shape
with pytest.raises(AssertionError):
np.testing.assert_equal(original_img, results["img"])
np.testing.assert_equal(results["img"][0][0], original_img[0][0] * 2)
def test_overflow(self, contrast_increase):
        # Test positive overflow - any value that the contrast scaling pushes
        # above 255 is clipped at 255
bright_img = np.ones(shape=(28, 28, 3), dtype=np.uint8) * 250
bright_input = {"img": bright_img}
results = contrast_increase.run(bright_input)
np.testing.assert_equal(results["img"][0][0], np.array([255, 255, 255]))
    def test_alpha_range(self):
with pytest.raises(ValueError) as excinfo:
Node({"input": ["img"], "output": ["img"], "alpha": -0.5})
assert str(excinfo.value) == "alpha must be between [0.0, 3.0]"
with pytest.raises(ValueError) as excinfo:
Node({"input": ["img"], "output": ["img"], "alpha": 3.1})
assert str(excinfo.value) == "alpha must be between [0.0, 3.0]"
|
[
"numpy.ones",
"numpy.testing.assert_equal",
"numpy.array",
"pytest.raises",
"peekingduck.pipeline.nodes.augment.contrast.Node"
] |
[((763, 820), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': 1.0}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': 1.0})\n", (767, 820), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n'), ((891, 948), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': 2.0}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': 2.0})\n", (895, 948), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n'), ((1186, 1239), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['original_img', "results['img']"], {}), "(original_img, results['img'])\n", (1209, 1239), True, 'import numpy as np\n'), ((1321, 1363), 'numpy.ones', 'np.ones', ([], {'shape': '(28, 28, 3)', 'dtype': 'np.uint8'}), '(shape=(28, 28, 3), dtype=np.uint8)\n', (1328, 1363), True, 'import numpy as np\n'), ((1628, 1697), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["results['img'][0][0]", '(original_img[0][0] * 2)'], {}), "(results['img'][0][0], original_img[0][0] * 2)\n", (1651, 1697), True, 'import numpy as np\n'), ((1523, 1552), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1536, 1552), False, 'import pytest\n'), ((1566, 1619), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['original_img', "results['img']"], {}), "(original_img, results['img'])\n", (1589, 1619), True, 'import numpy as np\n'), ((1878, 1920), 'numpy.ones', 'np.ones', ([], {'shape': '(28, 28, 3)', 'dtype': 'np.uint8'}), '(shape=(28, 28, 3), dtype=np.uint8)\n', (1885, 1920), True, 'import numpy as np\n'), ((2078, 2103), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (2086, 2103), True, 'import numpy as np\n'), ((2150, 2175), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2163, 2175), False, 'import pytest\n'), ((2200, 2258), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': -0.5}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': -0.5})\n", (2204, 2258), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n'), ((2345, 2370), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2358, 2370), False, 'import pytest\n'), ((2395, 2452), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': 3.1}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': 3.1})\n", (2399, 2452), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n')]
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training protocol used for unsupervised disentanglement models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from disentanglement_lib.data.ground_truth import named_data
from disentanglement_lib.data.ground_truth import util
from disentanglement_lib.data.ground_truth.ground_truth_data import *
from disentanglement_lib.methods.shared import losses
from disentanglement_lib.methods.unsupervised import gaussian_encoder_model
from disentanglement_lib.methods.unsupervised import model # pylint: disable=unused-import
from disentanglement_lib.methods.unsupervised.gaussian_encoder_model import GaussianModel
from disentanglement_lib.methods.unsupervised.model import gaussian_log_density
from disentanglement_lib.utils import results
from disentanglement_lib.evaluation.metrics import mig
import numpy as np
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
import gin
import pathlib, shutil
import wandb
from disentanglement_lib.utils.hub import convert_model
from disentanglement_lib.utils.mi_estimators import estimate_entropies
from disentanglement_lib.visualize.visualize_util import plt_sample_traversal
@gin.configurable("train", denylist=[])
class Train(pl.LightningModule):
"""Trains the estimator and exports the snapshot and the gin config.
The use of this function requires the gin binding 'dataset.name' to be
specified as that determines the data set used for training.
Args:
model: GaussianEncoderModel that should be trained and exported.
training_steps: Integer with number of training steps.
random_seed: Integer with random seed used for training.
batch_size: Integer with the batch size.
name: Optional string with name of the model (can be used to name models).
model_num: Optional integer with model number (can be used to identify
models).
"""
def __init__(self,
model=gin.REQUIRED,
training_steps=gin.REQUIRED,
random_seed=gin.REQUIRED,
batch_size=gin.REQUIRED,
opt_name=torch.optim.Adam,
lr=5e-4,
eval_numbers=10,
name="",
model_num=None):
super().__init__()
self.training_steps = training_steps
self.random_seed = random_seed
self.batch_size = batch_size
self.lr = lr
self.name = name
self.model_num = model_num
self.eval_numbers = eval_numbers
wandb.config['dataset'] = gin.query_parameter('dataset.name')
self.save_hyperparameters()
self.opt_name = opt_name
self.data = named_data.get_named_ground_truth_data()
img_shape = np.array(self.data.observation_shape)[[2, 0, 1]].tolist()
# img_shape = [1,64,64]
self.ae = model(img_shape)
def training_step(self, batch, batch_idx):
if (self.global_step + 1) % (self.training_steps // self.eval_numbers) == 0:
self.evaluate()
x = batch
loss, summary = self.ae.model_fn(x.float(), None)
self.log_dict(summary)
return loss
def evaluate(self) -> None:
model = self.ae
model.cpu()
model.eval()
dic_log = {}
dic_log.update(self.visualize_model(model))
wandb.log(dic_log)
model.cuda()
model.train()
def visualize_model(self, model) -> dict:
_encoder, _decoder = convert_model(model)
num_latent = self.ae.num_latent
mu = torch.zeros(1, num_latent)
fig = plt_sample_traversal(mu, _decoder, 8, range(num_latent), 2)
return {'traversal': wandb.Image(fig)}
def train_dataloader(self) -> DataLoader:
dl = DataLoader(self.data,
batch_size=self.batch_size,
num_workers=4,
shuffle=True,
pin_memory=True)
return dl
def configure_optimizers(self):
optimizer = self.opt_name(self.parameters(), lr=self.lr)
return optimizer
def save_model(self, file):
        model_dir = '/tmp/models/' + str(np.random.randint(99999))
        file_path = os.path.join(model_dir, file)
        pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)
        torch.save(self.ae.state_dict(), file_path)
        wandb.save(file_path, base_path=model_dir)
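
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# `Train` pulls its required arguments from gin; a minimal launch could look like this,
# assuming a gin file that binds dataset.name, train.model, train.training_steps, etc.
def _example_run(gin_file):
    gin.parse_config_file(gin_file)
    wandb.init(project="disentanglement")  # Train logs to the active wandb run
    task = Train()                         # required args resolved from gin bindings
    trainer = pl.Trainer(max_steps=task.training_steps)
    trainer.fit(task)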
|
[
"wandb.log",
"disentanglement_lib.methods.unsupervised.model.train",
"disentanglement_lib.methods.unsupervised.model.cuda",
"wandb.save",
"wandb.Image",
"disentanglement_lib.data.ground_truth.named_data.get_named_ground_truth_data",
"disentanglement_lib.methods.unsupervised.model.cpu",
"gin.query_parameter",
"disentanglement_lib.utils.hub.convert_model",
"disentanglement_lib.methods.unsupervised.model",
"os.path.join",
"pathlib.Path",
"disentanglement_lib.methods.unsupervised.model.eval",
"gin.configurable",
"numpy.random.randint",
"numpy.array",
"torch.utils.data.DataLoader",
"torch.zeros"
] |
[((1989, 2027), 'gin.configurable', 'gin.configurable', (['"""train"""'], {'denylist': '[]'}), "('train', denylist=[])\n", (2005, 2027), False, 'import gin\n'), ((3411, 3446), 'gin.query_parameter', 'gin.query_parameter', (['"""dataset.name"""'], {}), "('dataset.name')\n", (3430, 3446), False, 'import gin\n'), ((3536, 3576), 'disentanglement_lib.data.ground_truth.named_data.get_named_ground_truth_data', 'named_data.get_named_ground_truth_data', ([], {}), '()\n', (3574, 3576), False, 'from disentanglement_lib.data.ground_truth import named_data\n'), ((3705, 3721), 'disentanglement_lib.methods.unsupervised.model', 'model', (['img_shape'], {}), '(img_shape)\n', (3710, 3721), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4075, 4086), 'disentanglement_lib.methods.unsupervised.model.cpu', 'model.cpu', ([], {}), '()\n', (4084, 4086), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4095, 4107), 'disentanglement_lib.methods.unsupervised.model.eval', 'model.eval', ([], {}), '()\n', (4105, 4107), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4189, 4207), 'wandb.log', 'wandb.log', (['dic_log'], {}), '(dic_log)\n', (4198, 4207), False, 'import wandb\n'), ((4216, 4228), 'disentanglement_lib.methods.unsupervised.model.cuda', 'model.cuda', ([], {}), '()\n', (4226, 4228), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4237, 4250), 'disentanglement_lib.methods.unsupervised.model.train', 'model.train', ([], {}), '()\n', (4248, 4250), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4327, 4347), 'disentanglement_lib.utils.hub.convert_model', 'convert_model', (['model'], {}), '(model)\n', (4340, 4347), False, 'from disentanglement_lib.utils.hub import convert_model\n'), ((4401, 4427), 'torch.zeros', 'torch.zeros', (['(1)', 'num_latent'], {}), '(1, num_latent)\n', (4412, 4427), False, 'import torch\n'), ((4609, 4709), 'torch.utils.data.DataLoader', 'DataLoader', (['self.data'], {'batch_size': 'self.batch_size', 'num_workers': '(4)', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(self.data, batch_size=self.batch_size, num_workers=4, shuffle=\n True, pin_memory=True)\n', (4619, 4709), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5060, 5083), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (5072, 5083), False, 'import os\n'), ((5205, 5241), 'wandb.save', 'wandb.save', (['file_path'], {'base_path': 'dir'}), '(file_path, base_path=dir)\n', (5215, 5241), False, 'import wandb\n'), ((4531, 4547), 'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (4542, 4547), False, 'import wandb\n'), ((5014, 5038), 'numpy.random.randint', 'np.random.randint', (['(99999)'], {}), '(99999)\n', (5031, 5038), True, 'import numpy as np\n'), ((5092, 5109), 'pathlib.Path', 'pathlib.Path', (['dir'], {}), '(dir)\n', (5104, 5109), False, 'import pathlib, shutil\n'), ((3597, 3634), 'numpy.array', 'np.array', (['self.data.observation_shape'], {}), '(self.data.observation_shape)\n', (3605, 3634), True, 'import numpy as np\n')]
|
"""Models and utilities for processing SMIRNOFF data."""
import abc
import copy
import functools
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from openff.toolkit.topology import Molecule
from openff.toolkit.typing.engines.smirnoff.parameters import (
AngleHandler,
BondHandler,
ChargeIncrementModelHandler,
ConstraintHandler,
ElectrostaticsHandler,
ImproperTorsionHandler,
LibraryChargeHandler,
ParameterHandler,
ProperTorsionHandler,
ToolkitAM1BCCHandler,
UnassignedProperTorsionParameterException,
UnassignedValenceParameterException,
VirtualSiteHandler,
vdWHandler,
)
from openff.units import unit
from openff.units.openmm import from_openmm
from openmm import unit as omm_unit
from pydantic import Field
from typing_extensions import Literal
from openff.interchange.components.potentials import (
Potential,
PotentialHandler,
WrappedPotential,
)
from openff.interchange.exceptions import (
InvalidParameterHandlerError,
MissingParametersError,
SMIRNOFFParameterAttributeNotImplementedError,
)
from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey
from openff.interchange.types import FloatQuantity
kcal_mol = omm_unit.kilocalorie_per_mole
kcal_mol_angstroms = kcal_mol / omm_unit.angstrom ** 2
kcal_mol_radians = kcal_mol / omm_unit.radian ** 2
if TYPE_CHECKING:
from openff.toolkit.topology import Topology
from openff.interchange.components.mdtraj import _OFFBioTop
ElectrostaticsHandlerType = Union[
ElectrostaticsHandler,
ChargeIncrementModelHandler,
LibraryChargeHandler,
ToolkitAM1BCCHandler,
]
T = TypeVar("T", bound="SMIRNOFFPotentialHandler")
TP = TypeVar("TP", bound="PotentialHandler")
class SMIRNOFFPotentialHandler(PotentialHandler, abc.ABC):
"""Base class for handlers storing potentials produced by SMIRNOFF force fields."""
@classmethod
@abc.abstractmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
raise NotImplementedError()
@classmethod
@abc.abstractmethod
def supported_parameters(cls):
"""Return a list of parameter attributes supported by this handler."""
raise NotImplementedError()
# @classmethod
# @abc.abstractmethod
# def valence_terms(cls, topology):
# """Return an interable of all of one type of valence term in this topology."""
# raise NotImplementedError()
@classmethod
def check_supported_parameters(cls, parameter_handler: ParameterHandler):
"""Verify that a parameter handler is in an allowed list of handlers."""
for parameter in parameter_handler.parameters:
for parameter_attribute in parameter._get_defined_parameter_attributes():
if parameter_attribute not in cls.supported_parameters():
raise SMIRNOFFParameterAttributeNotImplementedError(
parameter_attribute,
)
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey]."""
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
# TODO: Should the slot_map always be reset, or should we be able to partially
# update it? Also Note the duplicated code in the child classes
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
topology_key = TopologyKey(atom_indices=key)
potential_key = PotentialKey(
id=val.parameter_type.smirks, associated_handler=parameter_handler_name
)
self.slot_map[topology_key] = potential_key
if self.__class__.__name__ in ["SMIRNOFFBondHandler", "SMIRNOFFAngleHandler"]:
valence_terms = self.valence_terms(topology) # type: ignore[attr-defined]
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=valence_terms,
exception_cls=UnassignedValenceParameterException,
)
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: TP,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFPotentialHandler from toolkit data.
"""
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(type(parameter_handler))
handler = cls()
if hasattr(handler, "fractional_bond_order_method"):
if getattr(parameter_handler, "fractional_bondorder_method", None):
handler.fractional_bond_order_method = ( # type: ignore[attr-defined]
parameter_handler.fractional_bondorder_method # type: ignore[attr-defined]
)
handler.fractional_bond_order_interpolation = ( # type: ignore[attr-defined]
parameter_handler.fractional_bondorder_interpolation # type: ignore[attr-defined]
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
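# A minimal sketch of the contract a concrete subclass fulfils, shown here as a
# hypothetical "Example" handler (illustrative only; not part of this module —
# SomeToolkitParameterHandler and the "k"/"x0" attributes are assumptions):
#
#     class SMIRNOFFExampleHandler(SMIRNOFFPotentialHandler):
#         type: Literal["Example"] = "Example"
#         expression: Literal["k*(x-x0)**2"] = "k*(x-x0)**2"
#
#         @classmethod
#         def allowed_parameter_handlers(cls):
#             return [SomeToolkitParameterHandler]  # hypothetical toolkit class
#
#         @classmethod
#         def supported_parameters(cls):
#             return ["smirks", "id", "k", "x0"]
#
#         def store_potentials(self, parameter_handler):
#             for potential_key in self.slot_map.values():
#                 parameter = parameter_handler.get_parameter(
#                     {"smirks": potential_key.id}
#                 )[0]
#                 self.potentials[potential_key] = Potential(
#                     parameters={"k": parameter.k, "x0": parameter.x0}
#                 )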
class SMIRNOFFBondHandler(SMIRNOFFPotentialHandler):
"""Handler storing bond potentials as produced by a SMIRNOFF force field."""
type: Literal["Bonds"] = "Bonds"
expression: Literal["k/2*(r-length)**2"] = "k/2*(r-length)**2"
fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
fractional_bond_order_interpolation: Literal["linear"] = "linear"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [BondHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "length", "k_bondorder", "length_bondorder"]
@classmethod
def valence_terms(cls, topology):
"""Return all bonds in this topology."""
return [list(b.atoms) for b in topology.topology_bonds]
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
# TODO: Should the slot_map always be reset, or should we be able to partially
            # update it? Also note the duplicated code in the child classes.
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
param = val.parameter_type
if param.k_bondorder or param.length_bondorder:
top_bond = topology.get_bond_between(*key)
fractional_bond_order = top_bond.bond.fractional_bond_order
if not fractional_bond_order:
raise RuntimeError(
"Bond orders should already be assigned at this point"
)
else:
fractional_bond_order = None
topology_key = TopologyKey(
atom_indices=key, bond_order=fractional_bond_order
)
potential_key = PotentialKey(
id=val.parameter_type.smirks,
associated_handler=parameter_handler_name,
bond_order=fractional_bond_order,
)
self.slot_map[topology_key] = potential_key
valence_terms = self.valence_terms(topology)
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=valence_terms,
exception_cls=UnassignedValenceParameterException,
)
def store_potentials(self, parameter_handler: "BondHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
if self.potentials:
self.potentials = dict()
for topology_key, potential_key in self.slot_map.items():
smirks = potential_key.id
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
if topology_key.bond_order: # type: ignore[union-attr]
bond_order = topology_key.bond_order # type: ignore[union-attr]
if parameter.k_bondorder:
data = parameter.k_bondorder
else:
data = parameter.length_bondorder
coeffs = _get_interpolation_coeffs(
fractional_bond_order=bond_order,
data=data,
)
pots = []
map_keys = [*data.keys()]
for map_key in map_keys:
pots.append(
Potential(
parameters={
"k": parameter.k_bondorder[map_key],
"length": parameter.length_bondorder[map_key],
},
map_key=map_key,
)
)
potential = WrappedPotential(
{pot: coeff for pot, coeff in zip(pots, coeffs)}
)
else:
potential = Potential( # type: ignore[assignment]
parameters={
"k": parameter.k,
"length": parameter.length,
},
)
self.potentials[potential_key] = potential
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: "BondHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFBondHandler from toolkit data.
"""
# TODO: This method overrides SMIRNOFFPotentialHandler.from_toolkit in order to gobble up
# a ConstraintHandler. This seems like a good solution for the interdependence, but is also
# not a great practice. A better solution would involve not overriding the method with a
# different function signature.
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError
handler: T = cls(type="Bonds", expression="k/2*(r-length)**2")
if (
any(
getattr(p, "k_bondorder", None) is not None
for p in parameter_handler.parameters
)
) or (
any(
getattr(p, "length_bondorder", None) is not None
for p in parameter_handler.parameters
)
):
for ref_mol in topology.reference_molecules:
                # TODO: expose conformer generation and fractional bond order assignment
# knobs to user via API
ref_mol.generate_conformers(n_conformers=1)
ref_mol.assign_fractional_bond_orders(
bond_order_model=handler.fractional_bond_order_method.lower(), # type: ignore[attr-defined]
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
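# A worked sketch of the bond-order interpolation performed above (hypothetical
# numbers, for illustration only): a parameter with k_bondorder
# {1: 500, 2: 700} kcal/mol/A**2 applied to a bond whose fractional bond order
# is 1.2 yields interpolation coefficients (0.8, 0.2), so the WrappedPotential
# represents an effective k of 0.8 * 500 + 0.2 * 700 = 540 kcal/mol/A**2.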
class SMIRNOFFConstraintHandler(SMIRNOFFPotentialHandler):
"""Handler storing constraint potentials as produced by a SMIRNOFF force field."""
type: Literal["Constraints"] = "Constraints"
expression: Literal[""] = ""
constraints: Dict[
PotentialKey, bool
] = dict() # should this be named potentials for consistency?
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [BondHandler, ConstraintHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "length", "distance"]
@classmethod
def _from_toolkit( # type: ignore[override]
cls: Type[T],
parameter_handler: List,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFPotentialHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
for parameter_handler in parameter_handlers:
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(type(parameter_handler))
handler = cls()
handler.store_constraints( # type: ignore[attr-defined]
parameter_handlers=parameter_handlers, topology=topology
)
return handler
def store_constraints(
self,
parameter_handlers: Any,
topology: "_OFFBioTop",
) -> None:
"""Store constraints."""
if self.slot_map:
self.slot_map = dict()
constraint_handler = [
p for p in parameter_handlers if type(p) == ConstraintHandler
][0]
constraint_matches = constraint_handler.find_matches(topology)
if any([type(p) == BondHandler for p in parameter_handlers]):
bond_handler = [p for p in parameter_handlers if type(p) == BondHandler][0]
bonds = SMIRNOFFBondHandler._from_toolkit(
parameter_handler=bond_handler,
topology=topology,
)
else:
bond_handler = None
bonds = None
for key, match in constraint_matches.items():
topology_key = TopologyKey(atom_indices=key)
smirks = match.parameter_type.smirks
distance = match.parameter_type.distance
if distance is not None:
# This constraint parameter is fully specified
potential_key = PotentialKey(
id=smirks, associated_handler="Constraints"
)
distance = match.parameter_type.distance
else:
# This constraint parameter depends on the BondHandler ...
if bond_handler is None:
raise MissingParametersError(
f"Constraint with SMIRKS pattern {smirks} found with no distance "
"specified, and no corresponding bond parameters were found. The distance "
"of this constraint is not specified."
)
# ... so use the same PotentialKey instance as the BondHandler to look up the distance
potential_key = bonds.slot_map[topology_key] # type: ignore[union-attr]
self.slot_map[topology_key] = potential_key
distance = bonds.potentials[potential_key].parameters["length"] # type: ignore[union-attr]
potential = Potential(
parameters={
"distance": distance,
}
)
self.constraints[potential_key] = potential # type: ignore[assignment]
class SMIRNOFFAngleHandler(SMIRNOFFPotentialHandler):
"""Handler storing angle potentials as produced by a SMIRNOFF force field."""
type: Literal["Angles"] = "Angles"
expression: Literal["k/2*(theta-angle)**2"] = "k/2*(theta-angle)**2"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [AngleHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attributes."""
return ["smirks", "id", "k", "angle"]
@classmethod
def valence_terms(cls, topology):
"""Return all angles in this topology."""
return list(topology.angles)
def store_potentials(self, parameter_handler: "AngleHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for potential_key in self.slot_map.values():
smirks = potential_key.id
# ParameterHandler.get_parameter returns a list, although this
# should only ever be length 1
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
potential = Potential(
parameters={
"k": parameter.k,
"angle": parameter.angle,
},
)
self.potentials[potential_key] = potential
@classmethod
    def _from_toolkit(
cls: Type[T],
parameter_handler: "AngleHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFAngleHandler from toolkit data.
"""
handler = cls()
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFProperTorsionHandler(SMIRNOFFPotentialHandler):
"""Handler storing proper torsions potentials as produced by a SMIRNOFF force field."""
type: Literal["ProperTorsions"] = "ProperTorsions"
expression: Literal[
"k*(1+cos(periodicity*theta-phase))"
] = "k*(1+cos(periodicity*theta-phase))"
fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
fractional_bond_order_interpolation: Literal["linear"] = "linear"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [ProperTorsionHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "periodicity", "phase", "idivf", "k_bondorder"]
def store_matches(
self,
parameter_handler: "ProperTorsionHandler",
topology: "_OFFBioTop",
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
if self.slot_map:
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
param = val.parameter_type
n_terms = len(val.parameter_type.phase)
for n in range(n_terms):
smirks = param.smirks
if param.k_bondorder:
# The relevant bond order is that of the _central_ bond in the torsion
top_bond = topology.get_bond_between(key[1], key[2])
fractional_bond_order = top_bond.bond.fractional_bond_order
if not fractional_bond_order:
raise RuntimeError(
"Bond orders should already be assigned at this point"
)
else:
fractional_bond_order = None
topology_key = TopologyKey(
atom_indices=key, mult=n, bond_order=fractional_bond_order
)
potential_key = PotentialKey(
id=smirks,
mult=n,
associated_handler="ProperTorsions",
bond_order=fractional_bond_order,
)
self.slot_map[topology_key] = potential_key
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=list(topology.propers),
exception_cls=UnassignedProperTorsionParameterException,
)
def store_potentials(self, parameter_handler: "ProperTorsionHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for topology_key, potential_key in self.slot_map.items():
smirks = potential_key.id
n = potential_key.mult
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
# n_terms = len(parameter.k)
if topology_key.bond_order: # type: ignore[union-attr]
bond_order = topology_key.bond_order # type: ignore[union-attr]
data = parameter.k_bondorder[n]
coeffs = _get_interpolation_coeffs(
fractional_bond_order=bond_order,
data=data,
)
pots = []
map_keys = [*data.keys()]
for map_key in map_keys:
parameters = {
"k": parameter.k_bondorder[n][map_key],
"periodicity": parameter.periodicity[n] * unit.dimensionless,
"phase": parameter.phase[n],
"idivf": parameter.idivf[n] * unit.dimensionless,
}
pots.append(
Potential(
parameters=parameters,
map_key=map_key,
)
)
potential = WrappedPotential(
{pot: coeff for pot, coeff in zip(pots, coeffs)}
)
else:
parameters = {
"k": parameter.k[n],
"periodicity": parameter.periodicity[n] * unit.dimensionless,
"phase": parameter.phase[n],
"idivf": parameter.idivf[n] * unit.dimensionless,
}
potential = Potential(parameters=parameters) # type: ignore[assignment]
self.potentials[potential_key] = potential
class SMIRNOFFImproperTorsionHandler(SMIRNOFFPotentialHandler):
"""Handler storing improper torsions potentials as produced by a SMIRNOFF force field."""
type: Literal["ImproperTorsions"] = "ImproperTorsions"
expression: Literal[
"k*(1+cos(periodicity*theta-phase))"
] = "k*(1+cos(periodicity*theta-phase))"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [ImproperTorsionHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "periodicity", "phase", "idivf"]
def store_matches(
self, parameter_handler: "ImproperTorsionHandler", topology: "_OFFBioTop"
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
if self.slot_map:
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
parameter_handler._assert_correct_connectivity(
val,
[
(0, 1),
(1, 2),
(1, 3),
],
)
n_terms = len(val.parameter_type.k)
for n in range(n_terms):
smirks = val.parameter_type.smirks
non_central_indices = [key[0], key[2], key[3]]
for permuted_key in [
(
non_central_indices[i],
non_central_indices[j],
non_central_indices[k],
)
for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
]:
topology_key = TopologyKey(
atom_indices=(key[1], *permuted_key), mult=n
)
potential_key = PotentialKey(
id=smirks, mult=n, associated_handler="ImproperTorsions"
)
self.slot_map[topology_key] = potential_key
def store_potentials(self, parameter_handler: "ImproperTorsionHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for potential_key in self.slot_map.values():
smirks = potential_key.id
n = potential_key.mult
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
parameters = {
"k": parameter.k[n],
"periodicity": parameter.periodicity[n] * unit.dimensionless,
"phase": parameter.phase[n],
"idivf": 3.0 * unit.dimensionless,
}
potential = Potential(parameters=parameters)
self.potentials[potential_key] = potential
class _SMIRNOFFNonbondedHandler(SMIRNOFFPotentialHandler, abc.ABC):
"""Base class for handlers storing non-bonded potentials produced by SMIRNOFF force fields."""
type: Literal["nonbonded"] = "nonbonded"
cutoff: FloatQuantity["angstrom"] = Field( # type: ignore
9.0 * unit.angstrom,
description="The distance at which pairwise interactions are truncated",
)
scale_13: float = Field(
0.0, description="The scaling factor applied to 1-3 interactions"
)
scale_14: float = Field(
0.5, description="The scaling factor applied to 1-4 interactions"
)
scale_15: float = Field(
1.0, description="The scaling factor applied to 1-5 interactions"
)
class SMIRNOFFvdWHandler(_SMIRNOFFNonbondedHandler):
"""Handler storing vdW potentials as produced by a SMIRNOFF force field."""
type: Literal["vdW"] = "vdW" # type: ignore[assignment]
expression: Literal[
"4*epsilon*((sigma/r)**12-(sigma/r)**6)"
] = "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
method: Literal["cutoff", "pme", "no-cutoff"] = Field("cutoff")
mixing_rule: Literal["lorentz-berthelot", "geometric"] = Field(
"lorentz-berthelot",
description="The mixing rule (combination rule) used in computing pairwise vdW interactions",
)
switch_width: FloatQuantity["angstrom"] = Field( # type: ignore
1.0 * unit.angstrom,
description="The width over which the switching function is applied",
)
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [vdWHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attributes."""
return ["smirks", "id", "sigma", "epsilon", "rmin_half"]
def store_potentials(self, parameter_handler: vdWHandler) -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
self.method = parameter_handler.method.lower()
self.cutoff = parameter_handler.cutoff
for potential_key in self.slot_map.values():
smirks = potential_key.id
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
try:
potential = Potential(
parameters={
"sigma": parameter.sigma,
"epsilon": parameter.epsilon,
},
)
            except AttributeError:
                # Handle rmin_half pending https://github.com/openforcefield/openff-toolkit/pull/750:
                # fall back to converting rmin_half to sigma (rmin = 2 ** (1 / 6) * sigma).
                potential = Potential(
                    parameters={
                        "sigma": 2.0 * parameter.rmin_half * 2 ** (-1.0 / 6.0),
                        "epsilon": parameter.epsilon,
                    },
                )
self.potentials[potential_key] = potential
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: "vdWHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFvdWHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
for parameter_handler in parameter_handlers:
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(
f"Found parameter handler type {type(parameter_handler)}, which is not "
f"supported by potential type {type(cls)}"
)
handler = cls(
scale_13=parameter_handler.scale13,
scale_14=parameter_handler.scale14,
scale_15=parameter_handler.scale15,
cutoff=parameter_handler.cutoff,
mixing_rule=parameter_handler.combining_rules.lower(),
method=parameter_handler.method.lower(),
switch_width=parameter_handler.switch_width,
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
@classmethod
def parameter_handler_precedence(cls) -> List[str]:
"""
Return the order in which parameter handlers take precedence when computing charges.
"""
return ["vdw", "VirtualSites"]
def _from_toolkit_virtual_sites(
self,
parameter_handler: "VirtualSiteHandler",
topology: "Topology",
):
# TODO: Merge this logic into _from_toolkit
if not all(
isinstance(
p,
(
VirtualSiteHandler.VirtualSiteBondChargeType,
VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
VirtualSiteHandler.VirtualSiteDivalentLonePairType,
VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
),
)
for p in parameter_handler.parameters
):
raise NotImplementedError("Found unsupported virtual site types")
matches = parameter_handler.find_matches(topology)
for atoms, parameter_match in matches.items():
virtual_site_type = parameter_match[0].parameter_type
top_key = VirtualSiteKey(
atom_indices=atoms,
type=virtual_site_type.type,
match=virtual_site_type.match,
)
pot_key = PotentialKey(
id=virtual_site_type.smirks, associated_handler=virtual_site_type.type
)
pot = Potential(
parameters={
"sigma": virtual_site_type.sigma,
"epsilon": virtual_site_type.epsilon,
# "distance": virtual_site_type.distance,
}
)
# if virtual_site_type.type in {"MonovalentLonePair", "DivalentLonePair"}:
# pot.parameters.update(
# {
# "outOfPlaneAngle": virtual_site_type.outOfPlaneAngle,
# }
# )
# if virtual_site_type.type in {"MonovalentLonePair"}:
# pot.parameters.update(
# {
# "inPlaneAngle": virtual_site_type.inPlaneAngle,
# }
# )
self.slot_map.update({top_key: pot_key})
self.potentials.update({pot_key: pot})
class SMIRNOFFElectrostaticsHandler(_SMIRNOFFNonbondedHandler):
"""
A handler which stores any electrostatic parameters applied to a topology.
This handler is responsible for grouping together
* global settings for the electrostatic interactions such as the cutoff distance
and the intramolecular scale factors.
* partial charges which have been assigned by a ``ToolkitAM1BCC``,
``LibraryCharges``, or a ``ChargeIncrementModel`` parameter
handler.
* charge corrections applied by a ``SMIRNOFFChargeIncrementHandler``.
rather than having each in their own handler.
"""
type: Literal["Electrostatics"] = "Electrostatics" # type: ignore[assignment]
expression: Literal["coul"] = "coul"
method: Literal["pme", "cutoff", "reaction-field", "no-cutoff"] = Field("pme")
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [
LibraryChargeHandler,
ChargeIncrementModelHandler,
ToolkitAM1BCCHandler,
ElectrostaticsHandler,
]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
pass
@property
def charges(self) -> Dict[Union[TopologyKey, VirtualSiteKey], unit.Quantity]:
"""Get the total partial charge on each atom, excluding virtual sites."""
return self.get_charges(include_virtual_sites=False)
@property
def charges_with_virtual_sites(
self,
) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
"""Get the total partial charge on each atom, including virtual sites."""
return self.get_charges(include_virtual_sites=True)
def get_charges(
self, include_virtual_sites=False
) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
"""Get the total partial charge on each atom or particle."""
charges: DefaultDict[
Union[TopologyKey, VirtualSiteKey], FloatQuantity
] = defaultdict(lambda: 0.0 * unit.e)
for topology_key, potential_key in self.slot_map.items():
potential = self.potentials[potential_key]
for parameter_key, parameter_value in potential.parameters.items():
if parameter_key == "charge_increments":
if type(topology_key) != VirtualSiteKey:
raise RuntimeError
charge = -1.0 * np.sum(parameter_value)
# assumes virtual sites can only have charges determined in one step
# also, topology_key is actually a VirtualSiteKey
charges[topology_key] = charge
elif parameter_key in ["charge", "charge_increment"]:
charge = parameter_value
charges[topology_key.atom_indices[0]] += charge # type: ignore
else:
raise NotImplementedError()
returned_charges: Dict[
Union[VirtualSiteKey, TopologyKey], unit.Quantity
] = dict()
for index, charge in charges.items():
if isinstance(index, int):
returned_charges[TopologyKey(atom_indices=(index,))] = charge
else:
if include_virtual_sites:
returned_charges[index] = charge
return returned_charges
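    # A short usage sketch (hypothetical objects, for illustration only):
    #
    #     handler = SMIRNOFFElectrostaticsHandler._from_toolkit(
    #         parameter_handler=[electrostatics_handler, am1bcc_handler],
    #         topology=topology,
    #     )
    #     for topology_key, charge in handler.charges.items():
    #         ...  # charge is a unit.Quantity carrying the partial charge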
@classmethod
def parameter_handler_precedence(cls) -> List[str]:
"""
Return the order in which parameter handlers take precedence when computing charges.
"""
return ["LibraryCharges", "ChargeIncrementModel", "ToolkitAM1BCC"]
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: Any,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFElectrostaticsHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
toolkit_handler_with_metadata = [
p for p in parameter_handlers if type(p) == ElectrostaticsHandler
][0]
handler = cls(
type=toolkit_handler_with_metadata._TAGNAME,
scale_13=toolkit_handler_with_metadata.scale13,
scale_14=toolkit_handler_with_metadata.scale14,
scale_15=toolkit_handler_with_metadata.scale15,
cutoff=toolkit_handler_with_metadata.cutoff,
method=toolkit_handler_with_metadata.method.lower(),
)
handler.store_matches(parameter_handlers, topology)
return handler
def _from_toolkit_virtual_sites(
self,
parameter_handler: "VirtualSiteHandler",
topology: "Topology",
):
# TODO: Merge this logic into _from_toolkit
if not all(
isinstance(
p,
(
VirtualSiteHandler.VirtualSiteBondChargeType,
VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
VirtualSiteHandler.VirtualSiteDivalentLonePairType,
VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
),
)
for p in parameter_handler.parameters
):
raise NotImplementedError("Found unsupported virtual site types")
matches = parameter_handler.find_matches(topology)
for atom_indices, parameter_match in matches.items():
virtual_site_type = parameter_match[0].parameter_type
virtual_site_key = VirtualSiteKey(
atom_indices=atom_indices,
type=virtual_site_type.type,
match=virtual_site_type.match,
)
virtual_site_potential_key = PotentialKey(
id=virtual_site_type.smirks,
associated_handler="VirtualSiteHandler",
)
virtual_site_potential = Potential(
parameters={
"charge_increments": from_openmm(
virtual_site_type.charge_increment
),
}
)
matches = {}
potentials = {}
self.slot_map.update({virtual_site_key: virtual_site_potential_key})
self.potentials.update({virtual_site_potential_key: virtual_site_potential})
# TODO: Counter-intuitive that toolkit regression tests pass by using the counter
# variable i as if it was the atom index - shouldn't it just use atom_index?
for i, atom_index in enumerate(atom_indices): # noqa
topology_key = TopologyKey(atom_indices=(i,), mult=2)
potential_key = PotentialKey(
id=virtual_site_type.smirks,
mult=i,
associated_handler="VirtualSiteHandler",
)
charge_increment = getattr(
virtual_site_type, f"charge_increment{i + 1}"
)
potential = Potential(
parameters={"charge_increment": from_openmm(charge_increment)}
)
matches[topology_key] = potential_key
potentials[potential_key] = potential
self.slot_map.update(matches)
self.potentials.update(potentials)
@classmethod
@functools.lru_cache(None)
def _compute_partial_charges(cls, molecule: Molecule, method: str) -> unit.Quantity:
"""Call out to the toolkit's toolkit wrappers to generate partial charges."""
molecule = copy.deepcopy(molecule)
molecule.assign_partial_charges(method)
return from_openmm(molecule.partial_charges)
@classmethod
def _library_charge_to_potentials(
cls,
atom_indices: Tuple[int, ...],
parameter: LibraryChargeHandler.LibraryChargeType,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Map a matched library charge parameter to a set of potentials.
"""
matches = {}
potentials = {}
for i, (atom_index, charge) in enumerate(zip(atom_indices, parameter.charge)):
topology_key = TopologyKey(atom_indices=(atom_index,))
potential_key = PotentialKey(
id=parameter.smirks, mult=i, associated_handler="LibraryCharges"
)
potential = Potential(parameters={"charge": from_openmm(charge)})
matches[topology_key] = potential_key
potentials[potential_key] = potential
return matches, potentials
@classmethod
def _charge_increment_to_potentials(
cls,
atom_indices: Tuple[int, ...],
parameter: ChargeIncrementModelHandler.ChargeIncrementType,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Map a matched charge increment parameter to a set of potentials.
"""
matches = {}
potentials = {}
for i, atom_index in enumerate(atom_indices):
topology_key = TopologyKey(atom_indices=(atom_index,))
potential_key = PotentialKey(
id=parameter.smirks, mult=i, associated_handler="ChargeIncrementModel"
)
# TODO: Handle the cases where n - 1 charge increments have been defined,
# maybe by implementing this in the TK?
charge_increment = getattr(parameter, f"charge_increment{i + 1}")
potential = Potential(
parameters={"charge_increment": from_openmm(charge_increment)}
)
matches[topology_key] = potential_key
potentials[potential_key] = potential
return matches, potentials
@classmethod
def _find_slot_matches(
cls,
parameter_handler: Union["LibraryChargeHandler", "ChargeIncrementModelHandler"],
reference_molecule: Molecule,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Construct a slot and potential map for a slot based parameter handler.
"""
# Ideally this would be made redundant by OpenFF TK #971
unique_parameter_matches = {
tuple(sorted(key)): (key, val)
for key, val in parameter_handler.find_matches(
reference_molecule.to_topology()
).items()
}
parameter_matches = {key: val for key, val in unique_parameter_matches.values()}
matches, potentials = {}, {}
for key, val in parameter_matches.items():
parameter = val.parameter_type
if isinstance(parameter_handler, LibraryChargeHandler):
(
parameter_matches,
parameter_potentials,
) = cls._library_charge_to_potentials(key, parameter)
elif isinstance(parameter_handler, ChargeIncrementModelHandler):
(
parameter_matches,
parameter_potentials,
) = cls._charge_increment_to_potentials(key, parameter)
else:
raise NotImplementedError()
matches.update(parameter_matches)
potentials.update(parameter_potentials)
return matches, potentials
@classmethod
def _find_am1_matches(
cls,
parameter_handler: Union["ToolkitAM1BCCHandler", ChargeIncrementModelHandler],
reference_molecule: Molecule,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""Construct a slot and potential map for a charge model based parameter handler."""
reference_molecule = copy.deepcopy(reference_molecule)
reference_smiles = reference_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
method = getattr(parameter_handler, "partial_charge_method", "am1bcc")
partial_charges = cls._compute_partial_charges(
reference_molecule, method=method
)
matches = {}
potentials = {}
for i, partial_charge in enumerate(partial_charges):
potential_key = PotentialKey(
id=reference_smiles, mult=i, associated_handler="ToolkitAM1BCC"
)
potentials[potential_key] = Potential(parameters={"charge": partial_charge})
matches[TopologyKey(atom_indices=(i,))] = potential_key
return matches, potentials
@classmethod
def _find_reference_matches(
cls,
parameter_handlers: Dict[str, "ElectrostaticsHandlerType"],
reference_molecule: Molecule,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Construct a slot and potential map for a particular reference molecule and set of parameter handlers.
"""
matches = {}
potentials = {}
expected_matches = {i for i in range(reference_molecule.n_atoms)}
for handler_type in cls.parameter_handler_precedence():
if handler_type not in parameter_handlers:
continue
parameter_handler = parameter_handlers[handler_type]
slot_matches, am1_matches = None, None
slot_potentials: Dict = {}
am1_potentials: Dict = {}
if handler_type in ["LibraryCharges", "ChargeIncrementModel"]:
slot_matches, slot_potentials = cls._find_slot_matches(
parameter_handler, reference_molecule
)
if handler_type in ["ToolkitAM1BCC", "ChargeIncrementModel"]:
am1_matches, am1_potentials = cls._find_am1_matches(
parameter_handler, reference_molecule
)
if slot_matches is None and am1_matches is None:
raise NotImplementedError()
elif slot_matches is not None and am1_matches is not None:
am1_matches = {
TopologyKey(
atom_indices=topology_key.atom_indices, mult=0
): potential_key
for topology_key, potential_key in am1_matches.items()
}
slot_matches = {
TopologyKey(
atom_indices=topology_key.atom_indices, mult=1
): potential_key
for topology_key, potential_key in slot_matches.items()
}
matched_atom_indices = {
index for key in slot_matches for index in key.atom_indices
}
matched_atom_indices.intersection_update(
{index for key in am1_matches for index in key.atom_indices}
)
elif slot_matches is not None:
matched_atom_indices = {
index for key in slot_matches for index in key.atom_indices
}
else:
matched_atom_indices = {
index for key in am1_matches for index in key.atom_indices # type: ignore[union-attr]
}
if matched_atom_indices != expected_matches:
# Handle the case where a handler could not fully assign the charges
# to the whole molecule.
continue
matches.update(slot_matches if slot_matches is not None else {})
matches.update(am1_matches if am1_matches is not None else {})
potentials.update(slot_potentials)
potentials.update(am1_potentials)
break
found_matches = {index for key in matches for index in key.atom_indices}
if found_matches != expected_matches:
raise RuntimeError(
f"{reference_molecule.to_smiles(explicit_hydrogens=False)} could "
f"not be fully assigned charges."
)
return matches, potentials
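    # Precedence sketch (hypothetical scenario, for illustration only): if a
    # LibraryCharges parameter covers only some atoms of a reference molecule,
    # the loop above discards that handler's partial matches (the
    # matched_atom_indices != expected_matches check) and falls through to the
    # next handler in parameter_handler_precedence(), e.g. ToolkitAM1BCC;
    # only a handler that fully covers the molecule breaks out of the loop.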
def store_matches(
self,
parameter_handler: Union[
"ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
],
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
# Reshape the parameter handlers into a dictionary for easier referencing.
parameter_handlers = {
handler._TAGNAME: handler
for handler in (
parameter_handler
if isinstance(parameter_handler, list)
else [parameter_handler]
)
}
self.potentials = dict()
self.slot_map = dict()
reference_molecules = [*topology.reference_molecules]
for reference_molecule in reference_molecules:
matches, potentials = self._find_reference_matches(
parameter_handlers, reference_molecule
)
match_mults = defaultdict(set)
for top_key in matches:
match_mults[top_key.atom_indices].add(top_key.mult)
self.potentials.update(potentials)
for top_mol in topology._reference_molecule_to_topology_molecules[
reference_molecule
]:
for topology_particle in top_mol.atoms:
reference_index = topology_particle.atom.molecule_particle_index
topology_index = topology_particle.topology_particle_index
for mult in match_mults[(reference_index,)]:
top_key = TopologyKey(atom_indices=(topology_index,), mult=mult)
self.slot_map[top_key] = matches[
TopologyKey(atom_indices=(reference_index,), mult=mult)
]
def store_potentials(
self,
parameter_handler: Union[
"ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
],
) -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
# This logic is handled by ``store_matches`` as we may need to create potentials
# to store depending on the handler type.
pass
class SMIRNOFFVirtualSiteHandler(SMIRNOFFPotentialHandler):
"""
A handler which stores the information necessary to construct virtual sites (virtual particles).
"""
type: Literal["Bonds"] = "Bonds"
expression: Literal[""] = ""
virtual_site_key_topology_index_map: Dict["VirtualSiteKey", int] = Field(
dict(),
description="A mapping between VirtualSiteKey objects (stored analogously to TopologyKey objects"
"in other handlers) and topology indices describing the associated virtual site",
)
exclusion_policy: Literal["parents"] = "parents"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [VirtualSiteHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of parameter attributes supported by this handler."""
return ["distance", "outOfPlaneAngle", "inPlaneAngle"]
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey].
        Differs from SMIRNOFFPotentialHandler.store_matches in that each key
        can point to multiple potentials; each value in the matches dict is a
        list of parameter types, whereas conventional handlers match a single
        parameter type per key.
"""
virtual_site_index = topology.n_topology_atoms
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val_list in matches.items():
for val in val_list:
virtual_site_key = VirtualSiteKey(
atom_indices=key,
type=val.parameter_type.type,
match=val.parameter_type.match,
)
potential_key = PotentialKey(
id=val.parameter_type.smirks,
associated_handler=parameter_handler_name,
)
self.slot_map[virtual_site_key] = potential_key
self.virtual_site_key_topology_index_map[
virtual_site_key
] = virtual_site_index
virtual_site_index += 1
def store_potentials(self, parameter_handler: ParameterHandler) -> None:
"""Store VirtualSite-specific parameter-like data."""
if self.potentials:
self.potentials = dict()
for potential_key in self.slot_map.values():
smirks = potential_key.id
parameter_type = parameter_handler.get_parameter({"smirks": smirks})[0]
potential = Potential(
parameters={
"distance": parameter_type.distance,
},
)
for attr in ["outOfPlaneAngle", "inPlaneAngle"]:
if hasattr(parameter_type, attr):
potential.parameters.update(
{attr: from_openmm(getattr(parameter_type, attr))}
)
self.potentials[potential_key] = potential
    def _get_local_frame_weights(self, virtual_site_key: "VirtualSiteKey"):
        """Return the local-frame origin, x, and y weights for a virtual site."""
if virtual_site_key.type == "BondCharge":
origin_weight = [1.0, 0.0]
x_direction = [-1.0, 1.0]
y_direction = [-1.0, 1.0]
elif virtual_site_key.type == "MonovalentLonePair":
origin_weight = [1, 0.0, 0.0]
x_direction = [-1.0, 1.0, 0.0]
y_direction = [-1.0, 0.0, 1.0]
elif virtual_site_key.type == "DivalentLonePair":
origin_weight = [0.0, 1.0, 0.0]
x_direction = [0.5, -1.0, 0.5]
y_direction = [1.0, -1.0, 1.0]
elif virtual_site_key.type == "TrivalentLonePair":
origin_weight = [0.0, 1.0, 0.0, 0.0]
x_direction = [1 / 3, -1.0, 1 / 3, 1 / 3]
y_direction = [1.0, -1.0, 0.0, 0.0]
        else:
            raise NotImplementedError(
                f"Virtual site type {virtual_site_key.type} is not supported"
            )
        return origin_weight, x_direction, y_direction
    def _get_local_frame_position(self, virtual_site_key: "VirtualSiteKey"):
        """Return the position of a virtual site in its local coordinate frame."""
potential_key = self.slot_map[virtual_site_key]
potential = self.potentials[potential_key]
if virtual_site_key.type == "BondCharge":
distance = potential.parameters["distance"]
local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
elif virtual_site_key.type == "MonovalentLonePair":
distance = potential.parameters["distance"]
theta = potential.parameters["inPlaneAngle"].m_as(unit.radian) # type: ignore
psi = potential.parameters["outOfPlaneAngle"].m_as(unit.radian) # type: ignore
factor = np.array(
[np.cos(theta) * np.cos(psi), np.sin(theta) * np.cos(psi), np.sin(psi)]
)
local_frame_position = factor * distance
elif virtual_site_key.type == "DivalentLonePair":
distance = potential.parameters["distance"]
theta = potential.parameters["inPlaneAngle"].m_as(unit.radian) # type: ignore
factor = np.asarray([-1.0 * np.cos(theta), 0.0, np.sin(theta)])
local_frame_position = factor * distance
elif virtual_site_key.type == "TrivalentLonePair":
distance = potential.parameters["distance"]
local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
        else:
            raise NotImplementedError(
                f"Virtual site type {virtual_site_key.type} is not supported"
            )
        return local_frame_position
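# A worked example of the local-frame convention above (hypothetical numbers,
# for illustration only): for a "BondCharge" site on atoms (a1, a2), the
# origin weights [1.0, 0.0] place the frame origin on a1, and the x direction
# [-1.0, 1.0] points from a1 toward a2. With distance = 1.4 angstrom, the
# local-frame position [-1.4, 0.0, 0.0] puts the site 1.4 angstrom beyond a1,
# on the side opposite a2 along the bond axis.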
def library_charge_from_molecule(
molecule: "Molecule",
) -> LibraryChargeHandler.LibraryChargeType:
"""Given an OpenFF Molecule with charges, generate a corresponding LibraryChargeType."""
if molecule.partial_charges is None:
raise ValueError("Input molecule is missing partial charges.")
smirks = molecule.to_smiles(mapped=True)
charges = molecule.partial_charges
library_charge_type = LibraryChargeHandler.LibraryChargeType(
smirks=smirks, charge=charges
)
return library_charge_type
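# A short usage sketch (hypothetical molecule, for illustration only):
#
#     from openff.toolkit.topology import Molecule
#
#     mol = Molecule.from_smiles("O")
#     mol.assign_partial_charges("am1bcc")
#     lc_type = library_charge_from_molecule(mol)
#     # lc_type.smirks is the mapped SMILES; lc_type.charge holds the charges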
def _get_interpolation_coeffs(fractional_bond_order, data):
x1, x2 = data.keys()
coeff1 = (x2 - fractional_bond_order) / (x2 - x1)
coeff2 = (fractional_bond_order - x1) / (x2 - x1)
return coeff1, coeff2
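# A worked example of the linear interpolation above: with data keys x1=1,
# x2=2 and a fractional bond order of 1.2, the coefficients are
# (2 - 1.2) / (2 - 1) = 0.8 and (1.2 - 1) / (2 - 1) = 0.2, i.e. an 80/20
# linear blend of the parameters tabulated at bond orders 1 and 2:
#
#     coeff1, coeff2 = _get_interpolation_coeffs(1.2, {1: None, 2: None})
#     # coeff1 ~ 0.8, coeff2 ~ 0.2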
SMIRNOFF_POTENTIAL_HANDLERS = [
SMIRNOFFBondHandler,
SMIRNOFFConstraintHandler,
SMIRNOFFAngleHandler,
SMIRNOFFProperTorsionHandler,
SMIRNOFFImproperTorsionHandler,
SMIRNOFFvdWHandler,
SMIRNOFFElectrostaticsHandler,
]
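# A minimal sketch of how this registry might be used to dispatch a toolkit
# parameter handler to its SMIRNOFF potential handler (hypothetical driver
# code, for illustration only):
#
#     def _handler_class_for(parameter_handler):
#         for handler_class in SMIRNOFF_POTENTIAL_HANDLERS:
#             if type(parameter_handler) in handler_class.allowed_parameter_handlers():
#                 return handler_class
#         raise InvalidParameterHandlerError(type(parameter_handler))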
|
[
"openff.units.openmm.from_openmm",
"pydantic.Field",
"openff.interchange.exceptions.SMIRNOFFParameterAttributeNotImplementedError",
"numpy.asarray",
"openff.interchange.models.TopologyKey",
"openff.toolkit.typing.engines.smirnoff.parameters.LibraryChargeHandler.LibraryChargeType",
"numpy.sum",
"openff.interchange.exceptions.MissingParametersError",
"collections.defaultdict",
"openff.interchange.models.PotentialKey",
"openff.interchange.models.VirtualSiteKey",
"copy.deepcopy",
"numpy.sin",
"functools.lru_cache",
"numpy.cos",
"openff.interchange.components.potentials.Potential",
"typing.TypeVar"
] |
[((1814, 1860), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""SMIRNOFFPotentialHandler"""'}), "('T', bound='SMIRNOFFPotentialHandler')\n", (1821, 1860), False, 'from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Tuple, Type, TypeVar, Union\n'), ((1866, 1905), 'typing.TypeVar', 'TypeVar', (['"""TP"""'], {'bound': '"""PotentialHandler"""'}), "('TP', bound='PotentialHandler')\n", (1873, 1905), False, 'from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Tuple, Type, TypeVar, Union\n'), ((25522, 25626), 'pydantic.Field', 'Field', (['(9.0 * unit.angstrom)'], {'description': '"""The distance at which pairwise interactions are truncated"""'}), "(9.0 * unit.angstrom, description=\n 'The distance at which pairwise interactions are truncated')\n", (25527, 25626), False, 'from pydantic import Field\n'), ((25684, 25756), 'pydantic.Field', 'Field', (['(0.0)'], {'description': '"""The scaling factor applied to 1-3 interactions"""'}), "(0.0, description='The scaling factor applied to 1-3 interactions')\n", (25689, 25756), False, 'from pydantic import Field\n'), ((25793, 25865), 'pydantic.Field', 'Field', (['(0.5)'], {'description': '"""The scaling factor applied to 1-4 interactions"""'}), "(0.5, description='The scaling factor applied to 1-4 interactions')\n", (25798, 25865), False, 'from pydantic import Field\n'), ((25902, 25974), 'pydantic.Field', 'Field', (['(1.0)'], {'description': '"""The scaling factor applied to 1-5 interactions"""'}), "(1.0, description='The scaling factor applied to 1-5 interactions')\n", (25907, 25974), False, 'from pydantic import Field\n'), ((26363, 26378), 'pydantic.Field', 'Field', (['"""cutoff"""'], {}), "('cutoff')\n", (26368, 26378), False, 'from pydantic import Field\n'), ((26441, 26571), 'pydantic.Field', 'Field', (['"""lorentz-berthelot"""'], {'description': '"""The mixing rule (combination rule) used in computing pairwise vdW interactions"""'}), "('lorentz-berthelot', description=\n 'The mixing rule (combination rule) used in computing pairwise vdW interactions'\n )\n", (26446, 26571), False, 'from pydantic import Field\n'), ((26632, 26733), 'pydantic.Field', 'Field', (['(1.0 * unit.angstrom)'], {'description': '"""The width over which the switching function is applied"""'}), "(1.0 * unit.angstrom, description=\n 'The width over which the switching function is applied')\n", (26637, 26733), False, 'from pydantic import Field\n'), ((32709, 32721), 'pydantic.Field', 'Field', (['"""pme"""'], {}), "('pme')\n", (32714, 32721), False, 'from pydantic import Field\n'), ((39374, 39399), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (39393, 39399), False, 'import functools\n'), ((56282, 56351), 'openff.toolkit.typing.engines.smirnoff.parameters.LibraryChargeHandler.LibraryChargeType', 'LibraryChargeHandler.LibraryChargeType', ([], {'smirks': 'smirks', 'charge': 'charges'}), '(smirks=smirks, charge=charges)\n', (56320, 56351), False, 'from openff.toolkit.typing.engines.smirnoff.parameters import AngleHandler, BondHandler, ChargeIncrementModelHandler, ConstraintHandler, ElectrostaticsHandler, ImproperTorsionHandler, LibraryChargeHandler, ParameterHandler, ProperTorsionHandler, ToolkitAM1BCCHandler, UnassignedProperTorsionParameterException, UnassignedValenceParameterException, VirtualSiteHandler, vdWHandler\n'), ((33976, 34010), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0 * unit.e)'], {}), '(lambda : 0.0 * unit.e)\n', (33987, 34010), False, 'from collections import defaultdict\n'), ((39594, 39617), 
'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (39607, 39617), False, 'import copy\n'), ((39682, 39719), 'openff.units.openmm.from_openmm', 'from_openmm', (['molecule.partial_charges'], {}), '(molecule.partial_charges)\n', (39693, 39719), False, 'from openff.units.openmm import from_openmm\n'), ((43721, 43754), 'copy.deepcopy', 'copy.deepcopy', (['reference_molecule'], {}), '(reference_molecule)\n', (43734, 43754), False, 'import copy\n'), ((3870, 3899), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key'}), '(atom_indices=key)\n', (3881, 3899), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((3928, 4018), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'val.parameter_type.smirks', 'associated_handler': 'parameter_handler_name'}), '(id=val.parameter_type.smirks, associated_handler=\n parameter_handler_name)\n', (3940, 4018), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((7738, 7801), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key', 'bond_order': 'fractional_bond_order'}), '(atom_indices=key, bond_order=fractional_bond_order)\n', (7749, 7801), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((7860, 7984), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'val.parameter_type.smirks', 'associated_handler': 'parameter_handler_name', 'bond_order': 'fractional_bond_order'}), '(id=val.parameter_type.smirks, associated_handler=\n parameter_handler_name, bond_order=fractional_bond_order)\n', (7872, 7984), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((14302, 14331), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key'}), '(atom_indices=key)\n', (14313, 14331), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((15563, 15607), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'distance': distance}"}), "(parameters={'distance': distance})\n", (15572, 15607), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((16977, 17043), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'k': parameter.k, 'angle': parameter.angle}"}), "(parameters={'k': parameter.k, 'angle': parameter.angle})\n", (16986, 17043), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((25178, 25210), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (25187, 25210), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((30700, 30799), 'openff.interchange.models.VirtualSiteKey', 'VirtualSiteKey', ([], {'atom_indices': 'atoms', 'type': 'virtual_site_type.type', 'match': 'virtual_site_type.match'}), '(atom_indices=atoms, type=virtual_site_type.type, match=\n virtual_site_type.match)\n', (30714, 30799), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((30880, 30969), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'virtual_site_type.smirks', 'associated_handler': 'virtual_site_type.type'}), '(id=virtual_site_type.smirks, associated_handler=\n 
virtual_site_type.type)\n', (30892, 30969), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((31013, 31111), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'sigma': virtual_site_type.sigma, 'epsilon': virtual_site_type.epsilon}"}), "(parameters={'sigma': virtual_site_type.sigma, 'epsilon':\n virtual_site_type.epsilon})\n", (31022, 31111), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((37560, 37665), 'openff.interchange.models.VirtualSiteKey', 'VirtualSiteKey', ([], {'atom_indices': 'atom_indices', 'type': 'virtual_site_type.type', 'match': 'virtual_site_type.match'}), '(atom_indices=atom_indices, type=virtual_site_type.type,\n match=virtual_site_type.match)\n', (37574, 37665), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((37767, 37854), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'virtual_site_type.smirks', 'associated_handler': '"""VirtualSiteHandler"""'}), "(id=virtual_site_type.smirks, associated_handler=\n 'VirtualSiteHandler')\n", (37779, 37854), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((40223, 40262), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(atom_index,)'}), '(atom_indices=(atom_index,))\n', (40234, 40262), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((40291, 40369), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'parameter.smirks', 'mult': 'i', 'associated_handler': '"""LibraryCharges"""'}), "(id=parameter.smirks, mult=i, associated_handler='LibraryCharges')\n", (40303, 40369), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((41098, 41137), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(atom_index,)'}), '(atom_indices=(atom_index,))\n', (41109, 41137), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((41166, 41255), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'parameter.smirks', 'mult': 'i', 'associated_handler': '"""ChargeIncrementModel"""'}), "(id=parameter.smirks, mult=i, associated_handler=\n 'ChargeIncrementModel')\n", (41178, 41255), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((44216, 44293), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'reference_smiles', 'mult': 'i', 'associated_handler': '"""ToolkitAM1BCC"""'}), "(id=reference_smiles, mult=i, associated_handler='ToolkitAM1BCC')\n", (44228, 44293), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((44364, 44412), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'charge': partial_charge}"}), "(parameters={'charge': partial_charge})\n", (44373, 44412), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((49027, 49043), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (49038, 49043), False, 'from collections import defaultdict\n'), ((53120, 53179), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'distance': parameter_type.distance}"}), "(parameters={'distance': parameter_type.distance})\n", (53129, 53179), False, 'from 
openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((9937, 10005), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'k': parameter.k, 'length': parameter.length}"}), "(parameters={'k': parameter.k, 'length': parameter.length})\n", (9946, 10005), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((14566, 14623), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks', 'associated_handler': '"""Constraints"""'}), "(id=smirks, associated_handler='Constraints')\n", (14578, 14623), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((19637, 19708), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key', 'mult': 'n', 'bond_order': 'fractional_bond_order'}), '(atom_indices=key, mult=n, bond_order=fractional_bond_order)\n', (19648, 19708), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((19779, 19885), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks', 'mult': 'n', 'associated_handler': '"""ProperTorsions"""', 'bond_order': 'fractional_bond_order'}), "(id=smirks, mult=n, associated_handler='ProperTorsions',\n bond_order=fractional_bond_order)\n", (19791, 19885), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((22197, 22229), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (22206, 22229), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((27609, 27687), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'sigma': parameter.sigma, 'epsilon': parameter.epsilon}"}), "(parameters={'sigma': parameter.sigma, 'epsilon': parameter.epsilon})\n", (27618, 27687), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((38649, 38687), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(i,)', 'mult': '(2)'}), '(atom_indices=(i,), mult=2)\n', (38660, 38687), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((38720, 38815), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'virtual_site_type.smirks', 'mult': 'i', 'associated_handler': '"""VirtualSiteHandler"""'}), "(id=virtual_site_type.smirks, mult=i, associated_handler=\n 'VirtualSiteHandler')\n", (38732, 38815), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((44434, 44464), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(i,)'}), '(atom_indices=(i,))\n', (44445, 44464), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((52127, 52226), 'openff.interchange.models.VirtualSiteKey', 'VirtualSiteKey', ([], {'atom_indices': 'key', 'type': 'val.parameter_type.type', 'match': 'val.parameter_type.match'}), '(atom_indices=key, type=val.parameter_type.type, match=val.\n parameter_type.match)\n', (52141, 52226), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((52333, 52423), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'val.parameter_type.smirks', 'associated_handler': 'parameter_handler_name'}), 
'(id=val.parameter_type.smirks, associated_handler=\n parameter_handler_name)\n', (52345, 52423), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((54772, 54800), 'numpy.asarray', 'np.asarray', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (54782, 54800), True, 'import numpy as np\n'), ((3086, 3152), 'openff.interchange.exceptions.SMIRNOFFParameterAttributeNotImplementedError', 'SMIRNOFFParameterAttributeNotImplementedError', (['parameter_attribute'], {}), '(parameter_attribute)\n', (3131, 3152), False, 'from openff.interchange.exceptions import InvalidParameterHandlerError, MissingParametersError, SMIRNOFFParameterAttributeNotImplementedError\n'), ((14879, 15088), 'openff.interchange.exceptions.MissingParametersError', 'MissingParametersError', (['f"""Constraint with SMIRKS pattern {smirks} found with no distance specified, and no corresponding bond parameters were found. The distance of this constraint is not specified."""'], {}), "(\n f'Constraint with SMIRKS pattern {smirks} found with no distance specified, and no corresponding bond parameters were found. The distance of this constraint is not specified.'\n )\n", (14901, 15088), False, 'from openff.interchange.exceptions import InvalidParameterHandlerError, MissingParametersError, SMIRNOFFParameterAttributeNotImplementedError\n'), ((24181, 24238), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(key[1], *permuted_key)', 'mult': 'n'}), '(atom_indices=(key[1], *permuted_key), mult=n)\n', (24192, 24238), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((24321, 24391), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks', 'mult': 'n', 'associated_handler': '"""ImproperTorsions"""'}), "(id=smirks, mult=n, associated_handler='ImproperTorsions')\n", (24333, 24391), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((27962, 28040), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'sigma': parameter.sigma, 'epsilon': parameter.epsilon}"}), "(parameters={'sigma': parameter.sigma, 'epsilon': parameter.epsilon})\n", (27971, 28040), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((35148, 35182), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(index,)'}), '(atom_indices=(index,))\n', (35159, 35182), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((9434, 9561), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'k': parameter.k_bondorder[map_key], 'length': parameter.length_bondorder[\n map_key]}", 'map_key': 'map_key'}), "(parameters={'k': parameter.k_bondorder[map_key], 'length':\n parameter.length_bondorder[map_key]}, map_key=map_key)\n", (9443, 9561), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((21572, 21621), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 'parameters', 'map_key': 'map_key'}), '(parameters=parameters, map_key=map_key)\n', (21581, 21621), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((34412, 34435), 'numpy.sum', 'np.sum', (['parameter_value'], {}), '(parameter_value)\n', (34418, 34435), True, 'import numpy as np\n'), ((38016, 38063), 
'openff.units.openmm.from_openmm', 'from_openmm', (['virtual_site_type.charge_increment'], {}), '(virtual_site_type.charge_increment)\n', (38027, 38063), False, 'from openff.units.openmm import from_openmm\n'), ((40456, 40475), 'openff.units.openmm.from_openmm', 'from_openmm', (['charge'], {}), '(charge)\n', (40467, 40475), False, 'from openff.units.openmm import from_openmm\n'), ((41588, 41617), 'openff.units.openmm.from_openmm', 'from_openmm', (['charge_increment'], {}), '(charge_increment)\n', (41599, 41617), False, 'from openff.units.openmm import from_openmm\n'), ((46040, 46099), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'topology_key.atom_indices', 'mult': '(0)'}), '(atom_indices=topology_key.atom_indices, mult=0)\n', (46051, 46099), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((46307, 46366), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'topology_key.atom_indices', 'mult': '(1)'}), '(atom_indices=topology_key.atom_indices, mult=1)\n', (46318, 46366), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((49650, 49704), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(topology_index,)', 'mult': 'mult'}), '(atom_indices=(topology_index,), mult=mult)\n', (49661, 49704), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((55217, 55228), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (55223, 55228), True, 'import numpy as np\n'), ((39111, 39140), 'openff.units.openmm.from_openmm', 'from_openmm', (['charge_increment'], {}), '(charge_increment)\n', (39122, 39140), False, 'from openff.units.openmm import from_openmm\n'), ((49792, 49847), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(reference_index,)', 'mult': 'mult'}), '(atom_indices=(reference_index,), mult=mult)\n', (49803, 49847), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((55159, 55172), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (55165, 55172), True, 'import numpy as np\n'), ((55175, 55186), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (55181, 55186), True, 'import numpy as np\n'), ((55188, 55201), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (55194, 55201), True, 'import numpy as np\n'), ((55204, 55215), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (55210, 55215), True, 'import numpy as np\n'), ((55562, 55575), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (55568, 55575), True, 'import numpy as np\n'), ((55781, 55809), 'numpy.asarray', 'np.asarray', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (55791, 55809), True, 'import numpy as np\n'), ((55542, 55555), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (55548, 55555), True, 'import numpy as np\n')]
|
from __future__ import print_function
import pylab as plt
import numpy as np
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import Context, RequestContext, loader
from astrometry.net.models import *
from astrometry.util.resample import *
from astrometry.net.tmpfile import *
from scipy.interpolate import interp1d
def simple_histeq(pixels, getinverse=False, mx=256):
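    """Histogram-equalize an integer image.

    Maps each pixel of a uint8/uint16 array to its quantile in [0, 1],
    assigning the middle of each pixel value's quantile range.  With
    getinverse=True, also returns an interp1d object that maps quantiles
    back to (fractional) pixel values.
    """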
assert(pixels.dtype in [np.uint8, np.uint16])
if not getinverse:
h = np.bincount(pixels, minlength=mx)
# pixel value -> quantile map.
# If you imagine jittering the pixels so there are no repeats,
# this assigns the middle quantile to a pixel value.
quant = h * 0.5
cs = np.cumsum(h)
quant[1:] += cs[:-1]
quant /= float(cs[-1])
# quant = np.cumsum(h / float(h.sum()))
return quant[pixels]
# This inverse function has slightly weird properties -- it
# puts a ramp across each pixel value, so inv(0.) may produce
# values as small as -0.5, and inv(1.) may produce 255.5
h = np.bincount(pixels.astype(int)+1, minlength=mx+1)
quant = h[1:] * 0.5
cs = np.cumsum(h)
quant[1:] += cs[1:-1]
quant /= float(cs[-1])
# interp1d is fragile -- remove duplicate "yy" values that
# otherwise cause nans.
yy = cs / float(cs[-1])
xx = np.arange(mx + 1) - 0.5
I = np.append([0], 1 + np.flatnonzero(np.diff(yy)))
print('mx:', mx)
print('xx:', len(xx))
print('yy:', len(yy))
print('I:', I.min(), I.max())
yy = yy[I]
xx = xx[I]
xx[-1] = mx-0.5
# print 'yy', yy[0], yy[-1]
# print 'xx', xx[0], xx[-1]
inv = interp1d(yy, xx, kind='linear')
return quant[pixels], inv
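# Minimal usage sketch for simple_histeq (illustrative; assumes an 8-bit
# grayscale array "img"):
#   quant = simple_histeq(img.ravel()).reshape(img.shape)  # values in [0, 1]
#   eq8 = (quant * 255).astype(np.uint8)                   # back to displayable 8-bit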
def enhanced_ui(req, user_image_id=None):
ui = UserImage.objects.get(id=user_image_id)
job = ui.get_best_job()
return enhanced_image(req, job_id=job.id, size='display')
def enhanced_image(req, job_id=None, size=None):
job = get_object_or_404(Job, pk=job_id)
ui = job.user_image
cal = job.calibration
tan = cal.raw_tan
nside,hh = get_healpixes_touching_wcs(tan)
tt = 'hello %s, job %s, nside %s, hh %s' % (ui, job, nside, hh)
ver = EnhanceVersion.objects.get(name='v4')
print('Using', ver)
EIms = EnhancedImage.objects.filter(version=ver)
ens = []
for hp in hh:
en = EIms.filter(nside=nside, healpix=hp, version=ver)
if len(en):
ens.extend(list(en))
for dnside in range(1, 3):
if len(ens) == 0:
            bignside = nside // (2**dnside)  # integer division: healpix nside must stay integral
nil,hh = get_healpixes_touching_wcs(tan, nside=bignside)
tt += 'bigger healpixes: %s: %s' % (bignside, hh)
for hp in hh:
en = EIms.filter(nside=bignside, healpix=hp)
if len(en):
ens.extend(list(en))
tt = tt + ', EnhancedImages: ' + ', '.join('%s'%e for e in ens)
img = ui.image
W,H = img.width, img.height
tt = tt + 'image size %ix%i' % (W,H)
#return HttpResponse(tt)
targetwcs = tan.to_tanwcs()
#print 'Target WCS:', targetwcs
#print 'W,H', W,H
logmsg('wcs:', str(targetwcs))
if size == 'display':
scale = float(ui.image.get_display_image().width)/ui.image.width
logmsg('scaling:', scale)
targetwcs = targetwcs.scale(scale)
logmsg('scaled wcs:', str(targetwcs))
H,W = targetwcs.get_height(), targetwcs.get_width()
img = ui.image.get_display_image()
print(tt)
ee = np.zeros((H,W,3), np.float32)
imgdata = None
df = img.disk_file
ft = df.file_type
fn = df.get_path()
if 'JPEG' in ft:
print('Reading', fn)
I = plt.imread(fn)
print('Read', I.shape, I.dtype)
if len(I.shape) == 2:
I = I[:,:,np.newaxis].repeat(3, axis=2)
assert(len(I.shape) == 3)
if I.shape[2] > 3:
            I = I[:,:,:3]  # drop any alpha channel
# vertical FLIP to match WCS
I = I[::-1,:,:]
imgdata = I
mapped = np.zeros_like(imgdata)
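    # For each band, rank-match ("histogram specification"): the pixel with the
    # k-th smallest enhanced value receives the k-th smallest original image
    # value, so the enhanced structure inherits the image's intensity histogram.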
for en in ens:
logmsg('Resampling %s' % en)
wcs = en.wcs.to_tanwcs()
try:
Yo,Xo,Yi,Xi,nil = resample_with_wcs(targetwcs, wcs, [], 3)
except OverlapError:
continue
#logmsg(len(Yo), 'pixels')
enI,enW = en.read_files()
#print 'Cals included in this Enhanced image:'
#for c in en.cals.all():
# print ' ', c
#logmsg('en:', enI.min(), enI.max())
if imgdata is not None:
mask = (enW[Yi,Xi] > 0)
for b in range(3):
enI[:,:,b] /= enI[:,:,b].max()
if imgdata is not None:
idata = imgdata[Yo[mask],Xo[mask],b]
DI = np.argsort((idata + np.random.uniform(size=idata.shape))/255.)
EI = np.argsort(enI[Yi[mask], Xi[mask], b])
Erank = np.zeros_like(EI)
Erank[EI] = np.arange(len(Erank))
mapped[Yo[mask],Xo[mask],b] = idata[DI[Erank]]
else:
# Might have to average the coverage here...
ee[Yo,Xo,b] += enI[Yi,Xi,b]
# ee[Yo[mask],Xo[mask],b] += enI[Yi[mask],Xi[mask],b]
tempfn = get_temp_file(suffix='.png')
if imgdata is not None:
im = mapped
else:
im = np.clip(ee, 0., 1.)
dpi = 100
figsize = [x / float(dpi) for x in im.shape[:2][::-1]]
fig = plt.figure(figsize=figsize, frameon=False, dpi=dpi)
plt.clf()
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
plt.imshow(im, interpolation='nearest')
# rdfn = job.get_rdls_file()
# rd = fits_table(rdfn)
# ok,x,y = targetwcs.radec2pixelxy(rd.ra, rd.dec)
# plt.plot(x, y, 'o', mec='r', mfc='none', ms=10)
plt.savefig(tempfn)
print('Wrote', tempfn)
    f = open(tempfn, 'rb')  # binary mode: we are serving raw PNG bytes
res = HttpResponse(f)
res['Content-Type'] = 'image/png'
return res
|
[
"numpy.clip",
"pylab.subplots_adjust",
"pylab.imread",
"django.http.HttpResponse",
"pylab.savefig",
"django.shortcuts.get_object_or_404",
"numpy.zeros_like",
"numpy.diff",
"pylab.figure",
"numpy.argsort",
"numpy.zeros",
"numpy.random.uniform",
"numpy.cumsum",
"numpy.bincount",
"pylab.clf",
"numpy.arange",
"pylab.imshow"
] |
[((1246, 1258), 'numpy.cumsum', 'np.cumsum', (['h'], {}), '(h)\n', (1255, 1258), True, 'import numpy as np\n'), ((2058, 2091), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Job'], {'pk': 'job_id'}), '(Job, pk=job_id)\n', (2075, 2091), False, 'from django.shortcuts import render_to_response, get_object_or_404, redirect, render\n'), ((3611, 3642), 'numpy.zeros', 'np.zeros', (['(H, W, 3)', 'np.float32'], {}), '((H, W, 3), np.float32)\n', (3619, 3642), True, 'import numpy as np\n'), ((5543, 5594), 'pylab.figure', 'plt.figure', ([], {'figsize': 'figsize', 'frameon': '(False)', 'dpi': 'dpi'}), '(figsize=figsize, frameon=False, dpi=dpi)\n', (5553, 5594), True, 'import pylab as plt\n'), ((5599, 5608), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (5606, 5608), True, 'import pylab as plt\n'), ((5613, 5666), 'pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1)', 'bottom': '(0)', 'top': '(1)'}), '(left=0, right=1, bottom=0, top=1)\n', (5632, 5666), True, 'import pylab as plt\n'), ((5671, 5710), 'pylab.imshow', 'plt.imshow', (['im'], {'interpolation': '"""nearest"""'}), "(im, interpolation='nearest')\n", (5681, 5710), True, 'import pylab as plt\n'), ((5886, 5905), 'pylab.savefig', 'plt.savefig', (['tempfn'], {}), '(tempfn)\n', (5897, 5905), True, 'import pylab as plt\n'), ((5965, 5980), 'django.http.HttpResponse', 'HttpResponse', (['f'], {}), '(f)\n', (5977, 5980), False, 'from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict\n'), ((570, 603), 'numpy.bincount', 'np.bincount', (['pixels'], {'minlength': 'mx'}), '(pixels, minlength=mx)\n', (581, 603), True, 'import numpy as np\n'), ((812, 824), 'numpy.cumsum', 'np.cumsum', (['h'], {}), '(h)\n', (821, 824), True, 'import numpy as np\n'), ((1441, 1458), 'numpy.arange', 'np.arange', (['(mx + 1)'], {}), '(mx + 1)\n', (1450, 1458), True, 'import numpy as np\n'), ((3791, 3805), 'pylab.imread', 'plt.imread', (['fn'], {}), '(fn)\n', (3801, 3805), True, 'import pylab as plt\n'), ((4119, 4141), 'numpy.zeros_like', 'np.zeros_like', (['imgdata'], {}), '(imgdata)\n', (4132, 4141), True, 'import numpy as np\n'), ((5440, 5461), 'numpy.clip', 'np.clip', (['ee', '(0.0)', '(1.0)'], {}), '(ee, 0.0, 1.0)\n', (5447, 5461), True, 'import numpy as np\n'), ((1507, 1518), 'numpy.diff', 'np.diff', (['yy'], {}), '(yy)\n', (1514, 1518), True, 'import numpy as np\n'), ((4937, 4975), 'numpy.argsort', 'np.argsort', (['enI[Yi[mask], Xi[mask], b]'], {}), '(enI[Yi[mask], Xi[mask], b])\n', (4947, 4975), True, 'import numpy as np\n'), ((5000, 5017), 'numpy.zeros_like', 'np.zeros_like', (['EI'], {}), '(EI)\n', (5013, 5017), True, 'import numpy as np\n'), ((4872, 4907), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'idata.shape'}), '(size=idata.shape)\n', (4889, 4907), True, 'import numpy as np\n')]
|
"""
Created on Dec 16 2021
@author: <NAME>
Poisson equation solver for the Hall effect.
Includes classes for Hall bars, Hall bars in a nonlocal geometry, and Corbino disks.
The Hall bar class has built-in methods for longitudinal and Hall 4-probe resistance measurements.
Plotting functions assume coordinates are in microns, but the Poisson equation is scale-invariant.
"""
import time
import math
import numpy as np
import scipy.sparse as sp # import sparse matrix library
import matplotlib.pyplot as plt
from scipy.sparse.linalg import spsolve
# import the file where the differentiation matrix operators are defined
from diff_matrices import Diff_mat_1D, Diff_mat_2D
class hallbar():
"""The class for a Hall bar device
Source is the left terminal, drain is the right terminal.
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
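        # Physics note: for a uniform conductivity tensor, charge conservation
        # reduces to Laplace's equation in the bulk; the Hall angle enters only
        # through the zero-current edge condition (d/dy - lmbda*d/dx) phi = 0,
        # built below from the Neumann operators.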
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_unravel_L,:] = BD[self.ind_unravel_L,:] # Boundaries at the left layer
L_sys[self.ind_unravel_R,:] = BD[self.ind_unravel_R,:] # Boundaries at the right edges
# CURRENT THROUGH EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_L] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_R] = 0 # Dirichlet boundary condition at drain
g[self.ind_unravel_T] = 0 # No current through top
g[self.ind_unravel_B] = 0 # No current through bottom
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top', 'bottom', or 'hall') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif side=='bottom':
ya = 0
yb = 0
elif side=='hall':
ya = 0
yb = self.Ny-1
else:
            raise Exception("Side must be 'top', 'bottom', or 'hall'")
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, ya] - self.u[xb, yb]
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
def plot_resistance(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
r_top = (self.u[0:-1, -1] - self.u[1:, -1]) * 25812 * self.Ly / self.dx
r_bottom = (self.u[0:-1, 0] - self.u[1:, 0]) * 25812 * self.Ly / self.dx
rxx = 25812 / self.lmbda
fig = plt.figure(figsize = [8,5])
plt.plot(self.x[0:-1] - self.dx, r_top, 'r', label='top')
plt.plot(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')
plt.hlines(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey', label=r'$\rho_{xx}$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'$\rho_{xx}$ $(\Omega)$');
plt.legend()
plt.ylim([0, 12000]);
plt.show()
def add_contact(self, contact):
if contact.x1 > self.Lx or contact.x2 > self.Lx:
raise Exception('Contact out of bounds')
self.contacts.append(contact)
def measure_contact_voltageonly(self, contact):
# Args: contact instance
# Returns: measured resistivity
# Voltage is averaged across voltage tap
# THIS FUNCTION DOES NOT CHECK THE CURRENT!
# This method assumes 2terminal resistance is h/e2, which in general is wrong
if np.all(self.u==0):
raise Exception('System has not been solved')
if contact.side=='top':
y = self.Ny-1
elif contact.side=='bottom':
y = 0
else:
raise Exception('Side must be top or bottom')
# Average voltage A
A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
A_voltage = self.u[A_indices, y].mean()
# Average voltage A
B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
B_voltage = self.u[B_indices, y].mean()
# voltage difference
v = A_voltage - B_voltage
# length between contacts
dx = np.abs(contact.x1 - contact.x2)
# return apparent resistivity
return 25812 * v * self.Ly / dx
def measure_all_contacts_voltageonly(self):
# Args: none
# Returns: array; resistivity measurement of all contacts
if np.all(self.u==0):
raise Exception('System has not been solved')
result = []
for contact in self.contacts:
result.append(self.measure_contact_voltageonly(contact))
return result
def measure_contact(self, contact, sxx, sxy):
'''
Voltage is averaged across voltage tap
This method checks the current and outputs resistivity.
Args:
contact : contact instance
sxx : longitudinal
sxy : hall. sxy/sxx should match self.lmbda
Returns: measured resistivity
'''
if np.all(self.u==0):
raise Exception('System has not been solved')
if contact.side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif contact.side=='bottom':
ya = 0
yb = 0
elif contact.side=='hall':
ya = 0
yb = self.Ny-1
else:
            raise Exception("Side must be 'top', 'bottom', or 'hall'")
# Average voltage A
A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
A_voltage = self.u[A_indices, ya].mean()
# Average voltage B
B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
B_voltage = self.u[B_indices, yb].mean()
# voltage difference
v = A_voltage - B_voltage
# length between contacts
dx = np.abs(contact.x1 - contact.x2)
i = self.measure_current(sxx, sxy)
# return apparent resistivity
if contact.side=='hall':
return v / i
else:
return v / i * self.Ly / dx
def measure_all_contacts(self, sxx, sxy):
# Args: none
# Returns: array; resistivity measurement of all contacts
if np.all(self.u==0):
raise Exception('System has not been solved')
result = []
for contact in self.contacts:
result.append(self.measure_contact(contact, sxx, sxy))
return result
def measure_current(self, sxx, sxy):
'''
ARGS : sxx and sxy : longitudinal and Hall conductivity. units e2/h
Returns : current moving through device
'''
# choose place to measure: halfway across Hallbar
ind_x = int(self.Nx/2)
# calculate electric field using E = -\nabla V
# x electric field, using second order central finite difference
E_x = 0.5 * (self.u[ind_x - 1, :] - self.u[ind_x + 1, :]) / self.dx
# y electric field, need forward/backward differences for edges
Dy_1d, D2y_1d = Diff_mat_1D(self.Ny)
E_y = - 0.5 * Dy_1d.dot(self.u[ind_x, :]) / self.dy
# calculate x current using j = sigma E; integrate and convert to SI units
        current = np.sum(sxx * E_x + sxy * E_y) * self.dy / 25812  # h/e^2 ~ 25812 ohm: converts e^2/h units to SI
return current
class contact():
"""The class for a voltage contact
Args:
x1 : coordinate location of V_A
x2 : coordinate location of V_B
side ('top', 'bottom', or 'hall') : which side of the Hall bar to measure
width : width of voltage tap in microns
"""
def __init__(self, x1, x2, side='top', width=6):
self.x1 = x1
self.x2 = x2
self.side = side
self.width = width
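# Illustrative sketch pairing a contact with a solved hallbar (values assumed):
#   c = contact(x1=30, x2=70, side='top', width=6)
#   hb.add_contact(c)
#   rho_app = hb.measure_contact(c, sxx=1.0, sxy=2.0)  # sxy/sxx should match hb.lmbda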
class nonlocal_hb():
"""The class for nonlocal measurements
Contacts are on the bottom edge of the device
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
settings : positions of contacts
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201, settings = {}):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Nonlocal contacts
self.source_x1 = settings.get("source_x1", Lx/4)
self.source_x2 = settings.get("source_x2", Lx/3)
self.drain_x1 = settings.get("drain_x1", 2*Lx/3)
self.drain_x2 = settings.get("drain_x2", 3*Lx/4)
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
self.ind_unravel_source = np.squeeze(np.where( (self.Yu==self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <= self.source_x2) )) # Source
self.ind_unravel_drain = np.squeeze(np.where( (self.Yu==self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <= self.drain_x2) )) # Drain
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
''' Constructs matrix problem and solves Poisson equation
# Args:
lmbda : sigma_xy / sigma_xx. Must be finite
# Returns:
self.u : electric potential
'''
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# CURRENT THROUGH TOP/BOTTOM EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# CURRENT THROUGH LEFT/RIGHT EDGES
L_sys[self.ind_unravel_L,:] = BNx[self.ind_unravel_L,:] + lmbda * BNy[self.ind_unravel_L,:]
L_sys[self.ind_unravel_R,:] = BNx[self.ind_unravel_R,:] + lmbda * BNy[self.ind_unravel_R,:]
# REPLACE WITH DIRICHLET BOUNDARY CONDITIONS FOR SOURCE/DRAIN
L_sys[self.ind_unravel_source,:] = BD[self.ind_unravel_source,:]
L_sys[self.ind_unravel_drain,:] = BD[self.ind_unravel_drain,:]
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# No current boundary conditions
g[self.ind_unravel_L] = 0
g[self.ind_unravel_R] = 0
g[self.ind_unravel_T] = 0
g[self.ind_unravel_B] = 0
# Replace source with potential
g[self.ind_unravel_source] = 1
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top' or 'bottom') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
y = self.Ny-1
elif side=='bottom':
y = 0
else:
raise Exception('Side must be top or bottom')
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, y] - self.u[xb, y]
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
# plt.contour(self.x,self.y,self.u.T,41,cmap = 'viridis', vmin=0, vmax=1)
plt.pcolormesh(self.X, self.Y, self.u.T, cmap='inferno', vmin=0, vmax=1)
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
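# Illustrative sketch for the nonlocal geometry (contact positions assumed):
#   nl = nonlocal_hb(Lx=100, Ly=20,
#                    settings={'source_x1': 20, 'source_x2': 25,
#                              'drain_x1': 75, 'drain_x2': 80})
#   nl.solve(lmbda=2.0)
#   v_nl = nl.voltage_measurement(40, 60, side='bottom')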
class corbino():
"""The class for a Corbino disk
Args:
ro : outer radius
ri : inner radius
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, ro, ri, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.ro = ro
self.ri = ri
self.x = np.linspace(-self.ro, self.ro, self.Nx)
self.y = np.linspace(-self.ro, self.ro, self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_outer = np.squeeze(np.where(self.Xu**2 + self.Yu**2 >= self.ro**2)) # outer boundary
self.ind_unravel_inner = np.squeeze(np.where(self.Xu**2 + self.Yu**2 <= self.ri**2)) # inner boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu**2 + self.Yu**2 >= self.ro**2) | (self.Xu**2 + self.Yu**2 <= self.ri**2))) # boundary 1D unravel indices
self.ind_boundary = np.where((self.Xu**2 + self.Yu**2 >= self.ro**2) | (self.Xu**2 + self.Yu**2 <= self.ri**2)) # boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_boundary_unravel,:] = BD[self.ind_boundary_unravel,:]
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_outer] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_inner] = 0 # Dirichlet boundary condition at drain
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
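# Illustrative sketch for a Corbino disk (radii assumed, in microns):
#   cb = corbino(ro=50, ri=10)
#   cb.solve(lmbda=2.0)
#   cb.plot_potential()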
|
[
"matplotlib.pyplot.ylabel",
"diff_matrices.Diff_mat_2D",
"matplotlib.pyplot.pcolormesh",
"numpy.arange",
"matplotlib.pyplot.contourf",
"scipy.sparse.eye",
"numpy.where",
"numpy.searchsorted",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.ylim",
"numpy.abs",
"scipy.sparse.linalg.spsolve",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hlines",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"diff_matrices.Diff_mat_1D",
"numpy.all"
] |
[((1161, 1193), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'self.Nx'], {}), '(0, self.Lx, self.Nx)\n', (1172, 1193), True, 'import numpy as np\n'), ((1203, 1235), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'self.Ny'], {}), '(0, self.Ly, self.Ny)\n', (1214, 1235), True, 'import numpy as np\n'), ((1419, 1446), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (1430, 1446), True, 'import numpy as np\n'), ((1639, 1650), 'time.time', 'time.time', ([], {}), '()\n', (1648, 1650), False, 'import time\n'), ((2239, 2366), 'numpy.where', 'np.where', (['((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y == self.\n y[0]) | (self.Y == self.y[self.Ny - 1]))'], {}), '((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y ==\n self.y[0]) | (self.Y == self.y[self.Ny - 1]))\n', (2247, 2366), True, 'import numpy as np\n'), ((2537, 2566), 'diff_matrices.Diff_mat_2D', 'Diff_mat_2D', (['self.Nx', 'self.Ny'], {}), '(self.Nx, self.Ny)\n', (2548, 2566), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((2888, 2899), 'time.time', 'time.time', ([], {}), '()\n', (2897, 2899), False, 'import time\n'), ((3803, 3830), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (3811, 3830), True, 'import numpy as np\n'), ((4244, 4255), 'time.time', 'time.time', ([], {}), '()\n', (4253, 4255), False, 'import time\n'), ((4583, 4602), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (4589, 4602), True, 'import numpy as np\n'), ((5011, 5051), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x1'], {'side': '"""left"""'}), "(self.x, x1, side='left')\n", (5026, 5051), True, 'import numpy as np\n'), ((5059, 5099), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x2'], {'side': '"""left"""'}), "(self.x, x2, side='left')\n", (5074, 5099), True, 'import numpy as np\n'), ((5176, 5195), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (5182, 5195), True, 'import numpy as np\n'), ((5253, 5279), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (5263, 5279), True, 'import matplotlib.pyplot as plt\n'), ((5283, 5341), 'matplotlib.pyplot.contourf', 'plt.contourf', (['self.x', 'self.y', 'self.u.T', '(41)'], {'cmap': '"""inferno"""'}), "(self.x, self.y, self.u.T, 41, cmap='inferno')\n", (5295, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5425, 5450), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (5435, 5450), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y ($\\\\mu$m)"""'], {}), "('y ($\\\\mu$m)')\n", (5464, 5479), True, 'import matplotlib.pyplot as plt\n'), ((5483, 5493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5491, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5529, 5548), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (5535, 5548), True, 'import numpy as np\n'), ((5783, 5809), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (5793, 5809), True, 'import matplotlib.pyplot as plt\n'), ((5813, 5870), 'matplotlib.pyplot.plot', 'plt.plot', (['(self.x[0:-1] - self.dx)', 'r_top', '"""r"""'], {'label': '"""top"""'}), "(self.x[0:-1] - self.dx, r_top, 'r', label='top')\n", (5821, 5870), True, 'import matplotlib.pyplot as plt\n'), ((5873, 5936), 'matplotlib.pyplot.plot', 'plt.plot', (['(self.x[0:-1] - self.dx)', 'r_bottom', '"""b"""'], 
{'label': '"""bottom"""'}), "(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')\n", (5881, 5936), True, 'import matplotlib.pyplot as plt\n'), ((5939, 6037), 'matplotlib.pyplot.hlines', 'plt.hlines', (['rxx', 'self.x[0]', 'self.x[-1]'], {'linestyle': '"""dashed"""', 'color': '"""grey"""', 'label': '"""$\\\\rho_{xx}$"""'}), "(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey',\n label='$\\\\rho_{xx}$')\n", (5949, 6037), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6061), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (6046, 6061), True, 'import matplotlib.pyplot as plt\n'), ((6065, 6103), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rho_{xx}$ $(\\\\Omega)$"""'], {}), "('$\\\\rho_{xx}$ $(\\\\Omega)$')\n", (6075, 6103), True, 'import matplotlib.pyplot as plt\n'), ((6106, 6118), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6116, 6118), True, 'import matplotlib.pyplot as plt\n'), ((6121, 6141), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 12000]'], {}), '([0, 12000])\n', (6129, 6141), True, 'import matplotlib.pyplot as plt\n'), ((6145, 6155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6153, 6155), True, 'import matplotlib.pyplot as plt\n'), ((6606, 6625), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (6612, 6625), True, 'import numpy as np\n'), ((7174, 7205), 'numpy.abs', 'np.abs', (['(contact.x1 - contact.x2)'], {}), '(contact.x1 - contact.x2)\n', (7180, 7205), True, 'import numpy as np\n'), ((7400, 7419), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (7406, 7419), True, 'import numpy as np\n'), ((7898, 7917), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (7904, 7917), True, 'import numpy as np\n'), ((8555, 8586), 'numpy.abs', 'np.abs', (['(contact.x1 - contact.x2)'], {}), '(contact.x1 - contact.x2)\n', (8561, 8586), True, 'import numpy as np\n'), ((8865, 8884), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (8871, 8884), True, 'import numpy as np\n'), ((9571, 9591), 'diff_matrices.Diff_mat_1D', 'Diff_mat_1D', (['self.Ny'], {}), '(self.Ny)\n', (9582, 9591), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((10693, 10725), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'self.Nx'], {}), '(0, self.Lx, self.Nx)\n', (10704, 10725), True, 'import numpy as np\n'), ((10735, 10767), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'self.Ny'], {}), '(0, self.Ly, self.Ny)\n', (10746, 10767), True, 'import numpy as np\n'), ((10950, 10977), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (10961, 10977), True, 'import numpy as np\n'), ((11397, 11408), 'time.time', 'time.time', ([], {}), '()\n', (11406, 11408), False, 'import time\n'), ((11997, 12124), 'numpy.where', 'np.where', (['((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y == self.\n y[0]) | (self.Y == self.y[self.Ny - 1]))'], {}), '((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y ==\n self.y[0]) | (self.Y == self.y[self.Ny - 1]))\n', (12005, 12124), True, 'import numpy as np\n'), ((12576, 12605), 'diff_matrices.Diff_mat_2D', 'Diff_mat_2D', (['self.Nx', 'self.Ny'], {}), '(self.Nx, self.Ny)\n', (12587, 12605), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((12941, 12952), 'time.time', 'time.time', ([], {}), '()\n', (12950, 12952), False, 'import time\n'), ((14059, 14086), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', 
(14067, 14086), True, 'import numpy as np\n'), ((14418, 14429), 'time.time', 'time.time', ([], {}), '()\n', (14427, 14429), False, 'import time\n'), ((14748, 14767), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (14754, 14767), True, 'import numpy as np\n'), ((15097, 15137), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x1'], {'side': '"""left"""'}), "(self.x, x1, side='left')\n", (15112, 15137), True, 'import numpy as np\n'), ((15145, 15185), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x2'], {'side': '"""left"""'}), "(self.x, x2, side='left')\n", (15160, 15185), True, 'import numpy as np\n'), ((15260, 15279), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (15266, 15279), True, 'import numpy as np\n'), ((15337, 15363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (15347, 15363), True, 'import matplotlib.pyplot as plt\n'), ((15443, 15515), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['self.X', 'self.Y', 'self.u.T'], {'cmap': '"""inferno"""', 'vmin': '(0)', 'vmax': '(1)'}), "(self.X, self.Y, self.u.T, cmap='inferno', vmin=0, vmax=1)\n", (15457, 15515), True, 'import matplotlib.pyplot as plt\n'), ((15601, 15626), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (15611, 15626), True, 'import matplotlib.pyplot as plt\n'), ((15630, 15655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y ($\\\\mu$m)"""'], {}), "('y ($\\\\mu$m)')\n", (15640, 15655), True, 'import matplotlib.pyplot as plt\n'), ((15659, 15669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15667, 15669), True, 'import matplotlib.pyplot as plt\n'), ((16059, 16098), 'numpy.linspace', 'np.linspace', (['(-self.ro)', 'self.ro', 'self.Nx'], {}), '(-self.ro, self.ro, self.Nx)\n', (16070, 16098), True, 'import numpy as np\n'), ((16110, 16149), 'numpy.linspace', 'np.linspace', (['(-self.ro)', 'self.ro', 'self.Ny'], {}), '(-self.ro, self.ro, self.Ny)\n', (16121, 16149), True, 'import numpy as np\n'), ((16335, 16362), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (16346, 16362), True, 'import numpy as np\n'), ((16555, 16566), 'time.time', 'time.time', ([], {}), '()\n', (16564, 16566), False, 'import time\n'), ((16963, 17071), 'numpy.where', 'np.where', (['((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + self.Yu **\n 2 <= self.ri ** 2))'], {}), '((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + \n self.Yu ** 2 <= self.ri ** 2))\n', (16971, 17071), True, 'import numpy as np\n'), ((17235, 17264), 'diff_matrices.Diff_mat_2D', 'Diff_mat_2D', (['self.Nx', 'self.Ny'], {}), '(self.Nx, self.Ny)\n', (17246, 17264), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((17586, 17597), 'time.time', 'time.time', ([], {}), '()\n', (17595, 17597), False, 'import time\n'), ((17940, 17967), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (17948, 17967), True, 'import numpy as np\n'), ((18280, 18291), 'time.time', 'time.time', ([], {}), '()\n', (18289, 18291), False, 'import time\n'), ((18442, 18461), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (18448, 18461), True, 'import numpy as np\n'), ((18519, 18545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (18529, 18545), True, 'import matplotlib.pyplot as plt\n'), ((18549, 18607), 'matplotlib.pyplot.contourf', 'plt.contourf', (['self.x', 'self.y', 'self.u.T', 
'(41)'], {'cmap': '"""inferno"""'}), "(self.x, self.y, self.u.T, 41, cmap='inferno')\n", (18561, 18607), True, 'import matplotlib.pyplot as plt\n'), ((18691, 18716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (18701, 18716), True, 'import matplotlib.pyplot as plt\n'), ((18720, 18745), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y ($\\\\mu$m)"""'], {}), "('y ($\\\\mu$m)')\n", (18730, 18745), True, 'import matplotlib.pyplot as plt\n'), ((18749, 18759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18757, 18759), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1715), 'numpy.where', 'np.where', (['(self.Xu == self.x[0])'], {}), '(self.Xu == self.x[0])\n', (1693, 1715), True, 'import numpy as np\n'), ((1774, 1814), 'numpy.where', 'np.where', (['(self.Xu == self.x[self.Nx - 1])'], {}), '(self.Xu == self.x[self.Nx - 1])\n', (1782, 1814), True, 'import numpy as np\n'), ((1869, 1899), 'numpy.where', 'np.where', (['(self.Yu == self.y[0])'], {}), '(self.Yu == self.y[0])\n', (1877, 1899), True, 'import numpy as np\n'), ((1960, 2000), 'numpy.where', 'np.where', (['(self.Yu == self.y[self.Ny - 1])'], {}), '(self.Yu == self.y[self.Ny - 1])\n', (1968, 2000), True, 'import numpy as np\n'), ((2061, 2193), 'numpy.where', 'np.where', (['((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.Yu ==\n self.y[0]) | (self.Yu == self.y[self.Ny - 1]))'], {}), '((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.\n Yu == self.y[0]) | (self.Yu == self.y[self.Ny - 1]))\n', (2069, 2193), True, 'import numpy as np\n'), ((11443, 11473), 'numpy.where', 'np.where', (['(self.Xu == self.x[0])'], {}), '(self.Xu == self.x[0])\n', (11451, 11473), True, 'import numpy as np\n'), ((11532, 11572), 'numpy.where', 'np.where', (['(self.Xu == self.x[self.Nx - 1])'], {}), '(self.Xu == self.x[self.Nx - 1])\n', (11540, 11572), True, 'import numpy as np\n'), ((11627, 11657), 'numpy.where', 'np.where', (['(self.Yu == self.y[0])'], {}), '(self.Yu == self.y[0])\n', (11635, 11657), True, 'import numpy as np\n'), ((11718, 11758), 'numpy.where', 'np.where', (['(self.Yu == self.y[self.Ny - 1])'], {}), '(self.Yu == self.y[self.Ny - 1])\n', (11726, 11758), True, 'import numpy as np\n'), ((11819, 11951), 'numpy.where', 'np.where', (['((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.Yu ==\n self.y[0]) | (self.Yu == self.y[self.Ny - 1]))'], {}), '((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.\n Yu == self.y[0]) | (self.Yu == self.y[self.Ny - 1]))\n', (11827, 11951), True, 'import numpy as np\n'), ((12169, 12265), 'numpy.where', 'np.where', (['((self.Yu == self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <= self.\n source_x2))'], {}), '((self.Yu == self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <=\n self.source_x2))\n', (12177, 12265), True, 'import numpy as np\n'), ((12310, 12404), 'numpy.where', 'np.where', (['((self.Yu == self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <= self.\n drain_x2))'], {}), '((self.Yu == self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <=\n self.drain_x2))\n', (12318, 12404), True, 'import numpy as np\n'), ((16605, 16658), 'numpy.where', 'np.where', (['(self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2)'], {}), '(self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2)\n', (16613, 16658), True, 'import numpy as np\n'), ((16709, 16762), 'numpy.where', 'np.where', (['(self.Xu ** 2 + self.Yu ** 2 <= self.ri ** 2)'], {}), '(self.Xu ** 2 + self.Yu ** 2 <= self.ri ** 2)\n', (16717, 16762), True, 
'import numpy as np\n'), ((16817, 16925), 'numpy.where', 'np.where', (['((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + self.Yu **\n 2 <= self.ri ** 2))'], {}), '((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + \n self.Yu ** 2 <= self.ri ** 2))\n', (16825, 16925), True, 'import numpy as np\n'), ((2909, 2934), 'scipy.sparse.eye', 'sp.eye', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (2915, 2934), True, 'import scipy.sparse as sp\n'), ((5370, 5393), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (5379, 5393), True, 'import numpy as np\n'), ((9736, 9765), 'numpy.sum', 'np.sum', (['(sxx * E_x + sxy * E_y)'], {}), '(sxx * E_x + sxy * E_y)\n', (9742, 9765), True, 'import numpy as np\n'), ((12962, 12987), 'scipy.sparse.eye', 'sp.eye', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (12968, 12987), True, 'import scipy.sparse as sp\n'), ((15546, 15569), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (15555, 15569), True, 'import numpy as np\n'), ((17607, 17632), 'scipy.sparse.eye', 'sp.eye', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (17613, 17632), True, 'import scipy.sparse as sp\n'), ((18636, 18659), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (18645, 18659), True, 'import numpy as np\n'), ((2414, 2425), 'time.time', 'time.time', ([], {}), '()\n', (2423, 2425), False, 'import time\n'), ((4200, 4211), 'time.time', 'time.time', ([], {}), '()\n', (4209, 4211), False, 'import time\n'), ((4267, 4284), 'scipy.sparse.linalg.spsolve', 'spsolve', (['L_sys', 'g'], {}), '(L_sys, g)\n', (4274, 4284), False, 'from scipy.sparse.linalg import spsolve\n'), ((4347, 4358), 'time.time', 'time.time', ([], {}), '()\n', (4356, 4358), False, 'import time\n'), ((6861, 6888), 'numpy.abs', 'np.abs', (['(self.x - contact.x1)'], {}), '(self.x - contact.x1)\n', (6867, 6888), True, 'import numpy as np\n'), ((6997, 7024), 'numpy.abs', 'np.abs', (['(self.x - contact.x2)'], {}), '(self.x - contact.x2)\n', (7003, 7024), True, 'import numpy as np\n'), ((8240, 8267), 'numpy.abs', 'np.abs', (['(self.x - contact.x1)'], {}), '(self.x - contact.x1)\n', (8246, 8267), True, 'import numpy as np\n'), ((8377, 8404), 'numpy.abs', 'np.abs', (['(self.x - contact.x2)'], {}), '(self.x - contact.x2)\n', (8383, 8404), True, 'import numpy as np\n'), ((12453, 12464), 'time.time', 'time.time', ([], {}), '()\n', (12462, 12464), False, 'import time\n'), ((14374, 14385), 'time.time', 'time.time', ([], {}), '()\n', (14383, 14385), False, 'import time\n'), ((14441, 14458), 'scipy.sparse.linalg.spsolve', 'spsolve', (['L_sys', 'g'], {}), '(L_sys, g)\n', (14448, 14458), False, 'from scipy.sparse.linalg import spsolve\n'), ((14521, 14532), 'time.time', 'time.time', ([], {}), '()\n', (14530, 14532), False, 'import time\n'), ((17112, 17123), 'time.time', 'time.time', ([], {}), '()\n', (17121, 17123), False, 'import time\n'), ((18236, 18247), 'time.time', 'time.time', ([], {}), '()\n', (18245, 18247), False, 'import time\n'), ((18303, 18320), 'scipy.sparse.linalg.spsolve', 'spsolve', (['L_sys', 'g'], {}), '(L_sys, g)\n', (18310, 18320), False, 'from scipy.sparse.linalg import spsolve\n'), ((18383, 18394), 'time.time', 'time.time', ([], {}), '()\n', (18392, 18394), False, 'import time\n')]
|
import unittest
from electropy.charge import Charge
import numpy as np
from electropy import volume
class VolumeTest(unittest.TestCase):
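    """Checks that volume-sampled potentials and fields agree with the
    point-wise Charge methods, including the per-component field variants."""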
def setUp(self):
self.position_1 = [0, 0, 0]
self.position_2 = [-2, 4, 1]
self.charge = 7e-9
def tearDown(self):
pass
# Potential function volume tests
def test_potential_volume_at_point_equal_class_potential(self):
charge = Charge(self.position_1, self.charge)
potential_volume = volume.potential(
[charge],
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
)
# Point = [-6, -6, -6]
potential_at_point = potential_volume[4][4][4]
expected_potential = charge.potential([-6, -6, -6])
np.testing.assert_equal(potential_at_point, expected_potential)
def test_two_charge_potential_volume_eq_sum_of_class_potential(self):
charges = [Charge(self.position_1, self.charge)]
charges.append(Charge(self.position_2, -self.charge))
potential_volume = volume.potential(
charges,
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
)
# Point = [-6, -5, -3]
potential_at_point = potential_volume[4][5][7]
expected_potential = np.add(
charges[0].potential([-6, -5, -3]),
charges[1].potential([-6, -5, -3]),
)
np.testing.assert_equal(potential_at_point, expected_potential)
# Field function volume tests
def test_field_volume_at_point_equal_class_field(self):
charge = Charge(self.position_1, self.charge)
field_volume = volume.field(
[charge],
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
)
# Point = [-10, -6, -3]
field_at_point = field_volume[0][4][7]
expected_field = charge.field([-10, -6, -3])
np.testing.assert_equal(field_at_point, expected_field)
def test_two_charge_field_volume_eq_sum_of_class_field(self):
charges = [Charge(self.position_1, self.charge)]
charges.append(Charge(self.position_2, -self.charge))
field_volume = volume.field(
charges,
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
)
# Point = [-6, -5, -3]
field_at_point = field_volume[4][5][7]
expected_field = np.add(
charges[0].field([-6, -5, -3]), charges[1].field([-6, -5, -3])
)
np.testing.assert_equal(field_at_point, expected_field)
def test_charge_field_volume_x_components_eq_sum_of_class_field_x(self):
charges = [Charge(self.position_1, self.charge)]
charges.append(Charge(self.position_2, -self.charge))
field_volume = volume.field(
charges,
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
component="x",
)
# Point = [-6, -5, -3]
field_at_point = field_volume[4][5][7]
expected_field = np.add(
charges[0].field([-6, -5, -3], component="x"),
charges[1].field([-6, -5, -3], component="x"),
)
np.testing.assert_equal(field_at_point, expected_field)
def test_charge_field_volume_y_components_eq_sum_of_class_field_y(self):
charges = [Charge(self.position_1, self.charge)]
charges.append(Charge(self.position_2, -self.charge))
field_volume = volume.field(
charges,
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
component="y",
)
# Point = [-6, -5, -3]
field_at_point = field_volume[4][5][7]
expected_field = np.add(
charges[0].field([-6, -5, -3], component="y"),
charges[1].field([-6, -5, -3], component="y"),
)
np.testing.assert_equal(field_at_point, expected_field)
def test_charge_field_volume_z_components_eq_sum_of_class_field_z(self):
charges = [Charge(self.position_1, self.charge)]
charges.append(Charge(self.position_2, -self.charge))
field_volume = volume.field(
charges,
x_range=[-10, 10],
y_range=[-10, 10],
z_range=[-10, 10],
h=1,
component="z",
)
# Point = [-6, -5, -3]
field_at_point = field_volume[4][5][7]
expected_field = np.add(
charges[0].field([-6, -5, -3], component="z"),
charges[1].field([-6, -5, -3], component="z"),
)
np.testing.assert_equal(field_at_point, expected_field)
def test_field_returns_singleton_dim_for_single_slice(self):
charge = Charge(self.position_1, self.charge)
field_volume = volume.field(
[charge],
x_range=[-10, 10],
y_range=[1, 1],
z_range=[-10, 10],
h=0.1,
)
expected_shape = (201, 1, 201)
actual_shape = field_volume.shape
np.testing.assert_equal(actual_shape, expected_shape)
def test__arange_almost_equals_numpy_arange(self):
actual = volume._arange(-10, 10, 0.1) # Mine is rounder anyways =)
expected = np.arange(-10, 10 + 0.1, 0.1)
np.testing.assert_almost_equal(actual, expected)
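# Standard entry point so the suite can also run directly with plain Python
# (any test runner works too):
if __name__ == '__main__':
    unittest.main()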
|
[
"numpy.testing.assert_equal",
"electropy.volume.field",
"electropy.charge.Charge",
"numpy.testing.assert_almost_equal",
"electropy.volume.potential",
"electropy.volume._arange",
"numpy.arange"
] |
[((425, 461), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (431, 461), False, 'from electropy.charge import Charge\n'), ((490, 583), 'electropy.volume.potential', 'volume.potential', (['[charge]'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '([charge], x_range=[-10, 10], y_range=[-10, 10], z_range=[-\n 10, 10], h=1)\n', (506, 583), False, 'from electropy import volume\n'), ((807, 870), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['potential_at_point', 'expected_potential'], {}), '(potential_at_point, expected_potential)\n', (830, 870), True, 'import numpy as np\n'), ((1094, 1186), 'electropy.volume.potential', 'volume.potential', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-\n 10, 10], h=1)\n', (1110, 1186), False, 'from electropy import volume\n'), ((1493, 1556), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['potential_at_point', 'expected_potential'], {}), '(potential_at_point, expected_potential)\n', (1516, 1556), True, 'import numpy as np\n'), ((1670, 1706), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (1676, 1706), False, 'from electropy.charge import Charge\n'), ((1731, 1820), 'electropy.volume.field', 'volume.field', (['[charge]'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '([charge], x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1)\n', (1743, 1820), False, 'from electropy import volume\n'), ((2030, 2085), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (2053, 2085), True, 'import numpy as np\n'), ((2297, 2385), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1)\n', (2309, 2385), False, 'from electropy import volume\n'), ((2659, 2714), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (2682, 2714), True, 'import numpy as np\n'), ((2937, 3040), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)', 'component': '"""x"""'}), "(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1, component='x')\n", (2949, 3040), False, 'from electropy import volume\n'), ((3369, 3424), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (3392, 3424), True, 'import numpy as np\n'), ((3647, 3750), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)', 'component': '"""y"""'}), "(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1, component='y')\n", (3659, 3750), False, 'from electropy import volume\n'), ((4079, 4134), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (4102, 4134), True, 'import numpy as np\n'), ((4357, 4460), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)', 'component': '"""z"""'}), "(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1, component='z')\n", (4369, 4460), False, 'from electropy import volume\n'), ((4789, 4844), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (4812, 4844), True, 'import numpy as np\n'), ((4929, 4965), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (4935, 4965), False, 'from electropy.charge import Charge\n'), ((4990, 5077), 'electropy.volume.field', 'volume.field', (['[charge]'], {'x_range': '[-10, 10]', 'y_range': '[1, 1]', 'z_range': '[-10, 10]', 'h': '(0.1)'}), '([charge], x_range=[-10, 10], y_range=[1, 1], z_range=[-10, 10],\n h=0.1)\n', (5002, 5077), False, 'from electropy import volume\n'), ((5236, 5289), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual_shape', 'expected_shape'], {}), '(actual_shape, expected_shape)\n', (5259, 5289), True, 'import numpy as np\n'), ((5364, 5392), 'electropy.volume._arange', 'volume._arange', (['(-10)', '(10)', '(0.1)'], {}), '(-10, 10, 0.1)\n', (5378, 5392), False, 'from electropy import volume\n'), ((5442, 5471), 'numpy.arange', 'np.arange', (['(-10)', '(10 + 0.1)', '(0.1)'], {}), '(-10, 10 + 0.1, 0.1)\n', (5451, 5471), True, 'import numpy as np\n'), ((5480, 5528), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5510, 5528), True, 'import numpy as np\n'), ((966, 1002), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (972, 1002), False, 'from electropy.charge import Charge\n'), ((1027, 1064), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (1033, 1064), False, 'from electropy.charge import Charge\n'), ((2173, 2209), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (2179, 2209), False, 'from electropy.charge import Charge\n'), ((2234, 2271), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (2240, 2271), False, 'from electropy.charge import Charge\n'), ((2813, 2849), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (2819, 2849), False, 'from electropy.charge import Charge\n'), ((2874, 2911), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (2880, 2911), False, 'from electropy.charge import Charge\n'), ((3523, 3559), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (3529, 3559), False, 'from electropy.charge import Charge\n'), ((3584, 3621), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (3590, 3621), False, 'from electropy.charge import Charge\n'), ((4233, 4269), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (4239, 4269), False, 'from electropy.charge import Charge\n'), ((4294, 4331), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (4300, 4331), False, 'from electropy.charge import Charge\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils.validation import check_random_state
from sklearn.datasets import fetch_olivetti_faces
from sklearn.externals import joblib
rng = check_random_state(21)
dataset = fetch_olivetti_faces()
X = dataset.images.reshape(dataset.images.shape[0], -1)
train = X[dataset.target < 30]
test = X[dataset.target >= 30]
n_faces = 3
face_ids = rng.randint(test.shape[0], size=(n_faces,))
test = test[face_ids, :]
n_pixels = X.shape[1]
# Upper half of the faces
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
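# Sanity check (hypothetical assertion; the Olivetti faces are 64x64, so
# n_pixels is 4096 and each half holds 2048 pixels):
# assert X_train.shape[1] + y_train.shape[1] == n_pixels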
n_rows = 2
imshape = (64, 64,)
def test_model(y_pred, model_name):
plt.figure(figsize=(1.7*n_faces, 4))
plt.suptitle('Face completion with ' + model_name, size=12)
# plot the true faces first
for i in range(n_faces):
plt.subplot(int( '{}{}{}'.format( n_rows, n_faces, i + 1 ) ))
plt.axis('off')
plt.imshow(np.hstack((X_test[i], y_test[i])).reshape(imshape), cmap=plt.cm.gray, interpolation='nearest')
# then plot the predictions
for i in range(n_faces):
plt.subplot(int( '{}{}{}'.format( n_rows, n_faces, i + n_faces + 1 ) ))
plt.axis('off')
plt.imshow(np.hstack((X_test[i], y_pred[i])).reshape(imshape), cmap=plt.cm.gray, interpolation='nearest')
# model_name is prefixed with 'Face completion with ' inside test_model,
# so pass only the estimator's name here
test_model(joblib.load('../trained_models/nn_face_completion.pkl').predict(X_test), 'a Neural Network')
test_model(joblib.load('../trained_models/knn_face_completion.pkl').predict(X_test), 'a k-Nearest Neighbors')
test_model(joblib.load('../trained_models/dt_face_completion.pkl').predict(X_test), 'a Decision Tree')
plt.show()
|
[
"numpy.hstack",
"sklearn.utils.validation.check_random_state",
"sklearn.externals.joblib.load",
"sklearn.datasets.fetch_olivetti_faces",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show"
] |
[((202, 224), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['(21)'], {}), '(21)\n', (220, 224), False, 'from sklearn.utils.validation import check_random_state\n'), ((235, 257), 'sklearn.datasets.fetch_olivetti_faces', 'fetch_olivetti_faces', ([], {}), '()\n', (255, 257), False, 'from sklearn.datasets import fetch_olivetti_faces\n'), ((1812, 1822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1820, 1822), True, 'import matplotlib.pyplot as plt\n'), ((769, 807), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.7 * n_faces, 4)'}), '(figsize=(1.7 * n_faces, 4))\n', (779, 807), True, 'import matplotlib.pyplot as plt\n'), ((810, 869), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Face completion with ' + model_name)"], {'size': '(12)'}), "('Face completion with ' + model_name, size=12)\n", (822, 869), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1026), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1019, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1300, 1315), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1308, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1498), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../trained_models/nn_face_completion.pkl"""'], {}), "('../trained_models/nn_face_completion.pkl')\n", (1454, 1498), False, 'from sklearn.externals import joblib\n'), ((1568, 1624), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../trained_models/knn_face_completion.pkl"""'], {}), "('../trained_models/knn_face_completion.pkl')\n", (1579, 1624), False, 'from sklearn.externals import joblib\n'), ((1699, 1754), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../trained_models/dt_face_completion.pkl"""'], {}), "('../trained_models/dt_face_completion.pkl')\n", (1710, 1754), False, 'from sklearn.externals import joblib\n'), ((1046, 1079), 'numpy.hstack', 'np.hstack', (['(X_test[i], y_test[i])'], {}), '((X_test[i], y_test[i]))\n', (1055, 1079), True, 'import numpy as np\n'), ((1335, 1368), 'numpy.hstack', 'np.hstack', (['(X_test[i], y_pred[i])'], {}), '((X_test[i], y_pred[i]))\n', (1344, 1368), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import models
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import numpy as np
import cv2
# prebuild model with pre-trained weights on imagenet
base_model = VGG16(weights='imagenet', include_top=True)
print(base_model)
for i, layer in enumerate(base_model.layers):
    print(i, layer.name, layer.output_shape)
# extract features from block4_pool block
model = models.Model(inputs=base_model.input,
outputs=base_model.get_layer('block4_pool').output)
img_path = 'cat.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# get the features from this block
features = model.predict(x)
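# Note: for a 224x224 input, the block4_pool activations returned above have
# shape (1, 14, 14, 512) -- a 14x14 spatial grid of 512-channel descriptors.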
print(features)
|
[
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.vgg16.preprocess_input",
"numpy.expand_dims",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.preprocessing.image.img_to_array"
] |
[((336, 379), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (341, 379), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((669, 717), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (683, 717), False, 'from tensorflow.keras.preprocessing import image\n'), ((723, 746), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (741, 746), False, 'from tensorflow.keras.preprocessing import image\n'), ((752, 777), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (766, 777), True, 'import numpy as np\n'), ((783, 802), 'tensorflow.keras.applications.vgg16.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (799, 802), False, 'from tensorflow.keras.applications.vgg16 import preprocess_input\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 08:38:28 2020
pyqt realtime plot tutorial
source: https://www.learnpyqt.com/courses/graphics-plotting/plotting-pyqtgraph/
@author: nlourie
"""
from PyQt5 import QtWidgets, QtCore,uic
from pyqtgraph import PlotWidget, plot,QtGui
import pyqtgraph as pg
import sys # We need sys so that we can pass argv to QApplication
import os
from datetime import datetime
import numpy as np
from scipy import signal
import board
import busio
import adafruit_lps35hw
import time
from scipy import interpolate
#import monitor_utils as mu
# Initialize the i2c bus
i2c = busio.I2C(board.SCL, board.SDA)
# Using the adafruit_lps35hw class to read in the pressure sensor
# note the address must be in decimal.
# allowed addresses are:
# 92 (0x5c - if you put jumper from SDO to Gnd)
# 93 (0x5d - default)
p2 = adafruit_lps35hw.LPS35HW(i2c, address = 92)
p1 = adafruit_lps35hw.LPS35HW(i2c, address = 93)
p1.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
p2.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
mbar2cmh20 = 1.01972
# Now read out the pressure difference between the sensors
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print('')
print('Now zero the pressure:')
# Not sure why sometimes I have to do this twice??
p1.zero_pressure()
p1.zero_pressure()
time.sleep(1)
p2.zero_pressure()
p2.zero_pressure()
time.sleep(1)
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print()
def breath_detect_coarse(flow,fs,plotflag = False):
"""
%% This function detects peaks of flow signal
% Inputs:
% flow: flow signal
% fs: sampling frequency
% plotflag: set to 1 to plot
% Output:
% peak (location, amplitude)
% Written by: <NAME>, PhD
% Email: <EMAIL>
% Updated on: 12 Nov 2015.
% Ver: 1.0
# Converted to python by: <NAME>, PhD
# Email: <EMAIL>
# Updated on: April, 2020
"""
# detect peaks of flow signal
minpeakwidth = fs*0.3
peakdistance = fs*1.5
#print('peakdistance = ',peakdistance)
minPeak = 0.05 # flow threshold = 0.05 (L/s)
minpeakprominence = 0.05
peak_index, _ = signal.find_peaks(flow,
height = minPeak,
distance = peakdistance,
prominence = minpeakprominence,
width = minpeakwidth)
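    # Note: find_peaks expects distances/widths in samples, so fs*1.5 and
    # fs*0.3 above correspond to 1.5 s and 0.3 s of signal respectively.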
"""
valley_index, _ = signal.find_peaks(-1*flow,
height = minPeak,
distance = peakdistance,
prominence = minpeakprominence,
width = minpeakwidth)
"""
print('found peaks at index = ',peak_index)
return peak_index
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowTitle("Standalone Respiratory Monitor")
self.graph0 = pg.PlotWidget()
self.graph1 = pg.PlotWidget()
self.graph2 = pg.PlotWidget()
self.graph3 = pg.PlotWidget()
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.graph0)
layout.addWidget(self.graph1)
layout.addWidget(self.graph2)
layout.addWidget(self.graph3)
widget = QtWidgets.QWidget()
widget.setLayout(layout)
# make the window with a graph widget
#self.graph1 = pg.PlotWidget()
self.setCentralWidget(widget)
# set the plot properties
self.graph1.setBackground('k')
self.graph0.showGrid(x = True, y = True)
self.graph1.showGrid(x=True,y=True)
self.graph2.showGrid(x = True, y = True)
self.graph3.showGrid(x = True, y = True)
# Set the label properties with valid CSS commands -- https://groups.google.com/forum/#!topic/pyqtgraph/jS1Ju8R6PXk
labelStyle = {'color': '#FFF', 'font-size': '12pt'}
self.graph0.setLabel('left','P','cmH20',**labelStyle)
self.graph1.setLabel('left','Flow','L/s',**labelStyle)
self.graph3.setLabel('bottom', 'Time', 's', **labelStyle)
#self.graph2.setLabel('left', 'V raw','L',**labelStyle)
self.graph3.setLabel('left','V corr','L',**labelStyle)
# change the plot range
#self.graph0.setYRange(-30,30,padding = 0.1)
#self.graph1.setYRange(-2,2,padding = 0.1)
#self.graph3.setYRange(-0.5,1.5,padding = 0.1)
#self.graph3.setYRange(200,200,padding = 0.1)
        self.x = [0]
        self.t = [datetime.utcnow().timestamp()]
        self.dt = [0]
#self.y = [honeywell_v2f(chan.voltage)]
self.dp = [(p1.pressure - p2.pressure)*mbar2cmh20]
self.p1 = [(p1.pressure)*mbar2cmh20]
self.p2 = [(p2.pressure)*mbar2cmh20]
self.flow = [0]
self.vol = [0]
print('P1 = ',p1.pressure,' cmH20')
print('P2 = ',p2.pressure,' cmH20')
# plot data: x, y values
# make a QPen object to hold the marker properties
pen = pg.mkPen(color = 'y',width = 1)
pen2 = pg.mkPen(color = 'b',width = 2)
self.data_line01 = self.graph0.plot(self.dt,self.p1,pen = pen)
self.data_line02 = self.graph0.plot(self.dt,self.p2,pen = pen2)
self.data_line1 = self.graph1.plot(self.dt, self.flow,pen = pen)
# graph2
self.data_line21 = self.graph2.plot(self.dt,self.flow,pen = pen)
self.data_line22 = self.graph2.plot(self.dt,self.flow,pen = pen)
# graph3
self.data_line3 = self.graph3.plot(self.dt,self.vol,pen = pen)
self.calibrating = False
"""
# Slower timer
self.t_cal = 100
self.cal_timer = QtCore.QTimer()
self.cal_timer.setInterval(self.t_cal)
self.cal_timer.timeout.connect(self.update_cal)
self.cal_timer.start()
"""
# Stuff with the timer
self.t_update = 10 #update time of timer in ms
self.timer = QtCore.QTimer()
self.timer.setInterval(self.t_update)
self.timer.timeout.connect(self.update_plot_data)
self.timer.start()
self.drift_model = [0,datetime.utcnow().timestamp()/1000*self.t_update]
self.i_valleys = []
self.time_to_show = 30 #s
def update_plot_data(self):
# This is what happens every timer loop
if self.dt[-1] >= self.time_to_show:
self.x = self.x[1:] # Remove the first element
#self.y = self.y[1:] # remove the first element
self.dp = self.dp[1:]
self.t = self.t[1:] # remove the first element
self.dt= self.dt[1:]
self.p1 = self.p1[1:]
self.p2 = self.p2[1:]
self.vol = self.vol[1:]
self.flow = self.flow[1:]
self.x.append(self.x[-1] + 1) # add a new value 1 higher than the last
self.t.append(datetime.utcnow().timestamp())
self.dt = [(ti - self.t[0]) for ti in self.t]
dp_cmh20 = ((p1.pressure - p2.pressure))*mbar2cmh20
self.dp.append(dp_cmh20)
self.flow.append(dp_cmh20)
self.p1.append(p1.pressure*mbar2cmh20)
self.p2.append(p2.pressure*mbar2cmh20)
# remove any linear trend in the volume data since it's just nonsense.
# THis should zero it out okay if there's no noticeable "dips"
self.vol = signal.detrend(np.cumsum(self.flow))
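        # Note: np.cumsum approximates the integral of flow; a rigorous volume
        # would also multiply by the sample period (1/fs), omitted here, so the
        # vertical scale is arbitrary. detrend then removes linear sensor drift.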
self.fs = 1/(self.t[-1] - self.t[-2])
print('Sample Freq = ',self.fs)
negative_mean_subtracted_volume = [-1*(v-np.mean(self.vol)) for v in self.vol]
i_valleys = breath_detect_coarse(negative_mean_subtracted_volume,fs = self.fs,plotflag = False)
self.i_valleys = i_valleys
#print('i_valleys = ',self.i_valleys)
#print('datatype of i_valleys = ',type(self.i_valleys))
if len(self.i_valleys) >= 2:
t = np.array(self.t)
vol = np.array(self.vol)
dt = np.array(self.dt)
print('found peaks at dt = ',dt[self.i_valleys])
#self.drift_model = np.polyfit(t[self.i_valleys],vol[self.i_valleys],1)
#self.v_drift = np.polyval(self.drift_model,t)
#self.vol_corr = vol - self.v_drift
#self.data_line22.setData(self.dt,self.v_drift)
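            # Drift-model sketch: piecewise-linear interpolation between breath
            # valleys, extended beyond the first/last valley with a straight
            # line fit through the two nearest valleys (np.polyfit below).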
self.drift_model = interpolate.interp1d(t[i_valleys],vol[i_valleys],kind = 'linear')
v_drift_within_spline = self.drift_model(t[i_valleys[0]:i_valleys[-1]])
v_drift = np.zeros(len(t))
            # a linear fit needs two valleys, hence the 0:2 slice
            v_drift[0:self.i_valleys[1]] = np.polyval(np.polyfit(t[self.i_valleys[0:2]],vol[self.i_valleys[0:2]],1),t[0:self.i_valleys[1]])
v_drift[self.i_valleys[0]:self.i_valleys[-1]] = v_drift_within_spline
v_drift[self.i_valleys[-1]:] = np.polyval(np.polyfit(t[self.i_valleys[-2:]],vol[self.i_valleys[-2:]],1),t[self.i_valleys[-1]:])
self.v_drift = v_drift
self.vol_corr = vol - v_drift
self.data_line22.setData(self.dt,self.v_drift)
else:
self.vol_corr = self.vol
self.data_line01.setData(self.dt,self.p1)
self.data_line02.setData(self.dt,self.p2)
self.data_line1.setData(self.dt,self.flow) #update the data
self.data_line21.setData(self.dt,self.vol)
self.data_line3.setData(self.dt,self.vol_corr)
"""
def update_cal(self) :
print ('len dt = ',len(self.dt))
if len(self.dt) > 50:
# try to run the monitor utils functions
fs = 1000/self.t_update
i_peaks,i_valleys,i_infl_points,vol_last_peak,flow,self.vol_corr,self.vol_offset,time,vol,drift_model = mu.get_processed_flow(np.array(self.t),np.array(self.y),fs,SmoothingParam = 0,smoothflag=True,plotflag = False)
if len(i_peaks) > 2:
self.drift_model = drift_model
print('updating calibration')
self.calibrating = True
self.data_line2.setData(self.dt,vol)
self.data_line5.setData(self.dt,np.polyval(self.drift_model,time))
self.data_line3.setData(self.dt,vol - np.polyval(self.drift_model,time))
print('drift model = ',self.drift_model)
"""
def main():
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"PyQt5.QtWidgets.QWidget",
"numpy.mean",
"adafruit_lps35hw.LPS35HW",
"numpy.polyfit",
"datetime.datetime.utcnow",
"busio.I2C",
"PyQt5.QtCore.QTimer",
"time.sleep",
"scipy.interpolate.interp1d",
"numpy.array",
"pyqtgraph.PlotWidget",
"numpy.cumsum",
"PyQt5.QtWidgets.QApplication",
"scipy.signal.find_peaks",
"PyQt5.QtWidgets.QVBoxLayout",
"pyqtgraph.mkPen"
] |
[((632, 663), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (641, 663), False, 'import busio\n'), ((904, 945), 'adafruit_lps35hw.LPS35HW', 'adafruit_lps35hw.LPS35HW', (['i2c'], {'address': '(92)'}), '(i2c, address=92)\n', (928, 945), False, 'import adafruit_lps35hw\n'), ((953, 994), 'adafruit_lps35hw.LPS35HW', 'adafruit_lps35hw.LPS35HW', (['i2c'], {'address': '(93)'}), '(i2c, address=93)\n', (977, 994), False, 'import adafruit_lps35hw\n'), ((1488, 1501), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1498, 1501), False, 'import time\n'), ((1540, 1553), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1550, 1553), False, 'import time\n'), ((2510, 2627), 'scipy.signal.find_peaks', 'signal.find_peaks', (['flow'], {'height': 'minPeak', 'distance': 'peakdistance', 'prominence': 'minpeakprominence', 'width': 'minpeakwidth'}), '(flow, height=minPeak, distance=peakdistance, prominence=\n minpeakprominence, width=minpeakwidth)\n', (2527, 2627), False, 'from scipy import signal\n'), ((10966, 10998), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (10988, 10998), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((3390, 3405), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3403, 3405), True, 'import pyqtgraph as pg\n'), ((3428, 3443), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3441, 3443), True, 'import pyqtgraph as pg\n'), ((3466, 3481), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3479, 3481), True, 'import pyqtgraph as pg\n'), ((3504, 3519), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3517, 3519), True, 'import pyqtgraph as pg\n'), ((3546, 3569), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (3567, 3569), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((3748, 3767), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (3765, 3767), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((5597, 5625), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '"""y"""', 'width': '(1)'}), "(color='y', width=1)\n", (5605, 5625), True, 'import pyqtgraph as pg\n'), ((5644, 5672), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '"""b"""', 'width': '(2)'}), "(color='b', width=2)\n", (5652, 5672), True, 'import pyqtgraph as pg\n'), ((6595, 6610), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (6608, 6610), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((8054, 8074), 'numpy.cumsum', 'np.cumsum', (['self.flow'], {}), '(self.flow)\n', (8063, 8074), True, 'import numpy as np\n'), ((8592, 8608), 'numpy.array', 'np.array', (['self.t'], {}), '(self.t)\n', (8600, 8608), True, 'import numpy as np\n'), ((8627, 8645), 'numpy.array', 'np.array', (['self.vol'], {}), '(self.vol)\n', (8635, 8645), True, 'import numpy as np\n'), ((8663, 8680), 'numpy.array', 'np.array', (['self.dt'], {}), '(self.dt)\n', (8671, 8680), True, 'import numpy as np\n'), ((9037, 9102), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['t[i_valleys]', 'vol[i_valleys]'], {'kind': '"""linear"""'}), "(t[i_valleys], vol[i_valleys], kind='linear')\n", (9057, 9102), False, 'from scipy import interpolate\n'), ((9280, 9338), 'numpy.polyfit', 'np.polyfit', (['t[i_valleys[0:1]]', 'vol[self.i_valleys[0:1]]', '(1)'], {}), '(t[i_valleys[0:1]], vol[self.i_valleys[0:1]], 1)\n', (9290, 9338), True, 'import numpy as np\n'), ((9498, 9561), 'numpy.polyfit', 'np.polyfit', (['t[self.i_valleys[-2:]]', 'vol[self.i_valleys[-2:]]', '(1)'], {}), '(t[self.i_valleys[-2:]], vol[self.i_valleys[-2:]], 1)\n', (9508, 9561), True, 'import numpy as np\n'), ((5050, 5067), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5065, 5067), False, 'from datetime import datetime\n'), ((7554, 7571), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7569, 7571), False, 'from datetime import datetime\n'), ((8221, 8238), 'numpy.mean', 'np.mean', (['self.vol'], {}), '(self.vol)\n', (8228, 8238), True, 'import numpy as np\n'), ((6790, 6807), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6805, 6807), False, 'from datetime import datetime\n')]
|
import pandas as pd
import numpy as np
print(pd.__version__)
# 1.0.0
print(pd.DataFrame.agg is pd.DataFrame.aggregate)
# True
df = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]})
print(df)
# A B
# 0 0 3
# 1 1 4
# 2 2 5
print(df.agg(['sum', 'mean', 'min', 'max']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
# min 0.0 3.0
# max 2.0 5.0
print(type(df.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg(['sum']))
# A B
# sum 3 12
print(type(df.agg(['sum'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg('sum'))
# A 3
# B 12
# dtype: int64
print(type(df.agg('sum')))
# <class 'pandas.core.series.Series'>
print(df.agg({'A': ['sum', 'min', 'max'],
'B': ['mean', 'min', 'max']}))
# A B
# max 2.0 5.0
# mean NaN 4.0
# min 0.0 3.0
# sum 3.0 NaN
print(df.agg({'A': 'sum', 'B': 'mean'}))
# A 3.0
# B 4.0
# dtype: float64
print(df.agg({'A': ['sum'], 'B': ['mean']}))
# A B
# mean NaN 4.0
# sum 3.0 NaN
print(df.agg({'A': ['min', 'max'], 'B': 'mean'}))
# A B
# max 2.0 NaN
# mean NaN 4.0
# min 0.0 NaN
print(df.agg(['sum', 'mean', 'min', 'max'], axis=1))
# sum mean min max
# 0 3.0 1.5 0.0 3.0
# 1 5.0 2.5 1.0 4.0
# 2 7.0 3.5 2.0 5.0
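# Note: with axis=1 each function is applied across the columns, producing
# one result row per input row (a DataFrame indexed like df).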
s = df['A']
print(s)
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(s.agg(['sum', 'mean', 'min', 'max']))
# sum 3.0
# mean 1.0
# min 0.0
# max 2.0
# Name: A, dtype: float64
print(type(s.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.series.Series'>
print(s.agg(['sum']))
# sum 3
# Name: A, dtype: int64
print(type(s.agg(['sum'])))
# <class 'pandas.core.series.Series'>
print(s.agg('sum'))
# 3
print(type(s.agg('sum')))
# <class 'numpy.int64'>
print(s.agg({'Total': 'sum', 'Average': 'mean', 'Min': 'min', 'Max': 'max'}))
# Total 3.0
# Average 1.0
# Min 0.0
# Max 2.0
# Name: A, dtype: float64
# print(s.agg({'NewLabel_1': ['sum', 'max'], 'NewLabel_2': ['mean', 'min']}))
# SpecificationError: nested renamer is not supported
print(df.agg(['mad', 'amax', 'dtype']))
# A B
# mad 0.666667 0.666667
# amax 2 5
# dtype int64 int64
print(df['A'].mad())
# 0.6666666666666666
print(np.amax(df['A']))
# 2
print(df['A'].dtype)
# int64
# print(df.agg(['xxx']))
# AttributeError: 'xxx' is not a valid function for 'Series' object
# print(df.agg('xxx'))
# AttributeError: 'xxx' is not a valid function for 'DataFrame' object
print(hasattr(pd.DataFrame, '__array__'))
# True
print(hasattr(pd.core.groupby.GroupBy, '__array__'))
# False
print(df.agg([np.sum, max]))
# A B
# sum 3 12
# max 2 5
print(np.sum(df['A']))
# 3
print(max(df['A']))
# 2
print(np.abs(df['A']))
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(df.agg([np.abs]))
# A B
# absolute absolute
# 0 0 3
# 1 1 4
# 2 2 5
# print(df.agg([np.abs, max]))
# ValueError: cannot combine transform and aggregation operations
def my_func(x):
return min(x) / max(x)
print(df.agg([my_func, lambda x: min(x) / max(x)]))
# A B
# my_func 0.0 0.6
# <lambda> 0.0 0.6
print(df['A'].std())
# 1.0
print(df['A'].std(ddof=0))
# 0.816496580927726
print(df.agg(['std', lambda x: x.std(ddof=0)]))
# A B
# std 1.000000 1.000000
# <lambda> 0.816497 0.816497
print(df.agg('std', ddof=0))
# A 0.816497
# B 0.816497
# dtype: float64
print(df.agg(['std'], ddof=0))
# A B
# std 1.0 1.0
df_str = df.assign(C=['X', 'Y', 'Z'])
print(df_str)
# A B C
# 0 0 3 X
# 1 1 4 Y
# 2 2 5 Z
# df_str['C'].mean()
# TypeError: Could not convert XYZ to numeric
print(df_str.agg(['sum', 'mean']))
# A B C
# sum 3.0 12.0 XYZ
# mean 1.0 4.0 NaN
print(df_str.agg(['mean', 'std']))
# A B
# mean 1.0 4.0
# std 1.0 1.0
print(df_str.agg(['sum', 'min', 'max']))
# A B C
# sum 3 12 XYZ
# min 0 3 X
# max 2 5 Z
print(df_str.select_dtypes(include='number').agg(['sum', 'mean']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
|
[
"pandas.DataFrame",
"numpy.sum",
"numpy.amax",
"numpy.abs"
] |
[((134, 180), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [0, 1, 2], 'B': [3, 4, 5]}"], {}), "({'A': [0, 1, 2], 'B': [3, 4, 5]})\n", (146, 180), True, 'import pandas as pd\n'), ((2325, 2341), 'numpy.amax', 'np.amax', (["df['A']"], {}), "(df['A'])\n", (2332, 2341), True, 'import numpy as np\n'), ((2754, 2769), 'numpy.sum', 'np.sum', (["df['A']"], {}), "(df['A'])\n", (2760, 2769), True, 'import numpy as np\n'), ((2807, 2822), 'numpy.abs', 'np.abs', (["df['A']"], {}), "(df['A'])\n", (2813, 2822), True, 'import numpy as np\n')]
|
import os
import re
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
# Assign labels used in eep conversion
eep_params = dict(
age = 'Age (yrs)',
hydrogen_lum = 'L_H',
lum = 'Log L',
logg = 'Log g',
log_teff = 'Log T',
core_hydrogen_frac = 'X_core', # must be added
core_helium_frac = 'Y_core',
teff_scale = 20, # used in metric function
lum_scale = 1, # used in metric function
# `intervals` is a list containing the number of secondary Equivalent
# Evolutionary Phases (EEPs) between each pair of primary EEPs.
intervals = [200, # Between PreMS and ZAMS
50, # Between ZAMS and EAMS
100, # Between EAMS and IAMS
100, # IAMS-TAMS
150], # TAMS-RGBump
)
def my_PreMS(track, eep_params, i0=None):
'''
Dartmouth models do not have central temperature, which is necessary for
the default PreMS calculation. For now, let the first point be the PreMS.
'''
return 0
def my_TAMS(track, eep_params, i0, Xmin=1e-5):
'''
By default, the TAMS is defined as the first point in the track where Xcen
drops below 10^-12. But not all the DSEP tracks hit this value. To ensure
the TAMS is placed correctly, here I'm using Xcen = 10^-5 as the critical
value.
'''
core_hydrogen_frac = eep_params['core_hydrogen_frac']
Xc_tr = track.loc[i0:, core_hydrogen_frac]
below_crit = Xc_tr <= Xmin
if not below_crit.any():
return -1
return below_crit.idxmax()
def my_RGBump(track, eep_params, i0=None):
'''
Modified from eep.get_RGBump to make luminosity logarithmic
'''
lum = eep_params['lum']
log_teff = eep_params['log_teff']
N = len(track)
lum_tr = track.loc[i0:, lum]
logT_tr = track.loc[i0:, log_teff]
lum_greater = (lum_tr > 1)
if not lum_greater.any():
return -1
RGBump = lum_greater.idxmax() + 1
while logT_tr[RGBump] < logT_tr[RGBump-1] and RGBump < N-1:
RGBump += 1
# Two cases: 1) We didn't reach an extremum, in which case RGBump gets
# set as the final index of the track. In this case, return -1.
# 2) We found the extremum, in which case RGBump gets set
# as the index corresponding to the extremum.
if RGBump >= N-1:
return -1
return RGBump-1
def my_HRD(track, eep_params):
'''
Adapted from eep._HRD_distance to fix lum logarithm
'''
# Allow for scaling to make changes in Teff and L comparable
Tscale = eep_params['teff_scale']
Lscale = eep_params['lum_scale']
log_teff = eep_params['log_teff']
lum = eep_params['lum']
logTeff = track[log_teff]
logLum = track[lum]
N = len(track)
dist = np.zeros(N)
for i in range(1, N):
temp_dist = (((logTeff.iloc[i] - logTeff.iloc[i-1])*Tscale)**2
+ ((logLum.iloc[i] - logLum.iloc[i-1])*Lscale)**2)
dist[i] = dist[i-1] + np.sqrt(temp_dist)
return dist
def from_dartmouth(path):
fname = path.split('/')[-1]
file_str = fname.replace('.trk', '')
mass = int(file_str[1:4])/100
met_str = file_str[7:10]
met = int(met_str[1:])/10
if met_str[0] == 'm':
met *= -1
alpha_str = file_str[13:]
alpha = int(alpha_str[1:])/10
if alpha_str[0] == 'm':
alpha *= -1
with open(path, 'r') as f:
header = f.readline()
col_line = f.readline()
data_lines = f.readlines()
columns = re.split(r'\s{2,}', col_line.strip('# \n'))
data = np.genfromtxt(data_lines)
# Build multi-indexed DataFrame, dropping unwanted columns
multi_index = pd.MultiIndex.from_tuples(
[(mass, met, step) for step in range(len(data))],
names=['initial_mass', 'initial_met', 'step'])
df = pd.DataFrame(data, index=multi_index, columns=columns)
return df
def all_from_dartmouth(raw_grids_path, progress=True):
df_list = []
filelist = [f for f in os.listdir(raw_grids_path) if '.trk' in f]
if progress:
file_iter = tqdm(filelist)
else:
file_iter = filelist
for fname in file_iter:
fpath = os.path.join(raw_grids_path, fname)
df_list.append(from_dartmouth(fpath))
dfs = pd.concat(df_list).sort_index()
# Need X_core for EEP computation
dfs['X_core'] = 1 - dfs['Y_core'] - dfs['Z_core']
return dfs
def install(
raw_grids_path,
name=None,
eep_params=eep_params,
eep_functions={'prems': my_PreMS, 'tams': my_TAMS, 'rgbump': my_RGBump},
metric_function=my_HRD,
):
'''
The main method to install grids that are output of the `rotevol` rotational
evolution tracer code.
Parameters
----------
raw_grids_path (str): the path to the folder containing the raw model grids.
name (str, optional): the name of the grid you're installing. By default,
the basename of the `raw_grids_path` will be used.
eep_params (dict, optional): contains a mapping from your grid's specific
column names to the names used by kiauhoku's default EEP functions.
It also contains 'eep_intervals', the number of secondary EEPs
between each consecutive pair of primary EEPs. By default, the params
defined at the top of this script will be used, but users may specify
their own.
eep_functions (dict, optional): if the default EEP functions won't do the
job, you can specify your own and supply them in a dictionary.
EEP functions must have the call signature
function(track, eep_params), where `track` is a single track.
If none are supplied, the default functions will be used.
metric_function (callable, None): the metric function is how the EEP
interpolator spaces the secondary EEPs. By default, the path
length along the evolution track on the H-R diagram (luminosity vs.
Teff) is used, but you can specify your own if desired.
metric_function must have the call signature
function(track, eep_params), where `track` is a single track.
If no function is supplied, defaults to dartmouth.my_HRD.
Returns None
'''
from .stargrid import from_pandas
from .stargrid import grids_path as install_path
if name is None:
name = os.path.basename(raw_grids_path)
# Create cache directories
path = os.path.join(install_path, name)
if not os.path.exists(path):
os.makedirs(path)
# Cache eep parameters
with open(os.path.join(path, 'eep_params.pkl'), 'wb') as f:
pickle.dump(eep_params, f)
print('Reading and combining grid files')
grids = all_from_dartmouth(raw_grids_path)
grids = from_pandas(grids, name=name)
# Save full grid to file
full_save_path = os.path.join(path, 'full_grid.pqt')
print(f'Saving to {full_save_path}')
grids.to_parquet(full_save_path)
print(f'Converting to eep-based tracks')
eeps = grids.to_eep(eep_params, eep_functions, metric_function)
# Save EEP grid to file
eep_save_path = os.path.join(path, 'eep_grid.pqt')
print(f'Saving to {eep_save_path}')
eeps.to_parquet(eep_save_path)
# Create and save interpolator to file
interp = eeps.to_interpolator()
interp_save_path = os.path.join(path, 'interpolator.pkl')
print(f'Saving interpolator to {interp_save_path}')
interp.to_pickle(path=interp_save_path)
print(f'Model grid "{name}" installed.')
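# Usage sketch (hypothetical path; `name` defaults to the folder's basename):
# install('/path/to/dartmouth_tracks', name='dartmouth')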
|
[
"os.path.exists",
"os.listdir",
"pickle.dump",
"numpy.sqrt",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"numpy.zeros",
"pandas.concat",
"os.path.basename",
"pandas.DataFrame",
"numpy.genfromtxt"
] |
[((2750, 2761), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2758, 2761), True, 'import numpy as np\n'), ((3554, 3579), 'numpy.genfromtxt', 'np.genfromtxt', (['data_lines'], {}), '(data_lines)\n', (3567, 3579), True, 'import numpy as np\n'), ((3815, 3869), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'multi_index', 'columns': 'columns'}), '(data, index=multi_index, columns=columns)\n', (3827, 3869), True, 'import pandas as pd\n'), ((6396, 6428), 'os.path.join', 'os.path.join', (['install_path', 'name'], {}), '(install_path, name)\n', (6408, 6428), False, 'import os\n'), ((6802, 6837), 'os.path.join', 'os.path.join', (['path', '"""full_grid.pqt"""'], {}), "(path, 'full_grid.pqt')\n", (6814, 6837), False, 'import os\n'), ((7079, 7113), 'os.path.join', 'os.path.join', (['path', '"""eep_grid.pqt"""'], {}), "(path, 'eep_grid.pqt')\n", (7091, 7113), False, 'import os\n'), ((7292, 7330), 'os.path.join', 'os.path.join', (['path', '"""interpolator.pkl"""'], {}), "(path, 'interpolator.pkl')\n", (7304, 7330), False, 'import os\n'), ((4066, 4080), 'tqdm.tqdm', 'tqdm', (['filelist'], {}), '(filelist)\n', (4070, 4080), False, 'from tqdm import tqdm\n'), ((4165, 4200), 'os.path.join', 'os.path.join', (['raw_grids_path', 'fname'], {}), '(raw_grids_path, fname)\n', (4177, 4200), False, 'import os\n'), ((6320, 6352), 'os.path.basename', 'os.path.basename', (['raw_grids_path'], {}), '(raw_grids_path)\n', (6336, 6352), False, 'import os\n'), ((6440, 6460), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6454, 6460), False, 'import os\n'), ((6470, 6487), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6481, 6487), False, 'import os\n'), ((6588, 6614), 'pickle.dump', 'pickle.dump', (['eep_params', 'f'], {}), '(eep_params, f)\n', (6599, 6614), False, 'import pickle\n'), ((2960, 2978), 'numpy.sqrt', 'np.sqrt', (['temp_dist'], {}), '(temp_dist)\n', (2967, 2978), True, 'import numpy as np\n'), ((3985, 4011), 'os.listdir', 'os.listdir', (['raw_grids_path'], {}), '(raw_grids_path)\n', (3995, 4011), False, 'import os\n'), ((4258, 4276), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (4267, 4276), True, 'import pandas as pd\n'), ((6530, 6566), 'os.path.join', 'os.path.join', (['path', '"""eep_params.pkl"""'], {}), "(path, 'eep_params.pkl')\n", (6542, 6566), False, 'import os\n')]
|
from precise.covariance.movingaverage import ema_scov
from precise.covariance.matrixfunctions import grand_mean, grand_shrink
from sklearn.covariance._shrunk_covariance import ledoit_wolf_shrinkage
import numpy as np
# Experimental estimator inspired by Ledoit-Wolf
# Keeps a buffer of last n_buffer observations
# Tracks quantities akin to a^2, d^2 in LW
def lw_ema_scov(s:dict, x=None, r=0.025)->dict:
if s.get('s_c') is None:
if isinstance(x,int):
return _lw_ema_scov_init(n_dim=x, r=r)
else:
s = _lw_ema_scov_init(n_dim=len(x), r=r)
if x is not None:
s = _lw_ema_scov_update(s=s, x=x, r=r)
return s
def _lw_ema_scov_init(n_dim, r):
sc = ema_scov({}, n_dim, r=r)
    return {'s_c':sc,
            'bn_bar':None,
            'a2':0,
            'mn':0,
            'lmbd':1.0,  # initial shrinkage; avoids a KeyError on the first b^2 update
            'n_new':0,
            'buffer':[]}
def _lw_ema_scov_update(s, x, r):
"""
Attempts to track quantities similar to those used to estimate LD shrinkage
"""
x = np.asarray(x)
s['s_c'] = ema_scov(s=s['s_c'], x=x, r=r)
s['buffer'].append(x)
if len(s['buffer'])>s['s_c']['n_emp']:
# Update running estimate of the LD shrinkage parameter
s['n_new'] = s['n_new']+1
xl = s['buffer'].pop(0)
xc = np.atleast_2d(xl-s['s_c']['mean']) # <--- Do we need this?
scov = s['s_c']['scov']
# Compute d^2
mn = grand_mean(scov)
s['mn'] = mn
n_dim = np.shape(scov)[0]
s['dn'] = np.linalg.norm(scov - mn * np.eye(n_dim))**2
# Update b^2
xc2 = xc
xl2 = np.dot(xc2.T,xc2) - scov
if s.get('bn_bar') is None:
s['bn_bar'] = s['lmbd']*s['dn']
s['lmbd_lw'] = 1.0 * s['lmbd']
r_shrink = r/2 # <--- Heuristic
bk = np.linalg.norm( xl2 )
s['bn_bar'] = (1-r_shrink)*s['bn_bar'] + r_shrink*bk # b^2
ratio = bk/s['dn']
# Imply new shrinkage
bn = min( s['bn_bar'], s['dn'] )
lmbd = bn/s['dn']
s['lmbd'] = lmbd
if 2< s['s_c']['n_samples']<2*s['s_c']['n_emp']:
# Override with traditional Ledoit-Shrinkage
X = np.asarray(s['buffer'])
s['lmbd'] = ledoit_wolf_shrinkage(X=X)
if s['s_c']['n_samples']>2:
scov = s['s_c']['scov']
s['scov'] = grand_shrink(a=scov, lmbd=s['lmbd'], copy=True)
return s
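# Usage sketch (synthetic data; `r` keeps the default EMA rate above):
# import numpy as np
# s = lw_ema_scov({}, x=3)               # lazy init for 3-dimensional data
# for obs in np.random.randn(500, 3):
#     s = lw_ema_scov(s, x=obs)
# shrunk_cov = s.get('scov')             # shrunk covariance once n_samples > 2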
|
[
"numpy.atleast_2d",
"sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage",
"precise.covariance.movingaverage.ema_scov",
"numpy.eye",
"precise.covariance.matrixfunctions.grand_mean",
"numpy.asarray",
"numpy.dot",
"precise.covariance.matrixfunctions.grand_shrink",
"numpy.linalg.norm",
"numpy.shape"
] |
[((711, 735), 'precise.covariance.movingaverage.ema_scov', 'ema_scov', (['{}', 'n_dim'], {'r': 'r'}), '({}, n_dim, r=r)\n', (719, 735), False, 'from precise.covariance.movingaverage import ema_scov\n'), ((1017, 1030), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1027, 1030), True, 'import numpy as np\n'), ((1047, 1077), 'precise.covariance.movingaverage.ema_scov', 'ema_scov', ([], {'s': "s['s_c']", 'x': 'x', 'r': 'r'}), "(s=s['s_c'], x=x, r=r)\n", (1055, 1077), False, 'from precise.covariance.movingaverage import ema_scov\n'), ((1290, 1326), 'numpy.atleast_2d', 'np.atleast_2d', (["(xl - s['s_c']['mean'])"], {}), "(xl - s['s_c']['mean'])\n", (1303, 1326), True, 'import numpy as np\n'), ((1418, 1434), 'precise.covariance.matrixfunctions.grand_mean', 'grand_mean', (['scov'], {}), '(scov)\n', (1428, 1434), False, 'from precise.covariance.matrixfunctions import grand_mean, grand_shrink\n'), ((1807, 1826), 'numpy.linalg.norm', 'np.linalg.norm', (['xl2'], {}), '(xl2)\n', (1821, 1826), True, 'import numpy as np\n'), ((2167, 2190), 'numpy.asarray', 'np.asarray', (["s['buffer']"], {}), "(s['buffer'])\n", (2177, 2190), True, 'import numpy as np\n'), ((2211, 2237), 'sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage', 'ledoit_wolf_shrinkage', ([], {'X': 'X'}), '(X=X)\n', (2232, 2237), False, 'from sklearn.covariance._shrunk_covariance import ledoit_wolf_shrinkage\n'), ((2322, 2369), 'precise.covariance.matrixfunctions.grand_shrink', 'grand_shrink', ([], {'a': 'scov', 'lmbd': "s['lmbd']", 'copy': '(True)'}), "(a=scov, lmbd=s['lmbd'], copy=True)\n", (2334, 2369), False, 'from precise.covariance.matrixfunctions import grand_mean, grand_shrink\n'), ((1472, 1486), 'numpy.shape', 'np.shape', (['scov'], {}), '(scov)\n', (1480, 1486), True, 'import numpy as np\n'), ((1606, 1624), 'numpy.dot', 'np.dot', (['xc2.T', 'xc2'], {}), '(xc2.T, xc2)\n', (1612, 1624), True, 'import numpy as np\n'), ((1535, 1548), 'numpy.eye', 'np.eye', (['n_dim'], {}), '(n_dim)\n', (1541, 1548), True, 'import numpy as np\n')]
|
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import numpy as np
from astropy.io import fits
from ..op import Operator
from ..timing import function_timer
from .tod_math import calibrate
from ..utils import Logger
def write_calibration_file(filename, gain):
"""Write gains to a FITS file in the standard TOAST format
Args:
filename (string): output filename, overwritten by default
gain (dict): Dictionary, key "TIME" has the common timestamps,
other keys are channel names their values are the gains
"""
log = Logger.get()
detectors = list(gain.keys())
detectors.remove("TIME")
hdus = [
fits.PrimaryHDU(),
fits.BinTableHDU.from_columns(
[
fits.Column(
name="DETECTORS",
array=detectors,
unit="",
format="{0}A".format(max([len(x) for x in detectors])),
)
]
),
]
hdus[1].header["EXTNAME"] = "DETECTORS"
cur_hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="TIME", array=gain["TIME"], unit="s", format="1D")]
)
cur_hdu.header["EXTNAME"] = "TIME"
hdus.append(cur_hdu)
gain_table = np.zeros(
(len(detectors), len(gain["TIME"])), dtype=gain[detectors[0]].dtype
)
for i_det, det in enumerate(detectors):
gain_table[i_det, :] = gain[det]
gainhdu = fits.ImageHDU(gain_table)
gainhdu.header["EXTNAME"] = "GAINS"
hdus.append(gainhdu)
fits.HDUList(hdus).writeto(filename, overwrite=True)
log.info("Gains written to file {}".format(filename))
return
class OpApplyGain(Operator):
"""Operator which applies gains to timelines.
Args:
gain (dict): Dictionary, key "TIME" has the common timestamps,
other keys are channel names their values are the gains
name (str): Name of the output signal cache object will be
<name_in>_<detector>. If the object exists, it is used as
input. Otherwise signal is read using the tod read method.
"""
def __init__(self, gain, name=None):
self._gain = gain
self._name = name
# Call the parent class constructor
super().__init__()
@function_timer
def exec(self, data):
"""Apply the gains.
Args:
data (toast.Data): The distributed data.
"""
for obs in data.obs:
tod = obs["tod"]
for det in tod.local_dets:
# Cache the output signal
ref = tod.local_signal(det, self._name)
obs_times = tod.read_times()
calibrate(
obs_times,
ref,
self._gain["TIME"],
self._gain[det],
order=0,
inplace=True,
)
assert np.isnan(ref).sum() == 0, "The signal timestream includes NaN"
del ref
return
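# Usage sketch (hypothetical `gain` dict with a "TIME" key plus one entry per
# detector, matching the format written by write_calibration_file above):
# op = OpApplyGain(gain, name="signal")
# op.exec(data)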
|
[
"astropy.io.fits.HDUList",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.ImageHDU",
"astropy.io.fits.Column",
"numpy.isnan"
] |
[((1589, 1614), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['gain_table'], {}), '(gain_table)\n', (1602, 1614), False, 'from astropy.io import fits\n'), ((811, 828), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (826, 828), False, 'from astropy.io import fits\n'), ((1240, 1307), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""TIME"""', 'array': "gain['TIME']", 'unit': '"""s"""', 'format': '"""1D"""'}), "(name='TIME', array=gain['TIME'], unit='s', format='1D')\n", (1251, 1307), False, 'from astropy.io import fits\n'), ((1685, 1703), 'astropy.io.fits.HDUList', 'fits.HDUList', (['hdus'], {}), '(hdus)\n', (1697, 1703), False, 'from astropy.io import fits\n'), ((3090, 3103), 'numpy.isnan', 'np.isnan', (['ref'], {}), '(ref)\n', (3098, 3103), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 15:28:09 2018
@author: dataquanty
"""
import numpy as np
from math import sqrt, pi, acos,cos
from matplotlib import pyplot as plt
from scipy.misc import imsave
from bisect import bisect_left
h , w = 1000, 1000
img = np.ones((h,w))
center = (500,500)
r = [20,80,200,300,400,500,600]
r = np.exp(range(1,8)).astype(int)
lagval = [0,pi,0,pi,0,pi,0]
maxd = 810
r = range(10,maxd,20)
lagval = [0,pi]*int(len(r)/2)
lagval = np.random.rand(len(r))*pi
lagval = [-pi/4,pi/3]*int(len(r)/2)
lagval = [0,0.05]*int(len(r)/2)
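# NB: the successive reassignments above are kept experiments; only the final
# pair, r = range(10, maxd, 20) and lagval = [0, 0.05]*..., takes effect.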
def dist(a,b):
return sqrt((a[0]-b[0])**2+(a[1]-b[1])**2)
for i in range(h):
for j in range(w):
if (i,j) == center:
img[i][j]=0
else:
d = dist((i,j),center)
k = bisect_left(list(r),d)
#dist((i,j),center)<= r1:
val = (j-center[1])/d
img[i][j] = cos(acos(val)-lagval[k])
"""
angle = acos((j-center[1])/dist((i,j),center))
if i > center[0]:
angle = 2*pi - angle
val = ((angle - lagrad)%(2*pi))/2*pi
img[i][j] = val
"""
#imsave('figLag_pi_s2.png',img)
plt.figure(figsize=(10,10))
plt.imshow(img,cmap='gray')
#interpolation='nearest'
plt.show()
|
[
"matplotlib.pyplot.imshow",
"numpy.ones",
"math.acos",
"math.sqrt",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] |
[((293, 308), 'numpy.ones', 'np.ones', (['(h, w)'], {}), '((h, w))\n', (300, 308), True, 'import numpy as np\n'), ((1265, 1293), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1275, 1293), True, 'from matplotlib import pyplot as plt\n'), ((1300, 1328), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (1310, 1328), True, 'from matplotlib import pyplot as plt\n'), ((1354, 1364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1362, 1364), True, 'from matplotlib import pyplot as plt\n'), ((618, 663), 'math.sqrt', 'sqrt', (['((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)'], {}), '((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n', (622, 663), False, 'from math import sqrt, pi, acos, cos\n'), ((975, 984), 'math.acos', 'acos', (['val'], {}), '(val)\n', (979, 984), False, 'from math import sqrt, pi, acos, cos\n')]
|
#!/usr/bin/env ipython
import numpy as np
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class gral():
def __init__(self):
self.name = ''
sh, mc = gral(), gral()
cr = gral()
cr.sh, cr.mc = gral(), gral()
vlo, vhi = 550.0, 3000.0 #550., 3000. #100.0, 450.0 #550.0, 3000.0
dir_inp_sh = '../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = '../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%4.1f.vhi.%4.1f' % (vlo, vhi)
fname_sh = dir_inp_sh + '/%s_V.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_V.txt' % fname_inp_part
sh.data = np.loadtxt(fname_sh).T
mc.data = np.loadtxt(fname_mc).T
sh.t, sh.avr = sh.data[0], sh.data[2]
mc.t, mc.avr = mc.data[0], mc.data[2]
#++++++++++++++++++++++++++++++++++++++++++++++++++++
fname_sh = dir_inp_sh + '/%s_CRs.txt' % fname_inp_part
fname_mc = dir_inp_mc + '/%s_CRs.txt' % fname_inp_part
cr.sh.data = np.loadtxt(fname_sh).T
cr.mc.data = np.loadtxt(fname_mc).T
cr.sh.t, cr.sh.avr = cr.sh.data[0], cr.sh.data[2]
cr.mc.t, cr.mc.avr = cr.mc.data[0], cr.mc.data[2]
|
[
"numpy.loadtxt"
] |
[((687, 707), 'numpy.loadtxt', 'np.loadtxt', (['fname_sh'], {}), '(fname_sh)\n', (697, 707), True, 'import numpy as np\n'), ((720, 740), 'numpy.loadtxt', 'np.loadtxt', (['fname_mc'], {}), '(fname_mc)\n', (730, 740), True, 'import numpy as np\n'), ((1005, 1025), 'numpy.loadtxt', 'np.loadtxt', (['fname_sh'], {}), '(fname_sh)\n', (1015, 1025), True, 'import numpy as np\n'), ((1041, 1061), 'numpy.loadtxt', 'np.loadtxt', (['fname_mc'], {}), '(fname_mc)\n', (1051, 1061), True, 'import numpy as np\n')]
|
"""
DUNE CVN generator module.
"""
__version__ = '1.0'
__author__ = '<NAME>, <NAME>'
__email__ = "<EMAIL>, <EMAIL>"
import numpy as np
import zlib
class DataGenerator(object):
''' Generate data for tf.keras.
'''
def __init__(self, cells=500, planes=500, views=3, batch_size=32,
images_path = 'dataset', shuffle=True, test_values=[]):
''' Constructor.
Args:
cells: image cells.
planes: image planes.
views: number of views.
batch_size: batch size.
images_path: path of input events.
shuffle: shuffle the events.
test_values: array to be filled with test values.
'''
self.cells = cells
self.planes = planes
self.views = views
self.batch_size = batch_size
self.images_path = images_path
self.shuffle = shuffle
self.test_values = test_values
def generate(self, labels, list_IDs):
''' Generates batches of samples.
Args:
labels: event labels.
list_IDs: event IDs within partition.
Yields: a batch of events.
'''
# infinite loop
while 1:
# generate random order of exploration of dataset (to make each epoch different)
indexes = self.get_exploration_order(list_IDs)
# generate batches
imax = int(len(indexes)/self.batch_size) # number of batches
for i in range(imax):
# find list of IDs for one batch
list_IDs_temp = [list_IDs[k] for k in indexes[i*self.batch_size:(i+1)*self.batch_size]]
# generate data
X = self.data_generation(labels, list_IDs_temp)
yield X
def get_exploration_order(self, list_IDs):
''' Generates order of exploration.
Args:
list_IDs: event IDs within partition.
Returns: random order of exploration.
'''
# find exploration order
indexes = np.arange(len(list_IDs))
if self.shuffle == True:
np.random.shuffle(indexes)
return indexes
def data_generation(self, labels, list_IDs_temp):
        ''' Generates data for one batch of batch_size samples.
        Args:
            labels: event labels.
            list_IDs_temp: event IDs for this batch.
        Returns: a batch of events.
        '''
X = [None]*self.views
for view in range(self.views):
X[view] = np.empty((self.batch_size, self.planes, self.cells, 1), dtype='float32')
# generate data
for i, ID in enumerate(list_IDs_temp):
# decompress images into pixel numpy array
            with open(self.images_path + '/event' + ID + '.gz', 'rb') as image_file:
pixels = np.fromstring(zlib.decompress(image_file.read()), dtype=np.uint8, sep='')
pixels = pixels.reshape(self.views, self.planes, self.cells)
# store volume
for view in range(self.views):
X[view][i, :, :, :] = pixels[view, :, :].reshape(self.planes, self.cells, 1)
# get y label
y_value = labels[ID]
# store actual y label
self.test_values.append(y_value)
return X
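# Usage sketch (hypothetical labels/partition dicts; plugs into tf.keras):
# gen = DataGenerator(batch_size=32, test_values=[])
# model.fit(gen.generate(labels, partition['train']),
#           steps_per_epoch=len(partition['train']) // 32)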
|
[
"numpy.empty",
"numpy.random.shuffle"
] |
[((2123, 2149), 'numpy.random.shuffle', 'np.random.shuffle', (['indexes'], {}), '(indexes)\n', (2140, 2149), True, 'import numpy as np\n'), ((2517, 2589), 'numpy.empty', 'np.empty', (['(self.batch_size, self.planes, self.cells, 1)'], {'dtype': '"""float32"""'}), "((self.batch_size, self.planes, self.cells, 1), dtype='float32')\n", (2525, 2589), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def show_batch(ds: tf.data.Dataset,
classes: list,
rescale: bool = False,
size: tuple = (10, 10),
title: str = None):
"""
Function to show a batch of images including labels from tf.data object
Args:
ds: a (batched) tf.data.Dataset
classes: a list of all classes (in order of one-hot-encoding)
        rescale: boolean whether to multiply image values by 255
size: tuple giving plot size
title: plot title
Returns:
matplotlib.pyplot
"""
plt.figure(figsize=size)
# Take on batch from dataset and iterate over image-label-combination
for image, label in ds.take(1):
image_array = image.numpy()
        # Undo the [-1, 1] scaling from preprocess_input for plotting
image_array += 1.0
image_array /= 2.0
label_array = label.numpy()
batch_size = image_array.shape[0]
for idx in range(batch_size):
label = classes[np.argmax(label_array[idx])]
            ax = plt.subplot(int(np.ceil(batch_size / 4)), 4, idx + 1)  # subplot counts must be ints
if rescale:
plt.imshow(image_array[idx] * 255)
else:
plt.imshow(image_array[idx])
plt.title(label + ' ' + str(image_array[idx].shape), fontsize=10)
plt.axis('off')
if title is not None:
plt.suptitle(title)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
def create_target_list(files: list, target: str = 'make') -> list:
"""
Create a list of unique target classes from file names
Args:
files: a list of file names
target: either 'model' or 'make'
Returns:
list of classes
"""
if target not in ['make', 'model']:
raise ValueError('target must be either "make" or "model"')
if target == 'make':
classes = list(set([file.split('_')[0] for file in files]))
if target == 'model':
classes = list(set([file.split('_')[0] + '_' + file.split('_')[1] for file in files]))
return classes
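# Example (hypothetical file names of the form make_model_rest):
# create_target_list(['bmw_x5_01.jpg', 'audi_a4_02.jpg'], target='model')
# -> ['bmw_x5', 'audi_a4'] (unordered, since a set is used internally)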
|
[
"matplotlib.pyplot.imshow",
"numpy.ceil",
"numpy.argmax",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show"
] |
[((640, 664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'size'}), '(figsize=size)\n', (650, 664), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1505), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0.03, 1, 0.95]'}), '(rect=[0, 0.03, 1, 0.95])\n', (1480, 1505), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1518, 1520), True, 'import matplotlib.pyplot as plt\n'), ((1439, 1458), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (1451, 1458), True, 'import matplotlib.pyplot as plt\n'), ((1388, 1403), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1396, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1092), 'numpy.argmax', 'np.argmax', (['label_array[idx]'], {}), '(label_array[idx])\n', (1074, 1092), True, 'import numpy as np\n'), ((1123, 1146), 'numpy.ceil', 'np.ceil', (['(batch_size / 4)'], {}), '(batch_size / 4)\n', (1130, 1146), True, 'import numpy as np\n'), ((1200, 1234), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(image_array[idx] * 255)'], {}), '(image_array[idx] * 255)\n', (1210, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1297), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_array[idx]'], {}), '(image_array[idx])\n', (1279, 1297), True, 'import matplotlib.pyplot as plt\n')]
|
import matlab.engine
import matlab
import numpy as np
import PIL
import matplotlib.pyplot as plt
import sys
print(sys.version_info[0:2])
if sys.version_info[0:2] != (3, 8) and sys.version_info[0:2] != (3, 7) and sys.version_info[0:2] != (3, 6):
raise Exception('Requires python 3.6, 3.7, or 3.8')
eng = matlab.engine.start_matlab()
def blind_deconvolution(image, kernel_size=3, num_iterations=30, weighted=False, edge_weight=.08):
    # If URL/path to an image file
    if isinstance(image, str):
        image = PIL.Image.open(image)
        image = PIL.ImageOps.grayscale(image)
    # If PIL image object
    elif isinstance(image, PIL.Image.Image):
        image = PIL.ImageOps.grayscale(image)
    # If it is already a numpy array
    elif isinstance(image, np.ndarray):
        image = PIL.Image.fromarray(image)
        image = PIL.ImageOps.grayscale(image)
# Else raise exception
else:
raise Exception('Input was of type ' + str(type(image)) + '. Must be a URL to an image, a PIL Image object, or an np array')
    # The edge-aware weight is computed after the image is converted to a
    # matlab.double below; the MATLAB engine cannot ingest a PIL Image directly.
# Starting kernel
start_kernel_np = np.ones((kernel_size,kernel_size))
start_kernel = []
image_np = np.asarray(image)
image = []
# Convert to matlab types
for i in range(len(start_kernel_np)):
start_kernel.append(matlab.double(start_kernel_np[i].tolist()))
start_kernel = matlab.double(start_kernel)
for i in range(len(image_np)):
image.append(matlab.double(image_np[i].tolist()))
image = matlab.double(image)
    # Call MATLAB blind deconvolution
    if weighted:
        # Edge-aware weight: down-weight dilated edge pixels so deconvolution
        # does not amplify ringing near strong edges
        weight = eng.edge(image, "sobel", edge_weight)
        se = eng.strel("disk", 2)
        weight = 1 - matlab.double(eng.imdilate(weight, se))
        deconvolved = eng.deconvblind(image, start_kernel, num_iterations, weight)
    else:
        deconvolved = eng.deconvblind(image, start_kernel, num_iterations)
deconvolved = np.asarray(deconvolved).squeeze()
return deconvolved
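# Usage sketch (hypothetical image path; requires a local MATLAB install
# licensed for the MATLAB Engine API):
# deblurred = blind_deconvolution('blurry.png', kernel_size=5, num_iterations=20)
# plt.imshow(deblurred, cmap='gray'); plt.show()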
|
[
"PIL.Image.fromarray",
"PIL.Image.open",
"matlab.engine.start_matlab",
"numpy.ones",
"numpy.asarray",
"PIL.ImageOps.grayscale",
"matlab.double"
] |
[((310, 338), 'matlab.engine.start_matlab', 'matlab.engine.start_matlab', ([], {}), '()\n', (336, 338), False, 'import matlab\n'), ((1282, 1317), 'numpy.ones', 'np.ones', (['(kernel_size, kernel_size)'], {}), '((kernel_size, kernel_size))\n', (1289, 1317), True, 'import numpy as np\n'), ((1354, 1371), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1364, 1371), True, 'import numpy as np\n'), ((1554, 1581), 'matlab.double', 'matlab.double', (['start_kernel'], {}), '(start_kernel)\n', (1567, 1581), False, 'import matlab\n'), ((1697, 1717), 'matlab.double', 'matlab.double', (['image'], {}), '(image)\n', (1710, 1717), False, 'import matlab\n'), ((512, 533), 'PIL.Image.open', 'PIL.Image.open', (['image'], {}), '(image)\n', (526, 533), False, 'import PIL\n'), ((550, 579), 'PIL.ImageOps.grayscale', 'PIL.ImageOps.grayscale', (['image'], {}), '(image)\n', (572, 579), False, 'import PIL\n'), ((657, 674), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (667, 674), True, 'import numpy as np\n'), ((691, 720), 'PIL.ImageOps.grayscale', 'PIL.ImageOps.grayscale', (['image'], {}), '(image)\n', (713, 720), False, 'import PIL\n'), ((1944, 1967), 'numpy.asarray', 'np.asarray', (['deconvolved'], {}), '(deconvolved)\n', (1954, 1967), True, 'import numpy as np\n'), ((809, 835), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image'], {}), '(image)\n', (828, 835), False, 'import PIL\n'), ((852, 881), 'PIL.ImageOps.grayscale', 'PIL.ImageOps.grayscale', (['image'], {}), '(image)\n', (874, 881), False, 'import PIL\n')]
|
from skimage.util import img_as_float
from skimage import io, filters
# from skimage.viewer import ImageViewer
import numpy as np
def split_image_into_channels(image):
"""Look at each image separately"""
red_channel = image[:, :, 0]
green_channel = image[:, :, 1]
blue_channel = image[:, :, 2]
return red_channel, green_channel, blue_channel
def merge_channels(red, green, blue):
"""Merge channels back into an image"""
return np.stack([red, green, blue], axis=2)
def sharpen(image, a, b):
"""Sharpening an image: Blur and then subtract from original"""
blurred = filters.gaussian(image, sigma=10, multichannel=True)
sharper = np.clip(image * a - blurred * b, 0, 1.0)
return sharper
def channel_adjust(channel, values):
# preserve the original size, so we can reconstruct at the end
orig_size = channel.shape
# flatten the image into a single array
flat_channel = channel.flatten()
# this magical numpy function takes the values in flat_channel
# and maps it from its range in [0, 1] to its new squeezed and
# stretched range
adjusted = np.interp(flat_channel, np.linspace(0, 1, len(values)), values)
# put back into the original image shape
return adjusted.reshape(orig_size)
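# Worked example: with values=[0, 0.8, 1], np.interp maps the channel's [0, 1]
# range through control points x=[0, 0.5, 1] -> y=[0, 0.8, 1], so a pixel at
# 0.5 becomes 0.8 (midtones lifted) while 0 and 1 stay fixed.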
def gotham(
original_image,
r_boost_upper=1,
b_adjusted_upper=1,
blurriness=1.3,
subtraction=0.3,
amount_bluer_blacks=0.03,
):
original_image = img_as_float(original_image)
r, g, b = split_image_into_channels(original_image)
# np.linspace second argument
r_boost_lower = channel_adjust(r, np.linspace(0, r_boost_upper))
# amount of bluer_blacks
bluer_blacks = merge_channels(
r_boost_lower, g, np.clip(b + amount_bluer_blacks, 0, 1.0)
)
# amount blurriness, and subtraction
sharper = sharpen(bluer_blacks, blurriness, subtraction)
r, g, b = split_image_into_channels(sharper)
# np.linspace second argument
b_adjusted = channel_adjust(b, np.linspace(0, b_adjusted_upper))
return merge_channels(r, g, b_adjusted)
if __name__ == "__main__":
original_image = io.imread("data/input/sample.jpg")
output = gotham(original_image, b_adjusted_upper=3)
io.imsave("data/output/image-experiment/gotham.jpg", output)
|
[
"numpy.clip",
"skimage.util.img_as_float",
"numpy.stack",
"skimage.io.imread",
"numpy.linspace",
"skimage.io.imsave",
"skimage.filters.gaussian"
] |
[((460, 496), 'numpy.stack', 'np.stack', (['[red, green, blue]'], {'axis': '(2)'}), '([red, green, blue], axis=2)\n', (468, 496), True, 'import numpy as np\n'), ((607, 659), 'skimage.filters.gaussian', 'filters.gaussian', (['image'], {'sigma': '(10)', 'multichannel': '(True)'}), '(image, sigma=10, multichannel=True)\n', (623, 659), False, 'from skimage import io, filters\n'), ((674, 714), 'numpy.clip', 'np.clip', (['(image * a - blurred * b)', '(0)', '(1.0)'], {}), '(image * a - blurred * b, 0, 1.0)\n', (681, 714), True, 'import numpy as np\n'), ((1446, 1474), 'skimage.util.img_as_float', 'img_as_float', (['original_image'], {}), '(original_image)\n', (1458, 1474), False, 'from skimage.util import img_as_float\n'), ((2124, 2158), 'skimage.io.imread', 'io.imread', (['"""data/input/sample.jpg"""'], {}), "('data/input/sample.jpg')\n", (2133, 2158), False, 'from skimage import io, filters\n'), ((2219, 2279), 'skimage.io.imsave', 'io.imsave', (['"""data/output/image-experiment/gotham.jpg"""', 'output'], {}), "('data/output/image-experiment/gotham.jpg', output)\n", (2228, 2279), False, 'from skimage import io, filters\n'), ((1605, 1634), 'numpy.linspace', 'np.linspace', (['(0)', 'r_boost_upper'], {}), '(0, r_boost_upper)\n', (1616, 1634), True, 'import numpy as np\n'), ((1727, 1767), 'numpy.clip', 'np.clip', (['(b + amount_bluer_blacks)', '(0)', '(1.0)'], {}), '(b + amount_bluer_blacks, 0, 1.0)\n', (1734, 1767), True, 'import numpy as np\n'), ((1996, 2028), 'numpy.linspace', 'np.linspace', (['(0)', 'b_adjusted_upper'], {}), '(0, b_adjusted_upper)\n', (2007, 2028), True, 'import numpy as np\n')]
|
import argparse
import logging
from pathlib import Path
import dask
import h5py
import joblib
import numpy as np
import pandas as pd
from dask.diagnostics import ProgressBar
from tqdm import tqdm
from dsconcept.get_metrics import (
get_cat_inds,
get_synth_preds,
load_category_models,
load_concept_models,
HierarchicalClassifier,
get_mets,
)
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def main(
experiment_name,
synth_strat,
in_cat_preds,
out_store,
synth_batch_size,
t,
out_synth_scores,
limit=None,
con_limit=None,
):
test_inds = np.load(f"data/interim/{experiment_name}/test_inds.npy")
feature_matrix = joblib.load(f"data/interim/{experiment_name}/feature_matrix.jbl")
in_cat_models = Path(f"models/{experiment_name}/categories/models/")
in_kwd_models = Path(f"models/{experiment_name}/keywords/models/")
cat_preds = np.load(in_cat_preds) # based on experiment or explicit path?
cat_clfs = load_category_models(in_cat_models)
cd = load_concept_models(in_kwd_models)
clf = HierarchicalClassifier(cat_clfs, cd)
if limit is not None:
LOG.info(f"Limiting to {limit} test records.")
feature_matrix_test = feature_matrix.tocsc()[test_inds[0:limit], :]
cat_preds = cat_preds[0:limit, :]
# TODO: How does this affect indices?
else:
feature_matrix_test = feature_matrix.tocsc()[test_inds, :]
LOG.info(f'Synthesizing predictions with strategy "{synth_strat}".')
all_cat_inds = get_cat_inds(clf.categories, cat_preds, t=t)
if con_limit is not None:
conwc = clf.concepts_with_classifiers[0:con_limit]
else:
conwc = clf.concepts_with_classifiers
shape = (feature_matrix_test.shape[0], len(conwc))
with tqdm(total=shape[0]) as pbar:
get_synth_preds(
out_store,
shape,
all_cat_inds,
clf.categories,
synth_batch_size,
only_cat=False,
synth_strat=synth_strat,
con_limit=con_limit,
limit=limit,
pbar=pbar,
)
LOG.info("Obtaining metrics.")
with h5py.File(out_store, "r") as f0:
if limit is not None:
target_values = f0["ground_truth"][0:limit, :]
else:
target_values = f0["ground_truth"].value
with h5py.File(out_store, "r") as f0:
synth_preds = f0["synthesis"].value
jobs = []
mets_pbar = tqdm(
range(len(conwc)),
total=len(conwc),
)
for i in mets_pbar:
job = dask.delayed(get_mets)(
i, synth_preds, target_values, conwc, mets_pbar
)
jobs.append(job)
records = dask.compute(jobs)
new_recs_df = pd.DataFrame(records[0])
LOG.info(f"Saving results to {out_synth_scores}.")
new_recs_df.to_csv(out_synth_scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Say hello")
parser.add_argument("--experiment_name", help="input txt file")
parser.add_argument("--synth_strat", help="input txt file")
parser.add_argument("--in_cat_preds", help="input txt file")
parser.add_argument("--store", help="input txt file")
parser.add_argument("--synth_batch_size", help="input txt file", type=int)
parser.add_argument("--threshold", help="input txt file", type=float)
parser.add_argument("--out_synth_scores", help="input txt file")
parser.add_argument(
"--limit", help="size for sample to test synthesis", type=int, default=None
)
parser.add_argument(
"--con_limit", help="size for concept sample", type=int, default=None
)
args = parser.parse_args()
main(
args.experiment_name,
args.synth_strat,
args.in_cat_preds,
args.store,
args.synth_batch_size,
args.threshold,
args.out_synth_scores,
args.limit,
args.con_limit,
)
|
[
"logging.basicConfig",
"logging.getLogger",
"dsconcept.get_metrics.get_synth_preds",
"dask.delayed",
"dask.compute",
"pathlib.Path",
"argparse.ArgumentParser",
"dsconcept.get_metrics.get_cat_inds",
"dsconcept.get_metrics.load_concept_models",
"tqdm.tqdm",
"h5py.File",
"dsconcept.get_metrics.load_category_models",
"joblib.load",
"pandas.DataFrame",
"dsconcept.get_metrics.HierarchicalClassifier",
"numpy.load"
] |
[((369, 408), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (388, 408), False, 'import logging\n'), ((415, 442), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (432, 442), False, 'import logging\n'), ((659, 715), 'numpy.load', 'np.load', (['f"""data/interim/{experiment_name}/test_inds.npy"""'], {}), "(f'data/interim/{experiment_name}/test_inds.npy')\n", (666, 715), True, 'import numpy as np\n'), ((737, 802), 'joblib.load', 'joblib.load', (['f"""data/interim/{experiment_name}/feature_matrix.jbl"""'], {}), "(f'data/interim/{experiment_name}/feature_matrix.jbl')\n", (748, 802), False, 'import joblib\n'), ((823, 875), 'pathlib.Path', 'Path', (['f"""models/{experiment_name}/categories/models/"""'], {}), "(f'models/{experiment_name}/categories/models/')\n", (827, 875), False, 'from pathlib import Path\n'), ((896, 946), 'pathlib.Path', 'Path', (['f"""models/{experiment_name}/keywords/models/"""'], {}), "(f'models/{experiment_name}/keywords/models/')\n", (900, 946), False, 'from pathlib import Path\n'), ((963, 984), 'numpy.load', 'np.load', (['in_cat_preds'], {}), '(in_cat_preds)\n', (970, 984), True, 'import numpy as np\n'), ((1041, 1076), 'dsconcept.get_metrics.load_category_models', 'load_category_models', (['in_cat_models'], {}), '(in_cat_models)\n', (1061, 1076), False, 'from dsconcept.get_metrics import get_cat_inds, get_synth_preds, load_category_models, load_concept_models, HierarchicalClassifier, get_mets\n'), ((1086, 1120), 'dsconcept.get_metrics.load_concept_models', 'load_concept_models', (['in_kwd_models'], {}), '(in_kwd_models)\n', (1105, 1120), False, 'from dsconcept.get_metrics import get_cat_inds, get_synth_preds, load_category_models, load_concept_models, HierarchicalClassifier, get_mets\n'), ((1131, 1167), 'dsconcept.get_metrics.HierarchicalClassifier', 'HierarchicalClassifier', (['cat_clfs', 'cd'], {}), '(cat_clfs, cd)\n', (1153, 1167), False, 'from dsconcept.get_metrics import get_cat_inds, get_synth_preds, load_category_models, load_concept_models, HierarchicalClassifier, get_mets\n'), ((1584, 1628), 'dsconcept.get_metrics.get_cat_inds', 'get_cat_inds', (['clf.categories', 'cat_preds'], {'t': 't'}), '(clf.categories, cat_preds, t=t)\n', (1596, 1628), False, 'from dsconcept.get_metrics import get_cat_inds, get_synth_preds, load_category_models, load_concept_models, HierarchicalClassifier, get_mets\n'), ((2762, 2780), 'dask.compute', 'dask.compute', (['jobs'], {}), '(jobs)\n', (2774, 2780), False, 'import dask\n'), ((2799, 2823), 'pandas.DataFrame', 'pd.DataFrame', (['records[0]'], {}), '(records[0])\n', (2811, 2823), True, 'import pandas as pd\n'), ((2962, 3010), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Say hello"""'}), "(description='Say hello')\n", (2985, 3010), False, 'import argparse\n'), ((1838, 1858), 'tqdm.tqdm', 'tqdm', ([], {'total': 'shape[0]'}), '(total=shape[0])\n', (1842, 1858), False, 'from tqdm import tqdm\n'), ((1876, 2052), 'dsconcept.get_metrics.get_synth_preds', 'get_synth_preds', (['out_store', 'shape', 'all_cat_inds', 'clf.categories', 'synth_batch_size'], {'only_cat': '(False)', 'synth_strat': 'synth_strat', 'con_limit': 'con_limit', 'limit': 'limit', 'pbar': 'pbar'}), '(out_store, shape, all_cat_inds, clf.categories,\n synth_batch_size, only_cat=False, synth_strat=synth_strat, con_limit=\n con_limit, limit=limit, pbar=pbar)\n', (1891, 2052), False, 'from dsconcept.get_metrics import get_cat_inds, get_synth_preds, load_category_models, load_concept_models, HierarchicalClassifier, get_mets\n'), ((2220, 2245), 'h5py.File', 'h5py.File', (['out_store', '"""r"""'], {}), "(out_store, 'r')\n", (2229, 2245), False, 'import h5py\n'), ((2418, 2443), 'h5py.File', 'h5py.File', (['out_store', '"""r"""'], {}), "(out_store, 'r')\n", (2427, 2443), False, 'import h5py\n'), ((2629, 2651), 'dask.delayed', 'dask.delayed', (['get_mets'], {}), '(get_mets)\n', (2641, 2651), False, 'import dask\n')]
|
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pytest
from pyrado.spaces.box import BoxSpace
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from tests.environment_wrappers.mock_env import MockEnv
@pytest.mark.wrapper
def test_no_delay():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=0)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([4, 1]))
assert mockenv.last_act == [4, 1]
wenv.step(np.array([7, 5]))
assert mockenv.last_act == [7, 5]
@pytest.mark.wrapper
def test_act_delay():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=2)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([0, 1]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 4]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([1, 2]))
assert mockenv.last_act == [0, 1]
wenv.step(np.array([2, 3]))
assert mockenv.last_act == [2, 4]
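# Note (added): with delay=d the wrapper replays the action issued d steps
# earlier; the buffer is zero-initialized on reset, which is why the first d
# steps in the test above observe the action [0, 0].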
@pytest.mark.wrapper
def test_reset():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=1)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([0, 4]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([4, 4]))
assert mockenv.last_act == [0, 4]
# The next action would be [4, 4], but now we reset again
wenv.reset()
wenv.step(np.array([1, 2]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 3]))
assert mockenv.last_act == [1, 2]
@pytest.mark.wrapper
def test_domain_param():
mockenv = MockEnv(act_space=BoxSpace(-1, 1, shape=(2,)))
wenv = ActDelayWrapper(mockenv, delay=1)
# Reset to initialize buffer
wenv.reset()
# Perform some actions
wenv.step(np.array([0, 1]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 4]))
assert mockenv.last_act == [0, 1]
# change the delay and reset
wenv.domain_param = {"act_delay": 2}
wenv.reset()
wenv.step(np.array([1, 2]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([2, 3]))
assert mockenv.last_act == [0, 0]
wenv.step(np.array([8, 9]))
assert mockenv.last_act == [1, 2]
|
[
"numpy.array",
"pyrado.spaces.box.BoxSpace",
"pyrado.environment_wrappers.action_delay.ActDelayWrapper"
] |
[((2046, 2079), 'pyrado.environment_wrappers.action_delay.ActDelayWrapper', 'ActDelayWrapper', (['mockenv'], {'delay': '(0)'}), '(mockenv, delay=0)\n', (2061, 2079), False, 'from pyrado.environment_wrappers.action_delay import ActDelayWrapper\n'), ((2416, 2449), 'pyrado.environment_wrappers.action_delay.ActDelayWrapper', 'ActDelayWrapper', (['mockenv'], {'delay': '(2)'}), '(mockenv, delay=2)\n', (2431, 2449), False, 'from pyrado.environment_wrappers.action_delay import ActDelayWrapper\n'), ((2922, 2955), 'pyrado.environment_wrappers.action_delay.ActDelayWrapper', 'ActDelayWrapper', (['mockenv'], {'delay': '(1)'}), '(mockenv, delay=1)\n', (2937, 2955), False, 'from pyrado.environment_wrappers.action_delay import ActDelayWrapper\n'), ((3516, 3549), 'pyrado.environment_wrappers.action_delay.ActDelayWrapper', 'ActDelayWrapper', (['mockenv'], {'delay': '(1)'}), '(mockenv, delay=1)\n', (3531, 3549), False, 'from pyrado.environment_wrappers.action_delay import ActDelayWrapper\n'), ((2173, 2189), 'numpy.array', 'np.array', (['[4, 1]'], {}), '([4, 1])\n', (2181, 2189), True, 'import numpy as np\n'), ((2243, 2259), 'numpy.array', 'np.array', (['[7, 5]'], {}), '([7, 5])\n', (2251, 2259), True, 'import numpy as np\n'), ((2543, 2559), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2551, 2559), True, 'import numpy as np\n'), ((2613, 2629), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (2621, 2629), True, 'import numpy as np\n'), ((2683, 2699), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (2691, 2699), True, 'import numpy as np\n'), ((2753, 2769), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (2761, 2769), True, 'import numpy as np\n'), ((3049, 3065), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (3057, 3065), True, 'import numpy as np\n'), ((3119, 3135), 'numpy.array', 'np.array', (['[4, 4]'], {}), '([4, 4])\n', (3127, 3135), True, 'import numpy as np\n'), ((3270, 3286), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3278, 3286), True, 'import numpy as np\n'), ((3340, 3356), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (3348, 3356), True, 'import numpy as np\n'), ((3643, 3659), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3651, 3659), True, 'import numpy as np\n'), ((3713, 3729), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (3721, 3729), True, 'import numpy as np\n'), ((3876, 3892), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3884, 3892), True, 'import numpy as np\n'), ((3946, 3962), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (3954, 3962), True, 'import numpy as np\n'), ((4016, 4032), 'numpy.array', 'np.array', (['[8, 9]'], {}), '([8, 9])\n', (4024, 4032), True, 'import numpy as np\n'), ((2006, 2033), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)'], {'shape': '(2,)'}), '(-1, 1, shape=(2,))\n', (2014, 2033), False, 'from pyrado.spaces.box import BoxSpace\n'), ((2376, 2403), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)'], {'shape': '(2,)'}), '(-1, 1, shape=(2,))\n', (2384, 2403), False, 'from pyrado.spaces.box import BoxSpace\n'), ((2882, 2909), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)'], {'shape': '(2,)'}), '(-1, 1, shape=(2,))\n', (2890, 2909), False, 'from pyrado.spaces.box import BoxSpace\n'), ((3476, 3503), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)'], {'shape': '(2,)'}), '(-1, 1, shape=(2,))\n', (3484, 3503), False, 'from pyrado.spaces.box import BoxSpace\n')]
|
#!/usr/bin/env libtbx.python
#
# iotbx.xds.xds_cbf.py
#
# <NAME>, Diamond Light Source, 2012/OCT/16
#
# Class to read the CBF files used in XDS
#
from __future__ import absolute_import, division, print_function
class reader:
"""A class to read the CBF files used in XDS"""
def __init__(self):
pass
def read_file(self, filename):
"""Read the CBF file"""
import pycbf
self.cbf_handle = pycbf.cbf_handle_struct()
self.cbf_handle.read_file(filename, pycbf.MSG_DIGEST)
self.cbf_handle.rewind_datablock()
def get_data(self):
"""Get the gain array from the file"""
import numpy
# Select the first datablock and rewind all the categories
self.cbf_handle.select_datablock(0)
self.cbf_handle.select_category(0)
self.cbf_handle.select_column(2)
self.cbf_handle.select_row(0)
# Check the type of the element to ensure it's a binary
# otherwise raise an exception
type = self.cbf_handle.get_typeofvalue()
if type.find('bnry') > -1:
# Read the image data into an array
image_string = self.cbf_handle.get_integerarray_as_string()
image = numpy.fromstring(image_string, numpy.int32)
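      # note (added): numpy.fromstring is deprecated for binary input;
      # numpy.frombuffer(image_string, numpy.int32) is the modern equivalent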
# Get the array parameters
parameters = self.cbf_handle.get_integerarrayparameters_wdims()
image_size = (parameters[10], parameters[9])
# Resize the image
image.shape = (image_size)
else:
raise TypeError('Can\'t find image')
# Return the image
return image
if __name__ == '__main__':
import sys
import numpy
handle = reader()
handle.read_file(sys.argv[1])
image = handle.get_data()
|
[
"pycbf.cbf_handle_struct",
"numpy.fromstring"
] |
[((412, 437), 'pycbf.cbf_handle_struct', 'pycbf.cbf_handle_struct', ([], {}), '()\n', (435, 437), False, 'import pycbf\n'), ((1127, 1170), 'numpy.fromstring', 'numpy.fromstring', (['image_string', 'numpy.int32'], {}), '(image_string, numpy.int32)\n', (1143, 1170), False, 'import numpy\n')]
|
import pickle
import numpy as np
def fetch_file(path):
with open(path, 'rb') as fp:
return pickle.load(fp)
def fetch_adj_mat(column):
if column == 0:
return A1
elif column == 1:
return A2
elif column == 2:
return A3
# elif column == 3:
# return A4
print("Fetching files...")
A1 = np.array(
fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A1_fc.txt"))
A2 = np.array(
    fetch_file(
        "/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A2_fc.txt"))
A3 = np.array(
    fetch_file(
        "/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A3_fc.txt"))
# A4 = np.array(fetch_file(RELATIVE_PATH + ADJACENCY_MATRIX + "A4_fc.txt"))
influence_matrix = np.array(fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/influence_matrix_normal/definition_2/"
"influence_matrix_fc.txt"))
print(influence_matrix.shape)
krp = []
for i in range(3):
wa1 = A1 * influence_matrix[i][0]
wa2 = A2 * influence_matrix[i][1]
wa3 = A3 * influence_matrix[i][2]
# wa4 = A4 * influence_matrix_normal[i][3]
print(influence_matrix[i][0])
print(influence_matrix[i][1])
print(influence_matrix[i][2])
# print(influence_matrix_normal[i][3])
for j in range(1134):
row = []
row.extend(wa1[j])
row.extend(wa2[j])
row.extend(wa3[j])
# row.extend(wa4[j])
krp.append(row)
print("Clearing variables...")
A1 = None
A2 = None
A3 = None
# A4 = None
influence_matrix = None
print("Setting up kr_product...")
kr_product = np.array(krp, dtype=np.float)
krp.clear()
print(kr_product.shape)
print(kr_product)
print("Calculating eigenvector...")
e = np.linalg.eig(kr_product)
e_val = e[0]
e_vec = e[1]
ind = int(np.argmax(np.abs(e_val)))  # dominant (largest-magnitude) eigenvalue
print(ind)
# np.linalg.eig returns eigenvectors as the *columns* of e_vec
pev = e_vec[:, ind] / np.linalg.norm(e_vec[:, ind])
print(pev.shape)
print(pev)
print(sum(map(lambda x: x.real * x.real, pev)))
print("Saving eigenvector...")
with open("global_eigenvector_fc.txt", 'wb') as fp:
pickle.dump(pev, fp)
print("Saving eigenvalues...")
with open("eigenvalue_" + str(ind) + "_fc.txt", "wb") as fp:
pickle.dump(e_val[ind], fp)
print("Process finished!")
|
[
"pickle.dump",
"numpy.linalg.eig",
"pickle.load",
"numpy.array",
"numpy.linalg.norm"
] |
[((1745, 1774), 'numpy.array', 'np.array', (['krp'], {'dtype': 'np.float'}), '(krp, dtype=np.float)\n', (1753, 1774), True, 'import numpy as np\n'), ((1871, 1896), 'numpy.linalg.eig', 'np.linalg.eig', (['kr_product'], {}), '(kr_product)\n', (1884, 1896), True, 'import numpy as np\n'), ((1992, 2018), 'numpy.linalg.norm', 'np.linalg.norm', (['e_vec[ind]'], {}), '(e_vec[ind])\n', (2006, 2018), True, 'import numpy as np\n'), ((2185, 2205), 'pickle.dump', 'pickle.dump', (['pev', 'fp'], {}), '(pev, fp)\n', (2196, 2205), False, 'import pickle\n'), ((2303, 2330), 'pickle.dump', 'pickle.dump', (['e_val[ind]', 'fp'], {}), '(e_val[ind], fp)\n', (2314, 2330), False, 'import pickle\n'), ((105, 120), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (116, 120), False, 'import pickle\n')]
|
from typing import List, overload
from flow.envs.multiagent.traffic_light_grid import MultiTrafficLightGridPOEnv
from flow.envs.traffic_light_grid import TrafficLightGridPOEnv
from gym.spaces import Box, Discrete
import numpy as np
ID_IDX = 1
class SeqTraffiLightEnv(TrafficLightGridPOEnv):
def __init__(self, env_params, sim_params, network, simulator):
super().__init__(env_params, sim_params, network, simulator=simulator)
# number of nearest lights to observe, defaults to 4
self.num_local_lights = env_params.additional_params.get(
"num_local_lights", 4)
# number of nearest edges to observe, defaults to 4
self.num_local_edges = env_params.additional_params.get(
"num_local_edges", 4)
@property
def observation_space(self):
"""State space that is partially observed.
Velocities, distance to intersections, edge number (for nearby
vehicles) from each direction, local edge information, and traffic
light state.
"""
tl_box = Box(
low=0.,
high=1,
shape=(
self.num_traffic_lights,
3 * 4 * self.num_observed +
2 * self.num_local_edges +
2 * (1 + self.num_local_lights),
),
dtype=np.float32)
return tl_box
def get_state(self):
"""Observations for each traffic light agent.
        :return: list of per-agent observations (one entry per traffic light, in id order) as follows:
- For the self.num_observed number of vehicles closest and incoming
towards traffic light agent, gives the vehicle velocity, distance to
intersection, edge number.
- For edges in the network, gives the density and average velocity.
- For the self.num_local_lights number of nearest lights (itself
included), gives the traffic light information, including the last
change time, light direction (i.e. phase), and a currently_yellow flag.
"""
# Normalization factors
max_speed = max(
self.k.network.speed_limit(edge)
for edge in self.k.network.get_edge_list())
grid_array = self.net_params.additional_params["grid_array"]
max_dist = max(grid_array["short_length"], grid_array["long_length"],
grid_array["inner_length"])
# TODO(cathywu) refactor TrafficLightGridPOEnv with convenience
# methods for observations, but remember to flatten for single-agent
# Observed vehicle information
speeds = []
dist_to_intersec = []
edge_number = []
all_observed_ids = []
for _, edges in self.network.node_mapping:
local_speeds = []
local_dists_to_intersec = []
local_edge_numbers = []
for edge in edges:
observed_ids = \
self.get_closest_to_intersection(edge, self.num_observed)
all_observed_ids.append(observed_ids)
# check which edges we have so we can always pad in the right
# positions
local_speeds.extend(
[self.k.vehicle.get_speed(veh_id) / max_speed for veh_id in
observed_ids])
local_dists_to_intersec.extend([(self.k.network.edge_length(
self.k.vehicle.get_edge(
veh_id)) - self.k.vehicle.get_position(
veh_id)) / max_dist for veh_id in observed_ids])
local_edge_numbers.extend([self._convert_edge(
self.k.vehicle.get_edge(veh_id)) / (
self.k.network.network.num_edges - 1) for veh_id in
observed_ids])
if len(observed_ids) < self.num_observed:
diff = self.num_observed - len(observed_ids)
local_speeds.extend([1] * diff)
local_dists_to_intersec.extend([1] * diff)
local_edge_numbers.extend([0] * diff)
speeds.append(local_speeds)
dist_to_intersec.append(local_dists_to_intersec)
edge_number.append(local_edge_numbers)
# Edge information
density = []
velocity_avg = []
for edge in self.k.network.get_edge_list():
ids = self.k.vehicle.get_ids_by_edge(edge)
if len(ids) > 0:
# TODO(cathywu) Why is there a 5 here?
density += [5 * len(ids) / self.k.network.edge_length(edge)]
velocity_avg += [np.mean(
[self.k.vehicle.get_speed(veh_id) for veh_id in
ids]) / max_speed]
else:
density += [0]
velocity_avg += [0]
density = np.array(density)
velocity_avg = np.array(velocity_avg)
self.observed_ids = all_observed_ids
# Traffic light information
direction = self.direction.flatten()
currently_yellow = self.currently_yellow.flatten()
# This is a catch-all for when the relative_node method returns a -1
# (when there is no node in the direction sought). We add a last
# item to the lists here, which will serve as a default value.
# TODO(cathywu) are these values reasonable?
direction = np.append(direction, [0])
currently_yellow = np.append(currently_yellow, [1])
obs = []
# obs -> [num_light, observation]
node_to_edges = self.network.node_mapping
for rl_id in self.k.traffic_light.get_ids():
rl_id_num = int(rl_id.split("center")[ID_IDX])
local_edges = node_to_edges[rl_id_num][1]
local_edge_numbers = [self.k.network.get_edge_list().index(e)
for e in local_edges]
local_id_nums = [rl_id_num, self._get_relative_node(rl_id, "top"),
self._get_relative_node(rl_id, "bottom"),
self._get_relative_node(rl_id, "left"),
self._get_relative_node(rl_id, "right")]
observation = np.array(np.concatenate(
[speeds[rl_id_num], dist_to_intersec[rl_id_num],
edge_number[rl_id_num], density[local_edge_numbers],
velocity_avg[local_edge_numbers],
direction[local_id_nums], currently_yellow[local_id_nums]
]))
obs.append(observation)
return obs
|
[
"numpy.append",
"numpy.array",
"numpy.concatenate",
"gym.spaces.Box"
] |
[((1060, 1229), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': '(1)', 'shape': '(self.num_traffic_lights, 3 * 4 * self.num_observed + 2 * self.\n num_local_edges + 2 * (1 + self.num_local_lights))', 'dtype': 'np.float32'}), '(low=0.0, high=1, shape=(self.num_traffic_lights, 3 * 4 * self.\n num_observed + 2 * self.num_local_edges + 2 * (1 + self.\n num_local_lights)), dtype=np.float32)\n', (1063, 1229), False, 'from gym.spaces import Box, Discrete\n'), ((4827, 4844), 'numpy.array', 'np.array', (['density'], {}), '(density)\n', (4835, 4844), True, 'import numpy as np\n'), ((4868, 4890), 'numpy.array', 'np.array', (['velocity_avg'], {}), '(velocity_avg)\n', (4876, 4890), True, 'import numpy as np\n'), ((5371, 5396), 'numpy.append', 'np.append', (['direction', '[0]'], {}), '(direction, [0])\n', (5380, 5396), True, 'import numpy as np\n'), ((5424, 5456), 'numpy.append', 'np.append', (['currently_yellow', '[1]'], {}), '(currently_yellow, [1])\n', (5433, 5456), True, 'import numpy as np\n'), ((6188, 6413), 'numpy.concatenate', 'np.concatenate', (['[speeds[rl_id_num], dist_to_intersec[rl_id_num], edge_number[rl_id_num],\n density[local_edge_numbers], velocity_avg[local_edge_numbers],\n direction[local_id_nums], currently_yellow[local_id_nums]]'], {}), '([speeds[rl_id_num], dist_to_intersec[rl_id_num], edge_number\n [rl_id_num], density[local_edge_numbers], velocity_avg[\n local_edge_numbers], direction[local_id_nums], currently_yellow[\n local_id_nums]])\n', (6202, 6413), True, 'import numpy as np\n')]
|
import os
import scipy.io.wavfile
import matplotlib.pyplot as plt
import numpy as np
import random
'''
Create a random dataset from seven recorded notes that are always in phase.
Frequencies are 440, 531, 667, 880, 1056, 1320 and 1760 Hz (the wav files loaded below).
'''
fs = 16000
x1 = scipy.io.wavfile.read('corpus/Analysis/a440.wav')[1]
x2 = scipy.io.wavfile.read('corpus/Analysis/c531.wav')[1]
x3 = scipy.io.wavfile.read('corpus/Analysis/e667.wav')[1]
x4 = scipy.io.wavfile.read('corpus/Analysis/a880.wav')[1]
x5 = scipy.io.wavfile.read('corpus/Analysis/c1056.wav')[1]
x6 = scipy.io.wavfile.read('corpus/Analysis/e1320.wav')[1]
x7 = scipy.io.wavfile.read('corpus/Analysis/a1760.wav')[1]
# Categories
a = [0]
b = [1]
c = [2]
def createRandomSequence():
# sequence length
sq_length = random.randint(5, 10)
#create sequence
sequence = []
sampleSequence = []
minLen = 1818
for i in range(0, sq_length):
value = random.randint(0,6)
sequence.append(value)
#create lengths per value
lenValue = minLen * random.randint(1,10)
sampleSequence.append(lenValue)
return sequence, sampleSequence
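# Example (added): a possible return value is
#   ([3, 0, 6, 2, 5], [1818, 9090, 3636, 1818, 7272])
# i.e. category indices into case() below and per-note sample counts, each a
# multiple of minLen = 1818 samples (~0.11 s at fs = 16000).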
def genFile(sequence, sampleSequence, c):
newSequence = []
fullSequence = []
for i in range(len(sequence)):
newSequence = int(sampleSequence[i]) * [sequence[i]]
fullSequence = fullSequence + newSequence
file00 = open(os.path.join('corpus', 'panFluteBigDataset', 'lc_train%s.txt' % c), 'w')
for item in fullSequence:
file00.write('%i,\n' % item)
file00.close()
def case(x):
return {
0: x1,
1: x2,
2: x3,
3: x4,
4: x5,
5: x6,
6: x7
}[x]
def genSignals(sequence, sampleSequence, c):
y=[]
for i in range(len(sequence)):
# convert categories to frequencies
freq = case(sequence[i])
#nSamples = np.arange(sampleSequence[i])
#a = random.randint(25, 100)/100
a = 1
#y0 = a*np.sin(2*np.pi*freq*nSamples / fs)
y0= freq[:sampleSequence[i]]
y = scipy.hstack((y, y0))
y = y / y[np.argmax(y)]
noise = 0.01*np.random.normal(0, 1, len(y))
y = np.asarray(y) + noise
scipy.io.wavfile.write(os.path.join('corpus', 'panFluteBigDataset7freq', 'lc_train%s.wav' % c), fs, y)
def main():
for c in range(0,100):
sequence, sampleSequence = createRandomSequence()
#print(sequence, sampleSequence)
#genFile(sequence, sampleSequence, c)
genSignals(sequence, sampleSequence, c)
if __name__ == '__main__':
main()
|
[
"numpy.argmax",
"numpy.asarray",
"os.path.join",
"random.randint"
] |
[((773, 794), 'random.randint', 'random.randint', (['(5)', '(10)'], {}), '(5, 10)\n', (787, 794), False, 'import random\n'), ((926, 946), 'random.randint', 'random.randint', (['(0)', '(6)'], {}), '(0, 6)\n', (940, 946), False, 'import random\n'), ((1389, 1455), 'os.path.join', 'os.path.join', (['"""corpus"""', '"""panFluteBigDataset"""', "('lc_train%s.txt' % c)"], {}), "('corpus', 'panFluteBigDataset', 'lc_train%s.txt' % c)\n", (1401, 1455), False, 'import os\n'), ((2167, 2180), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2177, 2180), True, 'import numpy as np\n'), ((2216, 2287), 'os.path.join', 'os.path.join', (['"""corpus"""', '"""panFluteBigDataset7freq"""', "('lc_train%s.wav' % c)"], {}), "('corpus', 'panFluteBigDataset7freq', 'lc_train%s.wav' % c)\n", (2228, 2287), False, 'import os\n'), ((1039, 1060), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1053, 1060), False, 'import random\n'), ((2097, 2109), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (2106, 2109), True, 'import numpy as np\n')]
|
""" This module contains a pytorch dataset for learning peptide embeddings.
In particular, each "instance" of the dataset comprises two peptide sequences,
as well as the sNebula similarity between them. The sNebula distance reflects
the BLOSSUM similarity transformed from 0 to 1.
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
import torch
import torch.utils.data
from lifesci.peptide_dataset import PeptideDataset
import lifesci.sequence_similarity_utils as sequence_similarity_utils
import pyllars.string_utils as string_utils
from typing import NamedTuple, Optional
class PeptideEncoderTrainingDatasetItem(NamedTuple):
aa_sequence_xs: str
aa_sequence_ys: str
encoded_xs: torch.IntTensor
encoded_ys: torch.IntTensor
similarities: torch.FloatTensor
_DEFAULT_SEQUENCE_COLUMN = 'sequence'
_DEFAULT_SEED = 8675309
_DEFAULT_NAME = "PeptideEncoderTrainingDataset"
_DEFAULT_MAX_LEN = 25
class PeptideEncoderTrainingDataset(torch.utils.data.Dataset):
""" Generate training samples from a list of amino acid sequences
In particular, this class reads a list of peptides from `dataset_path`. It
then draws pairs of peptides from the list and calculates the sNebula
similarity score between them. Thus, each item from this dataset consists
of two peptide sequences and the similarity score.
In case the dataset object should be used for validation, the
`is_validation` flag can be set to `True`. In that case, a fixed set of
pairings will be selected for the peptides so that performance metrics are
constant from iteration to iteration. Otherwise (i.e., for training), one
member of each pair is randomly sampled.
Parameters
----------
dataset_path : str
The path to the dataset. It should be compatible with `pandas.read_csv`
and contain a column named `sequence_column` which includes the
sequences. Other columns are ignored.
aa_encoding_map : pyllars.string_utils.encoding_map_type
A mapping from each amino acid to its integer index.
N.B. This should **not** be a one-hot representation, but, as stated,
the integer index. Further, the padding character must be "-".
is_validation : bool
Whether the dataset will be used for validation (or testing)
sequence_column : str
The name of the column which contains the amino acid sequences
max_len : int
The maximum length for a peptide. Peptides longer than this will be
truncated, and shorter peptides will be padded to this length.
seed : int
Seed for the random number generator. This is used to randomly select
the second sequence in each of the instances.
name : str
A name for the dataset instance. This is mostly used for logging.
"""
def __init__(self,
dataset_path:str,
aa_encoding_map:string_utils.encoding_map_type,
is_validation:bool=False,
sequence_column:str=_DEFAULT_SEQUENCE_COLUMN,
max_len:int=_DEFAULT_MAX_LEN,
seed:int=_DEFAULT_SEED,
name:str=_DEFAULT_NAME):
self.aa_encoding_map = aa_encoding_map
self.is_validation = is_validation
self.sequence_column = sequence_column
self.max_len = max_len
self.seed = seed
self.name = name
self.rng = np.random.default_rng(self.seed)
df_peptides = PeptideDataset.load(dataset_path, sequence_column, filters=["standard_aa_only"])
self.aa_sequences = df_peptides[self.sequence_column].values
self.encoded_aa_sequences = string_utils.encode_all_sequences(
sequences=self.aa_sequences,
encoding_map=self.aa_encoding_map,
maxlen=self.max_len,
pad_value='-',
same_length=False
)
self.encoded_aa_sequences = self.encoded_aa_sequences.astype(int)
if self.is_validation:
self._matching_validation_item = np.random.permutation(len(self.aa_sequences))
def log(self, msg:str, level:int=logging.INFO) -> None:
""" Log `msg` using `level` using the module-level logger """
msg = "[{}] {}".format(self.name, msg)
logger.log(level, msg)
def __len__(self) -> int:
return len(self.aa_sequences)
def __getitem__(self, idx) -> PeptideEncoderTrainingDatasetItem:
x = idx
# and choose an appropriate matching index based on the dataset status
if self.is_validation:
y = self._matching_validation_item[idx]
else:
# select the second sequence randomly
y = self.rng.integers(low=0, high=len(self), size=1)
# the rng returns an array...
y = y[0]
encoded_xs = self.encoded_aa_sequences[x]
encoded_ys = self.encoded_aa_sequences[y]
peptide_xs = self.aa_sequences[x]
peptide_ys = self.aa_sequences[y]
similarities = sequence_similarity_utils.get_snebula_score(peptide_xs, peptide_ys)
encoded_xs = torch.as_tensor(encoded_xs, dtype=torch.long)
encoded_ys = torch.as_tensor(encoded_ys, dtype=torch.long)
similarities = torch.as_tensor(similarities, dtype=torch.float32)
ret = PeptideEncoderTrainingDatasetItem(
peptide_xs, peptide_ys, encoded_xs, encoded_ys, similarities
)
return ret
def get_trimmed_peptide_lengths(self, peptides) -> np.ndarray:
""" Extract the trimmed length of the given peptides, which accounts for max_len """
peptide_lengths = [len(p) for p in peptides]
trimmed_peptide_lengths = np.clip(peptide_lengths, 0, self.max_len)
return trimmed_peptide_lengths
@classmethod
def load(clazz,
dataset_path:Optional[str],
aa_encoding_map:string_utils.encoding_map_type,
is_validation:bool,
name:str) -> Optional["PeptideEncoderTrainingDataset"]:
""" Load the dataset given by `key` in `self.config`
Additionally, `name` will be used for the name of the dataset.
Parameters
----------
dataset_path : typing.Optional[str]
The path to the dataset
aa_encoding_map : pyllars.string_utils.encoding_map_type
A mapping from each amino acid to its integer index.
is_validation : bool
Whether the dataset will be used for validation (or testing)
name : str
The name for the dataset, if it is in the config file. Example:
"TrainingSet"
Returns
-------
dataset : typing.Optional[AAEncoderDataset]
If `key` is in `self.config`, then `dataset` will be the dataset
object based on that file. Otherwise, this function returns `None`.
"""
dataset = None
if dataset_path is not None:
dataset = PeptideEncoderTrainingDataset (
dataset_path=dataset_path,
aa_encoding_map=aa_encoding_map,
is_validation=is_validation,
name=name
)
return dataset
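# Minimal usage sketch (added; the CSV path and the encoding map below are
# illustrative assumptions, not taken from this module):
#
#   aa_map = {aa: i + 1 for i, aa in enumerate("ACDEFGHIKLMNPQRSTVWY")}
#   aa_map['-'] = 0  # the padding character must be "-"
#   train_set = PeptideEncoderTrainingDataset.load(
#       "peptides.csv", aa_map, is_validation=False, name="TrainingSet")
#   loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)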
|
[
"logging.getLogger",
"numpy.clip",
"pyllars.string_utils.encode_all_sequences",
"torch.as_tensor",
"numpy.random.default_rng",
"lifesci.sequence_similarity_utils.get_snebula_score",
"lifesci.peptide_dataset.PeptideDataset.load"
] |
[((310, 337), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (327, 337), False, 'import logging\n'), ((3411, 3443), 'numpy.random.default_rng', 'np.random.default_rng', (['self.seed'], {}), '(self.seed)\n', (3432, 3443), True, 'import numpy as np\n'), ((3467, 3552), 'lifesci.peptide_dataset.PeptideDataset.load', 'PeptideDataset.load', (['dataset_path', 'sequence_column'], {'filters': "['standard_aa_only']"}), "(dataset_path, sequence_column, filters=['standard_aa_only']\n )\n", (3486, 3552), False, 'from lifesci.peptide_dataset import PeptideDataset\n'), ((3654, 3816), 'pyllars.string_utils.encode_all_sequences', 'string_utils.encode_all_sequences', ([], {'sequences': 'self.aa_sequences', 'encoding_map': 'self.aa_encoding_map', 'maxlen': 'self.max_len', 'pad_value': '"""-"""', 'same_length': '(False)'}), "(sequences=self.aa_sequences, encoding_map\n =self.aa_encoding_map, maxlen=self.max_len, pad_value='-', same_length=\n False)\n", (3687, 3816), True, 'import pyllars.string_utils as string_utils\n'), ((5023, 5090), 'lifesci.sequence_similarity_utils.get_snebula_score', 'sequence_similarity_utils.get_snebula_score', (['peptide_xs', 'peptide_ys'], {}), '(peptide_xs, peptide_ys)\n', (5066, 5090), True, 'import lifesci.sequence_similarity_utils as sequence_similarity_utils\n'), ((5113, 5158), 'torch.as_tensor', 'torch.as_tensor', (['encoded_xs'], {'dtype': 'torch.long'}), '(encoded_xs, dtype=torch.long)\n', (5128, 5158), False, 'import torch\n'), ((5180, 5225), 'torch.as_tensor', 'torch.as_tensor', (['encoded_ys'], {'dtype': 'torch.long'}), '(encoded_ys, dtype=torch.long)\n', (5195, 5225), False, 'import torch\n'), ((5249, 5299), 'torch.as_tensor', 'torch.as_tensor', (['similarities'], {'dtype': 'torch.float32'}), '(similarities, dtype=torch.float32)\n', (5264, 5299), False, 'import torch\n'), ((5700, 5741), 'numpy.clip', 'np.clip', (['peptide_lengths', '(0)', 'self.max_len'], {}), '(peptide_lengths, 0, self.max_len)\n', (5707, 5741), True, 'import numpy as np\n')]
|
from collections import defaultdict
import itertools
import numpy as np
import pickle
import time
import warnings
from Analysis import binomial_pgf, BranchModel, StaticModel
from simulators.fires.UrbanForest import UrbanForest
from Policies import NCTfires, UBTfires, DWTfires, RHTfires, USTfires
from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control
np.seterr(all='raise')
def uniform():
# given alpha and beta, compute lattice probabilities for every (parent, child) pair
a = 0.2763
b = np.exp(-1/10)
p = percolation_parameter(a, b)
if p <= 0.5:
raise Warning('Percolation parameter {0:0.2f} is not supercritical'.format(p))
lattice_p = defaultdict(lambda: p)
# given (delta_alpha, delta_beta), construct the equivalent delta_p
delta_a = 0
delta_b = 0.4
dp = equivalent_percolation_control(a, b, delta_a, delta_b)
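    # added note: equivalent_percolation_control folds the GMDP control pair
    # (delta_a, delta_b) into a single reduction dp of the percolation
    # parameter, so the sufficiency check below can stay in p-space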
if p - dp >= 0.5:
raise Warning('Control is insufficient: p - dp = {0:0.2f} - {1:0.2f} = {2:0.2f}'.format(p, dp, p-dp))
control_p = defaultdict(lambda: dp)
control_ab = defaultdict(lambda: (delta_a, delta_b))
# or given delta_p, construct the equivalent (delta_alpha, delta_beta)
# delta_p = 0.4
# control_percolation = defaultdict(lambda: delta_p)
# control_gmdp = defaultdict(lambda: equivalent_gmdp_control(a, b, delta_p))
    # capture the current scalar values: `lambda: a` resolves `a` at call
    # time, so after the rebinding below it would return the defaultdict itself
    a_val, b_val = a, b
    a = defaultdict(lambda: a_val)
    b = defaultdict(lambda: b_val)
return a, b, lattice_p, control_p, control_ab
def nonuniform(simulation):
alpha_set = dict()
# beta_set = defaultdict(lambda: np.exp(-1/9))
beta_set = dict()
p_set = dict()
delta_beta = 0.35
control_gmdp = dict()
alpha_start = 0.2
alpha_end = 0.4
for r in range(simulation.dims[0]):
for c in range(simulation.dims[1]):
alpha_set[(r, c)] = alpha_start + (c/(simulation.dims[1]-1))*(alpha_end-alpha_start)
beta1 = np.exp(-1/5)
beta2 = np.exp(-1/10)
for r in range(simulation.dims[0]):
for c in range(simulation.dims[1]):
if c < simulation.dims[1]-simulation.urban_width:
beta_set[(r, c)] = beta1
else:
beta_set[(r, c)] = beta2
control_gmdp[(r, c)] = {'healthy': (alpha_set[(r, c)], 0),
'on_fire': (0, np.amin([delta_beta, beta_set[(r, c)]]))}
# set initial condition
initial_fire = []
r_center = np.floor((simulation.dims[0]-1)/2).astype(np.uint8)
c_center = np.floor((simulation.dims[1]-1)/2).astype(np.uint8)
delta_r = [k for k in range(-2, 3)]
delta_c = [k for k in range(-2, 3)]
deltas = itertools.product(delta_r, delta_c)
for (dr, dc) in deltas:
if dr == 0 and dc == 0:
continue
elif (dr == -2 or dr == 2) and (dc == -2 or dc == 2):
continue
elif dc == dr or dc == -dr:
continue
r, c = r_center + dr, c_center + dc
initial_fire.append((r, c))
# control_p = dict()
for tree_rc in simulation.group.keys():
for neighbor in simulation.group[tree_rc].neighbors:
p = percolation_parameter(alpha_set[neighbor], beta_set[tree_rc])
if p <= 0.5:
warnings.warn('p({0:0.2f}, {1:0.2f}) = {2:0.2f} <= 0.5'.format(alpha_set[neighbor],
beta_set[tree_rc], p))
p_set[(tree_rc, neighbor)] = p
# control_p[(tree_rc, neighbor)] = dict()
#
# for k in control_gmdp[neighbor].keys():
# da, db = control_gmdp[neighbor][k]
# dp = equivalent_percolation_control(alpha_set[neighbor], beta_set[tree_rc], da, db)
# if p - dp >= 0.5:
# warnings.warn('p - dp = {0:0.2f} - {1:0.2f} = {2:0.2f} >= 0.5'.format(p, dp, p - dp))
#
# control_p[(tree_rc, neighbor)][k] = dp
return alpha_set, beta_set, initial_fire, control_gmdp, p_set
def benchmark(simulation, branchmodel, policy, num_generations=1, num_simulations=1):
print('Running policy {0:s} with capacity {1:d} for {2:d} simulations'.format(policy.name,
policy.capacity,
num_simulations))
print('started at {0:s}'.format(time.strftime('%d-%b-%Y %H:%M')))
tic = time.clock()
results = dict()
staticmodel = StaticModel()
for seed in range(num_simulations):
np.random.seed(seed)
simulation.reset()
simulation.rng = seed
while not simulation.early_end:
branchmodel.reset()
branchmodel.set_boundary(fire_boundary(simulation))
if isinstance(policy, USTfires):
staticmodel.set_boundary(urban_boundary(simulation))
policy.urbanboundary = urban_boundary(simulation)
def children_function(p):
return forest_children(simulation, p)
branchmodel.set_children_function(children_function)
for _ in range(num_generations):
for process in branchmodel.GWprocesses.values():
for parent in process.current_parents:
if parent not in branchmodel.lattice_children:
branchmodel.lattice_children[parent] = branchmodel.children_function(parent)
if not isinstance(policy, USTfires):
policy.generate_map(branchmodel)
else:
policy.generate_map(branchmodel, staticmodel)
branchmodel.next_generation(policy)
if isinstance(policy, USTfires):
staticmodel.next_boundary(policy.control_decisions)
# apply control and update simulator
if not isinstance(policy, USTfires):
control = policy.control(branchmodel)
else:
control = policy.control(branchmodel, staticmodel)
simulation.update(control)
if (seed+1) % 10 == 0:
print('completed {0:d} simulations'.format((seed+1)))
results[seed] = {'healthy_trees': simulation.stats_trees[0]/np.sum(simulation.stats_trees),
'healthy_urban': simulation.stats_urban[0]/np.sum(simulation.stats_urban),
'razed_urban': simulation.stats_urban[3]/np.sum(simulation.stats_urban)}
toc = time.clock()
dt = toc - tic
print('finished at {0:s}'.format(time.strftime('%d-%b-%Y %H:%M')))
print('{0:0.2f}s = {1:0.2f}m = {2:0.2f}h elapsed'.format(dt, dt/60, dt/3600))
filename = policy.name + '_s' + str(num_simulations) + '.pkl'
output = open('results/' + filename, 'wb')
pickle.dump(results, output)
output.close()
print('median healthy trees: {0:0.2f}%'.format(100*np.median([results[s]['healthy_trees']
for s in results.keys()])))
print('median healthy urban developments: {0:0.2f}%'.format(100*np.median([results[s]['healthy_urban']
for s in results.keys()])))
print('median removed urban developments: {0:0.2f}%'.format(100*np.median([results[s]['razed_urban']
for s in results.keys()])))
# print('mean remaining trees: {0:0.2f}%'.format(100*np.mean(results)))
# print('minimum {0:0.2f}, maximum {1:0.2f}'.format(100*np.amin(results), 100*np.amax(results)))
# first, third = np.percentile(results, [25, 75])
# print('1st quartile {0:0.2f}, 3rd quartile {1:0.2f}'.format(100*first, 100*third))
return
if __name__ == '__main__':
# forest parameters
dimension = 50
urban_width = 10
# generate information for uniform or non-uniform case
# alpha, beta, lattice_parameters, control_percolation, control_gmdp = uniform(LatticeForest(dimension))
# alpha, beta, p_parameters, map_percolation, map_gmdp = nonuniform(LatticeForest(dimension))
alpha, beta, initial_fire, map_gmdp, p_parameters = nonuniform(UrbanForest(dimension, urban_width))
# sim = LatticeForest(dimension, alpha=alpha, beta=beta)
sim = UrbanForest(dimension, urban_width, initial_fire=initial_fire, alpha=alpha, beta=beta)
# define policy
cap = 6
pi = NCTfires(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
# pi = UBTfires(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
# pi = DWTfires(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
# pi = RHTfires(capacity=cap, horizon=1, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
# pi = USTfires(capacity=cap, horizon=5, control_map_gmdp=map_gmdp, alpha_set=alpha, beta_set=beta)
# create branching process model approximation
bm = BranchModel(lattice_parameters=p_parameters, pgf=binomial_pgf)
sm = StaticModel()
benchmark(sim, bm, pi, num_generations=1, num_simulations=1000)
print()
|
[
"Utilities.equivalent_percolation_control",
"time.clock",
"Utilities.urban_boundary",
"simulators.fires.UrbanForest.UrbanForest",
"Utilities.percolation_parameter",
"itertools.product",
"numpy.exp",
"numpy.random.seed",
"Utilities.fire_boundary",
"numpy.amin",
"Policies.NCTfires",
"numpy.floor",
"Utilities.forest_children",
"pickle.dump",
"Analysis.StaticModel",
"time.strftime",
"Analysis.BranchModel",
"numpy.sum",
"collections.defaultdict",
"numpy.seterr"
] |
[((423, 445), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (432, 445), True, 'import numpy as np\n'), ((575, 590), 'numpy.exp', 'np.exp', (['(-1 / 10)'], {}), '(-1 / 10)\n', (581, 590), True, 'import numpy as np\n'), ((597, 624), 'Utilities.percolation_parameter', 'percolation_parameter', (['a', 'b'], {}), '(a, b)\n', (618, 624), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n'), ((745, 768), 'collections.defaultdict', 'defaultdict', (['(lambda : p)'], {}), '(lambda : p)\n', (756, 768), False, 'from collections import defaultdict\n'), ((884, 938), 'Utilities.equivalent_percolation_control', 'equivalent_percolation_control', (['a', 'b', 'delta_a', 'delta_b'], {}), '(a, b, delta_a, delta_b)\n', (914, 938), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n'), ((1087, 1111), 'collections.defaultdict', 'defaultdict', (['(lambda : dp)'], {}), '(lambda : dp)\n', (1098, 1111), False, 'from collections import defaultdict\n'), ((1128, 1168), 'collections.defaultdict', 'defaultdict', (['(lambda : (delta_a, delta_b))'], {}), '(lambda : (delta_a, delta_b))\n', (1139, 1168), False, 'from collections import defaultdict\n'), ((1411, 1434), 'collections.defaultdict', 'defaultdict', (['(lambda : a)'], {}), '(lambda : a)\n', (1422, 1434), False, 'from collections import defaultdict\n'), ((1442, 1465), 'collections.defaultdict', 'defaultdict', (['(lambda : b)'], {}), '(lambda : b)\n', (1453, 1465), False, 'from collections import defaultdict\n'), ((1946, 1960), 'numpy.exp', 'np.exp', (['(-1 / 5)'], {}), '(-1 / 5)\n', (1952, 1960), True, 'import numpy as np\n'), ((1971, 1986), 'numpy.exp', 'np.exp', (['(-1 / 10)'], {}), '(-1 / 10)\n', (1977, 1986), True, 'import numpy as np\n'), ((2675, 2710), 'itertools.product', 'itertools.product', (['delta_r', 'delta_c'], {}), '(delta_r, delta_c)\n', (2692, 2710), False, 'import itertools\n'), ((4517, 4529), 'time.clock', 'time.clock', ([], {}), '()\n', (4527, 4529), False, 'import time\n'), ((4570, 4583), 'Analysis.StaticModel', 'StaticModel', ([], {}), '()\n', (4581, 4583), False, 'from Analysis import binomial_pgf, BranchModel, StaticModel\n'), ((6588, 6600), 'time.clock', 'time.clock', ([], {}), '()\n', (6598, 6600), False, 'import time\n'), ((6891, 6919), 'pickle.dump', 'pickle.dump', (['results', 'output'], {}), '(results, output)\n', (6902, 6919), False, 'import pickle\n'), ((8420, 8510), 'simulators.fires.UrbanForest.UrbanForest', 'UrbanForest', (['dimension', 'urban_width'], {'initial_fire': 'initial_fire', 'alpha': 'alpha', 'beta': 'beta'}), '(dimension, urban_width, initial_fire=initial_fire, alpha=alpha,\n beta=beta)\n', (8431, 8510), False, 'from simulators.fires.UrbanForest import UrbanForest\n'), ((8549, 8635), 'Policies.NCTfires', 'NCTfires', ([], {'capacity': 'cap', 'alpha_set': 'alpha', 'beta_set': 'beta', 'control_map_gmdp': 'map_gmdp'}), '(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=\n map_gmdp)\n', (8557, 8635), False, 'from Policies import NCTfires, UBTfires, DWTfires, RHTfires, USTfires\n'), ((9086, 9148), 'Analysis.BranchModel', 'BranchModel', ([], {'lattice_parameters': 'p_parameters', 'pgf': 'binomial_pgf'}), '(lattice_parameters=p_parameters, pgf=binomial_pgf)\n', (9097, 9148), False, 'from Analysis import binomial_pgf, BranchModel, StaticModel\n'), ((9158, 9171), 'Analysis.StaticModel', 'StaticModel', ([], {}), '()\n', (9169, 9171), False, 'from Analysis import binomial_pgf, BranchModel, StaticModel\n'), ((4633, 4653), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4647, 4653), True, 'import numpy as np\n'), ((8311, 8346), 'simulators.fires.UrbanForest.UrbanForest', 'UrbanForest', (['dimension', 'urban_width'], {}), '(dimension, urban_width)\n', (8322, 8346), False, 'from simulators.fires.UrbanForest import UrbanForest\n'), ((2462, 2500), 'numpy.floor', 'np.floor', (['((simulation.dims[0] - 1) / 2)'], {}), '((simulation.dims[0] - 1) / 2)\n', (2470, 2500), True, 'import numpy as np\n'), ((2529, 2567), 'numpy.floor', 'np.floor', (['((simulation.dims[1] - 1) / 2)'], {}), '((simulation.dims[1] - 1) / 2)\n', (2537, 2567), True, 'import numpy as np\n'), ((3161, 3222), 'Utilities.percolation_parameter', 'percolation_parameter', (['alpha_set[neighbor]', 'beta_set[tree_rc]'], {}), '(alpha_set[neighbor], beta_set[tree_rc])\n', (3182, 3222), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n'), ((4473, 4504), 'time.strftime', 'time.strftime', (['"""%d-%b-%Y %H:%M"""'], {}), "('%d-%b-%Y %H:%M')\n", (4486, 4504), False, 'import time\n'), ((6657, 6688), 'time.strftime', 'time.strftime', (['"""%d-%b-%Y %H:%M"""'], {}), "('%d-%b-%Y %H:%M')\n", (6670, 6688), False, 'import time\n'), ((4821, 4846), 'Utilities.fire_boundary', 'fire_boundary', (['simulation'], {}), '(simulation)\n', (4834, 4846), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n'), ((5002, 5028), 'Utilities.urban_boundary', 'urban_boundary', (['simulation'], {}), '(simulation)\n', (5016, 5028), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n'), ((5091, 5121), 'Utilities.forest_children', 'forest_children', (['simulation', 'p'], {}), '(simulation, p)\n', (5106, 5121), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n'), ((6347, 6377), 'numpy.sum', 'np.sum', (['simulation.stats_trees'], {}), '(simulation.stats_trees)\n', (6353, 6377), True, 'import numpy as np\n'), ((6447, 6477), 'numpy.sum', 'np.sum', (['simulation.stats_urban'], {}), '(simulation.stats_urban)\n', (6453, 6477), True, 'import numpy as np\n'), ((6545, 6575), 'numpy.sum', 'np.sum', (['simulation.stats_urban'], {}), '(simulation.stats_urban)\n', (6551, 6575), True, 'import numpy as np\n'), ((2354, 2391), 'numpy.amin', 'np.amin', (['[delta_beta, beta_set[r, c]]'], {}), '([delta_beta, beta_set[r, c]])\n', (2361, 2391), True, 'import numpy as np\n'), ((4935, 4961), 'Utilities.urban_boundary', 'urban_boundary', (['simulation'], {}), '(simulation)\n', (4949, 4961), False, 'from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control\n')]
|
# -*- coding: utf-8 -*-
""" BLEND
This module defines classes and methods for blending images.
:Author: <NAME> <<EMAIL>>
"""
import numpy as np
from lmfit import Model
from lmfit.models import GaussianModel, ConstantModel
from modopt.base.np_adjust import pad2d
from sf_tools.image.stamp import postage_stamp
from sf_tools.image.distort import recentre
class Blender(object):
def __init__(self, images, ratio=1.0, overlap=True, stamp_shape=(116, 116),
method='sf', xwang_sigma=0.15, seed=None):
self.ratio = ratio
self.overlap = overlap
self.stamp_shape = np.array(stamp_shape)
if method in ('sf', 'xwang'):
self.method = method
else:
raise ValueError('Method must be "sf" or "xwang".')
self.xwang_sigma = xwang_sigma
self.seed = seed
if images.shape[0] % 2:
images = images[:-1]
half_sample = images.shape[0] // 2
self._images = images
self._centrals = images[:half_sample]
self._companions = images[half_sample:]
self.obj_centres = []
@staticmethod
def _fit_gauss(xval, yval):
model = GaussianModel()
result = model.fit(yval, model.guess(yval, x=xval,
amplitude=np.max(yval)), x=xval)
return result
@classmethod
def _fit_image(cls, image):
sum_x = image.sum(axis=0)
sum_y = image.sum(axis=1)
x_vals = np.arange(sum_x.size)
sum_x_fit = cls._fit_gauss(x_vals, sum_x)
sum_y_fit = cls._fit_gauss(x_vals, sum_y)
centre = (int(sum_x_fit.params['center'].value),
int(sum_y_fit.params['center'].value))
width = min(sum_x_fit.params['fwhm'].value,
sum_y_fit.params['fwhm'].value)
return centre, width
@staticmethod
def _random_shift(radius, outer_radius=None, seed=None):
if seed:
np.random.seed(seed)
theta = np.random.ranf() * 2 * np.pi
if outer_radius:
r = radius + np.random.ranf() * (outer_radius - radius)
else:
r = np.random.ranf() * radius
x = int(np.around(r * np.cos(theta)))
y = int(np.around(r * np.sin(theta)))
return x, y
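    # Note (added): the shift radius is drawn uniformly, not area-uniformly,
    # so samples are denser near the inner radius than a spatially uniform
    # draw over the disc/annulus would be.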
@staticmethod
def _pad_image_shift(image, shift):
pad = [(_shift, 0) if _shift >= 0 else (0, -_shift)
for _shift in shift]
return np.pad(image, pad, 'constant')
@classmethod
def _blend(cls, image1, image2, shift):
dim = image1.shape
image2 = cls._pad_image_shift(image2, shift)
image2 = image2[:dim[0]] if shift[0] >= 0 else image2[-shift[0]:]
image2 = image2[:, :dim[1]] if shift[1] >= 0 else image2[:, -shift[1]:]
return image1 + image2
@staticmethod
def _gal_size_xwang(image):
return np.array([np.count_nonzero(image.sum(axis=ax))
for ax in range(2)])
@staticmethod
def _area_prob(shape1, shape2):
shape1, shape2 = np.array(shape1), np.array(shape2)
area = np.prod(shape1) - np.prod(shape2)
shape_diff = (shape1 - shape2) // 2
prob_ab = shape_diff[1] * shape1[0] / area
prob_cd = 0.5 - prob_ab
return prob_ab, prob_ab, prob_cd, prob_cd
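    # added note: the four probabilities weight the vertical bands left/right
    # of the central box and the horizontal bands above/below it by area;
    # _blend_pos_xwang uses them to pick a non-overlapping sector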
@classmethod
def _blend_pos_xwang(cls, centre, box, limits, overlap=True):
centre, box, limits = np.array(centre), np.array(box), np.array(limits)
if overlap:
blend_pos = [np.random.randint(centre[i] - box[i],
centre[i] + box[i]) for i in range(2)]
else:
sector = np.random.choice(['a', 'b', 'c', 'd'],
                                      p=cls._area_prob(centre * 2, box))
blend_pos = [None, None]
if sector == 'a':
blend_pos[0] = np.random.randint(limits[0][0], limits[1][0])
blend_pos[1] = np.random.randint(limits[0][1],
centre[1] - box[1])
elif sector == 'b':
blend_pos[0] = np.random.randint(limits[0][0], limits[1][0])
blend_pos[1] = np.random.randint(centre[1] + box[1],
limits[1][1])
elif sector == 'c':
blend_pos[0] = np.random.randint(limits[0][0],
centre[0] - box[0])
blend_pos[1] = np.random.randint(centre[1] - box[1],
centre[1] + box[1])
elif sector == 'd':
blend_pos[0] = np.random.randint(centre[0] + box[0],
limits[1][1])
blend_pos[1] = np.random.randint(centre[1] - box[1],
centre[1] + box[1])
return blend_pos
@classmethod
def _blend_xwang(cls, image1, image2, ps_shape=(116, 116), sigma=0.15,
overlap=True):
shape1, shape2 = np.array(image1.shape), np.array(image2.shape)
rad2 = shape2 // 2
ps_shape = np.array(ps_shape)
shape_diff = (ps_shape - shape1) // 2 + shape2
dis = cls._gal_size_xwang(image1) + cls._gal_size_xwang(image2)
box = np.around(sigma * dis).astype(int)
padding = ((shape_diff[0], shape_diff[0]),
(shape_diff[1], shape_diff[1]))
new_image = np.pad(image1, padding, 'constant')
new_shape = np.array(new_image.shape)
new_centre = new_shape // 2
limits = rad2, new_shape - rad2
        bp = cls._blend_pos_xwang(new_centre, box, limits, overlap=overlap)
blend_slice = [slice(bp[i] - shape2[i] // 2,
bp[i] + shape2[i] // 2 + 1) for i in range(2)]
new_image[blend_slice[0], blend_slice[1]] += image2
new_image = postage_stamp(new_image, pos=new_centre,
pixel_rad=ps_shape // 2)
return new_image
def _pad_image(self, image):
        if not isinstance(image, np.ndarray):
            raise TypeError('Expected numpy.ndarray, got {}'.format(type(image)))
im_shape = np.array(image.shape)
padding = (self.stamp_shape - im_shape) // 2
return pad2d(image, padding)
def _combine_images(self, image1, image2):
if self.method == 'xwang':
res = self._blend_xwang(image1, image2, ps_shape=self.stamp_shape,
sigma=self.xwang_sigma,
overlap=self.overlap)
else:
centre1, width1 = self._fit_image(image1)
centre2, width2 = self._fit_image(image2)
image1 = self._pad_image(recentre(image1, centre1))
image2 = self._pad_image(recentre(image2, centre2))
radius = self.ratio * (width1 + width2)
outer_radius = image1.shape[0] / 2.
if self.overlap:
shift = self._random_shift(radius, seed=self.seed)
else:
shift = self._random_shift(radius, outer_radius=outer_radius,
seed=self.seed)
im1_cen = np.array(image1.shape) // 2
im2_cen = np.copy(im1_cen) + np.array(shift)[::-1]
self.obj_centres.append((tuple(im1_cen), tuple(im2_cen)))
res = self._blend(image1, image2, shift)
return res
def blend(self):
blends = [self._combine_images(image1, image2) for image1, image2 in
zip(self._centrals, self._companions)]
return np.array(blends)
def pad(self):
im1_cen = np.array(self._pad_image(self._images[0]).shape) // 2
res = []
for image in self._images:
res.append(self._pad_image(image))
self.obj_centres.append((tuple(im1_cen), (None, None)))
return np.array(res)
|
[
"numpy.prod",
"numpy.copy",
"numpy.arange",
"modopt.base.np_adjust.pad2d",
"sf_tools.image.distort.recentre",
"numpy.sin",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"lmfit.models.GaussianModel",
"numpy.random.seed",
"numpy.around",
"numpy.cos",
"numpy.random.ranf",
"numpy.pad",
"sf_tools.image.stamp.postage_stamp"
] |
[((610, 631), 'numpy.array', 'np.array', (['stamp_shape'], {}), '(stamp_shape)\n', (618, 631), True, 'import numpy as np\n'), ((1178, 1193), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {}), '()\n', (1191, 1193), False, 'from lmfit.models import GaussianModel, ConstantModel\n'), ((1472, 1493), 'numpy.arange', 'np.arange', (['sum_x.size'], {}), '(sum_x.size)\n', (1481, 1493), True, 'import numpy as np\n'), ((2455, 2485), 'numpy.pad', 'np.pad', (['image', 'pad', '"""constant"""'], {}), "(image, pad, 'constant')\n", (2461, 2485), True, 'import numpy as np\n'), ((5174, 5192), 'numpy.array', 'np.array', (['ps_shape'], {}), '(ps_shape)\n', (5182, 5192), True, 'import numpy as np\n'), ((5495, 5530), 'numpy.pad', 'np.pad', (['image1', 'padding', '"""constant"""'], {}), "(image1, padding, 'constant')\n", (5501, 5530), True, 'import numpy as np\n'), ((5551, 5576), 'numpy.array', 'np.array', (['new_image.shape'], {}), '(new_image.shape)\n', (5559, 5576), True, 'import numpy as np\n'), ((5934, 5999), 'sf_tools.image.stamp.postage_stamp', 'postage_stamp', (['new_image'], {'pos': 'new_centre', 'pixel_rad': '(ps_shape // 2)'}), '(new_image, pos=new_centre, pixel_rad=ps_shape // 2)\n', (5947, 5999), False, 'from sf_tools.image.stamp import postage_stamp\n'), ((6192, 6213), 'numpy.array', 'np.array', (['image.shape'], {}), '(image.shape)\n', (6200, 6213), True, 'import numpy as np\n'), ((6283, 6304), 'modopt.base.np_adjust.pad2d', 'pad2d', (['image', 'padding'], {}), '(image, padding)\n', (6288, 6304), False, 'from modopt.base.np_adjust import pad2d\n'), ((7625, 7641), 'numpy.array', 'np.array', (['blends'], {}), '(blends)\n', (7633, 7641), True, 'import numpy as np\n'), ((7920, 7933), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (7928, 7933), True, 'import numpy as np\n'), ((1954, 1974), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1968, 1974), True, 'import numpy as np\n'), ((3057, 3073), 'numpy.array', 'np.array', (['shape1'], {}), '(shape1)\n', (3065, 3073), True, 'import numpy as np\n'), ((3075, 3091), 'numpy.array', 'np.array', (['shape2'], {}), '(shape2)\n', (3083, 3091), True, 'import numpy as np\n'), ((3108, 3123), 'numpy.prod', 'np.prod', (['shape1'], {}), '(shape1)\n', (3115, 3123), True, 'import numpy as np\n'), ((3126, 3141), 'numpy.prod', 'np.prod', (['shape2'], {}), '(shape2)\n', (3133, 3141), True, 'import numpy as np\n'), ((3435, 3451), 'numpy.array', 'np.array', (['centre'], {}), '(centre)\n', (3443, 3451), True, 'import numpy as np\n'), ((3453, 3466), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (3461, 3466), True, 'import numpy as np\n'), ((3468, 3484), 'numpy.array', 'np.array', (['limits'], {}), '(limits)\n', (3476, 3484), True, 'import numpy as np\n'), ((5081, 5103), 'numpy.array', 'np.array', (['image1.shape'], {}), '(image1.shape)\n', (5089, 5103), True, 'import numpy as np\n'), ((5105, 5127), 'numpy.array', 'np.array', (['image2.shape'], {}), '(image2.shape)\n', (5113, 5127), True, 'import numpy as np\n'), ((1992, 2008), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (2006, 2008), True, 'import numpy as np\n'), ((2144, 2160), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (2158, 2160), True, 'import numpy as np\n'), ((3531, 3588), 'numpy.random.randint', 'np.random.randint', (['(centre[i] - box[i])', '(centre[i] + box[i])'], {}), '(centre[i] - box[i], centre[i] + box[i])\n', (3548, 3588), True, 'import numpy as np\n'), ((3877, 3922), 'numpy.random.randint', 'np.random.randint', (['limits[0][0]', 'limits[1][0]'], 
{}), '(limits[0][0], limits[1][0])\n', (3894, 3922), True, 'import numpy as np\n'), ((3954, 4005), 'numpy.random.randint', 'np.random.randint', (['limits[0][1]', '(centre[1] - box[1])'], {}), '(limits[0][1], centre[1] - box[1])\n', (3971, 4005), True, 'import numpy as np\n'), ((5336, 5358), 'numpy.around', 'np.around', (['(sigma * dis)'], {}), '(sigma * dis)\n', (5345, 5358), True, 'import numpy as np\n'), ((6749, 6774), 'sf_tools.image.distort.recentre', 'recentre', (['image1', 'centre1'], {}), '(image1, centre1)\n', (6757, 6774), False, 'from sf_tools.image.distort import recentre\n'), ((6813, 6838), 'sf_tools.image.distort.recentre', 'recentre', (['image2', 'centre2'], {}), '(image2, centre2)\n', (6821, 6838), False, 'from sf_tools.image.distort import recentre\n'), ((7216, 7238), 'numpy.array', 'np.array', (['image1.shape'], {}), '(image1.shape)\n', (7224, 7238), True, 'import numpy as np\n'), ((7266, 7282), 'numpy.copy', 'np.copy', (['im1_cen'], {}), '(im1_cen)\n', (7273, 7282), True, 'import numpy as np\n'), ((1290, 1302), 'numpy.max', 'np.max', (['yval'], {}), '(yval)\n', (1296, 1302), True, 'import numpy as np\n'), ((2071, 2087), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (2085, 2087), True, 'import numpy as np\n'), ((2200, 2213), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2206, 2213), True, 'import numpy as np\n'), ((2246, 2259), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2252, 2259), True, 'import numpy as np\n'), ((4118, 4163), 'numpy.random.randint', 'np.random.randint', (['limits[0][0]', 'limits[1][0]'], {}), '(limits[0][0], limits[1][0])\n', (4135, 4163), True, 'import numpy as np\n'), ((4195, 4246), 'numpy.random.randint', 'np.random.randint', (['(centre[1] + box[1])', 'limits[1][1]'], {}), '(centre[1] + box[1], limits[1][1])\n', (4212, 4246), True, 'import numpy as np\n'), ((7285, 7300), 'numpy.array', 'np.array', (['shift'], {}), '(shift)\n', (7293, 7300), True, 'import numpy as np\n'), ((4359, 4410), 'numpy.random.randint', 'np.random.randint', (['limits[0][0]', '(centre[0] - box[0])'], {}), '(limits[0][0], centre[0] - box[0])\n', (4376, 4410), True, 'import numpy as np\n'), ((4491, 4548), 'numpy.random.randint', 'np.random.randint', (['(centre[1] - box[1])', '(centre[1] + box[1])'], {}), '(centre[1] - box[1], centre[1] + box[1])\n', (4508, 4548), True, 'import numpy as np\n'), ((4661, 4712), 'numpy.random.randint', 'np.random.randint', (['(centre[0] + box[0])', 'limits[1][1]'], {}), '(centre[0] + box[0], limits[1][1])\n', (4678, 4712), True, 'import numpy as np\n'), ((4793, 4850), 'numpy.random.randint', 'np.random.randint', (['(centre[1] - box[1])', '(centre[1] + box[1])'], {}), '(centre[1] - box[1], centre[1] + box[1])\n', (4810, 4850), True, 'import numpy as np\n')]
|
import os
import sys
import argparse
import onnx
import time
import subprocess
import numpy as np
import tempfile
from onnx import numpy_helper
from collections import OrderedDict
# Command arguments.
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help="Path to the ONNX model.")
parser.add_argument('--print_input',
action='store_true',
help="Print out inputs")
parser.add_argument('--print_output',
action='store_true',
help="Print out outputs")
parser.add_argument('--compile_args',
type=str,
default="",
help="Arguments passed directly to onnx-mlir command."
" See bin/onnx-mlir --help")
parser.add_argument(
'--shape_info',
type=str,
help="Shape for each dynamic input, e.g. 0:1x10x20,1:7x5x3")
parser.add_argument('--verify',
choices=['onnxruntime', 'ref'],
help="Verify the output by using onnxruntime or reference"
" inputs/outputs. By default, no verification")
parser.add_argument(
'--ref_folder',
type=str,
help="Path to the folder containing reference inputs and outputs stored"
" in protobuf. Used when --verify=ref")
parser.add_argument('--rtol',
type=str,
default="0.05",
help="Relative tolerance for verification")
parser.add_argument('--atol',
type=str,
default="0.01",
help="Absolute tolerance for verification")
args = parser.parse_args()
if (not os.environ.get('ONNX_MLIR_HOME', None)):
raise RuntimeError(
"Environment variable ONNX_MLIR_HOME is not set, please set it to the path to "
"the HOME directory for onnx-mlir. The HOME directory for onnx-mlir refers to "
"the parent folder containing the bin, lib, etc sub-folders in which ONNX-MLIR "
"executables and libraries can be found.")
VERBOSE = os.environ.get('VERBOSE', False)
ONNX_MLIR_EXENAME = "onnx-mlir"
if sys.platform == "win32":
ONNX_MLIR_EXENAME = "onnx-mlir.exe"
ONNX_MLIR = os.path.join(os.environ['ONNX_MLIR_HOME'], "bin",
ONNX_MLIR_EXENAME)
# Include runtime directory in python paths, so PyRuntime can be imported.
RUNTIME_DIR = os.path.join(os.environ['ONNX_MLIR_HOME'], "lib")
sys.path.append(RUNTIME_DIR)
try:
from PyRuntime import ExecutionSession
except ImportError:
raise ImportError(
"Looks like you did not build the PyRuntime target, build it by running `make PyRuntime`."
)
def ordinal(n):
suffix = ['th', 'st', 'nd', 'rd', 'th'][min(n % 10, 4)]
if 11 <= (n % 100) <= 13:
suffix = 'th'
return str(n) + suffix
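# ordinal examples: ordinal(1) -> '1st', ordinal(2) -> '2nd',
# ordinal(11) -> '11th', ordinal(23) -> '23rd'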
def execute_commands(cmds):
if (VERBOSE):
print(cmds)
subprocess.call(cmds, shell=True)
def extend_model_output(model, intermediate_outputs):
# onnx-mlir doesn't care about manually specified output types & shapes.
DUMMY_TENSOR_TYPE = onnx.TensorProto.FLOAT
while (len(model.graph.output)):
model.graph.output.pop()
for output_name in intermediate_outputs:
output_value_info = onnx.helper.make_tensor_value_info(
output_name, DUMMY_TENSOR_TYPE, None)
model.graph.output.extend([output_value_info])
return model
def read_input_from_refs(model, ref_folder):
print("Reading inputs from {} ...".format(ref_folder))
i = 0
inputs = []
input_names = []
initializers = list(map(lambda x: x.name, model.graph.initializer))
for input_proto in model.graph.input:
if input_proto.name not in initializers:
input_names.append(input_proto.name)
input_file = ref_folder + '/input_{}.pb'.format(i)
input_ts = onnx.TensorProto()
with open(input_file, 'rb') as f:
input_ts.ParseFromString(f.read())
inputs += [numpy_helper.to_array(input_ts)]
i += 1
print(" done.\n")
return (inputs, input_names)
def read_output_from_refs(model, ref_folder):
print("Reading reference outputs from {} ...".format(ref_folder))
reference_output = []
for i, _ in enumerate(model.graph.output):
output_file = ref_folder + '/output_{}.pb'.format(i)
output_ts = onnx.TensorProto()
with open(output_file, 'rb') as f:
output_ts.ParseFromString(f.read())
reference_output += [numpy_helper.to_array(output_ts)]
print(" done.\n")
return reference_output
def generate_random_input(model, input_shapes):
print("Generating random inputs ...")
# Generate random data as input.
inputs = []
input_names = []
initializers = list(map(lambda x: x.name, model.graph.initializer))
np.random.seed(42)
for i, input_proto in enumerate(model.graph.input):
if input_proto.name in initializers:
continue
input_names.append(input_proto.name)
shape_proto = input_proto.type.tensor_type.shape
explicit_shape = []
for d, dim in enumerate(shape_proto.dim):
if dim.dim_value:
explicit_shape.append(dim.dim_value)
continue
if i in input_shapes:
if d < len(input_shapes[i]):
explicit_shape.append(input_shapes[i][d])
else:
print("The {} dim".format(ordinal(d + 1)),
"of the {} input is unknown.".format(ordinal(i + 1)),
"Use --shape_info to set.")
print(shape_proto)
exit()
else:
print("The shape of the {} input".format(ordinal(i + 1)),
"is unknown. Use --shape_info to set.")
print(shape_proto)
exit()
inputs.append(
np.random.uniform(-1.0, 1.0, explicit_shape).astype(np.float32))
print(" done.\n")
return (inputs, input_names)
def main():
# Get shape information if given.
# args.shape_info in the form of 'input_index:d1xd2, input_index:d1xd2'
input_shapes = {}
if args.shape_info:
for input_shape in args.shape_info.strip().split(","):
input_index_shape = input_shape.split(":")
input_index = input_index_shape[0]
            assert int(input_index) not in input_shapes, "Duplicate input indices"
dims = [int(d) for d in input_index_shape[1].split("x")]
input_shapes[int(input_index)] = dims
# Load the onnx model.
model = onnx.load(args.model_path)
# Get the output names that we want to verify.
# If using onnxruntime for verification, we can verify every operation output.
output_names = [o.name for o in model.graph.output]
output_names = list(OrderedDict.fromkeys(output_names))
if (args.verify and args.verify == "onnxruntime"):
output_names = sum([[n for n in node.output if n != '']
for node in model.graph.node], [])
output_names = list(OrderedDict.fromkeys(output_names))
model = extend_model_output(model, output_names)
# Compile, run, and verify.
with tempfile.TemporaryDirectory() as temp_dir:
print("Temporary directory has been created at {}".format(temp_dir))
print("Compiling the model ...")
# Save modified model & invoke onnx-mlir to compile it.
temp_model_path = os.path.join(temp_dir, "model.onnx")
onnx.save(model, temp_model_path)
command_str = ONNX_MLIR
if args.compile_args:
command_str += " " + args.compile_args
command_str += " " + temp_model_path
start = time.perf_counter()
execute_commands(command_str)
end = time.perf_counter()
print(" took ", end - start, " seconds.\n")
# Prepare input data.
inputs = []
input_names = []
if (args.verify and args.verify.lower() == "ref"):
assert args.ref_folder, "No reference folder given"
inputs, input_names = read_input_from_refs(model, args.ref_folder)
else:
inputs, input_names = generate_random_input(model, input_shapes)
# Print the input if required.
if (args.print_input):
for i, inp in enumerate(inputs):
print("The {} input {}:{} is: \n {} \n".format(
ordinal(i + 1), input_names[i], list(inp.shape), inp))
print("Running inference ...")
temp_shared_lib_path = os.path.join(temp_dir, "model.so")
start = time.perf_counter()
# Use the generated shared library to create an execution session.
sess = ExecutionSession(temp_shared_lib_path, "run_main_graph")
outs = sess.run(inputs)
end = time.perf_counter()
print(" took ", end - start, " seconds.\n")
# Print the output if required.
if (args.print_output):
for i, out in enumerate(outs):
print("The {} output {}:{} is: \n {} \n".format(
ordinal(i + 1), output_names[i], list(out.shape), out))
# Run the model with reference backend and get results.
if (args.verify):
ref_outs = []
if (args.verify.lower() == "onnxruntime"):
# Reference backend by using onnxruntime.
import onnxruntime
output_names = list(map(lambda x: x.name, model.graph.output))
input_feed = dict(zip(input_names, inputs))
print("Running inference using onnxruntime ...")
start = time.perf_counter()
ref_session = onnxruntime.InferenceSession(temp_model_path)
ref_outs = ref_session.run(output_names, input_feed)
end = time.perf_counter()
print(" took ", end - start, " seconds.\n")
elif (args.verify.lower() == "ref"):
ref_outs = read_output_from_refs(model, args.ref_folder)
else:
print("Invalid verify option")
exit()
# For each output tensor, compare results.
for i, name in enumerate(output_names):
print("Verifying value of {}:{}".format(name, list(outs[i].shape)),
"using atol={}, rtol={} ...".format(args.atol, args.rtol))
total_elements = 0
mismatched_elements = 0
for index, actual_val in np.ndenumerate(outs[i]):
total_elements += 1
ref_val = ref_outs[i][index]
                    # Use the tolerance atol + rtol * abs(desired), as in numpy's assert_allclose.
diff = float(args.atol) + float(args.rtol) * abs(ref_val)
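                    # e.g. with the defaults atol=0.01 and rtol=0.05, a
                    # reference value of 10.0 allows a deviation of
                    # 0.01 + 0.05 * 10.0 = 0.51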
if (abs(actual_val - ref_val) <= diff):
continue
mismatched_elements += 1
print(" at {}".format(index),
"mismatch {} (actual)".format(actual_val),
"vs {} (reference)".format(ref_val))
if mismatched_elements == 0:
print(" correct.\n".format(
args.atol, args.rtol))
else:
print(" mismatched elements {}/{}.\n".format(
mismatched_elements, total_elements))
if __name__ == '__main__':
main()
|
[
"PyRuntime.ExecutionSession",
"tempfile.TemporaryDirectory",
"onnx.save",
"collections.OrderedDict.fromkeys",
"argparse.ArgumentParser",
"onnx.helper.make_tensor_value_info",
"os.path.join",
"os.environ.get",
"time.perf_counter",
"onnxruntime.InferenceSession",
"onnx.TensorProto",
"numpy.ndenumerate",
"numpy.random.uniform",
"subprocess.call",
"numpy.random.seed",
"onnx.load",
"onnx.numpy_helper.to_array",
"sys.path.append"
] |
[((212, 237), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (235, 237), False, 'import argparse\n'), ((2061, 2093), 'os.environ.get', 'os.environ.get', (['"""VERBOSE"""', '(False)'], {}), "('VERBOSE', False)\n", (2075, 2093), False, 'import os\n'), ((2208, 2276), 'os.path.join', 'os.path.join', (["os.environ['ONNX_MLIR_HOME']", '"""bin"""', 'ONNX_MLIR_EXENAME'], {}), "(os.environ['ONNX_MLIR_HOME'], 'bin', ONNX_MLIR_EXENAME)\n", (2220, 2276), False, 'import os\n'), ((2392, 2441), 'os.path.join', 'os.path.join', (["os.environ['ONNX_MLIR_HOME']", '"""lib"""'], {}), "(os.environ['ONNX_MLIR_HOME'], 'lib')\n", (2404, 2441), False, 'import os\n'), ((2442, 2470), 'sys.path.append', 'sys.path.append', (['RUNTIME_DIR'], {}), '(RUNTIME_DIR)\n', (2457, 2470), False, 'import sys\n'), ((1669, 1707), 'os.environ.get', 'os.environ.get', (['"""ONNX_MLIR_HOME"""', 'None'], {}), "('ONNX_MLIR_HOME', None)\n", (1683, 1707), False, 'import os\n'), ((2897, 2930), 'subprocess.call', 'subprocess.call', (['cmds'], {'shell': '(True)'}), '(cmds, shell=True)\n', (2912, 2930), False, 'import subprocess\n'), ((4850, 4868), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4864, 4868), True, 'import numpy as np\n'), ((6651, 6677), 'onnx.load', 'onnx.load', (['args.model_path'], {}), '(args.model_path)\n', (6660, 6677), False, 'import onnx\n'), ((3256, 3328), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['output_name', 'DUMMY_TENSOR_TYPE', 'None'], {}), '(output_name, DUMMY_TENSOR_TYPE, None)\n', (3290, 3328), False, 'import onnx\n'), ((4384, 4402), 'onnx.TensorProto', 'onnx.TensorProto', ([], {}), '()\n', (4400, 4402), False, 'import onnx\n'), ((6893, 6927), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['output_names'], {}), '(output_names)\n', (6913, 6927), False, 'from collections import OrderedDict\n'), ((7274, 7303), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7301, 7303), False, 'import tempfile\n'), ((7526, 7562), 'os.path.join', 'os.path.join', (['temp_dir', '"""model.onnx"""'], {}), "(temp_dir, 'model.onnx')\n", (7538, 7562), False, 'import os\n'), ((7571, 7604), 'onnx.save', 'onnx.save', (['model', 'temp_model_path'], {}), '(model, temp_model_path)\n', (7580, 7604), False, 'import onnx\n'), ((7779, 7798), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7796, 7798), False, 'import time\n'), ((7851, 7870), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7868, 7870), False, 'import time\n'), ((8618, 8652), 'os.path.join', 'os.path.join', (['temp_dir', '"""model.so"""'], {}), "(temp_dir, 'model.so')\n", (8630, 8652), False, 'import os\n'), ((8669, 8688), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8686, 8688), False, 'import time\n'), ((8779, 8835), 'PyRuntime.ExecutionSession', 'ExecutionSession', (['temp_shared_lib_path', '"""run_main_graph"""'], {}), "(temp_shared_lib_path, 'run_main_graph')\n", (8795, 8835), False, 'from PyRuntime import ExecutionSession\n'), ((8882, 8901), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8899, 8901), False, 'import time\n'), ((3865, 3883), 'onnx.TensorProto', 'onnx.TensorProto', ([], {}), '()\n', (3881, 3883), False, 'import onnx\n'), ((4523, 4555), 'onnx.numpy_helper.to_array', 'numpy_helper.to_array', (['output_ts'], {}), '(output_ts)\n', (4544, 4555), False, 'from onnx import numpy_helper\n'), ((7139, 7173), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['output_names'], {}), 
'(output_names)\n', (7159, 7173), False, 'from collections import OrderedDict\n'), ((4004, 4035), 'onnx.numpy_helper.to_array', 'numpy_helper.to_array', (['input_ts'], {}), '(input_ts)\n', (4025, 4035), False, 'from onnx import numpy_helper\n'), ((9705, 9724), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9722, 9724), False, 'import time\n'), ((9755, 9800), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['temp_model_path'], {}), '(temp_model_path)\n', (9783, 9800), False, 'import onnxruntime\n'), ((9892, 9911), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9909, 9911), False, 'import time\n'), ((10572, 10595), 'numpy.ndenumerate', 'np.ndenumerate', (['outs[i]'], {}), '(outs[i])\n', (10586, 10595), True, 'import numpy as np\n'), ((5952, 5996), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', 'explicit_shape'], {}), '(-1.0, 1.0, explicit_shape)\n', (5969, 5996), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def db2lin(val):
"""
    Convert from dB to linear domain.
Parameters
----------
val : numpy.ndarray
Values in dB domain.
Returns
-------
val : numpy.ndarray
Values in linear domain.
"""
return 10 ** (val / 10.)
def lin2db(val):
"""
Converting from linear to dB domain.
Parameters
----------
val : numpy.ndarray
Values in linear domain.
Returns
-------
val : numpy.ndarray
Values in dB domain.
"""
return 10. * np.log10(val)
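# Quick check: db2lin(3.0) ~= 1.995 and lin2db(2.0) ~= 3.010; the two
# functions are inverses of each other.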
def get_window_radius(window, hp_radius):
"""
Calculates the required radius of a window function in order to achieve
the provided half power radius.
Parameters
----------
window : string
Window function name.
        Currently supported windows:
- Hamming
- Boxcar
hp_radius : float32
Half power radius. Radius of window function for weight
equal to 0.5 (-3 dB). In the spatial domain this corresponds to
half of the spatial resolution one would like to achieve with the
given window.
Returns
-------
r : float32
Window radius needed to achieve the given half power radius
"""
window = window.lower()
hp_weight = 0.5
if window == 'hamming':
alpha = 0.54
r = (np.pi * hp_radius) / np.arccos((hp_weight-alpha) / (1-alpha))
elif window == 'boxcar':
r = hp_radius
else:
raise ValueError('Window name not supported.')
return r
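# Worked example: for a Hamming window with hp_radius=10, the formula gives
# r = pi * 10 / arccos((0.5 - 0.54) / (1 - 0.54)) ~= 18.95, i.e. the window
# must extend almost twice as far as the desired half-power radius.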
def hamming_window(radius, distances):
"""
Hamming window filter.
Parameters
----------
radius : float32
Radius of the window.
distances : numpy.ndarray
Array with distances.
Returns
-------
weights : numpy.ndarray
Distance weights.
tw : float32
        Sum of weights.
"""
alpha = 0.54
weights = alpha + (1 - alpha) * np.cos(np.pi / radius * distances)
return weights, np.sum(weights)
def boxcar(radius, distance):
"""
    Boxcar filter.
    Parameters
    ----------
    radius : float32
        Radius of the window.
    distance : numpy.ndarray
        Array with distances.
Returns
-------
weights : numpy.ndarray
Distance weights.
tw : float32
        Sum of weights.
"""
weights = np.zeros(distance.size)
weights[distance <= radius] = 1.
return weights, np.sum(weights)
def get_window_weights(window, radius, distance, norm=False):
"""
Function returning weights for the provided window function
Parameters
----------
window : str
Window function name
radius : float
Radius of the window.
distance : numpy.ndarray
Distance array
norm : boolean
If true, normalised weights will be returned.
Returns
-------
weights : numpy.ndarray
Weights according to distances and given window function
"""
if window == 'hamming':
weights, w_sum = hamming_window(radius, distance)
elif window == 'boxcar':
weights, w_sum = boxcar(radius, distance)
else:
raise ValueError('Window name not supported.')
if norm is True:
weights = weights / w_sum
return weights
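# Usage sketch (hypothetical values): normalised Hamming weights for three
# neighbours at distances 0, 5 and 10 from the centre, with window radius 20:
#   get_window_weights('hamming', 20., np.array([0., 5., 10.]), norm=True)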
|
[
"numpy.log10",
"numpy.arccos",
"numpy.sum",
"numpy.zeros",
"numpy.cos"
] |
[((3934, 3957), 'numpy.zeros', 'np.zeros', (['distance.size'], {}), '(distance.size)\n', (3942, 3957), True, 'import numpy as np\n'), ((2192, 2205), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (2200, 2205), True, 'import numpy as np\n'), ((3659, 3674), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3665, 3674), True, 'import numpy as np\n'), ((4016, 4031), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (4022, 4031), True, 'import numpy as np\n'), ((3033, 3077), 'numpy.arccos', 'np.arccos', (['((hp_weight - alpha) / (1 - alpha))'], {}), '((hp_weight - alpha) / (1 - alpha))\n', (3042, 3077), True, 'import numpy as np\n'), ((3603, 3637), 'numpy.cos', 'np.cos', (['(np.pi / radius * distances)'], {}), '(np.pi / radius * distances)\n', (3609, 3637), True, 'import numpy as np\n')]
|
"""fasterRCNN对象创建"""
import numpy as np
import colorsys
import os
from keras import backend as K
from keras.applications.imagenet_utils import preprocess_input
from PIL import Image, ImageFont, ImageDraw
import copy
import math
from net import fasterrcnn as frcnn
from net import netconfig as netconfig
from net import RPN as RPN
from net import tools as tools
class FasterRCNN(object):
_defaults = {
"model_path": './model_data/logs/epoch015-loss1.729-rpn1.025-roi0.704.h5',
"classes_path": './model_data/index.txt',
"confidence": 0.7,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
"""初始化faster RCNN"""
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.sess = K.get_session()
self.config = netconfig.Config()
self.generate()
self.bbox_util = tools.BBoxUtility()
self.confidence = 0.7
self.classes_path='./model_data/index.txt'
self.model_path='./model_data/logs/epoch015-loss1.729-rpn1.025-roi0.704.h5'
def _get_class(self):
"""获得所有的分类"""
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def generate(self):
"""获得所有的分类"""
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        # Total number of classes (+1 for the background class)
        self.num_classes = len(self.class_names) + 1
        # Load the model: if the saved file already contains the model
        # structure, load it directly; otherwise build the model first
        # and then load the weights.
self.model_rpn, self.model_classifier = frcnn.get_predict_model(self.config, self.num_classes)
self.model_rpn.load_weights(self.model_path, by_name=True)
self.model_classifier.load_weights(self.model_path, by_name=True, skip_mismatch=True)
print('{} model, anchors, and classes loaded.'.format(model_path))
        # Assign a distinct color to each class for drawing boxes
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
def get_img_output_length(self, width, height):
def get_output_length(input_length):
# input_length += 6
filter_sizes = [7, 3, 1, 1]
padding = [3, 1, 0, 0]
stride = 2
for i in range(4):
# input_length = (input_length - filter_size + stride) // stride
input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1
return input_length
return get_output_length(width), get_output_length(height)
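    # Worked example for get_img_output_length: with the filter sizes,
    # paddings and stride above, an input of 600 pixels shrinks
    # 600 -> 300 -> 150 -> 75 -> 38, matching the 38x38 feature-map
    # comments in detect_image below.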
def detect_image(self, image):
"""检测图片"""
image_shape = np.array(np.shape(image)[0:2])
old_width = image_shape[1]
old_height = image_shape[0]
        # Keep a copy of the original image
        old_image = copy.deepcopy(image)
        # Resize the image so that its shorter side becomes 600
width, height = tools.get_new_img_size(old_width, old_height)
image = image.resize([width, height])
        # Convert the image to an array
photo = np.array(image, dtype=np.float64)
        # Preprocess: normalize the pixel values
photo = preprocess_input(np.expand_dims(photo, 0))
        # Run the RPN to get class scores (x_class) and box regressions (x_regr)
preds = self.model_rpn.predict(photo)
        # Decode the predictions
        # Generate all anchor (prior) boxes
anchors = RPN.create_anchor(self.get_img_output_length(width, height), width, height)
        # Decode into region proposals: 300 proposals with coordinates in [0, 1]
rpn_results = self.bbox_util.detection_out(preds, anchors, 1, confidence_threshold=0)
        # Map the [0, 1] proposals onto the shared feature map; for a 38x38
        # map the values fall in [0, 38]. R is 300x4: top-left and
        # bottom-right corner coordinates
R = rpn_results[0][:, 2:]
R[:, 0] = np.array(np.round(R[:, 0] * width / self.config.rpn_stride), dtype=np.int32)
R[:, 1] = np.array(np.round(R[:, 1] * height / self.config.rpn_stride), dtype=np.int32)
R[:, 2] = np.array(np.round(R[:, 2] * width / self.config.rpn_stride), dtype=np.int32)
R[:, 3] = np.array(np.round(R[:, 3] * height / self.config.rpn_stride), dtype=np.int32)
print(R)
        # Convert R: first two columns are the top-left corner, last two are width and height
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
base_layer = preds[2]
delete_line = []
for i, r in enumerate(R):
if r[2] < 1 or r[3] < 1:
delete_line.append(i)
R = np.delete(R, delete_line, axis=0)
bboxes = []
probs = []
labels = []
        # Iterate over the proposals in batches of 32
for jk in range(R.shape[0] // self.config.num_rois + 1):
            # Take the next 32 proposals
ROIs = np.expand_dims(R[self.config.num_rois * jk:self.config.num_rois * (jk + 1), :], axis=0)
            # Stop if there are no proposals left
if ROIs.shape[1] == 0:
break
            # Pad the last batch when it has fewer than 32 proposals
if jk == R.shape[0] // self.config.num_rois:
# pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0], self.config.num_rois, curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
            # Feed the shared feature map and the proposals into the classifier head
            # P_cls has shape (batch_size, 32 proposals, 21)
            # P_regr has shape (batch_size, 32 proposals, 80)
            [P_cls, P_regr] = self.model_classifier.predict([base_layer, ROIs])
            # Keep only proposals that really contain an object: skip those below
            # the confidence threshold and those classified as background
for ii in range(P_cls.shape[1]):
                # P_cls[0, ii, :-1] holds the foreground class probabilities
if np.max(P_cls[0, ii, :-1]) < self.confidence or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
                # Get the predicted label
label = np.argmax(P_cls[0, ii, :-1])
                # Get the proposal coordinates
(x, y, w, h) = ROIs[0, ii, :]
                # Same value as the label above
cls_num = np.argmax(P_cls[0, ii, :-1])
                # Get the regression offsets and undo the scaling
(tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= self.config.classifier_regr_std[0]
ty /= self.config.classifier_regr_std[1]
tw /= self.config.classifier_regr_std[2]
th /= self.config.classifier_regr_std[3]
                # Compute the actual coordinates on the shared feature map
cx = x + w / 2.
cy = y + h / 2.
cx1 = tx * w + cx
cy1 = ty * h + cy
w1 = math.exp(tw) * w
h1 = math.exp(th) * h
x1 = cx1 - w1 / 2.
y1 = cy1 - h1 / 2.
x2 = cx1 + w1 / 2
y2 = cy1 + h1 / 2
x1 = int(round(x1))
y1 = int(round(y1))
x2 = int(round(x2))
y2 = int(round(y2))
                # bboxes are the proposals (out of 300) matched to target objects.
                # Note they may still overlap, since only the confidence score
                # has been used for filtering so far
bboxes.append([x1, y1, x2, y2])
probs.append(np.max(P_cls[0, ii, :-1]))
labels.append(label)
        # No objects detected: return the original image
if len(bboxes) == 0:
return old_image
        # Map the proposals from the 38x38 feature map back to the 600x600 image.
        # Keep boxes scoring above the confidence threshold, then run NMS again
        # to remove overlapping boxes
labels = np.array(labels)
probs = np.array(probs)
boxes = np.array(bboxes, dtype=np.float32)
boxes[:, 0] = boxes[:, 0] * self.config.rpn_stride / width
boxes[:, 1] = boxes[:, 1] * self.config.rpn_stride / height
boxes[:, 2] = boxes[:, 2] * self.config.rpn_stride / width
boxes[:, 3] = boxes[:, 3] * self.config.rpn_stride / height
results = np.array(
self.bbox_util.nms_for_out(np.array(labels), np.array(probs), np.array(boxes), self.num_classes - 1, 0.4))
top_label_indices = results[:, 0]
top_conf = results[:, 1]
boxes = results[:, 2:]
#top_label_indices=labels
#top_conf=probs
        # Rescale to the original image size; the box computation is now complete
boxes[:, 0] = boxes[:, 0] * old_width
boxes[:, 1] = boxes[:, 1] * old_height
boxes[:, 2] = boxes[:, 2] * old_width
boxes[:, 3] = boxes[:, 3] * old_height
        # simhei.ttf provides the label font
font = ImageFont.truetype(font='model_data/simhei.ttf',size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
thickness = (np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2
image = old_image
for i, c in enumerate(top_label_indices):
predicted_class = self.class_names[int(c)]
score = top_conf[i]
left, top, right, bottom = boxes[i]
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
            # Draw the bounding box and its label
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
label = label.encode('utf-8')
print(label)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[int(c)])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[int(c)])
draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)
del draw
return image
def close(self):
self.sess.close()
|
[
"net.tools.get_new_img_size",
"colorsys.hsv_to_rgb",
"numpy.array",
"PIL.ImageDraw.Draw",
"copy.deepcopy",
"math.exp",
"numpy.delete",
"numpy.max",
"numpy.round",
"os.path.expanduser",
"net.tools.BBoxUtility",
"net.fasterrcnn.get_predict_model",
"numpy.floor",
"numpy.argmax",
"numpy.shape",
"net.netconfig.Config",
"keras.backend.get_session",
"numpy.zeros",
"numpy.expand_dims"
] |
[((938, 953), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (951, 953), True, 'from keras import backend as K\n'), ((976, 994), 'net.netconfig.Config', 'netconfig.Config', ([], {}), '()\n', (992, 994), True, 'from net import netconfig as netconfig\n'), ((1044, 1063), 'net.tools.BBoxUtility', 'tools.BBoxUtility', ([], {}), '()\n', (1061, 1063), True, 'from net import tools as tools\n'), ((1301, 1338), 'os.path.expanduser', 'os.path.expanduser', (['self.classes_path'], {}), '(self.classes_path)\n', (1319, 1338), False, 'import os\n'), ((1567, 1602), 'os.path.expanduser', 'os.path.expanduser', (['self.model_path'], {}), '(self.model_path)\n', (1585, 1602), False, 'import os\n'), ((1871, 1925), 'net.fasterrcnn.get_predict_model', 'frcnn.get_predict_model', (['self.config', 'self.num_classes'], {}), '(self.config, self.num_classes)\n', (1894, 1925), True, 'from net import fasterrcnn as frcnn\n'), ((3270, 3290), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (3283, 3290), False, 'import copy\n'), ((3344, 3389), 'net.tools.get_new_img_size', 'tools.get_new_img_size', (['old_width', 'old_height'], {}), '(old_width, old_height)\n', (3366, 3389), True, 'from net import tools as tools\n'), ((3469, 3502), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float64'}), '(image, dtype=np.float64)\n', (3477, 3502), True, 'import numpy as np\n'), ((4711, 4744), 'numpy.delete', 'np.delete', (['R', 'delete_line'], {'axis': '(0)'}), '(R, delete_line, axis=0)\n', (4720, 4744), True, 'import numpy as np\n'), ((7633, 7649), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7641, 7649), True, 'import numpy as np\n'), ((7666, 7681), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (7674, 7681), True, 'import numpy as np\n'), ((7698, 7732), 'numpy.array', 'np.array', (['bboxes'], {'dtype': 'np.float32'}), '(bboxes, dtype=np.float32)\n', (7706, 7732), True, 'import numpy as np\n'), ((3557, 3581), 'numpy.expand_dims', 'np.expand_dims', (['photo', '(0)'], {}), '(photo, 0)\n', (3571, 3581), True, 'import numpy as np\n'), ((4073, 4123), 'numpy.round', 'np.round', (['(R[:, 0] * width / self.config.rpn_stride)'], {}), '(R[:, 0] * width / self.config.rpn_stride)\n', (4081, 4123), True, 'import numpy as np\n'), ((4168, 4219), 'numpy.round', 'np.round', (['(R[:, 1] * height / self.config.rpn_stride)'], {}), '(R[:, 1] * height / self.config.rpn_stride)\n', (4176, 4219), True, 'import numpy as np\n'), ((4264, 4314), 'numpy.round', 'np.round', (['(R[:, 2] * width / self.config.rpn_stride)'], {}), '(R[:, 2] * width / self.config.rpn_stride)\n', (4272, 4314), True, 'import numpy as np\n'), ((4359, 4410), 'numpy.round', 'np.round', (['(R[:, 3] * height / self.config.rpn_stride)'], {}), '(R[:, 3] * height / self.config.rpn_stride)\n', (4367, 4410), True, 'import numpy as np\n'), ((4938, 5029), 'numpy.expand_dims', 'np.expand_dims', (['R[self.config.num_rois * jk:self.config.num_rois * (jk + 1), :]'], {'axis': '(0)'}), '(R[self.config.num_rois * jk:self.config.num_rois * (jk + 1),\n :], axis=0)\n', (4952, 5029), True, 'import numpy as np\n'), ((9503, 9524), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (9517, 9524), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((3140, 3155), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3148, 3155), True, 'import numpy as np\n'), ((6153, 6181), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :-1]'], {}), '(P_cls[0, ii, :-1])\n', (6162, 6181), True, 'import numpy as np\n'), ((6308, 6336), 
'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :-1]'], {}), '(P_cls[0, ii, :-1])\n', (6317, 6336), True, 'import numpy as np\n'), ((8070, 8086), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8078, 8086), True, 'import numpy as np\n'), ((8088, 8103), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (8096, 8103), True, 'import numpy as np\n'), ((8105, 8120), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (8113, 8120), True, 'import numpy as np\n'), ((9716, 9753), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (9724, 9753), True, 'import numpy as np\n'), ((9802, 9827), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (9810, 9827), True, 'import numpy as np\n'), ((2344, 2367), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (2363, 2367), False, 'import colorsys\n'), ((6867, 6879), 'math.exp', 'math.exp', (['tw'], {}), '(tw)\n', (6875, 6879), False, 'import math\n'), ((6905, 6917), 'math.exp', 'math.exp', (['th'], {}), '(th)\n', (6913, 6917), False, 'import math\n'), ((7389, 7414), 'numpy.max', 'np.max', (['P_cls[0, ii, :-1]'], {}), '(P_cls[0, ii, :-1])\n', (7395, 7414), True, 'import numpy as np\n'), ((9259, 9274), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (9267, 9274), True, 'import numpy as np\n'), ((9343, 9358), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (9351, 9358), True, 'import numpy as np\n'), ((5385, 5407), 'numpy.zeros', 'np.zeros', (['target_shape'], {}), '(target_shape)\n', (5393, 5407), True, 'import numpy as np\n'), ((5974, 5999), 'numpy.max', 'np.max', (['P_cls[0, ii, :-1]'], {}), '(P_cls[0, ii, :-1])\n', (5980, 5999), True, 'import numpy as np\n'), ((6021, 6047), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (6030, 6047), True, 'import numpy as np\n'), ((8713, 8732), 'numpy.shape', 'np.shape', (['old_image'], {}), '(old_image)\n', (8721, 8732), True, 'import numpy as np\n'), ((8738, 8757), 'numpy.shape', 'np.shape', (['old_image'], {}), '(old_image)\n', (8746, 8757), True, 'import numpy as np\n'), ((9133, 9152), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (9141, 9152), True, 'import numpy as np\n'), ((9196, 9216), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (9204, 9216), True, 'import numpy as np\n'), ((9279, 9301), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (9287, 9301), True, 'import numpy as np\n'), ((9363, 9384), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (9371, 9384), True, 'import numpy as np\n'), ((8648, 8663), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (8656, 8663), True, 'import numpy as np\n')]
|
# Standard Library
import pickle
from typing import *
from pathlib import Path
# Third-party Party
import numpy as np
import PIL.Image as Image
from colorama import Fore, init
# Torch Library
import torch
import torch.utils.data as data
import torchvision.transforms as T
# My Library
from helper import visualize_np, visualize_plt, visualize_pil
from helper import ProjectPath, DatasetPath
from helper import ClassLabelLookuper
init(autoreset=True)
ImageType = TypeVar(
"ImageType",
np.ndarray, torch.Tensor, Path
)
ClassType = TypeVar(
"ClassType",
np.ndarray, torch.Tensor
)
class MultiDataset(data.Dataset):
def __init__(self, dataset: str, split: str):
super(MultiDataset, self).__init__()
assert split in (s := ["train", "val", "test"]), f"{Fore.RED}Invalid split, should be in {s}"
self.split = split
self.dataset = dataset
self._dataset_reader: Dict[str, Callable] = {
"Cifar10": self.__read_cifar10,
"Cifar100": self.__read_cifar100,
"PascalVOC2012": self.__read_PascalVOC2012
}
assert dataset in self._dataset_reader.keys(), f"{Fore.RED}Invalid dataset, please select in " \
f"{self._dataset_reader.keys()}."
self.image: Union[np.ndarray, List[Path]]
self.label: np.ndarray
self.image, self.label = self._dataset_reader[self.dataset]()
self.select_train_val()
self.num_class = len(ClassLabelLookuper(self.dataset).cls)
def __len__(self) -> int:
return len(self.image)
def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:
image, label = self.image[idx], self.label[idx]
if isinstance(image, Path):
image = Image.open(image)
else:
image = Image.fromarray(image.astype(np.uint8)).convert("RGB")
return self.transform(image), label
def set_transform(self, transform: T.Compose) -> "MultiDataset":
self.transform = transform
return self
def select_train_val(self, trainval_ratio: Optional[float] = 0.2) -> None:
# get image of each label
self.label_image: Dict[int, np.ndarray] = {}
for label in np.unique(self.label):
self.label_image[label] = np.where(self.label == label)[0]
if self.dataset in ["Cifar10", "Cifar100"]:
if self.split == "test":
return
else:
# generate train val if not exists, else load
if (config_path := ProjectPath.config.joinpath(f"{self.dataset}.npz")).exists():
data = np.load(config_path)
                    ratio, train, val = data["ratio"], data["train"], data["val"]
if not config_path.exists() or ratio != trainval_ratio:
train, val = [], []
for label, image_idx in self.label_image.items():
np.random.shuffle(image_idx)
val_num = int(trainval_ratio * len(image_idx))
val.append(image_idx[:val_num])
train.append(image_idx[val_num:])
train = np.stack(train, axis=0)
val = np.stack(val, axis=0)
config_path.parent.mkdir(parents=True, exist_ok=True)
np.savez(config_path, ratio=trainval_ratio, train=train, val=val)
train = np.concatenate(train, axis=0)
val = np.concatenate(val, axis=0)
# select train val
if self.split == "val":
self.image = self.image[val]
self.label = self.label[val]
else:
self.image = self.image[train]
self.label = self.label[train]
else:
return
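    # Sketch of the split logic above: with trainval_ratio=0.2 and Cifar10's
    # 5000 training images per class, 1000 per class go to 'val' and 4000 to
    # 'train'; the chosen indices are cached in a '{dataset}.npz' file under
    # ProjectPath.config so the same split is reused across runs.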
def __read_cifar10(self) -> Tuple[np.ndarray, np.ndarray]:
if self.split in ["train", "val"]:
data = []
for batch in DatasetPath.Cifar10.train:
with batch.open(mode="rb") as f:
data.append(pickle.load(f, encoding="bytes"))
image = np.concatenate([i[b"data"].reshape(-1, 3, 32, 32) for i in data], axis=0)
label = np.concatenate([i[b"labels"] for i in data], axis=0)
else:
with DatasetPath.Cifar10.test.open(mode="rb") as f:
data = pickle.load(f, encoding="bytes")
image = data[b"data"].reshape(-1, 3, 32, 32)
label = data[b"labels"]
return image.transpose(0, 2, 3, 1), np.array(label)
def __read_cifar100(self) -> Tuple[np.ndarray, np.ndarray]:
if self.split in ["train", "val"]:
with DatasetPath.Cifar100.train.open(mode="rb") as f:
data = pickle.load(f, encoding="bytes")
image = data[b"data"].reshape(-1, 3, 32, 32)
label = data[b"fine_labels"]
else:
with DatasetPath.Cifar100.test.open(mode="rb") as f:
data = pickle.load(f, encoding="bytes")
image = data["data"].reshape(-1, 3, 32, 32)
label = data["label"]
return image.transpose(0, 2, 3, 1), np.asarray(label)
def __read_PascalVOC2012(self) -> Tuple[List[Path], np.ndarray]:
image = []
label = []
ccn = ClassLabelLookuper(datasets="PascalVOC2012")
if self.split in "train":
for k, v in DatasetPath.PascalVOC2012.train_idx.items():
image.extend(v)
label.extend([ccn.get_label(k)] * len(v))
elif self.split == "val":
for k, v in DatasetPath.PascalVOC2012.val_idx.items():
image.extend(v)
label.extend([ccn.get_label(k)] * len(v))
else:
assert False, f"{Fore.RED}PascalVOC2012 test data is not accesibly"
        # Note: images in PascalVOC2012 can be listed under more than one class (duplicates)
image, idx = np.unique(image, return_index=True)
return image, np.array(label)[idx]
if __name__ == "__main__":
# md = MultiDataset(dataset="PascalVOC2012", split="val")
# tt = T.Compose([
# T.RandomHorizontalFlip(),
# T.Resize((224, 224)),
# T.ToTensor()
# ])
# md.set_transform(tt)
md = MultiDataset(dataset="Cifar100", split="train")
tt = T.Compose([
T.RandomHorizontalFlip(),
T.ToTensor()
])
md.set_transform(tt)
ccn = ClassLabelLookuper(datasets=md.dataset)
dl = data.DataLoader(md, batch_size=64)
for x, y in dl:
print(x.shape)
visualize_plt(x, [ccn.get_class(i.item()) for i in y])
break
|
[
"helper.ProjectPath.config.joinpath",
"numpy.array",
"helper.DatasetPath.Cifar10.test.open",
"colorama.init",
"numpy.load",
"numpy.savez",
"numpy.where",
"helper.DatasetPath.Cifar100.test.open",
"helper.ClassLabelLookuper",
"numpy.asarray",
"helper.DatasetPath.PascalVOC2012.train_idx.items",
"helper.DatasetPath.Cifar100.train.open",
"numpy.stack",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomHorizontalFlip",
"pickle.load",
"PIL.Image.open",
"numpy.unique",
"torch.utils.data.DataLoader",
"helper.DatasetPath.PascalVOC2012.val_idx.items",
"numpy.random.shuffle"
] |
[((434, 454), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (438, 454), False, 'from colorama import Fore, init\n'), ((6474, 6513), 'helper.ClassLabelLookuper', 'ClassLabelLookuper', ([], {'datasets': 'md.dataset'}), '(datasets=md.dataset)\n', (6492, 6513), False, 'from helper import ClassLabelLookuper\n'), ((6523, 6557), 'torch.utils.data.DataLoader', 'data.DataLoader', (['md'], {'batch_size': '(64)'}), '(md, batch_size=64)\n', (6538, 6557), True, 'import torch.utils.data as data\n'), ((2257, 2278), 'numpy.unique', 'np.unique', (['self.label'], {}), '(self.label)\n', (2266, 2278), True, 'import numpy as np\n'), ((5389, 5433), 'helper.ClassLabelLookuper', 'ClassLabelLookuper', ([], {'datasets': '"""PascalVOC2012"""'}), "(datasets='PascalVOC2012')\n", (5407, 5433), False, 'from helper import ClassLabelLookuper\n'), ((5978, 6013), 'numpy.unique', 'np.unique', (['image'], {'return_index': '(True)'}), '(image, return_index=True)\n', (5987, 6013), True, 'import numpy as np\n'), ((1789, 1806), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1799, 1806), True, 'import PIL.Image as Image\n'), ((4312, 4364), 'numpy.concatenate', 'np.concatenate', (["[i[b'labels'] for i in data]"], {'axis': '(0)'}), "([i[b'labels'] for i in data], axis=0)\n", (4326, 4364), True, 'import numpy as np\n'), ((4636, 4651), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (4644, 4651), True, 'import numpy as np\n'), ((5249, 5266), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (5259, 5266), True, 'import numpy as np\n'), ((5492, 5535), 'helper.DatasetPath.PascalVOC2012.train_idx.items', 'DatasetPath.PascalVOC2012.train_idx.items', ([], {}), '()\n', (5533, 5535), False, 'from helper import ProjectPath, DatasetPath\n'), ((6384, 6408), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (6406, 6408), True, 'import torchvision.transforms as T\n'), ((6418, 6430), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (6428, 6430), True, 'import torchvision.transforms as T\n'), ((1507, 1539), 'helper.ClassLabelLookuper', 'ClassLabelLookuper', (['self.dataset'], {}), '(self.dataset)\n', (1525, 1539), False, 'from helper import ClassLabelLookuper\n'), ((2318, 2347), 'numpy.where', 'np.where', (['(self.label == label)'], {}), '(self.label == label)\n', (2326, 2347), True, 'import numpy as np\n'), ((3474, 3503), 'numpy.concatenate', 'np.concatenate', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (3488, 3503), True, 'import numpy as np\n'), ((3526, 3553), 'numpy.concatenate', 'np.concatenate', (['val'], {'axis': '(0)'}), '(val, axis=0)\n', (3540, 3553), True, 'import numpy as np\n'), ((4396, 4436), 'helper.DatasetPath.Cifar10.test.open', 'DatasetPath.Cifar10.test.open', ([], {'mode': '"""rb"""'}), "(mode='rb')\n", (4425, 4436), False, 'from helper import ProjectPath, DatasetPath\n'), ((4466, 4498), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4477, 4498), False, 'import pickle\n'), ((4777, 4819), 'helper.DatasetPath.Cifar100.train.open', 'DatasetPath.Cifar100.train.open', ([], {'mode': '"""rb"""'}), "(mode='rb')\n", (4808, 4819), False, 'from helper import ProjectPath, DatasetPath\n'), ((4849, 4881), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4860, 4881), False, 'import pickle\n'), ((5011, 5052), 'helper.DatasetPath.Cifar100.test.open', 'DatasetPath.Cifar100.test.open', ([], {'mode': '"""rb"""'}), 
"(mode='rb')\n", (5041, 5052), False, 'from helper import ProjectPath, DatasetPath\n'), ((5082, 5114), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (5093, 5114), False, 'import pickle\n'), ((5685, 5726), 'helper.DatasetPath.PascalVOC2012.val_idx.items', 'DatasetPath.PascalVOC2012.val_idx.items', ([], {}), '()\n', (5724, 5726), False, 'from helper import ProjectPath, DatasetPath\n'), ((6036, 6051), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (6044, 6051), True, 'import numpy as np\n'), ((2668, 2688), 'numpy.load', 'np.load', (['config_path'], {}), '(config_path)\n', (2675, 2688), True, 'import numpy as np\n'), ((3218, 3241), 'numpy.stack', 'np.stack', (['train'], {'axis': '(0)'}), '(train, axis=0)\n', (3226, 3241), True, 'import numpy as np\n'), ((3268, 3289), 'numpy.stack', 'np.stack', (['val'], {'axis': '(0)'}), '(val, axis=0)\n', (3276, 3289), True, 'import numpy as np\n'), ((3384, 3449), 'numpy.savez', 'np.savez', (['config_path'], {'ratio': 'trainval_ratio', 'train': 'train', 'val': 'val'}), '(config_path, ratio=trainval_ratio, train=train, val=val)\n', (3392, 3449), True, 'import numpy as np\n'), ((2976, 3004), 'numpy.random.shuffle', 'np.random.shuffle', (['image_idx'], {}), '(image_idx)\n', (2993, 3004), True, 'import numpy as np\n'), ((4164, 4196), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4175, 4196), False, 'import pickle\n'), ((2579, 2629), 'helper.ProjectPath.config.joinpath', 'ProjectPath.config.joinpath', (['f"""{self.dataset}.npz"""'], {}), "(f'{self.dataset}.npz')\n", (2606, 2629), False, 'from helper import ProjectPath, DatasetPath\n')]
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
pipeline for U-GAT-IT
"""
import time
import math
import os
from glob import glob
import cv2
import numpy as np
import mindspore.ops as ops
from mindspore import nn
from mindspore import save_checkpoint, load_checkpoint, load_param_into_net
from mindspore.common import initializer as init
from mindspore.communication.management import get_rank
from .networks import ResnetGenerator, Discriminator, GWithLossCell, DWithLossCell
from .cell import TrainOneStepG, TrainOneStepD, Generator
from ..utils.tools import denorm, tensor2numpy, RGB2BGR, cam
from ..dataset.dataset import TrainDataLoader, TestDataLoader
from ..metrics.metrics import mean_kernel_inception_distance
class UGATIT:
"""pipline"""
def __init__(self, args):
self.light = args.light
self.distributed = args.distributed
self.mode = args.phase
if self.light:
self.model_name = 'UGATIT_light'
else:
self.model_name = 'UGATIT'
self.modelart = args.enable_modelarts
self.train_url = args.train_url
self.output_path = args.output_path
self.dataset = args.dataset
self.data_path = args.data_path
self.decay_flag = args.decay_flag
self.epoch = args.epoch
self.decay_epoch = args.decay_epoch
self.batch_size = args.batch_size
self.print_freq = args.print_freq
self.save_freq = args.save_freq
self.lr_policy = 'linear'
self.loss_scale = args.loss_scale
self.lr = args.lr
self.weight_decay = args.weight_decay
self.ch = args.ch
self.use_global_norm = args.use_global_norm
""" Weight """
self.adv_weight = args.adv_weight
self.cycle_weight = args.cycle_weight
self.identity_weight = args.identity_weight
self.cam_weight = args.cam_weight
self.weights = [self.adv_weight, self.cycle_weight, self.identity_weight, self.cam_weight]
""" Generator """
self.n_res = args.n_res
""" Discriminator """
self.n_dis = args.n_dis
self.img_size = args.img_size
self.img_ch = args.img_ch
self.resume = args.resume
"""utils"""
self.oneslike = ops.OnesLike()
self.zeroslike = ops.ZerosLike()
self.assign = ops.Assign()
print()
print("##### Information #####")
print("# light : ", self.light)
print("# dataset : ", self.dataset)
print("# batch_size : ", self.batch_size)
print("# epochs: ", self.epoch)
print()
print("##### Generator #####")
print("# residual blocks : ", self.n_res)
print()
print("##### Discriminator #####")
print("# discriminator layer : ", self.n_dis)
print()
print("##### Weight #####")
print("# adv_weight : ", self.adv_weight)
print("# cycle_weight : ", self.cycle_weight)
print("# identity_weight : ", self.identity_weight)
print("# cam_weight : ", self.cam_weight)
##################################################################################
# Model
##################################################################################
def build_model(self):
"""build model"""
self.train_nums = 1
if self.mode == 'train':
train_loader, test_loader, train_nums = TrainDataLoader(self.img_size,
self.data_path,
self.dataset,
self.batch_size,
self.distributed)
self.train_loader = train_loader
self.test_iterator = test_loader.create_dict_iterator()
self.train_nums = train_nums
print("Training dataset size = ", self.train_nums)
elif self.mode == 'test':
test_loader = TestDataLoader(self.img_size,
self.data_path,
self.dataset)
self.test_iterator = test_loader.create_dict_iterator()
else:
raise RuntimeError("Invalid mode")
print("Dataset load finished")
self.genA2B = ResnetGenerator(input_nc=3,
output_nc=3,
ngf=self.ch,
n_blocks=self.n_res,
img_size=self.img_size,
light=self.light)
self.genB2A = ResnetGenerator(input_nc=3,
output_nc=3,
ngf=self.ch,
n_blocks=self.n_res,
img_size=self.img_size,
light=self.light)
self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
self.generator = Generator(self.genA2B, self.genB2A)
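        # Editorial note: init_gain = sqrt(5) below is the negative slope fed
        # to HeUniform, matching PyTorch's default Conv2d initialisation
        # kaiming_uniform_(weight, a=math.sqrt(5)).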
self.init_weights(self.genA2B, 'KaimingUniform', math.sqrt(5))
self.init_weights(self.genB2A, 'KaimingUniform', math.sqrt(5))
self.init_weights(self.disGA, 'KaimingUniform', math.sqrt(5))
self.init_weights(self.disGB, 'KaimingUniform', math.sqrt(5))
self.init_weights(self.disLA, 'KaimingUniform', math.sqrt(5))
self.init_weights(self.disLB, 'KaimingUniform', math.sqrt(5))
self.start_epoch = 1
if self.resume:
model_list = glob(os.path.join(self.output_path, self.dataset, 'model', '*.ckpt'))
if model_list:
model_list.sort()
self.start_epoch = int(model_list[-1].split('_')[-1].split('.')[0])
self.load(os.path.join(self.output_path, self.dataset, 'model'), self.start_epoch)
                print(" [*] Epoch %d Load SUCCESS" % self.start_epoch)
start_step = (self.start_epoch - 1) * self.train_nums
self.learning_rate = self.get_lr()[start_step:]
loss_scale = self.loss_scale
self.D_loss_net = DWithLossCell(self.disGA,
self.disLA,
self.disGB,
self.disLB,
self.weights)
self.G_loss_net = GWithLossCell(self.generator,
self.disGA,
self.disLA,
self.disGB,
self.disLB,
self.weights)
self.G_optim = nn.Adam(self.generator.trainable_params(),
learning_rate=self.learning_rate,
beta1=0.5,
beta2=0.999,
weight_decay=self.weight_decay)
self.D_optim = nn.Adam(self.D_loss_net.trainable_params(),
learning_rate=self.learning_rate,
beta1=0.5,
beta2=0.999,
weight_decay=self.weight_decay)
self.D_train_net = TrainOneStepD(self.D_loss_net, self.D_optim, loss_scale, self.use_global_norm)
self.G_train_net = TrainOneStepG(self.G_loss_net, self.generator, self.G_optim,
loss_scale, self.use_global_norm)
def get_lr(self):
"""
Learning rate generator.
"""
if self.lr_policy == 'linear':
lrs = [self.lr] * self.train_nums * self.decay_epoch
for epoch in range(self.decay_epoch, self.epoch):
lr_epoch = self.lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)
lrs += [lr_epoch] * self.train_nums
return lrs
return self.lr
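    # Worked example of the schedule above (illustrative numbers, not the
    # project defaults): with lr=0.1, epoch=4, decay_epoch=2 and train_nums=3,
    # get_lr returns [0.1]*6 + [0.1]*3 + [0.05]*3 -- one entry per training
    # step, constant through epoch decay_epoch + 1, then decaying linearly
    # towards zero over the remaining epochs.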
def init_weights(self, net, init_type='normal', init_gain=0.02):
"""init weights"""
for _, cell in net.cells_and_names():
if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose, nn.Dense)):
if init_type == 'normal':
cell.weight.set_data(init.initializer(init.Normal(init_gain), cell.weight.shape))
elif init_type == 'xavier':
cell.weight.set_data(init.initializer(init.XavierUniform(init_gain), cell.weight.shape))
elif init_type == 'KaimingUniform':
cell.weight.set_data(init.initializer(init.HeUniform(init_gain), cell.weight.shape))
elif init_type == 'constant':
cell.weight.set_data(init.initializer(0.0005, cell.weight.shape))
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
elif isinstance(cell, (nn.GroupNorm, nn.BatchNorm2d)):
cell.gamma.set_data(init.initializer('ones', cell.gamma.shape))
cell.beta.set_data(init.initializer('zeros', cell.beta.shape))
def train(self):
"""train"""
self.D_train_net.set_train()
self.G_train_net.set_train()
data_loader = self.train_loader.create_dict_iterator()
# training loop
print('training start !')
for epoch in range(self.start_epoch, self.epoch + 1):
i = 0
for data in data_loader:
i += 1
start_time = time.time()
real_A = data["image_A"]
real_B = data["image_B"]
# Update
fake_A2B, fake_B2A, Generator_loss = self.G_train_net(real_A, real_B)
Discriminator_loss = self.D_train_net(real_A, real_B, fake_A2B, fake_B2A)
                # clip the rho parameters of AdaILN and ILN to [0, 1] after the optimizer step
for m in self.genA2B.cells_and_names():
if hasattr(m[1], 'rho'):
w = m[1].rho.data
w = ops.clip_by_value(w, 0, 1)
m[1].rho.data.set_data(w)
for m in self.genB2A.cells_and_names():
if hasattr(m[1], 'rho'):
w = m[1].rho.data
w = ops.clip_by_value(w, 0, 1)
m[1].rho.data.set_data(w)
print("epoch %d:[%5d/%5d] time per iter: %4.4f " % (epoch,
i,
self.train_nums,
time.time() - start_time))
print("d_loss:", Discriminator_loss)
print("g_loss:", Generator_loss)
if epoch % self.print_freq == 0:
if self.distributed:
if get_rank() == 0:
self.print(epoch)
save_checkpoint(self.genA2B,
os.path.join(self.output_path, self.dataset + '_genA2B_params_latest.ckpt'))
save_checkpoint(self.genB2A,
os.path.join(self.output_path, self.dataset + '_genB2A_params_latest.ckpt'))
save_checkpoint(self.disGA,
os.path.join(self.output_path, self.dataset + '_disGA_params_latest.ckpt'))
save_checkpoint(self.disGB,
os.path.join(self.output_path, self.dataset + '_disGB_params_latest.ckpt'))
save_checkpoint(self.disLA,
os.path.join(self.output_path, self.dataset + '_disLA_params_latest.ckpt'))
save_checkpoint(self.disLB,
os.path.join(self.output_path, self.dataset + '_disLB_params_latest.ckpt'))
else:
self.print(epoch)
save_checkpoint(self.genA2B,
os.path.join(self.output_path, self.dataset + '_genA2B_params_latest.ckpt'))
save_checkpoint(self.genB2A,
os.path.join(self.output_path, self.dataset + '_genB2A_params_latest.ckpt'))
save_checkpoint(self.disGA,
os.path.join(self.output_path, self.dataset + '_disGA_params_latest.ckpt'))
save_checkpoint(self.disGB,
os.path.join(self.output_path, self.dataset + '_disGB_params_latest.ckpt'))
save_checkpoint(self.disLA,
os.path.join(self.output_path, self.dataset + '_disLA_params_latest.ckpt'))
save_checkpoint(self.disLB,
os.path.join(self.output_path, self.dataset + '_disLB_params_latest.ckpt'))
if epoch % self.save_freq == 0:
if self.distributed:
if get_rank() == 0:
self.save(os.path.join(self.output_path, self.dataset, 'model'), epoch)
else:
self.save(os.path.join(self.output_path, self.dataset, 'model'), epoch)
def print(self, epoch):
"""save middle results"""
test_sample_num = 5
A2B = np.zeros((self.img_size * 7, 0, 3))
B2A = np.zeros((self.img_size * 7, 0, 3))
for _ in range(test_sample_num):
data = next(self.test_iterator)
real_A = data["image_A"]
real_B = data["image_B"]
fake_A2B, _, fake_A2B_heatmap = self.genA2B(real_A)
fake_B2A, _, fake_B2A_heatmap = self.genB2A(real_B)
fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(fake_A2B)
fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(fake_B2A)
            # real_A and real_B must be copied before being fed into genB2A and
            # genA2B below; otherwise inference is incorrect on the GPU backend.
fake_A2A, _, fake_A2A_heatmap = self.genB2A(real_A.copy())
fake_B2B, _, fake_B2B_heatmap = self.genA2B(real_B.copy())
A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))),
cam(tensor2numpy(fake_A2A_heatmap[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))),
cam(tensor2numpy(fake_A2B_heatmap[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))),
cam(tensor2numpy(fake_A2B2A_heatmap[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)), 1)
B2A = np.concatenate((B2A, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_B[0]))),
cam(tensor2numpy(fake_B2B_heatmap[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_B2B[0]))),
cam(tensor2numpy(fake_B2A_heatmap[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_B2A[0]))),
cam(tensor2numpy(fake_B2A2B_heatmap[0]), self.img_size),
RGB2BGR(tensor2numpy(denorm(fake_B2A2B[0])))), 0)), 1)
cv2.imwrite(os.path.join(self.output_path, self.dataset, 'img', 'A2B_%07d.png' % epoch), A2B * 255.0)
cv2.imwrite(os.path.join(self.output_path, self.dataset, 'img', 'B2A_%07d.png' % epoch), B2A * 255.0)
def save(self, savedir, epoch):
save_checkpoint(self.genA2B, os.path.join(savedir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch))
save_checkpoint(self.genB2A, os.path.join(savedir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch))
save_checkpoint(self.disGA, os.path.join(savedir, self.dataset + '_disGA_params_%07d.ckpt' % epoch))
save_checkpoint(self.disGB, os.path.join(savedir, self.dataset + '_disGB_params_%07d.ckpt' % epoch))
save_checkpoint(self.disLA, os.path.join(savedir, self.dataset + '_disLA_params_%07d.ckpt' % epoch))
save_checkpoint(self.disLB, os.path.join(savedir, self.dataset + '_disLB_params_%07d.ckpt' % epoch))
def load(self, loaddir, epoch):
"""load checkpoint"""
genA2B_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch))
not_load = {}
not_load['genA2B'] = load_param_into_net(self.genA2B, genA2B_params)
if self.mode == 'train':
genB2A_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch))
disGA_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disGA_params_%07d.ckpt' % epoch))
disGB_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disGB_params_%07d.ckpt' % epoch))
disLA_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disLA_params_%07d.ckpt' % epoch))
disLB_params = load_checkpoint(os.path.join(loaddir, self.dataset + '_disLB_params_%07d.ckpt' % epoch))
not_load['genB2A'] = load_param_into_net(self.genB2A, genB2A_params)
not_load['disGA'] = load_param_into_net(self.disGA, disGA_params)
not_load['disGB'] = load_param_into_net(self.disGB, disGB_params)
not_load['disLA'] = load_param_into_net(self.disLA, disLA_params)
not_load['disLB'] = load_param_into_net(self.disLB, disLB_params)
print("these params are not loaded: ", not_load)
def test(self, inception_ckpt_path=None):
"""test"""
self.genA2B.set_train(True)
output_path = os.path.join(self.output_path, self.dataset)
model_list = glob(os.path.join(output_path, 'model', '*.ckpt'))
if model_list:
model_list.sort()
start_epoch = int(model_list[-1].split('_')[-1].split('.')[0])
self.load(os.path.join(output_path, 'model'), start_epoch)
print(" [*] epoch %d Load SUCCESS" % start_epoch)
else:
print(" [*] Load FAILURE")
return
for n, data in enumerate(self.test_iterator):
real_A = data['image_A']
fake_A2B, _, _ = self.genA2B(real_A)
A = RGB2BGR(tensor2numpy(denorm(real_A[0])))
A2B = RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))
cv2.imwrite(os.path.join(output_path, 'test', 'A_%d.png' % (n + 1)), A * 255.0)
cv2.imwrite(os.path.join(output_path, 'test', 'A2B_%d.png' % (n + 1)), A2B * 255.0)
if inception_ckpt_path is not None:
dataset_path = os.path.join(self.data_path, self.dataset)
mean_kernel_inception_distance(output_path, dataset_path, inception_ckpt_path)
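# Minimal usage sketch (illustrative only; `args` stands for the parsed
# command-line namespace this class expects and is not defined in this module):
#
#     gan = UGATIT(args)
#     gan.build_model()
#     if args.phase == 'train':
#         gan.train()
#     else:
#         gan.test()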
|
[
"mindspore.common.initializer.XavierUniform",
"mindspore.common.initializer.HeUniform",
"mindspore.ops.ZerosLike",
"mindspore.ops.clip_by_value",
"os.path.join",
"math.sqrt",
"mindspore.ops.OnesLike",
"numpy.zeros",
"mindspore.common.initializer.Normal",
"mindspore.common.initializer.initializer",
"mindspore.load_param_into_net",
"mindspore.ops.Assign",
"time.time",
"mindspore.communication.management.get_rank"
] |
[((2894, 2908), 'mindspore.ops.OnesLike', 'ops.OnesLike', ([], {}), '()\n', (2906, 2908), True, 'import mindspore.ops as ops\n'), ((2934, 2949), 'mindspore.ops.ZerosLike', 'ops.ZerosLike', ([], {}), '()\n', (2947, 2949), True, 'import mindspore.ops as ops\n'), ((2972, 2984), 'mindspore.ops.Assign', 'ops.Assign', ([], {}), '()\n', (2982, 2984), True, 'import mindspore.ops as ops\n'), ((14424, 14459), 'numpy.zeros', 'np.zeros', (['(self.img_size * 7, 0, 3)'], {}), '((self.img_size * 7, 0, 3))\n', (14432, 14459), True, 'import numpy as np\n'), ((14474, 14509), 'numpy.zeros', 'np.zeros', (['(self.img_size * 7, 0, 3)'], {}), '((self.img_size * 7, 0, 3))\n', (14482, 14509), True, 'import numpy as np\n'), ((17867, 17914), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.genA2B', 'genA2B_params'], {}), '(self.genA2B, genA2B_params)\n', (17886, 17914), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((19105, 19149), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset'], {}), '(self.output_path, self.dataset)\n', (19117, 19149), False, 'import os\n'), ((6084, 6096), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6093, 6096), False, 'import math\n'), ((6155, 6167), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6164, 6167), False, 'import math\n'), ((6225, 6237), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6234, 6237), False, 'import math\n'), ((6295, 6307), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6304, 6307), False, 'import math\n'), ((6365, 6377), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6374, 6377), False, 'import math\n'), ((6435, 6447), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6444, 6447), False, 'import math\n'), ((16740, 16815), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""img"""', "('A2B_%07d.png' % epoch)"], {}), "(self.output_path, self.dataset, 'img', 'A2B_%07d.png' % epoch)\n", (16752, 16815), False, 'import os\n'), ((16850, 16925), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""img"""', "('B2A_%07d.png' % epoch)"], {}), "(self.output_path, self.dataset, 'img', 'B2A_%07d.png' % epoch)\n", (16862, 16925), False, 'import os\n'), ((17014, 17086), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_genA2B_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch)\n", (17026, 17086), False, 'import os\n'), ((17125, 17197), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_genB2A_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch)\n", (17137, 17197), False, 'import os\n'), ((17235, 17306), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disGA_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disGA_params_%07d.ckpt' % epoch)\n", (17247, 17306), False, 'import os\n'), ((17344, 17415), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disGB_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disGB_params_%07d.ckpt' % epoch)\n", (17356, 17415), False, 'import os\n'), ((17453, 17524), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disLA_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disLA_params_%07d.ckpt' % epoch)\n", (17465, 17524), False, 'import os\n'), ((17562, 17633), 'os.path.join', 'os.path.join', (['savedir', "(self.dataset + '_disLB_params_%07d.ckpt' % epoch)"], {}), "(savedir, self.dataset + '_disLB_params_%07d.ckpt' % epoch)\n", (17574, 17633), False, 'import os\n'), ((17742, 17814), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_genA2B_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_genA2B_params_%07d.ckpt' % epoch)\n", (17754, 17814), False, 'import os\n'), ((18564, 18611), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.genB2A', 'genB2A_params'], {}), '(self.genB2A, genB2A_params)\n', (18583, 18611), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18644, 18689), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disGA', 'disGA_params'], {}), '(self.disGA, disGA_params)\n', (18663, 18689), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18722, 18767), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disGB', 'disGB_params'], {}), '(self.disGB, disGB_params)\n', (18741, 18767), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18800, 18845), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disLA', 'disLA_params'], {}), '(self.disLA, disLA_params)\n', (18819, 18845), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((18878, 18923), 'mindspore.load_param_into_net', 'load_param_into_net', (['self.disLB', 'disLB_params'], {}), '(self.disLB, disLB_params)\n', (18897, 18923), False, 'from mindspore import save_checkpoint, load_checkpoint, load_param_into_net\n'), ((19176, 19220), 'os.path.join', 'os.path.join', (['output_path', '"""model"""', '"""*.ckpt"""'], {}), "(output_path, 'model', '*.ckpt')\n", (19188, 19220), False, 'import os\n'), ((20075, 20117), 'os.path.join', 'os.path.join', (['self.data_path', 'self.dataset'], {}), '(self.data_path, self.dataset)\n', (20087, 20117), False, 'import os\n'), ((6532, 6595), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""', '"""*.ckpt"""'], {}), "(self.output_path, self.dataset, 'model', '*.ckpt')\n", (6544, 6595), False, 'import os\n'), ((10482, 10493), 'time.time', 'time.time', ([], {}), '()\n', (10491, 10493), False, 'import time\n'), ((17992, 18064), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_genB2A_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_genB2A_params_%07d.ckpt' % epoch)\n", (18004, 18064), False, 'import os\n'), ((18109, 18180), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disGA_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disGA_params_%07d.ckpt' % epoch)\n", (18121, 18180), False, 'import os\n'), ((18225, 18296), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disGB_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disGB_params_%07d.ckpt' % epoch)\n", (18237, 18296), False, 'import os\n'), ((18341, 18412), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disLA_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disLA_params_%07d.ckpt' % epoch)\n", (18353, 18412), False, 'import os\n'), ((18457, 18528), 'os.path.join', 'os.path.join', (['loaddir', "(self.dataset + '_disLB_params_%07d.ckpt' % epoch)"], {}), "(loaddir, self.dataset + '_disLB_params_%07d.ckpt' % epoch)\n", (18469, 18528), False, 'import os\n'), ((19372, 19406), 'os.path.join', 'os.path.join', (['output_path', '"""model"""'], {}), "(output_path, 'model')\n", (19384, 19406), False, 'import os\n'), ((19839, 19894), 'os.path.join', 'os.path.join', (['output_path', '"""test"""', "('A_%d.png' % (n + 1))"], {}), "(output_path, 'test', 'A_%d.png' % (n + 1))\n", (19851, 19894), False, 'import os\n'), ((19931, 19988), 'os.path.join', 'os.path.join', (['output_path', '"""test"""', "('A2B_%d.png' % (n + 1))"], {}), "(output_path, 'test', 'A2B_%d.png' % (n + 1))\n", (19943, 19988), False, 'import os\n'), ((6768, 6821), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""'], {}), "(self.output_path, self.dataset, 'model')\n", (6780, 6821), False, 'import os\n'), ((9953, 9995), 'mindspore.common.initializer.initializer', 'init.initializer', (['"""ones"""', 'cell.gamma.shape'], {}), "('ones', cell.gamma.shape)\n", (9969, 9995), True, 'from mindspore.common import initializer as init\n'), ((10032, 10074), 'mindspore.common.initializer.initializer', 'init.initializer', (['"""zeros"""', 'cell.beta.shape'], {}), "('zeros', cell.beta.shape)\n", (10048, 10074), True, 'from mindspore.common import initializer as init\n'), ((11031, 11057), 'mindspore.ops.clip_by_value', 'ops.clip_by_value', (['w', '(0)', '(1)'], {}), '(w, 0, 1)\n', (11048, 11057), True, 'import mindspore.ops as ops\n'), ((11279, 11305), 'mindspore.ops.clip_by_value', 'ops.clip_by_value', (['w', '(0)', '(1)'], {}), '(w, 0, 1)\n', (11296, 11305), True, 'import mindspore.ops as ops\n'), ((11891, 11901), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (11899, 11901), False, 'from mindspore.communication.management import get_rank\n'), ((13108, 13183), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genA2B_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genA2B_params_latest.ckpt')\n", (13120, 13183), False, 'import os\n'), ((13270, 13345), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genB2A_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genB2A_params_latest.ckpt')\n", (13282, 13345), False, 'import os\n'), ((13431, 13505), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGA_params_latest.ckpt')\n", (13443, 13505), False, 'import os\n'), ((13591, 13665), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGB_params_latest.ckpt')\n", (13603, 13665), False, 'import os\n'), ((13751, 13825), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLA_params_latest.ckpt')\n", (13763, 13825), False, 'import os\n'), ((13911, 13985), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLB_params_latest.ckpt')\n", (13923, 13985), False, 'import os\n'), ((14092, 14102), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (14100, 14102), False, 'from mindspore.communication.management import get_rank\n'), ((14257, 14310), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""'], {}), "(self.output_path, self.dataset, 'model')\n", (14269, 14310), False, 'import os\n'), ((9235, 9257), 'mindspore.common.initializer.Normal', 'init.Normal', (['init_gain'], {}), '(init_gain)\n', (9246, 9257), True, 'from mindspore.common import initializer as init\n'), ((12043, 12118), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genA2B_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genA2B_params_latest.ckpt')\n", (12055, 12118), False, 'import os\n'), ((12213, 12288), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_genB2A_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_genB2A_params_latest.ckpt')\n", (12225, 12288), False, 'import os\n'), ((12382, 12456), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGA_params_latest.ckpt')\n", (12394, 12456), False, 'import os\n'), ((12550, 12624), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disGB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disGB_params_latest.ckpt')\n", (12562, 12624), False, 'import os\n'), ((12718, 12792), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLA_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLA_params_latest.ckpt')\n", (12730, 12792), False, 'import os\n'), ((12886, 12960), 'os.path.join', 'os.path.join', (['self.output_path', "(self.dataset + '_disLB_params_latest.ckpt')"], {}), "(self.output_path, self.dataset + '_disLB_params_latest.ckpt')\n", (12898, 12960), False, 'import os\n'), ((14143, 14196), 'os.path.join', 'os.path.join', (['self.output_path', 'self.dataset', '"""model"""'], {}), "(self.output_path, self.dataset, 'model')\n", (14155, 14196), False, 'import os\n'), ((9381, 9410), 'mindspore.common.initializer.XavierUniform', 'init.XavierUniform', (['init_gain'], {}), '(init_gain)\n', (9399, 9410), True, 'from mindspore.common import initializer as init\n'), ((11656, 11667), 'time.time', 'time.time', ([], {}), '()\n', (11665, 11667), False, 'import time\n'), ((9542, 9567), 'mindspore.common.initializer.HeUniform', 'init.HeUniform', (['init_gain'], {}), '(init_gain)\n', (9556, 9567), True, 'from mindspore.common import initializer as init\n'), ((9676, 9719), 'mindspore.common.initializer.initializer', 'init.initializer', (['(0.0005)', 'cell.weight.shape'], {}), '(0.0005, cell.weight.shape)\n', (9692, 9719), True, 'from mindspore.common import initializer as init\n')]
|
#! /usr/bin/python3
import sys
sys.path.append('../../')
import numpy as np
import numpy.fft as npfft
import matplotlib.pyplot as plt
from matplotlib import animation
import time
from netCDF4 import MFDataset
from nephelae_simulation.mesonh_interface import MesoNHVariable
from nephelae_base.types import Position
from nephelae_base.types import Bounds
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import kernels as gpk
class WindKernel(gpk.Kernel):
"""
Kernel compatible with sklearn.gaussian_process.Kernel
to be used in GaussianProcessRegressor
    /!\ Hyperparameter optimization HAS NOT BEEN TESTED
    When using with GaussianProcessRegressor, set optimizer=None
    /!\ Only implemented for dimensions (t,x,y) for now, for testing purposes.
"""
    # This is the kernel actually used by the script below.
def __init__(self, lengthScale=[1.0,1.0,1.0],
stddev=1.0, noiseStddev=0.1,
windSpeed=[0.0,0.0]):
self.lengthScale = lengthScale
self.stddev = stddev
self.noiseStddev = noiseStddev
self.windSpeed = windSpeed
def __call__(self, X, Y=None):
if Y is None:
Y = X
# print("X shape: ", X.shape)
# print("Y shape: ", X.shape, end="\n\n")
cop = False
# cop = True
        # Far from the most efficient approach, but real efficiency would require a C++ implementation (or would it?)
t0,t1 = np.meshgrid(X[:,0], Y[:,0], indexing='ij', copy=cop)
dt = t1 - t0
distMat = (dt / self.lengthScale[0])**2
x0,x1 = np.meshgrid(X[:,1], Y[:,1], indexing='ij', copy=cop)
dx = x1 - (x0 + self.windSpeed[0] * dt)
distMat = distMat + (dx / self.lengthScale[1])**2
x0,x1 = np.meshgrid(X[:,2], Y[:,2], indexing='ij', copy=cop)
dx = x1 - (x0 + self.windSpeed[1] * dt)
distMat = distMat + (dx / self.lengthScale[2])**2
if Y is X:
return self.stddev*np.exp(-0.5*distMat) + np.diag([self.noiseStddev]*X.shape[0])
else:
return self.stddev*np.exp(-0.5*distMat)
def diag(self, X):
return np.array([self.stddev + self.noiseStddev]*X.shape[0])
def is_stationary(self):
return True
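# Editorial summary of the covariance implemented above: a squared-exponential
# kernel evaluated in a frame advected by the mean wind,
#   k(X, Y) = stddev * exp(-0.5 * ((dt / lt)**2
#                                  + ((dx - windSpeed[0]*dt) / lx)**2
#                                  + ((dy - windSpeed[1]*dt) / ly)**2))
# with dt, dx, dy the coordinate differences, (lt, lx, ly) = lengthScale, and
# noiseStddev added on the diagonal when the kernel is evaluated against
# itself.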
mesonhPath = '/home/pnarvor/work/nephelae/data/MesoNH-2019-02/REFHR.1.ARMCu.4D.nc'
rct = MesoNHVariable(MFDataset(mesonhPath), 'RCT')
# Estimating advective wind
ut = MesoNHVariable(MFDataset(mesonhPath), 'UT')[50.0, 1100.0,:,:].data.mean()
vt = MesoNHVariable(MFDataset(mesonhPath), 'VT')[50.0, 1100.0,:,:].data.mean()
print("Advective wind :", [ut, vt])
rctSlice = rct[240,1100,:,:].data
print("Variance : ", (rctSlice**2).mean())
t = np.linspace(0,300.0,300)
# a0 = 400.0
a0 = 250.0
f0 = - 1 / 120.0
# f0 = 1 / 150.0
a1 = 0.0
# f1 = 1.5*f0
f1 = 2.5*f0
# f1 = -1.3*f0
# f1 = -2.5*f0
# f1 = -4.5*f0
tStart = 50.0
tEnd = 700.0
t = np.linspace(tStart, tEnd, int(tEnd - tStart))
# p0 = Position(240.0, 1700.0, 2000.0, 1100.0)
# p0 = Position(50.0, 0.0, 2000.0, 1100.0)
p0 = Position(50.0, 100.0, 1950.0, 1100.0)
p = np.array([[p0.t, p0.x, p0.y, p0.z]]*len(t))
# v0 = np.array([[9.09, 0.68]])
v0 = np.array([8.5, 0.9])
p[:,0] = t
p[:,1] = p[:,1] + a0*(a1 + np.cos(2*np.pi*f1*(t-t[0])))*np.cos(2*np.pi*f0*(t-t[0]))
p[:,2] = p[:,2] + a0*(a1 + np.cos(2*np.pi*f1*(t-t[0])))*np.sin(2*np.pi*f0*(t-t[0]))
print("Max velocity relative to wind :",
max(np.sqrt(np.sum((p[1:,1:3] - p[:-1,1:3])**2, axis=1)) / (p[1:,0] - p[:-1,0])))
p[:,1:3] = p[:,1:3] + (t - tStart).reshape([len(t), 1]) @ v0.reshape([1,2])
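# Editorial note: the two assignments above trace a rose-shaped pattern of
# radius a0 around p0 (f0 sets the revolution rate, f1 the amplitude
# modulation), and the line above advects the whole trajectory with the mean
# wind v0 so the simulated measurement path follows the cloud's moving frame.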
# building prediction locations
# X0,Y0 = np.meshgrid(
# np.linspace(rct.bounds[3][0], rct.bounds[3][-1], rct.shape[3]),
# np.linspace(rct.bounds[2][0], rct.bounds[2][-1], rct.shape[2]))
b = rct.bounds
yBounds = [min(p[:,2]), max(p[:,2])]
tmp = rct[p0.t,p0.z,yBounds[0]:yBounds[1],:]
X0,Y0 = np.meshgrid(
np.linspace(tmp.bounds[1][0], tmp.bounds[1][-1], tmp.shape[1]),
np.linspace(tmp.bounds[0][0], tmp.bounds[0][-1], tmp.shape[0]))
xyLocations = np.array([[0]*X0.shape[0]*X0.shape[1], X0.ravel(), Y0.ravel()]).T
b[2].min = yBounds[0]
b[2].max = yBounds[1]
# Kernel
processVariance = 1.0e-8
noiseStddev = 0.1 * np.sqrt(processVariance)
# lengthScales = [100, 50, 50]
# lengthScales = [70, 50, 50]
lengthScales = [70, 60, 60]
# lengthScales = [140, 120, 120]
kernel0 = WindKernel(lengthScales, processVariance, noiseStddev**2, v0)
rctValues = []
print("Getting rct values... ", end='')
sys.stdout.flush()
for pos in p:
rctValues.append(rct[pos[0],pos[3],pos[2],pos[1]])
rctValues = np.array(rctValues)
print("Done !")
sys.stdout.flush()
noise = noiseStddev*np.random.randn(rctValues.shape[0])
rctValues = rctValues + noise
# # plotting rct values
# fig, axes = plt.subplots(1,1)
# axes.plot(p[:,0], np.array(rctValues))
# profiling = False
profiling = True
if not profiling:
fig, axes = plt.subplots(3,1,sharex=True,sharey=True)
simTime = p0.t
lastTime = time.time()
simSpeed = 50.0
def do_update(t):
print("Sim time :", t)
# prediction
gprProcessor0 = GaussianProcessRegressor(kernel0,
alpha=0.0,
optimizer=None,
copy_X_train=False)
# trainSet = np.array([list(pos) + [rctVal] \
# for pos, rctVal in zip(p[:,0:3],rctValues)\
# if pos[0] < t and pos[0] > t - 2*lengthScales[0]])
trainSet = np.array([list(pos) + [rctVal] \
for pos, rctVal in zip(p[:,0:3],rctValues)\
if pos[0] < t and pos[0] > t - 3*lengthScales[0]])
    print("Number of measurement samples used :", trainSet.shape[0])
gprProcessor0.fit(trainSet[:,:-1], trainSet[:,-1])
xyLocations[:,0] = t
map0, std0 = gprProcessor0.predict(xyLocations, return_std=True)
map0[map0 < 0.0] = 0.0
map0 = map0.reshape(X0.shape)
std0 = std0.reshape(X0.shape)
# display
if not profiling:
global axes
axes[0].cla()
axes[0].imshow(rct[t,p0.z,yBounds[0]:yBounds[1],:].data, origin='lower',
extent=[b[3].min, b[3].max, b[2].min, b[2].max])
axes[0].grid()
axes[0].set_title("Ground truth")
try:
axes[0].plot(p[:int(t-tStart + 0.5),1], p[:int(t-tStart + 0.5),2], '.')
finally:
pass
axes[1].cla()
axes[1].imshow(map0, origin='lower',
extent=[b[3].min, b[3].max, b[2].min, b[2].max])
axes[1].grid()
axes[1].set_title("MAP")
axes[2].cla()
axes[2].imshow(std0**2, origin='lower',
extent=[b[3].min, b[3].max, b[2].min, b[2].max])
axes[2].grid()
axes[2].set_title("Variance AP")
def init():
pass
def update(i):
# global lastTime
global simTime
# currentTime = time.time()
# simTime = simTime + simSpeed*(currentTime - lastTime)
# lastTime = currentTime
# simTime = simTime + 5.0
simTime = simTime + 2.0
do_update(simTime)
if not profiling:
anim = animation.FuncAnimation(
fig,
update,
init_func=init,
interval = 1)
plt.show(block=False)
else:
t0 = time.time()
while simTime < 600:
update(0)
    print("Elapsed :", time.time() - t0, "s")
|
[
"numpy.sqrt",
"netCDF4.MFDataset",
"numpy.array",
"numpy.sin",
"sys.path.append",
"sklearn.gaussian_process.GaussianProcessRegressor",
"nephelae_base.types.Position",
"numpy.exp",
"numpy.linspace",
"numpy.meshgrid",
"sys.stdout.flush",
"numpy.cos",
"numpy.random.randn",
"time.time",
"matplotlib.pyplot.show",
"matplotlib.animation.FuncAnimation",
"numpy.diag",
"numpy.sum",
"matplotlib.pyplot.subplots"
] |
[((32, 57), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (47, 57), False, 'import sys\n'), ((2711, 2737), 'numpy.linspace', 'np.linspace', (['(0)', '(300.0)', '(300)'], {}), '(0, 300.0, 300)\n', (2722, 2737), True, 'import numpy as np\n'), ((3050, 3087), 'nephelae_base.types.Position', 'Position', (['(50.0)', '(100.0)', '(1950.0)', '(1100.0)'], {}), '(50.0, 100.0, 1950.0, 1100.0)\n', (3058, 3087), False, 'from nephelae_base.types import Position\n'), ((3174, 3194), 'numpy.array', 'np.array', (['[8.5, 0.9]'], {}), '([8.5, 0.9])\n', (3182, 3194), True, 'import numpy as np\n'), ((4486, 4504), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4502, 4504), False, 'import sys\n'), ((4586, 4605), 'numpy.array', 'np.array', (['rctValues'], {}), '(rctValues)\n', (4594, 4605), True, 'import numpy as np\n'), ((4622, 4640), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4638, 4640), False, 'import sys\n'), ((4965, 4976), 'time.time', 'time.time', ([], {}), '()\n', (4974, 4976), False, 'import time\n'), ((2375, 2396), 'netCDF4.MFDataset', 'MFDataset', (['mesonhPath'], {}), '(mesonhPath)\n', (2384, 2396), False, 'from netCDF4 import MFDataset\n'), ((3897, 3959), 'numpy.linspace', 'np.linspace', (['tmp.bounds[1][0]', 'tmp.bounds[1][-1]', 'tmp.shape[1]'], {}), '(tmp.bounds[1][0], tmp.bounds[1][-1], tmp.shape[1])\n', (3908, 3959), True, 'import numpy as np\n'), ((3965, 4027), 'numpy.linspace', 'np.linspace', (['tmp.bounds[0][0]', 'tmp.bounds[0][-1]', 'tmp.shape[0]'], {}), '(tmp.bounds[0][0], tmp.bounds[0][-1], tmp.shape[0])\n', (3976, 4027), True, 'import numpy as np\n'), ((4211, 4235), 'numpy.sqrt', 'np.sqrt', (['processVariance'], {}), '(processVariance)\n', (4218, 4235), True, 'import numpy as np\n'), ((4661, 4696), 'numpy.random.randn', 'np.random.randn', (['rctValues.shape[0]'], {}), '(rctValues.shape[0])\n', (4676, 4696), True, 'import numpy as np\n'), ((4897, 4941), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (4909, 4941), True, 'import matplotlib.pyplot as plt\n'), ((5077, 5162), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', (['kernel0'], {'alpha': '(0.0)', 'optimizer': 'None', 'copy_X_train': '(False)'}), '(kernel0, alpha=0.0, optimizer=None, copy_X_train=False\n )\n', (5101, 5162), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((7139, 7203), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'update'], {'init_func': 'init', 'interval': '(1)'}), '(fig, update, init_func=init, interval=1)\n', (7162, 7203), False, 'from matplotlib import animation\n'), ((7248, 7269), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7256, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7285, 7296), 'time.time', 'time.time', ([], {}), '()\n', (7294, 7296), False, 'import time\n'), ((1471, 1525), 'numpy.meshgrid', 'np.meshgrid', (['X[:, 0]', 'Y[:, 0]'], {'indexing': '"""ij"""', 'copy': 'cop'}), "(X[:, 0], Y[:, 0], indexing='ij', copy=cop)\n", (1482, 1525), True, 'import numpy as np\n'), ((1610, 1664), 'numpy.meshgrid', 'np.meshgrid', (['X[:, 1]', 'Y[:, 1]'], {'indexing': '"""ij"""', 'copy': 'cop'}), "(X[:, 1], Y[:, 1], indexing='ij', copy=cop)\n", (1621, 1664), True, 'import numpy as np\n'), ((1786, 1840), 'numpy.meshgrid', 'np.meshgrid', (['X[:, 2]', 'Y[:, 2]'], {'indexing': '"""ij"""', 'copy': 'cop'}), "(X[:, 2], Y[:, 2], indexing='ij', copy=cop)\n", (1797, 1840), True, 'import numpy as np\n'), ((2164, 2219), 'numpy.array', 'np.array', (['([self.stddev + self.noiseStddev] * X.shape[0])'], {}), '([self.stddev + self.noiseStddev] * X.shape[0])\n', (2172, 2219), True, 'import numpy as np\n'), ((3263, 3298), 'numpy.cos', 'np.cos', (['(2 * np.pi * f0 * (t - t[0]))'], {}), '(2 * np.pi * f0 * (t - t[0]))\n', (3269, 3298), True, 'import numpy as np\n'), ((3347, 3382), 'numpy.sin', 'np.sin', (['(2 * np.pi * f0 * (t - t[0]))'], {}), '(2 * np.pi * f0 * (t - t[0]))\n', (3353, 3382), True, 'import numpy as np\n'), ((7364, 7375), 'time.time', 'time.time', ([], {}), '()\n', (7373, 7375), False, 'import time\n'), ((2019, 2059), 'numpy.diag', 'np.diag', (['([self.noiseStddev] * X.shape[0])'], {}), '([self.noiseStddev] * X.shape[0])\n', (2026, 2059), True, 'import numpy as np\n'), ((2103, 2125), 'numpy.exp', 'np.exp', (['(-0.5 * distMat)'], {}), '(-0.5 * distMat)\n', (2109, 2125), True, 'import numpy as np\n'), ((3234, 3269), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * (t - t[0]))'], {}), '(2 * np.pi * f1 * (t - t[0]))\n', (3240, 3269), True, 'import numpy as np\n'), ((3318, 3353), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * (t - t[0]))'], {}), '(2 * np.pi * f1 * (t - t[0]))\n', (3324, 3353), True, 'import numpy as np\n'), ((3432, 3479), 'numpy.sum', 'np.sum', (['((p[1:, 1:3] - p[:-1, 1:3]) ** 2)'], {'axis': '(1)'}), '((p[1:, 1:3] - p[:-1, 1:3]) ** 2, axis=1)\n', (3438, 3479), True, 'import numpy as np\n'), ((1996, 2018), 'numpy.exp', 'np.exp', (['(-0.5 * distMat)'], {}), '(-0.5 * distMat)\n', (2002, 2018), True, 'import numpy as np\n'), ((2454, 2475), 'netCDF4.MFDataset', 'MFDataset', (['mesonhPath'], {}), '(mesonhPath)\n', (2463, 2475), False, 'from netCDF4 import MFDataset\n'), ((2533, 2554), 'netCDF4.MFDataset', 'MFDataset', (['mesonhPath'], {}), '(mesonhPath)\n', (2542, 2554), False, 'from netCDF4 import MFDataset\n')]
|
#!/usr/bin/env python3
import numpy as np
import numpy.random as npr
import pytest
A1 = npr.rand( 1, 1)
B1 = npr.rand( 1, 1)
C1 = npr.rand( 1, 1)
A3 = npr.rand( 3, 3)
B3 = npr.rand( 3, 3)
C3 = npr.rand( 3, 3)
A10 = npr.rand( 10, 10)
B10 = npr.rand( 10, 10)
C10 = npr.rand( 10, 10)
A30 = npr.rand( 30, 30)
B30 = npr.rand( 30, 30)
C30 = npr.rand( 30, 30)
A100 = npr.rand( 100, 100)
B100 = npr.rand( 100, 100)
C100 = npr.rand( 100, 100)
A300 = npr.rand( 300, 300)
B300 = npr.rand( 300, 300)
C300 = npr.rand( 300, 300)
A1000 = npr.rand(1000, 1000)
B1000 = npr.rand(1000, 1000)
C1000 = npr.rand(1000, 1000)
A3000 = npr.rand(3000, 3000)
B3000 = npr.rand(3000, 3000)
C3000 = npr.rand(3000, 3000)
NC_A1 = list(A1 .flatten())
NC_B1 = list(B1 .flatten())
NC_C1 = list(C1 .flatten())
NC_A3 = list(A3 .flatten())
NC_B3 = list(B3 .flatten())
NC_C3 = list(C3 .flatten())
NC_A10 = list(A10 .flatten())
NC_B10 = list(B10 .flatten())
NC_C10 = list(C10 .flatten())
NC_A30 = list(A30 .flatten())
NC_B30 = list(B30 .flatten())
NC_C30 = list(C30 .flatten())
NC_A100 = list(A100 .flatten())
NC_B100 = list(B100 .flatten())
NC_C100 = list(C100 .flatten())
NC_A300 = list(A300 .flatten())
NC_B300 = list(B300 .flatten())
NC_C300 = list(C300 .flatten())
NC_A1000 = list(A1000.flatten())
NC_B1000 = list(B1000.flatten())
NC_C1000 = list(C1000.flatten())
NC_A3000 = list(A3000.flatten())
NC_B3000 = list(B3000.flatten())
NC_C3000 = list(C3000.flatten())
def add_numpy_core(a: np.ndarray, b: np.ndarray, c: np.ndarray) -> np.ndarray:
return a + b + c
def add_simple_core(a: list, b: list, c: list) -> list:
retval = [0.0] * len(a)
for i in range(len(a)):
retval[i] = a[i] + b[i] + c[i]
return retval
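# Sanity check added for illustration (not part of the original benchmark):
# both cores must agree elementwise on the flattened inputs.
def test_cores_agree():
    expected = add_numpy_core(A3, B3, C3).flatten()
    actual = np.array(add_simple_core(NC_A3, NC_B3, NC_C3))
    assert np.allclose(actual, expected)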
def add_numpy_1 (): return add_numpy_core(A1 , B1 , C1 )
def add_numpy_3 (): return add_numpy_core(A3 , B3 , C3 )
def add_numpy_10 (): return add_numpy_core(A10 , B10 , C10 )
def add_numpy_30 (): return add_numpy_core(A30 , B30 , C30 )
def add_numpy_100 (): return add_numpy_core(A100 , B100 , C100 )
def add_numpy_300 (): return add_numpy_core(A300 , B300 , C300 )
def add_numpy_1000(): return add_numpy_core(A1000, B1000, C1000)
def add_numpy_3000(): return add_numpy_core(A3000, B3000, C3000)
def add_simple_1 (): return add_simple_core(A1 , B1 , C1 )
def add_simple_3 (): return add_simple_core(A3 , B3 , C3 )
def add_simple_10 (): return add_simple_core(A10 , B10 , C10 )
def add_simple_30 (): return add_simple_core(A30 , B30 , C30 )
def add_simple_100 (): return add_simple_core(A100 , B100 , C100 )
def add_simple_300 (): return add_simple_core(A300 , B300 , C300 )
def add_simple_1000(): return add_simple_core(A1000, B1000, C1000)
def add_simple_3000(): return add_simple_core(A3000, B3000, C3000)
def test_add_numpy_1 (benchmark): benchmark.pedantic(add_numpy_1 , rounds=256, iterations=16)
def test_add_numpy_3 (benchmark): benchmark.pedantic(add_numpy_3 , rounds=256, iterations=16)
def test_add_numpy_10 (benchmark): benchmark.pedantic(add_numpy_10 , rounds=256, iterations=16)
def test_add_numpy_30 (benchmark): benchmark.pedantic(add_numpy_30 , rounds=256, iterations=16)
def test_add_numpy_100 (benchmark): benchmark.pedantic(add_numpy_100 , rounds=256, iterations=16)
def test_add_numpy_300 (benchmark): benchmark.pedantic(add_numpy_300 , rounds=256, iterations=16)
def test_add_numpy_1000 (benchmark): benchmark.pedantic(add_numpy_1000 , rounds=256, iterations=16)
def test_add_numpy_3000 (benchmark): benchmark.pedantic(add_numpy_3000 , rounds=256, iterations=16)
def test_add_simple_1 (benchmark): benchmark.pedantic(add_simple_1 , rounds=256, iterations=16)
def test_add_simple_3 (benchmark): benchmark.pedantic(add_simple_3 , rounds=256, iterations=16)
def test_add_simple_10 (benchmark): benchmark.pedantic(add_simple_10 , rounds=256, iterations=16)
def test_add_simple_30 (benchmark): benchmark.pedantic(add_simple_30 , rounds=256, iterations=16)
def test_add_simple_100 (benchmark): benchmark.pedantic(add_simple_100 , rounds=256, iterations=16)
def test_add_simple_300 (benchmark): benchmark.pedantic(add_simple_300 , rounds=256, iterations=16)
def test_add_simple_1000(benchmark): benchmark.pedantic(add_simple_1000, rounds=256, iterations=16)
def test_add_simple_3000(benchmark): benchmark.pedantic(add_simple_3000, rounds=256, iterations=16)
if __name__ == "__main__":
pytest.main(['-v', __file__])
|
[
"numpy.random.rand",
"pytest.main"
] |
[((93, 107), 'numpy.random.rand', 'npr.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (101, 107), True, 'import numpy.random as npr\n'), ((122, 136), 'numpy.random.rand', 'npr.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (130, 136), True, 'import numpy.random as npr\n'), ((151, 165), 'numpy.random.rand', 'npr.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (159, 165), True, 'import numpy.random as npr\n'), ((180, 194), 'numpy.random.rand', 'npr.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (188, 194), True, 'import numpy.random as npr\n'), ((209, 223), 'numpy.random.rand', 'npr.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (217, 223), True, 'import numpy.random as npr\n'), ((238, 252), 'numpy.random.rand', 'npr.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (246, 252), True, 'import numpy.random as npr\n'), ((267, 283), 'numpy.random.rand', 'npr.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (275, 283), True, 'import numpy.random as npr\n'), ((296, 312), 'numpy.random.rand', 'npr.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (304, 312), True, 'import numpy.random as npr\n'), ((325, 341), 'numpy.random.rand', 'npr.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (333, 341), True, 'import numpy.random as npr\n'), ((354, 370), 'numpy.random.rand', 'npr.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (362, 370), True, 'import numpy.random as npr\n'), ((383, 399), 'numpy.random.rand', 'npr.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (391, 399), True, 'import numpy.random as npr\n'), ((412, 428), 'numpy.random.rand', 'npr.rand', (['(30)', '(30)'], {}), '(30, 30)\n', (420, 428), True, 'import numpy.random as npr\n'), ((441, 459), 'numpy.random.rand', 'npr.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (449, 459), True, 'import numpy.random as npr\n'), ((470, 488), 'numpy.random.rand', 'npr.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (478, 488), True, 'import numpy.random as npr\n'), ((499, 517), 'numpy.random.rand', 'npr.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (507, 517), True, 'import numpy.random as npr\n'), ((528, 546), 'numpy.random.rand', 'npr.rand', (['(300)', '(300)'], {}), '(300, 300)\n', (536, 546), True, 'import numpy.random as npr\n'), ((557, 575), 'numpy.random.rand', 'npr.rand', (['(300)', '(300)'], {}), '(300, 300)\n', (565, 575), True, 'import numpy.random as npr\n'), ((586, 604), 'numpy.random.rand', 'npr.rand', (['(300)', '(300)'], {}), '(300, 300)\n', (594, 604), True, 'import numpy.random as npr\n'), ((615, 635), 'numpy.random.rand', 'npr.rand', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (623, 635), True, 'import numpy.random as npr\n'), ((644, 664), 'numpy.random.rand', 'npr.rand', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (652, 664), True, 'import numpy.random as npr\n'), ((673, 693), 'numpy.random.rand', 'npr.rand', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (681, 693), True, 'import numpy.random as npr\n'), ((702, 722), 'numpy.random.rand', 'npr.rand', (['(3000)', '(3000)'], {}), '(3000, 3000)\n', (710, 722), True, 'import numpy.random as npr\n'), ((731, 751), 'numpy.random.rand', 'npr.rand', (['(3000)', '(3000)'], {}), '(3000, 3000)\n', (739, 751), True, 'import numpy.random as npr\n'), ((760, 780), 'numpy.random.rand', 'npr.rand', (['(3000)', '(3000)'], {}), '(3000, 3000)\n', (768, 780), True, 'import numpy.random as npr\n'), ((4540, 4569), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (4551, 4569), False, 'import pytest\n')]
|
import random
import numpy as np
import tensorflow as tf
from recognition.utils import train_utils, googlenet_load
try:
from tensorflow.models.rnn import rnn_cell
except ImportError:
rnn_cell = tf.nn.rnn_cell
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
random.seed(0)
np.random.seed(0)
@ops.RegisterGradient("Hungarian")
def _hungarian_grad(op, *args):
return map(array_ops.zeros_like, op.inputs)
def build_lstm_inner(H, lstm_input):
'''
build lstm decoder
'''
lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)
if H['num_lstm_layers'] > 1:
lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)
else:
lstm = lstm_cell
batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
state = tf.zeros([batch_size, lstm.state_size])
outputs = []
with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
for time_step in range(H['rnn_len']):
if time_step > 0: tf.get_variable_scope().reuse_variables()
output, state = lstm(lstm_input, state)
outputs.append(output)
return outputs
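# Editorial note: reuse_variables() above shares a single set of LSTM weights
# across all H['rnn_len'] time steps, so each decoded step of the sequence is
# produced by the same recurrent cell.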
def build_overfeat_inner(H, lstm_input):
'''
build simple overfeat decoder
'''
if H['rnn_len'] > 1:
raise ValueError('rnn_len > 1 only supported with use_lstm == True')
outputs = []
initializer = tf.random_uniform_initializer(-0.1, 0.1)
with tf.variable_scope('Overfeat', initializer=initializer):
w = tf.get_variable('ip', shape=[H['later_feat_channels'], H['lstm_size']])
outputs.append(tf.matmul(lstm_input, w))
return outputs
def deconv(x, output_shape, channels):
k_h = 2
k_w = 2
w = tf.get_variable('w_deconv', initializer=tf.random_normal_initializer(stddev=0.01),
shape=[k_h, k_w, channels[1], channels[0]])
y = tf.nn.conv2d_transpose(x, w, output_shape, strides=[1, k_h, k_w, 1], padding='VALID')
return y
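# Editorial note: with a 2x2 kernel, stride 2 and VALID padding,
# conv2d_transpose exactly doubles the spatial resolution, which is why
# build_forward below can map the stride-2 feature map back to the full
# [batch, grid_height, grid_width, 256] grid.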
def rezoom(H, pred_boxes, early_feat, early_feat_channels, w_offsets, h_offsets):
'''
Rezoom into a feature map at multiple interpolation points in a grid.
If the predicted object center is at X, len(w_offsets) == 3, and len(h_offsets) == 5,
the rezoom grid will look as follows:
[o o o]
[o o o]
[o X o]
[o o o]
[o o o]
Where each letter indexes into the feature map with bilinear interpolation
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
indices = []
for w_offset in w_offsets:
for h_offset in h_offsets:
indices.append(train_utils.bilinear_select(H,
pred_boxes,
early_feat,
early_feat_channels,
w_offset, h_offset))
interp_indices = tf.concat(0, indices)
rezoom_features = train_utils.interp(early_feat,
interp_indices,
early_feat_channels)
rezoom_features_r = tf.reshape(rezoom_features,
[len(w_offsets) * len(h_offsets),
outer_size,
H['rnn_len'],
early_feat_channels])
rezoom_features_t = tf.transpose(rezoom_features_r, [1, 2, 0, 3])
return tf.reshape(rezoom_features_t,
[outer_size,
H['rnn_len'],
len(w_offsets) * len(h_offsets) * early_feat_channels])
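# Editorial sketch (not used by the model): the bilinear interpolation that
# the rezoom grid relies on, shown on a toy numpy feature map at a single
# fractional location.
def _bilinear_interp_example():
    feat = np.arange(16, dtype=np.float32).reshape(4, 4)
    y, x = 1.25, 2.5          # fractional lookup coordinates
    y0, x0 = int(y), int(x)   # top-left integer corner
    dy, dx = y - y0, x - x0   # fractional offsets
    return ((1 - dy) * (1 - dx) * feat[y0, x0]
            + (1 - dy) * dx * feat[y0, x0 + 1]
            + dy * (1 - dx) * feat[y0 + 1, x0]
            + dy * dx * feat[y0 + 1, x0 + 1])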
def build_forward(H, x, phase, reuse):
'''
Construct the forward model
'''
grid_size = H['grid_width'] * H['grid_height']
outer_size = grid_size * H['batch_size']
input_mean = 117.
x -= input_mean
cnn, early_feat, _ = googlenet_load.model(x, H, reuse)
early_feat_channels = H['early_feat_channels']
early_feat = early_feat[:, :, :, :early_feat_channels]
if H['deconv']:
size = 3
stride = 2
pool_size = 5
with tf.variable_scope("deconv", reuse=reuse):
w = tf.get_variable('conv_pool_w', shape=[size, size, H['later_feat_channels'], H['later_feat_channels']],
initializer=tf.random_normal_initializer(stddev=0.01))
cnn_s = tf.nn.conv2d(cnn, w, strides=[1, stride, stride, 1], padding='SAME')
cnn_s_pool = tf.nn.avg_pool(cnn_s[:, :, :, :256], ksize=[1, pool_size, pool_size, 1],
strides=[1, 1, 1, 1], padding='SAME')
cnn_s_with_pool = tf.concat(3, [cnn_s_pool, cnn_s[:, :, :, 256:]])
cnn_deconv = deconv(cnn_s_with_pool, output_shape=[H['batch_size'], H['grid_height'], H['grid_width'], 256],
channels=[H['later_feat_channels'], 256])
cnn = tf.concat(3, (cnn_deconv, cnn[:, :, :, 256:]))
elif H['avg_pool_size'] > 1:
pool_size = H['avg_pool_size']
cnn1 = cnn[:, :, :, :700]
cnn2 = cnn[:, :, :, 700:]
cnn2 = tf.nn.avg_pool(cnn2, ksize=[1, pool_size, pool_size, 1],
strides=[1, 1, 1, 1], padding='SAME')
cnn = tf.concat(3, [cnn1, cnn2])
cnn = tf.reshape(cnn,
[H['batch_size'] * H['grid_width'] * H['grid_height'], H['later_feat_channels']])
initializer = tf.random_uniform_initializer(-0.1, 0.1)
with tf.variable_scope('decoder', reuse=reuse, initializer=initializer):
scale_down = 0.01
lstm_input = tf.reshape(cnn * scale_down, (H['batch_size'] * grid_size, H['later_feat_channels']))
if H['use_lstm']:
lstm_outputs = build_lstm_inner(H, lstm_input)
else:
lstm_outputs = build_overfeat_inner(H, lstm_input)
pred_boxes = []
pred_logits = []
for k in range(H['rnn_len']):
output = lstm_outputs[k]
if phase == 'train':
output = tf.nn.dropout(output, 0.5)
box_weights = tf.get_variable('box_ip%d' % k,
shape=(H['lstm_size'], 4))
conf_weights = tf.get_variable('conf_ip%d' % k,
shape=(H['lstm_size'], H['num_classes']))
pred_boxes_step = tf.reshape(tf.matmul(output, box_weights) * 50,
[outer_size, 1, 4])
pred_boxes.append(pred_boxes_step)
pred_logits.append(tf.reshape(tf.matmul(output, conf_weights),
[outer_size, 1, H['num_classes']]))
pred_boxes = tf.concat(1, pred_boxes)
pred_logits = tf.concat(1, pred_logits)
pred_logits_squash = tf.reshape(pred_logits,
[outer_size * H['rnn_len'], H['num_classes']])
pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
pred_confidences = tf.reshape(pred_confidences_squash,
[outer_size, H['rnn_len'], H['num_classes']])
if H['use_rezoom']:
pred_confs_deltas = []
pred_boxes_deltas = []
w_offsets = H['rezoom_w_coords']
h_offsets = H['rezoom_h_coords']
num_offsets = len(w_offsets) * len(h_offsets)
rezoom_features = rezoom(H, pred_boxes, early_feat, early_feat_channels, w_offsets, h_offsets)
if phase == 'train':
rezoom_features = tf.nn.dropout(rezoom_features, 0.5)
for k in range(H['rnn_len']):
delta_features = tf.concat(1, [lstm_outputs[k], rezoom_features[:, k, :] / 1000.])
dim = 128
delta_weights1 = tf.get_variable(
'delta_ip1%d' % k,
shape=[H['lstm_size'] + early_feat_channels * num_offsets, dim])
# TODO: add dropout here ?
ip1 = tf.nn.relu(tf.matmul(delta_features, delta_weights1))
if phase == 'train':
ip1 = tf.nn.dropout(ip1, 0.5)
delta_confs_weights = tf.get_variable(
'delta_ip2%d' % k,
shape=[dim, H['num_classes']])
if H['reregress']:
delta_boxes_weights = tf.get_variable(
'delta_ip_boxes%d' % k,
shape=[dim, 4])
pred_boxes_deltas.append(tf.reshape(tf.matmul(ip1, delta_boxes_weights) * 5,
[outer_size, 1, 4]))
scale = H.get('rezoom_conf_scale', 50)
pred_confs_deltas.append(tf.reshape(tf.matmul(ip1, delta_confs_weights) * scale,
[outer_size, 1, H['num_classes']]))
pred_confs_deltas = tf.concat(1, pred_confs_deltas)
if H['reregress']:
pred_boxes_deltas = tf.concat(1, pred_boxes_deltas)
return pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas
return pred_boxes, pred_logits, pred_confidences
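# Minimal usage sketch (illustrative; `H` is the hyperparameter dict this
# project loads elsewhere, and the image shape keys are assumptions):
#
#     x_in = tf.placeholder(tf.float32, [H['batch_size'],
#                                        H['image_height'], H['image_width'], 3])
#     outputs = build_forward(H, x_in, phase='test', reuse=None)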
|
[
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.models.rnn.rnn_cell.MultiRNNCell",
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.get_variable_scope",
"tensorflow.nn.dropout",
"tensorflow.nn.conv2d_transpose",
"tensorflow.nn.softmax",
"recognition.utils.train_utils.interp",
"tensorflow.random_normal_initializer",
"tensorflow.concat",
"recognition.utils.googlenet_load.model",
"numpy.random.seed",
"tensorflow.matmul",
"recognition.utils.train_utils.bilinear_select",
"tensorflow.zeros",
"tensorflow.nn.conv2d",
"tensorflow.variable_scope",
"tensorflow.nn.avg_pool",
"tensorflow.reshape",
"random.seed",
"tensorflow.models.rnn.rnn_cell.BasicLSTMCell",
"tensorflow.random_uniform_initializer"
] |
[((309, 323), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (320, 323), False, 'import random\n'), ((324, 341), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (338, 341), True, 'import numpy as np\n'), ((345, 378), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""Hungarian"""'], {}), "('Hungarian')\n", (365, 378), False, 'from tensorflow.python.framework import ops\n'), ((553, 630), 'tensorflow.models.rnn.rnn_cell.BasicLSTMCell', 'rnn_cell.BasicLSTMCell', (["H['lstm_size']"], {'forget_bias': '(0.0)', 'state_is_tuple': '(False)'}), "(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)\n", (575, 630), False, 'from tensorflow.models.rnn import rnn_cell\n'), ((877, 916), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, lstm.state_size]'], {}), '([batch_size, lstm.state_size])\n', (885, 916), True, 'import tensorflow as tf\n'), ((1478, 1518), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (1507, 1518), True, 'import tensorflow as tf\n'), ((1968, 2057), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'w', 'output_shape'], {'strides': '[1, k_h, k_w, 1]', 'padding': '"""VALID"""'}), "(x, w, output_shape, strides=[1, k_h, k_w, 1],\n padding='VALID')\n", (1990, 2057), True, 'import tensorflow as tf\n'), ((3061, 3082), 'tensorflow.concat', 'tf.concat', (['(0)', 'indices'], {}), '(0, indices)\n', (3070, 3082), True, 'import tensorflow as tf\n'), ((3105, 3172), 'recognition.utils.train_utils.interp', 'train_utils.interp', (['early_feat', 'interp_indices', 'early_feat_channels'], {}), '(early_feat, interp_indices, early_feat_channels)\n', (3123, 3172), False, 'from recognition.utils import train_utils, googlenet_load\n'), ((3556, 3601), 'tensorflow.transpose', 'tf.transpose', (['rezoom_features_r', '[1, 2, 0, 3]'], {}), '(rezoom_features_r, [1, 2, 0, 3])\n', (3568, 3601), True, 'import tensorflow as tf\n'), ((4047, 4080), 'recognition.utils.googlenet_load.model', 'googlenet_load.model', (['x', 'H', 'reuse'], {}), '(x, H, reuse)\n', (4067, 4080), False, 'from recognition.utils import train_utils, googlenet_load\n'), ((5470, 5572), 'tensorflow.reshape', 'tf.reshape', (['cnn', "[H['batch_size'] * H['grid_width'] * H['grid_height'], H['later_feat_channels']\n ]"], {}), "(cnn, [H['batch_size'] * H['grid_width'] * H['grid_height'], H[\n 'later_feat_channels']])\n", (5480, 5572), True, 'import tensorflow as tf\n'), ((5607, 5647), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (5636, 5647), True, 'import tensorflow as tf\n'), ((679, 758), 'tensorflow.models.rnn.rnn_cell.MultiRNNCell', 'rnn_cell.MultiRNNCell', (["([lstm_cell] * H['num_lstm_layers'])"], {'state_is_tuple': '(False)'}), "([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)\n", (700, 758), False, 'from tensorflow.models.rnn import rnn_cell\n'), ((1528, 1582), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Overfeat"""'], {'initializer': 'initializer'}), "('Overfeat', initializer=initializer)\n", (1545, 1582), True, 'import tensorflow as tf\n'), ((1596, 1667), 'tensorflow.get_variable', 'tf.get_variable', (['"""ip"""'], {'shape': "[H['later_feat_channels'], H['lstm_size']]"}), "('ip', shape=[H['later_feat_channels'], H['lstm_size']])\n", (1611, 1667), True, 'import tensorflow as tf\n'), ((5657, 5723), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {'reuse': 'reuse', 'initializer': 'initializer'}), "('decoder', reuse=reuse, initializer=initializer)\n", (5674, 5723), True, 'import tensorflow as tf\n'), ((5772, 5862), 'tensorflow.reshape', 'tf.reshape', (['(cnn * scale_down)', "(H['batch_size'] * grid_size, H['later_feat_channels'])"], {}), "(cnn * scale_down, (H['batch_size'] * grid_size, H[\n 'later_feat_channels']))\n", (5782, 5862), True, 'import tensorflow as tf\n'), ((6865, 6889), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_boxes'], {}), '(1, pred_boxes)\n', (6874, 6889), True, 'import tensorflow as tf\n'), ((6912, 6937), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_logits'], {}), '(1, pred_logits)\n', (6921, 6937), True, 'import tensorflow as tf\n'), ((6967, 7037), 'tensorflow.reshape', 'tf.reshape', (['pred_logits', "[outer_size * H['rnn_len'], H['num_classes']]"], {}), "(pred_logits, [outer_size * H['rnn_len'], H['num_classes']])\n", (6977, 7037), True, 'import tensorflow as tf\n'), ((7112, 7145), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['pred_logits_squash'], {}), '(pred_logits_squash)\n', (7125, 7145), True, 'import tensorflow as tf\n'), ((7173, 7259), 'tensorflow.reshape', 'tf.reshape', (['pred_confidences_squash', "[outer_size, H['rnn_len'], H['num_classes']]"], {}), "(pred_confidences_squash, [outer_size, H['rnn_len'], H[\n 'num_classes']])\n", (7183, 7259), True, 'import tensorflow as tf\n'), ((1691, 1715), 'tensorflow.matmul', 'tf.matmul', (['lstm_input', 'w'], {}), '(lstm_input, w)\n', (1700, 1715), True, 'import tensorflow as tf\n'), ((1849, 1890), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1877, 1890), True, 'import tensorflow as tf\n'), ((4284, 4324), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""deconv"""'], {'reuse': 'reuse'}), "('deconv', reuse=reuse)\n", (4301, 4324), True, 'import tensorflow as tf\n'), ((4552, 4620), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['cnn', 'w'], {'strides': '[1, stride, stride, 1]', 'padding': '"""SAME"""'}), "(cnn, w, strides=[1, stride, stride, 1], padding='SAME')\n", (4564, 4620), True, 'import tensorflow as tf\n'), ((4646, 4760), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['cnn_s[:, :, :, :256]'], {'ksize': '[1, pool_size, pool_size, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(cnn_s[:, :, :, :256], ksize=[1, pool_size, pool_size, 1],\n strides=[1, 1, 1, 1], padding='SAME')\n", (4660, 4760), True, 'import tensorflow as tf\n'), ((4828, 4876), 'tensorflow.concat', 'tf.concat', (['(3)', '[cnn_s_pool, cnn_s[:, :, :, 256:]]'], {}), '(3, [cnn_s_pool, cnn_s[:, :, :, 256:]])\n', (4837, 4876), True, 'import tensorflow as tf\n'), ((5090, 5136), 'tensorflow.concat', 'tf.concat', (['(3)', '(cnn_deconv, cnn[:, :, :, 256:])'], {}), '(3, (cnn_deconv, cnn[:, :, :, 256:]))\n', (5099, 5136), True, 'import tensorflow as tf\n'), ((5293, 5392), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['cnn2'], {'ksize': '[1, pool_size, pool_size, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(cnn2, ksize=[1, pool_size, pool_size, 1], strides=[1, 1, 1, \n 1], padding='SAME')\n", (5307, 5392), True, 'import tensorflow as tf\n'), ((5432, 5458), 'tensorflow.concat', 'tf.concat', (['(3)', '[cnn1, cnn2]'], {}), '(3, [cnn1, cnn2])\n', (5441, 5458), True, 'import tensorflow as tf\n'), ((6256, 6314), 'tensorflow.get_variable', 'tf.get_variable', (["('box_ip%d' % k)"], {'shape': "(H['lstm_size'], 4)"}), "('box_ip%d' % k, shape=(H['lstm_size'], 4))\n", (6271, 6314), True, 'import tensorflow as tf\n'), ((6384, 6458), 'tensorflow.get_variable', 'tf.get_variable', (["('conf_ip%d' % k)"], {'shape': "(H['lstm_size'], H['num_classes'])"}), "('conf_ip%d' % k, shape=(H['lstm_size'], H['num_classes']))\n", (6399, 6458), True, 'import tensorflow as tf\n'), ((9070, 9101), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_confs_deltas'], {}), '(1, pred_confs_deltas)\n', (9079, 9101), True, 'import tensorflow as tf\n'), ((981, 1021), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (1010, 1021), True, 'import tensorflow as tf\n'), ((2722, 2821), 'recognition.utils.train_utils.bilinear_select', 'train_utils.bilinear_select', (['H', 'pred_boxes', 'early_feat', 'early_feat_channels', 'w_offset', 'h_offset'], {}), '(H, pred_boxes, early_feat, early_feat_channels,\n w_offset, h_offset)\n', (2749, 2821), False, 'from recognition.utils import train_utils, googlenet_load\n'), ((6203, 6229), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output', '(0.5)'], {}), '(output, 0.5)\n', (6216, 6229), True, 'import tensorflow as tf\n'), ((7714, 7749), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['rezoom_features', '(0.5)'], {}), '(rezoom_features, 0.5)\n', (7727, 7749), True, 'import tensorflow as tf\n'), ((7825, 7891), 'tensorflow.concat', 'tf.concat', (['(1)', '[lstm_outputs[k], rezoom_features[:, k, :] / 1000.0]'], {}), '(1, [lstm_outputs[k], rezoom_features[:, k, :] / 1000.0])\n', (7834, 7891), True, 'import tensorflow as tf\n'), ((7950, 8054), 'tensorflow.get_variable', 'tf.get_variable', (["('delta_ip1%d' % k)"], {'shape': "[H['lstm_size'] + early_feat_channels * num_offsets, dim]"}), "('delta_ip1%d' % k, shape=[H['lstm_size'] + \n early_feat_channels * num_offsets, dim])\n", (7965, 8054), True, 'import tensorflow as tf\n'), ((8335, 8400), 'tensorflow.get_variable', 'tf.get_variable', (["('delta_ip2%d' % k)"], {'shape': "[dim, H['num_classes']]"}), "('delta_ip2%d' % k, shape=[dim, H['num_classes']])\n", (8350, 8400), True, 'import tensorflow as tf\n'), ((9169, 9200), 'tensorflow.concat', 'tf.concat', (['(1)', 'pred_boxes_deltas'], {}), '(1, pred_boxes_deltas)\n', (9178, 9200), True, 'import tensorflow as tf\n'), ((4489, 4530), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (4517, 4530), True, 'import tensorflow as tf\n'), ((6544, 6574), 'tensorflow.matmul', 'tf.matmul', (['output', 'box_weights'], {}), '(output, box_weights)\n', (6553, 6574), True, 'import tensorflow as tf\n'), ((6732, 6763), 'tensorflow.matmul', 'tf.matmul', (['output', 'conf_weights'], {}), '(output, conf_weights)\n', (6741, 6763), True, 'import tensorflow as tf\n'), ((8167, 8208), 'tensorflow.matmul', 'tf.matmul', (['delta_features', 'delta_weights1'], {}), '(delta_features, delta_weights1)\n', (8176, 8208), True, 'import tensorflow as tf\n'), ((8273, 8296), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['ip1', '(0.5)'], {}), '(ip1, 0.5)\n', (8286, 8296), True, 'import tensorflow as tf\n'), ((8519, 8574), 'tensorflow.get_variable', 'tf.get_variable', (["('delta_ip_boxes%d' % k)"], {'shape': '[dim, 4]'}), "('delta_ip_boxes%d' % k, shape=[dim, 4])\n", (8534, 8574), True, 'import tensorflow as tf\n'), ((1100, 1123), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (1121, 1123), True, 'import tensorflow as tf\n'), ((8905, 8940), 'tensorflow.matmul', 'tf.matmul', (['ip1', 'delta_confs_weights'], {}), '(ip1, delta_confs_weights)\n', (8914, 8940), True, 'import tensorflow as tf\n'), ((8680, 8715), 'tensorflow.matmul', 'tf.matmul', 
(['ip1', 'delta_boxes_weights'], {}), '(ip1, delta_boxes_weights)\n', (8689, 8715), True, 'import tensorflow as tf\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: linear.ipynb (unless otherwise specified).
__all__ = ['vv', 'denoising_MRF']
# Cell
import numpy as np
import gtsam
from gtsam import noiseModel
from .display import show
from typing import Dict
# Cell
def vv(keys_vectors: Dict[int, np.ndarray]):
"""Create a VectorValues from a dict"""
result = gtsam.VectorValues()
for j, v in keys_vectors.items():
result.insert(j, v)
return result
# Cell
def denoising_MRF(M: int, N: int, sigma = 0.5, smoothness_sigma=0.5):
"""Create MxN MRF
@returns graph and symbols used for rows.
"""
row_symbols = [chr(ord('a')+row) for row in range(M)]
keys = {(row, col): gtsam.symbol(row_symbols[row], col+1)
for row in range(M) for col in range(N)}
rng = np.random.default_rng(42)
data = rng.normal(loc=0, scale=sigma, size=(M, N, 1))
data_model = noiseModel.Isotropic.Sigmas([sigma])
smoothness_model = noiseModel.Isotropic.Sigmas([smoothness_sigma])
I = np.eye(1, 1, dtype=float)
zero = np.zeros((1, 1))
graph = gtsam.GaussianFactorGraph()
for row in range(M):
for col in range(N):
# add data terms:
j = keys[(row, col)]
graph.add(j, I, np.array(data[row, col]), data_model)
# add smoothness terms:
if col > 0:
j1 = keys[(row, col-1)]
graph.add(j, I, j1, -I, zero, smoothness_model)
if row > 0:
j2 = keys[(row-1, col)]
graph.add(j, I, j2, -I, zero, smoothness_model)
return graph, row_symbols
# Cell
|
[
"numpy.eye",
"gtsam.noiseModel.Isotropic.Sigmas",
"numpy.random.default_rng",
"gtsam.symbol",
"numpy.array",
"numpy.zeros",
"gtsam.VectorValues",
"gtsam.GaussianFactorGraph"
] |
[((352, 372), 'gtsam.VectorValues', 'gtsam.VectorValues', ([], {}), '()\n', (370, 372), False, 'import gtsam\n'), ((800, 825), 'numpy.random.default_rng', 'np.random.default_rng', (['(42)'], {}), '(42)\n', (821, 825), True, 'import numpy as np\n'), ((901, 937), 'gtsam.noiseModel.Isotropic.Sigmas', 'noiseModel.Isotropic.Sigmas', (['[sigma]'], {}), '([sigma])\n', (928, 937), False, 'from gtsam import noiseModel\n'), ((962, 1009), 'gtsam.noiseModel.Isotropic.Sigmas', 'noiseModel.Isotropic.Sigmas', (['[smoothness_sigma]'], {}), '([smoothness_sigma])\n', (989, 1009), False, 'from gtsam import noiseModel\n'), ((1019, 1044), 'numpy.eye', 'np.eye', (['(1)', '(1)'], {'dtype': 'float'}), '(1, 1, dtype=float)\n', (1025, 1044), True, 'import numpy as np\n'), ((1056, 1072), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1064, 1072), True, 'import numpy as np\n'), ((1085, 1112), 'gtsam.GaussianFactorGraph', 'gtsam.GaussianFactorGraph', ([], {}), '()\n', (1110, 1112), False, 'import gtsam\n'), ((698, 737), 'gtsam.symbol', 'gtsam.symbol', (['row_symbols[row]', '(col + 1)'], {}), '(row_symbols[row], col + 1)\n', (710, 737), False, 'import gtsam\n'), ((1258, 1282), 'numpy.array', 'np.array', (['data[row, col]'], {}), '(data[row, col])\n', (1266, 1282), True, 'import numpy as np\n')]
|
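# --- usage sketch (added): the snippet below is not part of the original record.
# It assumes gtsam.GaussianFactorGraph.optimize() returns the exact MAP solution
# as a VectorValues, and rebuilds the per-cell keys the same way denoising_MRF does.
if __name__ == "__main__":
    graph, row_symbols = denoising_MRF(3, 4, sigma=0.5, smoothness_sigma=0.5)
    result = graph.optimize()  # solve the sparse linear MRF exactly
    # read back the denoised value of the cell in row 0, column 0
    j = gtsam.symbol(row_symbols[0], 1)
    print(result.at(j))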
"""Evaluating Prophet model on M4 timeseries
"""
from darts.models import Prophet
from darts.utils.statistics import check_seasonality
from darts.utils import _build_tqdm_iterator
import numpy as np
import pandas as pd
import pickle as pkl
from M4_metrics import owa_m4, mase_m4, smape_m4
if __name__ == "__main__":
data_categories = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']
info_dataset = pd.read_csv('dataset/M4-info.csv', delimiter=',').set_index('M4id')
for cat in data_categories[::-1]:
# Load TimeSeries from M4
ts_train = pkl.load(open("dataset/train_"+cat+".pkl", "rb"))
ts_test = pkl.load(open("dataset/test_"+cat+".pkl", "rb"))
# Test models on all time series
mase_all = []
smape_all = []
m = int(info_dataset.Frequency[cat[0]+"1"])
for train, test in _build_tqdm_iterator(zip(ts_train, ts_test), verbose=True):
train_des = train
seasonOut = 1
            if m > 1:
                # fall back to a non-seasonal model if no seasonality of period m is detected
                if not check_seasonality(train, m=int(m), max_lag=2*m):
                    m = 1
try:
prophet_args = {
'daily_seasonality': False,
'weekly_seasonality': False,
'yearly_seasonality': False,
'frequency': None,
'changepoint_range': 0.95,
}
if cat == 'Daily':
prophet_args['daily_seasonality'] = True
elif cat == 'Hourly':
prophet_args['daily_seasonality'] = True
elif cat == 'Weekly':
prophet_args['weekly_seasonality'] = True
elif cat == 'Monthly':
prophet_args['yearly_seasonality'] = True
elif cat == 'Quarterly':
prophet_args['yearly_seasonality'] = True
elif cat == 'Yearly':
prophet_args['yearly_seasonality'] = True
prophet = Prophet(**prophet_args)
derivate = np.diff(train.univariate_values(), n=1)
jump = derivate.max()/(train.max().max() - train.min().min())
try:
if jump <= 0.5:
prophet.fit(train)
else:
prophet.fit(train.drop_before(train.time_index()[np.argmax(derivate)+1]))
                except ValueError:
                    raise  # re-raise unchanged; kept explicit to separate fit errors from the outer handler
forecast_prophet = prophet.predict(len(test))
                m = int(info_dataset.Frequency[cat[0]+"1"])  # mase_m4 slices with m, so keep it an int
mase_all.append(np.vstack([
mase_m4(train, test, forecast_prophet, m=m),
]))
smape_all.append(np.vstack([
smape_m4(test, forecast_prophet),
]))
except Exception as e:
print(e)
break
pkl.dump(mase_all, open("prophet_mase_"+cat+".pkl", "wb"))
pkl.dump(smape_all, open("prophet_smape_"+cat+".pkl", "wb"))
print("MASE; Prophet: {}".format(*tuple(np.nanmean(np.stack(mase_all), axis=(0, 2)))))
print("sMAPE; Prophet: {}".format(*tuple(np.nanmean(np.stack(smape_all), axis=(0, 2)))))
print("OWA: ", owa_m4(cat, np.nanmean(np.stack(smape_all), axis=(0, 2)),
np.nanmean(np.stack(mase_all), axis=(0, 2))))
|
[
"M4_metrics.mase_m4",
"pandas.read_csv",
"M4_metrics.smape_m4",
"numpy.argmax",
"numpy.stack",
"darts.models.Prophet"
] |
[((429, 478), 'pandas.read_csv', 'pd.read_csv', (['"""dataset/M4-info.csv"""'], {'delimiter': '""","""'}), "('dataset/M4-info.csv', delimiter=',')\n", (440, 478), True, 'import pandas as pd\n'), ((2152, 2175), 'darts.models.Prophet', 'Prophet', ([], {}), '(**prophet_args)\n', (2159, 2175), False, 'from darts.models import Prophet\n'), ((3442, 3461), 'numpy.stack', 'np.stack', (['smape_all'], {}), '(smape_all)\n', (3450, 3461), True, 'import numpy as np\n'), ((3518, 3536), 'numpy.stack', 'np.stack', (['mase_all'], {}), '(mase_all)\n', (3526, 3536), True, 'import numpy as np\n'), ((2794, 2837), 'M4_metrics.mase_m4', 'mase_m4', (['train', 'test', 'forecast_prophet'], {'m': 'm'}), '(train, test, forecast_prophet, m=m)\n', (2801, 2837), False, 'from M4_metrics import owa_m4, mase_m4, smape_m4\n'), ((2928, 2960), 'M4_metrics.smape_m4', 'smape_m4', (['test', 'forecast_prophet'], {}), '(test, forecast_prophet)\n', (2936, 2960), False, 'from M4_metrics import owa_m4, mase_m4, smape_m4\n'), ((3263, 3281), 'numpy.stack', 'np.stack', (['mase_all'], {}), '(mase_all)\n', (3271, 3281), True, 'import numpy as np\n'), ((3359, 3378), 'numpy.stack', 'np.stack', (['smape_all'], {}), '(smape_all)\n', (3367, 3378), True, 'import numpy as np\n'), ((2520, 2539), 'numpy.argmax', 'np.argmax', (['derivate'], {}), '(derivate)\n', (2529, 2539), True, 'import numpy as np\n')]
|
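# --- metrics sketch (added): M4_metrics is a project-local module not shown in
# this record.  The functions below are a minimal sketch of the standard M4
# competition definitions that smape_m4/mase_m4 presumably implement; the real
# module may differ in signatures and in how it unwraps darts TimeSeries objects.
import numpy as np

def smape(actual, forecast):
    # symmetric MAPE in percent, averaged over the forecast horizon
    return 200.0 * np.mean(np.abs(actual - forecast) / (np.abs(actual) + np.abs(forecast)))

def mase(insample, actual, forecast, m):
    # MAE scaled by the in-sample error of the seasonal naive forecast with period m
    scale = np.mean(np.abs(insample[m:] - insample[:-m]))
    return np.mean(np.abs(actual - forecast)) / scale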
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from vispy.testing import run_tests_if_main
from vispy.geometry import (create_box, create_cube, create_cylinder,
create_sphere, create_plane)
def test_box():
"""Test box function"""
vertices, filled, outline = create_box()
assert_array_equal(np.arange(len(vertices)), np.unique(filled))
assert_array_equal(np.arange(len(vertices)), np.unique(outline))
def test_cube():
"""Test cube function"""
vertices, filled, outline = create_cube()
assert_array_equal(np.arange(len(vertices)), np.unique(filled))
assert_array_equal(np.arange(len(vertices)), np.unique(outline))
def test_sphere():
"""Test sphere function"""
md = create_sphere(rows=10, cols=20, radius=10, method='latitude')
radii = np.sqrt((md.get_vertices() ** 2).sum(axis=1))
assert_allclose(radii, np.ones_like(radii) * 10)
md = create_sphere(subdivisions=5, radius=10, method='ico')
radii = np.sqrt((md.get_vertices() ** 2).sum(axis=1))
assert_allclose(radii, np.ones_like(radii) * 10)
md = create_sphere(rows=20, cols=20, depth=20, radius=10, method='cube')
radii = np.sqrt((md.get_vertices() ** 2).sum(axis=1))
assert_allclose(radii, np.ones_like(radii) * 10)
def test_cylinder():
"""Test cylinder function"""
md = create_cylinder(10, 20, radius=[10, 10])
radii = np.sqrt((md.get_vertices()[:, :2] ** 2).sum(axis=1))
assert_allclose(radii, np.ones_like(radii) * 10)
def test_plane():
"""Test plane function"""
vertices, filled, outline = create_plane()
assert_array_equal(np.arange(len(vertices)), np.unique(filled))
assert_array_equal(np.arange(len(vertices)), np.unique(outline))
run_tests_if_main()
|
[
"numpy.ones_like",
"numpy.unique",
"vispy.geometry.create_cylinder",
"vispy.testing.run_tests_if_main",
"vispy.geometry.create_plane",
"vispy.geometry.create_sphere",
"vispy.geometry.create_cube",
"vispy.geometry.create_box"
] |
[((1913, 1932), 'vispy.testing.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (1930, 1932), False, 'from vispy.testing import run_tests_if_main\n'), ((475, 487), 'vispy.geometry.create_box', 'create_box', ([], {}), '()\n', (485, 487), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((705, 718), 'vispy.geometry.create_cube', 'create_cube', ([], {}), '()\n', (716, 718), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((917, 978), 'vispy.geometry.create_sphere', 'create_sphere', ([], {'rows': '(10)', 'cols': '(20)', 'radius': '(10)', 'method': '"""latitude"""'}), "(rows=10, cols=20, radius=10, method='latitude')\n", (930, 978), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1099, 1153), 'vispy.geometry.create_sphere', 'create_sphere', ([], {'subdivisions': '(5)', 'radius': '(10)', 'method': '"""ico"""'}), "(subdivisions=5, radius=10, method='ico')\n", (1112, 1153), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1274, 1341), 'vispy.geometry.create_sphere', 'create_sphere', ([], {'rows': '(20)', 'cols': '(20)', 'depth': '(20)', 'radius': '(10)', 'method': '"""cube"""'}), "(rows=20, cols=20, depth=20, radius=10, method='cube')\n", (1287, 1341), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1518, 1558), 'vispy.geometry.create_cylinder', 'create_cylinder', (['(10)', '(20)'], {'radius': '[10, 10]'}), '(10, 20, radius=[10, 10])\n', (1533, 1558), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((1759, 1773), 'vispy.geometry.create_plane', 'create_plane', ([], {}), '()\n', (1771, 1773), False, 'from vispy.geometry import create_box, create_cube, create_cylinder, create_sphere, create_plane\n'), ((537, 554), 'numpy.unique', 'np.unique', (['filled'], {}), '(filled)\n', (546, 554), True, 'import numpy as np\n'), ((605, 623), 'numpy.unique', 'np.unique', (['outline'], {}), '(outline)\n', (614, 623), True, 'import numpy as np\n'), ((768, 785), 'numpy.unique', 'np.unique', (['filled'], {}), '(filled)\n', (777, 785), True, 'import numpy as np\n'), ((836, 854), 'numpy.unique', 'np.unique', (['outline'], {}), '(outline)\n', (845, 854), True, 'import numpy as np\n'), ((1823, 1840), 'numpy.unique', 'np.unique', (['filled'], {}), '(filled)\n', (1832, 1840), True, 'import numpy as np\n'), ((1891, 1909), 'numpy.unique', 'np.unique', (['outline'], {}), '(outline)\n', (1900, 1909), True, 'import numpy as np\n'), ((1064, 1083), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1076, 1083), True, 'import numpy as np\n'), ((1239, 1258), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1251, 1258), True, 'import numpy as np\n'), ((1427, 1446), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1439, 1446), True, 'import numpy as np\n'), ((1651, 1670), 'numpy.ones_like', 'np.ones_like', (['radii'], {}), '(radii)\n', (1663, 1670), True, 'import numpy as np\n')]
|
import logging
import numpy as np
from bico.geometry.point import Point
from bico.nearest_neighbor.base import NearestNeighbor
from bico.utils.ClusteringFeature import ClusteringFeature
from datetime import datetime
from typing import Callable, TextIO, List
logger = logging.getLogger(__name__)
class BICONode:
def __init__(self, level: int, dim: int, proj: int, bico: 'BICO',
projection_func: Callable[[int, int, float], NearestNeighbor]):
self.level = level
self.dim = dim
self.proj = proj
self.point_to_biconode = []
self.projection_func = projection_func
self.nn_engine = projection_func(dim, proj, bico.get_radius(self.level))
self.num_cfs = 0
self.bico = bico
self.cf = ClusteringFeature(Point(np.zeros(dim)), Point(np.zeros(dim)), 0, 0)
def insert_point(self, point_cf: ClusteringFeature) -> int:
if self.bico.verbose:
logger.debug("Insert point: {}".format(point_cf))
        # check whether the point fits into this node's clustering feature (CF)
if self.level > 0:
if self.cf.size == 0:
self.cf += point_cf
self.cf.ref = point_cf.ref
else:
test = self.cf + point_cf
cost = test.kmeans_cost(self.cf.ref)
if self.bico.verbose:
logger.debug("Cost: " + str(cost) + ", Thresh: " + str(self.bico.get_threshold(self.level)))
if cost < self.bico.get_threshold(self.level):
self.cf = test
return 0
        # otherwise search for the nearest neighbor and insert the point there, or open a new BICONode
candidates = []
if self.num_cfs > 0:
if self.bico.track_time:
tstart = datetime.now()
candidates = self.nn_engine.get_candidates(point_cf.ref.p)
# candidates = self.ann_engine.neighbours(point_cf.ref.p)
if self.bico.track_time:
tend = datetime.now()
if len(self.bico.time) < self.level + 1:
self.bico.time.append(tend - tstart)
else:
self.bico.time[self.level] += tend - tstart
if len(candidates) == 0:
if self.bico.verbose:
logger.debug("No nearest neighbor found.")
self.num_cfs += 1
self.nn_engine.insert_candidate(point=point_cf.ref.p, metadata=self.num_cfs)
# self.ann_engine.store_vector(point_cf.ref.p, data=self.num_cfs)
new_node = BICONode(self.level + 1, self.dim, self.proj, self.bico, self.projection_func)
# new_node.cf = ClusteringFeature(geometry, geometry, geometry*geometry, 1)
new_node.cf = point_cf
# debug
if len(self.point_to_biconode) != self.num_cfs - 1:
logger.error("Something is wrong: {} != {}".format(len(self.point_to_biconode), self.num_cfs - 1))
self.point_to_biconode.append(new_node)
return 1
else:
if self.bico.verbose:
logger.debug(str(len(candidates)) + " nearest neighbor found!")
logger.debug(candidates)
nearest = candidates[0]
node = nearest.data # contains the index
# sanity check
if len(self.point_to_biconode) < node - 2:
logger.error("Something is wrong: {} > {}".format(len(self.point_to_biconode), node - 2))
return self.point_to_biconode[node - 1].insert_point(point_cf)
def output_cf(self, f: TextIO) -> None:
if self.level > 0:
f.write(str(self.cf) + "\n")
for node in self.point_to_biconode:
node.output_cf(f)
def get_cf(self) -> List[np.ndarray]:
cur = []
if self.level > 0:
cur.append(np.insert(self.cf.center().p, 0, self.cf.size))
for node in self.point_to_biconode:
cur = cur + node.get_cf()
return cur
|
[
"logging.getLogger",
"datetime.datetime.now",
"numpy.zeros"
] |
[((268, 295), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (285, 295), False, 'import logging\n'), ((796, 809), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (804, 809), True, 'import numpy as np\n'), ((818, 831), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (826, 831), True, 'import numpy as np\n'), ((1771, 1785), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1783, 1785), False, 'from datetime import datetime\n'), ((1987, 2001), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1999, 2001), False, 'from datetime import datetime\n')]
|
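# --- interface sketch (added): BICONode only relies on two methods of the engine
# returned by projection_func -- insert_candidate(point=..., metadata=...) and
# get_candidates(point), where each returned candidate exposes the stored
# metadata as a .data attribute and candidates[0] is the nearest hit.  The class
# below is a hypothetical brute-force stand-in illustrating that contract; the
# real engines live in bico.nearest_neighbor and use random projections.
import numpy as np
from collections import namedtuple

Candidate = namedtuple('Candidate', ['point', 'data'])

class BruteForceNN:
    def __init__(self, radius):
        self.radius = radius
        self.items = []

    def insert_candidate(self, point, metadata):
        self.items.append(Candidate(np.asarray(point, dtype=float), metadata))

    def get_candidates(self, point):
        point = np.asarray(point, dtype=float)
        hits = [c for c in self.items
                if np.linalg.norm(c.point - point) <= self.radius]
        # nearest first, since BICONode picks candidates[0]
        return sorted(hits, key=lambda c: np.linalg.norm(c.point - point))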
import numpy as np
from scipy.stats import linregress as li
from math import exp
def calc_factor(field,stepsize=0.01):
"""
Function for calculation of the summed binning.
The returned result is an integral over the binning of the velocities.
It is done for the negative and positive half separately.
:param field: is a 1D field which will be binned
:param stepsize: is the step size for the velocity
:return (positive,negative):
velocities and the binning result for positive half and negative half are returned
as a tuple of numpy arrays
"""
result_pos = []
result_neg = []
alpha = 0.
#: binning of the positive half
while alpha <= np.max(field)+stepsize:
pos = alpha
neg = 0.
filtered = np.copy(field)
filtered[filtered<=neg] = np.nan
filtered[filtered>pos] = np.nan
        # plain float(): np.float was removed in NumPy 1.24
        outlier = np.count_nonzero(~np.isnan(filtered))/float(np.count_nonzero(~np.isnan(field)))
result_pos.append([alpha,outlier])
alpha += stepsize
alpha = 0.
#: binning of the negative half
while alpha <= np.abs(np.min(field))+stepsize:
pos = 0.
neg = -1.*alpha
filtered = np.copy(field)
filtered[filtered<=neg] = np.nan
filtered[filtered>pos] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered))/float(np.count_nonzero(~np.isnan(field)))
result_neg.append([-1.*alpha,outlier])
alpha += stepsize
return (np.array(result_pos),np.array(result_neg))
def calc_derivative(field,stepsize=0.01):
"""
Function for calculation of the binning.
The returned result is the binning of the velocities.
It is called derivative because it is mathematically the derivative of the function:
.. function:: velofilter.calc_factor
It is done for the negative and positive half separately.
:param field: is a 1D field which will be binned
:param stepsize: is the step size for the velocity
:return (positive,negative):
velocities and the binning result for positive half and negative half are returned
as a tuple
"""
result_pos = []
result_neg = []
outlier = 1.
alpha = 0.
while alpha <= np.max(field)+stepsize:
pos = alpha+stepsize
neg = alpha
filtered = np.copy(field)
filtered[(filtered<=neg) | (filtered>pos)] = np.nan
#filtered[filtered>pos] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered))/float(np.count_nonzero(~np.isnan(field)))
result_pos.append([alpha,outlier])
alpha += stepsize
outlier = 1.
alpha = 0.
while alpha <= np.abs(np.min(field))+stepsize:
pos = -1.*alpha
neg = -1.*(alpha+stepsize)
filtered = np.copy(field)
filtered[(filtered<=neg) | (filtered>pos)] = np.nan
#filtered[filtered>pos] = np.nan
        outlier = np.count_nonzero(~np.isnan(filtered))/float(np.count_nonzero(~np.isnan(field)))
result_neg.append([-1.*alpha,outlier])
alpha += stepsize
return (np.array(result_pos),np.array(result_neg))
def filter(piv,tfactor=3.,dalpha=.01):
"""
Function for calculating the cutoff values.
:param object piv: PIV class object
This is supposed to be an object from a Direct or adaptive Class
it is needed to get the velocities
:param double tfactor: Factor for cutoff in the velocity binning
The default value is set to 3 which works for many cases
:param double dalpha: value for differential velocity
        The default is set to .01 which works for many cases;
        if the velocities vary over a larger range, use a larger value
"""
#: pre sampling
    numberup = np.count_nonzero(piv.u<=0.)/float(np.count_nonzero(piv.u))
    numberun = np.count_nonzero(piv.u>0.)/float(np.count_nonzero(piv.u))
    numbervp = np.count_nonzero(piv.v<=0.)/float(np.count_nonzero(piv.v))
    numbervn = np.count_nonzero(piv.v>0.)/float(np.count_nonzero(piv.v))
upos = numberup
uneg = numberun
vpos = numbervp
vneg = numbervn
#: get alpha dependency
up_alpha, un_alpha = calc_factor(piv.u,dalpha)
vp_alpha, vn_alpha = calc_factor(piv.v,dalpha)
#: calculate derivative directly from data
dup_alpha1, dun_alpha1 = calc_derivative(piv.u,dalpha)
dvp_alpha1, dvn_alpha1 = calc_derivative(piv.v,dalpha)
dup_alpha = dup_alpha1[:,1]
dun_alpha = dun_alpha1[:,1]
dvp_alpha = dvp_alpha1[:,1]
dvn_alpha = dvn_alpha1[:,1]
#get boundaries
boundup = np.sum(dup_alpha[0:5])/5./np.exp(tfactor)
boundun = np.sum(dun_alpha[0:5])/5./np.exp(tfactor)
boundvp = np.sum(dvp_alpha[0:5])/5./np.exp(tfactor)
boundvn = np.sum(dvn_alpha[0:5])/5./np.exp(tfactor)
#get indices and exponential
    # plain int(): np.int was removed in NumPy 1.24
    if upos != 0.:
        indexup = np.where(dup_alpha < boundup)
        cut_up = int(np.sum(indexup[0][0:5])/5.)
        nup = np.polyfit(np.log(up_alpha[1:cut_up, 0]), np.log(up_alpha[1:cut_up, 1]), 1)
        upos = exp(-nup[1]/nup[0])
    if uneg != 0.:
        indexun = np.where(dun_alpha < boundun)
        cut_un = int(np.sum(indexun[0][0:5])/5.)
        nun = np.polyfit(np.log(-un_alpha[1:cut_un, 0]), np.log(un_alpha[1:cut_un, 1]), 1)
        uneg = -exp(-nun[1]/nun[0])
    if vpos != 0.:
        indexvp = np.where(dvp_alpha < boundvp)
        cut_vp = int(np.sum(indexvp[0][0:5])/5.)
        nvp = np.polyfit(np.log(vp_alpha[1:cut_vp, 0]), np.log(vp_alpha[1:cut_vp, 1]), 1)
        vpos = exp(-nvp[1]/nvp[0])
    if vneg != 0.:
        indexvn = np.where(dvn_alpha < boundvn)
        cut_vn = int(np.sum(indexvn[0][0:5])/5.)
        nvn = np.polyfit(np.log(-vn_alpha[1:cut_vn, 0]), np.log(vn_alpha[1:cut_vn, 1]), 1)
        vneg = -exp(-nvn[1]/nvn[0])
#filter + clamping
if upos > np.max(piv.u):
upos = np.max(piv.u)
if uneg < np.min(piv.u):
uneg = np.min(piv.u)
if vpos > np.max(piv.v):
vpos = np.max(piv.v)
if vneg < np.min(piv.v):
vneg = np.min(piv.v)
#equalizing the cutoff
upos *= (0.5+numberup)
uneg *= (0.5+numberun)
vpos *= (0.5+numbervp)
vneg *= (0.5+numbervn)
#making the mask
masku = (piv.u<uneg) | (piv.u>upos)
maskv = (piv.v<vneg) | (piv.v>vpos)
piv.u[masku] = np.nan
piv.v[maskv] = np.nan
|
[
"numpy.copy",
"numpy.where",
"numpy.log",
"numpy.max",
"numpy.count_nonzero",
"numpy.array",
"numpy.exp",
"numpy.sum",
"numpy.isnan",
"numpy.min",
"math.exp"
] |
[((785, 799), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (792, 799), True, 'import numpy as np\n'), ((1213, 1227), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (1220, 1227), True, 'import numpy as np\n'), ((1496, 1516), 'numpy.array', 'np.array', (['result_pos'], {}), '(result_pos)\n', (1504, 1516), True, 'import numpy as np\n'), ((1517, 1537), 'numpy.array', 'np.array', (['result_neg'], {}), '(result_neg)\n', (1525, 1537), True, 'import numpy as np\n'), ((2334, 2348), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (2341, 2348), True, 'import numpy as np\n'), ((2781, 2795), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (2788, 2795), True, 'import numpy as np\n'), ((3084, 3104), 'numpy.array', 'np.array', (['result_pos'], {}), '(result_pos)\n', (3092, 3104), True, 'import numpy as np\n'), ((3105, 3125), 'numpy.array', 'np.array', (['result_neg'], {}), '(result_neg)\n', (3113, 3125), True, 'import numpy as np\n'), ((3750, 3780), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.u <= 0.0)'], {}), '(piv.u <= 0.0)\n', (3766, 3780), True, 'import numpy as np\n'), ((3827, 3856), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.u > 0.0)'], {}), '(piv.u > 0.0)\n', (3843, 3856), True, 'import numpy as np\n'), ((3903, 3933), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.v <= 0.0)'], {}), '(piv.v <= 0.0)\n', (3919, 3933), True, 'import numpy as np\n'), ((3980, 4009), 'numpy.count_nonzero', 'np.count_nonzero', (['(piv.v > 0.0)'], {}), '(piv.v > 0.0)\n', (3996, 4009), True, 'import numpy as np\n'), ((4610, 4625), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4616, 4625), True, 'import numpy as np\n'), ((4666, 4681), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4672, 4681), True, 'import numpy as np\n'), ((4722, 4737), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4728, 4737), True, 'import numpy as np\n'), ((4778, 4793), 'numpy.exp', 'np.exp', (['tfactor'], {}), '(tfactor)\n', (4784, 4793), True, 'import numpy as np\n'), ((4865, 4894), 'numpy.where', 'np.where', (['(dup_alpha < boundup)'], {}), '(dup_alpha < boundup)\n', (4873, 4894), True, 'import numpy as np\n'), ((5048, 5069), 'math.exp', 'exp', (['(-nup[1] / nup[0])'], {}), '(-nup[1] / nup[0])\n', (5051, 5069), False, 'from math import exp\n'), ((5105, 5134), 'numpy.where', 'np.where', (['(dun_alpha < boundun)'], {}), '(dun_alpha < boundun)\n', (5113, 5134), True, 'import numpy as np\n'), ((5345, 5374), 'numpy.where', 'np.where', (['(dvp_alpha < boundvp)'], {}), '(dvp_alpha < boundvp)\n', (5353, 5374), True, 'import numpy as np\n'), ((5528, 5549), 'math.exp', 'exp', (['(-nvp[1] / nvp[0])'], {}), '(-nvp[1] / nvp[0])\n', (5531, 5549), False, 'from math import exp\n'), ((5585, 5614), 'numpy.where', 'np.where', (['(dvn_alpha < boundvn)'], {}), '(dvn_alpha < boundvn)\n', (5593, 5614), True, 'import numpy as np\n'), ((5826, 5839), 'numpy.max', 'np.max', (['piv.u'], {}), '(piv.u)\n', (5832, 5839), True, 'import numpy as np\n'), ((5856, 5869), 'numpy.max', 'np.max', (['piv.u'], {}), '(piv.u)\n', (5862, 5869), True, 'import numpy as np\n'), ((5884, 5897), 'numpy.min', 'np.min', (['piv.u'], {}), '(piv.u)\n', (5890, 5897), True, 'import numpy as np\n'), ((5914, 5927), 'numpy.min', 'np.min', (['piv.u'], {}), '(piv.u)\n', (5920, 5927), True, 'import numpy as np\n'), ((5942, 5955), 'numpy.max', 'np.max', (['piv.v'], {}), '(piv.v)\n', (5948, 5955), True, 'import numpy as np\n'), ((5972, 5985), 'numpy.max', 'np.max', (['piv.v'], {}), '(piv.v)\n', (5978, 5985), True, 'import numpy as 
np\n'), ((6000, 6013), 'numpy.min', 'np.min', (['piv.v'], {}), '(piv.v)\n', (6006, 6013), True, 'import numpy as np\n'), ((6030, 6043), 'numpy.min', 'np.min', (['piv.v'], {}), '(piv.v)\n', (6036, 6043), True, 'import numpy as np\n'), ((705, 718), 'numpy.max', 'np.max', (['field'], {}), '(field)\n', (711, 718), True, 'import numpy as np\n'), ((2242, 2255), 'numpy.max', 'np.max', (['field'], {}), '(field)\n', (2248, 2255), True, 'import numpy as np\n'), ((3787, 3810), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.u'], {}), '(piv.u)\n', (3803, 3810), True, 'import numpy as np\n'), ((3863, 3886), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.u'], {}), '(piv.u)\n', (3879, 3886), True, 'import numpy as np\n'), ((3940, 3963), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.v'], {}), '(piv.v)\n', (3956, 3963), True, 'import numpy as np\n'), ((4016, 4039), 'numpy.count_nonzero', 'np.count_nonzero', (['piv.v'], {}), '(piv.v)\n', (4032, 4039), True, 'import numpy as np\n'), ((4584, 4606), 'numpy.sum', 'np.sum', (['dup_alpha[0:5]'], {}), '(dup_alpha[0:5])\n', (4590, 4606), True, 'import numpy as np\n'), ((4640, 4662), 'numpy.sum', 'np.sum', (['dun_alpha[0:5]'], {}), '(dun_alpha[0:5])\n', (4646, 4662), True, 'import numpy as np\n'), ((4696, 4718), 'numpy.sum', 'np.sum', (['dvp_alpha[0:5]'], {}), '(dvp_alpha[0:5])\n', (4702, 4718), True, 'import numpy as np\n'), ((4752, 4774), 'numpy.sum', 'np.sum', (['dvn_alpha[0:5]'], {}), '(dvn_alpha[0:5])\n', (4758, 4774), True, 'import numpy as np\n'), ((4970, 4999), 'numpy.log', 'np.log', (['up_alpha[1:cut_up, 0]'], {}), '(up_alpha[1:cut_up, 0])\n', (4976, 4999), True, 'import numpy as np\n'), ((5000, 5029), 'numpy.log', 'np.log', (['up_alpha[1:cut_up, 1]'], {}), '(up_alpha[1:cut_up, 1])\n', (5006, 5029), True, 'import numpy as np\n'), ((5210, 5240), 'numpy.log', 'np.log', (['(-un_alpha[1:cut_un, 0])'], {}), '(-un_alpha[1:cut_un, 0])\n', (5216, 5240), True, 'import numpy as np\n'), ((5240, 5269), 'numpy.log', 'np.log', (['un_alpha[1:cut_un, 1]'], {}), '(un_alpha[1:cut_un, 1])\n', (5246, 5269), True, 'import numpy as np\n'), ((5288, 5309), 'math.exp', 'exp', (['(-nun[1] / nun[0])'], {}), '(-nun[1] / nun[0])\n', (5291, 5309), False, 'from math import exp\n'), ((5450, 5479), 'numpy.log', 'np.log', (['vp_alpha[1:cut_vp, 0]'], {}), '(vp_alpha[1:cut_vp, 0])\n', (5456, 5479), True, 'import numpy as np\n'), ((5480, 5509), 'numpy.log', 'np.log', (['vp_alpha[1:cut_vp, 1]'], {}), '(vp_alpha[1:cut_vp, 1])\n', (5486, 5509), True, 'import numpy as np\n'), ((5690, 5720), 'numpy.log', 'np.log', (['(-vn_alpha[1:cut_vn, 0])'], {}), '(-vn_alpha[1:cut_vn, 0])\n', (5696, 5720), True, 'import numpy as np\n'), ((5720, 5749), 'numpy.log', 'np.log', (['vn_alpha[1:cut_vn, 1]'], {}), '(vn_alpha[1:cut_vn, 1])\n', (5726, 5749), True, 'import numpy as np\n'), ((5768, 5789), 'math.exp', 'exp', (['(-nvn[1] / nvn[0])'], {}), '(-nvn[1] / nvn[0])\n', (5771, 5789), False, 'from math import exp\n'), ((1128, 1141), 'numpy.min', 'np.min', (['field'], {}), '(field)\n', (1134, 1141), True, 'import numpy as np\n'), ((2678, 2691), 'numpy.min', 'np.min', (['field'], {}), '(field)\n', (2684, 2691), True, 'import numpy as np\n'), ((4917, 4940), 'numpy.sum', 'np.sum', (['indexup[0][0:5]'], {}), '(indexup[0][0:5])\n', (4923, 4940), True, 'import numpy as np\n'), ((5157, 5180), 'numpy.sum', 'np.sum', (['indexun[0][0:5]'], {}), '(indexun[0][0:5])\n', (5163, 5180), True, 'import numpy as np\n'), ((5397, 5420), 'numpy.sum', 'np.sum', (['indexvp[0][0:5]'], {}), '(indexvp[0][0:5])\n', (5403, 5420), True, 
'import numpy as np\n'), ((5637, 5660), 'numpy.sum', 'np.sum', (['indexvn[0][0:5]'], {}), '(indexvn[0][0:5])\n', (5643, 5660), True, 'import numpy as np\n'), ((917, 935), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (925, 935), True, 'import numpy as np\n'), ((1345, 1363), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (1353, 1363), True, 'import numpy as np\n'), ((2486, 2504), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (2494, 2504), True, 'import numpy as np\n'), ((2933, 2951), 'numpy.isnan', 'np.isnan', (['filtered'], {}), '(filtered)\n', (2941, 2951), True, 'import numpy as np\n'), ((964, 979), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (972, 979), True, 'import numpy as np\n'), ((1392, 1407), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (1400, 1407), True, 'import numpy as np\n'), ((2533, 2548), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (2541, 2548), True, 'import numpy as np\n'), ((2980, 2995), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (2988, 2995), True, 'import numpy as np\n')]
|
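# --- note on the cutoff formula (added): np.polyfit(log(alpha), log(frac), 1)
# above returns [k, b] for the power-law fit frac(alpha) ~ exp(b) * alpha**k,
# and exp(-b/k) solves k*log(alpha) + b = 0, i.e. it is the velocity at which
# the extrapolated cumulative fraction reaches 1; anything beyond that is
# flagged as an outlier.  A tiny self-contained check of the algebra:
import numpy as np

alpha = np.linspace(0.1, 1.0, 50)
k_true, b_true = 2.0, -1.5
frac = np.exp(b_true) * alpha ** k_true                 # exact power law
k, b = np.polyfit(np.log(alpha), np.log(frac), 1)       # recovers [k_true, b_true]
assert np.isclose(np.exp(-b / k), np.exp(-b_true / k_true))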
import urllib3
import pandas as pd
import numpy as np
import zipfile
import copy
import pickle
import os
from esig import tosig
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
from os import listdir
from os.path import isfile, join
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, confusion_matrix
def get_inputs():
url = "https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/set-a.zip"
http = urllib3.PoolManager()
r = http.request('GET', url, preload_content=False)
with open('data/input.zip', 'wb') as out:
while True:
data = r.read()
if not data:
break
out.write(data)
r.release_conn()
zip_ref = zipfile.ZipFile("data/input.zip", 'r')
zip_ref.extractall("data/")
zip_ref.close()
data = {}
list_files = [f for f in listdir(
"data/set-a") if isfile(join("data/set-a", f))]
for f in list_files:
df = pd.read_csv(join("data/set-a", f))
patient_id = int(df.values[0, 2])
data[patient_id] = df
return data
def get_outputs():
url = "https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/Outcomes-a.txt"
data_df = pd.read_csv(url)
data = {}
for patient in data_df.values:
patient_id = int(patient[0])
data[patient_id] = patient[-1]
return data
def download():
X_dict, Y_dict = get_inputs(), get_outputs()
X = []
Y = []
for patient_id in X_dict:
X.append(X_dict[patient_id])
Y.append(Y_dict[patient_id])
print("Data for %s patients downloaded." % len(X))
return X, Y
def split(X, Y, proportion=0.75):
idx = int(len(X)*proportion)
print("Dataset split in a training set of %s and testing set of %s patients." % (
idx, len(X)-idx))
return X[:idx], Y[:idx], X[idx:], Y[idx:]
def features_point(x):
static, path = x
maximums = np.max(path, axis=0)
minimums = np.min(path, axis=0)
last_observation = path[-1]
return np.concatenate([static, maximums, minimums, last_observation])
def extract(X):
return list(map(features_point, X))
def lead_lag(mylist):
leadlist = np.concatenate([[mylist[0]], mylist])
laglist = np.concatenate([mylist, [mylist[-1]]])
return np.concatenate([leadlist, laglist], axis=1)
def add_time(mylist, init_time=0., total_time=1.):
ans = [[init_time + xn * total_time /
(len(mylist)-1)] + list(x) for (xn, x) in enumerate(mylist)]
return np.array(ans)
def home_and_pen_off(mylist):
ans = [list(x) + [1.] for x in mylist]
last = list(ans[-1])
last[-1] = 0.
ans.append(last)
ans.append([0 for item in last])
return np.array(ans)
def refocus(path, centre):
return np.concatenate((centre[::-1], path), axis=0)
def train(features, Y):
classifier = RandomForestClassifier()
classifier.fit(features, Y)
return classifier
def normalise_point(x):
static, path = x
    path[:, 0] /= 2.  # timestamps are in days and records span up to 48 h, so /2 rescales time to [0, 1]
return [static, path]
def normalise(X):
return list(map(normalise_point, X))
def evaluate(classifier, features, Y):
THRESHOLD = .3
predictions_proba = classifier.predict_proba(features)[:, 1]
predictions = [1. if pred >
THRESHOLD else 0. for pred in predictions_proba]
cm = confusion_matrix(Y, predictions)
Se = cm[1, 1] / float(cm[1, 1] + cm[1, 0])
P = cm[1, 1] / float(cm[1, 1] + cm[0, 1])
score = min(Se, P)
print("Score of predictions: %s" % score)
def to_path(df, dynamic_variables):
dim = len(dynamic_variables) + 1
path = [[0.]*dim]
for event in df.values:
if event[1] in dynamic_variables:
new_value = copy.deepcopy(path[-1])
idx = 1 + dynamic_variables.index(event[1])
new_value[idx] = event[2]
hour, min = event[0].split(":")
days = (float(hour) + float(min) / 60.)/24.
new_value[0] = days
path.append(new_value)
path = np.array(path)
unique_times = np.unique(path[:, 0])
idx = []
for time in unique_times:
last_idx = np.where(path[:, 0] == time)[0][-1]
idx.append(last_idx)
path = path[idx]
return path
def static_features(df, static_variables):
return df[df["Parameter"].isin(static_variables)]["Value"].values
def reformat(X, static_variables, dynamic_variables):
for i, x in enumerate(X):
dynamic = to_path(x, dynamic_variables=dynamic_variables)
static = static_features(x, static_variables=static_variables)
X[i] = [static, dynamic]
return X
def st2si(order, stream):
if order > 1:
return(tosig.stream2sig(stream, order))
else:
if order == 1:
return np.concatenate((np.array([1]), stream[-1] - stream[0]), axis=0)
else:
return np.array([1])
def compute(X, order=2):
func = partial(st2si, order)
pool = Pool()
n_samples = len(X)
signatures = []
try:
signatures = np.array(list(tqdm(pool.imap(func, X), total=n_samples)))
except Exception as e:
print('Failed to compute signatures: ' + repr(e))
signatures = []
return signatures
def predict(classifier, url):
http = urllib3.PoolManager()
r = http.request('GET', url, preload_content=False)
with open('data/test_input.txt', 'wb') as out:
while True:
data = r.read()
if not data:
break
out.write(data)
r.release_conn()
data = {}
df = pd.read_csv("data/test_input.txt")
patient_id = int(df.values[0, 2])
data[patient_id] = df
X = []
for patient_id in data:
X.append(data[patient_id])
X = reformat(X, static_variables=["Age", "Gender"], dynamic_variables=[
"Creatinine", "Glucose"])
X = normalise(X)
X = extract(X)
    # outcome label: 1 means in-hospital death, 0 means survival
    print('Predicted result:', classifier.predict(X))
if __name__ == "__main__":
# DOWNLOAD & REFORMAT EVENT DATA, TRANSFORM TIME DEPENDENT VARIABLES
X, Y = download()
X = reformat(X, static_variables=["Age", "Gender"], dynamic_variables=[
"Creatinine", "Glucose"])
# NORMALISE & EXTRACT FEATURES
X = normalise(X)
features = extract(X)
# TRAIN THE MODEL BY SPLITING
features_train, Y_train, features_test, Y_test = split(
features, Y, proportion=0.75)
classifier = train(features_train, Y_train)
predict(classifier, 'https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/set-a/132539.txt')
# EVALUATE PERFORMANCE
evaluate(classifier, features_test, Y_test)
|
[
"os.listdir",
"copy.deepcopy",
"numpy.unique",
"zipfile.ZipFile",
"pandas.read_csv",
"numpy.where",
"os.path.join",
"sklearn.ensemble.RandomForestClassifier",
"numpy.max",
"numpy.array",
"urllib3.PoolManager",
"functools.partial",
"numpy.concatenate",
"numpy.min",
"multiprocessing.Pool",
"esig.tosig.stream2sig",
"sklearn.metrics.confusion_matrix"
] |
[((500, 521), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (519, 521), False, 'import urllib3\n'), ((784, 822), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""data/input.zip"""', '"""r"""'], {}), "('data/input.zip', 'r')\n", (799, 822), False, 'import zipfile\n'), ((1275, 1291), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (1286, 1291), True, 'import pandas as pd\n'), ((1989, 2009), 'numpy.max', 'np.max', (['path'], {'axis': '(0)'}), '(path, axis=0)\n', (1995, 2009), True, 'import numpy as np\n'), ((2025, 2045), 'numpy.min', 'np.min', (['path'], {'axis': '(0)'}), '(path, axis=0)\n', (2031, 2045), True, 'import numpy as np\n'), ((2090, 2152), 'numpy.concatenate', 'np.concatenate', (['[static, maximums, minimums, last_observation]'], {}), '([static, maximums, minimums, last_observation])\n', (2104, 2152), True, 'import numpy as np\n'), ((2250, 2287), 'numpy.concatenate', 'np.concatenate', (['[[mylist[0]], mylist]'], {}), '([[mylist[0]], mylist])\n', (2264, 2287), True, 'import numpy as np\n'), ((2302, 2340), 'numpy.concatenate', 'np.concatenate', (['[mylist, [mylist[-1]]]'], {}), '([mylist, [mylist[-1]]])\n', (2316, 2340), True, 'import numpy as np\n'), ((2352, 2395), 'numpy.concatenate', 'np.concatenate', (['[leadlist, laglist]'], {'axis': '(1)'}), '([leadlist, laglist], axis=1)\n', (2366, 2395), True, 'import numpy as np\n'), ((2575, 2588), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2583, 2588), True, 'import numpy as np\n'), ((2776, 2789), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (2784, 2789), True, 'import numpy as np\n'), ((2830, 2874), 'numpy.concatenate', 'np.concatenate', (['(centre[::-1], path)'], {'axis': '(0)'}), '((centre[::-1], path), axis=0)\n', (2844, 2874), True, 'import numpy as np\n'), ((2918, 2942), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2940, 2942), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3386, 3418), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y', 'predictions'], {}), '(Y, predictions)\n', (3402, 3418), False, 'from sklearn.metrics import roc_auc_score, confusion_matrix\n'), ((4069, 4083), 'numpy.array', 'np.array', (['path'], {}), '(path)\n', (4077, 4083), True, 'import numpy as np\n'), ((4103, 4124), 'numpy.unique', 'np.unique', (['path[:, 0]'], {}), '(path[:, 0])\n', (4112, 4124), True, 'import numpy as np\n'), ((4968, 4989), 'functools.partial', 'partial', (['st2si', 'order'], {}), '(st2si, order)\n', (4975, 4989), False, 'from functools import partial\n'), ((5001, 5007), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (5005, 5007), False, 'from multiprocessing import Pool\n'), ((5313, 5334), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (5332, 5334), False, 'import urllib3\n'), ((5612, 5646), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_input.txt"""'], {}), "('data/test_input.txt')\n", (5623, 5646), True, 'import pandas as pd\n'), ((4734, 4765), 'esig.tosig.stream2sig', 'tosig.stream2sig', (['stream', 'order'], {}), '(stream, order)\n', (4750, 4765), False, 'from esig import tosig\n'), ((919, 940), 'os.listdir', 'listdir', (['"""data/set-a"""'], {}), "('data/set-a')\n", (926, 940), False, 'from os import listdir\n'), ((1035, 1056), 'os.path.join', 'join', (['"""data/set-a"""', 'f'], {}), "('data/set-a', f)\n", (1039, 1056), False, 'from os.path import isfile, join\n'), ((3772, 3795), 'copy.deepcopy', 'copy.deepcopy', (['path[-1]'], {}), '(path[-1])\n', (3785, 3795), False, 'import copy\n'), ((4916, 
4929), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4924, 4929), True, 'import numpy as np\n'), ((960, 981), 'os.path.join', 'join', (['"""data/set-a"""', 'f'], {}), "('data/set-a', f)\n", (964, 981), False, 'from os.path import isfile, join\n'), ((4187, 4215), 'numpy.where', 'np.where', (['(path[:, 0] == time)'], {}), '(path[:, 0] == time)\n', (4195, 4215), True, 'import numpy as np\n'), ((4835, 4848), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4843, 4848), True, 'import numpy as np\n')]
|
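# --- pipeline sketch (added): lead_lag, add_time and compute above are defined
# but never wired into __main__.  The helper below is an assumption about the
# intended use -- turning each patient's dynamic path into truncated-signature
# features -- and is not code from the original record.
def signature_features(X, order=2):
    # X as produced by reformat(): a list of [static, path] pairs
    streams = [add_time(lead_lag(path)) for _, path in X]
    return compute(streams, order=order)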
#-------------------------------------------------------------------------------
# Filename: create_pics.py
# Description: cuts small square pictures out of a large, mostly empty picture,
#              to be used later as training data for a neural network.
# The parameters to fool around with include:
# factor: scaled down image for faster image processing
# sq_size: size of square that is used to construct the standard-deviation map
# cutoff: cutoff for standard deviation
# Authors: <NAME>, <NAME>
#-------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from os import listdir, path, makedirs
import argparse
import sys
# class MyParser(argparse.ArgumentParser):
# def error(self, message):
# sys.stderr.write('error: %s\n' % message)
# self.print_help()
# sys.exit(2)
def pics(from_path='raw_data',to_path='preproc_data'):
# parser = MyParser()
# parser.add_argument('input_folder', nargs='+')
# parser.add_argument('output_folder', nargs='+')
# args = parser.parse_args()
# from_path = args.input_folder[0]
if not from_path[-1]=='/':
from_path+=('/')
# to_path = args.output_folder[0]
if not to_path[-1]=='/':
to_path+=('/')
#check whether input path exists
if not path.exists(from_path):
raise IOError("input directory {0} does not exist, exiting script".format(from_path))
#possible image file extensions.
exts = ['.jpg', '.png', '.tif', '.bmp']
# input file dimensions
xdim = 1330 #2560
ydim = 884 #1920
# output file dimensions
dim = 80 #256
export_ext = '.png' #extension files will be saved
#first, find all the image file in the directory
files = listdir(from_path)
filenames = []
extensions = []
for f in files:
name, ext = path.splitext(from_path+f)
if ext in exts:
filenames.append(name)
extensions.append(ext)
print("found {0} image files in folder {1}".format(len(filenames), from_path))
total_flakes = 0
good_flakes = 0
missed_flakes = 0
#start the actual work of cutting the pictures into smaller pictures
for i, filename in enumerate(filenames):
print("starting with new image file: {0}{1}".format(filename,
extensions[i]))
#first, check for the .csv file with the coordinates of good flakes
good_ones = []
try:
with open(filename+".csv") as f:
content = f.read().splitlines()
for line in content:
good_ones.append(line.split(','))
except IOError:
print("Warning: Couldn't find file {0}.csv, assume there's no good flakes".format(filename))
# open image
full_im = Image.open(filename+extensions[i])
Lx = full_im.size[0] #x dimension of picture
Ly = full_im.size[1] #y dimension of picture
# we want to work on pictures of equal size, so if they are not the right
# size, we rescale them.
scalex = 1.
scaley = 1.
if not Lx == xdim:
scalex = float(xdim) / Lx
scaley = float(ydim) / Ly
full_im = full_im.resize((xdim, ydim))
print("picture is too big, resizing to ({0}, {1})".format(xdim, ydim))
#to speed up the whole work, we resize the image for the first step
factor = 8
lx = int(xdim/factor) # resized x dimension
ly = int(ydim/factor) # resized y dimension
small_im = full_im.resize((lx, ly))
sq_size = dim//factor # size of square in resized image
cutoff = 5 #was 2.75 # cutoff for standard deviation
#calculate the standard deviation of the black and white images
# (convert('L') returns a BW image)
stds = np.zeros((lx-sq_size, ly-sq_size))
for k in range(lx-sq_size):
for l in range(ly-sq_size):
tmp_im = small_im.crop((k, l, k+sq_size, l+sq_size))
stds[k,l] = np.std(list(tmp_im.convert('L').getdata()))
Lstds = np.reshape(stds, (lx-sq_size)*(ly-sq_size))
sorted_stds = np.argsort(Lstds)
centers = []
for j in reversed(sorted_stds):
if Lstds[j]< cutoff: break
            ix = int(j/(ly-sq_size))+sq_size//2  # integer division keeps pixel coordinates integral
            iy = j%(ly-sq_size)+sq_size//2
included = False
for c in centers:
if (abs(c[0]-ix) < sq_size) and (abs(c[1]-iy)<sq_size):
included = True
continue
if included: continue
ix = min(max(sq_size, ix), lx-sq_size)
iy = min(max(sq_size, iy), ly-sq_size)
centers.append((ix, iy))
print("identified {0} potential candidates in image {1}".format(len(centers), filename))
total_flakes += len(centers)
squares = []
coordinates = []
for c in centers:
ix = c[0]*factor
iy = c[1]*factor
coordinates.append([ix, iy])
x0 = ix - factor*sq_size
x1 = ix + factor*sq_size
y0 = iy - factor*sq_size
y1 = iy + factor*sq_size
squares.append(full_im.crop((x0, y0, x1, y1)))
if not path.exists(to_path):
print("{0} does not exist yet, creating it".format(to_path))
makedirs(to_path)
found = np.zeros(len(good_ones)) # to make sure we found all good ones
for k in range(len(squares)):
x = coordinates[k][0]
y = coordinates[k][1]
bad = True
name = filename.split('/')[-1]
for j, good in enumerate(good_ones):
g0 = scalex*float(good[0])
g1 = scaley*float(good[1])
if (abs(g0-x) < factor*sq_size) and (abs(g1-y)<factor*sq_size):
this_file = to_path+name+"_" + str(coordinates[k][0])\
+ "_" + str(coordinates[k][1])+"_0A"+ export_ext
squares[k].resize((dim, dim)).save(this_file)
for t in range(5):
this_file = to_path+name + "_" + str(coordinates[k][0]) + \
"_" + str(coordinates[k][1])+"_{0}A".format(t+1)+ export_ext
squares[k].transpose(t).resize((dim, dim)).save(this_file)
found[j]=1
bad = False
good_flakes += 1
if not bad: continue
this_file = to_path + name +"_" + str(coordinates[k][0]) + "_" + \
str(coordinates[k][1])+"_B" + export_ext
squares[k].resize((dim, dim)).save(this_file)
if np.sum(found)<len(good_ones):
missed_flakes += len(good_ones) - np.sum(found)
print("Warning: We have missed a good one in {0}".format(filename))
print("(should have found {0}, found {1}instead".format( \
len(good_ones), np.sum(found)))
print("")
print("total flakes found: {0}".format(total_flakes))
print("of which are good : {0}".format(good_flakes))
print("good flakes missed: {0}".format(int(missed_flakes)))
|
[
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"numpy.reshape",
"os.makedirs",
"os.path.splitext",
"numpy.argsort",
"numpy.sum",
"numpy.zeros"
] |
[((1785, 1803), 'os.listdir', 'listdir', (['from_path'], {}), '(from_path)\n', (1792, 1803), False, 'from os import listdir, path, makedirs\n'), ((1343, 1365), 'os.path.exists', 'path.exists', (['from_path'], {}), '(from_path)\n', (1354, 1365), False, 'from os import listdir, path, makedirs\n'), ((1883, 1911), 'os.path.splitext', 'path.splitext', (['(from_path + f)'], {}), '(from_path + f)\n', (1896, 1911), False, 'from os import listdir, path, makedirs\n'), ((2826, 2862), 'PIL.Image.open', 'Image.open', (['(filename + extensions[i])'], {}), '(filename + extensions[i])\n', (2836, 2862), False, 'from PIL import Image\n'), ((3863, 3901), 'numpy.zeros', 'np.zeros', (['(lx - sq_size, ly - sq_size)'], {}), '((lx - sq_size, ly - sq_size))\n', (3871, 3901), True, 'import numpy as np\n'), ((4132, 4181), 'numpy.reshape', 'np.reshape', (['stds', '((lx - sq_size) * (ly - sq_size))'], {}), '(stds, (lx - sq_size) * (ly - sq_size))\n', (4142, 4181), True, 'import numpy as np\n'), ((4198, 4215), 'numpy.argsort', 'np.argsort', (['Lstds'], {}), '(Lstds)\n', (4208, 4215), True, 'import numpy as np\n'), ((5304, 5324), 'os.path.exists', 'path.exists', (['to_path'], {}), '(to_path)\n', (5315, 5324), False, 'from os import listdir, path, makedirs\n'), ((5411, 5428), 'os.makedirs', 'makedirs', (['to_path'], {}), '(to_path)\n', (5419, 5428), False, 'from os import listdir, path, makedirs\n'), ((6750, 6763), 'numpy.sum', 'np.sum', (['found'], {}), '(found)\n', (6756, 6763), True, 'import numpy as np\n'), ((6826, 6839), 'numpy.sum', 'np.sum', (['found'], {}), '(found)\n', (6832, 6839), True, 'import numpy as np\n'), ((7027, 7040), 'numpy.sum', 'np.sum', (['found'], {}), '(found)\n', (7033, 7040), True, 'import numpy as np\n')]
|
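# --- usage sketch (added): the per-image "<name>.csv" files are parsed with
# line.split(',') and read back as float(good[0]), float(good[1]), so each line
# is expected to hold the x,y pixel coordinates of one good flake in the
# original (unscaled) image.  Crops matching a good flake are saved with an
# "_0A".."_5A" suffix (original plus five transposed augmentations), everything
# else with a "_B" suffix.
if __name__ == '__main__':
    pics(from_path='raw_data', to_path='preproc_data')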
"""
Helper functions for the tests
"""
import os
import numpy as np
from msl.io import read
def read_sample(filename, **kwargs):
"""Read a file in the 'samples' directory.
Parameters
----------
filename : str
The name of the file in the samples/ directory
Returns
-------
A root object
"""
return read(os.path.join(os.path.dirname(__file__), 'samples', filename), **kwargs)
def metadata_equal(m1, m2):
"""Assert that two Metadata objects are equal."""
assert len(m1) == len(m2)
for k1, v1 in m1.items():
v2 = m2[k1]
if isinstance(v1, (list, tuple, np.ndarray)):
assert np.array_equal(v1, v2), '{}\n{}'.format(v1, v2)
else:
assert v1 == v2, '{} != {}'.format(v1, v2)
return True
def datasets_equal(d1, d2):
"""Assert that two Dataset objects are equal."""
assert d1.name == d2.name, '{} != {}'.format(d1.name, d2.name)
assert np.array_equal(d1.data, d2.data), '{}\n{}'.format(d1.data, d2.data)
assert metadata_equal(d1.metadata, d2.metadata)
return True
def roots_equal(r1, r2):
"""Assert that two Root objects are equal."""
assert metadata_equal(r1.metadata, r2.metadata)
groups1 = list(r1.groups())
groups1.sort(key=lambda x: x.name)
groups2 = list(r2.groups())
groups2.sort(key=lambda x: x.name)
assert len(groups1) == len(groups2)
for g1, g2 in zip(groups1, groups2):
assert g1.name == g2.name, '{} != {}'.format(g1.name, g2.name)
assert metadata_equal(g1.metadata, g2.metadata)
datasets1 = list(r1.datasets())
datasets1.sort(key=lambda x: x.name)
datasets2 = list(r2.datasets())
datasets2.sort(key=lambda x: x.name)
assert len(datasets1) == len(datasets2)
for d1, d2 in zip(datasets1, datasets2):
assert datasets_equal(d1, d2)
return True
|
[
"os.path.dirname",
"numpy.array_equal"
] |
[((955, 987), 'numpy.array_equal', 'np.array_equal', (['d1.data', 'd2.data'], {}), '(d1.data, d2.data)\n', (969, 987), True, 'import numpy as np\n'), ((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((661, 683), 'numpy.array_equal', 'np.array_equal', (['v1', 'v2'], {}), '(v1, v2)\n', (675, 683), True, 'import numpy as np\n')]
|
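# --- usage sketch (added): a typical test built on the helpers above.  The
# sample file name is hypothetical; any file present in tests/samples/ works.
def test_read_sample_is_deterministic():
    root1 = read_sample('my_file.h5')  # hypothetical sample file
    root2 = read_sample('my_file.h5')
    assert roots_equal(root1, root2)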
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 19 17:35:09 2016
@author: yxl
"""
from imagepy.core.engine import Tool
import numpy as np
from imagepy.core.manager import ColorManager
from imagepy.core.draw.fill import floodfill
class Plugin(Tool):
title = 'Flood Fill'
para = {'tor':10, 'con':'8-connect'}
    view = [(int, 'tor', (0,1000), 0, 'tolerance', 'value'),
(list, 'con', ['4-connect', '8-connect'], str, 'fill', 'pix')]
def mouse_down(self, ips, x, y, btn, **key):
ips.snapshot()
msk = floodfill(ips.img, x, y, self.para['tor'], self.para['con']=='8-connect')
#plt.imshow(msk)
#plt.show()
color = ColorManager.get_front()
        if ips.get_nchannels() == 1:
            color = np.mean(color)
ips.img[msk] = color
ips.update()
def mouse_up(self, ips, x, y, btn, **key):
pass
def mouse_move(self, ips, x, y, btn, **key):
pass
def mouse_wheel(self, ips, x, y, d, **key):
pass
|
[
"imagepy.core.manager.ColorManager.get_front",
"numpy.mean",
"imagepy.core.draw.fill.floodfill"
] |
[((549, 624), 'imagepy.core.draw.fill.floodfill', 'floodfill', (['ips.img', 'x', 'y', "self.para['tor']", "(self.para['con'] == '8-connect')"], {}), "(ips.img, x, y, self.para['tor'], self.para['con'] == '8-connect')\n", (558, 624), False, 'from imagepy.core.draw.fill import floodfill\n'), ((684, 708), 'imagepy.core.manager.ColorManager.get_front', 'ColorManager.get_front', ([], {}), '()\n', (706, 708), False, 'from imagepy.core.manager import ColorManager\n'), ((751, 765), 'numpy.mean', 'np.mean', (['color'], {}), '(color)\n', (758, 765), True, 'import numpy as np\n')]
|
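# --- algorithm sketch (added): floodfill comes from imagepy.core.draw.fill;
# judging by the call above its signature is (img, x, y, tolerance,
# eight_connected) and it returns a boolean mask.  Below is a minimal BFS
# illustration of such a tolerance flood fill, not the imagepy implementation.
import numpy as np
from collections import deque

def floodfill_sketch(img, x, y, tor, con8=True):
    h, w = img.shape[:2]
    seed = img[y, x].astype(float)          # note: row index is y, column is x
    msk = np.zeros((h, w), dtype=bool)
    steps = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    if con8:
        steps += [(-1, -1), (-1, 1), (1, -1), (1, 1)]
    queue = deque([(y, x)])
    msk[y, x] = True
    while queue:
        r, c = queue.popleft()
        for dr, dc in steps:
            nr, nc = r + dr, c + dc
            if (0 <= nr < h and 0 <= nc < w and not msk[nr, nc]
                    and np.all(np.abs(img[nr, nc] - seed) <= tor)):
                msk[nr, nc] = True
                queue.append((nr, nc))
    return msk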
from typing import Union
from scipy.spatial.qhull import Delaunay
from shapely.geometry import LineString
from subsurface.structs.base_structures import StructuredData
import numpy as np
try:
import segyio
segyio_imported = True
except ImportError:
segyio_imported = False
def read_in_segy(filepath: str, coords=None) -> StructuredData:
"""Reader for seismic data stored in sgy/segy files
Args:
filepath (str): the path of the sgy/segy file
coords (dict): If data is a numpy array coords provides the values for
the xarray dimension. These dimensions are 'x', 'y' and 'z'
Returns: a StructuredData object with data, the traces with samples written into an xr.Dataset, optionally with
labels defined by coords
"""
    if not segyio_imported:
        # the import guard above sets this flag but it was never checked
        raise ImportError("segyio is required to read sgy/segy files")
    segyfile = segyio.open(filepath, ignore_geometry=True)
data = np.asarray([np.copy(tr) for tr in segyfile.trace[:]])
sd = StructuredData.from_numpy(data) # data holds traces * (samples per trace) values
segyfile.close()
return sd
def create_mesh_from_coords(coords: Union[dict, LineString],
zmin: Union[float, int], zmax: Union[float, int] = 0.0):
"""Creates a mesh for plotting StructuredData
Args:
coords (Union[dict, LineString]): the x and y, i.e. latitude and longitude, location of the traces of the seismic profile
zmax (float): the maximum elevation of the seismic profile, by default 0.0
zmin (float): the location in z where the lowest sample was taken
Returns: vertices and faces for creating an UnstructuredData object
"""
if type(coords) == LineString:
linestring = coords
n = len(list(linestring.coords))
coords = np.array([[x[0] for x in list(linestring.coords)],
[y[1] for y in list(linestring.coords)]]).T
else:
n = len(coords['x'])
coords = np.array([coords['x'],
coords['y']]).T
# duplicating the line, once with z=lower and another with z=upper values
vertices = np.zeros((2*n, 3))
vertices[:n, :2] = coords
vertices[:n, 2] = zmin
vertices[n:, :2] = coords
vertices[n:, 2] = zmax
# i+n --- i+n+1
# |\ |
# | \ |
# | \ |
# | \ |
# i --- i+1
tri = Delaunay(vertices[:, [0, 2]])
faces = tri.simplices
return vertices, faces
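# Minimal usage sketch (hypothetical trace coordinates):
#   from shapely.geometry import LineString
#   profile = LineString([(0.0, 0.0), (100.0, 50.0), (200.0, 75.0)])
#   vertices, faces = create_mesh_from_coords(profile, zmin=-500.0)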
|
[
"numpy.copy",
"subsurface.structs.base_structures.StructuredData.from_numpy",
"numpy.array",
"numpy.zeros",
"scipy.spatial.qhull.Delaunay",
"segyio.open"
] |
[((793, 836), 'segyio.open', 'segyio.open', (['filepath'], {'ignore_geometry': '(True)'}), '(filepath, ignore_geometry=True)\n', (804, 836), False, 'import segyio\n'), ((913, 944), 'subsurface.structs.base_structures.StructuredData.from_numpy', 'StructuredData.from_numpy', (['data'], {}), '(data)\n', (938, 944), False, 'from subsurface.structs.base_structures import StructuredData\n'), ((2065, 2085), 'numpy.zeros', 'np.zeros', (['(2 * n, 3)'], {}), '((2 * n, 3))\n', (2073, 2085), True, 'import numpy as np\n'), ((2310, 2339), 'scipy.spatial.qhull.Delaunay', 'Delaunay', (['vertices[:, [0, 2]]'], {}), '(vertices[:, [0, 2]])\n', (2318, 2339), False, 'from scipy.spatial.qhull import Delaunay\n'), ((861, 872), 'numpy.copy', 'np.copy', (['tr'], {}), '(tr)\n', (868, 872), True, 'import numpy as np\n'), ((1906, 1942), 'numpy.array', 'np.array', (["[coords['x'], coords['y']]"], {}), "([coords['x'], coords['y']])\n", (1914, 1942), True, 'import numpy as np\n')]
|
#!/usr/bin/python
#author: zhaofeng-shu33
import numpy as np
from ace_cream import ace_cream
def pearson_correlation(X,Y):
    return (np.mean(X * Y, axis=0) - np.mean(X, axis=0) * np.mean(Y, axis=0)) / (np.std(X, axis=0) * np.std(Y, axis=0))
if __name__ == '__main__':
N_SIZE = 1000
ERROR_PROBABILITY = 0.1
x = np.random.choice([0,1],size=N_SIZE)
y = np.random.uniform(size=N_SIZE)
for i in range(len(x)):
if(y[i] < ERROR_PROBABILITY):
y[i] = 2
else:
y[i] = x[i]
dic_Y = {0:6, 1:8, 2:3}
dic_X = {0:7, 1:9}
for i in range(len(y)):
y[i] = dic_Y[y[i]]
x[i] = dic_X[x[i]]
print('rho(x,y)',pearson_correlation(x,y))
    # use the Fortran ACE implementation by the author of the original 1985 article
tx, ty = ace_cream(x, y, cat = [-1,0])
print('mapped X symbol list: ')
print(np.unique(tx))
print('mapped Y symbol list: ')
print(np.unique(ty))
print('mean(tx) = %f, std(tx) = %f'%(np.mean(tx), np.std(tx)))
print('mean(ty) = %f, std(ty) = %f'%(np.mean(ty), np.std(ty)))
print('rho(tx,ty)',pearson_correlation(tx,ty))
# matches theoretical result: np.sqrt(1-ERROR_PROBABILITY)
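    # e.g. ERROR_PROBABILITY = 0.1 gives np.sqrt(0.9) ≈ 0.9487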
|
[
"numpy.mean",
"numpy.unique",
"numpy.random.choice",
"ace_cream.ace_cream",
"numpy.std",
"numpy.random.uniform"
] |
[((332, 369), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'N_SIZE'}), '([0, 1], size=N_SIZE)\n', (348, 369), True, 'import numpy as np\n'), ((376, 406), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'N_SIZE'}), '(size=N_SIZE)\n', (393, 406), True, 'import numpy as np\n'), ((770, 798), 'ace_cream.ace_cream', 'ace_cream', (['x', 'y'], {'cat': '[-1, 0]'}), '(x, y, cat=[-1, 0])\n', (779, 798), False, 'from ace_cream import ace_cream\n'), ((846, 859), 'numpy.unique', 'np.unique', (['tx'], {}), '(tx)\n', (855, 859), True, 'import numpy as np\n'), ((907, 920), 'numpy.unique', 'np.unique', (['ty'], {}), '(ty)\n', (916, 920), True, 'import numpy as np\n'), ((137, 159), 'numpy.mean', 'np.mean', (['(X * Y)'], {'axis': '(0)'}), '(X * Y, axis=0)\n', (144, 159), True, 'import numpy as np\n'), ((207, 224), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (213, 224), True, 'import numpy as np\n'), ((229, 246), 'numpy.std', 'np.std', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (235, 246), True, 'import numpy as np\n'), ((159, 177), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (166, 177), True, 'import numpy as np\n'), ((181, 199), 'numpy.mean', 'np.mean', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (188, 199), True, 'import numpy as np\n'), ((964, 975), 'numpy.mean', 'np.mean', (['tx'], {}), '(tx)\n', (971, 975), True, 'import numpy as np\n'), ((977, 987), 'numpy.std', 'np.std', (['tx'], {}), '(tx)\n', (983, 987), True, 'import numpy as np\n'), ((1031, 1042), 'numpy.mean', 'np.mean', (['ty'], {}), '(ty)\n', (1038, 1042), True, 'import numpy as np\n'), ((1044, 1054), 'numpy.std', 'np.std', (['ty'], {}), '(ty)\n', (1050, 1054), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import math
import datetime
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
class Indicators():
def __init__(self, dataframe, params = []):
self.dataframe = dataframe
self.params = params
self.dataframe['return'] = 0
for i in range(1,len(dataframe['return'])):
#http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
dataframe.loc[i,'return'] = (self.dataframe.loc[i,'open']-self.dataframe.loc[i-1,'open'])/self.dataframe.loc[i-1,'open']
self.Return = dataframe['return']
self.dataframe['time'] = dataframe['tradeDate']
self.dataframe['cumulative_return'] = self.dataframe['open']
self.dataframe['cumulative_return'] = self.dataframe['cumulative_return']/self.dataframe.loc[0,'open']
self.dataframe['cumulative_return'] = dataframe['cumulative_return']#*1000000
self.dataframe.index = pd.to_datetime(dataframe['tradeDate'])#!!!!!
        # split the data by calendar year
self.year_slice = {}
i = 0
y = time.strptime(self.dataframe['time'].iat[0],"%Y-%m-%d").tm_year
for j in range(1,len(self.dataframe)):
if y != time.strptime(self.dataframe['time'].iat[j],"%Y-%m-%d").tm_year:
self.year_slice[str(y)] = dataframe[i:j-1]
y = time.strptime(self.dataframe['time'].iat[j],"%Y-%m-%d").tm_year
i = j
self.year_slice[str(y)] = dataframe[i:]
    ### annualized return
def annual_return(self,asset,year):
R = self.year_slice[year][asset].iat[-1]/self.year_slice[year][asset].iat[0]
t1 = time.strptime(self.year_slice[year]['time'].iat[0],"%Y-%m-%d")
t2 = time.strptime(self.year_slice[year]['time'].iat[-1],"%Y-%m-%d")
d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
        d2 = datetime.datetime(t2.tm_year, t2.tm_mon, t2.tm_mday)
n = (d2-d1).days
n = n/244
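        # n now approximates the holding period in years (244 trading days per year);
        # e.g. R = 1.2 over n = 0.5 annualizes to 1.2 ** (1 / 0.5) - 1 = 0.44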
# print('The annual return for %s in %s is %f' %(asset,year,math.pow(R, 1/n)-1))
return math.pow(R, 1/n)-1
    ### maximum drawdown
def max_draw(self,asset,year):
self.year_slice[year]['max'] = 0
self.year_slice[year].ix[0,'max'] = self.year_slice[year].ix[0,asset]#loc, iloc, and ix
for i in range(1, len(self.year_slice[year][asset])):
if self.year_slice[year].ix[i, asset] > self.year_slice[year].ix[i-1, 'max']:
self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i, asset]
else:
self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i-1, 'max']
self.year_slice[year]['retreat']=(self.year_slice[year][asset]- self.year_slice[year]['max'])/self.year_slice[year]['max']
print('The max draw for %s in %s is %f' %(asset,year,abs(min(self.year_slice[year]['retreat']))))
return abs(min(self.year_slice[year]['retreat']))
    ### volatility
def volatility(self,asset,year):
print('The volatility for %s in %s is %f' %(asset,year,np.std(self.year_slice[year][asset])*math.sqrt(244/len(self.year_slice[year][asset]))))
return np.std(self.year_slice[year][asset])*math.sqrt(244/len(self.year_slice[year][asset]))
    ### Sharpe ratio
def sharp(self, asset,no_risk_R,year):
print('The Sharp Ratio for %s in %s is %.7f' %(asset,year,(self.annual_return(asset,year)-no_risk_R)/(self.volatility(asset,year)*math.sqrt(244/len(self.year_slice[year][asset]))+1e-10)))
return (self.annual_return(asset,year)-no_risk_R)/(self.volatility(asset,year)*math.sqrt(244/len(self.year_slice[year][asset]))+1e-10)
    ### Calmar ratio
def calmar(self,asset,year):
print('The Calmar Ratio for %s in %s is %f' %(asset,year,self.annual_return(asset,year)/self.max_draw(asset,year)))
return self.annual_return(asset,year)/self.max_draw(asset,year)
    ### daily win ratio
def daily_win_ratio(self,asset,year):
        # conditional selection on a DataFrame is self.dataframe[self.dataframe[asset] > 0][asset], not self.dataframe[asset][self.dataframe[asset] > 0]
#!!
pnl = asset.replace('asset','pnl')
n1 = len(self.year_slice[year][self.year_slice[year][pnl] > 0][pnl])
n2 = len(self.year_slice[year][pnl])
print('The daily win ratio for %s in %s is %f' %(asset,year,n1/n2))
return n1/n2
    ### daily profit/loss ratio
def win_lose_ratio(self,asset,year):
self.year_slice[year]['dif'] = self.year_slice[year][asset] - self.year_slice[year][asset].shift(1)
print('The win lose ratio for %s in %s is %f' %(asset,year,abs(min(self.year_slice[year]['retreat']))))
return abs(sum(self.year_slice[year][self.year_slice[year]['dif']>0]['dif']))/abs(sum(self.year_slice[year][self.year_slice[year]['dif']<0]['dif']))
    ### worst drawdown interval
def worst_draw_interval(self,asset,year):
self.year_slice[year]['max'] = 0
self.year_slice[year].ix[0,'max'] = self.year_slice[year].ix[0,asset]
self.year_slice[year]['max_time'] = self.year_slice[year]['time']
for i in range(1, len(self.year_slice[year][asset])):
if self.year_slice[year].ix[i, asset] > self.year_slice[year].ix[i-1, 'max']:
self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i, asset]
else:
self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i-1, 'max']
self.year_slice[year].ix[i, 'max_time'] = self.year_slice[year].ix[i-1, 'max_time']
self.year_slice[year]['retreat']=(self.year_slice[year][asset]- self.year_slice[year]['max'])/self.year_slice[year]['max']
max_draw = min(self.year_slice[year]['retreat'])
data = self.year_slice[year][self.year_slice[year]['retreat'] == max_draw]
        t1 = data['tradeDate']
t2 = data['max_time']
#print('The worst draw interval for %s in %s is %s %s' %(asset,year,str(t1),str(t2)))
return t1,t2
    ### total turnover
def total_turnover(self,asset,year):
turnover = asset.replace('asset','turnover')
print('The total turnover for %s in %s is %f' %(asset,year,sum(self.year_slice[year][turnover])))
return sum(self.year_slice[year][turnover])
    ### average daily turnover
def average_daily_turnover(self,asset,year):
t1 = time.strptime(self.year_slice[year]['time'].iat[0],"%Y-%m-%d")
t2 = time.strptime(self.year_slice[year]['time'].iat[-1],"%Y-%m-%d")
d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
        d2 = datetime.datetime(t2.tm_year, t2.tm_mon, t2.tm_mday)
n = (d2-d1).days
print('The average daily turnover for %s in %s is %f' %(asset,year,self.total_turnover(asset,year)/n))
return self.total_turnover(asset,year)/n
    ### average daily position
def average_daily_position(self,asset,year):
position = asset.replace('asset','position')
print('The average daily position for %s in %s is %f' %(asset,year,self.year_slice[year][position].mean()))
return self.year_slice[year][position].mean()
    ### average return per trade
def minor_average_return(self,asset,year):
position = asset.replace('asset','position')
sum_pos = sum(self.year_slice[year][self.year_slice[year][position]!=0][position])
num = len(self.year_slice[year][self.year_slice[year][position]!=0][position])
print('The minor average return for %s in %s is %f' %(asset,year,sum_pos/num))
return sum_pos/num
def write_indicators_concat(self,path):
frames = []
for items in self.year_slice:
temp_data = []
temp_index = []
for k in self.params:
x = [items,
self.annual_return('asset'+ str(k),items),
self.max_draw('asset'+ str(k),items),
self.volatility('asset'+ str(k),items),
self.sharp('asset'+ str(k),0,items),
self.calmar('asset'+ str(k),items),
self.daily_win_ratio('asset'+ str(k),items),
self.win_lose_ratio('asset'+ str(k),items),
self.total_turnover('asset'+ str(k),items),
self.average_daily_turnover('asset'+ str(k),items),
self.average_daily_position('asset'+ str(k),items),
self.minor_average_return('asset'+ str(k),items)]
temp_data.append(x)
temp_index.append('asset'+ str(k))
DataFrame = pd.DataFrame(temp_data,index=temp_index,columns=['year','annual_return', 'max_draw', 'volatility', 'sharp','calmar','daily_win_ratio','win_lose_ratio','total_turnover','average_daily_turnover','average_daily_position','minor_average_return'])
frames.append(DataFrame)
DataFrame = pd.concat(frames)
DataFrame.to_csv(path_or_buf=path)
def plot_figure(self,asset_num):
t1 = time.strptime(self.dataframe['time'].iat[0],"%Y-%m-%d")
t2 = time.strptime(self.dataframe['time'].iat[-1],"%Y-%m-%d")
d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
        d2 = datetime.datetime(t2.tm_year, t2.tm_mon, t2.tm_mday)
plt.figure()
plt.subplots_adjust(hspace=1, wspace=1)
plt.subplot(3,1,1)
self.dataframe['asset'+ str(asset_num)].plot(legend = True)
        self.dataframe['cumulative_return'].plot(kind='line', legend=True)
plt.subplot(3,1,2)
f2 = plt.bar(range(len(self.dataframe['transaction'+ str(asset_num)])), self.dataframe['transaction'+ str(asset_num)].tolist(),tick_label= None,label='transaction'+ str(asset_num))
plt.legend((f2,),('transaction'+ str(asset_num),))
plt.subplot(3,1,3)
f3 = plt.bar(range(len(self.dataframe['pnl'+ str(asset_num)])),self.dataframe['pnl'+ str(asset_num)].tolist(),label='pnl'+ str(asset_num))
plt.legend((f3,),('pnl'+ str(asset_num),))
plt.show()
if __name__=='__main__':
    # Indicators expects a DataFrame, so load the CSV first
    indicators = Indicators(pd.read_csv('/Users/zhubaobao/Documents/Quant/ZXJT/total3.csv'), [5, 10, 20])
#indicators.write_indicators_concat('/Users/zhubaobao/Documents/Quant/ZXJT/write_indicators.csv')
indicators.plot_figure(10)
|
[
"datetime.datetime",
"matplotlib.pyplot.subplots_adjust",
"time.strptime",
"math.pow",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"numpy.std",
"pandas.DataFrame",
"pandas.concat",
"warnings.filterwarnings",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] |
[((151, 184), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (174, 184), False, 'import warnings\n'), ((1033, 1071), 'pandas.to_datetime', 'pd.to_datetime', (["dataframe['tradeDate']"], {}), "(dataframe['tradeDate'])\n", (1047, 1071), True, 'import pandas as pd\n'), ((1704, 1767), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[0], '%Y-%m-%d')\n", (1717, 1767), False, 'import time\n'), ((1780, 1844), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[-1]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[-1], '%Y-%m-%d')\n", (1793, 1844), False, 'import time\n'), ((1857, 1909), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't1.tm_mon', 't1.tm_mday'], {}), '(t1.tm_year, t1.tm_mon, t1.tm_mday)\n', (1874, 1909), False, 'import datetime\n'), ((1923, 1975), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't2.tm_mon', 't2.tm_mday'], {}), '(t1.tm_year, t2.tm_mon, t2.tm_mday)\n', (1940, 1975), False, 'import datetime\n'), ((6236, 6299), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[0], '%Y-%m-%d')\n", (6249, 6299), False, 'import time\n'), ((6312, 6376), 'time.strptime', 'time.strptime', (["self.year_slice[year]['time'].iat[-1]", '"""%Y-%m-%d"""'], {}), "(self.year_slice[year]['time'].iat[-1], '%Y-%m-%d')\n", (6325, 6376), False, 'import time\n'), ((6389, 6441), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't1.tm_mon', 't1.tm_mday'], {}), '(t1.tm_year, t1.tm_mon, t1.tm_mday)\n', (6406, 6441), False, 'import datetime\n'), ((6455, 6507), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't2.tm_mon', 't2.tm_mday'], {}), '(t1.tm_year, t2.tm_mon, t2.tm_mday)\n', (6472, 6507), False, 'import datetime\n'), ((8663, 8680), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (8672, 8680), True, 'import pandas as pd\n'), ((8775, 8831), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[0], '%Y-%m-%d')\n", (8788, 8831), False, 'import time\n'), ((8844, 8901), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[-1]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[-1], '%Y-%m-%d')\n", (8857, 8901), False, 'import time\n'), ((8914, 8966), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't1.tm_mon', 't1.tm_mday'], {}), '(t1.tm_year, t1.tm_mon, t1.tm_mday)\n', (8931, 8966), False, 'import datetime\n'), ((8980, 9032), 'datetime.datetime', 'datetime.datetime', (['t1.tm_year', 't2.tm_mon', 't2.tm_mday'], {}), '(t1.tm_year, t2.tm_mon, t2.tm_mday)\n', (8997, 9032), False, 'import datetime\n'), ((9041, 9053), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9051, 9053), True, 'import matplotlib.pyplot as plt\n'), ((9062, 9101), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)', 'wspace': '(1)'}), '(hspace=1, wspace=1)\n', (9081, 9101), True, 'import matplotlib.pyplot as plt\n'), ((9111, 9131), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (9122, 9131), True, 'import matplotlib.pyplot as plt\n'), ((9628, 9648), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (9639, 9648), True, 'import matplotlib.pyplot as plt\n'), ((9904, 9924), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (9915, 9924), True, 'import matplotlib.pyplot as plt\n'), ((10130, 10140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10138, 10140), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1203), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[0]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[0], '%Y-%m-%d')\n", (1160, 1203), False, 'import time\n'), ((2122, 2140), 'math.pow', 'math.pow', (['R', '(1 / n)'], {}), '(R, 1 / n)\n', (2130, 2140), False, 'import math\n'), ((3176, 3212), 'numpy.std', 'np.std', (['self.year_slice[year][asset]'], {}), '(self.year_slice[year][asset])\n', (3182, 3212), True, 'import numpy as np\n'), ((8363, 8627), 'pandas.DataFrame', 'pd.DataFrame', (['temp_data'], {'index': 'temp_index', 'columns': "['year', 'annual_return', 'max_draw', 'volatility', 'sharp', 'calmar', 'daily_win_ratio', 'win_lose_ratio', 'total_turnover', 'average_daily_turnover', 'average_daily_position', 'minor_average_return']"}), "(temp_data, index=temp_index, columns=['year', 'annual_return',\n 'max_draw', 'volatility', 'sharp', 'calmar', 'daily_win_ratio',\n 'win_lose_ratio', 'total_turnover', 'average_daily_turnover',\n 'average_daily_position', 'minor_average_return'])\n", (8375, 8627), True, 'import pandas as pd\n'), ((1278, 1334), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[j]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[j], '%Y-%m-%d')\n", (1291, 1334), False, 'import time\n'), ((1422, 1478), 'time.strptime', 'time.strptime', (["self.dataframe['time'].iat[j]", '"""%Y-%m-%d"""'], {}), "(self.dataframe['time'].iat[j], '%Y-%m-%d')\n", (1435, 1478), False, 'import time\n'), ((3073, 3109), 'numpy.std', 'np.std', (['self.year_slice[year][asset]'], {}), '(self.year_slice[year][asset])\n', (3079, 3109), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
import csv
import datetime
import math
from tabulate import tabulate
import scipy.stats as st
from tqdm import tqdm
import numpy as np
np.seterr(all='ignore')
def isfloat(val):
try:
val = float(val)
if math.isnan(val):
return False
return True
except:
return False
class Describe:
def __init__(self, filename):
self.filename = filename
self.content = []
self.listed = {}
self.mean = {}
self.count = {}
self.columns = []
self.min = {}
self.max = {}
self.std = {}
self.Q25 = {}
self.Q50 = {}
self.Q75 = {}
self.iqr = {}
self.range = {}
self.best_dist = {}
self.dist_params = {}
self.dist_pval = {}
def ReadFile(self):
with open(self.filename, 'r') as file:
coco = csv.DictReader(file)
for row in coco:
del row['Index']
newrow = {}
for k, v in row.items():
if isfloat(v):
newrow[k] = float(v)
if k not in self.listed.keys():
self.listed[k] = [float(v)]
else:
self.listed[k] += [float(v)]
elif k == 'Birthday':
split = v.split('-')
year, month, day = int(split[0]), int(split[1]), int(split[2])
newrow[k] = datetime.datetime(year, month, day, 0, 0).timestamp()
if k not in self.listed.keys():
self.listed[k] = [newrow[k]]
else:
self.listed[k] += [newrow[k]]
self.content += [newrow]
def FilterNumerics(self):
for k, v in self.content[0].items():
try:
float(v)
self.columns += [k]
self.mean[k] = 0
self.count[k] = 0
self.std[k] = 0
self.min[k] = 0
self.max[k] = 0
except:
pass
def GetCount(self):
for x in self.content:
for k, v in x.items():
self.count[k] += 1
def GetMean(self):
for x in self.content:
for k, v in x.items():
self.mean[k] += v / self.count[k]
def GetStd(self):
for x in self.content:
for k, v in x.items():
self.std[k] += (v - self.mean[k]) ** 2 / self.count[k]
for k, v in self.std.items():
self.std[k] = math.sqrt(self.std[k])
def GetQMinMax(self):
for k in self.listed.keys():
self.listed[k] = sorted(self.listed[k])
if self.listed[k] != []:
self.min[k] = self.listed[k][0]
self.max[k] = self.listed[k][-1]
self.range[k] = self.max[k] - self.min[k]
else:
continue
L25 = (self.count[k] + 1) * 0.25
L50 = (self.count[k] + 1) * 0.5
L75 = (self.count[k] + 1) * 0.75
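            # Lp = (count + 1) * p is the fractional rank of the p-th percentile;
            # the value is interpolated linearly between the two neighboring order statistics.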
try:
P25 = self.listed[k][int(L25)] + (L25 - int(L25)) * (self.listed[k][int(L25) + 1] - self.listed[k][int(L25)])
                P50 = self.listed[k][int(L50)] + (L50 - int(L50)) * (self.listed[k][int(L50) + 1] - self.listed[k][int(L50)])
                P75 = self.listed[k][int(L75)] + (L75 - int(L75)) * (self.listed[k][int(L75) + 1] - self.listed[k][int(L75)])
except:
P25 = self.listed[k][0]
P50 = self.listed[k][0]
P75 = self.listed[k][0]
self.Q25[k] = P25
self.Q50[k] = P50
self.Q75[k] = P75
self.iqr[k] = P75 - P25
def get_best_distribution(self):
dist_names = ["norm", "exponweib", "weibull_max", "weibull_min", "pareto", "genextreme"]
dist_results = []
params = {}
with tqdm(total=len(self.listed.keys()) * len(dist_names)) as tq:
            for k in self.listed.keys():
                dist_results = []  # reset per column so each column selects its own best fit
                for dist_name in dist_names:
dist = getattr(st, dist_name)
param = dist.fit(self.listed[k])
params[dist_name] = param
# Applying the Kolmogorov-Smirnov test
D, p = st.kstest(self.listed[k], dist_name, args=param)
dist_results.append((dist_name, p))
tq.update(1)
# select the best fitted distribution
best_dist, best_p = (max(dist_results, key=lambda item: item[1]))
self.best_dist[k] = best_dist
            self.dist_params[k] = params[best_dist]
self.dist_pval[k] = best_p
def Describe(self):
self.GetCount()
self.GetMean()
self.GetStd()
self.GetQMinMax()
if len(sys.argv) > 2 and sys.argv[2] == "-dist":
self.get_best_distribution()
def Print(self):
self.columns = sorted(self.columns)
if len(sys.argv) > 2 and sys.argv[2] == "-dist":
i = 0
for k, v in self.best_dist.items():
self.columns[i] += '\n(' + v + ')'
i += 1
self.mean = {k: v for k, v in sorted(self.mean.items(), key=lambda item: item[0])}
self.count = {k: v for k, v in sorted(self.count.items(), key=lambda item: item[0])}
self.min = {k: v for k, v in sorted(self.min.items(), key=lambda item: item[0])}
self.max = {k: v for k, v in sorted(self.max.items(), key=lambda item: item[0])}
self.std = {k: v for k, v in sorted(self.std.items(), key=lambda item: item[0])}
self.Q25 = {k: v for k, v in sorted(self.Q25.items(), key=lambda item: item[0])}
self.Q50 = {k: v for k, v in sorted(self.Q50.items(), key=lambda item: item[0])}
self.Q75 = {k: v for k, v in sorted(self.Q75.items(), key=lambda item: item[0])}
self.iqr = {k: v for k, v in sorted(self.iqr.items(), key=lambda item: item[0])}
self.range = {k: v for k, v in sorted(self.range.items(), key=lambda item: item[0])}
self.best_dist = {k: v for k, v in sorted(self.best_dist.items(), key=lambda item: item[0])}
columns = [''] + self.columns
print(tabulate([
['Count'] + list(self.count.values()),
['Mean'] + list(self.mean.values()),
['Std'] + list(self.std.values()),
['Min'] + list(self.min.values()),
['25%'] + list(self.Q25.values()),
['50%'] + list(self.Q50.values()),
['75%'] + list(self.Q75.values()),
['Max'] + list(self.max.values()),
['IQR'] + list(self.iqr.values()),
['Range'] + list(self.range.values())], headers=columns, tablefmt='plain', floatfmt=".6f"))
#print(tabulate([
# ['Distribution'] + list(self.best_dist.values())], headers=columns, tablefmt='plain', floatfmt=".6f"))
def ConvertBirthday(self):
start = datetime.datetime.fromtimestamp(0)
self.mean['Birthday'] = datetime.datetime.fromtimestamp(self.mean['Birthday']).strftime('%Y-%m-%d')
self.std['Birthday'] = str((datetime.datetime.fromtimestamp(self.std['Birthday']) - start).days) + '(d)'
self.min['Birthday'] = datetime.datetime.fromtimestamp(self.min['Birthday']).strftime('%Y-%m-%d')
self.max['Birthday'] = datetime.datetime.fromtimestamp(self.max['Birthday']).strftime('%Y-%m-%d')
self.Q25['Birthday'] = datetime.datetime.fromtimestamp(self.Q25['Birthday']).strftime('%Y-%m-%d')
self.Q50['Birthday'] = datetime.datetime.fromtimestamp(self.Q50['Birthday']).strftime('%Y-%m-%d')
self.Q75['Birthday'] = datetime.datetime.fromtimestamp(self.Q75['Birthday']).strftime('%Y-%m-%d')
self.iqr['Birthday'] = str((datetime.datetime.fromtimestamp(self.iqr['Birthday']) - start).days) + '(d)'
self.range['Birthday'] = str((datetime.datetime.fromtimestamp(self.range['Birthday']) - start).days) + '(d)'
def __call__(self):
self.ReadFile()
self.FilterNumerics()
self.Describe()
self.ConvertBirthday()
self.Print()
def main():
best_class = Describe(sys.argv[1])
best_class()
def CheckArgs():
if len(sys.argv) < 2:
print(f"Usage: {__file__} <dataset_name.csv> <flags>")
exit()
if __name__ == '__main__':
CheckArgs()
main()
|
[
"datetime.datetime",
"csv.DictReader",
"datetime.datetime.fromtimestamp",
"scipy.stats.kstest",
"math.sqrt",
"numpy.seterr",
"math.isnan"
] |
[((175, 198), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (184, 198), True, 'import numpy as np\n'), ((263, 278), 'math.isnan', 'math.isnan', (['val'], {}), '(val)\n', (273, 278), False, 'import math\n'), ((7153, 7187), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(0)'], {}), '(0)\n', (7184, 7187), False, 'import datetime\n'), ((923, 943), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (937, 943), False, 'import csv\n'), ((2712, 2734), 'math.sqrt', 'math.sqrt', (['self.std[k]'], {}), '(self.std[k])\n', (2721, 2734), False, 'import math\n'), ((7220, 7274), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.mean['Birthday']"], {}), "(self.mean['Birthday'])\n", (7251, 7274), False, 'import datetime\n'), ((7440, 7493), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.min['Birthday']"], {}), "(self.min['Birthday'])\n", (7471, 7493), False, 'import datetime\n'), ((7546, 7599), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.max['Birthday']"], {}), "(self.max['Birthday'])\n", (7577, 7599), False, 'import datetime\n'), ((7652, 7705), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.Q25['Birthday']"], {}), "(self.Q25['Birthday'])\n", (7683, 7705), False, 'import datetime\n'), ((7758, 7811), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.Q50['Birthday']"], {}), "(self.Q50['Birthday'])\n", (7789, 7811), False, 'import datetime\n'), ((7864, 7917), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.Q75['Birthday']"], {}), "(self.Q75['Birthday'])\n", (7895, 7917), False, 'import datetime\n'), ((4459, 4507), 'scipy.stats.kstest', 'st.kstest', (['self.listed[k]', 'dist_name'], {'args': 'param'}), '(self.listed[k], dist_name, args=param)\n', (4468, 4507), True, 'import scipy.stats as st\n'), ((7332, 7385), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.std['Birthday']"], {}), "(self.std['Birthday'])\n", (7363, 7385), False, 'import datetime\n'), ((7975, 8028), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.iqr['Birthday']"], {}), "(self.iqr['Birthday'])\n", (8006, 8028), False, 'import datetime\n'), ((8090, 8145), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["self.range['Birthday']"], {}), "(self.range['Birthday'])\n", (8121, 8145), False, 'import datetime\n'), ((1564, 1605), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', '(0)', '(0)'], {}), '(year, month, day, 0, 0)\n', (1581, 1605), False, 'import datetime\n')]
|
import numpy as np
def flip_axis(x_in, axis):
x_out = np.zeros(x_in.shape, dtype=x_in.dtype)
for i, x in enumerate(x_in):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x_out[i] = x.swapaxes(0, axis)
return x_out
def flip_axis_fra(x, flipping_axis):
pattern = [flipping_axis]
pattern += [el for el in range(x.ndim) if el != flipping_axis]
inv_pattern = [pattern.index(el) for el in range(x.ndim)]
x = x.transpose(pattern) # "flipping_axis" first
x = x[::-1, ...]
x = x.transpose(inv_pattern)
return x
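    # Note: on NumPy >= 1.12 this is equivalent to np.flip(x, flipping_axis).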
if __name__ == '__main__':
aa = np.random.random((10, 2, 3, 4)) # b, *, *, *
for axis in [1, 2, 3]:
print('Testing channel in axis {}'.format(axis))
mm = flip_axis(aa.copy(), axis-1)
ff = flip_axis_fra(aa.copy(), axis)
assert np.array_equal(mm, ff)
print('Test passed!')
|
[
"numpy.random.random",
"numpy.zeros",
"numpy.array_equal",
"numpy.asarray"
] |
[((60, 98), 'numpy.zeros', 'np.zeros', (['x_in.shape'], {'dtype': 'x_in.dtype'}), '(x_in.shape, dtype=x_in.dtype)\n', (68, 98), True, 'import numpy as np\n'), ((614, 645), 'numpy.random.random', 'np.random.random', (['(10, 2, 3, 4)'], {}), '((10, 2, 3, 4))\n', (630, 645), True, 'import numpy as np\n'), ((845, 867), 'numpy.array_equal', 'np.array_equal', (['mm', 'ff'], {}), '(mm, ff)\n', (859, 867), True, 'import numpy as np\n'), ((144, 157), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (154, 157), True, 'import numpy as np\n')]
|
def elastic_rate(
hv,
hs,
v,
s,
rho,
mu,
nx,
dx,
order,
t,
y,
r0,
r1,
tau0_1,
tau0_2,
tauN_1,
tauN_2,
type_0,
forcing,
):
# we compute rates that will be used for Runge-Kutta time-stepping
#
import first_derivative_sbp_operators
import numpy as np
import boundarycondition
V = np.zeros((nx, 1))
S = np.zeros((nx, 1))
Vt = np.zeros((nx, 1))
St = np.zeros((nx, 1))
Vx = np.zeros((nx, 1))
Sx = np.zeros((nx, 1))
mms(V, S, Vt, St, Vx, Sx, y, t, type_0)
# initialize arrays for computing derivatives
vx = np.zeros((nx, 1))
sx = np.zeros((nx, 1))
# compute first derivatives for velocity and stress fields
first_derivative_sbp_operators.dx(vx, v, nx, dx, order)
first_derivative_sbp_operators.dx(sx, s, nx, dx, order)
# compute the elastic rates
hv[:, :] = (1.0 / rho) * sx + forcing * (Vt - (1.0 / rho) * Sx)
hs[:, :] = mu * vx + forcing * (St - mu * Vx)
# impose boundary conditions using penalty: SAT
impose_bc(
hv,
hs,
v,
s,
rho,
mu,
nx,
dx,
order,
forcing * V,
forcing * S,
r0,
r1,
tau0_1,
tau0_2,
tauN_1,
tauN_2,
)
def advection_rate(hv, v, nx, dx, order, t, y, tau):
# we compute rates that will be used for Runge-Kutta time-stepping
#
import first_derivative_sbp_operators
import numpy as np
# initialize arrays for computing derivatives
vx = np.zeros((nx, 1))
# compute first derivatives of the advected field v
first_derivative_sbp_operators.dx(vx, v, nx, dx, order)
# compute rates
hv[:, :] = -vx
# impose boundary conditions using penalty: SAT
# penalty weights
h11 = np.zeros((1, 1))
penaltyweight(h11, dx, order)
V0 = np.zeros((1, 1))
# boundary forcing
g(V0, t)
# print(Vn)
# penalize boundaries with the SAT terms
hv[0, :] = hv[0, :] - tau / h11 * (v[0, :] - V0)
def impose_bc(
hv,
hs,
v,
s,
rho,
mu,
nx,
dx,
order,
V,
S,
r0,
r1,
tau0_1,
tau0_2,
tauN_1,
tauN_2,
):
# impose boundary conditions
import numpy as np
import boundarycondition
# penalty weights
h11 = np.zeros((1, 1))
penaltyweight(h11, dx, order)
mv = np.zeros((1, 1))
ms = np.zeros((1, 1))
pv = np.zeros((1, 1))
ps = np.zeros((1, 1))
v0 = v[0, :]
s0 = s[0, :]
vn = v[nx - 1, :]
sn = s[nx - 1, :]
# boundary forcing
V0 = V[0, :]
S0 = S[0, :]
Vn = V[nx - 1, :]
Sn = S[nx - 1, :]
# compute SAT terms
boundarycondition.bcm(mv, ms, v0, s0, V0, S0, rho, mu, r0)
boundarycondition.bcp(pv, ps, vn, sn, Vn, Sn, rho, mu, r1)
# penalize boundaries with the SAT terms
    hv[0, :] = hv[0, :] - tau0_1 / h11 * mv
    hs[0, :] = hs[0, :] - tau0_2 / h11 * ms
    hv[nx - 1, :] = hv[nx - 1, :] - tauN_1 / h11 * pv
    hs[nx - 1, :] = hs[nx - 1, :] - tauN_2 / h11 * ps
def mms(V, S, V_t, S_t, V_x, S_x, y, t, type_0):
import numpy as np
    if type_0 == "Gaussian":
delta = 0.015 * (y[-1, 0] - y[0, 0])
cs = 3.464
rho = 2.6702
Zs = rho * cs
x0 = 0.5 * (y[-1, 0] - y[0, 0])
V[:, :] = (
1
/ np.sqrt(2.0 * np.pi * delta ** 2)
* 0.5
* (
np.exp(-(y + cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
+ np.exp(-(y - cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
)
)
S[:, :] = (
1
/ np.sqrt(2.0 * np.pi * delta ** 2)
* 0.5
* Zs
* (
np.exp(-(y + cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
- np.exp(-(y - cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
)
)
V_t[:, :] = 0
S_t[:, :] = 0
V_x[:, :] = 0
S_x[:, :] = 0
    if type_0 == "Sinusoidal":
delta = y[-1, 0] - y[0, 0]
ny = 20.5 / delta * np.pi
nt = 2.5 * np.pi
fs = 9.33
V[:, :] = np.cos(nt * t) * np.sin(ny * y + fs)
S[:, :] = ny * np.sin(nt * t) * np.cos(ny * y - fs)
V_t[:, :] = -nt * np.sin(nt * t) * np.sin(ny * y + fs)
S_t[:, :] = nt * ny * np.cos(nt * t) * np.cos(ny * y - fs)
V_x[:, :] = ny * np.cos(nt * t) * np.cos(ny * y + fs)
S_x[:, :] = -ny * ny * np.sin(nt * t) * np.sin(ny * y - fs)
def g(V, t):
import numpy as np
V[:, :] = 0.0
if t <= 1.0 and t >= 0.0:
V[:, :] = (np.sin(np.pi * t)) ** 4
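        # sin(pi*t)**4 switches on and off smoothly: the value and its first
        # three derivatives vanish at t = 0 and t = 1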
def penaltyweight(h11, dx, order):
if order == 2:
h11[:] = 0.5 * dx
if order == 4:
h11[:] = (17.0 / 48.0) * dx
if order == 6:
h11[:] = 13649.0 / 43200.0 * dx
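    # These are the first diagonal entries of the standard diagonal-norm SBP
    # quadrature matrix H (scaled by dx) for interior orders 2, 4 and 6.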
|
[
"boundarycondition.bcm",
"numpy.sqrt",
"first_derivative_sbp_operators.dx",
"boundarycondition.bcp",
"numpy.exp",
"numpy.zeros",
"numpy.cos",
"numpy.sin"
] |
[((378, 395), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (386, 395), True, 'import numpy as np\n'), ((404, 421), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (412, 421), True, 'import numpy as np\n'), ((431, 448), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (439, 448), True, 'import numpy as np\n'), ((458, 475), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (466, 475), True, 'import numpy as np\n'), ((485, 502), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (493, 502), True, 'import numpy as np\n'), ((512, 529), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (520, 529), True, 'import numpy as np\n'), ((635, 652), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (643, 652), True, 'import numpy as np\n'), ((662, 679), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (670, 679), True, 'import numpy as np\n'), ((748, 803), 'first_derivative_sbp_operators.dx', 'first_derivative_sbp_operators.dx', (['vx', 'v', 'nx', 'dx', 'order'], {}), '(vx, v, nx, dx, order)\n', (781, 803), False, 'import first_derivative_sbp_operators\n'), ((808, 863), 'first_derivative_sbp_operators.dx', 'first_derivative_sbp_operators.dx', (['sx', 's', 'nx', 'dx', 'order'], {}), '(sx, s, nx, dx, order)\n', (841, 863), False, 'import first_derivative_sbp_operators\n'), ((1586, 1603), 'numpy.zeros', 'np.zeros', (['(nx, 1)'], {}), '((nx, 1))\n', (1594, 1603), True, 'import numpy as np\n'), ((1665, 1720), 'first_derivative_sbp_operators.dx', 'first_derivative_sbp_operators.dx', (['vx', 'v', 'nx', 'dx', 'order'], {}), '(vx, v, nx, dx, order)\n', (1698, 1720), False, 'import first_derivative_sbp_operators\n'), ((1847, 1863), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1855, 1863), True, 'import numpy as np\n'), ((1908, 1924), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1916, 1924), True, 'import numpy as np\n'), ((2367, 2383), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2375, 2383), True, 'import numpy as np\n'), ((2428, 2444), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2436, 2444), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2462, 2470), True, 'import numpy as np\n'), ((2481, 2497), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2489, 2497), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (2515, 2523), True, 'import numpy as np\n'), ((2736, 2794), 'boundarycondition.bcm', 'boundarycondition.bcm', (['mv', 'ms', 'v0', 's0', 'V0', 'S0', 'rho', 'mu', 'r0'], {}), '(mv, ms, v0, s0, V0, S0, rho, mu, r0)\n', (2757, 2794), False, 'import boundarycondition\n'), ((2799, 2857), 'boundarycondition.bcp', 'boundarycondition.bcp', (['pv', 'ps', 'vn', 'sn', 'Vn', 'Sn', 'rho', 'mu', 'r1'], {}), '(pv, ps, vn, sn, Vn, Sn, rho, mu, r1)\n', (2820, 2857), False, 'import boundarycondition\n'), ((4148, 4162), 'numpy.cos', 'np.cos', (['(nt * t)'], {}), '(nt * t)\n', (4154, 4162), True, 'import numpy as np\n'), ((4165, 4184), 'numpy.sin', 'np.sin', (['(ny * y + fs)'], {}), '(ny * y + fs)\n', (4171, 4184), True, 'import numpy as np\n'), ((4226, 4245), 'numpy.cos', 'np.cos', (['(ny * y - fs)'], {}), '(ny * y - fs)\n', (4232, 4245), True, 'import numpy as np\n'), ((4290, 4309), 'numpy.sin', 'np.sin', (['(ny * y + fs)'], {}), '(ny * y + fs)\n', (4296, 4309), True, 'import numpy as np\n'), ((4357, 4376), 'numpy.cos', 'np.cos', (['(ny * y - fs)'], {}), '(ny * y - fs)\n', (4363, 4376), True, 'import numpy as np\n'), ((4420, 4439), 'numpy.cos', 'np.cos', (['(ny * y + fs)'], {}), '(ny * y + fs)\n', (4426, 4439), True, 'import numpy as np\n'), ((4488, 4507), 'numpy.sin', 'np.sin', (['(ny * y - fs)'], {}), '(ny * y - fs)\n', (4494, 4507), True, 'import numpy as np\n'), ((4616, 4633), 'numpy.sin', 'np.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (4622, 4633), True, 'import numpy as np\n'), ((3438, 3490), 'numpy.exp', 'np.exp', (['(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3444, 3490), True, 'import numpy as np\n'), ((3511, 3563), 'numpy.exp', 'np.exp', (['(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3517, 3563), True, 'import numpy as np\n'), ((3740, 3792), 'numpy.exp', 'np.exp', (['(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y + cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3746, 3792), True, 'import numpy as np\n'), ((3813, 3865), 'numpy.exp', 'np.exp', (['(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))'], {}), '(-(y - cs * t - x0) ** 2 / (2.0 * delta ** 2))\n', (3819, 3865), True, 'import numpy as np\n'), ((4209, 4223), 'numpy.sin', 'np.sin', (['(nt * t)'], {}), '(nt * t)\n', (4215, 4223), True, 'import numpy as np\n'), ((4273, 4287), 'numpy.sin', 'np.sin', (['(nt * t)'], {}), '(nt * t)\n', (4279, 4287), True, 'import numpy as np\n'), ((4340, 4354), 'numpy.cos', 'np.cos', (['(nt * t)'], {}), '(nt * t)\n', (4346, 4354), True, 'import numpy as np\n'), ((4403, 4417), 'numpy.cos', 'np.cos', (['(nt * t)'], {}), '(nt * t)\n', (4409, 4417), True, 'import numpy as np\n'), ((4471, 4485), 'numpy.sin', 'np.sin', (['(nt * t)'], {}), '(nt * t)\n', (4477, 4485), True, 'import numpy as np\n'), ((3354, 3387), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi * delta ** 2)'], {}), '(2.0 * np.pi * delta ** 2)\n', (3361, 3387), True, 'import numpy as np\n'), ((3639, 3672), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi * delta ** 2)'], {}), '(2.0 * np.pi * delta ** 2)\n', (3646, 3672), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_const_df
from etna.datasets import generate_periodic_df
from etna.metrics import R2
from etna.models import LinearPerSegmentModel
from etna.transforms import FilterFeaturesTransform
from etna.transforms.encoders.categorical import LabelEncoderTransform
from etna.transforms.encoders.categorical import OneHotEncoderTransform
@pytest.fixture
def two_df_with_new_values():
d = {
"timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
+ list(pd.date_range(start="2021-01-01", end="2021-01-03")),
"segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
"regressor_0": [5, 8, 5, 9, 5, 9],
"target": [1, 2, 3, 4, 5, 6],
}
df1 = TSDataset.to_dataset(pd.DataFrame(d))
d = {
"timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
+ list(pd.date_range(start="2021-01-01", end="2021-01-03")),
"segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
"regressor_0": [5, 8, 9, 5, 0, 0],
"target": [1, 2, 3, 4, 5, 6],
}
df2 = TSDataset.to_dataset(pd.DataFrame(d))
return df1, df2
@pytest.fixture
def df_for_ohe_encoding():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
d = {
"timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
"regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
"regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
"regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
}
df_regressors = pd.DataFrame(d)
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
answer_on_regressor_0 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_0["test_0"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 5))
answer_on_regressor_0["test_1"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 8))
answer_on_regressor_0["test_0"] = answer_on_regressor_0["test_0"].astype("category")
answer_on_regressor_0["test_1"] = answer_on_regressor_0["test_1"].astype("category")
answer_on_regressor_1 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_1["test_0"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 5))
answer_on_regressor_1["test_1"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 9))
answer_on_regressor_1["test_0"] = answer_on_regressor_1["test_0"].astype("category")
answer_on_regressor_1["test_1"] = answer_on_regressor_1["test_1"].astype("category")
answer_on_regressor_2 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_2["test_0"] = answer_on_regressor_2["regressor_2"].apply(lambda x: float(x == 0))
answer_on_regressor_2["test_0"] = answer_on_regressor_2["test_0"].astype("category")
return tsdataset.df, (answer_on_regressor_0, answer_on_regressor_1, answer_on_regressor_2)
@pytest.fixture
def df_for_label_encoding():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
d = {
"timestamp": pd.date_range(start="2021-01-01", end="2021-01-12"),
"regressor_0": [5, 8, 5, 8, 5, 8, 5, 8, 5, 8, 5, 8],
"regressor_1": [9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5],
"regressor_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"regressor_3": [1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7],
}
df_regressors = pd.DataFrame(d)
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
answer_on_regressor_0 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_0["test"] = answer_on_regressor_0["regressor_0"].apply(lambda x: float(x == 8))
answer_on_regressor_0["test"] = answer_on_regressor_0["test"].astype("category")
answer_on_regressor_1 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_1["test"] = answer_on_regressor_1["regressor_1"].apply(lambda x: float(x == 9))
answer_on_regressor_1["test"] = answer_on_regressor_1["test"].astype("category")
answer_on_regressor_2 = tsdataset.df.copy()["segment_0"]
answer_on_regressor_2["test"] = answer_on_regressor_2["regressor_2"].apply(lambda x: float(x == 1))
answer_on_regressor_2["test"] = answer_on_regressor_2["test"].astype("category")
return tsdataset.df, (answer_on_regressor_0, answer_on_regressor_1, answer_on_regressor_2)
@pytest.fixture
def df_for_naming():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
df_regressors = generate_periodic_df(12, start_time="2021-01-01", scale=10, period=2, n_segments=2)
df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
df_regressors.columns = ["timestamp"] + ["regressor_1", "2"]
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
tsdataset = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
return tsdataset.df
def test_label_encoder_simple(df_for_label_encoding):
"""Test that LabelEncoderTransform works correct in a simple cases."""
df, answers = df_for_label_encoding
for i in range(3):
le = LabelEncoderTransform(in_column=f"regressor_{i}", out_column="test")
le.fit(df)
cols = le.transform(df)["segment_0"].columns
assert le.transform(df)["segment_0"][cols].equals(answers[i][cols])
def test_ohe_encoder_simple(df_for_ohe_encoding):
"""Test that OneHotEncoderTransform works correct in a simple case."""
df, answers = df_for_ohe_encoding
for i in range(3):
ohe = OneHotEncoderTransform(in_column=f"regressor_{i}", out_column="test")
ohe.fit(df)
cols = ohe.transform(df)["segment_0"].columns
assert ohe.transform(df)["segment_0"][cols].equals(answers[i][cols])
def test_value_error_label_encoder(df_for_label_encoding):
"""Test LabelEncoderTransform with wrong strategy."""
df, _ = df_for_label_encoding
with pytest.raises(ValueError, match="The strategy"):
le = LabelEncoderTransform(in_column="target", strategy="new_vlue")
le.fit(df)
le.transform(df)
@pytest.mark.parametrize(
"strategy, expected_values",
[
("new_value", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]])),
("none", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np.nan, 6]])),
("mean", np.array([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]])),
],
)
def test_new_value_label_encoder(two_df_with_new_values, strategy, expected_values):
"""Test LabelEncoderTransform correct works with unknown values."""
df1, df2 = two_df_with_new_values
le = LabelEncoderTransform(in_column="regressor_0", strategy=strategy)
le.fit(df1)
np.testing.assert_array_almost_equal(le.transform(df2).values, expected_values)
def test_new_value_ohe_encoder(two_df_with_new_values):
"""Test OneHotEncoderTransform correct works with unknown values."""
expected_values = np.array(
[
[5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0],
[8.0, 2.0, 0.0, 1.0, 0.0, 5.0, 0.0, 0.0],
[9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0],
]
)
df1, df2 = two_df_with_new_values
ohe = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
ohe.fit(df1)
np.testing.assert_array_almost_equal(ohe.transform(df2).values, expected_values)
def test_naming_ohe_encoder(two_df_with_new_values):
"""Test OneHotEncoderTransform gives the correct columns."""
df1, df2 = two_df_with_new_values
ohe = OneHotEncoderTransform(in_column="regressor_0", out_column="targets")
ohe.fit(df1)
segments = ["segment_0", "segment_1"]
target = ["target", "targets_0", "targets_1", "regressor_0"]
assert set([(i, j) for i in segments for j in target]) == set(ohe.transform(df2).columns.values)
@pytest.mark.parametrize(
"in_column, prefix",
[("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_ohe_encoder_no_out_column(df_for_naming, in_column, prefix):
"""Test OneHotEncoderTransform gives the correct columns with no out_column."""
df = df_for_naming
ohe = OneHotEncoderTransform(in_column=in_column)
ohe.fit(df)
answer = set(
list(df["segment_0"].columns) + [prefix + str(ohe.__repr__()) + "_0", prefix + str(ohe.__repr__()) + "_1"]
)
assert answer == set(ohe.transform(df)["segment_0"].columns.values)
@pytest.mark.parametrize(
"in_column, prefix",
[("2", ""), ("regressor_1", "regressor_")],
)
def test_naming_label_encoder_no_out_column(df_for_naming, in_column, prefix):
"""Test LabelEncoderTransform gives the correct columns with no out_column."""
df = df_for_naming
le = LabelEncoderTransform(in_column=in_column)
le.fit(df)
answer = set(list(df["segment_0"].columns) + [prefix + str(le.__repr__())])
assert answer == set(le.transform(df)["segment_0"].columns.values)
@pytest.fixture
def ts_for_ohe_sanity():
df_to_forecast = generate_const_df(periods=100, start_time="2021-01-01", scale=0, n_segments=1)
df_regressors = generate_periodic_df(periods=120, start_time="2021-01-01", scale=10, period=4, n_segments=1)
df_regressors = df_regressors.pivot(index="timestamp", columns="segment").reset_index()
df_regressors.columns = ["timestamp"] + [f"regressor_{i}" for i in range(1)]
df_regressors["segment"] = "segment_0"
df_to_forecast = TSDataset.to_dataset(df_to_forecast)
df_regressors = TSDataset.to_dataset(df_regressors)
rng = np.random.default_rng(12345)
def f(x):
return x ** 2 + rng.normal(0, 0.01)
df_to_forecast["segment_0", "target"] = df_regressors["segment_0"]["regressor_0"][:100].apply(f)
ts = TSDataset(df=df_to_forecast, freq="D", df_exog=df_regressors)
return ts
def test_ohe_sanity(ts_for_ohe_sanity):
"""Test for correct work in the full forecasting pipeline."""
horizon = 10
train_ts, test_ts = ts_for_ohe_sanity.train_test_split(test_size=horizon)
ohe = OneHotEncoderTransform(in_column="regressor_0")
filt = FilterFeaturesTransform(exclude=["regressor_0"])
train_ts.fit_transform([ohe, filt])
model = LinearPerSegmentModel()
model.fit(train_ts)
future_ts = train_ts.make_future(horizon)
forecast_ts = model.forecast(future_ts)
r2 = R2()
assert 1 - r2(test_ts, forecast_ts)["segment_0"] < 1e-5
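    # target is regressor_0 ** 2 plus tiny noise; once the few periodic regressor levels
    # are one-hot encoded, the per-segment linear model can fit it almost exactly, so R2 ~ 1.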
|
[
"etna.transforms.encoders.categorical.LabelEncoderTransform",
"etna.datasets.TSDataset.to_dataset",
"etna.datasets.generate_periodic_df",
"numpy.random.default_rng",
"etna.metrics.R2",
"etna.datasets.TSDataset",
"etna.models.LinearPerSegmentModel",
"etna.transforms.FilterFeaturesTransform",
"etna.datasets.generate_const_df",
"etna.transforms.encoders.categorical.OneHotEncoderTransform",
"pytest.mark.parametrize",
"numpy.array",
"pytest.raises",
"pandas.DataFrame",
"etna.datasets.generate_ar_df",
"pandas.date_range"
] |
[((8503, 8595), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_column, prefix"""', "[('2', ''), ('regressor_1', 'regressor_')]"], {}), "('in_column, prefix', [('2', ''), ('regressor_1',\n 'regressor_')])\n", (8526, 8595), False, 'import pytest\n'), ((9071, 9163), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_column, prefix"""', "[('2', ''), ('regressor_1', 'regressor_')]"], {}), "('in_column, prefix', [('2', ''), ('regressor_1',\n 'regressor_')])\n", (9094, 9163), False, 'import pytest\n'), ((1409, 1466), 'etna.datasets.generate_ar_df', 'generate_ar_df', (['(10)'], {'start_time': '"""2021-01-01"""', 'n_segments': '(1)'}), "(10, start_time='2021-01-01', n_segments=1)\n", (1423, 1466), False, 'from etna.datasets import generate_ar_df\n'), ((1821, 1836), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1833, 1836), True, 'import pandas as pd\n'), ((1901, 1937), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (1921, 1937), False, 'from etna.datasets import TSDataset\n'), ((1958, 1993), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (1978, 1993), False, 'from etna.datasets import TSDataset\n'), ((2010, 2071), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (2019, 2071), False, 'from etna.datasets import TSDataset\n'), ((3397, 3454), 'etna.datasets.generate_ar_df', 'generate_ar_df', (['(10)'], {'start_time': '"""2021-01-01"""', 'n_segments': '(1)'}), "(10, start_time='2021-01-01', n_segments=1)\n", (3411, 3454), False, 'from etna.datasets import generate_ar_df\n'), ((3809, 3824), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3821, 3824), True, 'import pandas as pd\n'), ((3889, 3925), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (3909, 3925), False, 'from etna.datasets import TSDataset\n'), ((3946, 3981), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (3966, 3981), False, 'from etna.datasets import TSDataset\n'), ((3998, 4059), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (4007, 4059), False, 'from etna.datasets import TSDataset\n'), ((4969, 5026), 'etna.datasets.generate_ar_df', 'generate_ar_df', (['(10)'], {'start_time': '"""2021-01-01"""', 'n_segments': '(1)'}), "(10, start_time='2021-01-01', n_segments=1)\n", (4983, 5026), False, 'from etna.datasets import generate_ar_df\n'), ((5047, 5134), 'etna.datasets.generate_periodic_df', 'generate_periodic_df', (['(12)'], {'start_time': '"""2021-01-01"""', 'scale': '(10)', 'period': '(2)', 'n_segments': '(2)'}), "(12, start_time='2021-01-01', scale=10, period=2,\n n_segments=2)\n", (5067, 5134), False, 'from etna.datasets import generate_periodic_df\n'), ((5352, 5388), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (5372, 5388), False, 'from etna.datasets import TSDataset\n'), ((5409, 5444), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (5429, 5444), False, 'from etna.datasets import TSDataset\n'), ((5461, 5522), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (5470, 5522), False, 'from etna.datasets import TSDataset\n'), ((7300, 7365), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': '"""regressor_0"""', 'strategy': 'strategy'}), "(in_column='regressor_0', strategy=strategy)\n", (7321, 7365), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((7619, 7760), 'numpy.array', 'np.array', (['[[5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0], [8.0, 2.0, 0.0, 1.0, 0.0, 5.0, \n 0.0, 0.0], [9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0]]'], {}), '([[5.0, 1.0, 1.0, 0.0, 5.0, 4.0, 1.0, 0.0], [8.0, 2.0, 0.0, 1.0, \n 0.0, 5.0, 0.0, 0.0], [9.0, 3.0, 0.0, 0.0, 0.0, 6.0, 0.0, 0.0]])\n', (7627, 7760), True, 'import numpy as np\n'), ((7865, 7934), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': '"""regressor_0"""', 'out_column': '"""targets"""'}), "(in_column='regressor_0', out_column='targets')\n", (7887, 7934), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((8205, 8274), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': '"""regressor_0"""', 'out_column': '"""targets"""'}), "(in_column='regressor_0', out_column='targets')\n", (8227, 8274), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((8797, 8840), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': 'in_column'}), '(in_column=in_column)\n', (8819, 8840), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((9365, 9407), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': 'in_column'}), '(in_column=in_column)\n', (9386, 9407), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((9638, 9716), 'etna.datasets.generate_const_df', 'generate_const_df', ([], {'periods': '(100)', 'start_time': '"""2021-01-01"""', 'scale': '(0)', 'n_segments': '(1)'}), "(periods=100, start_time='2021-01-01', scale=0, n_segments=1)\n", (9655, 9716), False, 'from etna.datasets import generate_const_df\n'), ((9737, 9834), 'etna.datasets.generate_periodic_df', 'generate_periodic_df', ([], {'periods': '(120)', 'start_time': '"""2021-01-01"""', 'scale': '(10)', 'period': '(4)', 'n_segments': '(1)'}), "(periods=120, start_time='2021-01-01', scale=10, period\n =4, n_segments=1)\n", (9757, 9834), False, 'from etna.datasets import generate_periodic_df\n'), ((10067, 10103), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_to_forecast'], {}), '(df_to_forecast)\n', (10087, 10103), False, 'from etna.datasets import TSDataset\n'), ((10124, 10159), 'etna.datasets.TSDataset.to_dataset', 'TSDataset.to_dataset', (['df_regressors'], {}), '(df_regressors)\n', (10144, 10159), False, 'from etna.datasets import TSDataset\n'), ((10170, 10198), 'numpy.random.default_rng', 'np.random.default_rng', (['(12345)'], {}), '(12345)\n', (10191, 10198), True, 'import numpy as np\n'), ((10369, 10430), 'etna.datasets.TSDataset', 'TSDataset', ([], {'df': 'df_to_forecast', 'freq': '"""D"""', 'df_exog': 'df_regressors'}), "(df=df_to_forecast, freq='D', df_exog=df_regressors)\n", (10378, 10430), False, 'from etna.datasets import TSDataset\n'), ((10658, 10705), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': '"""regressor_0"""'}), "(in_column='regressor_0')\n", (10680, 10705), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((10717, 10765), 'etna.transforms.FilterFeaturesTransform', 'FilterFeaturesTransform', ([], {'exclude': "['regressor_0']"}), "(exclude=['regressor_0'])\n", (10740, 10765), False, 'from etna.transforms import FilterFeaturesTransform\n'), ((10818, 10841), 'etna.models.LinearPerSegmentModel', 'LinearPerSegmentModel', ([], {}), '()\n', (10839, 10841), False, 'from etna.models import LinearPerSegmentModel\n'), ((10965, 10969), 'etna.metrics.R2', 'R2', ([], {}), '()\n', (10967, 10969), False, 'from etna.metrics import R2\n'), ((914, 929), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (926, 929), True, 'import pandas as pd\n'), ((1306, 1321), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1318, 1321), True, 'import pandas as pd\n'), ((1498, 1549), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-12"""'}), "(start='2021-01-01', end='2021-01-12')\n", (1511, 1549), True, 'import pandas as pd\n'), ((3486, 3537), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-12"""'}), "(start='2021-01-01', end='2021-01-12')\n", (3499, 3537), True, 'import pandas as pd\n'), ((5754, 5822), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': 'f"""regressor_{i}"""', 'out_column': '"""test"""'}), "(in_column=f'regressor_{i}', out_column='test')\n", (5775, 5822), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((6173, 6242), 'etna.transforms.encoders.categorical.OneHotEncoderTransform', 'OneHotEncoderTransform', ([], {'in_column': 'f"""regressor_{i}"""', 'out_column': '"""test"""'}), "(in_column=f'regressor_{i}', out_column='test')\n", (6195, 6242), False, 'from etna.transforms.encoders.categorical import OneHotEncoderTransform\n'), ((6556, 6603), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""The strategy"""'}), "(ValueError, match='The strategy')\n", (6569, 6603), False, 'import pytest\n'), ((6618, 6680), 'etna.transforms.encoders.categorical.LabelEncoderTransform', 'LabelEncoderTransform', ([], {'in_column': '"""target"""', 'strategy': '"""new_vlue"""'}), "(in_column='target', strategy='new_vlue')\n", (6639, 6680), False, 'from etna.transforms.encoders.categorical import LabelEncoderTransform\n'), ((6814, 6887), 'numpy.array', 'np.array', (['[[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]]'], {}), '([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, -1, 5], [9, -1, 3, 0, -1, 6]])\n', (6822, 6887), True, 'import numpy as np\n'), ((6907, 6997), 'numpy.array', 'np.array', (['[[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np.nan, 6]]'], {}), '([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, np.nan, 5], [9, np.nan, 3, 0, np\n .nan, 6]])\n', (6915, 6997), True, 'import numpy as np\n'), ((7012, 7084), 'numpy.array', 'np.array', (['[[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]]'], {}), '([[5, 0, 1, 5, 0, 4], [8, 1, 2, 0, 0, 5], [9, 0.5, 3, 0, 0, 6]])\n', (7020, 7084), True, 'import numpy as np\n'), ((575, 626), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (588, 626), True, 'import pandas as pd\n'), ((643, 694), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (656, 694), True, 'import pandas as pd\n'), ((967, 1018), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (980, 1018), True, 'import pandas as pd\n'), ((1035, 1086), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2021-01-01"""', 'end': '"""2021-01-03"""'}), "(start='2021-01-01', end='2021-01-03')\n", (1048, 1086), True, 'import pandas as pd\n')]
|
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score, confusion_matrix
from keras.callbacks import ModelCheckpoint
import seaborn as sns
from keras.optimizers import Adam
import pickle
import matplotlib.pyplot as plt
import lime
import lime.lime_tabular
from lime.lime_tabular import LimeTabularExplainer
import os
# fix random seed for reproducibility
np.random.seed(7)
# load dataset
dataset = np.genfromtxt("covid_filtered_1-5_allMin3.csv", delimiter=",", encoding="utf8")
dataset = dataset[1:, :]
np.random.shuffle(dataset)
# split into input and output variables
df_label = dataset[:, 23]
label = []
for lab in df_label:
if lab == 1:
label.append([0]) # class 1
elif lab == 2 or lab == 3:
label.append([1]) # class 23
elif lab == 4 or lab == 5:
label.append([2]) # class 45
else:
print("DATA ERROR")
inputColumns = [0, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
label = np.array(label)
xFit, xTest, yFit, yTest = train_test_split(dataset[:, inputColumns], label, test_size=0.3, random_state=42,
stratify=label)
'''
# test:
xTest_c1 = []
yTest_c1 = []
xTest_c23 = []
yTest_c23 = []
xTest_c45 = []
yTest_c45 = []
for i in range(len(yTest)):
if yTest[i][0] == 1: # class 1
xTest_c1.append(xTest[i])
yTest_c1.append(yTest[i])
elif yTest[i][1] == 1: # class 2-3
xTest_c23.append(xTest[i])
yTest_c23.append(yTest[i])
elif yTest[i][2] == 1: # class 4-5
xTest_c45.append(xTest[i])
yTest_c45.append(yTest[i])
xTest_c1 = numpy.array(xTest_c1)
yTest_c1 = numpy.array(yTest_c1)
xTest_c23 = numpy.array(xTest_c23)
yTest_c23 = numpy.array(yTest_c23)
xTest_c45 = numpy.array(xTest_c45)
yTest_c45 = numpy.array(yTest_c45)
'''
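# random forest hyperparameters: shallow, regularized trees to limit overfitting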
parameters = {'bootstrap': True,
'min_samples_leaf': 3,
'n_estimators': 50,
'min_samples_split': 10,
'max_features': 'sqrt',
'max_depth': 6,
'max_leaf_nodes': None}
RF_model = RandomForestClassifier(**parameters)
yFit = np.array(yFit).ravel()
RF_model.fit(xFit, yFit)
RF_predictions = RF_model.predict(xTest)
score = accuracy_score(yTest, RF_predictions)
print(score)
from sklearn import tree
fn = ['sex', 'HSD', 'entry_month', 'symptoms_month', 'pneumonia', 'age_group', 'pregnancy', 'diabetes',
'copd', 'asthma', 'immsupr', 'hypertension', 'other_disease', 'cardiovascular', 'obesity',
'renal_chronic', 'tobacco', 'contact_other_covid']
cn = ['Low', 'Middle', 'High']
fig = plt.figure(figsize=(35, 6), dpi=900)
tree.plot_tree(RF_model.estimators_[0],
feature_names=fn,
class_names=cn,
filled=True,
rounded=True,
precision=2,
fontsize=4)
fig.savefig('rf_individualtree.png')
'''
# Get and reshape confusion matrix data
matrix = confusion_matrix(yTest, RF_predictions)
matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
# Build the plot
plt.figure(figsize=(16, 7))
sns.set(font_scale=1.4)
sns.heatmap(matrix, annot=True, annot_kws={'size': 10},
cmap=plt.cm.Greens, linewidths=0.2)
# Add labels to the plot
class_names = ['Low severity', 'Medium severity', 'High severity']
tick_marks = np.arange(len(class_names))
tick_marks2 = tick_marks + 0.5
plt.xticks(tick_marks, class_names, rotation=25)
plt.yticks(tick_marks2, class_names, rotation=0)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion Matrix for Random Forest Model')
plt.show()
# create model
model = Sequential()
model.add(Dense(729, input_dim=len(inputColumns), activation='sigmoid'))
model.add(Dense(243, activation='sigmoid'))
model.add(Dense(81, activation='sigmoid'))
model.add(Dense(27, activation='sigmoid'))
model.add(Dense(9, activation='sigmoid'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.002), metrics=['accuracy'])
# Fit the model (train the model)
model.fit(xFit, yFit, epochs=1000, batch_size=50)
# evaluate the model
print("\n-------------------------------------------------------")
print("\ntotal(%i):" % len(xTest))
scores = model.evaluate(xTest, yTest)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
# test:
print("\nclass1(%i):" % len(xTest_c1))
scores = model.evaluate(xTest_c1, yTest_c1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print("\nclass23(%i):" % len(xTest_c23))
scores = model.evaluate(xTest_c23, yTest_c23)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print("\nclass45(%i):" % len(xTest_c45))
scores = model.evaluate(xTest_c45, yTest_c45)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
'''
|
[
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"sklearn.tree.plot_tree",
"numpy.genfromtxt",
"sklearn.metrics.accuracy_score",
"numpy.random.shuffle"
] |
[((613, 630), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (627, 630), True, 'import numpy as np\n'), ((657, 736), 'numpy.genfromtxt', 'np.genfromtxt', (['"""covid_filtered_1-5_allMin3.csv"""'], {'delimiter': '""","""', 'encoding': '"""utf8"""'}), "('covid_filtered_1-5_allMin3.csv', delimiter=',', encoding='utf8')\n", (670, 736), True, 'import numpy as np\n'), ((762, 788), 'numpy.random.shuffle', 'np.random.shuffle', (['dataset'], {}), '(dataset)\n', (779, 788), True, 'import numpy as np\n'), ((1207, 1222), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1215, 1222), True, 'import numpy as np\n'), ((1250, 1351), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset[:, inputColumns]', 'label'], {'test_size': '(0.3)', 'random_state': '(42)', 'stratify': 'label'}), '(dataset[:, inputColumns], label, test_size=0.3,\n random_state=42, stratify=label)\n', (1266, 1351), False, 'from sklearn.model_selection import train_test_split\n'), ((2317, 2353), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**parameters)\n', (2339, 2353), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2458, 2495), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['yTest', 'RF_predictions'], {}), '(yTest, RF_predictions)\n', (2472, 2495), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((2863, 2899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(35, 6)', 'dpi': '(900)'}), '(figsize=(35, 6), dpi=900)\n', (2873, 2899), True, 'import matplotlib.pyplot as plt\n'), ((2900, 3029), 'sklearn.tree.plot_tree', 'tree.plot_tree', (['RF_model.estimators_[0]'], {'feature_names': 'fn', 'class_names': 'cn', 'filled': '(True)', 'rounded': '(True)', 'precision': '(2)', 'fontsize': '(4)'}), '(RF_model.estimators_[0], feature_names=fn, class_names=cn,\n filled=True, rounded=True, precision=2, fontsize=4)\n', (2914, 3029), False, 'from sklearn import tree\n'), ((2361, 2375), 'numpy.array', 'np.array', (['yFit'], {}), '(yFit)\n', (2369, 2375), True, 'import numpy as np\n')]
|
import numpy as np
from pytope import Polytope
import matplotlib.pyplot as plt
np.random.seed(1)
# Create a polytope in R^2 with -1 <= x1 <= 4, -2 <= x2 <= 3
lower_bound1 = (-1, -2) # [-1, -2]' <= x
upper_bound1 = (4, 3) # x <= [4, 3]'
P1 = Polytope(lb=lower_bound1, ub=upper_bound1)
# Print the halfspace representation A*x <= b and H = [A b]
print('P1: ', repr(P1))
print('A =\n', P1.A)
print('b =\n', P1.b)
print('H =\n', P1.H)
# Create a square polytope in R^2 from specifying the four vertices
V2 = np.array([[1, 0], [0, -1], [-1, 0], [0, 1]])
P2 = Polytope(V2)
# Print the array of vertices:
print('P2: ', repr(P2))
print('V =\n', P2.V)
# Create a triangle in R^2 from specifying three half spaces (inequalities)
A3 = [[1, 0], [0, 1], [-1, -1]]
b3 = (2, 1, -1.5)
P3 = Polytope(A3, b3)
# Print the halfspace representation A*x <= b and H = [A b]
print('P3: ', repr(P3))
print('A =\n', P3.A)
print('b =\n', P3.b)
print('H =\n', P3.H)
# Determine and print the vertices:
print('V =\n', P3.V)
# P4: P3 shifted by a point p4
p4 = (1.4, 0.7)
P4 = P3 + p4
# P5: P4 shifted by a point p5 (in negative direction)
p5 = [0.4, 2]
P5 = P4 - p5
# P6: P2 scaled by s6 and shifted by p6
s6 = 0.2
p6 = -np.array([[0.4], [1.6]])
P6 = s6 * P2 + p6
# P7: P2 rotated 20 degrees (both clockwise and counter-clockwise)
rot7 = np.pi / 9.0
rot_mat7 = np.array([[np.cos(rot7), -np.sin(rot7)],
[np.sin(rot7), np.cos(rot7)]])
P7 = rot_mat7 * P2
P7_inv = P2 * rot_mat7
# P8: -P6
P8 = -P6
# P9: The convex hull of a set of 30 random points with [1, 2]' <= x <= [2, 3]'
V9 = np.random.uniform((1, 2), (2, 3), (30, 2))
P9 = Polytope(V9)
P9.minimize_V_rep()
# P10: the Minkowski sum of two squares (one large and one rotated and smaller)
P10_1 = Polytope(lb=(-0.6, -0.6), ub=(0.6, 0.6))
P10_2 = rot_mat7 * Polytope(lb=(-0.3, -0.3), ub=(0.3, 0.3))
P10 = P10_1 + P10_2
# Plot all of the polytopes.
# See the matplotlib.patches.Polygon documentation for a list of valid kwargs
fig1, ax1 = plt.subplots(num=1)
plt.grid()
plt.axis([-1.5, 4.5, -2.5, 3.5])
P1.plot(ax1, fill=False, edgecolor='r', linewidth=2)
P2.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), linewidth=1)
P3.plot(ax1, facecolor='b', edgecolor='k', linewidth=2, alpha=0.5)
P4.plot(ax1, facecolor='lightsalmon')
plt.scatter(P4.V[:, 0], P4.V[:, 1], c='k', marker='x') # the vertices of P4
# Polytope implements an additional keyword edgealpha:
P5.plot(ax1, fill=False, edgecolor='b', linewidth=8, edgealpha=0.2)
plt.plot(P5.centroid[0], P5.centroid[1], 'o') # the centroid of P5
P6.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), linewidth=1)
P7.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
linewidth=1, edgealpha=0.3)
P7_inv.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
linewidth=1, edgealpha=0.3, linestyle='--')
P8.plot(ax1, facecolor='g', edgecolor=(0, 0, 0), alpha=0.3,
linewidth=1, edgealpha=0.3)
P9.plot(ax1, facecolor='gray', alpha=0.6, edgecolor='k')
plt.plot(V9[:, 0], V9[:, 1], 'or', marker='o', markersize=2) # random points
plt.plot(P9.V[:, 0], P9.V[:, 1], 'og', marker='o', markersize=1) # P9's vertices
plt.title('Demonstration of various polytope operations')
# Plot the Minkowski sum of two squares
fig2, ax2 = plt.subplots(num=2)
plt.grid()
plt.axis([-2.5, 2.5, -2.5, 2.5])
P10_1.plot(ax2, fill=False, edgecolor=(1, 0, 0))
P10_2.plot(ax2, fill=False, edgecolor=(0, 0, 1))
P10.plot(ax2, fill=False,
edgecolor=(1, 0, 1), linestyle='--', linewidth=2)
for p in P10_1.V: # the smaller square + each of the vertices of the larger one
(P10_2 + p).plot(ax2, facecolor='grey', alpha=0.4,
edgecolor='k', linewidth=0.5)
ax2.legend((r'$P$', r'$Q$', r'$P \oplus Q$'))
plt.title('Minkowski sum of two polytopes')
# Plot two rotated rectangles and their intersection
rot1 = -np.pi / 18.0
rot_mat1 = np.array([[np.cos(rot1), -np.sin(rot1)],
[np.sin(rot1), np.cos(rot1)]])
rot2 = np.pi / 18.0
rot_mat2 = np.array([[np.cos(rot2), -np.sin(rot2)],
[np.sin(rot2), np.cos(rot2)]])
P_i1 = rot_mat1 * Polytope(lb=(-2, -1), ub=(1, 1))
P_i2 = rot_mat2 * Polytope(lb=(0, 0), ub=(2, 2))
P_i = P_i1 & P_i2 # intersection
fig3, ax3 = plt.subplots(num=3)
plt.grid()
plt.axis([-3.5, 3.5, -3.5, 3.5])
P_i1.plot(fill=False, edgecolor=(1, 0, 0), linestyle='--')
P_i2.plot(fill=False, edgecolor=(0, 0, 1), linestyle='--')
P_i.plot(fill=False,
edgecolor=(1, 0, 1), linestyle='-', linewidth=2)
ax3.legend((r'$P$', r'$Q$', r'$P \cap Q$'))
plt.title('Intersection of two polytopes')
# Plot two polytopes and their Pontryagin difference
P_m1 = Polytope(lb=(-3, -3), ub=(3, 3))
P_m2 = Polytope([[1, 0], [0, -1], [-1, 0], [0, 1]])
P_diff = P_m1 - P_m2
fig4, ax4 = plt.subplots(num=4)
plt.grid()
plt.axis([-3.5, 3.5, -3.5, 3.5])
P_m1.plot(fill=False, edgecolor=(1, 0, 0))
P_m2.plot(fill=False, edgecolor=(0, 0, 1))
P_diff.plot(fill=False,
edgecolor=(1, 0, 1), linestyle='--', linewidth=2)
ax4.legend((r'$P$', r'$Q$', r'$P \ominus Q$'))
plt.title('Pontryagin difference of two polytopes')
plt.setp([ax1, ax2, ax3, ax4], xlabel=r'$x_1$', ylabel=r'$x_2$')
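plt.show()  # display all four figures when run as a plain (non-interactive) script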
|
[
"matplotlib.pyplot.setp",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"numpy.cos",
"matplotlib.pyplot.axis",
"pytope.Polytope",
"matplotlib.pyplot.subplots"
] |
[((82, 99), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (96, 99), True, 'import numpy as np\n'), ((247, 289), 'pytope.Polytope', 'Polytope', ([], {'lb': 'lower_bound1', 'ub': 'upper_bound1'}), '(lb=lower_bound1, ub=upper_bound1)\n', (255, 289), False, 'from pytope import Polytope\n'), ((511, 555), 'numpy.array', 'np.array', (['[[1, 0], [0, -1], [-1, 0], [0, 1]]'], {}), '([[1, 0], [0, -1], [-1, 0], [0, 1]])\n', (519, 555), True, 'import numpy as np\n'), ((561, 573), 'pytope.Polytope', 'Polytope', (['V2'], {}), '(V2)\n', (569, 573), False, 'from pytope import Polytope\n'), ((782, 798), 'pytope.Polytope', 'Polytope', (['A3', 'b3'], {}), '(A3, b3)\n', (790, 798), False, 'from pytope import Polytope\n'), ((1580, 1622), 'numpy.random.uniform', 'np.random.uniform', (['(1, 2)', '(2, 3)', '(30, 2)'], {}), '((1, 2), (2, 3), (30, 2))\n', (1597, 1622), True, 'import numpy as np\n'), ((1628, 1640), 'pytope.Polytope', 'Polytope', (['V9'], {}), '(V9)\n', (1636, 1640), False, 'from pytope import Polytope\n'), ((1750, 1790), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-0.6, -0.6)', 'ub': '(0.6, 0.6)'}), '(lb=(-0.6, -0.6), ub=(0.6, 0.6))\n', (1758, 1790), False, 'from pytope import Polytope\n'), ((1991, 2010), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(1)'}), '(num=1)\n', (2003, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2054), 'matplotlib.pyplot.axis', 'plt.axis', (['[-1.5, 4.5, -2.5, 3.5]'], {}), '([-1.5, 4.5, -2.5, 3.5])\n', (2030, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2329), 'matplotlib.pyplot.scatter', 'plt.scatter', (['P4.V[:, 0]', 'P4.V[:, 1]'], {'c': '"""k"""', 'marker': '"""x"""'}), "(P4.V[:, 0], P4.V[:, 1], c='k', marker='x')\n", (2286, 2329), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2520), 'matplotlib.pyplot.plot', 'plt.plot', (['P5.centroid[0]', 'P5.centroid[1]', '"""o"""'], {}), "(P5.centroid[0], P5.centroid[1], 'o')\n", (2483, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2970, 3030), 'matplotlib.pyplot.plot', 'plt.plot', (['V9[:, 0]', 'V9[:, 1]', '"""or"""'], {'marker': '"""o"""', 'markersize': '(2)'}), "(V9[:, 0], V9[:, 1], 'or', marker='o', markersize=2)\n", (2978, 3030), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3112), 'matplotlib.pyplot.plot', 'plt.plot', (['P9.V[:, 0]', 'P9.V[:, 1]', '"""og"""'], {'marker': '"""o"""', 'markersize': '(1)'}), "(P9.V[:, 0], P9.V[:, 1], 'og', marker='o', markersize=1)\n", (3056, 3112), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3186), 'matplotlib.pyplot.title', 'plt.title', (['"""Demonstration of various polytope operations"""'], {}), "('Demonstration of various polytope operations')\n", (3138, 3186), True, 'import matplotlib.pyplot as plt\n'), ((3240, 3259), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(2)'}), '(num=2)\n', (3252, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3260, 3270), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3268, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3303), 'matplotlib.pyplot.axis', 'plt.axis', (['[-2.5, 2.5, -2.5, 2.5]'], {}), '([-2.5, 2.5, -2.5, 2.5])\n', (3279, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3716, 3759), 'matplotlib.pyplot.title', 'plt.title', (['"""Minkowski sum of two polytopes"""'], {}), "('Minkowski sum of two polytopes')\n", (3725, 3759), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4228), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(3)'}), '(num=3)\n', (4221, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4239), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4237, 4239), True, 'import matplotlib.pyplot as plt\n'), ((4240, 4272), 'matplotlib.pyplot.axis', 'plt.axis', (['[-3.5, 3.5, -3.5, 3.5]'], {}), '([-3.5, 3.5, -3.5, 3.5])\n', (4248, 4272), True, 'import matplotlib.pyplot as plt\n'), ((4514, 4556), 'matplotlib.pyplot.title', 'plt.title', (['"""Intersection of two polytopes"""'], {}), "('Intersection of two polytopes')\n", (4523, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4618, 4650), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-3, -3)', 'ub': '(3, 3)'}), '(lb=(-3, -3), ub=(3, 3))\n', (4626, 4650), False, 'from pytope import Polytope\n'), ((4658, 4702), 'pytope.Polytope', 'Polytope', (['[[1, 0], [0, -1], [-1, 0], [0, 1]]'], {}), '([[1, 0], [0, -1], [-1, 0], [0, 1]])\n', (4666, 4702), False, 'from pytope import Polytope\n'), ((4736, 4755), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'num': '(4)'}), '(num=4)\n', (4748, 4755), True, 'import matplotlib.pyplot as plt\n'), ((4756, 4766), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4764, 4766), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4799), 'matplotlib.pyplot.axis', 'plt.axis', (['[-3.5, 3.5, -3.5, 3.5]'], {}), '([-3.5, 3.5, -3.5, 3.5])\n', (4775, 4799), True, 'import matplotlib.pyplot as plt\n'), ((5019, 5070), 'matplotlib.pyplot.title', 'plt.title', (['"""Pontryagin difference of two polytopes"""'], {}), "('Pontryagin difference of two polytopes')\n", (5028, 5070), True, 'import matplotlib.pyplot as plt\n'), ((5072, 5134), 'matplotlib.pyplot.setp', 'plt.setp', (['[ax1, ax2, ax3, ax4]'], {'xlabel': '"""$x_1$"""', 'ylabel': '"""$x_2$"""'}), "([ax1, ax2, ax3, ax4], xlabel='$x_1$', ylabel='$x_2$')\n", (5080, 5134), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1227), 'numpy.array', 'np.array', (['[[0.4], [1.6]]'], {}), '([[0.4], [1.6]])\n', (1211, 1227), True, 'import numpy as np\n'), ((1810, 1850), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-0.3, -0.3)', 'ub': '(0.3, 0.3)'}), '(lb=(-0.3, -0.3), ub=(0.3, 0.3))\n', (1818, 1850), False, 'from pytope import Polytope\n'), ((4081, 4113), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-2, -1)', 'ub': '(1, 1)'}), '(lb=(-2, -1), ub=(1, 1))\n', (4089, 4113), False, 'from pytope import Polytope\n'), ((4132, 4162), 'pytope.Polytope', 'Polytope', ([], {'lb': '(0, 0)', 'ub': '(2, 2)'}), '(lb=(0, 0), ub=(2, 2))\n', (4140, 4162), False, 'from pytope import Polytope\n'), ((1355, 1367), 'numpy.cos', 'np.cos', (['rot7'], {}), '(rot7)\n', (1361, 1367), True, 'import numpy as np\n'), ((1407, 1419), 'numpy.sin', 'np.sin', (['rot7'], {}), '(rot7)\n', (1413, 1419), True, 'import numpy as np\n'), ((1421, 1433), 'numpy.cos', 'np.cos', (['rot7'], {}), '(rot7)\n', (1427, 1433), True, 'import numpy as np\n'), ((3857, 3869), 'numpy.cos', 'np.cos', (['rot1'], {}), '(rot1)\n', (3863, 3869), True, 'import numpy as np\n'), ((3909, 3921), 'numpy.sin', 'np.sin', (['rot1'], {}), '(rot1)\n', (3915, 3921), True, 'import numpy as np\n'), ((3923, 3935), 'numpy.cos', 'np.cos', (['rot1'], {}), '(rot1)\n', (3929, 3935), True, 'import numpy as np\n'), ((3981, 3993), 'numpy.cos', 'np.cos', (['rot2'], {}), '(rot2)\n', (3987, 3993), True, 'import numpy as np\n'), ((4033, 4045), 'numpy.sin', 'np.sin', (['rot2'], {}), '(rot2)\n', (4039, 4045), True, 'import numpy as np\n'), ((4047, 4059), 'numpy.cos', 'np.cos', (['rot2'], {}), '(rot2)\n', 
(4053, 4059), True, 'import numpy as np\n'), ((1370, 1382), 'numpy.sin', 'np.sin', (['rot7'], {}), '(rot7)\n', (1376, 1382), True, 'import numpy as np\n'), ((3872, 3884), 'numpy.sin', 'np.sin', (['rot1'], {}), '(rot1)\n', (3878, 3884), True, 'import numpy as np\n'), ((3996, 4008), 'numpy.sin', 'np.sin', (['rot2'], {}), '(rot2)\n', (4002, 4008), True, 'import numpy as np\n')]
|
import retro # pip install gym-retro
import numpy as np # pip install numpy
import cv2 # pip install opencv-python
import neat # pip install neat-python
import pickle # standard library; no install needed
class Worker(object):
def __init__(self, genome, config):
self.genome = genome
self.config = config
def work(self):
self.env = retro.make('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')
self.env.reset()
ob, _, _, _ = self.env.step(self.env.action_space.sample())
inx = int(ob.shape[0]/8)
iny = int(ob.shape[1]/8)
done = False
net = neat.nn.FeedForwardNetwork.create(self.genome, self.config)
fitness = 0
xpos = 0
xpos_max = 0
counter = 0
imgarray = []
while not done:
# self.env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
actions = net.activate(imgarray)
ob, rew, done, info = self.env.step(actions)
xpos = info['x']
if xpos > xpos_max:
xpos_max = xpos
counter = 0
fitness += 1
else:
counter += 1
if counter > 250:
done = True
if xpos == info['screen_x_end'] and xpos > 500:
fitness += 100000
done = True
print(fitness)
return fitness
def eval_genomes(genome, config):
worky = Worker(genome, config)
return worky.work()
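# load the NEAT config, resume the population from checkpoint 13, and evolve with 10 parallel workers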
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'config-feedforward')
p = neat.Population(config)
p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-13')
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(10))
pe = neat.ParallelEvaluator(10, eval_genomes)
winner = p.run(pe.evaluate)
with open('winner.pkl', 'wb') as output:
pickle.dump(winner, output, 1)
|
[
"pickle.dump",
"neat.StdOutReporter",
"neat.Population",
"numpy.reshape",
"neat.Config",
"neat.nn.FeedForwardNetwork.create",
"neat.Checkpointer.restore_checkpoint",
"neat.StatisticsReporter",
"numpy.ndarray.flatten",
"cv2.cvtColor",
"numpy.interp",
"neat.ParallelEvaluator",
"cv2.resize",
"retro.make",
"neat.Checkpointer"
] |
[((1918, 2050), 'neat.Config', 'neat.Config', (['neat.DefaultGenome', 'neat.DefaultReproduction', 'neat.DefaultSpeciesSet', 'neat.DefaultStagnation', '"""config-feedforward"""'], {}), "(neat.DefaultGenome, neat.DefaultReproduction, neat.\n DefaultSpeciesSet, neat.DefaultStagnation, 'config-feedforward')\n", (1929, 2050), False, 'import neat\n'), ((2094, 2117), 'neat.Population', 'neat.Population', (['config'], {}), '(config)\n', (2109, 2117), False, 'import neat\n'), ((2122, 2180), 'neat.Checkpointer.restore_checkpoint', 'neat.Checkpointer.restore_checkpoint', (['"""neat-checkpoint-13"""'], {}), "('neat-checkpoint-13')\n", (2158, 2180), False, 'import neat\n'), ((2231, 2256), 'neat.StatisticsReporter', 'neat.StatisticsReporter', ([], {}), '()\n', (2254, 2256), False, 'import neat\n'), ((2323, 2363), 'neat.ParallelEvaluator', 'neat.ParallelEvaluator', (['(10)', 'eval_genomes'], {}), '(10, eval_genomes)\n', (2345, 2363), False, 'import neat\n'), ((2196, 2221), 'neat.StdOutReporter', 'neat.StdOutReporter', (['(True)'], {}), '(True)\n', (2215, 2221), False, 'import neat\n'), ((2294, 2315), 'neat.Checkpointer', 'neat.Checkpointer', (['(10)'], {}), '(10)\n', (2311, 2315), False, 'import neat\n'), ((2439, 2469), 'pickle.dump', 'pickle.dump', (['winner', 'output', '(1)'], {}), '(winner, output, 1)\n', (2450, 2469), False, 'import pickle\n'), ((403, 463), 'retro.make', 'retro.make', (['"""SonicTheHedgehog-Genesis"""', '"""GreenHillZone.Act1"""'], {}), "('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')\n", (413, 463), False, 'import retro\n'), ((694, 753), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['self.genome', 'self.config'], {}), '(self.genome, self.config)\n', (727, 753), False, 'import neat\n'), ((945, 971), 'cv2.resize', 'cv2.resize', (['ob', '(inx, iny)'], {}), '(ob, (inx, iny))\n', (955, 971), False, 'import cv2\n'), ((989, 1025), 'cv2.cvtColor', 'cv2.cvtColor', (['ob', 'cv2.COLOR_BGR2GRAY'], {}), '(ob, cv2.COLOR_BGR2GRAY)\n', (1001, 1025), False, 'import cv2\n'), ((1043, 1069), 'numpy.reshape', 'np.reshape', (['ob', '(inx, iny)'], {}), '(ob, (inx, iny))\n', (1053, 1069), True, 'import numpy as np\n'), ((1106, 1128), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ob'], {}), '(ob)\n', (1124, 1128), True, 'import numpy as np\n'), ((1152, 1191), 'numpy.interp', 'np.interp', (['imgarray', '(0, 254)', '(-1, +1)'], {}), '(imgarray, (0, 254), (-1, +1))\n', (1161, 1191), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from utils import *
from IPython import embed
class DCGAN(object):
def __init__(self, args):
self.args = args
self.model = dict()
self.data = dict()
self.rescache = dict()
self.device = args.use_gpu and torch.cuda.is_available()
def _report_settings(self):
''' Report the settings '''
        sep = '-' * 16
        print('%sEnvironment Versions%s' % (sep, sep))
print("- Python : {}".format(sys.version.strip().split('|')[0]))
print("- PyTorch : {}".format(torch.__version__))
print("- TorchVison: {}".format(torchvision.__version__))
print("- USE_GPU : {}".format(self.device))
print('-' * 52)
def _model_loader(self):
self.model['generator'] = Generator(self.args.in_dim, self.args.gchannels)
self.model['discriminator'] = Discriminator(self.args.dchannels)
self.model['criterion'] = nn.BCELoss()
self.model['opti_gene'] = optim.Adam(self.model['generator'].parameters(), \
lr=self.args.base_lr, betas=(self.args.beta, 0.999))
self.model['opti_disc'] = optim.Adam(self.model['discriminator'].parameters(), \
lr=self.args.base_lr, betas=(self.args.beta, 0.999))
# self.model['scheduler'] = torch.optim.lr_scheduler.MultiStepLR(
# self.model['optimizer'], milestones=[12, 20, 30, 45], gamma=self.args.gamma)
if self.device:
self.model['generator'] = self.model['generator'].cuda()
self.model['discriminator'] = self.model['discriminator'].cuda()
if len(self.args.gpu_ids) > 1:
self.model['generator'] = torch.nn.DataParallel(self.model['generator'], device_ids=self.args.gpu_ids)
self.model['discriminator'] = torch.nn.DataParallel(self.model['discriminator'], device_ids=self.args.gpu_ids)
torch.backends.cudnn.benchmark = True
print('Parallel mode was going ...')
else:
print('Single-gpu mode was going ...')
else:
print('CPU mode was going ...')
if len(self.args.resume) > 2:
checkpoint = torch.load(self.args.resume, map_location=lambda storage, loc: storage)
self.args.start = checkpoint['epoch']
self.model['generator'].load_state_dict(checkpoint['generator'])
self.model['discriminator'].load_state_dict(checkpoint['discriminator'])
print('Resuming the train process at %3d epoches ...' % self.args.start)
print('Model loading was finished ...')
def _data_loader(self):
self.data['train_loader'] = DataLoader(
CelebA(args=self.args),
batch_size = self.args.batch_size, \
shuffle = True,\
num_workers= self.args.workers)
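        # fixed noise kept constant across epochs so generated samples are comparable over training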
self.data['fixed_noise'] = torch.randn(64, self.args.in_dim ,1, 1)
if self.device:
self.data['fixed_noise'] = self.data['fixed_noise'].cuda()
self.rescache['gloss'] = []
self.rescache['dloss'] = []
self.rescache['fake'] = []
print('Data loading was finished ...')
def _model_train(self, epoch = 0):
total_dloss, total_gloss = 0, 0
for idx, imgs in enumerate(self.data['train_loader']):
# update discriminator
self.model['discriminator'].train()
self.model['generator'].eval()
imgs.requires_grad = False
if self.device:
imgs = imgs.cuda()
b_size = imgs.size(0)
self.model['discriminator'].zero_grad()
            gty = torch.full((b_size,), 1.0)  # real labels; BCELoss expects float targets
if self.device:
gty = gty.cuda()
predy = self.model['discriminator'](imgs).view(-1)
dloss_real = self.model['criterion'](predy, gty)
dloss_real.backward()
noise = torch.randn(b_size, self.args.in_dim, 1, 1)
if self.device:
noise = noise.cuda()
fake = self.model['generator'](noise)
            gty.fill_(0)  # relabel as fake for the generated batch
predy = self.model['discriminator'](fake.detach()).view(-1)
dloss_fake = self.model['criterion'](predy, gty)
dloss_fake.backward()
self.model['opti_disc'].step()
d_loss_real = dloss_real.mean().item()
d_loss_fake = dloss_fake.mean().item()
d_loss = d_loss_real + d_loss_fake
self.rescache['dloss'].append(d_loss)
total_dloss += d_loss
# update generator
self.model['generator'].train()
self.model['discriminator'].eval()
self.model['generator'].zero_grad()
            gty.fill_(1)  # generator wants its fakes classified as real
predy = self.model['discriminator'](fake).view(-1)
gloss = self.model['criterion'](predy, gty)
gloss.backward()
self.model['opti_gene'].step()
g_loss = gloss.mean().item()
self.rescache['gloss'].append(g_loss)
total_gloss += g_loss
if (idx + 1) % self.args.print_freq == 0:
print('epoch : %2d|%2d, iter : %4d|%4d, dloss : %.4f, gloss : %.4f' % \
(epoch, self.args.epoches, idx+1, len(self.data['train_loader']), \
np.mean(self.rescache['dloss']), np.mean(self.rescache['gloss'])))
if (idx + 1) % self.args.monitor_freq == 0:
with torch.no_grad():
self.model['generator'].eval()
fake = self.model['generator'](self.data['fixed_noise']).detach().cpu()
self.rescache['fake'].append(fake)
return total_dloss, total_gloss
def _main_loop(self):
min_loss = 1e3
for epoch in range(self.args.start, self.args.epoches + 1):
start_time = time.time()
dloss, gloss = self._model_train(epoch)
train_loss = dloss + gloss
# self.model['scheduler'].step()
end_time = time.time()
print('Single epoch cost time : %.2f mins' % ((end_time - start_time)/60))
if not os.path.exists(self.args.save_to):
os.mkdir(self.args.save_to)
if (min_loss > train_loss) and (not self.args.is_debug):
print('%snew SOTA was found%s' % ('*'*16, '*'*16))
min_loss = train_loss
filename = os.path.join(self.args.save_to, 'sota.pth.tar')
torch.save({
'epoch' : epoch,
'generator' : self.model['generator'].state_dict(),
'discriminator' : self.model['discriminator'].state_dict(),
'loss' : min_loss,
}, filename)
if (epoch % self.args.save_freq == 0) and (not self.args.is_debug):
filename = os.path.join(self.args.save_to, 'epoch_'+str(epoch)+'.pth.tar')
torch.save({
'epoch' : epoch,
'generator' : self.model['generator'].state_dict(),
'discriminator' : self.model['discriminator'].state_dict(),
'loss' : train_loss,
}, filename)
if self.args.is_debug:
break
def _visual_res(self):
''' Visual the training process '''
# gloss and dloss
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(self.rescache['gloss'], label="gloss")
plt.plot(self.rescache['dloss'], label="dloss")
plt.xlabel("iterations")
plt.ylabel("loss")
plt.legend()
plt.savefig('loss.jpg', dpi=400)
# save the fake-images
np.save('fake.npy', self.rescache['fake'])
def train_runner(self):
self._report_settings()
self._model_loader()
self._data_loader()
self._main_loop()
self._visual_res()
if __name__ == "__main__":
faceu = DCGAN(training_args())
faceu.train_runner()
|
[
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"numpy.save",
"os.path.exists",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.mkdir",
"torch.randn",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.legend",
"torch.full",
"sys.version.strip",
"torch.load",
"os.path.join",
"torch.nn.DataParallel",
"torch.nn.BCELoss",
"matplotlib.pyplot.figure",
"torch.no_grad"
] |
[((1189, 1201), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1199, 1201), True, 'import torch.nn as nn\n'), ((3336, 3375), 'torch.randn', 'torch.randn', (['(64)', 'self.args.in_dim', '(1)', '(1)'], {}), '(64, self.args.in_dim, 1, 1)\n', (3347, 3375), False, 'import torch\n'), ((7981, 8008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7991, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8016, 8077), 'matplotlib.pyplot.title', 'plt.title', (['"""Generator and Discriminator Loss During Training"""'], {}), "('Generator and Discriminator Loss During Training')\n", (8025, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8086, 8133), 'matplotlib.pyplot.plot', 'plt.plot', (["self.rescache['gloss']"], {'label': '"""gloss"""'}), "(self.rescache['gloss'], label='gloss')\n", (8094, 8133), True, 'import matplotlib.pyplot as plt\n'), ((8142, 8189), 'matplotlib.pyplot.plot', 'plt.plot', (["self.rescache['dloss']"], {'label': '"""dloss"""'}), "(self.rescache['dloss'], label='dloss')\n", (8150, 8189), True, 'import matplotlib.pyplot as plt\n'), ((8198, 8222), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (8208, 8222), True, 'import matplotlib.pyplot as plt\n'), ((8231, 8249), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (8241, 8249), True, 'import matplotlib.pyplot as plt\n'), ((8258, 8270), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8268, 8270), True, 'import matplotlib.pyplot as plt\n'), ((8279, 8311), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""loss.jpg"""'], {'dpi': '(400)'}), "('loss.jpg', dpi=400)\n", (8290, 8311), True, 'import matplotlib.pyplot as plt\n'), ((8360, 8402), 'numpy.save', 'np.save', (['"""fake.npy"""', "self.rescache['fake']"], {}), "('fake.npy', self.rescache['fake'])\n", (8367, 8402), True, 'import numpy as np\n'), ((512, 537), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (535, 537), False, 'import torch\n'), ((2532, 2603), 'torch.load', 'torch.load', (['self.args.resume'], {'map_location': '(lambda storage, loc: storage)'}), '(self.args.resume, map_location=lambda storage, loc: storage)\n', (2542, 2603), False, 'import torch\n'), ((4116, 4140), 'torch.full', 'torch.full', (['(b_size,)', '(1)'], {}), '((b_size,), 1)\n', (4126, 4140), False, 'import torch\n'), ((4381, 4424), 'torch.randn', 'torch.randn', (['b_size', 'self.args.in_dim', '(1)', '(1)'], {}), '(b_size, self.args.in_dim, 1, 1)\n', (4392, 4424), False, 'import torch\n'), ((6396, 6407), 'time.time', 'time.time', ([], {}), '()\n', (6405, 6407), False, 'import time\n'), ((6567, 6578), 'time.time', 'time.time', ([], {}), '()\n', (6576, 6578), False, 'import time\n'), ((2026, 2102), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (["self.model['generator']"], {'device_ids': 'self.args.gpu_ids'}), "(self.model['generator'], device_ids=self.args.gpu_ids)\n", (2047, 2102), False, 'import torch\n'), ((2149, 2234), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (["self.model['discriminator']"], {'device_ids': 'self.args.gpu_ids'}), "(self.model['discriminator'], device_ids=self.args.gpu_ids\n )\n", (2170, 2234), False, 'import torch\n'), ((6686, 6719), 'os.path.exists', 'os.path.exists', (['self.args.save_to'], {}), '(self.args.save_to)\n', (6700, 6719), False, 'import os\n'), ((6737, 6764), 'os.mkdir', 'os.mkdir', (['self.args.save_to'], {}), '(self.args.save_to)\n', (6745, 6764), False, 'import os\n'), ((6967, 7014), 'os.path.join', 
'os.path.join', (['self.args.save_to', '"""sota.pth.tar"""'], {}), "(self.args.save_to, 'sota.pth.tar')\n", (6979, 7014), False, 'import os\n'), ((5989, 6004), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6002, 6004), False, 'import torch\n'), ((727, 746), 'sys.version.strip', 'sys.version.strip', ([], {}), '()\n', (744, 746), False, 'import sys\n'), ((5844, 5875), 'numpy.mean', 'np.mean', (["self.rescache['dloss']"], {}), "(self.rescache['dloss'])\n", (5851, 5875), True, 'import numpy as np\n'), ((5877, 5908), 'numpy.mean', 'np.mean', (["self.rescache['gloss']"], {}), "(self.rescache['gloss'])\n", (5884, 5908), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for objax.util.image."""
import io
import unittest
from typing import Tuple
import jax.numpy as jn
import numpy as np
from PIL import Image
import objax
class TestUtilImage(unittest.TestCase):
def ndimarange(self, dims: Tuple[int, ...]):
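        """Return an array of np.prod(dims) consecutive floats reshaped to dims."""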
return np.arange(np.prod(dims), dtype=float).reshape(dims)
def test_nchw(self):
"""Test nchw."""
x = self.ndimarange((2, 3, 4, 5))
self.assertEqual(objax.util.image.nchw(x).tolist(), x.transpose((0, 3, 1, 2)).tolist())
self.assertEqual(objax.util.image.nchw(jn.array(x)).tolist(), x.transpose((0, 3, 1, 2)).tolist())
x = self.ndimarange((2, 3, 4, 5, 6))
self.assertEqual(objax.util.image.nchw(x).tolist(), x.transpose((0, 1, 4, 2, 3)).tolist())
self.assertEqual(objax.util.image.nchw(jn.array(x)).tolist(), x.transpose((0, 1, 4, 2, 3)).tolist())
def test_nhwc(self):
"""Test nhwc."""
x = self.ndimarange((2, 3, 4, 5))
self.assertEqual(objax.util.image.nhwc(x).tolist(), x.transpose((0, 2, 3, 1)).tolist())
self.assertEqual(objax.util.image.nhwc(jn.array(x)).tolist(), x.transpose((0, 2, 3, 1)).tolist())
x = self.ndimarange((2, 3, 4, 5, 6))
self.assertEqual(objax.util.image.nhwc(x).tolist(), x.transpose((0, 1, 3, 4, 2)).tolist())
self.assertEqual(objax.util.image.nhwc(jn.array(x)).tolist(), x.transpose((0, 1, 3, 4, 2)).tolist())
def test_normalize(self):
"""Test normalize methods."""
x = np.arange(256)
y = objax.util.image.normalize_to_unit_float(x)
self.assertEqual((x / 128 - (1 - 1 / 256)).tolist(), y.tolist())
self.assertEqual(y.tolist(), y.clip(-1, 1).tolist())
z = objax.util.image.normalize_to_uint8(y)
self.assertEqual(x.tolist(), z.tolist())
z = objax.util.image.normalize_to_uint8(y + 1 / 128)
self.assertEqual((x + 1).clip(0, 255).tolist(), z.tolist())
z = objax.util.image.normalize_to_uint8(y - 1 / 128)
self.assertEqual((x - 1).clip(0, 255).tolist(), z.tolist())
def test_to_png(self):
"""Test to_png."""
        x = np.zeros((3, 32, 32), np.float64) + 1 / 255  # np.float alias was removed in NumPy >= 1.24
x[:, :12, :12] = 1
x[:, -12:, -12:] = -1
y = objax.util.image.to_png(x)
self.assertEqual(y, b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\x00\x00 \x08\x02\x00\x00\x00\xfc'
b'\x18\xed\xa3\x00\x00\x00FIDATx\x9cc\xfc\xff\xff?\x03!\xd0\xd8\xd8HP\r.\xc0D\xb6\xceQ'
b'\x0bF-\x18\xb5`\x04Y\xc0BI9C\x0c\x18\xfaA4j\xc1\x08\xb0\x80\x85\x12\xcd\r\r\r\x04\xd5'
b'\x0c\xfd \x1a\xb5`\xd4\x82Q\x0b\xe8`\x01\x00\xe3\xf1\x07\xc7\x82\x83p\xa5\x00\x00\x00\x00'
b'IEND\xaeB`\x82')
z = np.array(Image.open(io.BytesIO(y)))
z = (z.transpose((2, 0, 1)) - 127.5) / 127.5
self.assertEqual(x.tolist(), z.tolist())
if __name__ == '__main__':
unittest.main()
|
[
"numpy.prod",
"objax.util.image.nhwc",
"objax.util.image.normalize_to_uint8",
"objax.util.image.nchw",
"io.BytesIO",
"jax.numpy.array",
"numpy.zeros",
"objax.util.image.to_png",
"objax.util.image.normalize_to_unit_float",
"unittest.main",
"numpy.arange"
] |
[((3549, 3564), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3562, 3564), False, 'import unittest\n'), ((2080, 2094), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (2089, 2094), True, 'import numpy as np\n'), ((2107, 2150), 'objax.util.image.normalize_to_unit_float', 'objax.util.image.normalize_to_unit_float', (['x'], {}), '(x)\n', (2147, 2150), False, 'import objax\n'), ((2297, 2335), 'objax.util.image.normalize_to_uint8', 'objax.util.image.normalize_to_uint8', (['y'], {}), '(y)\n', (2332, 2335), False, 'import objax\n'), ((2397, 2445), 'objax.util.image.normalize_to_uint8', 'objax.util.image.normalize_to_uint8', (['(y + 1 / 128)'], {}), '(y + 1 / 128)\n', (2432, 2445), False, 'import objax\n'), ((2526, 2574), 'objax.util.image.normalize_to_uint8', 'objax.util.image.normalize_to_uint8', (['(y - 1 / 128)'], {}), '(y - 1 / 128)\n', (2561, 2574), False, 'import objax\n'), ((2821, 2847), 'objax.util.image.to_png', 'objax.util.image.to_png', (['x'], {}), '(x)\n', (2844, 2847), False, 'import objax\n'), ((2710, 2741), 'numpy.zeros', 'np.zeros', (['(3, 32, 32)', 'np.float'], {}), '((3, 32, 32), np.float)\n', (2718, 2741), True, 'import numpy as np\n'), ((3398, 3411), 'io.BytesIO', 'io.BytesIO', (['y'], {}), '(y)\n', (3408, 3411), False, 'import io\n'), ((861, 874), 'numpy.prod', 'np.prod', (['dims'], {}), '(dims)\n', (868, 874), True, 'import numpy as np\n'), ((1021, 1045), 'objax.util.image.nchw', 'objax.util.image.nchw', (['x'], {}), '(x)\n', (1042, 1045), False, 'import objax\n'), ((1268, 1292), 'objax.util.image.nchw', 'objax.util.image.nchw', (['x'], {}), '(x)\n', (1289, 1292), False, 'import objax\n'), ((1569, 1593), 'objax.util.image.nhwc', 'objax.util.image.nhwc', (['x'], {}), '(x)\n', (1590, 1593), False, 'import objax\n'), ((1816, 1840), 'objax.util.image.nhwc', 'objax.util.image.nhwc', (['x'], {}), '(x)\n', (1837, 1840), False, 'import objax\n'), ((1139, 1150), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1147, 1150), True, 'import jax.numpy as jn\n'), ((1389, 1400), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1397, 1400), True, 'import jax.numpy as jn\n'), ((1687, 1698), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1695, 1698), True, 'import jax.numpy as jn\n'), ((1937, 1948), 'jax.numpy.array', 'jn.array', (['x'], {}), '(x)\n', (1945, 1948), True, 'import jax.numpy as jn\n')]
|
import os
import sys
import json
import time
import numpy as np
import tensorflow as tf
from blocks.helpers import Monitor
from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np
from blocks.optimizers import adam_updates
import data.load_data as load_data
from masks import get_generator
from .learner import Learner
class FullyObservedLearner(Learner):
def __init__(self, nr_gpu, save_dir, img_size, exp_name="default"):
super().__init__(nr_gpu, save_dir, img_size, exp_name)
def train_epoch(self, mgen, which_set='train'):
if which_set == 'train':
data_set = self.train_set
elif which_set == 'eval':
data_set = self.eval_set
elif which_set == 'test':
data_set = self.test_set
for data in data_set:
if self.num_channels == 3:
data = np.cast[np.float32]((data - 127.5) / 127.5)
ds = np.split(data, self.nr_gpu)
feed_dict = {}
feed_dict.update({model.is_training: True for model in self.models})
feed_dict.update({model.dropout_p: 0.5 for model in self.models})
feed_dict.update({model.x: ds[i] for i, model in enumerate(self.models)})
feed_dict.update({model.x_bar: ds[i] for i, model in enumerate(self.models)})
masks_np = [mgen.gen(self.batch_size//self.nr_gpu) for i in range(self.nr_gpu)]
feed_dict.update({model.masks: masks_np[i] for i, model in enumerate(self.models)})
self.sess.run(self.train_step, feed_dict=feed_dict)
def eval_epoch(self, mgen, which_set='eval'):
if which_set == 'train':
data_set = self.train_set
elif which_set == 'eval':
data_set = self.eval_set
elif which_set == 'test':
data_set = self.test_set
for data in data_set:
if self.num_channels == 3:
data = np.cast[np.float32]((data - 127.5) / 127.5)
ds = np.split(data, self.nr_gpu)
feed_dict = {}
feed_dict.update({model.is_training: False for model in self.models})
feed_dict.update({model.dropout_p: 0.0 for model in self.models})
feed_dict.update({model.x: ds[i] for i, model in enumerate(self.models)})
feed_dict.update({model.x_bar: ds[i] for i, model in enumerate(self.models)})
masks_np = [mgen.gen(self.batch_size//self.nr_gpu) for i in range(self.nr_gpu)]
feed_dict.update({model.masks: masks_np[i] for i, model in enumerate(self.models)})
self.monitor.evaluate(self.sess, feed_dict)
def sample(self, data, mgen, same_inputs=False, use_mask_at=None):
if self.num_channels == 3:
data = np.cast[np.float32]((data - 127.5) / 127.5)
if same_inputs:
for i in range(data.shape[0]):
data[i] = data[3]
ori_data = data.copy()
ds = np.split(data.copy(), self.nr_gpu)
feed_dict = {}
feed_dict.update({model.is_training: False for model in self.models})
feed_dict.update({model.dropout_p: 0.0 for model in self.models})
feed_dict.update({model.x: ds[i] for i, model in enumerate(self.models)})
feed_dict.update({model.x_bar: ds[i] for i, model in enumerate(self.models)})
if use_mask_at is not None:
masks_np = np.load(use_mask_at)['masks']
masks_np = np.split(masks_np, self.nr_gpu)
else:
masks_np = [mgen.gen(self.batch_size//self.nr_gpu) for i in range(self.nr_gpu)]
np.savez(mgen.name+"_"+self.data_set, masks=np.concatenate(masks_np))
if same_inputs:
for g in range(self.nr_gpu):
for i in range(self.batch_size//self.nr_gpu):
masks_np[g][i] = masks_np[0][0]
feed_dict.update({model.masks: masks_np[i] for i, model in enumerate(self.models)})
#
for i in range(self.nr_gpu):
ds[i] *= broadcast_masks_np(masks_np[i], num_channels=self.num_channels)
masked_data = np.concatenate(ds, axis=0)
x_gen = [ds[i].copy() for i in range(self.nr_gpu)]
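        # autoregressive completion: unobserved pixels are generated one at a time in
        # raster-scan order, while observed (masked-in) pixels stay fixed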
for yi in range(self.img_size):
for xi in range(self.img_size):
if np.min(np.array([masks_np[i][:, yi, xi] for i in range(self.nr_gpu)])) > 0:
continue
feed_dict.update({model.x_bar:x_gen[i] for i, model in enumerate(self.models)})
x_hats = self.sess.run([model.x_hat for model in self.models], feed_dict=feed_dict)
for i in range(self.nr_gpu):
bmask = broadcast_masks_np(masks_np[i][:, yi, xi] , num_channels=self.num_channels)
x_gen[i][:, yi, xi, :] = x_hats[i][:, yi, xi, :] * (1.-bmask) + x_gen[i][:, yi, xi, :] * bmask
gen_data = np.concatenate(x_gen, axis=0)
if self.num_channels == 1:
masks_np = np.concatenate(masks_np, axis=0)
masks_np = broadcast_masks_np(masks_np, num_channels=self.num_channels)
masked_data += (1-masks_np) * 0.5
return ori_data, masked_data, gen_data
|
[
"numpy.split",
"numpy.load",
"blocks.helpers.broadcast_masks_np",
"numpy.concatenate"
] |
[((4122, 4148), 'numpy.concatenate', 'np.concatenate', (['ds'], {'axis': '(0)'}), '(ds, axis=0)\n', (4136, 4148), True, 'import numpy as np\n'), ((4895, 4924), 'numpy.concatenate', 'np.concatenate', (['x_gen'], {'axis': '(0)'}), '(x_gen, axis=0)\n', (4909, 4924), True, 'import numpy as np\n'), ((974, 1001), 'numpy.split', 'np.split', (['data', 'self.nr_gpu'], {}), '(data, self.nr_gpu)\n', (982, 1001), True, 'import numpy as np\n'), ((2034, 2061), 'numpy.split', 'np.split', (['data', 'self.nr_gpu'], {}), '(data, self.nr_gpu)\n', (2042, 2061), True, 'import numpy as np\n'), ((3476, 3507), 'numpy.split', 'np.split', (['masks_np', 'self.nr_gpu'], {}), '(masks_np, self.nr_gpu)\n', (3484, 3507), True, 'import numpy as np\n'), ((4036, 4099), 'blocks.helpers.broadcast_masks_np', 'broadcast_masks_np', (['masks_np[i]'], {'num_channels': 'self.num_channels'}), '(masks_np[i], num_channels=self.num_channels)\n', (4054, 4099), False, 'from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\n'), ((4983, 5015), 'numpy.concatenate', 'np.concatenate', (['masks_np'], {'axis': '(0)'}), '(masks_np, axis=0)\n', (4997, 5015), True, 'import numpy as np\n'), ((5039, 5099), 'blocks.helpers.broadcast_masks_np', 'broadcast_masks_np', (['masks_np'], {'num_channels': 'self.num_channels'}), '(masks_np, num_channels=self.num_channels)\n', (5057, 5099), False, 'from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\n'), ((3423, 3443), 'numpy.load', 'np.load', (['use_mask_at'], {}), '(use_mask_at)\n', (3430, 3443), True, 'import numpy as np\n'), ((3670, 3694), 'numpy.concatenate', 'np.concatenate', (['masks_np'], {}), '(masks_np)\n', (3684, 3694), True, 'import numpy as np\n'), ((4685, 4759), 'blocks.helpers.broadcast_masks_np', 'broadcast_masks_np', (['masks_np[i][:, yi, xi]'], {'num_channels': 'self.num_channels'}), '(masks_np[i][:, yi, xi], num_channels=self.num_channels)\n', (4703, 4759), False, 'from blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\n')]
|
from __future__ import print_function
import os
import numpy as np
from tqdm import trange
from models import *
from utils import save_image
class Trainer(object):
def __init__(self, config, batch_manager):
tf.compat.v1.set_random_seed(config.random_seed)
self.config = config
self.batch_manager = batch_manager
self.x, self.y = batch_manager.batch()
        self.xt = tf.compat.v1.placeholder(tf.float32, shape=int_shape(self.x))
        self.yt = tf.compat.v1.placeholder(tf.float32, shape=int_shape(self.y))
self.dataset = config.dataset
self.beta1 = config.beta1
self.beta2 = config.beta2
self.optimizer = config.optimizer
self.batch_size = config.batch_size
self.lr = tf.Variable(config.lr, name='lr')
self.lr_update = tf.assign(self.lr, tf.maximum(self.lr*0.1, config.lr_lower_boundary), name='lr_update')
self.height = config.height
self.width = config.width
self.b_num = config.batch_size
self.conv_hidden_num = config.conv_hidden_num
self.repeat_num = config.repeat_num
self.use_l2 = config.use_l2
self.use_norm = config.use_norm
self.model_dir = config.model_dir
self.use_gpu = config.use_gpu
self.data_format = config.data_format
if self.data_format == 'NCHW':
self.x = nhwc_to_nchw(self.x)
self.y = nhwc_to_nchw(self.y)
self.xt = nhwc_to_nchw(self.xt)
self.yt = nhwc_to_nchw(self.yt)
self.start_step = config.start_step
self.log_step = config.log_step
self.test_step = config.test_step
self.max_step = config.max_step
self.save_sec = config.save_sec
self.lr_update_step = config.lr_update_step
self.step = tf.Variable(self.start_step, name='step', trainable=False)
self.is_train = config.is_train
self.build_model()
self.saver = tf.compat.v1.train.Saver()
self.summary_writer = tf.summary.FileWriter(self.model_dir)
sv = tf.train.Supervisor(logdir=self.model_dir,
is_chief=True,
saver=self.saver,
summary_op=None,
summary_writer=self.summary_writer,
save_model_secs=self.save_sec,
global_step=self.step,
ready_for_local_init_op=None)
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
sess_config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)
self.sess = sv.prepare_or_wait_for_session(config=sess_config)
if self.is_train:
self.batch_manager.start_thread(self.sess)
def build_model(self):
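        """Build the VDSR network, train/test losses, optimizer and TensorBoard summaries."""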
self.y_, self.var = VDSR(
self.x, self.conv_hidden_num, self.repeat_num, self.data_format, self.use_norm)
self.y_img = denorm_img(self.y_, self.data_format) # for debug
self.yt_, _ = VDSR(
self.xt, self.conv_hidden_num, self.repeat_num, self.data_format, self.use_norm,
train=False, reuse=True)
self.yt_ = tf.clip_by_value(self.yt_, 0, 1)
self.yt_img = denorm_img(self.yt_, self.data_format)
show_all_variables()
if self.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer
else:
raise Exception("[!] Caution! Paper didn't use {} opimizer other than Adam".format(self.config.optimizer))
optimizer = optimizer(self.lr, beta1=self.beta1, beta2=self.beta2)
# losses
# l1 and l2
self.loss_l1 = tf.reduce_mean(tf.abs(self.y_ - self.y))
self.loss_l2 = tf.reduce_mean(tf.squared_difference(self.y_, self.y))
# total
if self.use_l2:
self.loss = self.loss_l2
else:
self.loss = self.loss_l1
# test loss
self.tl1 = 1 - tf.reduce_mean(tf.abs(self.yt_ - self.yt))
self.tl2 = 1 - tf.reduce_mean(tf.squared_difference(self.yt_, self.yt))
        self.test_acc_l1 = tf.compat.v1.placeholder(tf.float32)
        self.test_acc_l2 = tf.compat.v1.placeholder(tf.float32)
        self.test_acc_iou = tf.compat.v1.placeholder(tf.float32)
self.optim = optimizer.minimize(self.loss, global_step=self.step, var_list=self.var)
summary = [
tf.summary.image("y", self.y_img),
tf.summary.scalar("loss/loss", self.loss),
tf.summary.scalar("loss/loss_l1", self.loss_l1),
tf.summary.scalar("loss/loss_l2", self.loss_l2),
tf.summary.scalar("misc/lr", self.lr),
tf.summary.scalar('misc/q', self.batch_manager.q.size())
]
self.summary_op = tf.summary.merge(summary)
summary = [
tf.summary.image("x_sample", denorm_img(self.x, self.data_format)),
tf.summary.image("y_sample", denorm_img(self.y, self.data_format)),
]
self.summary_once = tf.summary.merge(summary) # call just once
summary = [
tf.summary.scalar("loss/test_acc_l1", self.test_acc_l1),
tf.summary.scalar("loss/test_acc_l2", self.test_acc_l2),
tf.summary.scalar("loss/test_acc_iou", self.test_acc_iou),
]
self.summary_test = tf.summary.merge(summary)
def train(self):
x_list, xs, ys, sample_list = self.batch_manager.random_list(self.b_num)
save_image(xs, '{}/x_gt.png'.format(self.model_dir))
save_image(ys, '{}/y_gt.png'.format(self.model_dir))
with open('{}/gt.txt'.format(self.model_dir), 'w') as f:
for sample in sample_list:
f.write(sample + '\n')
# call once
summary_once = self.sess.run(self.summary_once)
self.summary_writer.add_summary(summary_once, 0)
self.summary_writer.flush()
for step in trange(self.start_step, self.max_step):
fetch_dict = {
"optim": self.optim,
"loss": self.loss,
}
if step % self.log_step == 0 or step == self.max_step-1:
fetch_dict.update({
"summary": self.summary_op,
})
# if step % self.test_step == self.test_step-1 or step == self.max_step-1:
if True:
l1, l2, iou, nb = 0, 0, 0, 0
for x, y in self.batch_manager.test_batch():
if self.data_format == 'NCHW':
x = to_nchw_numpy(x)
y = to_nchw_numpy(y)
tl1, tl2, y_ = self.sess.run([self.tl1, self.tl2, self.yt_], {self.xt: x, self.yt: y})
l1 += tl1
l2 += tl2
nb += 1
# iou
y_I = np.logical_and(y>0, y_>0)
y_I_sum = np.sum(y_I, axis=(1, 2, 3))
y_U = np.logical_or(y>0, y_>0)
y_U_sum = np.sum(y_U, axis=(1, 2, 3))
# print(y_I_sum, y_U_sum)
nonzero_id = np.where(y_U_sum != 0)[0]
if nonzero_id.shape[0] == 0:
acc = 1.0
else:
acc = np.average(y_I_sum[nonzero_id] / y_U_sum[nonzero_id])
iou += acc
if nb > 500:
break
l1 /= float(nb)
l2 /= float(nb)
iou /= float(nb)
summary_test = self.sess.run(self.summary_test,
{self.test_acc_l1: l1, self.test_acc_l2: l2, self.test_acc_iou: iou})
self.summary_writer.add_summary(summary_test, step)
self.summary_writer.flush()
result = self.sess.run(fetch_dict)
if step % self.log_step == 0 or step == self.max_step-1:
self.summary_writer.add_summary(result['summary'], step)
self.summary_writer.flush()
loss = result['loss']
assert not np.isnan(loss), 'Model diverged with loss = NaN'
print("\n[{}/{}] Loss: {:.6f}".format(step, self.max_step, loss))
if step % (self.log_step * 10) == 0 or step == self.max_step-1:
self.generate(x_list, self.model_dir, idx=step)
if step % self.lr_update_step == self.lr_update_step - 1:
self.sess.run(self.lr_update)
# save last checkpoint..
save_path = os.path.join(self.model_dir, 'model.ckpt')
self.saver.save(self.sess, save_path, global_step=self.step)
self.batch_manager.stop_thread()
def generate(self, x_samples, root_path=None, idx=None):
if self.data_format == 'NCHW':
x_samples = to_nchw_numpy(x_samples)
generated = self.sess.run(self.yt_img, {self.xt: x_samples})
y_path = os.path.join(root_path, 'y_{}.png'.format(idx))
save_image(generated, y_path, nrow=self.b_num)
print("[*] Samples saved: {}".format(y_path))
|
[
"numpy.logical_and",
"numpy.average",
"numpy.where",
"os.path.join",
"numpy.logical_or",
"utils.save_image",
"numpy.sum",
"numpy.isnan",
"tqdm.trange"
] |
[((6028, 6066), 'tqdm.trange', 'trange', (['self.start_step', 'self.max_step'], {}), '(self.start_step, self.max_step)\n', (6034, 6066), False, 'from tqdm import trange\n'), ((8716, 8758), 'os.path.join', 'os.path.join', (['self.model_dir', '"""model.ckpt"""'], {}), "(self.model_dir, 'model.ckpt')\n", (8728, 8758), False, 'import os\n'), ((9161, 9207), 'utils.save_image', 'save_image', (['generated', 'y_path'], {'nrow': 'self.b_num'}), '(generated, y_path, nrow=self.b_num)\n', (9171, 9207), False, 'from utils import save_image\n'), ((6989, 7018), 'numpy.logical_and', 'np.logical_and', (['(y > 0)', '(y_ > 0)'], {}), '(y > 0, y_ > 0)\n', (7003, 7018), True, 'import numpy as np\n'), ((7045, 7072), 'numpy.sum', 'np.sum', (['y_I'], {'axis': '(1, 2, 3)'}), '(y_I, axis=(1, 2, 3))\n', (7051, 7072), True, 'import numpy as np\n'), ((7099, 7127), 'numpy.logical_or', 'np.logical_or', (['(y > 0)', '(y_ > 0)'], {}), '(y > 0, y_ > 0)\n', (7112, 7127), True, 'import numpy as np\n'), ((7154, 7181), 'numpy.sum', 'np.sum', (['y_U'], {'axis': '(1, 2, 3)'}), '(y_U, axis=(1, 2, 3))\n', (7160, 7181), True, 'import numpy as np\n'), ((8272, 8286), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (8280, 8286), True, 'import numpy as np\n'), ((7261, 7283), 'numpy.where', 'np.where', (['(y_U_sum != 0)'], {}), '(y_U_sum != 0)\n', (7269, 7283), True, 'import numpy as np\n'), ((7426, 7479), 'numpy.average', 'np.average', (['(y_I_sum[nonzero_id] / y_U_sum[nonzero_id])'], {}), '(y_I_sum[nonzero_id] / y_U_sum[nonzero_id])\n', (7436, 7479), True, 'import numpy as np\n')]
|
from zerocopy import send_from
from socket import *
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', 25000))
s.listen(1)
c,a = s.accept()
import numpy
a = numpy.arange(0.0, 50000000.0)
send_from(a, c)
c.close()
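# A matching client-side sketch (an assumption implied by the send_from/recv_into
# pairing of this recipe; not part of the original script):
#   from zerocopy import recv_into
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(('localhost', 25000))
#   a = numpy.zeros(50000000)
#   recv_into(a, c)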
|
[
"zerocopy.send_from",
"numpy.arange"
] |
[((153, 182), 'numpy.arange', 'numpy.arange', (['(0.0)', '(50000000.0)'], {}), '(0.0, 50000000.0)\n', (165, 182), False, 'import numpy\n'), ((183, 198), 'zerocopy.send_from', 'send_from', (['a', 'c'], {}), '(a, c)\n', (192, 198), False, 'from zerocopy import send_from\n')]
|
import os
from json import JSONDecodeError
from json import dump
from json import load
import numpy as np
from core.net_errors import JsonFileStructureIncorrect, JsonFileNotFound
def upload(net_object, path):
if not os.path.isfile(path):
raise JsonFileNotFound()
try:
with open(path, 'r') as file:
deserialized_file = load(file)
net_object.config = deserialized_file['config']
net_object.tags = deserialized_file.get('tags')
net_object.net = deserialized_file.get('net')
            net_object.normalization = deserialized_file.get('normalization')  # was assigned to `deviation`; unload() reads `normalization`
if net_object.net:
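                # JSON stores each weight matrix as nested lists; rebuild the ndarrays
                # and reallocate zeroed activation buffers sized by the layer config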
for l in range(1, len(net_object.config)):
net_object.net[l - 1]['w'] = np.array(net_object.net[l - 1]['w'])
                    net_object.net[l - 1]['o'] = np.zeros(net_object.config[l])
except KeyError:
raise JsonFileStructureIncorrect()
except JSONDecodeError:
raise
def unload(net_object, path):
try:
net_copy = []
for l in range(len(net_object.net)):
net_copy.append({'w': net_object.net[l]['w'].tolist()})
with open(path, 'w') as file:
file_dictionary = {
'config': net_object.config,
'tags': net_object.tags,
'net': net_copy,
'normalization': net_object.normalization
}
dump(file_dictionary, file, sort_keys=True, indent=4)
except JSONDecodeError:
raise
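# Round-trip sketch (hypothetical `net` object exposing config/tags/net/normalization):
#   unload(net, 'net.json')
#   upload(net, 'net.json')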
|
[
"core.net_errors.JsonFileStructureIncorrect",
"core.net_errors.JsonFileNotFound",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"json.load",
"json.dump"
] |
[((224, 244), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (238, 244), False, 'import os\n'), ((260, 278), 'core.net_errors.JsonFileNotFound', 'JsonFileNotFound', ([], {}), '()\n', (276, 278), False, 'from core.net_errors import JsonFileStructureIncorrect, JsonFileNotFound\n'), ((359, 369), 'json.load', 'load', (['file'], {}), '(file)\n', (363, 369), False, 'from json import load\n'), ((917, 945), 'core.net_errors.JsonFileStructureIncorrect', 'JsonFileStructureIncorrect', ([], {}), '()\n', (943, 945), False, 'from core.net_errors import JsonFileStructureIncorrect, JsonFileNotFound\n'), ((1438, 1491), 'json.dump', 'dump', (['file_dictionary', 'file'], {'sort_keys': '(True)', 'indent': '(4)'}), '(file_dictionary, file, sort_keys=True, indent=4)\n', (1442, 1491), False, 'from json import dump\n'), ((762, 798), 'numpy.array', 'np.array', (["net_object.net[l - 1]['w']"], {}), "(net_object.net[l - 1]['w'])\n", (770, 798), True, 'import numpy as np\n'), ((848, 878), 'numpy.zeros', 'np.zeros', (['net_object.config[l]'], {}), '(net_object.config[l])\n', (856, 878), True, 'import numpy as np\n')]
|
from env_wrapper import SubprocVecEnv, DummyVecEnv
import numpy as np
import multiagent.scenarios as scenarios
from multiagent.environment import MultiAgentEnv
def make_parallel_env(n_rollout_threads, seed=1):
def get_env_fn(rank):
def init_env():
env = make_env("simple_adversary")
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
# if n_rollout_threads == 1:
# return DummyVecEnv([get_env_fn(0)])
# else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def make_env(scenario_name, benchmark=False):
scenario = scenarios.load(scenario_name + ".py").Scenario()
world = scenario.make_world()
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
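# Usage sketch (assumes the SubprocVecEnv API from OpenAI baselines):
#   env = make_parallel_env(n_rollout_threads=4, seed=1)
#   obs = env.reset()  # one observation batch per rollout worker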
|
[
"multiagent.scenarios.load",
"multiagent.environment.MultiAgentEnv",
"numpy.random.seed"
] |
[((765, 851), 'multiagent.environment.MultiAgentEnv', 'MultiAgentEnv', (['world', 'scenario.reset_world', 'scenario.reward', 'scenario.observation'], {}), '(world, scenario.reset_world, scenario.reward, scenario.\n observation)\n', (778, 851), False, 'from multiagent.environment import MultiAgentEnv\n'), ((362, 396), 'numpy.random.seed', 'np.random.seed', (['(seed + rank * 1000)'], {}), '(seed + rank * 1000)\n', (376, 396), True, 'import numpy as np\n'), ((672, 709), 'multiagent.scenarios.load', 'scenarios.load', (["(scenario_name + '.py')"], {}), "(scenario_name + '.py')\n", (686, 709), True, 'import multiagent.scenarios as scenarios\n')]
|
from AlphaGo.models.policy import CNNPolicy
from AlphaGo import go
from AlphaGo.go import GameState
from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer
import numpy as np
import unittest
import os
class TestCNNPolicy(unittest.TestCase):
def test_default_policy(self):
policy = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"])
policy.eval_state(GameState())
# just hope nothing breaks
def test_batch_eval_state(self):
policy = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"])
results = policy.batch_eval_state([GameState(), GameState()])
self.assertEqual(len(results), 2) # one result per GameState
self.assertEqual(len(results[0]), 361) # each one has 361 (move,prob) pairs
def test_output_size(self):
policy19 = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"], board=19)
output = policy19.forward(policy19.preprocessor.state_to_tensor(GameState(19)))
self.assertEqual(output.shape, (1, 19 * 19))
policy13 = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"], board=13)
output = policy13.forward(policy13.preprocessor.state_to_tensor(GameState(13)))
self.assertEqual(output.shape, (1, 13 * 13))
def test_save_load(self):
policy = CNNPolicy(["board", "liberties", "sensibleness", "capture_size"])
model_file = 'TESTPOLICY.json'
weights_file = 'TESTWEIGHTS.h5'
model_file2 = 'TESTPOLICY2.json'
weights_file2 = 'TESTWEIGHTS2.h5'
# test saving model/weights separately
policy.save_model(model_file)
policy.model.save_weights(weights_file)
# test saving them together
policy.save_model(model_file2, weights_file2)
copypolicy = CNNPolicy.load_model(model_file)
copypolicy.model.load_weights(weights_file)
copypolicy2 = CNNPolicy.load_model(model_file2)
for w1, w2 in zip(copypolicy.model.get_weights(), copypolicy2.model.get_weights()):
self.assertTrue(np.all(w1 == w2))
os.remove(model_file)
os.remove(weights_file)
os.remove(model_file2)
os.remove(weights_file2)
class TestPlayers(unittest.TestCase):
def test_greedy_player(self):
gs = GameState()
policy = CNNPolicy(["board", "ones", "turns_since"])
player = GreedyPolicyPlayer(policy)
for i in range(20):
move = player.get_move(gs)
self.assertIsNotNone(move)
gs.do_move(move)
def test_probabilistic_player(self):
gs = GameState()
policy = CNNPolicy(["board", "ones", "turns_since"])
player = ProbabilisticPolicyPlayer(policy)
for i in range(20):
move = player.get_move(gs)
self.assertIsNotNone(move)
gs.do_move(move)
def test_sensible_probabilistic(self):
gs = GameState()
policy = CNNPolicy(["board", "ones", "turns_since"])
player = ProbabilisticPolicyPlayer(policy)
empty = (10, 10)
for x in range(19):
for y in range(19):
if (x, y) != empty:
gs.do_move((x, y), go.BLACK)
gs.current_player = go.BLACK
self.assertIsNone(player.get_move(gs))
def test_sensible_greedy(self):
gs = GameState()
policy = CNNPolicy(["board", "ones", "turns_since"])
player = GreedyPolicyPlayer(policy)
empty = (10, 10)
for x in range(19):
for y in range(19):
if (x, y) != empty:
gs.do_move((x, y), go.BLACK)
gs.current_player = go.BLACK
self.assertIsNone(player.get_move(gs))
if __name__ == '__main__':
unittest.main()
|
[
"AlphaGo.models.policy.CNNPolicy.load_model",
"AlphaGo.models.policy.CNNPolicy",
"AlphaGo.ai.GreedyPolicyPlayer",
"AlphaGo.go.GameState",
"unittest.main",
"AlphaGo.ai.ProbabilisticPolicyPlayer",
"numpy.all",
"os.remove"
] |
[((3295, 3310), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3308, 3310), False, 'import unittest\n'), ((300, 365), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {}), "(['board', 'liberties', 'sensibleness', 'capture_size'])\n", (309, 365), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((474, 539), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {}), "(['board', 'liberties', 'sensibleness', 'capture_size'])\n", (483, 539), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((790, 865), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {'board': '(19)'}), "(['board', 'liberties', 'sensibleness', 'capture_size'], board=19)\n", (799, 865), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1009, 1084), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {'board': '(13)'}), "(['board', 'liberties', 'sensibleness', 'capture_size'], board=13)\n", (1018, 1084), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1253, 1318), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'liberties', 'sensibleness', 'capture_size']"], {}), "(['board', 'liberties', 'sensibleness', 'capture_size'])\n", (1262, 1318), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1668, 1700), 'AlphaGo.models.policy.CNNPolicy.load_model', 'CNNPolicy.load_model', (['model_file'], {}), '(model_file)\n', (1688, 1700), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1764, 1797), 'AlphaGo.models.policy.CNNPolicy.load_model', 'CNNPolicy.load_model', (['model_file2'], {}), '(model_file2)\n', (1784, 1797), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((1925, 1946), 'os.remove', 'os.remove', (['model_file'], {}), '(model_file)\n', (1934, 1946), False, 'import os\n'), ((1949, 1972), 'os.remove', 'os.remove', (['weights_file'], {}), '(weights_file)\n', (1958, 1972), False, 'import os\n'), ((1975, 1997), 'os.remove', 'os.remove', (['model_file2'], {}), '(model_file2)\n', (1984, 1997), False, 'import os\n'), ((2000, 2024), 'os.remove', 'os.remove', (['weights_file2'], {}), '(weights_file2)\n', (2009, 2024), False, 'import os\n'), ((2104, 2115), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2113, 2115), False, 'from AlphaGo.go import GameState\n'), ((2127, 2170), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2136, 2170), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((2182, 2208), 'AlphaGo.ai.GreedyPolicyPlayer', 'GreedyPolicyPlayer', (['policy'], {}), '(policy)\n', (2200, 2208), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((2357, 2368), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2366, 2368), False, 'from AlphaGo.go import GameState\n'), ((2380, 2423), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2389, 2423), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((2435, 2468), 'AlphaGo.ai.ProbabilisticPolicyPlayer', 'ProbabilisticPolicyPlayer', (['policy'], {}), '(policy)\n', (2460, 2468), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((2619, 2630), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2628, 2630), False, 'from AlphaGo.go import GameState\n'), ((2642, 2685), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2651, 2685), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((2697, 2730), 'AlphaGo.ai.ProbabilisticPolicyPlayer', 'ProbabilisticPolicyPlayer', (['policy'], {}), '(policy)\n', (2722, 2730), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((2966, 2977), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (2975, 2977), False, 'from AlphaGo.go import GameState\n'), ((2989, 3032), 'AlphaGo.models.policy.CNNPolicy', 'CNNPolicy', (["['board', 'ones', 'turns_since']"], {}), "(['board', 'ones', 'turns_since'])\n", (2998, 3032), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((3044, 3070), 'AlphaGo.ai.GreedyPolicyPlayer', 'GreedyPolicyPlayer', (['policy'], {}), '(policy)\n', (3062, 3070), False, 'from AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n'), ((386, 397), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (395, 397), False, 'from AlphaGo.go import GameState\n'), ((577, 588), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (586, 588), False, 'from AlphaGo.go import GameState\n'), ((590, 601), 'AlphaGo.go.GameState', 'GameState', ([], {}), '()\n', (599, 601), False, 'from AlphaGo.go import GameState\n'), ((932, 945), 'AlphaGo.go.GameState', 'GameState', (['(19)'], {}), '(19)\n', (941, 945), False, 'from AlphaGo.go import GameState\n'), ((1151, 1164), 'AlphaGo.go.GameState', 'GameState', (['(13)'], {}), '(13)\n', (1160, 1164), False, 'from AlphaGo.go import GameState\n'), ((1904, 1920), 'numpy.all', 'np.all', (['(w1 == w2)'], {}), '(w1 == w2)\n', (1910, 1920), True, 'import numpy as np\n')]
|
import os
import torch
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
def get_param_matrix(model_prefix, model_dir):
"""
Grabs the parameters of a saved model and returns them as a matrix
"""
# Load and combine the parameters
param_matrix = []
for file in os.listdir(model_dir):
if file.startswith(model_prefix):
model_path = os.path.join(model_dir, file)
state_dict = torch.load(model_path)
# Grab all params in state dict
params = [state_dict[param].data.float() for param in state_dict]
# Reshape to one long parameter vector
params = nn.utils.parameters_to_vector(params)
param_matrix.append(params.cpu().numpy())
params_matrix = np.array(param_matrix)
return params_matrix
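# A typical follow-up (an assumption, not part of the original): reduce the
# high-dimensional parameter vectors to 3-D before calling plot_trajectory,
# e.g. with PCA ('model_' and 'checkpoints' are hypothetical arguments):
#   from sklearn.decomposition import PCA
#   projected = PCA(n_components=3).fit_transform(get_param_matrix('model_', 'checkpoints'))
#   plot_trajectory(projected)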
def plot_trajectory(projected_params):
# Separate components
x = projected_params[:, 0]
y = projected_params[:, 1]
z = projected_params[:, 2]
# Creating figure
    fig = plt.figure(figsize=(10, 7))
    ax = plt.axes(projection="3d")
# Creating plot
ax.scatter3D(x, y, z, color="green")
plt.title("Projected Learning Trajectory")
|
[
"os.listdir",
"torch.load",
"os.path.join",
"torch.nn.utils.parameters_to_vector",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.title"
] |
[((309, 330), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (319, 330), False, 'import os\n'), ((785, 807), 'numpy.array', 'np.array', (['param_matrix'], {}), '(param_matrix)\n', (793, 807), True, 'import numpy as np\n'), ((1024, 1051), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1034, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1088), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1071, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1197), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected Learning Trajectory"""'], {}), "('Projected Learning Trajectory')\n", (1164, 1197), True, 'import matplotlib.pyplot as plt\n'), ((399, 428), 'os.path.join', 'os.path.join', (['model_dir', 'file'], {}), '(model_dir, file)\n', (411, 428), False, 'import os\n'), ((454, 476), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (464, 476), False, 'import torch\n'), ((673, 710), 'torch.nn.utils.parameters_to_vector', 'nn.utils.parameters_to_vector', (['params'], {}), '(params)\n', (702, 710), True, 'import torch.nn as nn\n')]
|
# Author: <NAME> <<EMAIL>>
"""Module implementing the FASTA algorithm"""
import numpy as np
from math import sqrt
from scipy import linalg
import time
import logging
def _next_stepsize(deltax, deltaF, t=0):
"""A variation of spectral descent step-size selection: 'adaptive' BB method.
Reference:
---------
<NAME>, <NAME>, and <NAME>, 'Gradient methods with adaptive step-sizes,'
Comput. Optim. Appl., vol. 35, pp. 69-86, Sept. 2006
parameters
----------
deltax: ndarray
difference between coefs_current and coefs_next
deltaF: ndarray
difference between grad operator evaluated at coefs_current and coefs_next
returns
-------
float
adaptive step-size
"""
n_deltax = (deltax ** 2).sum() # linalg.norm(deltax, 'fro') ** 2
n_deltaF = (deltaF ** 2).sum() # linalg.norm(deltaF, 'fro') ** 2
innerproduct_xF = np.real((deltax * deltaF).sum())
if n_deltax == 0:
return 0
elif (n_deltaF == 0) | (innerproduct_xF == 0):
return -1
else:
tau_s = n_deltax / innerproduct_xF # steepest descent
tau_m = innerproduct_xF / n_deltaF # minimum residual
# adaptive BB method
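        # take the minimum-residual step when it is at least half the steepest-descent
        # step; otherwise fall back to a damped steepest-descent step, which guards
        # against overly long steps on ill-conditioned problems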
if 2 * tau_m > tau_s:
return tau_m
else:
return tau_s - 0.5 * tau_m
def _compute_residual(deltaf, sg):
"""Computes residuals"""
res = sqrt(((deltaf + sg) ** 2).sum())
a = sqrt((deltaf ** 2).sum())
b = sqrt((sg ** 2).sum())
res_r = res / (max(a, b) + 1e-15)
return res, res_r
def _update_coefs(x, tau, gradfx, prox, f, g, beta, fk, linesearch=True):
"""Non-monotone line search
parameters
----------
x: ndarray
current coefficients
tau: float
step size
gradfx: ndarry
gradient operator evaluated at current coefficients
prox: function handle
proximal operator of :math:`g(x)`
f: callable
smooth differentiable function, :math:`f(x)`
g: callable
non-smooth function, :math:`g(x)`
beta: float
backtracking parameter
fk: float
maximum of previous function values
returns
-------
z: ndarray
next coefficients
"""
x_hat = x - tau * gradfx
z = prox(x_hat, tau)
fz = f(z)
count = 0
if linesearch:
while fz > fk + (gradfx * (z - x)).sum() + ((z - x) ** 2).sum() / (2 * tau):
# np.square(linalg.norm(z - x, 'fro')) / (2 * tau):
count += 1
tau *= beta
x_hat = x - tau * gradfx
z = prox(x_hat, tau)
fz = f(z)
sg = (x_hat - z) / tau
return z, fz, sg, tau, count
class Fasta:
r"""Fast adaptive shrinkage/threshold Algorithm
Reference
---------
Goldstein, Tom, <NAME>, and <NAME>. "A field guide to forward-backward
splitting with a FASTA implementation." arXiv preprint arXiv:1411.3406 (2014).
Parameters
----------
f: function handle
smooth differentiable function, :math:`f(x)`
g: function handle
non-smooth convex function, :math:`g(x)`
gradf: function handle
gradient of smooth differentiable function, :math:`\\nabla f(x)`
proxg: function handle
proximal operator of non-smooth convex function
:math:`proxg(v, \\lambda) = argmin g(x) + \\frac{1}{2*\\lambda}\|x-v\|^2`
beta: float, optional
backtracking parameter
default is 0.5
n_iter: int, optional
number of iterations
default is 1000
Attributes
----------
coefs: ndvar
learned coefficients
objective_value: float
optimum objective value
residuals: list
residual values at each iteration
initial_stepsize: float, optional
created only with verbose=1 option
objective: list, optional
objective values at each iteration
created only with verbose=1 option
stepsizes: list, optional
stepsizes at each iteration
created only with verbose=1 option
backtracks: list, optional
number of backtracking steps
created only with verbose=1 option
Notes
-----
Make sure that outputs of gradf and proxg is of same size as x.
The implementation does not check for any such discrepancies.
Use
---
Solve following least square problem using fastapy
:math:`\\min .5||Ax-b||^2 + \\mu*\|x\|_1`
Create function handles
>>> def f(x): return 0.5 * linalg.norm(np.dot(A, x) - b, 2)**2 # f(x) = .5||Ax-b||^2
>>> def gradf(x): return np.dot(A.T, np.dot(A, x) - b) # gradient of f(x)
>>> def g(x): return mu * linalg.norm(x, 1) # mu|x|
>>> def proxg(x, t): return shrink(x, mu*t)
    >>> def shrink(x, mu): return np.multiply(np.sign(x), np.maximum(np.abs(x) - mu, 0))  # proxg(z,t) = sign(x)*max(|x|-mu,0)
Create FASTA instance
>>> lsq = Fasta(f, g, gradf, proxg)
Call solver
>>> lsq.learn(x0, verbose=True)
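    The learned coefficients and optimal objective are then available as attributes
    >>> x_opt, obj = lsq.coefs_, lsq.objective_value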
"""
def __init__(self, f, g, gradf, proxg, beta=0.5, n_iter=1000):
self.f = f
self.g = g
self.grad = gradf
self.prox = proxg
self.beta = beta
self.n_iter = n_iter
self.residuals = []
self._funcValues = []
self.coefs_ = None
def __str__(self):
return "Fast adaptive shrinkage/thresholding Algorithm instance"
def learn(self, coefs_init, tol=1e-4, verbose=True, linesearch=True, next_stepsize=_next_stepsize):
r"""fits the model using FASTA algorithm
parameters
----------
coefs_init: ndarray
initial guess
        tol: float, optional
            tolerance parameter
            default is 1e-4
        verbose: bool
            if True, record objective values, step-sizes and backtracking counts at each iteration
            default is True
linesearch: bool
            if True (default), uses a line search to find the step-size
next_stepsize: callable
            a callable with arguments (deltax, deltaF) that provides the next step-size.
            Default is a non-monotone ('adaptive' BB) step-size selection method.
returns
-------
self
"""
logger = logging.getLogger("FASTA")
coefs_current = np.copy(coefs_init)
grad_current = self.grad(coefs_current)
        coefs_next = coefs_current + 0.01 * np.random.randn(*coefs_current.shape)
grad_next = self.grad(coefs_next)
tau_current = next_stepsize(coefs_next - coefs_current, grad_next - grad_current, 0)
self._funcValues.append(self.f(coefs_current))
if verbose:
self.objective = []
self.objective.append(self._funcValues[-1] + self.g(coefs_current))
self.initial_stepsize = np.copy(tau_current)
self.stepsizes = []
self.backtracks = []
start = time.time()
logger.debug(f"Iteration \t objective value \t step-size \t backtracking steps taken \t residual")
for i in range(self.n_iter):
coefs_next, objective_next, sub_grad, tau, n_backtracks \
= _update_coefs(coefs_current, tau_current, grad_current,
self.prox, self.f, self.g, self.beta, max(self._funcValues), linesearch)
self._funcValues.append(objective_next)
grad_next = self.grad(coefs_next)
# Find residual
delta_coef = coefs_current - coefs_next
delta_grad = grad_current - grad_next
residual, residual_r = _compute_residual(grad_next, sub_grad)
self.residuals.append(residual)
residual_n = residual / (self.residuals[0] + 1e-15)
# Find step size for next iteration
tau_next = next_stepsize(delta_coef, delta_grad, i)
if verbose:
self.stepsizes.append(tau)
self.backtracks.append(n_backtracks)
self.objective.append(objective_next + self.g(coefs_next))
logger.debug(
f"{i} \t {self.objective[i]} \t {self.stepsizes[i]} \t {self.backtracks[i]} \t {self.residuals[i]}")
# Prepare for next iteration
coefs_current = coefs_next
grad_current = grad_next
if tau_next == 0.0 or min(residual_n, residual_r) < tol: # convergence reached
break
            elif tau_next < 0.0:  # non-convex problems -> negative step-size -> reuse the previous value
tau_current = tau
else:
tau_current = tau_next
end = time.time()
self.coefs_ = coefs_current
self.objective_value = objective_next + self.g(coefs_current)
if verbose:
logger.debug(f"total time elapsed : {end - start}s")
return self
|
[
"logging.getLogger",
"numpy.copy",
"numpy.random.randn",
"time.time"
] |
[((6219, 6245), 'logging.getLogger', 'logging.getLogger', (['"""FASTA"""'], {}), "('FASTA')\n", (6236, 6245), False, 'import logging\n'), ((6270, 6289), 'numpy.copy', 'np.copy', (['coefs_init'], {}), '(coefs_init)\n', (6277, 6289), True, 'import numpy as np\n'), ((6908, 6919), 'time.time', 'time.time', ([], {}), '()\n', (6917, 6919), False, 'import time\n'), ((8629, 8640), 'time.time', 'time.time', ([], {}), '()\n', (8638, 8640), False, 'import time\n'), ((6805, 6825), 'numpy.copy', 'np.copy', (['tau_current'], {}), '(tau_current)\n', (6812, 6825), True, 'import numpy as np\n'), ((6382, 6445), 'numpy.random.randn', 'np.random.randn', (['coefs_current.shape[0]', 'coefs_current.shape[1]'], {}), '(coefs_current.shape[0], coefs_current.shape[1])\n', (6397, 6445), True, 'import numpy as np\n')]
|
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import model_from_json
import numpy as np
import tensorflow.keras.models as models
def predict(temp_file):
test_image = image.load_img(temp_file, target_size = (224, 224))
test_image = image.img_to_array(test_image)
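    # add a batch dimension: (224, 224, 3) -> (1, 224, 224, 3)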
test_image = np.expand_dims(test_image, axis = 0)
with open('Model Weights _ Json/model.json','r') as json_file:
json_model = json_file.read()
model = model_from_json(json_model)
model.load_weights('Model Weights _ Json/model_weights.h5')
result = model.predict(test_image)
return np.argmax(result)
|
[
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.models.model_from_json",
"numpy.argmax",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array"
] |
[((203, 252), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['temp_file'], {'target_size': '(224, 224)'}), '(temp_file, target_size=(224, 224))\n', (217, 252), False, 'from tensorflow.keras.preprocessing import image\n'), ((272, 302), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (290, 302), False, 'from tensorflow.keras.preprocessing import image\n'), ((320, 354), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (334, 354), True, 'import numpy as np\n'), ((474, 501), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['json_model'], {}), '(json_model)\n', (489, 501), False, 'from tensorflow.keras.models import model_from_json\n'), ((616, 633), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (625, 633), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import datetime
def func(x, a, b):
return a + b*x
def exp_regression(x, y):
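    # linearize y = a*exp(b*x) as log(y) = log(a) + b*x, fit the line, then
    # exponentiate the intercept to recover a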
p, _ = curve_fit(func, x, np.log(y))
p[0] = np.exp(p[0])
return p
def r2(coeffs, x, y):
    # use the fitted coefficients that were passed in (the original referenced the global `out`)
    return r2_score(np.log(y), np.log(coeffs[0]*np.exp(coeffs[1]*x)))
# calculate exponential fit for error rate extrapolation
# report as annual decay (i.e. error rate decreases by fixed factor every year)
errors = pd.read_csv('error_rates.csv')
x = pd.to_datetime(errors.iloc[:, 0]).astype(int)
y = errors.iloc[:, 1]
out = exp_regression(x, y)
print('annual error rate decay', np.exp(out[1]*pd.Timedelta(datetime.timedelta(days=365.2422)).value))  # .value gives nanoseconds, matching the epoch-ns integer x-axis (`.delta` was removed in pandas 2.0)
print('R^2', r2(out, x, y))
|
[
"pandas.read_csv",
"numpy.log",
"numpy.exp",
"datetime.timedelta",
"pandas.to_datetime"
] |
[((543, 573), 'pandas.read_csv', 'pd.read_csv', (['"""error_rates.csv"""'], {}), "('error_rates.csv')\n", (554, 573), True, 'import pandas as pd\n'), ((279, 291), 'numpy.exp', 'np.exp', (['p[0]'], {}), '(p[0])\n', (285, 291), True, 'import numpy as np\n'), ((257, 266), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (263, 266), True, 'import numpy as np\n'), ((352, 361), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (358, 361), True, 'import numpy as np\n'), ((578, 611), 'pandas.to_datetime', 'pd.to_datetime', (['errors.iloc[:, 0]'], {}), '(errors.iloc[:, 0])\n', (592, 611), True, 'import pandas as pd\n'), ((377, 395), 'numpy.exp', 'np.exp', (['(out[1] * x)'], {}), '(out[1] * x)\n', (383, 395), True, 'import numpy as np\n'), ((733, 766), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(365.2422)'}), '(days=365.2422)\n', (751, 766), False, 'import datetime\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
import h5py
import json
import cv2
import math
import logging
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.xception import preprocess_input, decode_predictions
logging.basicConfig(level=logging.INFO)
sampling_rate = 5
# separate lists: the original chained assignments aliased one list per line
sampled_frames = []
frame_stamps = []
top1_labels = []
top1_scores = []
def sampling_time_stamps(_sample_path):
cap = cv2.VideoCapture(_sample_path)
total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    logging.info('Total no. of frames in video: %s', total_frame_count)
    for i in range(sampling_rate):
        val = round(total_frame_count/sampling_rate)*(i+1)
        frame_stamps.append(val)
    return cap  # sampling_frames() needs the capture; it was previously an undefined global there
def sampling_frames(cap):
    frameId, frame_count = 5, 0
success,frame = cap.read()
while success:
frame_count+=1
if frame_count in frame_stamps and frameId >= 1:
frame = cv2.resize(frame, (299,299))
sampled_frames.append(frame)
success,frame = cap.read()
frameId-=1
else:
success,frame = cap.read()
def generate_and_average_predictions():
base_model = keras.applications.Xception(
weights='imagenet') # Load weights pre-trained on ImageNet.
for i in range(len(sampled_frames)):
img = sampled_frames[i]
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = base_model.predict(x)
print('Prediction level:', (i+1), decode_predictions(preds, top=5)[0])
top1_labels.append(decode_predictions(preds, top=1)[0][0][1])
top1_scores.append(decode_predictions(preds, top=1)[0][0][2])
return top1_labels, top1_scores
def run(_sample_path):
    cap = sampling_time_stamps(_sample_path)
    sampling_frames(cap)
labels, scores = generate_and_average_predictions()
return labels, scores
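# Usage sketch (assuming a local video file; 'sample.mp4' is hypothetical):
#   labels, scores = run('sample.mp4')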
|
[
"logging.basicConfig",
"cv2.resize",
"tensorflow.keras.applications.xception.preprocess_input",
"tensorflow.keras.applications.xception.decode_predictions",
"tensorflow.keras.applications.Xception",
"cv2.VideoCapture",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array",
"logging.info"
] |
[((333, 372), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (352, 372), False, 'import logging\n'), ((519, 549), 'cv2.VideoCapture', 'cv2.VideoCapture', (['_sample_path'], {}), '(_sample_path)\n', (535, 549), False, 'import cv2\n'), ((613, 677), 'logging.info', 'logging.info', (['"""Total no. of frames in video:"""', 'total_frame_count'], {}), "('Total no. of frames in video:', total_frame_count)\n", (625, 677), False, 'import logging\n'), ((1271, 1318), 'tensorflow.keras.applications.Xception', 'keras.applications.Xception', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (1298, 1318), False, 'from tensorflow import keras\n'), ((1448, 1471), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1466, 1471), False, 'from tensorflow.keras.preprocessing import image\n'), ((1482, 1507), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1496, 1507), True, 'import numpy as np\n'), ((1518, 1537), 'tensorflow.keras.applications.xception.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1534, 1537), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n'), ((1006, 1035), 'cv2.resize', 'cv2.resize', (['frame', '(299, 299)'], {}), '(frame, (299, 299))\n', (1016, 1035), False, 'import cv2\n'), ((1614, 1646), 'tensorflow.keras.applications.xception.decode_predictions', 'decode_predictions', (['preds'], {'top': '(5)'}), '(preds, top=5)\n', (1632, 1646), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n'), ((1678, 1710), 'tensorflow.keras.applications.xception.decode_predictions', 'decode_predictions', (['preds'], {'top': '(1)'}), '(preds, top=1)\n', (1696, 1710), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n'), ((1746, 1778), 'tensorflow.keras.applications.xception.decode_predictions', 'decode_predictions', (['preds'], {'top': '(1)'}), '(preds, top=1)\n', (1764, 1778), False, 'from tensorflow.keras.applications.xception import preprocess_input, decode_predictions\n')]
|
from collections import namedtuple
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import minimum_spanning_tree
from .. import logging as logg
from ..neighbors import Neighbors
from .. import utils
from .. import settings
def paga(
adata,
groups='louvain',
use_rna_velocity=False,
copy=False):
"""\
Generate cellular maps of differentiation manifolds with complex
topologies [Wolf17i]_.
Partition-based graph abstraction (PAGA) quantifies the connectivities of
partitions of a neighborhood graph of single cells, thereby generating a
much simpler abstracted graph whose nodes label the partitions. Together
with a random walk-based distance measure, this generates a partial
coordinatization of data useful for exploring and explaining its variation.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
    groups : categorical annotation of observations or 'louvain', optional (default: 'louvain')
        Criterion to determine the resulting partitions of the single-cell
        graph. 'louvain' uses the Louvain algorithm and optimizes
modularity of the graph. You can also pass your predefined groups by
choosing any categorical annotation of observations (`adata.obs`).
use_rna_velocity : `bool` (default: `False`)
Use RNA velocity to orient edges in the abstracted graph and estimate transitions.
copy : `bool`, optional (default: `False`)
Copy `adata` before computation and return a copy. Otherwise, perform
computation inplace and return `None`.
Returns
-------
Returns or updates `adata` depending on `copy` with
    connectivities : np.ndarray (adata.uns['paga']['connectivities'])
        The full adjacency matrix of the abstracted graph, weights
        correspond to connectivities.
    confidence : np.ndarray (adata.uns['paga']['confidence'])
        The full adjacency matrix of the abstracted graph, weights
        correspond to confidence in the presence of an edge.
    confidence_tree : sc.sparse csr matrix (adata.uns['paga']['confidence_tree'])
        The adjacency matrix of the tree-like subgraph that best explains
        the topology.
"""
if 'neighbors' not in adata.uns:
raise ValueError(
'You need to run `pp.neighbors` first to compute a neighborhood graph.')
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
logg.info('running partition-based graph abstraction (PAGA)', reset=True)
paga = PAGA(adata, groups, use_rna_velocity=use_rna_velocity)
paga.compute()
# only add if not present
if 'paga' not in adata.uns:
adata.uns['paga'] = {}
if not use_rna_velocity:
adata.uns['paga']['connectivities'] = paga.connectivities_coarse
adata.uns['paga']['confidence'] = paga.confidence
adata.uns['paga']['confidence_tree'] = paga.confidence_tree
adata.uns[groups + '_sizes'] = np.array(paga.vc.sizes())
else:
adata.uns['paga']['transitions_confidence'] = paga.transitions_confidence
adata.uns['paga']['transitions_ttest'] = paga.transitions_ttest
adata.uns['paga']['groups'] = groups
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
if use_rna_velocity:
logg.hint(
'added\n'
' \'paga/transitions_confidence\', confidence adjacency (adata.uns)\n'
            ' \'paga/transitions_ttest\', t-test adjacency (adata.uns)')
else:
logg.hint(
'added\n'
' \'paga/connectivities\', connectivities adjacency (adata.uns)\n'
' \'paga/confidence\', confidence adjacency (adata.uns)\n'
' \'paga/confidence_tree\', confidence subtree (adata.uns)')
return adata if copy else None
class PAGA(Neighbors):
def __init__(self, adata, groups, use_rna_velocity=False,
tree_based_confidence=False):
super(PAGA, self).__init__(adata)
self._groups = groups
self._tree_based_confidence = tree_based_confidence
self._use_rna_velocity = use_rna_velocity
def compute(self):
if self._use_rna_velocity:
self.compute_transitions_coarse()
else:
self.compute_connectivities_coarse()
self.compute_confidence()
def compute_connectivities_coarse(self):
import igraph
ones = self.connectivities.copy()
# graph where edges carry weight 1
ones.data = np.ones(len(ones.data))
g = utils.get_igraph_from_adjacency(ones)
self.vc = igraph.VertexClustering(
g, membership=self._adata.obs[self._groups].cat.codes.values)
cg = self.vc.cluster_graph(combine_edges='sum')
self.connectivities_coarse = utils.get_sparse_from_igraph(cg, weight_attr='weight')/2
def compute_confidence(self):
"""Translates the connectivities_coarse measure into a confidence measure.
"""
pseudo_distance = self.connectivities_coarse.copy()
pseudo_distance.data = 1./pseudo_distance.data
connectivities_coarse_tree = minimum_spanning_tree(pseudo_distance)
connectivities_coarse_tree.data = 1./connectivities_coarse_tree.data
connectivities_coarse_tree_indices = [
connectivities_coarse_tree[i].nonzero()[1]
for i in range(connectivities_coarse_tree.shape[0])]
# inter- and intra-cluster based confidence
if not self._tree_based_confidence:
total_n = self.n_neighbors * np.array(self.vc.sizes())
maximum = self.connectivities_coarse.max()
confidence = self.connectivities_coarse.copy() # initializing
for i in range(self.connectivities_coarse.shape[0]):
for j in range(i+1, self.connectivities_coarse.shape[1]):
if self.connectivities_coarse[i, j] > 0:
geom_mean = np.sqrt(total_n[i] * total_n[j])
confidence[i, j] = self.connectivities_coarse[i, j] / geom_mean
confidence[j, i] = confidence[i, j]
# tree-based confidence
else:
median_connectivities_coarse_tree = np.median(connectivities_coarse_tree.data)
confidence = self.connectivities_coarse.copy()
confidence.data[self.connectivities_coarse.data >= median_connectivities_coarse_tree] = 1
connectivities_coarse_adjusted = self.connectivities_coarse.copy()
connectivities_coarse_adjusted.data -= median_connectivities_coarse_tree
connectivities_coarse_adjusted.data = np.exp(connectivities_coarse_adjusted.data)
index = self.connectivities_coarse.data < median_connectivities_coarse_tree
confidence.data[index] = connectivities_coarse_adjusted.data[index]
confidence_tree = self.compute_confidence_tree(
confidence, connectivities_coarse_tree_indices)
self.confidence = confidence
self.confidence_tree = confidence_tree
def compute_confidence_tree(
self, confidence, connectivities_coarse_tree_indices):
confidence_tree = sp.sparse.lil_matrix(confidence.shape, dtype=float)
for i, neighbors in enumerate(connectivities_coarse_tree_indices):
if len(neighbors) > 0:
confidence_tree[i, neighbors] = confidence[i, neighbors]
return confidence_tree.tocsr()
def compute_transitions_coarse(self):
# analogous code using networkx
# membership = adata.obs['clusters'].cat.codes.tolist()
# partition = defaultdict(list)
# for n, p in zip(list(range(len(G))), membership):
# partition[p].append(n)
# partition = partition.values()
# g_abstracted = nx.quotient_graph(g, partition, relabel=True)
# for some reason, though, edges aren't oriented in the quotient
# graph...
import igraph
g = utils.get_igraph_from_adjacency(
self._adata.uns['velocyto_transitions'], directed=True)
vc = igraph.VertexClustering(
g, membership=self._adata.obs[self._groups].cat.codes.values)
cg_full = vc.cluster_graph(combine_edges=False)
g_bool = utils.get_igraph_from_adjacency(
self._adata.uns['velocyto_transitions'].astype('bool'), directed=True)
vc_bool = igraph.VertexClustering(
g_bool, membership=self._adata.obs[self._groups].cat.codes.values)
cg_bool = vc_bool.cluster_graph(combine_edges='sum') # collapsed version
transitions_coarse = utils.get_sparse_from_igraph(cg_bool, weight_attr='weight')
# translate this into a confidence measure
# the number of outgoing edges
# total_n = np.zeros(len(vc.sizes()))
# # (this is not the convention of standard stochastic matrices)
# total_outgoing = transitions_coarse.sum(axis=1)
# for i in range(len(total_n)):
# total_n[i] = vc.subgraph(i).ecount()
# total_n[i] += total_outgoing[i, 0]
# use the topology based reference, the velocity one might have very small numbers
total_n = self.n_neighbors * np.array(vc_bool.sizes())
transitions_ttest = transitions_coarse.copy()
transitions_confidence = transitions_coarse.copy()
from scipy.stats import ttest_1samp
for i in range(transitions_coarse.shape[0]):
# no symmetry in transitions_coarse, hence we should not restrict to
# upper triangle
neighbors = transitions_coarse[i].nonzero()[1]
for j in neighbors:
forward = cg_full.es.select(_source=i, _target=j)['weight']
backward = cg_full.es.select(_source=j, _target=i)['weight']
# backward direction: add minus sign
values = np.array(list(forward) + list(-np.array(backward)))
# require some minimal number of observations
if len(values) < 5:
transitions_ttest[i, j] = 0
transitions_ttest[j, i] = 0
transitions_confidence[i, j] = 0
transitions_confidence[j, i] = 0
continue
t, prob = ttest_1samp(values, 0.0)
if t > 0:
# number of outgoing edges greater than number of ingoing edges
# i.e., transition from i to j
transitions_ttest[i, j] = -np.log10(max(prob, 1e-10))
transitions_ttest[j, i] = 0
else:
transitions_ttest[j, i] = -np.log10(max(prob, 1e-10))
transitions_ttest[i, j] = 0
# geom_mean
geom_mean = np.sqrt(total_n[i] * total_n[j])
diff = (len(forward) - len(backward)) / geom_mean
if diff > 0:
transitions_confidence[i, j] = diff
transitions_confidence[j, i] = 0
else:
transitions_confidence[j, i] = -diff
transitions_confidence[i, j] = 0
transitions_ttest.eliminate_zeros()
transitions_confidence.eliminate_zeros()
# transpose in order to match convention of stochastic matrices
# entry ij means transition from j to i
self.transitions_ttest = transitions_ttest.T
self.transitions_confidence = transitions_confidence.T
def paga_degrees(adata):
"""Compute the degree of each node in the abstracted graph.
Parameters
----------
adata : AnnData
Annotated data matrix.
Returns
-------
degrees : list
List of degrees for each node.
"""
import networkx as nx
g = nx.Graph(adata.uns['paga']['confidence'])
degrees = [d for _, d in g.degree(weight='weight')]
return degrees
def paga_expression_entropies(adata):
"""Compute the median expression entropy for each node-group.
Parameters
----------
adata : AnnData
Annotated data matrix.
Returns
-------
entropies : list
Entropies of median expressions for each node.
"""
from scipy.stats import entropy
groups_order, groups_masks = utils.select_groups(
adata, key=adata.uns['paga']['groups'])
entropies = []
for mask in groups_masks:
X_mask = adata.X[mask]
x_median = np.median(X_mask, axis=0)
x_probs = (x_median - np.min(x_median)) / (np.max(x_median) - np.min(x_median))
entropies.append(entropy(x_probs))
return entropies
def paga_compare_paths(adata1, adata2,
adjacency_key='confidence', adjacency_key2=None):
"""Compare paths in abstracted graphs in two datasets.
    Compute the fraction of consistent paths between leaves, a measure for the
topological similarity between graphs.
By increasing the verbosity to level 4 and 5, the paths that do not agree
and the paths that agree are written to the output, respectively.
The PAGA "groups key" needs to be the same in both objects.
Parameters
----------
adata1, adata2 : AnnData
Annotated data matrices to compare.
adjacency_key : str
Key for indexing the adjacency matrices in `.uns['paga']` to be used in
adata1 and adata2.
adjacency_key2 : str, None
If provided, used for adata2.
Returns
-------
    namedtuple with attributes ``n_steps`` (total number of steps in paths)
and ``frac_steps`` (fraction of consistent steps), ``n_paths`` and
``frac_paths``.
"""
import networkx as nx
g1 = nx.Graph(adata1.uns['paga'][adjacency_key])
g2 = nx.Graph(adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else adjacency_key])
leaf_nodes1 = [str(x) for x in g1.nodes() if g1.degree(x) == 1]
logg.msg('leaf nodes in graph 1: {}'.format(leaf_nodes1), v=5, no_indent=True)
paga_groups = adata1.uns['paga']['groups']
asso_groups1 = utils.identify_groups(adata1.obs[paga_groups].values,
adata2.obs[paga_groups].values)
asso_groups2 = utils.identify_groups(adata2.obs[paga_groups].values,
adata1.obs[paga_groups].values)
orig_names1 = adata1.obs[paga_groups].cat.categories
orig_names2 = adata2.obs[paga_groups].cat.categories
import itertools
n_steps = 0
n_agreeing_steps = 0
n_paths = 0
n_agreeing_paths = 0
# loop over all pairs of leaf nodes in the reference adata1
for (r, s) in itertools.combinations(leaf_nodes1, r=2):
r2, s2 = asso_groups1[r][0], asso_groups1[s][0]
orig_names = [orig_names1[int(i)] for i in [r, s]]
orig_names += [orig_names2[int(i)] for i in [r2, s2]]
        logg.msg('compare shortest paths between leaves ({}, {}) in graph1 and ({}, {}) in graph2:'
                 .format(*orig_names), v=4, no_indent=True)
no_path1 = False
try:
path1 = [str(x) for x in nx.shortest_path(g1, int(r), int(s))]
except nx.NetworkXNoPath:
no_path1 = True
no_path2 = False
try:
path2 = [str(x) for x in nx.shortest_path(g2, int(r2), int(s2))]
except nx.NetworkXNoPath:
no_path2 = True
if no_path1 and no_path2:
# consistent behavior
n_paths += 1
n_agreeing_paths += 1
n_steps += 1
n_agreeing_steps += 1
logg.msg('there are no connecting paths in both graphs', v=5, no_indent=True)
continue
elif no_path1 or no_path2:
# non-consistent result
n_paths += 1
n_steps += 1
continue
if len(path1) >= len(path2):
path_mapped = [asso_groups1[l] for l in path1]
path_compare = path2
path_compare_id = 2
path_compare_orig_names = [[orig_names2[int(s)] for s in l] for l in path_compare]
path_mapped_orig_names = [[orig_names2[int(s)] for s in l] for l in path_mapped]
else:
path_mapped = [asso_groups2[l] for l in path2]
path_compare = path1
path_compare_id = 1
path_compare_orig_names = [[orig_names1[int(s)] for s in l] for l in path_compare]
path_mapped_orig_names = [[orig_names1[int(s)] for s in l] for l in path_mapped]
n_agreeing_steps_path = 0
ip_progress = 0
for il, l in enumerate(path_compare[:-1]):
for ip, p in enumerate(path_mapped):
if ip >= ip_progress and l in p:
# check whether we can find the step forward of path_compare in path_mapped
if (ip + 1 < len(path_mapped)
and
path_compare[il + 1] in path_mapped[ip + 1]):
# make sure that a step backward leads us to the same value of l
# in case we "jumped"
logg.msg('found matching step ({} -> {}) at position {} in path{} and position {} in path_mapped'
.format(l, path_compare_orig_names[il + 1], il, path_compare_id, ip), v=6)
consistent_history = True
for iip in range(ip, ip_progress, -1):
if l not in path_mapped[iip - 1]:
consistent_history = False
if consistent_history:
# here, we take one step further back (ip_progress - 1); it's implied that this
# was ok in the previous step
logg.msg(' step(s) backward to position(s) {} in path_mapped are fine, too: valid step'
.format(list(range(ip - 1, ip_progress - 2, -1))), v=6)
n_agreeing_steps_path += 1
ip_progress = ip + 1
break
n_steps_path = len(path_compare) - 1
n_agreeing_steps += n_agreeing_steps_path
n_steps += n_steps_path
n_paths += 1
if n_agreeing_steps_path == n_steps_path: n_agreeing_paths += 1
# only for the output, use original names
path1_orig_names = [orig_names1[int(s)] for s in path1]
path2_orig_names = [orig_names2[int(s)] for s in path2]
logg.msg(' path1 = {},\n'
'path_mapped = {},\n'
' path2 = {},\n'
'-> n_agreeing_steps = {} / n_steps = {}.'
.format(path1_orig_names,
[list(p) for p in path_mapped_orig_names],
path2_orig_names,
n_agreeing_steps_path, n_steps_path), v=5, no_indent=True)
Result = namedtuple('paga_compare_paths_result',
['frac_steps', 'n_steps', 'frac_paths', 'n_paths'])
return Result(frac_steps=n_agreeing_steps/n_steps if n_steps > 0 else np.nan,
n_steps=n_steps if n_steps > 0 else np.nan,
frac_paths=n_agreeing_paths/n_paths if n_steps > 0 else np.nan,
n_paths=n_paths if n_steps > 0 else np.nan)
|
[
"collections.namedtuple",
"scipy.sparse.lil_matrix",
"numpy.median",
"scipy.stats.entropy",
"numpy.sqrt",
"networkx.Graph",
"numpy.max",
"itertools.combinations",
"numpy.exp",
"numpy.array",
"scipy.sparse.csgraph.minimum_spanning_tree",
"igraph.VertexClustering",
"scipy.stats.ttest_1samp",
"numpy.min"
] |
[((11850, 11891), 'networkx.Graph', 'nx.Graph', (["adata.uns['paga']['confidence']"], {}), "(adata.uns['paga']['confidence'])\n", (11858, 11891), True, 'import networkx as nx\n'), ((13730, 13773), 'networkx.Graph', 'nx.Graph', (["adata1.uns['paga'][adjacency_key]"], {}), "(adata1.uns['paga'][adjacency_key])\n", (13738, 13773), True, 'import networkx as nx\n'), ((13783, 13880), 'networkx.Graph', 'nx.Graph', (["adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else\n adjacency_key]"], {}), "(adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else\n adjacency_key])\n", (13791, 13880), True, 'import networkx as nx\n'), ((14667, 14707), 'itertools.combinations', 'itertools.combinations', (['leaf_nodes1'], {'r': '(2)'}), '(leaf_nodes1, r=2)\n', (14689, 14707), False, 'import itertools\n'), ((18931, 19026), 'collections.namedtuple', 'namedtuple', (['"""paga_compare_paths_result"""', "['frac_steps', 'n_steps', 'frac_paths', 'n_paths']"], {}), "('paga_compare_paths_result', ['frac_steps', 'n_steps',\n 'frac_paths', 'n_paths'])\n", (18941, 19026), False, 'from collections import namedtuple\n'), ((4669, 4759), 'igraph.VertexClustering', 'igraph.VertexClustering', (['g'], {'membership': 'self._adata.obs[self._groups].cat.codes.values'}), '(g, membership=self._adata.obs[self._groups].cat.\n codes.values)\n', (4692, 4759), False, 'import igraph\n'), ((5200, 5238), 'scipy.sparse.csgraph.minimum_spanning_tree', 'minimum_spanning_tree', (['pseudo_distance'], {}), '(pseudo_distance)\n', (5221, 5238), False, 'from scipy.sparse.csgraph import minimum_spanning_tree\n'), ((7244, 7295), 'scipy.sparse.lil_matrix', 'sp.sparse.lil_matrix', (['confidence.shape'], {'dtype': 'float'}), '(confidence.shape, dtype=float)\n', (7264, 7295), True, 'import scipy as sp\n'), ((8154, 8244), 'igraph.VertexClustering', 'igraph.VertexClustering', (['g'], {'membership': 'self._adata.obs[self._groups].cat.codes.values'}), '(g, membership=self._adata.obs[self._groups].cat.\n codes.values)\n', (8177, 8244), False, 'import igraph\n'), ((8461, 8556), 'igraph.VertexClustering', 'igraph.VertexClustering', (['g_bool'], {'membership': 'self._adata.obs[self._groups].cat.codes.values'}), '(g_bool, membership=self._adata.obs[self._groups].\n cat.codes.values)\n', (8484, 8556), False, 'import igraph\n'), ((12501, 12526), 'numpy.median', 'np.median', (['X_mask'], {'axis': '(0)'}), '(X_mask, axis=0)\n', (12510, 12526), True, 'import numpy as np\n'), ((6287, 6329), 'numpy.median', 'np.median', (['connectivities_coarse_tree.data'], {}), '(connectivities_coarse_tree.data)\n', (6296, 6329), True, 'import numpy as np\n'), ((6705, 6748), 'numpy.exp', 'np.exp', (['connectivities_coarse_adjusted.data'], {}), '(connectivities_coarse_adjusted.data)\n', (6711, 6748), True, 'import numpy as np\n'), ((12640, 12656), 'scipy.stats.entropy', 'entropy', (['x_probs'], {}), '(x_probs)\n', (12647, 12656), False, 'from scipy.stats import entropy\n'), ((10346, 10370), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['values', '(0.0)'], {}), '(values, 0.0)\n', (10357, 10370), False, 'from scipy.stats import ttest_1samp\n'), ((10854, 10886), 'numpy.sqrt', 'np.sqrt', (['(total_n[i] * total_n[j])'], {}), '(total_n[i] * total_n[j])\n', (10861, 10886), True, 'import numpy as np\n'), ((12557, 12573), 'numpy.min', 'np.min', (['x_median'], {}), '(x_median)\n', (12563, 12573), True, 'import numpy as np\n'), ((12578, 12594), 'numpy.max', 'np.max', (['x_median'], {}), '(x_median)\n', (12584, 12594), True, 'import numpy as np\n'), ((12597, 12613), 'numpy.min', 'np.min', (['x_median'], {}), '(x_median)\n', (12603, 12613), True, 'import numpy as np\n'), ((6012, 6044), 'numpy.sqrt', 'np.sqrt', (['(total_n[i] * total_n[j])'], {}), '(total_n[i] * total_n[j])\n', (6019, 6044), True, 'import numpy as np\n'), ((9970, 9988), 'numpy.array', 'np.array', (['backward'], {}), '(backward)\n', (9978, 9988), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 5 01:34:00 2021
@author: yrc2
"""
import biosteam as bst
import biorefineries.oilcane as oc
from biosteam.utils import CABBI_colors, colors
from thermosteam.utils import set_figure_size, set_font, roundsigfigs
from thermosteam.units_of_measure import format_units
from colorpalette import Palette
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from warnings import warn
import numpy as np
import pandas as pd
from matplotlib.gridspec import GridSpec
from . import _variable_mockups as variables
from ._variable_mockups import (
tea_monte_carlo_metric_mockups,
tea_monte_carlo_derivative_metric_mockups,
lca_monte_carlo_metric_mockups,
lca_monte_carlo_derivative_metric_mockups,
MFPP, TCI, electricity_production, natural_gas_consumption,
ethanol_production, biodiesel_production,
GWP_ethanol, GWP_biodiesel, GWP_electricity,
GWP_ethanol_allocation, GWP_biodiesel_allocation,
GWP_economic, MFPP_derivative,
TCI_derivative,
ethanol_production_derivative,
biodiesel_production_derivative,
electricity_production_derivative,
natural_gas_consumption_derivative,
GWP_ethanol_derivative,
)
from ._load_data import (
images_folder,
get_monte_carlo,
spearman_file,
)
import os
from._parse_configuration import format_name
__all__ = (
'plot_all',
'plot_montecarlo_main_manuscript',
'plot_breakdowns',
'plot_montecarlo_feedstock_comparison',
'plot_montecarlo_configuration_comparison',
'plot_montecarlo_agile_comparison',
'plot_montecarlo_derivative',
'plot_montecarlo_absolute',
'plot_spearman_tea',
'plot_spearman_lca',
'plot_spearman_tea_short',
'plot_spearman_lca_short',
'plot_monte_carlo_across_coordinate',
'monte_carlo_box_plot',
'plot_monte_carlo',
'plot_spearman',
'plot_configuration_breakdown',
'plot_TCI_areas_across_oil_content',
'plot_heatmap_comparison',
'plot_feedstock_conventional_comparison_kde',
'plot_feedstock_cellulosic_comparison_kde',
'plot_configuration_comparison_kde',
'plot_open_comparison_kde',
'plot_feedstock_comparison_kde',
'plot_crude_configuration_comparison_kde',
'plot_agile_comparison_kde',
'plot_separated_configuration_comparison_kde',
'area_colors',
'area_hatches',
)
area_colors = {
'Feedstock handling': CABBI_colors.teal,
'Juicing': CABBI_colors.green_dirty,
'EtOH prod.': CABBI_colors.blue,
'Ethanol production': CABBI_colors.blue,
'Oil ext.': CABBI_colors.brown,
'Oil extraction': CABBI_colors.brown,
'Biod. prod.': CABBI_colors.orange,
'Biodiesel production': CABBI_colors.orange,
'Pretreatment': CABBI_colors.green,
'Wastewater treatment': colors.purple,
'CH&P': CABBI_colors.yellow,
'Co-Heat and Power': CABBI_colors.yellow,
'Utilities': colors.red,
'Storage': CABBI_colors.grey,
'HXN': colors.orange,
'Heat exchanger network': colors.orange,
}
area_hatches = {
'Feedstock handling': 'x',
'Juicing': '-',
'EtOH prod.': '/',
'Ethanol production': '/',
'Oil ext.': '\\',
'Oil extraction': '\\',
'Biod. prod.': '/|',
'Biodiesel production': '/|',
'Pretreatment': '//',
'Wastewater treatment': r'\\',
'CH&P': '',
'Co-Heat and Power': '',
'Utilities': '\\|',
'Storage': '',
'HXN': '+',
'Heat exchanger network': '+',
}
for i in area_colors: area_colors[i] = area_colors[i].tint(20)
palette = Palette(**area_colors)
letter_color = colors.neutral.shade(25).RGBn
GWP_units_L = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{L}^{-1}$'
GWP_units_L_small = GWP_units_L.replace('kg', 'g')
CABBI_colors.orange_hatch = CABBI_colors.orange.copy(hatch='////')
ethanol_over_biodiesel = bst.MockVariable('Ethanol over biodiesel', 'L/MT', 'Biorefinery')
GWP_ethanol_displacement = variables.GWP_ethanol_displacement
production = (ethanol_production, biodiesel_production)
mc_metric_settings = {
'MFPP': (MFPP, f"MFPP\n[{format_units('USD/MT')}]", None),
'TCI': (TCI, f"TCI\n[{format_units('10^6*USD')}]", None),
'production': (production, f"Production\n[{format_units('L/MT')}]", None),
'electricity_production': (electricity_production, f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
'natural_gas_consumption': (natural_gas_consumption, f"NG cons.\n[{format_units('m^3/MT')}]", None),
'GWP_ethanol_displacement': (GWP_ethanol_displacement, "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
'GWP_economic': ((GWP_ethanol, GWP_biodiesel), "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
'GWP_energy': ((GWP_ethanol_allocation, GWP_biodiesel_allocation), "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
}
mc_comparison_settings = {
'MFPP': (MFPP, r"$\Delta$" + f"MFPP\n[{format_units('USD/MT')}]", None),
'TCI': (TCI, r"$\Delta$" + f"TCI\n[{format_units('10^6*USD')}]", None),
'production': (production, r"$\Delta$" + f"Production\n[{format_units('L/MT')}]", None),
'electricity_production': (electricity_production, r"$\Delta$" + f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
'natural_gas_consumption': (natural_gas_consumption, r"$\Delta$" + f"NG cons.\n[{format_units('m^3/MT')}]", None),
'GWP_ethanol_displacement': (GWP_ethanol_displacement, r"$\Delta$" + "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
'GWP_economic': (GWP_ethanol, r"$\Delta$" + "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
'GWP_energy': (GWP_ethanol_allocation, r"$\Delta$" + "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
'GWP_property_allocation': ((GWP_ethanol, GWP_ethanol_allocation), r"$\Delta$" + f"GWP\n[{GWP_units_L}]", None),
}
mc_derivative_metric_settings = {
'MFPP': (MFPP_derivative, r"$\Delta$" + format_units(r"MFPP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('USD/MT')}]", None),
'TCI': (TCI_derivative, r"$\Delta$" + format_units(r"TCI/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('10^6*USD')}]", None),
'production': ((ethanol_production_derivative, biodiesel_production_derivative), r"$\Delta$" + format_units(r"Prod./OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('L/MT')}]", None),
'electricity_production': (electricity_production_derivative, r"$\Delta$" + format_units(r"EP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('kWhr/MT')}]", None),
'natural_gas_consumption': (natural_gas_consumption_derivative, r"$\Delta$" + format_units(r"NGC/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('m^3/MT')}]", None),
'GWP_economic': (GWP_ethanol_derivative, r"$\Delta$" + r"GWP $\cdot \Delta \mathrm{OC}^{-1}$" f"\n[{GWP_units_L_small}]", 1000),
}
kde_metric_settings = {j[0]: j for j in mc_metric_settings.values()}
kde_comparison_settings = {j[0]: j for j in mc_comparison_settings.values()}
kde_derivative_settings = {j[0]: j for j in mc_derivative_metric_settings.values()}
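# Each settings entry above maps a metric key to a (metric, axis label,
# scale factor) tuple; the kde_* dicts re-key the same tuples by the metric
# object itself. A minimal sketch of how one entry is consumed (hypothetical
# configuration 'O1'; assumes the Monte Carlo file exists):
#
#     metric, label, factor = mc_metric_settings['MFPP']
#     values = get_monte_carlo('O1', metric).values
#     if factor is not None: values = values * factor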
# %% Plots for publication
def plot_all():
# plot_montecarlo_main_manuscript()
plot_montecarlo_absolute()
plot_spearman_tea()
plot_spearman_lca()
plot_breakdowns()
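# Typical entry point; a sketch assuming Monte Carlo and Spearman result
# files have already been generated for every configuration:
#
#     >>> plot_all()  # writes SVG/PNG figures to `images_folder`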
def plot_montecarlo_main_manuscript():
set_font(size=8)
set_figure_size(aspect_ratio=0.85)
fig = plt.figure()
everything = GridSpec(4, 3, fig, hspace=1.5, wspace=0.7,
top=0.90, bottom=0.05,
left=0.11, right=0.97)
def spec2axes(spec, x, y, hspace=0, wspace=0.7, **kwargs):
subspec = spec.subgridspec(x, y, hspace=hspace, wspace=wspace, **kwargs)
return np.array([[fig.add_subplot(subspec[i, j]) for j in range(y)] for i in range(x)], object)
gs_feedstock_comparison = everything[:2, :]
gs_configuration_comparison = everything[2:, :2]
gs_agile_comparison = everything[2:, 2]
axes_feedstock_comparison = spec2axes(gs_feedstock_comparison, 2, 3)
axes_configuration_comparison = spec2axes(gs_configuration_comparison, 2, 2)
axes_agile_comparison = spec2axes(gs_agile_comparison, 2, 1)
plot_montecarlo_feedstock_comparison(axes_feedstock_comparison, letters='ABCDEFG')
plot_montecarlo_configuration_comparison(axes_configuration_comparison, letters='ABCDEFG')
plot_montecarlo_agile_comparison(axes_agile_comparison, letters='ABCDEFG')
def add_title(gs, title):
ax = fig.add_subplot(gs)
ax._frameon = False
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_title(
title, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold', y=1.1
)
add_title(gs_feedstock_comparison, '(I) Impact of opting to process oilcane over sugarcane')
add_title(gs_configuration_comparison, '(II) Impact of cellulosic ethanol integration')
    add_title(gs_agile_comparison, '(III) Impact of\noil-sorghum\nintegration')
plt.show()
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'montecarlo_main_manuscript.{i}')
plt.savefig(file, transparent=True)
def plot_montecarlo_feedstock_comparison(axes_box=None, letters=None,
single_column=True):
if single_column:
width = 'half'
aspect_ratio = 2.25
ncols = 1
left = 0.255
bottom = 0.05
else:
width = None
aspect_ratio = 0.75
left = 0.105
bottom = 0.12
ncols = 3
if axes_box is None:
set_font(size=8)
set_figure_size(width=width, aspect_ratio=aspect_ratio)
fig, axes = plot_monte_carlo(
derivative=False, absolute=False, comparison=True,
tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
labels=[
'Direct Cogeneration',
'Integrated Co-Fermentation',
# 'Direct Cogeneration',
# 'Integrated Co-Fermentation',
],
comparison_names=['O1 - S1', 'O2 - S2'],
metrics = ['MFPP', 'TCI', 'production', 'GWP_property_allocation',
'natural_gas_consumption', 'electricity_production'],
color_wheel = CABBI_colors.wheel([
'blue_light', 'green_dirty', 'orange', 'green',
'orange', 'orange_hatch', 'grey', 'brown',
])
)
for ax, letter in zip(axes, 'ABCDEFGH' if letters is None else letters):
plt.sca(ax)
ylb, yub = plt.ylim()
plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold')
# if axes_box is None and letter in 'DH':
# x = 0.5
# plt.text(x, ylb - (yub - ylb) * 0.3,
# 'Impact of processing\noilcane over sugarcane',
# horizontalalignment='center',verticalalignment='center',
# fontsize=8)
if axes_box is None:
plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'montecarlo_feedstock_comparison.{i}')
plt.savefig(file, transparent=True)
def plot_montecarlo_configuration_comparison(axes_box=None, letters=None,
single_column=True):
if single_column:
width = 'half'
aspect_ratio = 2.25
ncols = 1
left = 0.255
bottom = 0.05
x = 1.65
metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation',
'natural_gas_consumption', 'electricity_production']
else:
width = None
aspect_ratio = 0.75
left = 0.105
bottom = 0.12
ncols = 2
x = 0.58
metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation']
if axes_box is None:
set_font(size=8)
set_figure_size(width=width, aspect_ratio=aspect_ratio)
fig, axes = plot_monte_carlo(
derivative=False, absolute=False, comparison=True,
tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
labels=[
'Oilcane',
# 'Sugarcane',
],
comparison_names=[
'O2 - O1',
# 'S2 - S1'
],
metrics=metrics,
color_wheel = CABBI_colors.wheel([
'blue_light', 'green_dirty', 'orange', 'green',
'orange', 'orange_hatch',
])
)
for ax, letter in zip(axes, 'ABCDEF' if letters is None else letters):
plt.sca(ax)
ylb, yub = plt.ylim()
plt.text(x, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold')
if axes_box is None:
plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'montecarlo_configuration_comparison.{i}')
plt.savefig(file, transparent=True)
def plot_montecarlo_agile_comparison(axes_box=None, letters=None):
if axes_box is None:
set_font(size=8)
set_figure_size(width=3.3071, aspect_ratio=1.0)
fig, axes = plot_monte_carlo(
derivative=False, absolute=False, comparison=True,
tickmarks=None, agile_only=True, ncols=1,
labels=[
'Direct Cogeneration',
'Integrated Co-Fermentation'
],
metrics=['MFPP', 'TCI'],
axes_box=axes_box,
)
for ax, letter in zip(axes, 'AB' if letters is None else letters):
plt.sca(ax)
ylb, yub = plt.ylim()
plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold')
if axes_box is None and letter == 'B':
plt.text(0.5, ylb - (yub - ylb) * 0.25,
                     'Impact of integrating oil-sorghum\nat an agile oilcane biorefinery',
horizontalalignment='center',verticalalignment='center',
fontsize=8)
if axes_box is None:
plt.subplots_adjust(right=0.9, left=0.2, wspace=0.5, top=0.98, bottom=0.15)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'montecarlo_agile_comparison.{i}')
plt.savefig(file, transparent=True)
def plot_montecarlo_derivative():
set_font(size=8)
set_figure_size(
aspect_ratio=0.5,
# width=3.3071, aspect_ratio=1.85
)
fig, axes = plot_monte_carlo(
derivative=True, absolute=True,
comparison=False, agile=False,
ncols=3,
# tickmarks=np.array([
# [-3, -2, -1, 0, 1, 2, 3, 4, 5],
# [-9, -6, -3, 0, 3, 6, 9, 12, 15],
# [-2.0, -1.5, -1.0, -0.5, 0, 0.5, 1.0, 1.5, 2],
# [-16, -8, 0, 8, 16, 24, 32, 40, 48],
# [-400, -300, -200, -100, 0, 100, 200, 300, 400],
# [-300, -225, -150, -75, 0, 75, 150, 225, 300]
# ], dtype=object),
labels=['DC', 'ICF'],
color_wheel = CABBI_colors.wheel([
'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
'orange',
])
)
for ax, letter in zip(axes, 'ABCDEFGH'):
plt.sca(ax)
ylb, yub = plt.ylim()
plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold')
plt.subplots_adjust(
hspace=0, wspace=0.7,
top=0.95, bottom=0.1,
left=0.12, right=0.96
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'montecarlo_derivative.{i}')
plt.savefig(file, transparent=True)
def plot_montecarlo_absolute():
set_font(size=8)
set_figure_size(aspect_ratio=1.05)
fig, axes = plot_monte_carlo(
absolute=True, comparison=False, ncols=2,
expand=0.1,
labels=['Sugarcane\nDC', 'Oilcane\nDC',
'Sugarcane\nICF', 'Oilcane\nICF',
'Sugarcane &\nSorghum DC', 'Oilcane &\nOil-sorghum DC',
'Sugarcane &\nSorghum ICF', 'Oilcane &\nOil-sorghum ICF'],
xrot=90,
color_wheel = CABBI_colors.wheel([
'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
'orange', 'orange', 'green', 'orange', 'green',
])
)
for ax, letter in zip(axes, 'ABCDEFGHIJ'):
plt.sca(ax)
ylb, yub = plt.ylim()
plt.text(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold')
plt.subplots_adjust(left=0.12, right=0.95, wspace=0.40, top=0.98, bottom=0.2)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'montecarlo_absolute.{i}')
plt.savefig(file, transparent=True)
def plot_spearman_tea(with_units=None, aspect_ratio=0.8, **kwargs):
set_font(size=8)
set_figure_size(aspect_ratio=aspect_ratio)
plot_spearman(
configurations=[
'O1', 'O1*',
'O2', 'O2*',
],
labels=[
'DC', 'Oil-sorghum int., DC',
'ICF', 'Oil-sorghum int., ICF',
],
kind='TEA',
with_units=with_units,
cutoff=0.03,
**kwargs
)
plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.08)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'spearman_tea.{i}')
plt.savefig(file, transparent=True)
def plot_spearman_tea_short(**kwargs):
set_font(size=8)
set_figure_size(aspect_ratio=0.65, width=6.6142 * 2/3)
plot_spearman(
configurations=[
'O1',
'O2',
],
labels=[
'DC',
'ICF',
],
kind='TEA',
with_units=False,
cutoff=0.03,
top=5,
legend=True,
legend_kwargs={'loc': 'upper left'},
**kwargs
)
plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'spearman_tea.{i}')
plt.savefig(file, transparent=True)
def plot_spearman_lca_short(with_units=False, aspect_ratio=0.65, **kwargs):
set_font(size=8)
set_figure_size(aspect_ratio=aspect_ratio, width=6.6142 * 2/3)
plot_spearman(
configurations=[
'O1',
'O2',
],
labels=[
'DC',
'ICF',
],
kind='LCA',
with_units=with_units,
cutoff=0.03,
top=5,
legend=False,
**kwargs
)
plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'spearman_lca.{i}')
plt.savefig(file, transparent=True)
def plot_spearman_lca(with_units=None, aspect_ratio=0.65, **kwargs):
set_font(size=8)
set_figure_size(aspect_ratio=aspect_ratio)
plot_spearman(
configurations=[
'O1', 'O1*',
'O2', 'O2*',
],
labels=[
'DC', 'Oil-sorghum int., DC',
'ICF', 'Oil-sorghum int., ICF',
],
kind='LCA',
with_units=with_units,
cutoff=0.03,
**kwargs
)
plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.10)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'spearman_lca.{i}')
plt.savefig(file, transparent=True)
def plot_breakdowns():
set_font(size=8)
set_figure_size(aspect_ratio=0.68)
fig, axes = plt.subplots(nrows=1, ncols=2)
plt.sca(axes[0])
plot_configuration_breakdown('O1', ax=axes[0], legend=False)
plt.sca(axes[1])
plot_configuration_breakdown('O2', ax=axes[1], legend=True)
yticks = axes[1].get_yticks()
plt.yticks(yticks, ['']*len(yticks))
plt.ylabel('')
plt.subplots_adjust(left=0.09, right=0.96, wspace=0., top=0.84, bottom=0.31)
for ax, letter in zip(axes, ['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation']):
plt.sca(ax)
ylb, yub = plt.ylim()
xlb, xub = plt.xlim()
plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.2, letter, color=letter_color,
horizontalalignment='center',verticalalignment='center',
fontsize=12, fontweight='bold')
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'breakdowns.{i}')
plt.savefig(file, transparent=True)
# %% Heatmap
def get_fraction_in_same_direction(data, direction):
return (direction * data >= 0.).sum(axis=0) / data.size
def get_median(data):
return roundsigfigs(np.percentile(data, 50, axis=0))
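# Worked example with hypothetical samples [1., 2., -1., 3., 4.] and
# direction=1: four of the five values are non-negative, so
# get_fraction_in_same_direction returns 0.8, while get_median returns
# the rounded 50th percentile, 2.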
def plot_heatmap_comparison(comparison_names=None, xlabels=None):
if comparison_names is None: comparison_names = oc.comparison_names
columns = comparison_names
if xlabels is None: xlabels = [format_name(i).replace(' ', '') for i in comparison_names]
def get_data(metric, name):
df = get_monte_carlo(name, metric)
values = df.values
return values
GWP_economic, GWP_ethanol, GWP_biodiesel, GWP_electricity, GWP_crude_glycerol, = lca_monte_carlo_metric_mockups
MFPP, TCI, ethanol_production, biodiesel_production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups
GWP_ethanol_displacement = variables.GWP_ethanol_displacement
GWP_ethanol_allocation = variables.GWP_ethanol_allocation
rows = [
MFPP,
TCI,
ethanol_production,
biodiesel_production,
electricity_production,
natural_gas_consumption,
GWP_ethanol_displacement,
GWP_ethanol_allocation,
GWP_ethanol, # economic
]
ylabels = [
f"MFPP\n[{format_units('USD/MT')}]",
f"TCI\n[{format_units('10^6*USD')}]",
f"Ethanol production\n[{format_units('L/MT')}]",
f"Biodiesel production\n[{format_units('L/MT')}]",
f"Elec. prod.\n[{format_units('kWhr/MT')}]",
f"NG cons.\n[{format_units('m^3/MT')}]",
"GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]",
"GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]",
"GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]",
]
N_rows = len(rows)
N_cols = len(comparison_names)
data = np.zeros([N_rows, N_cols], dtype=object)
data[:] = [[get_data(i, j) for j in columns] for i in rows]
medians = np.zeros_like(data, dtype=float)
fractions = medians.copy()
for i in range(N_rows):
for j in range(N_cols):
medians[i, j] = x = get_median(data[i, j])
fractions[i, j] = get_fraction_in_same_direction(data[i, j], 1 if x > 0 else -1)
fig, ax = plt.subplots()
mbar = bst.plots.MetricBar(
'Fraction in the same direction [%]', ticks=[-100, -75, -50, -25, 0, 25, 50, 75, 100],
cmap=plt.cm.get_cmap('RdYlGn')
)
im, cbar = bst.plots.plot_heatmap(
100 * fractions, vmin=0, vmax=100, ax=ax, cell_labels=medians,
metric_bar=mbar, xlabels=xlabels, ylabels=ylabels,
)
cbar.ax.set_ylabel(mbar.title, rotation=-90, va="bottom")
plt.sca(ax)
ax.spines[:].set_visible(False)
plt.grid(True, 'major', 'both', lw=1, color='w', ls='-')
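# Usage sketch (assumes Monte Carlo files exist for all comparison names):
#
#     plot_heatmap_comparison()  # cell labels are medians; color shows the
#                                # fraction of samples sharing the median's sign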
# %% KDE
def plot_kde(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None,
             xbox_kwargs=None, ybox_kwargs=None, top_left='',
             top_right='Tradeoff', bottom_left='Tradeoff',
             bottom_right='', fx=None, fy=None):
set_font(size=8)
set_figure_size(width='half', aspect_ratio=1.20)
Xi, Yi = [i.index for i in metrics]
df = oc.get_monte_carlo(name, metrics)
y = df[Yi].values
x = df[Xi].values
    sX, sY = [kde_comparison_settings[i] for i in metrics]
    _, xlabel, default_fx = sX
    _, ylabel, default_fy = sY
    # Allow callers to override the default scale factors (e.g., fx=1000.).
    if fx is None: fx = default_fx
    if fy is None: fy = default_fy
    if fx: x *= fx
    if fy: y *= fy
ax = bst.plots.plot_kde(
y=y, x=x, xticks=xticks, yticks=yticks,
xticklabels=True, yticklabels=True,
xbox_kwargs=xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn),
ybox_kwargs=ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn),
)
plt.sca(ax)
plt.xlabel(xlabel.replace('\n', ' '))
plt.ylabel(ylabel.replace('\n', ' '))
bst.plots.plot_quadrants()
xlb, xub = plt.xlim()
ylb, yub = plt.ylim()
xpos = lambda x: xlb + (xub - xlb) * x
# xlpos = lambda x: xlb * (1 - x)
ypos = lambda y: ylb + (yub - ylb) * y
y_mt_0 = y > 0
y_lt_0 = y < 0
x_mt_0 = x > 0
x_lt_0 = x < 0
xleft = 0.02
xright = 0.98
ytop = 0.94
ybottom = 0.02
if yub > 0. and xlb < 0.:
if top_left.endswith('()'):
p = (y_mt_0 & x_lt_0).sum() / y.size
top_left = f"{p:.0%} {top_left.strip('()')}"
plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn,
horizontalalignment='left', verticalalignment='top',
fontsize=10, fontweight='bold', zorder=10)
if ylb < 0. and xlb < 0.:
if bottom_left.endswith('()'):
p = (y_lt_0 & x_lt_0).sum() / y.size
bottom_left = f"{p:.0%} {bottom_left.strip('()')}"
plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn,
horizontalalignment='left', verticalalignment='bottom',
fontsize=10, fontweight='bold', zorder=10)
if yub > 0. and xub > 0.:
if top_right.endswith('()'):
p = (y_mt_0 & x_mt_0).sum() / y.size
top_right = f"{p:.0%} {top_right.strip('()')}"
plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn,
horizontalalignment='right', verticalalignment='top',
fontsize=10, fontweight='bold', zorder=10)
if ylb < 0. and xub > 0.:
if bottom_right.endswith('()'):
p = (y_lt_0 & x_mt_0).sum() / y.size
bottom_right = f"{p:.0%} {bottom_right.strip('()')}"
plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn,
horizontalalignment='right', verticalalignment='bottom',
fontsize=10, fontweight='bold', zorder=10)
plt.subplots_adjust(
hspace=0.05, wspace=0.05,
top=0.98, bottom=0.15,
left=0.15, right=0.98,
)
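# Quadrant labels that end in '()' are prefixed with the fraction of Monte
# Carlo samples falling in that quadrant. A usage sketch (assumes the
# 'O1 - S1' Monte Carlo file exists):
#
#     plot_kde('O1 - S1', top_left='Oilcane Favored',
#              bottom_right='Sugarcane Favored()')  # e.g. '12% Sugarcane Favored'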
def plot_kde_2d(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None,
top_left='', top_right='Tradeoff', bottom_left='Tradeoff',
bottom_right='', xbox_kwargs=None, ybox_kwargs=None, titles=None):
set_font(size=8)
set_figure_size(aspect_ratio=0.65)
if isinstance(name, str): name = (name,)
Xi, Yi = [i.index for i in metrics]
dfs = [oc.get_monte_carlo(i, metrics) for i in name]
sX, sY = [kde_comparison_settings[i] for i in metrics]
_, xlabel, fx = sX
_, ylabel, fy = sY
xs = np.array([[df[Xi] for df in dfs]])
ys = np.array([[df[Yi] for df in dfs]])
if fx: xs *= fx
if fy: ys *= fy
axes = bst.plots.plot_kde_2d(
xs=xs, ys=ys,
xticks=xticks, yticks=yticks,
xticklabels=[True, True], yticklabels=[True, True],
xbox_kwargs=2*[xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn)],
ybox_kwargs=[ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn)],
)
M, N = axes.shape
xleft = 0.02
xright = 0.98
ytop = 0.94
ybottom = 0.02
for i in range(M):
for j in range(N):
ax = axes[i, j]
plt.sca(ax)
if i == M - 1: plt.xlabel(xlabel.replace('\n', ' '))
if j == 0: plt.ylabel(ylabel.replace('\n', ' '))
bst.plots.plot_quadrants()
xlb, xub = plt.xlim()
ylb, yub = plt.ylim()
xpos = lambda x: xlb + (xub - xlb) * x
# xlpos = lambda x: xlb * (1 - x)
ypos = lambda y: ylb + (yub - ylb) * y
df = dfs[j]
x = df[Xi]
y = df[Yi]
y_mt_0 = y > 0
y_lt_0 = y < 0
x_mt_0 = x > 0
x_lt_0 = x < 0
if yub > 0. and xlb < 0. and top_left:
if top_left.endswith('()'):
p = (y_mt_0 & x_lt_0).sum() / y.size
top_left = f"{p:.0%} {top_left.strip('()')}"
replacement = '()'
else:
replacement = None
plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn,
horizontalalignment='left', verticalalignment='top',
fontsize=10, fontweight='bold', zorder=10)
top_left = replacement
if ylb < 0. and xlb < 0. and bottom_left:
if bottom_left.endswith('()'):
p = (y_lt_0 & x_lt_0).sum() / y.size
bottom_left = f"{p:.0%} {bottom_left.strip('()')}"
replacement = '()'
else:
replacement = None
plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn,
horizontalalignment='left', verticalalignment='bottom',
fontsize=10, fontweight='bold', zorder=10)
bottom_left = replacement
if yub > 0. and xub > 0. and top_right:
if top_right.endswith('()'):
p = (y_mt_0 & x_mt_0).sum() / y.size
top_right = f"{p:.0%} {top_right.strip('()')}"
replacement = '()'
else:
replacement = None
plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn,
horizontalalignment='right', verticalalignment='top',
fontsize=10, fontweight='bold', zorder=10)
top_right = replacement
if ylb < 0. and xub > 0. and bottom_right:
if bottom_right.endswith('()'):
p = (y_lt_0 & x_mt_0).sum() / y.size
bottom_right = f"{p:.0%} {bottom_right.strip('()')}"
replacement = '()'
else:
replacement = None
plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn,
horizontalalignment='right', verticalalignment='bottom',
fontsize=10, fontweight='bold', zorder=10)
bottom_right = replacement
plt.subplots_adjust(
hspace=0, wspace=0,
top=0.98, bottom=0.15,
left=0.1, right=0.98,
)
if titles:
plt.subplots_adjust(
top=0.90,
)
for ax, letter in zip(axes[0, :], titles):
plt.sca(ax)
ylb, yub = plt.ylim()
xlb, xub = plt.xlim()
plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.17, letter, color=letter_color,
horizontalalignment='center', verticalalignment='center',
fontsize=12, fontweight='bold')
def plot_feedstock_conventional_comparison_kde():
plot_kde(
'O1 - S1',
yticks=[-20, -10, 0, 10, 20, 30, 40],
xticks=[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06],
top_left='Oilcane Favored',
bottom_right='Sugarcane\nFavored',
top_right='GWP\nTradeoff()',
bottom_left='MFPP\nTradeoff()',
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'feedstock_conventional_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_feedstock_cellulosic_comparison_kde():
plot_kde(
'O2 - S2',
yticks=[-40, -20, 0, 20, 40, 60, 80],
xticks=[-5, -4, -3, -2, -1, 0],
top_left='Oilcane Favored',
bottom_right='Sugarcane Favored',
top_right='GWP\nTradeoff()',
bottom_left='MFPP\nTradeoff()',
fx=1000.,
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'feedstock_cellulosic_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_feedstock_comparison_kde():
plot_kde_2d(
('O1 - S1', 'O2 - S2'),
yticks=[[-10, 0, 10, 20, 30, 40, 50, 60]],
xticks=[[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06],
[-2.0, -1.5, -1, -0.5, 0., 0.5, 1.0]],
top_right='GWP\nTradeoff()',
bottom_left='MFPP\nTradeoff()',
top_left='Oilcane\nFavored()',
bottom_right='\nSugarcane\nFavored()',
titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
)
plt.subplots_adjust(
wspace=0,
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'feedstock_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_configuration_comparison_kde():
plot_kde(
'O1 - O2',
yticks=[-20, 0, 20, 40, 60],
xticks=[-2, -1.5, -1, -0.5, 0, 0.5, 1],
top_right='GWP\nTradeoff()',
bottom_left='MFPP\nTradeoff()',
top_left='DC Favored()',
bottom_right='ICF\nFavored()',
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'configuration_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_separated_configuration_comparison_kde():
plot_kde_2d(
('O1', 'O2'),
yticks=[[-20, 0, 20, 40, 60]],
xticks=[
[0, 0.5, 1, 1.5],
[0, 2, 4, 6, 8, 10]
],
top_right='GWP\nTradeoff()',
bottom_left='MFPP\nTradeoff()',
top_left='DC Favored()',
bottom_right='ICF\nFavored()',
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'separated_configuration_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_crude_configuration_comparison_kde():
plot_kde_2d(
('O1 - O3', 'O2 - O4'),
yticks=[[-12, 0, 12, 24, 36, 48]],
xticks=[
[-0.5, -0.4, -0.3, -0.2, -0.1, 0],
[-1, -0.8, -0.6, -0.4, -0.2, 0]
],
top_right='GWP\nTradeoff()',
bottom_left='MFPP\nTradeoff()',
top_left='Biodiesel\nProduction Favored()',
bottom_right='Crude Oil\nProduction Favored()',
titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'crude_configuration_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_agile_comparison_kde():
plot_kde_2d(
('O1* - O1', 'O2* - O2'),
metrics=[TCI, MFPP],
yticks=[[0, 3, 6, 9, 12, 15]],
xticks=2*[[-150, -125, -100, -75, -50, -25, 0]],
top_right='TCI-Tradeoff()',
bottom_left='MFPP\nTradeoff()',
top_left='Sorghum\nIntegration Favored()',
bottom_right='Cane-only\nFavored()',
xbox_kwargs=dict(light=CABBI_colors.green_dirty.RGBn,
dark=CABBI_colors.green_dirty.shade(60).RGBn),
titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'agile_conventional_comparison_kde.{i}')
plt.savefig(file, transparent=True)
def plot_open_comparison_kde(overlap=False):
metrics = [MFPP, TCI, GWP_ethanol, biodiesel_production]
df_conventional_oc = oc.get_monte_carlo('O1', metrics)
df_cellulosic_oc = oc.get_monte_carlo('O2', metrics)
df_conventional_sc = oc.get_monte_carlo('S1', metrics)
df_cellulosic_sc = oc.get_monte_carlo('S2', metrics)
MFPPi = MFPP.index
TCIi = TCI.index
if overlap:
ys = np.zeros([1, 2], dtype=object)
xs = np.zeros([1, 2], dtype=object)
ys[0, 0] = (df_conventional_oc[MFPPi], df_cellulosic_oc[MFPPi])
ys[0, 1] = (df_conventional_sc[MFPPi], df_cellulosic_sc[MFPPi])
xs[0, 0] = (df_conventional_oc[TCIi], df_cellulosic_oc[TCIi])
xs[0, 1] = (df_conventional_sc[TCIi], df_cellulosic_sc[TCIi])
yticks = [[-30, -15, 0, 15, 30, 45, 60, 75]]
xticks = 2*[[200, 300, 400, 500, 600]]
else:
ys = np.array([
[df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]],
[df_cellulosic_oc[MFPPi], df_cellulosic_sc[MFPPi]]
])
xs = np.array([
[df_conventional_oc[TCIi], df_conventional_sc[TCIi]],
[df_cellulosic_oc[TCIi], df_cellulosic_sc[TCIi]]
])
yticks = 2*[[-30, -15, 0, 15, 30, 45, 60, 75]]
xticks = 2*[[200, 300, 400, 500, 600]]
bst.plots.plot_kde_2d(
ys=ys, xs=xs, xticks=xticks, yticks=yticks,
xbox_kwargs=[dict(position=1), dict(position=1)],
ybox_kwargs=[dict(position=0), dict(position=0)],
)
#%% General Monte Carlo box plots
def plot_monte_carlo_across_coordinate(coordinate, data, color_wheel):
if isinstance(data, list):
return [plot_monte_carlo_across_coordinate(coordinate, i, color_wheel) for i in data]
else:
color = color_wheel.next()
return bst.plots.plot_montecarlo_across_coordinate(
coordinate, data,
light_color=color.tint(50).RGBn,
dark_color=color.shade(50).RGBn,
)
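# Sketch of the expected shapes (hypothetical data): `coordinate` is a 1-D
# array of n points and `data` an (N_samples, n) array, e.g.
#
#     coordinate = np.linspace(5, 15, 10)
#     data = np.random.normal(size=(2000, 10))  # placeholder samples
#     plot_monte_carlo_across_coordinate(coordinate, data, CABBI_colors.wheel())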
def monte_carlo_box_plot(data, positions, light_color, dark_color, width=None,
hatch=None, outliers=False, **kwargs):
if width is None: width = 0.8
    if outliers:
        flierprops = {'marker': 'D',
                      'markerfacecolor': light_color,
                      'markeredgecolor': dark_color,
                      'markersize': 3}
    else:
        flierprops = {'marker': ''}
bp = plt.boxplot(
x=data, positions=positions, patch_artist=True,
widths=width, whis=[5, 95],
boxprops={'facecolor':light_color,
'edgecolor':dark_color},
medianprops={'color':dark_color,
'linewidth':1.5},
flierprops=flierprops,
**kwargs
)
if hatch:
for box in bp['boxes']:
box.set(hatch = hatch)
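# Minimal usage sketch (colors are illustrative RGB tuples; draws on the
# current matplotlib axes):
#
#     monte_carlo_box_plot(np.random.normal(size=1000), positions=[0],
#                          light_color=(0.9, 0.6, 0.2), dark_color=(0.5, 0.3, 0.1))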
def plot_monte_carlo(derivative=False, absolute=True, comparison=True,
configuration_names=None, comparison_names=None,
metrics=None, labels=None, tickmarks=None, agile=True,
ncols=1, expand=None, step_min=None,
agile_only=False, xrot=None,
color_wheel=None, axes_box=None):
if derivative:
default_configuration_names = ['O1', 'O2']
default_comparison_names = ['O2 - O1']
metric_info = mc_derivative_metric_settings
default_metrics = list(metric_info)
else:
default_configuration_names = oc.configuration_names[:-2]
default_comparison_names = oc.comparison_names
if comparison:
metric_info = mc_comparison_settings
else:
metric_info = mc_metric_settings
if agile_only:
default_configuration_names = [i for i in default_configuration_names if '*' in i]
default_comparison_names = [i for i in default_comparison_names if '*' in i]
default_metrics = ['MFPP', 'TCI', 'production']
else:
default_metrics = list(metric_info)
if configuration_names is None: configuration_names = default_configuration_names
if comparison_names is None: comparison_names = default_comparison_names
if metrics is None: metrics = default_metrics
combined = absolute and comparison
if agile_only:
configuration_names = [i for i in configuration_names if '*' in i]
comparison_names = [i for i in comparison_names if '*' in i]
elif not agile:
configuration_names = [i for i in configuration_names if '*' not in i]
comparison_names = [i for i in comparison_names if '*' not in i]
if combined:
columns = configurations = configuration_names + comparison_names
elif absolute:
columns = configurations = configuration_names
elif comparison:
columns = configurations = comparison_names
else:
columns = configurations = []
rows, ylabels, factors = zip(*[metric_info[i] for i in metrics])
factors = [(i, j) for i, j in enumerate(factors) if j is not None]
if color_wheel is None: color_wheel = CABBI_colors.wheel()
N_rows = len(rows)
if axes_box is None:
fig, axes_box = plt.subplots(ncols=ncols, nrows=int(round(N_rows / ncols)))
plt.subplots_adjust(wspace=0.45)
else:
fig = None
axes = axes_box.transpose()
axes = axes.flatten()
N_cols = len(columns)
xtext = labels or [format_name(i).replace(' ', '') for i in configurations]
N_marks = len(xtext)
xticks = tuple(range(N_marks))
    def get_data(metric, name):
        try:
            df = get_monte_carlo(name, metric)
        except Exception:
            # Fall back to a zero placeholder when results are missing.
            return np.zeros([1, 1])
        else:
            return df.values
def plot(arr, position):
if arr.ndim == 2:
N = arr.shape[1]
width = 0.618 / N
boxwidth = 0.618 / (N + 1/N)
plots = []
for i in range(N):
color = color_wheel.next()
boxplot = monte_carlo_box_plot(
data=arr[:, i], positions=[position + (i-(N-1)/2)*width],
light_color=color.RGBn,
dark_color=color.shade(60).RGBn,
width=boxwidth,
hatch=getattr(color, 'hatch', None),
)
plots.append(boxplot)
return plots
else:
color = color_wheel.next()
return monte_carlo_box_plot(
data=arr, positions=[position],
light_color=color.RGBn,
dark_color=color.shade(60).RGBn,
width=0.618,
)
data = np.zeros([N_rows, N_cols], dtype=object)
data[:] = [[get_data(i, j) for j in columns] for i in rows]
for i, j in factors: data[i, :] *= j
if tickmarks is None:
tickmarks = [
bst.plots.rounded_tickmarks_from_data(
i, step_min=step_min, N_ticks=8, lb_max=0, center=0,
f=roundsigfigs, expand=expand,
f_min=lambda x: np.percentile(x, 5),
f_max=lambda x: np.percentile(x, 95),
)
for i in data
]
x0 = len(configuration_names) - 0.5
xf = len(columns) - 0.5
for i in range(N_rows):
ax = axes[i]
plt.sca(ax)
if combined:
bst.plots.plot_vertical_line(x0)
ax.axvspan(x0, xf, color=colors.purple_tint.tint(60).RGBn)
plt.xlim(-0.5, xf)
for j in range(N_cols):
color_wheel.restart()
for i in range(N_rows):
ax = axes[i]
plt.sca(ax)
plot(data[i, j], j)
plt.ylabel(ylabels[i])
for i in range(N_rows):
ax = axes[i]
plt.sca(ax)
yticks = tickmarks[i]
plt.ylim([yticks[0], yticks[1]])
if yticks[0] < 0.:
bst.plots.plot_horizontal_line(0, color=CABBI_colors.black.RGBn, lw=0.8, linestyle='--')
        try:  # 2-D axes grid: label only axes in the last row
            xticklabels = xtext if ax in axes_box[-1] else []
        except TypeError:  # 1-D grid: label only the final subplot row
            xticklabels = xtext if i == N_rows - 1 else []
bst.plots.style_axis(ax,
xticks = xticks,
yticks = yticks,
xticklabels= xticklabels,
ytick0=False,
ytickf=False,
offset_xticks=True,
xrot=xrot,
)
if fig is None:
fig = plt.gcf()
else:
plt.subplots_adjust(hspace=0)
fig.align_ylabels(axes)
return fig, axes
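# Usage sketch (assumes Monte Carlo result files exist for the default
# configurations):
#
#     fig, axes = plot_monte_carlo(absolute=True, comparison=False, ncols=2)
#     plt.show()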
#%% Spearman
def plot_spearman(configurations, labels=None, metric=None,
kind=None, with_units=None, legend=None, legend_kwargs=None, **kwargs):
if kind is None: kind = 'TEA'
if with_units is None: with_units = True
if legend is None: legend = True
if metric is None:
if kind == 'TEA':
metric = MFPP
metric_name = metric.name
elif kind == 'LCA':
metric = GWP_economic
metric_name = r'GWP$_{\mathrm{economic}}$'
else:
raise ValueError(f"invalid kind '{kind}'")
else:
if metric == 'MFPP':
metric = MFPP
elif metric == 'GWP':
metric = GWP_economic
metric_name = metric.name
stream_price = format_units('USD/L')
USD_MT = format_units('USD/MT')
ng_price = format_units('USD/m^3')
electricity_price = format_units('USD/kWhr')
operating_days = format_units('day/yr')
capacity = format_units('10^6 MT/yr')
titer = format_units('g/L')
productivity = format_units('g/L/hr')
material_GWP = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$'
feedstock_GWP = '$\\mathrm{g} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$'
index, ignored_list = zip(*[
('Crushing mill oil recovery [60 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*']),
('Saccharification oil recovery [70 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*', 'O1', 'O1*']),
(f'Cane operating days [120 $-$ 180 {operating_days}]', []),
(f'Sorghum operating days [30 $-$ 60 {operating_days}]', ['S2', 'S1', 'O1', 'O2']),
(f'Crushing capacity [1.2 $-$ 2.0 {capacity}]', []),
(f'Ethanol price [0.269, 0.476, 0.758 {stream_price}]', []),
(f'Relative biodiesel price [0.0819, 0.786, 1.09 {stream_price}]', []),
(f'Natural gas price [0.105, 0.122, 0.175 {ng_price}]', ['S1', 'O1', 'S1*', 'O1*']),
(f'Electricity price [0.0583, 0.065, 0.069 {electricity_price}]', ['S2', 'O2', 'S2*', 'O2*']),
('IRR [10 $-$ 15 %]', []),
(f'Crude glycerol price [100 $-$ 220 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']),
(f'Pure glycerol price [488 $-$ 812 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']),
('Saccharification reaction time [54 $-$ 90 hr]', ['S1', 'O1', 'S1*', 'O1*']),
(f'Cellulase price [159 $-$ 265 {USD_MT}]', ['S1', 'O1', 'S1*', 'O1*']),
('Cellulase loading [1.5 $-$ 2.5 wt. % cellulose]', ['S1', 'O1', 'S1*', 'O1*']),
('PTRS base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']),
# ('Pretreatment reactor system base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']),
('Cane glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
('Sorghum glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
('Cane xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
('Sorghum xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
('Glucose to ethanol yield [90 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']),
('Xylose to ethanol yield [50 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']),
(f'Titer [65 $-$ 130 {titer}]', ['S1', 'O1', 'S1*', 'O1*']),
(f'Productivity [1.0 $-$ 2.0 {productivity}]', ['S1', 'O1', 'S1*', 'O1*']),
('Cane PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
('Sorghum PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
('Cane FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
('Sorghum FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
('Cane oil content [5 $-$ 15 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*']),
('Relative sorghum oil content [-3 $-$ 0 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*', 'O2', 'O1']),
('TAG to FFA conversion [17.25 $-$ 28.75 % theoretical]', ['S1', 'O1', 'S1*', 'O1*']),
        # TODO: Change the lower and upper values to baseline ± 10%.
(f'Feedstock GWPCF [26.3 $-$ 44.0 {feedstock_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
(f'Methanol GWPCF [0.338 $-$ 0.563 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
(f'Pure glycerine GWPCF [1.25 $-$ 2.08 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
(f'Cellulase GWPCF [6.05 $-$ 10.1 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']),
(f'Natural gas GWPCF [0.297 $-$ 0.363 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']),
])
if not with_units: index = [i.split(' [')[0] for i in index]
ignored_dct = {
'S1': [],
'O1': [],
'S2': [],
'O2': [],
'S1*': [],
'O1*': [],
'S2*': [],
'O2*': [],
}
for i, ignored in enumerate(ignored_list):
for name in ignored: ignored_dct[name].append(i)
index_name = index[i]
if kind == 'LCA':
for term in ('cost', 'price', 'IRR', 'time', 'capacity'):
if term in index_name:
for name in ignored_dct: ignored_dct[name].append(i)
break
elif kind == 'TEA':
if 'GWP' in index_name:
for name in ignored_dct: ignored_dct[name].append(i)
else:
raise ValueError(f"invalid kind '{kind}'")
rhos = []
for name in configurations:
file = spearman_file(name)
try:
df = pd.read_excel(file, header=[0, 1], index_col=[0, 1])
except:
warning = RuntimeWarning(f"file '{file}' not found")
warn(warning)
continue
s = df[metric.index]
s.iloc[ignored_dct[name]] = 0.
rhos.append(s)
color_wheel = [CABBI_colors.orange, CABBI_colors.green_soft, CABBI_colors.blue, CABBI_colors.brown]
fig, ax = bst.plots.plot_spearman_2d(rhos, index=index,
color_wheel=color_wheel,
name=metric_name,
**kwargs)
if legend:
if legend_kwargs is None:
legend_kwargs = {'loc': 'lower left'}
plt.legend(
handles=[
mpatches.Patch(
color=color_wheel[i].RGBn,
label=labels[i] if labels else format_name(configurations[i])
)
for i in range(len(configurations))
],
**legend_kwargs,
)
return fig, ax
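# Usage sketch (assumes Spearman result files exist for both configurations):
#
#     fig, ax = plot_spearman(['O1', 'O2'], labels=['DC', 'ICF'], kind='TEA')
#     plt.show()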
# %% Other
def plot_configuration_breakdown(name, across_coordinate=False, **kwargs):
oc.load(name)
if across_coordinate:
return bst.plots.plot_unit_groups_across_coordinate(
oc.set_cane_oil_content,
[5, 7.5, 10, 12.5],
'Feedstock oil content [dry wt. %]',
oc.unit_groups,
colors=[area_colors[i.name].RGBn for i in oc.unit_groups],
hatches=[area_hatches[i.name] for i in oc.unit_groups],
**kwargs,
)
else:
def format_total(x):
if x < 1e3:
return format(x, '.3g')
else:
x = int(x)
n = 10 ** (len(str(x)) - 3)
value = int(round(x / n) * n)
return format(value, ',')
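        # e.g. format_total(532.7) -> '533'; format_total(123456) -> '123,000'
        # (three significant figures, with thousands separators above 1e3).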
for i in oc.unit_groups:
if i.name == 'EtOH prod.':
i.name = 'Ethanol production'
elif i.name == 'Oil ext.':
i.name = 'Oil extraction'
elif i.name == 'Biod. prod.':
i.name = 'Biodiesel production'
i.metrics[0].name = 'Inst. eq.\ncost'
i.metrics[3].name = 'Elec.\ncons.'
i.metrics[4].name = 'Mat.\ncost'
return bst.plots.plot_unit_groups(
oc.unit_groups,
colors=[area_colors[i.name].RGBn for i in oc.unit_groups],
hatches=[area_hatches[i.name] for i in oc.unit_groups],
format_total=format_total,
fraction=True,
legend_kwargs=dict(
loc='lower center',
ncol=4,
bbox_to_anchor=(0, -0.52),
labelspacing=1.5, handlelength=2.8,
handleheight=1, scale=0.8,
),
**kwargs,
)
def plot_TCI_areas_across_oil_content(configuration='O2'):
oc.load(configuration)
data = {i.name: [] for i in oc.unit_groups}
increasing_areas = []
decreasing_areas = []
oil_contents = np.linspace(5, 15, 10)
for i in oil_contents:
oc.set_cane_oil_content(i)
oc.sys.simulate()
for i in oc.unit_groups: data[i.name].append(i.get_installed_cost())
for name, group_data in data.items():
lb, *_, ub = group_data
if ub > lb:
increasing_areas.append(group_data)
else:
decreasing_areas.append(group_data)
increasing_values = np.sum(increasing_areas, axis=0)
increasing_values -= increasing_values[0]
decreasing_values = np.sum(decreasing_areas, axis=0)
decreasing_values -= decreasing_values[-1]
plt.plot(oil_contents, increasing_values, label='Oil & fiber areas')
plt.plot(oil_contents, decreasing_values, label='Sugar areas')
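# Usage sketch (runs full biorefinery simulations at each oil content, so
# it is slow):
#
#     plot_TCI_areas_across_oil_content('O2')
#     plt.legend(); plt.show()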
# def plot_monte_carlo_across_oil_content(kind=0, derivative=False):
# MFPP, TCI, *production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups
# rows = [MFPP, TCI, production]
# if kind == 0:
# columns = across_oil_content_names
# elif kind == 1:
# columns = across_oil_content_agile_names
# elif kind == 2:
# columns = across_oil_content_comparison_names
# elif kind == 3:
# columns = across_oil_content_agile_comparison_names
# elif kind == 4:
# columns = across_oil_content_agile_direct_comparison_names
# else:
# raise NotImplementedError(str(kind))
# if derivative:
# x = 100 * (oil_content[:-1] + np.diff(oil_content) / 2.)
# ylabels = [
# f"MFPP der. [{format_units('USD/MT')}]",
# f"TCI der. [{format_units('10^6*USD')}]",
# f"Production der. [{format_units('L/MT')}]"
# ]
# else:
# x = 100 * oil_content
# ylabels = [
# f"MFPP$\backprime$ [{format_units('USD/MT')}]",
# f"TCI [{format_units('10^6*USD')}]",
# f"Production [{format_units('L/MT')}]"
# ]
# N_cols = len(columns)
# N_rows = len(rows)
# fig, axes = plt.subplots(ncols=N_cols, nrows=N_rows)
# data = np.zeros([N_rows, N_cols], dtype=object)
# def get_data(metric, name):
# if isinstance(metric, bst.Variable):
# return get_monte_carlo_across_oil_content(name, metric, derivative)
# else:
# return [get_data(i, name) for i in metric]
# data = np.array([[get_data(i, j) for j in columns] for i in rows])
# tickmarks = [None] * N_rows
# get_max = lambda x: max([i.max() for i in x]) if isinstance(x, list) else x.max()
# get_min = lambda x: min([i.min() for i in x]) if isinstance(x, list) else x.min()
# N_ticks = 5
# for r in range(N_rows):
# lb = min(min([get_min(i) for i in data[r, :]]), 0)
# ub = max([get_max(i) for i in data[r, :]])
# diff = 0.1 * (ub - lb)
# ub += diff
# if derivative:
# lb = floor(lb)
# ub = ceil(ub)
# step = (ub - lb) / (N_ticks - 1)
# tickmarks[r] = [0, 1] if step == 0 else [int(lb + step * i) for i in range(N_ticks)]
# else:
# if rows[r] is MFPP:
# if kind == 0 or kind == 1:
# tickmarks[r] = [-20, 0, 20, 40, 60]
# elif kind == 2:
# tickmarks[r] = [-20, -10, 0, 10, 20]
# elif kind == 3:
# tickmarks[r] = [-10, 0, 10, 20, 30]
# elif kind == 4:
# tickmarks[r] = [-5, 0, 5, 10, 15]
# continue
# lb = floor(lb / 15) * 15
# ub = ceil(ub / 15) * 15
# step = (ub - lb) / (N_ticks - 1)
# tickmarks[r] = [0, 1] if step == 0 else [int(lb + step * i) for i in range(N_ticks)]
# color_wheel = CABBI_colors.wheel()
# for j in range(N_cols):
# color_wheel.restart()
# for i in range(N_rows):
# arr = data[i, j]
# ax = axes[i, j]
# plt.sca(ax)
# percentiles = plot_monte_carlo_across_coordinate(x, arr, color_wheel)
# if i == 0: ax.set_title(format_name(columns[j]))
# xticklabels = i == N_rows - 1
# yticklabels = j == 0
# if xticklabels: plt.xlabel('Oil content [dry wt. %]')
# if yticklabels: plt.ylabel(ylabels[i])
# bst.plots.style_axis(ax,
# xticks = [5, 10, 15],
# yticks = tickmarks[i],
# xticklabels= xticklabels,
# yticklabels= yticklabels,
# ytick0=False)
# for i in range(N_cols): fig.align_ylabels(axes[:, i])
# plt.subplots_adjust(hspace=0.1, wspace=0.1)
|
[
"matplotlib.pyplot.boxplot",
"biosteam.utils.colors.red.shade",
"matplotlib.pyplot.grid",
"biosteam.utils.CABBI_colors.orange.shade",
"matplotlib.pyplot.ylabel",
"biosteam.plots.plot_quadrants",
"biosteam.plots.style_axis",
"biosteam.MockVariable",
"numpy.array",
"biosteam.utils.CABBI_colors.green_dirty.shade",
"biosteam.utils.CABBI_colors.wheel",
"pandas.read_excel",
"colorpalette.Palette",
"biosteam.plots.plot_vertical_line",
"thermosteam.units_of_measure.format_units",
"biorefineries.oilcane.set_cane_oil_content",
"biorefineries.oilcane.sys.simulate",
"matplotlib.pyplot.plot",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"biosteam.utils.CABBI_colors.orange.copy",
"warnings.warn",
"matplotlib.pyplot.ylim",
"biosteam.utils.CABBI_colors.blue.shade",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"biosteam.utils.CABBI_colors.grey.shade",
"biosteam.utils.colors.neutral.shade",
"matplotlib.pyplot.cm.get_cmap",
"thermosteam.utils.set_figure_size",
"matplotlib.pyplot.xlim",
"biorefineries.oilcane.get_monte_carlo",
"biosteam.plots.plot_heatmap",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"biosteam.plots.plot_spearman_2d",
"matplotlib.pyplot.text",
"biosteam.plots.plot_horizontal_line",
"biosteam.utils.CABBI_colors.teal.shade",
"thermosteam.utils.set_font",
"biosteam.plots.plot_unit_groups_across_coordinate",
"os.path.join",
"matplotlib.pyplot.sca",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"biosteam.utils.colors.purple_tint.tint",
"numpy.percentile",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"biorefineries.oilcane.load"
] |
[((3524, 3546), 'colorpalette.Palette', 'Palette', ([], {}), '(**area_colors)\n', (3531, 3546), False, 'from colorpalette import Palette\n'), ((3762, 3800), 'biosteam.utils.CABBI_colors.orange.copy', 'CABBI_colors.orange.copy', ([], {'hatch': '"""////"""'}), "(hatch='////')\n", (3786, 3800), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((3831, 3896), 'biosteam.MockVariable', 'bst.MockVariable', (['"""Ethanol over biodiesel"""', '"""L/MT"""', '"""Biorefinery"""'], {}), "('Ethanol over biodiesel', 'L/MT', 'Biorefinery')\n", (3847, 3896), True, 'import biosteam as bst\n'), ((3562, 3586), 'biosteam.utils.colors.neutral.shade', 'colors.neutral.shade', (['(25)'], {}), '(25)\n', (3582, 3586), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((7273, 7289), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (7281, 7289), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((7294, 7328), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.85)'}), '(aspect_ratio=0.85)\n', (7309, 7328), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((7339, 7351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7349, 7351), True, 'import matplotlib.pyplot as plt\n'), ((7369, 7461), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(3)', 'fig'], {'hspace': '(1.5)', 'wspace': '(0.7)', 'top': '(0.9)', 'bottom': '(0.05)', 'left': '(0.11)', 'right': '(0.97)'}), '(4, 3, fig, hspace=1.5, wspace=0.7, top=0.9, bottom=0.05, left=0.11,\n right=0.97)\n', (7377, 7461), False, 'from matplotlib.gridspec import GridSpec\n'), ((9025, 9035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9033, 9035), True, 'import matplotlib.pyplot as plt\n'), ((14658, 14674), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (14666, 14674), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((14679, 14712), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.5)'}), '(aspect_ratio=0.5)\n', (14694, 14712), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((15779, 15869), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)', 'wspace': '(0.7)', 'top': '(0.95)', 'bottom': '(0.1)', 'left': '(0.12)', 'right': '(0.96)'}), '(hspace=0, wspace=0.7, top=0.95, bottom=0.1, left=0.12,\n right=0.96)\n', (15798, 15869), True, 'import matplotlib.pyplot as plt\n'), ((16079, 16095), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (16087, 16095), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((16100, 16134), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(1.05)'}), '(aspect_ratio=1.05)\n', (16115, 16134), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((16999, 17075), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.12)', 'right': '(0.95)', 'wspace': '(0.4)', 'top': '(0.98)', 'bottom': '(0.2)'}), '(left=0.12, right=0.95, wspace=0.4, top=0.98, bottom=0.2)\n', (17018, 17075), True, 'import matplotlib.pyplot as plt\n'), ((17298, 17314), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (17306, 17314), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((17319, 17361), 'thermosteam.utils.set_figure_size', 'set_figure_size', 
([], {'aspect_ratio': 'aspect_ratio'}), '(aspect_ratio=aspect_ratio)\n', (17334, 17361), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((17680, 17746), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.45)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.08)'}), '(left=0.45, right=0.975, top=0.98, bottom=0.08)\n', (17699, 17746), True, 'import matplotlib.pyplot as plt\n'), ((17928, 17944), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (17936, 17944), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((17949, 18005), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.65)', 'width': '(6.6142 * 2 / 3)'}), '(aspect_ratio=0.65, width=6.6142 * 2 / 3)\n', (17964, 18005), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((18338, 18404), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.35)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.15)'}), '(left=0.35, right=0.975, top=0.98, bottom=0.15)\n', (18357, 18404), True, 'import matplotlib.pyplot as plt\n'), ((18623, 18639), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (18631, 18639), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((18644, 18708), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': 'aspect_ratio', 'width': '(6.6142 * 2 / 3)'}), '(aspect_ratio=aspect_ratio, width=6.6142 * 2 / 3)\n', (18659, 18708), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19002, 19068), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.35)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.15)'}), '(left=0.35, right=0.975, top=0.98, bottom=0.15)\n', (19021, 19068), True, 'import matplotlib.pyplot as plt\n'), ((19280, 19296), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (19288, 19296), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19301, 19343), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': 'aspect_ratio'}), '(aspect_ratio=aspect_ratio)\n', (19316, 19343), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19662, 19727), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.45)', 'right': '(0.975)', 'top': '(0.98)', 'bottom': '(0.1)'}), '(left=0.45, right=0.975, top=0.98, bottom=0.1)\n', (19681, 19727), True, 'import matplotlib.pyplot as plt\n'), ((19894, 19910), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (19902, 19910), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19915, 19949), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'aspect_ratio': '(0.68)'}), '(aspect_ratio=0.68)\n', (19930, 19949), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((19966, 19996), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (19978, 19996), True, 'import matplotlib.pyplot as plt\n'), ((20001, 20017), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[0]'], {}), '(axes[0])\n', (20008, 20017), True, 'import matplotlib.pyplot as plt\n'), ((20087, 20103), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[1]'], {}), '(axes[1])\n', (20094, 20103), True, 'import 
matplotlib.pyplot as plt\n'), ((20247, 20261), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (20257, 20261), True, 'import matplotlib.pyplot as plt\n'), ((20266, 20343), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.09)', 'right': '(0.96)', 'wspace': '(0.0)', 'top': '(0.84)', 'bottom': '(0.31)'}), '(left=0.09, right=0.96, wspace=0.0, top=0.84, bottom=0.31)\n', (20285, 20343), True, 'import matplotlib.pyplot as plt\n'), ((22700, 22740), 'numpy.zeros', 'np.zeros', (['[N_rows, N_cols]'], {'dtype': 'object'}), '([N_rows, N_cols], dtype=object)\n', (22708, 22740), True, 'import numpy as np\n'), ((22819, 22851), 'numpy.zeros_like', 'np.zeros_like', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (22832, 22851), True, 'import numpy as np\n'), ((23118, 23132), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23130, 23132), True, 'import matplotlib.pyplot as plt\n'), ((23320, 23460), 'biosteam.plots.plot_heatmap', 'bst.plots.plot_heatmap', (['(100 * fractions)'], {'vmin': '(0)', 'vmax': '(100)', 'ax': 'ax', 'cell_labels': 'medians', 'metric_bar': 'mbar', 'xlabels': 'xlabels', 'ylabels': 'ylabels'}), '(100 * fractions, vmin=0, vmax=100, ax=ax,\n cell_labels=medians, metric_bar=mbar, xlabels=xlabels, ylabels=ylabels)\n', (23342, 23460), True, 'import biosteam as bst\n'), ((23546, 23557), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (23553, 23557), True, 'import matplotlib.pyplot as plt\n'), ((23598, 23654), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)', '"""major"""', '"""both"""'], {'lw': '(1)', 'color': '"""w"""', 'ls': '"""-"""'}), "(True, 'major', 'both', lw=1, color='w', ls='-')\n", (23606, 23654), True, 'import matplotlib.pyplot as plt\n'), ((23897, 23913), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (23905, 23913), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((23918, 23965), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': '"""half"""', 'aspect_ratio': '(1.2)'}), "(width='half', aspect_ratio=1.2)\n", (23933, 23965), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((24016, 24049), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['name', 'metrics'], {}), '(name, metrics)\n', (24034, 24049), True, 'import biorefineries.oilcane as oc\n'), ((24592, 24603), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (24599, 24603), True, 'import matplotlib.pyplot as plt\n'), ((24692, 24718), 'biosteam.plots.plot_quadrants', 'bst.plots.plot_quadrants', ([], {}), '()\n', (24716, 24718), True, 'import biosteam as bst\n'), ((24734, 24744), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (24742, 24744), True, 'import matplotlib.pyplot as plt\n'), ((24760, 24770), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (24768, 24770), True, 'import matplotlib.pyplot as plt\n'), ((26662, 26758), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.05)', 'wspace': '(0.05)', 'top': '(0.98)', 'bottom': '(0.15)', 'left': '(0.15)', 'right': '(0.98)'}), '(hspace=0.05, wspace=0.05, top=0.98, bottom=0.15, left=\n 0.15, right=0.98)\n', (26681, 26758), True, 'import matplotlib.pyplot as plt\n'), ((27025, 27041), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (27033, 27041), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((27046, 27080), 'thermosteam.utils.set_figure_size', 
'set_figure_size', ([], {'aspect_ratio': '(0.65)'}), '(aspect_ratio=0.65)\n', (27061, 27080), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((27337, 27371), 'numpy.array', 'np.array', (['[[df[Xi] for df in dfs]]'], {}), '([[df[Xi] for df in dfs]])\n', (27345, 27371), True, 'import numpy as np\n'), ((27381, 27415), 'numpy.array', 'np.array', (['[[df[Yi] for df in dfs]]'], {}), '([[df[Yi] for df in dfs]])\n', (27389, 27415), True, 'import numpy as np\n'), ((31052, 31140), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)', 'wspace': '(0)', 'top': '(0.98)', 'bottom': '(0.15)', 'left': '(0.1)', 'right': '(0.98)'}), '(hspace=0, wspace=0, top=0.98, bottom=0.15, left=0.1,\n right=0.98)\n', (31071, 31140), True, 'import matplotlib.pyplot as plt\n'), ((33141, 33170), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)'}), '(wspace=0)\n', (33160, 33170), True, 'import matplotlib.pyplot as plt\n'), ((35929, 35962), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""O1"""', 'metrics'], {}), "('O1', metrics)\n", (35947, 35962), True, 'import biorefineries.oilcane as oc\n'), ((35986, 36019), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""O2"""', 'metrics'], {}), "('O2', metrics)\n", (36004, 36019), True, 'import biorefineries.oilcane as oc\n'), ((36045, 36078), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""S1"""', 'metrics'], {}), "('S1', metrics)\n", (36063, 36078), True, 'import biorefineries.oilcane as oc\n'), ((36102, 36135), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['"""S2"""', 'metrics'], {}), "('S2', metrics)\n", (36120, 36135), True, 'import biorefineries.oilcane as oc\n'), ((38200, 38447), 'matplotlib.pyplot.boxplot', 'plt.boxplot', ([], {'x': 'data', 'positions': 'positions', 'patch_artist': '(True)', 'widths': 'width', 'whis': '[5, 95]', 'boxprops': "{'facecolor': light_color, 'edgecolor': dark_color}", 'medianprops': "{'color': dark_color, 'linewidth': 1.5}", 'flierprops': 'flierprops'}), "(x=data, positions=positions, patch_artist=True, widths=width,\n whis=[5, 95], boxprops={'facecolor': light_color, 'edgecolor':\n dark_color}, medianprops={'color': dark_color, 'linewidth': 1.5},\n flierprops=flierprops, **kwargs)\n", (38211, 38447), True, 'import matplotlib.pyplot as plt\n'), ((42466, 42506), 'numpy.zeros', 'np.zeros', (['[N_rows, N_cols]'], {'dtype': 'object'}), '([N_rows, N_cols], dtype=object)\n', (42474, 42506), True, 'import numpy as np\n'), ((45080, 45101), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/L"""'], {}), "('USD/L')\n", (45092, 45101), False, 'from thermosteam.units_of_measure import format_units\n'), ((45115, 45137), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (45127, 45137), False, 'from thermosteam.units_of_measure import format_units\n'), ((45153, 45176), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/m^3"""'], {}), "('USD/m^3')\n", (45165, 45176), False, 'from thermosteam.units_of_measure import format_units\n'), ((45201, 45225), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/kWhr"""'], {}), "('USD/kWhr')\n", (45213, 45225), False, 'from thermosteam.units_of_measure import format_units\n'), ((45247, 45269), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""day/yr"""'], {}), "('day/yr')\n", (45259, 45269), False, 'from 
thermosteam.units_of_measure import format_units\n'), ((45285, 45311), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6 MT/yr"""'], {}), "('10^6 MT/yr')\n", (45297, 45311), False, 'from thermosteam.units_of_measure import format_units\n'), ((45324, 45343), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""g/L"""'], {}), "('g/L')\n", (45336, 45343), False, 'from thermosteam.units_of_measure import format_units\n'), ((45363, 45385), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""g/L/hr"""'], {}), "('g/L/hr')\n", (45375, 45385), False, 'from thermosteam.units_of_measure import format_units\n'), ((50066, 50169), 'biosteam.plots.plot_spearman_2d', 'bst.plots.plot_spearman_2d', (['rhos'], {'index': 'index', 'color_wheel': 'color_wheel', 'name': 'metric_name'}), '(rhos, index=index, color_wheel=color_wheel, name\n =metric_name, **kwargs)\n', (50092, 50169), True, 'import biosteam as bst\n'), ((50827, 50840), 'biorefineries.oilcane.load', 'oc.load', (['name'], {}), '(name)\n', (50834, 50840), True, 'import biorefineries.oilcane as oc\n'), ((52583, 52605), 'biorefineries.oilcane.load', 'oc.load', (['configuration'], {}), '(configuration)\n', (52590, 52605), True, 'import biorefineries.oilcane as oc\n'), ((52725, 52747), 'numpy.linspace', 'np.linspace', (['(5)', '(15)', '(10)'], {}), '(5, 15, 10)\n', (52736, 52747), True, 'import numpy as np\n'), ((53142, 53174), 'numpy.sum', 'np.sum', (['increasing_areas'], {'axis': '(0)'}), '(increasing_areas, axis=0)\n', (53148, 53174), True, 'import numpy as np\n'), ((53245, 53277), 'numpy.sum', 'np.sum', (['decreasing_areas'], {'axis': '(0)'}), '(decreasing_areas, axis=0)\n', (53251, 53277), True, 'import numpy as np\n'), ((53329, 53397), 'matplotlib.pyplot.plot', 'plt.plot', (['oil_contents', 'increasing_values'], {'label': '"""Oil & fiber areas"""'}), "(oil_contents, increasing_values, label='Oil & fiber areas')\n", (53337, 53397), True, 'import matplotlib.pyplot as plt\n'), ((53402, 53464), 'matplotlib.pyplot.plot', 'plt.plot', (['oil_contents', 'decreasing_values'], {'label': '"""Sugar areas"""'}), "(oil_contents, decreasing_values, label='Sugar areas')\n", (53410, 53464), True, 'import matplotlib.pyplot as plt\n'), ((9080, 9142), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_main_manuscript.{i}"""'], {}), "(images_folder, f'montecarlo_main_manuscript.{i}')\n", (9092, 9142), False, 'import os\n'), ((9151, 9186), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (9162, 9186), True, 'import matplotlib.pyplot as plt\n'), ((9608, 9624), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (9616, 9624), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((9633, 9688), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': 'width', 'aspect_ratio': 'aspect_ratio'}), '(width=width, aspect_ratio=aspect_ratio)\n', (9648, 9688), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((10503, 10514), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (10510, 10514), True, 'import matplotlib.pyplot as plt\n'), ((10534, 10544), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (10542, 10544), True, 'import matplotlib.pyplot as plt\n'), ((10553, 10718), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 
'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.65, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (10561, 10718), True, 'import matplotlib.pyplot as plt\n'), ((11089, 11174), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.96)', 'left': 'left', 'wspace': '(0.38)', 'top': '(0.98)', 'bottom': 'bottom'}), '(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom\n )\n', (11108, 11174), True, 'import matplotlib.pyplot as plt\n'), ((12024, 12040), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (12032, 12040), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((12049, 12104), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': 'width', 'aspect_ratio': 'aspect_ratio'}), '(width=width, aspect_ratio=aspect_ratio)\n', (12064, 12104), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((12699, 12710), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (12706, 12710), True, 'import matplotlib.pyplot as plt\n'), ((12730, 12740), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (12738, 12740), True, 'import matplotlib.pyplot as plt\n'), ((12749, 12911), 'matplotlib.pyplot.text', 'plt.text', (['x', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(x, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (12757, 12911), True, 'import matplotlib.pyplot as plt\n'), ((12971, 13056), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.96)', 'left': 'left', 'wspace': '(0.38)', 'top': '(0.98)', 'bottom': 'bottom'}), '(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom\n )\n', (12990, 13056), True, 'import matplotlib.pyplot as plt\n'), ((13329, 13345), 'thermosteam.utils.set_font', 'set_font', ([], {'size': '(8)'}), '(size=8)\n', (13337, 13345), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((13354, 13401), 'thermosteam.utils.set_figure_size', 'set_figure_size', ([], {'width': '(3.3071)', 'aspect_ratio': '(1.0)'}), '(width=3.3071, aspect_ratio=1.0)\n', (13369, 13401), False, 'from thermosteam.utils import set_figure_size, set_font, roundsigfigs\n'), ((13795, 13806), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (13802, 13806), True, 'import matplotlib.pyplot as plt\n'), ((13826, 13836), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (13834, 13836), True, 'import matplotlib.pyplot as plt\n'), ((13845, 14010), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.65, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (13853, 14010), True, 'import matplotlib.pyplot as plt\n'), ((14375, 14450), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.9)', 'left': '(0.2)', 'wspace': '(0.5)', 'top': '(0.98)', 'bottom': '(0.15)'}), '(right=0.9, left=0.2, wspace=0.5, 
top=0.98, bottom=0.15)\n', (14394, 14450), True, 'import matplotlib.pyplot as plt\n'), ((15533, 15544), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (15540, 15544), True, 'import matplotlib.pyplot as plt\n'), ((15564, 15574), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (15572, 15574), True, 'import matplotlib.pyplot as plt\n'), ((15583, 15748), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(ylb + (yub - ylb) * 0.9)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.65, ylb + (yub - ylb) * 0.9, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (15591, 15748), True, 'import matplotlib.pyplot as plt\n'), ((15940, 15997), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_derivative.{i}"""'], {}), "(images_folder, f'montecarlo_derivative.{i}')\n", (15952, 15997), False, 'import os\n'), ((16006, 16041), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (16017, 16041), True, 'import matplotlib.pyplot as plt\n'), ((16754, 16765), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (16761, 16765), True, 'import matplotlib.pyplot as plt\n'), ((16785, 16795), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (16793, 16795), True, 'import matplotlib.pyplot as plt\n'), ((16804, 16969), 'matplotlib.pyplot.text', 'plt.text', (['(7.8)', '(ylb + (yub - ylb) * 0.92)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color,\n horizontalalignment='center', verticalalignment='center', fontsize=12,\n fontweight='bold')\n", (16812, 16969), True, 'import matplotlib.pyplot as plt\n'), ((17121, 17176), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_absolute.{i}"""'], {}), "(images_folder, f'montecarlo_absolute.{i}')\n", (17133, 17176), False, 'import os\n'), ((17185, 17220), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (17196, 17220), True, 'import matplotlib.pyplot as plt\n'), ((17791, 17839), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_tea.{i}"""'], {}), "(images_folder, f'spearman_tea.{i}')\n", (17803, 17839), False, 'import os\n'), ((17848, 17883), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (17859, 17883), True, 'import matplotlib.pyplot as plt\n'), ((18449, 18497), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_tea.{i}"""'], {}), "(images_folder, f'spearman_tea.{i}')\n", (18461, 18497), False, 'import os\n'), ((18506, 18541), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (18517, 18541), True, 'import matplotlib.pyplot as plt\n'), ((19113, 19161), 'os.path.join', 'os.path.join', (['images_folder', 'f"""spearman_lca.{i}"""'], {}), "(images_folder, f'spearman_lca.{i}')\n", (19125, 19161), False, 'import os\n'), ((19170, 19205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (19181, 19205), True, 'import matplotlib.pyplot as plt\n'), ((19773, 19821), 'os.path.join', 'os.path.join', (['images_folder', 
'f"""spearman_lca.{i}"""'], {}), "(images_folder, f'spearman_lca.{i}')\n", (19785, 19821), False, 'import os\n'), ((19830, 19865), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (19841, 19865), True, 'import matplotlib.pyplot as plt\n'), ((20447, 20458), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (20454, 20458), True, 'import matplotlib.pyplot as plt\n'), ((20478, 20488), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (20486, 20488), True, 'import matplotlib.pyplot as plt\n'), ((20508, 20518), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (20516, 20518), True, 'import matplotlib.pyplot as plt\n'), ((20527, 20706), 'matplotlib.pyplot.text', 'plt.text', (['((xlb + xub) * 0.5)', '(ylb + (yub - ylb) * 1.2)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.2, letter, color=\n letter_color, horizontalalignment='center', verticalalignment='center',\n fontsize=12, fontweight='bold')\n", (20535, 20706), True, 'import matplotlib.pyplot as plt\n'), ((20777, 20823), 'os.path.join', 'os.path.join', (['images_folder', 'f"""breakdowns.{i}"""'], {}), "(images_folder, f'breakdowns.{i}')\n", (20789, 20823), False, 'import os\n'), ((20832, 20867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (20843, 20867), True, 'import matplotlib.pyplot as plt\n'), ((21043, 21074), 'numpy.percentile', 'np.percentile', (['data', '(50)'], {'axis': '(0)'}), '(data, 50, axis=0)\n', (21056, 21074), True, 'import numpy as np\n'), ((27177, 27207), 'biorefineries.oilcane.get_monte_carlo', 'oc.get_monte_carlo', (['i', 'metrics'], {}), '(i, metrics)\n', (27195, 27207), True, 'import biorefineries.oilcane as oc\n'), ((31191, 31219), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (31210, 31219), True, 'import matplotlib.pyplot as plt\n'), ((32011, 32084), 'os.path.join', 'os.path.join', (['images_folder', 'f"""feedstock_conventional_comparison_kde.{i}"""'], {}), "(images_folder, f'feedstock_conventional_comparison_kde.{i}')\n", (32023, 32084), False, 'import os\n'), ((32093, 32128), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (32104, 32128), True, 'import matplotlib.pyplot as plt\n'), ((32520, 32591), 'os.path.join', 'os.path.join', (['images_folder', 'f"""feedstock_cellulosic_comparison_kde.{i}"""'], {}), "(images_folder, f'feedstock_cellulosic_comparison_kde.{i}')\n", (32532, 32591), False, 'import os\n'), ((32600, 32635), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (32611, 32635), True, 'import matplotlib.pyplot as plt\n'), ((33239, 33299), 'os.path.join', 'os.path.join', (['images_folder', 'f"""feedstock_comparison_kde.{i}"""'], {}), "(images_folder, f'feedstock_comparison_kde.{i}')\n", (33251, 33299), False, 'import os\n'), ((33308, 33343), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (33319, 33343), True, 'import matplotlib.pyplot as plt\n'), ((33703, 33767), 'os.path.join', 'os.path.join', (['images_folder', 'f"""configuration_comparison_kde.{i}"""'], {}), "(images_folder, f'configuration_comparison_kde.{i}')\n", (33715, 33767), False, 'import os\n'), 
((33776, 33811), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (33787, 33811), True, 'import matplotlib.pyplot as plt\n'), ((34231, 34305), 'os.path.join', 'os.path.join', (['images_folder', 'f"""separated_configuration_comparison_kde.{i}"""'], {}), "(images_folder, f'separated_configuration_comparison_kde.{i}')\n", (34243, 34305), False, 'import os\n'), ((34314, 34349), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (34325, 34349), True, 'import matplotlib.pyplot as plt\n'), ((34922, 34992), 'os.path.join', 'os.path.join', (['images_folder', 'f"""crude_configuration_comparison_kde.{i}"""'], {}), "(images_folder, f'crude_configuration_comparison_kde.{i}')\n", (34934, 34992), False, 'import os\n'), ((35001, 35036), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (35012, 35036), True, 'import matplotlib.pyplot as plt\n'), ((35683, 35752), 'os.path.join', 'os.path.join', (['images_folder', 'f"""agile_conventional_comparison_kde.{i}"""'], {}), "(images_folder, f'agile_conventional_comparison_kde.{i}')\n", (35695, 35752), False, 'import os\n'), ((35761, 35796), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (35772, 35796), True, 'import matplotlib.pyplot as plt\n'), ((36209, 36239), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'object'}), '([1, 2], dtype=object)\n', (36217, 36239), True, 'import numpy as np\n'), ((36253, 36283), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'object'}), '([1, 2], dtype=object)\n', (36261, 36283), True, 'import numpy as np\n'), ((36695, 36818), 'numpy.array', 'np.array', (['[[df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]], [df_cellulosic_oc[\n MFPPi], df_cellulosic_sc[MFPPi]]]'], {}), '([[df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]], [\n df_cellulosic_oc[MFPPi], df_cellulosic_sc[MFPPi]]])\n', (36703, 36818), True, 'import numpy as np\n'), ((36861, 36980), 'numpy.array', 'np.array', (['[[df_conventional_oc[TCIi], df_conventional_sc[TCIi]], [df_cellulosic_oc[\n TCIi], df_cellulosic_sc[TCIi]]]'], {}), '([[df_conventional_oc[TCIi], df_conventional_sc[TCIi]], [\n df_cellulosic_oc[TCIi], df_cellulosic_sc[TCIi]]])\n', (36869, 36980), True, 'import numpy as np\n'), ((40847, 40867), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', ([], {}), '()\n', (40865, 40867), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((41008, 41040), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.45)'}), '(wspace=0.45)\n', (41027, 41040), True, 'import matplotlib.pyplot as plt\n'), ((43121, 43132), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (43128, 43132), True, 'import matplotlib.pyplot as plt\n'), ((43278, 43296), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', 'xf'], {}), '(-0.5, xf)\n', (43286, 43296), True, 'import matplotlib.pyplot as plt\n'), ((43566, 43577), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (43573, 43577), True, 'import matplotlib.pyplot as plt\n'), ((43616, 43648), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[yticks[0], yticks[1]]'], {}), '([yticks[0], yticks[1]])\n', (43624, 43648), True, 'import matplotlib.pyplot as plt\n'), ((43935, 44078), 'biosteam.plots.style_axis', 'bst.plots.style_axis', (['ax'], {'xticks': 'xticks', 'yticks': 'yticks', 'xticklabels': 'xticklabels', 'ytick0': '(False)', 'ytickf': 
'(False)', 'offset_xticks': '(True)', 'xrot': 'xrot'}), '(ax, xticks=xticks, yticks=yticks, xticklabels=\n xticklabels, ytick0=False, ytickf=False, offset_xticks=True, xrot=xrot)\n', (43955, 44078), True, 'import biosteam as bst\n'), ((44210, 44219), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (44217, 44219), True, 'import matplotlib.pyplot as plt\n'), ((44238, 44267), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (44257, 44267), True, 'import matplotlib.pyplot as plt\n'), ((50882, 51163), 'biosteam.plots.plot_unit_groups_across_coordinate', 'bst.plots.plot_unit_groups_across_coordinate', (['oc.set_cane_oil_content', '[5, 7.5, 10, 12.5]', '"""Feedstock oil content [dry wt. %]"""', 'oc.unit_groups'], {'colors': '[area_colors[i.name].RGBn for i in oc.unit_groups]', 'hatches': '[area_hatches[i.name] for i in oc.unit_groups]'}), "(oc.set_cane_oil_content, [5, \n 7.5, 10, 12.5], 'Feedstock oil content [dry wt. %]', oc.unit_groups,\n colors=[area_colors[i.name].RGBn for i in oc.unit_groups], hatches=[\n area_hatches[i.name] for i in oc.unit_groups], **kwargs)\n", (50926, 51163), True, 'import biosteam as bst\n'), ((52783, 52809), 'biorefineries.oilcane.set_cane_oil_content', 'oc.set_cane_oil_content', (['i'], {}), '(i)\n', (52806, 52809), True, 'import biorefineries.oilcane as oc\n'), ((52818, 52835), 'biorefineries.oilcane.sys.simulate', 'oc.sys.simulate', ([], {}), '()\n', (52833, 52835), True, 'import biorefineries.oilcane as oc\n'), ((10264, 10379), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'orange', 'orange_hatch',\n 'grey', 'brown']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green',\n 'orange', 'orange_hatch', 'grey', 'brown'])\n", (10282, 10379), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((11222, 11289), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_feedstock_comparison.{i}"""'], {}), "(images_folder, f'montecarlo_feedstock_comparison.{i}')\n", (11234, 11289), False, 'import os\n'), ((11302, 11337), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (11313, 11337), True, 'import matplotlib.pyplot as plt\n'), ((12478, 12576), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'orange', 'orange_hatch']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green',\n 'orange', 'orange_hatch'])\n", (12496, 12576), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((13104, 13175), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_configuration_comparison.{i}"""'], {}), "(images_folder, f'montecarlo_configuration_comparison.{i}')\n", (13116, 13175), False, 'import os\n'), ((13188, 13223), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (13199, 13223), True, 'import matplotlib.pyplot as plt\n'), ((14096, 14285), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(ylb - (yub - ylb) * 0.25)', '"""Impact of integrating oilsorghum\nat an agile oilcane biorefinery"""'], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(8)'}), '(0.5, ylb - (yub - ylb) * 0.25,\n """Impact of integrating oilsorghum\nat an agile oilcane biorefinery""",\n horizontalalignment=\'center\', verticalalignment=\'center\', fontsize=8)\n', (14104, 14285), True, 'import matplotlib.pyplot as plt\n'), 
((14503, 14566), 'os.path.join', 'os.path.join', (['images_folder', 'f"""montecarlo_agile_comparison.{i}"""'], {}), "(images_folder, f'montecarlo_agile_comparison.{i}')\n", (14515, 14566), False, 'import os\n'), ((14579, 14614), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'transparent': '(True)'}), '(file, transparent=True)\n', (14590, 14614), True, 'import matplotlib.pyplot as plt\n'), ((15342, 15441), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown', 'orange']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green', 'grey',\n 'brown', 'orange'])\n", (15360, 15441), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((16524, 16661), 'biosteam.utils.CABBI_colors.wheel', 'CABBI_colors.wheel', (["['blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown', 'orange',\n 'orange', 'green', 'orange', 'green']"], {}), "(['blue_light', 'green_dirty', 'orange', 'green', 'grey',\n 'brown', 'orange', 'orange', 'green', 'orange', 'green'])\n", (16542, 16661), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((23273, 23298), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdYlGn"""'], {}), "('RdYlGn')\n", (23288, 23298), True, 'import matplotlib.pyplot as plt\n'), ((28028, 28039), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (28035, 28039), True, 'import matplotlib.pyplot as plt\n'), ((28178, 28204), 'biosteam.plots.plot_quadrants', 'bst.plots.plot_quadrants', ([], {}), '()\n', (28202, 28204), True, 'import biosteam as bst\n'), ((28228, 28238), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (28236, 28238), True, 'import matplotlib.pyplot as plt\n'), ((28262, 28272), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (28270, 28272), True, 'import matplotlib.pyplot as plt\n'), ((31307, 31318), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (31314, 31318), True, 'import matplotlib.pyplot as plt\n'), ((31342, 31352), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (31350, 31352), True, 'import matplotlib.pyplot as plt\n'), ((31376, 31386), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (31384, 31386), True, 'import matplotlib.pyplot as plt\n'), ((31399, 31579), 'matplotlib.pyplot.text', 'plt.text', (['((xlb + xub) * 0.5)', '(ylb + (yub - ylb) * 1.17)', 'letter'], {'color': 'letter_color', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""', 'fontsize': '(12)', 'fontweight': '"""bold"""'}), "((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.17, letter, color=\n letter_color, horizontalalignment='center', verticalalignment='center',\n fontsize=12, fontweight='bold')\n", (31407, 31579), True, 'import matplotlib.pyplot as plt\n'), ((43166, 43198), 'biosteam.plots.plot_vertical_line', 'bst.plots.plot_vertical_line', (['x0'], {}), '(x0)\n', (43194, 43198), True, 'import biosteam as bst\n'), ((43425, 43436), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (43432, 43436), True, 'import matplotlib.pyplot as plt\n'), ((43481, 43503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabels[i]'], {}), '(ylabels[i])\n', (43491, 43503), True, 'import matplotlib.pyplot as plt\n'), ((43688, 43780), 'biosteam.plots.plot_horizontal_line', 'bst.plots.plot_horizontal_line', (['(0)'], {'color': 'CABBI_colors.black.RGBn', 'lw': '(0.8)', 'linestyle': '"""--"""'}), "(0, color=CABBI_colors.black.RGBn, lw=0.8,\n linestyle='--')\n", (43718, 43780), True, 'import biosteam as bst\n'), ((49675, 49727), 'pandas.read_excel', 
'pd.read_excel', (['file'], {'header': '[0, 1]', 'index_col': '[0, 1]'}), '(file, header=[0, 1], index_col=[0, 1])\n', (49688, 49727), True, 'import pandas as pd\n'), ((4068, 4090), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (4080, 4090), False, 'from thermosteam.units_of_measure import format_units\n'), ((4128, 4152), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (4140, 4152), False, 'from thermosteam.units_of_measure import format_units\n'), ((4211, 4231), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (4223, 4231), False, 'from thermosteam.units_of_measure import format_units\n'), ((4315, 4338), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (4327, 4338), False, 'from thermosteam.units_of_measure import format_units\n'), ((4421, 4443), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (4433, 4443), False, 'from thermosteam.units_of_measure import format_units\n'), ((22155, 22177), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (22167, 22177), False, 'from thermosteam.units_of_measure import format_units\n'), ((22199, 22223), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (22211, 22223), False, 'from thermosteam.units_of_measure import format_units\n'), ((22260, 22280), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (22272, 22280), False, 'from thermosteam.units_of_measure import format_units\n'), ((22319, 22339), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (22331, 22339), False, 'from thermosteam.units_of_measure import format_units\n'), ((22369, 22392), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (22381, 22392), False, 'from thermosteam.units_of_measure import format_units\n'), ((22419, 22441), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (22431, 22441), False, 'from thermosteam.units_of_measure import format_units\n'), ((41430, 41446), 'numpy.zeros', 'np.zeros', (['[1, 1]'], {}), '([1, 1])\n', (41438, 41446), True, 'import numpy as np\n'), ((49822, 49835), 'warnings.warn', 'warn', (['warning'], {}), '(warning)\n', (49826, 49835), False, 'from warnings import warn\n'), ((4885, 4907), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (4897, 4907), False, 'from thermosteam.units_of_measure import format_units\n'), ((4959, 4983), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (4971, 4983), False, 'from thermosteam.units_of_measure import format_units\n'), ((5056, 5076), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (5068, 5076), False, 'from thermosteam.units_of_measure import format_units\n'), ((5174, 5197), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (5186, 5197), False, 'from thermosteam.units_of_measure import format_units\n'), ((5294, 5316), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (5306, 5316), False, 'from 
thermosteam.units_of_measure import format_units\n'), ((5945, 5967), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""USD/MT"""'], {}), "('USD/MT')\n", (5957, 5967), False, 'from thermosteam.units_of_measure import format_units\n'), ((6086, 6110), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""10^6*USD"""'], {}), "('10^6*USD')\n", (6098, 6110), False, 'from thermosteam.units_of_measure import format_units\n'), ((6287, 6307), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""L/MT"""'], {}), "('L/MT')\n", (6299, 6307), False, 'from thermosteam.units_of_measure import format_units\n'), ((6462, 6485), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""kWhr/MT"""'], {}), "('kWhr/MT')\n", (6474, 6485), False, 'from thermosteam.units_of_measure import format_units\n'), ((6643, 6665), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""m^3/MT"""'], {}), "('m^3/MT')\n", (6655, 6665), False, 'from thermosteam.units_of_measure import format_units\n'), ((25271, 25298), 'biosteam.utils.CABBI_colors.teal.shade', 'CABBI_colors.teal.shade', (['(50)'], {}), '(50)\n', (25294, 25298), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((25680, 25707), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (25703, 25707), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((26082, 26109), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (26105, 26109), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((26497, 26517), 'biosteam.utils.colors.red.shade', 'colors.red.shade', (['(50)'], {}), '(50)\n', (26513, 26517), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((5880, 5903), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""MFPP/OC"""'], {}), "('MFPP/OC')\n", (5892, 5903), False, 'from thermosteam.units_of_measure import format_units\n'), ((6022, 6044), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""TCI/OC"""'], {}), "('TCI/OC')\n", (6034, 6044), False, 'from thermosteam.units_of_measure import format_units\n'), ((6221, 6245), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""Prod./OC"""'], {}), "('Prod./OC')\n", (6233, 6245), False, 'from thermosteam.units_of_measure import format_units\n'), ((6399, 6420), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""EP/OC"""'], {}), "('EP/OC')\n", (6411, 6420), False, 'from thermosteam.units_of_measure import format_units\n'), ((6579, 6601), 'thermosteam.units_of_measure.format_units', 'format_units', (['"""NGC/OC"""'], {}), "('NGC/OC')\n", (6591, 6601), False, 'from thermosteam.units_of_measure import format_units\n'), ((35513, 35547), 'biosteam.utils.CABBI_colors.green_dirty.shade', 'CABBI_colors.green_dirty.shade', (['(60)'], {}), '(60)\n', (35543, 35547), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((42869, 42888), 'numpy.percentile', 'np.percentile', (['x', '(5)'], {}), '(x, 5)\n', (42882, 42888), True, 'import numpy as np\n'), ((42922, 42942), 'numpy.percentile', 'np.percentile', (['x', '(95)'], {}), '(x, 95)\n', (42935, 42942), True, 'import numpy as np\n'), ((43236, 43263), 'biosteam.utils.colors.purple_tint.tint', 'colors.purple_tint.tint', (['(60)'], {}), '(60)\n', (43259, 43263), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((24435, 24464), 'biosteam.utils.CABBI_colors.orange.shade', 'CABBI_colors.orange.shade', (['(60)'], {}), '(60)\n', 
(24460, 24464), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((24547, 24574), 'biosteam.utils.CABBI_colors.blue.shade', 'CABBI_colors.blue.shade', (['(60)'], {}), '(60)\n', (24570, 24574), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((28982, 29009), 'biosteam.utils.CABBI_colors.teal.shade', 'CABBI_colors.teal.shade', (['(50)'], {}), '(50)\n', (29005, 29009), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((29602, 29629), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (29625, 29629), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((30216, 30243), 'biosteam.utils.CABBI_colors.grey.shade', 'CABBI_colors.grey.shade', (['(75)'], {}), '(75)\n', (30239, 30243), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((30836, 30856), 'biosteam.utils.colors.red.shade', 'colors.red.shade', (['(50)'], {}), '(50)\n', (30852, 30856), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((27804, 27831), 'biosteam.utils.CABBI_colors.blue.shade', 'CABBI_colors.blue.shade', (['(60)'], {}), '(60)\n', (27827, 27831), False, 'from biosteam.utils import CABBI_colors, colors\n'), ((27690, 27719), 'biosteam.utils.CABBI_colors.orange.shade', 'CABBI_colors.orange.shade', (['(60)'], {}), '(60)\n', (27715, 27719), False, 'from biosteam.utils import CABBI_colors, colors\n')]
|
import numpy as np
from ..layers.Layer import LayerTrainable
class LayeredModel(object):
def __init__(self, layers):
"""
layers : a list of layers. Treated as a feed-forward model
"""
assert len(layers) > 0, "Model layers must be non-empty"
# check that the output of each layer is the same size as the input of
# the next layer
        for l1, l2 in zip(layers[:-1], layers[1:]):
            assert l1.output_size == l2.input_size, \
                "adjacent layers do not match: output size {} feeds input size {}".format(
                    l1.output_size, l2.input_size)
self.layers = layers
def reset(self):
for l in self.layers:
l.reset()
def forward(self, x, end_layer=None):
"""
x : data to push through the network
end_layer : the layer to stop the forward movement of the data. Used for training. (default=None)
"""
x = x.squeeze()
assert (self.layers[0].input_size == 1 and x.shape == ()) or len(x) == self.layers[0].input_size, "unexpected input dimensionality (check bias)"
# if an end layer has not been named, feedforward the entire model
if end_layer is None:
f_layers = self.layers
else:
f_layers = self.layers[:end_layer]
        for l in f_layers:
            x = l.forward(x)
return x
def train(self, X, y, warmup_timesteps=100, data_repeats=1):
"""
x : input data to train on
y : output data to train on
warmup_timesteps : number of timesteps to run the data before training (default=100)
"""
assert isinstance(self.layers[-1], LayerTrainable), "This model cannot be trained because the final layer of type {} is not trainable".format(type(self.layers[-1]))
# TODO: for now we assume ONLY the last layer can be trained
y_forward = np.zeros((np.shape(X)[0] - data_repeats*warmup_timesteps,
self.layers[-1].input_size))
y_nonwarmup = np.zeros((np.shape(y)[0] - data_repeats*warmup_timesteps,
np.shape(y)[1]))
y_idx = 0
        data_rate = np.shape(X)[0] // data_repeats  # timesteps per repeat of the dataset
for idx,x in enumerate(X):
# some function that allows us to display
self.display()
if idx % data_rate < warmup_timesteps:
_ = self.forward(x, len(self.layers)-1)
else:
y_p = self.forward(x, len(self.layers)-1)
y_forward[y_idx, :] = y_p
y_nonwarmup[y_idx, :] = y[idx, :]
y_idx += 1
self.layers[-1].train(y_forward, y_nonwarmup)
def generate(self, x_data, count, reset_increment=-1, warmup_timesteps=0):
"""
Given a single datapoint, the model will feed this back into itself
to produce generative output data.
x_data : data to generate from (the first data point will be used unless reset_increment != -1)
count : number of times to run the generative process
        reset_increment : how often to feed the generator the 'real' data value (default=-1, i.e. never reset)
"""
        y_outputs = np.zeros(count)
        x = x_data[0]
for e in range(-warmup_timesteps, count, 1):
# some function that allows us to display
self.display()
            # if resetting is enabled, feed in the 'real' data; the (e == 0) case handles the warm-up swap
if e == 0 or (reset_increment != -1 and e % reset_increment == 0):
                assert e + warmup_timesteps < len(x_data), "x_data is too short for the requested reset schedule"
x = x_data[e + warmup_timesteps]
# forward generating without 'warmup'
if e >= 0:
x = self.forward(x)
y_outputs[e] = x
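                # re-append the constant bias input before feeding the output back in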
x = np.hstack((x, 1))
# forward generating with 'warmup'
else:
_ = self.forward(x_data[e + warmup_timesteps])
return y_outputs.squeeze()
def get_output_size(self):
return self.layers[-1].output_size
def get_input_size(self):
return self.layers[0].input_size
def display(self):
pass
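# Illustrative usage sketch (not part of the original module); the layer
# classes below are hypothetical stand-ins for any layers whose output and
# input sizes chain together:
#
#   model = LayeredModel([ReservoirLayer(input_size=2, output_size=64),
#                         ReadoutLayer(input_size=64, output_size=1)])
#   model.train(X_train, y_train, warmup_timesteps=100)
#   y_gen = model.generate(X_test, count=500, warmup_timesteps=100)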
|
[
"numpy.shape",
"numpy.zeros",
"numpy.hstack"
] |
[((4802, 4817), 'numpy.zeros', 'np.zeros', (['count'], {}), '(count)\n', (4810, 4817), True, 'import numpy as np\n'), ((3096, 3107), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3104, 3107), True, 'import numpy as np\n'), ((5482, 5499), 'numpy.hstack', 'np.hstack', (['(x, 1)'], {}), '((x, 1))\n', (5491, 5499), True, 'import numpy as np\n'), ((3041, 3052), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (3049, 3052), True, 'import numpy as np\n'), ((2821, 2832), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (2829, 2832), True, 'import numpy as np\n'), ((2961, 2972), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (2969, 2972), True, 'import numpy as np\n')]
|
#%%
import numpy as np
from sapai.data import data
from sapai.rand import MockRandomState
#%%
class Food():
def __init__(self,
name="food-none",
shop=None,
team=[],
seed_state = None):
"""
Food class definition the types of interactions that food undergoes
"""
if len(name) != 0:
if not name.startswith("food-"):
name = "food-{}".format(name)
self.eaten = False
self.shop = shop
self.seed_state = seed_state
        if self.seed_state is not None:
            self.rs = np.random.RandomState()
            self.rs.set_state(self.seed_state)
        else:
            ### Otherwise, fall back to the mock random state
            self.rs = MockRandomState()
self.attack = 0
self.health = 0
self.base_attack = 0
self.base_health = 0
self.status = "none"
self.effect = "none"
self.fd = {}
self.name = name
if name not in data["foods"]:
raise Exception("Food {} not found".format(name))
fd = data["foods"][name]["ability"]
self.fd = fd
self.effect = fd["effect"]["kind"]
if "attackAmount" in fd["effect"]:
self.attack = fd["effect"]["attackAmount"]
self.base_attack = fd["effect"]["attackAmount"]
if "healthAmount" in fd["effect"]:
self.health = fd["effect"]["healthAmount"]
self.base_health = fd["effect"]["healthAmount"]
if "status" in fd["effect"]:
self.status = fd["effect"]["status"]
def apply(self, pet=None):
"""
Serve the food object to the input pet
"""
        if self.eaten:
            raise Exception("Food {} has already been eaten".format(self.name))
if self.name == "food-canned-food":
self.shop.can += self.attack
return
pet.attack += self.attack
pet.health += self.health
        if self.effect == "ModifyStats":
            ### Stats already applied above
            return pet
        elif self.effect == "ApplyStatus":
            pet.status = self.status
        return pet
def copy(self):
copy_food = Food(self.name, self.shop)
for key,value in self.__dict__.items():
### Although this approach will copy the internal dictionaries by
### reference rather than copy by value, these dictionaries will
### never be modified anyways.
### All integers and strings are copied by value automatically with
### Python, therefore, this achieves the correct behavior
copy_food.__dict__[key] = value
return copy_food
@property
def state(self):
#### Ensure that state can be JSON serialized
if getattr(self, "rs", False):
if type(self.rs).__name__ == "MockRandomState":
seed_state = None
else:
seed_state = list(self.rs.get_state())
seed_state[1] = seed_state[1].tolist()
else:
seed_state = None
state_dict = {
"type": "Food",
"name": self.name,
"eaten": self.eaten,
"attack": self.attack,
"health": self.health,
"seed_state": seed_state
}
return state_dict
@classmethod
def from_state(cls, state):
food = cls(name=state["name"])
food.attack = state["attack"]
food.health = state["health"]
        food.eaten = state["eaten"]
        ### seed_state in the state dict is optional
if "seed_state" in state:
if state["seed_state"] != None:
food.seed_state = state["seed_state"]
food.rs = np.random.RandomState()
food.rs.set_state(state["seed_state"])
return food
def __repr__(self):
return "< {} {}-{} {} >".format(
self.name, self.attack, self.health, self.status)
# %%
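# Illustrative usage sketch (not part of the original module); `pet` stands
# for any object exposing the `attack`, `health`, and `status` attributes
# that Food.apply assumes, and "food-apple" is assumed to exist in the data
# table:
#
#   apple = Food("apple")                    # name is normalized to "food-apple"
#   apple.apply(pet)                         # bumps pet stats / applies status
#   restored = Food.from_state(apple.state)  # JSON-safe round trip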
|
[
"sapai.rand.MockRandomState",
"numpy.random.RandomState"
] |
[((661, 684), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (682, 684), True, 'import numpy as np\n'), ((804, 821), 'sapai.rand.MockRandomState', 'MockRandomState', ([], {}), '()\n', (819, 821), False, 'from sapai.rand import MockRandomState\n'), ((3925, 3948), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3946, 3948), True, 'import numpy as np\n')]
|
import traceback
import copy
import gc
from ctypes import c_void_p
import itertools
import array
import math
import numpy as np
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler
from PyEngine3D.OpenGLContext import OpenGLContext
def get_numpy_dtype(data_type):
if GL_BYTE == data_type:
return np.int8
    elif GL_UNSIGNED_BYTE == data_type:
        return np.uint8
elif GL_SHORT == data_type:
return np.int16
elif GL_UNSIGNED_SHORT == data_type:
return np.uint16
elif GL_INT == data_type:
return np.int32
elif GL_UNSIGNED_INT == data_type:
return np.uint32
elif GL_UNSIGNED_INT64 == data_type:
return np.uint64
elif GL_FLOAT == data_type:
return np.float32
elif GL_DOUBLE == data_type:
return np.float64
    logger.error('Cannot convert to numpy dtype. UNKNOWN DATA TYPE(%s)', data_type)
return np.uint8
def get_internal_format(str_image_mode):
if str_image_mode == "RGBA":
return GL_RGBA8
elif str_image_mode == "RGB":
return GL_RGB8
elif str_image_mode == "L" or str_image_mode == "P" or str_image_mode == "R":
return GL_R8
else:
logger.error("get_internal_format::unknown image mode ( %s )" % str_image_mode)
return GL_RGBA8
def get_texture_format(str_image_mode):
if str_image_mode == "RGBA":
# R,G,B,A order. GL_BGRA is faster than GL_RGBA
return GL_RGBA # GL_BGRA
elif str_image_mode == "RGB":
return GL_RGB
elif str_image_mode == "L" or str_image_mode == "P" or str_image_mode == "R":
return GL_RED
else:
logger.error("get_texture_format::unknown image mode ( %s )" % str_image_mode)
return GL_RGBA
def get_image_mode(texture_internal_format):
if texture_internal_format in (GL_RGBA, GL_BGRA):
return "RGBA"
elif texture_internal_format in (GL_RGB, GL_BGR):
return "RGB"
elif texture_internal_format == GL_RG:
return "RG"
elif texture_internal_format in (GL_R8, GL_R16F, GL_RED, GL_DEPTH_STENCIL, GL_DEPTH_COMPONENT):
return "R"
elif texture_internal_format == GL_LUMINANCE:
return "L"
else:
logger.error("get_image_mode::unknown image format ( %s )" % texture_internal_format)
return "RGBA"
def CreateTexture(**texture_datas):
texture_class = texture_datas.get('texture_type', Texture2D)
if texture_class is not None:
if type(texture_class) is str:
texture_class = eval(texture_class)
return texture_class(**texture_datas)
return None
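# Illustrative usage sketch (not part of the original module). CreateTexture
# dispatches on 'texture_type', which may be a class or its name as a string
# (resolved via eval above); an active OpenGL context is required:
#
#   texture = CreateTexture(name="scene_color", texture_type="Texture2D",
#                           width=1024, height=1024, image_mode="RGBA")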
class Texture:
target = GL_TEXTURE_2D
default_wrap = GL_REPEAT
use_glTexStorage = False
def __init__(self, **texture_data):
self.name = texture_data.get('name')
self.attachment = False
self.image_mode = "RGBA"
self.internal_format = GL_RGBA8
self.texture_format = GL_RGBA
self.sRGB = False
self.clear_color = None
self.multisample_count = 0
self.width = 0
self.height = 0
self.depth = 1
self.data_type = GL_UNSIGNED_BYTE
self.min_filter = GL_LINEAR_MIPMAP_LINEAR
self.mag_filter = GL_LINEAR
self.enable_mipmap = False
self.wrap = self.default_wrap
self.wrap_s = self.default_wrap
self.wrap_t = self.default_wrap
self.wrap_r = self.default_wrap
self.buffer = -1
self.sampler_handle = -1
self.attribute = Attributes()
self.create_texture(**texture_data)
def create_texture(self, **texture_data):
if self.buffer != -1:
self.delete()
self.attachment = False
self.image_mode = texture_data.get('image_mode')
self.internal_format = texture_data.get('internal_format')
self.texture_format = texture_data.get('texture_format')
self.sRGB = texture_data.get('sRGB', False)
self.clear_color = texture_data.get('clear_color')
self.multisample_count = 0
if self.internal_format is None and self.image_mode:
self.internal_format = get_internal_format(self.image_mode)
if self.texture_format is None and self.image_mode:
self.texture_format = get_texture_format(self.image_mode)
if self.image_mode is None and self.texture_format:
self.image_mode = get_image_mode(self.texture_format)
# Convert to sRGB
if self.sRGB:
if self.internal_format == GL_RGB:
self.internal_format = GL_SRGB8
elif self.internal_format == GL_RGBA:
self.internal_format = GL_SRGB8_ALPHA8
if GL_RGBA == self.internal_format:
self.internal_format = GL_RGBA8
if GL_RGB == self.internal_format:
self.internal_format = GL_RGB8
self.width = int(texture_data.get('width', 0))
self.height = int(texture_data.get('height', 0))
self.depth = int(max(1, texture_data.get('depth', 1)))
self.data_type = texture_data.get('data_type', GL_UNSIGNED_BYTE)
self.min_filter = texture_data.get('min_filter', GL_LINEAR_MIPMAP_LINEAR)
self.mag_filter = texture_data.get('mag_filter', GL_LINEAR) # GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR, GL_NEAREST
mipmap_filters = (GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_NEAREST,
GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST_MIPMAP_NEAREST)
self.enable_mipmap = self.min_filter in mipmap_filters
if self.target == GL_TEXTURE_2D_MULTISAMPLE:
self.enable_mipmap = False
self.wrap = texture_data.get('wrap', self.default_wrap) # GL_REPEAT, GL_CLAMP
self.wrap_s = texture_data.get('wrap_s')
self.wrap_t = texture_data.get('wrap_t')
self.wrap_r = texture_data.get('wrap_r')
self.buffer = -1
self.sampler_handle = -1
# texture parameter overwrite
# self.sampler_handle = glGenSamplers(1)
# glSamplerParameteri(self.sampler_handle, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
# glBindSampler(0, self.sampler_handle)
logger.info("Create %s : %s %dx%dx%d %s mipmap(%s)." % (
GetClassName(self), self.name, self.width, self.height, self.depth, str(self.internal_format),
'Enable' if self.enable_mipmap else 'Disable'))
self.attribute = Attributes()
def __del__(self):
pass
def delete(self):
logger.info("Delete %s : %s" % (GetClassName(self), self.name))
glDeleteTextures([self.buffer, ])
self.buffer = -1
def get_texture_info(self):
return dict(
texture_type=self.__class__.__name__,
width=self.width,
height=self.height,
depth=self.depth,
image_mode=self.image_mode,
internal_format=self.internal_format,
texture_format=self.texture_format,
data_type=self.data_type,
min_filter=self.min_filter,
mag_filter=self.mag_filter,
wrap=self.wrap,
wrap_s=self.wrap_s,
wrap_t=self.wrap_t,
wrap_r=self.wrap_r,
)
def get_save_data(self):
save_data = self.get_texture_info()
data = self.get_image_data()
if data is not None:
save_data['data'] = data
return save_data
def get_mipmap_size(self, level=0):
if 0 < level:
divider = 2.0 ** level
width = max(1, int(self.width / divider))
height = max(1, int(self.height / divider))
return width, height
return self.width, self.height
def get_image_data(self, level=0):
if self.target not in (GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D):
return None
        # clamp to the last valid mip level (levels run 0..count-1)
        level = min(level, self.get_mipmap_count() - 1)
dtype = get_numpy_dtype(self.data_type)
try:
glBindTexture(self.target, self.buffer)
data = OpenGLContext.glGetTexImage(self.target, level, self.texture_format, self.data_type)
# convert to numpy array
if type(data) is bytes:
data = np.fromstring(data, dtype=dtype)
else:
data = np.array(data, dtype=dtype)
glBindTexture(self.target, 0)
return data
except:
logger.error(traceback.format_exc())
logger.error('%s failed to get image data.' % self.name)
            logger.info('Falling back to glReadPixels.')
glBindTexture(self.target, self.buffer)
fb = glGenFramebuffers(1)
glBindFramebuffer(GL_FRAMEBUFFER, fb)
data = []
for layer in range(self.depth):
if GL_TEXTURE_2D == self.target:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.buffer, level)
elif GL_TEXTURE_3D == self.target:
glFramebufferTexture3D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_3D, self.buffer, level, layer)
elif GL_TEXTURE_2D_ARRAY == self.target:
glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, self.buffer, level, layer)
glReadBuffer(GL_COLOR_ATTACHMENT0)
width, height = self.get_mipmap_size(level)
pixels = glReadPixels(0, 0, width, height, self.texture_format, self.data_type)
# convert to numpy array
if type(pixels) is bytes:
pixels = np.fromstring(pixels, dtype=dtype)
data.append(pixels)
data = np.array(data, dtype=dtype)
glBindTexture(self.target, 0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glDeleteFramebuffers(1, [fb, ])
return data
def get_mipmap_count(self):
factor = max(max(self.width, self.height), self.depth)
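        # full mip chain: floor(log2(largest dimension)) + 1 levels, e.g. a
        # 1024x512x1 texture has floor(log2(1024)) + 1 = 11 levels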
return math.floor(math.log2(factor)) + 1
def generate_mipmap(self):
if self.enable_mipmap:
glBindTexture(self.target, self.buffer)
glGenerateMipmap(self.target)
else:
            logger.warn('%s has mipmap generation disabled.' % self.name)
def texure_wrap(self, wrap):
glTexParameteri(self.target, GL_TEXTURE_WRAP_S, wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_T, wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_R, wrap)
def bind_texture(self, wrap=None):
if self.buffer == -1:
logger.warn("%s texture is invalid." % self.name)
return
glBindTexture(self.target, self.buffer)
if wrap is not None:
self.texure_wrap(wrap)
def bind_image(self, image_unit, level=0, access=GL_READ_WRITE):
if self.buffer == -1:
logger.warn("%s texture is invalid." % self.name)
return
# flag : GL_READ_WRITE, GL_WRITE_ONLY, GL_READ_ONLY
glBindImageTexture(image_unit, self.buffer, level, GL_FALSE, 0, access, self.internal_format)
def is_attached(self):
return self.attachment
def set_attachment(self, attachment):
self.attachment = attachment
def get_attribute(self):
self.attribute.set_attribute("name", self.name)
self.attribute.set_attribute("target", self.target)
self.attribute.set_attribute("width", self.width)
self.attribute.set_attribute("height", self.height)
self.attribute.set_attribute("depth", self.depth)
self.attribute.set_attribute("image_mode", self.image_mode)
self.attribute.set_attribute("internal_format", self.internal_format)
self.attribute.set_attribute("texture_format", self.texture_format)
self.attribute.set_attribute("data_type", self.data_type)
self.attribute.set_attribute("min_filter", self.min_filter)
self.attribute.set_attribute("mag_filter", self.mag_filter)
self.attribute.set_attribute("multisample_count", self.multisample_count)
self.attribute.set_attribute("wrap", self.wrap)
self.attribute.set_attribute("wrap_s", self.wrap_s)
self.attribute.set_attribute("wrap_t", self.wrap_t)
self.attribute.set_attribute("wrap_r", self.wrap_r)
return self.attribute
def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
if hasattr(self, attribute_name) and "" != attribute_value:
setattr(self, attribute_name, eval(attribute_value))
if 'wrap' in attribute_name:
glBindTexture(self.target, self.buffer)
glTexParameteri(self.target, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(self.target, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glBindTexture(self.target, 0)
return self.attribute
class Texture2D(Texture):
target = GL_TEXTURE_2D
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.buffer)
if self.use_glTexStorage:
glTexStorage2D(GL_TEXTURE_2D,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height)
if data is not None:
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
self.width, self.height,
self.texture_format,
self.data_type,
data)
else:
glTexImage2D(GL_TEXTURE_2D,
0,
self.internal_format,
self.width,
self.height,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_2D)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
if self.clear_color is not None:
glClearTexImage(self.buffer, 0, self.texture_format, self.data_type, self.clear_color)
glBindTexture(GL_TEXTURE_2D, 0)
class Texture2DArray(Texture):
target = GL_TEXTURE_2D_ARRAY
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D_ARRAY, self.buffer)
if self.use_glTexStorage:
glTexStorage3D(GL_TEXTURE_2D_ARRAY,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height, self.depth)
if data is not None:
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
0, 0, 0,
self.width, self.height, self.depth,
self.texture_format,
self.data_type,
data)
else:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
0,
self.internal_format,
self.width,
self.height,
self.depth,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_2D_ARRAY, 0)
class Texture3D(Texture):
target = GL_TEXTURE_3D
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
data = texture_data.get('data')
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.buffer)
if self.use_glTexStorage:
glTexStorage3D(GL_TEXTURE_3D,
self.get_mipmap_count(),
self.internal_format,
self.width, self.height, self.depth)
if data is not None:
glTexSubImage3D(GL_TEXTURE_3D,
0,
0, 0, 0,
self.width, self.height, self.depth,
self.texture_format,
self.data_type,
data)
else:
glTexImage3D(GL_TEXTURE_3D,
0,
self.internal_format,
self.width,
self.height,
self.depth,
0,
self.texture_format,
self.data_type,
data)
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_3D)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_3D, 0)
class Texture2DMultiSample(Texture):
target = GL_TEXTURE_2D_MULTISAMPLE
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
multisample_count = texture_data.get('multisample_count', 4)
self.multisample_count = multisample_count - (multisample_count % 4)
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, self.buffer)
if self.use_glTexStorage:
glTexStorage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
self.multisample_count,
self.internal_format,
self.width,
self.height,
GL_TRUE)
else:
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
self.multisample_count,
self.internal_format,
self.width,
self.height,
GL_TRUE)
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
class TextureCube(Texture):
target = GL_TEXTURE_CUBE_MAP
default_wrap = GL_REPEAT
def __init__(self, **texture_data):
self.texture_positive_x = None
self.texture_negative_x = None
self.texture_positive_y = None
self.texture_negative_y = None
self.texture_positive_z = None
self.texture_negative_z = None
Texture.__init__(self, **texture_data)
def create_texture(self, **texture_data):
Texture.create_texture(self, **texture_data)
        # If face textures are not supplied, create render-target textures for each face.
face_texture_datas = copy.copy(texture_data)
face_texture_datas.pop('name')
face_texture_datas['texture_type'] = Texture2D
self.texture_positive_x = texture_data.get('texture_positive_x', CreateTexture(name=self.name + "_right", **face_texture_datas))
self.texture_negative_x = texture_data.get('texture_negative_x', CreateTexture(name=self.name + "_left", **face_texture_datas))
self.texture_positive_y = texture_data.get('texture_positive_y', CreateTexture(name=self.name + "_top", **face_texture_datas))
self.texture_negative_y = texture_data.get('texture_negative_y', CreateTexture(name=self.name + "_bottom", **face_texture_datas))
self.texture_positive_z = texture_data.get('texture_positive_z', CreateTexture(name=self.name + "_front", **face_texture_datas))
self.texture_negative_z = texture_data.get('texture_negative_z', CreateTexture(name=self.name + "_back", **face_texture_datas))
self.buffer = glGenTextures(1)
glBindTexture(GL_TEXTURE_CUBE_MAP, self.buffer)
if self.use_glTexStorage:
glTexStorage2D(GL_TEXTURE_CUBE_MAP, self.get_mipmap_count(), self.internal_format, self.width, self.height)
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x) # Right
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x) # Left
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y) # Top
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y) # Bottom
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z) # Front
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z) # Back
else:
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x) # Right
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x) # Left
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y) # Top
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y) # Bottom
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z) # Front
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z) # Back
if self.enable_mipmap:
glGenerateMipmap(GL_TEXTURE_CUBE_MAP)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, self.min_filter)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, self.mag_filter)
glBindTexture(GL_TEXTURE_CUBE_MAP, 0)
@staticmethod
def createTexImage2D(target_face, texture):
glTexImage2D(target_face,
0,
texture.internal_format,
texture.width,
texture.height,
0,
texture.texture_format,
texture.data_type,
texture.get_image_data())
@staticmethod
def createTexSubImage2D(target_face, texture):
glTexSubImage2D(target_face,
0,
0, 0,
texture.width, texture.height,
texture.texture_format,
texture.data_type,
texture.get_image_data())
def delete(self):
super(TextureCube, self).delete()
self.texture_positive_x.delete()
self.texture_negative_x.delete()
self.texture_positive_y.delete()
self.texture_negative_y.delete()
self.texture_positive_z.delete()
self.texture_negative_z.delete()
def get_save_data(self, get_image_data=True):
save_data = Texture.get_save_data(self)
save_data['texture_positive_x'] = self.texture_positive_x.name
save_data['texture_negative_x'] = self.texture_negative_x.name
save_data['texture_positive_y'] = self.texture_positive_y.name
save_data['texture_negative_y'] = self.texture_negative_y.name
save_data['texture_positive_z'] = self.texture_positive_z.name
save_data['texture_negative_z'] = self.texture_negative_z.name
return save_data
def get_attribute(self):
Texture.get_attribute(self)
self.attribute.set_attribute("texture_positive_x", self.texture_positive_x.name)
self.attribute.set_attribute("texture_negative_x", self.texture_negative_x.name)
self.attribute.set_attribute("texture_positive_y", self.texture_positive_y.name)
self.attribute.set_attribute("texture_negative_y", self.texture_negative_y.name)
self.attribute.set_attribute("texture_positive_z", self.texture_positive_z.name)
self.attribute.set_attribute("texture_negative_z", self.texture_negative_z.name)
return self.attribute
|
[
"PyEngine3D.Common.logger.error",
"traceback.format_exc",
"PyEngine3D.OpenGLContext.OpenGLContext.glGetTexImage",
"PyEngine3D.Utilities.GetClassName",
"PyEngine3D.Common.logger.warn",
"math.log2",
"PyEngine3D.Utilities.Attributes",
"numpy.array",
"copy.copy",
"numpy.fromstring",
"PyEngine3D.Common.logger.info"
] |
[((964, 1042), 'PyEngine3D.Common.logger.error', 'logger.error', (['"""Cannot convert to numpy dtype. UNKOWN DATA TYPE(%s)"""', 'data_type'], {}), "('Cannot convert to numpy dtype. UNKOWN DATA TYPE(%s)', data_type)\n", (976, 1042), False, 'from PyEngine3D.Common import logger\n'), ((3639, 3651), 'PyEngine3D.Utilities.Attributes', 'Attributes', ([], {}), '()\n', (3649, 3651), False, 'from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler\n'), ((6519, 6531), 'PyEngine3D.Utilities.Attributes', 'Attributes', ([], {}), '()\n', (6529, 6531), False, 'from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler\n'), ((9711, 9738), 'numpy.array', 'np.array', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (9719, 9738), True, 'import numpy as np\n'), ((20172, 20195), 'copy.copy', 'copy.copy', (['texture_data'], {}), '(texture_data)\n', (20181, 20195), False, 'import copy\n'), ((8132, 8221), 'PyEngine3D.OpenGLContext.OpenGLContext.glGetTexImage', 'OpenGLContext.glGetTexImage', (['self.target', 'level', 'self.texture_format', 'self.data_type'], {}), '(self.target, level, self.texture_format, self.\n data_type)\n', (8159, 8221), False, 'from PyEngine3D.OpenGLContext import OpenGLContext\n'), ((10210, 10267), 'PyEngine3D.Common.logger.warn', 'logger.warn', (["('%s disable to generate mipmap.' % self.name)"], {}), "('%s disable to generate mipmap.' % self.name)\n", (10221, 10267), False, 'from PyEngine3D.Common import logger\n'), ((10570, 10619), 'PyEngine3D.Common.logger.warn', 'logger.warn', (["('%s texture is invalid.' % self.name)"], {}), "('%s texture is invalid.' % self.name)\n", (10581, 10619), False, 'from PyEngine3D.Common import logger\n'), ((10865, 10914), 'PyEngine3D.Common.logger.warn', 'logger.warn', (["('%s texture is invalid.' % self.name)"], {}), "('%s texture is invalid.' % self.name)\n", (10876, 10914), False, 'from PyEngine3D.Common import logger\n'), ((1341, 1420), 'PyEngine3D.Common.logger.error', 'logger.error', (["('get_internal_format::unknown image mode ( %s )' % str_image_mode)"], {}), "('get_internal_format::unknown image mode ( %s )' % str_image_mode)\n", (1353, 1420), False, 'from PyEngine3D.Common import logger\n'), ((1784, 1862), 'PyEngine3D.Common.logger.error', 'logger.error', (["('get_texture_format::unknown image mode ( %s )' % str_image_mode)"], {}), "('get_texture_format::unknown image mode ( %s )' % str_image_mode)\n", (1796, 1862), False, 'from PyEngine3D.Common import logger\n'), ((8313, 8345), 'numpy.fromstring', 'np.fromstring', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (8326, 8345), True, 'import numpy as np\n'), ((8387, 8414), 'numpy.array', 'np.array', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (8395, 8414), True, 'import numpy as np\n'), ((8558, 8614), 'PyEngine3D.Common.logger.error', 'logger.error', (["('%s failed to get image data.' % self.name)"], {}), "('%s failed to get image data.' % self.name)\n", (8570, 8614), False, 'from PyEngine3D.Common import logger\n'),
((8627, 8662), 'PyEngine3D.Common.logger.info', 'logger.info', (['"""Try to glReadPixels."""'], {}), "('Try to glReadPixels.')\n", (8638, 8662), False, 'from PyEngine3D.Common import logger\n'), ((9629, 9663), 'numpy.fromstring', 'np.fromstring', (['pixels'], {'dtype': 'dtype'}), '(pixels, dtype=dtype)\n', (9642, 9663), True, 'import numpy as np\n'), ((10004, 10021), 'math.log2', 'math.log2', (['factor'], {}), '(factor)\n', (10013, 10021), False, 'import math\n'), ((6338, 6356), 'PyEngine3D.Utilities.GetClassName', 'GetClassName', (['self'], {}), '(self)\n', (6350, 6356), False, 'from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler\n'), ((6632, 6650), 'PyEngine3D.Utilities.GetClassName', 'GetClassName', (['self'], {}), '(self)\n', (6644, 6650), False, 'from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler\n'), ((8522, 8544), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8542, 8544), False, 'import traceback\n'), ((2349, 2438), 'PyEngine3D.Common.logger.error', 'logger.error', (["('get_image_mode::unknown image format ( %s )' % texture_internal_format)"], {}), "('get_image_mode::unknown image format ( %s )' %\n texture_internal_format)\n", (2361, 2438), False, 'from PyEngine3D.Common import logger\n')]
|
"""Find stars that are both in our sample and in Shull+21"""
import numpy as np
import get_data
from matplotlib import pyplot as plt
data = get_data.get_merged_table()
shull = get_data.get_shull2021()
matches = [name for name in data["Name"] if name in shull["Name"]]
print(len(matches), " matches found")
print(matches)
data_comp = data[np.isin(data["Name"], matches)]
refs = data_comp['hiref']
shull_comp = shull[np.isin(shull["Name"], matches)]
def compare_shull(param):
plt.figure()
x = shull_comp[param]
y = data_comp[param]
plt.plot(x, x, color="k")
plt.scatter(x, y, c=refs)
plt.colorbar()
plt.ylabel("ours")
plt.xlabel("shull")
plt.title(param)
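    # Added note: points on the black y = x line mark perfect agreement between
    # the two samples; marker colour encodes each star's 'hiref' value
    # (presumably the reference used for its H I column).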
# compare_shull("nhtot")
compare_shull("EBV")
compare_shull("fh2")
compare_shull("nhi")
compare_shull("nh2")
plt.show()
|
[
"get_data.get_merged_table",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"numpy.isin",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"get_data.get_shull2021",
"matplotlib.pyplot.show"
] |
[((142, 169), 'get_data.get_merged_table', 'get_data.get_merged_table', ([], {}), '()\n', (167, 169), False, 'import get_data\n'), ((178, 202), 'get_data.get_shull2021', 'get_data.get_shull2021', ([], {}), '()\n', (200, 202), False, 'import get_data\n'), ((806, 816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (814, 816), True, 'from matplotlib import pyplot as plt\n'), ((341, 371), 'numpy.isin', 'np.isin', (["data['Name']", 'matches'], {}), "(data['Name'], matches)\n", (348, 371), True, 'import numpy as np\n'), ((418, 449), 'numpy.isin', 'np.isin', (["shull['Name']", 'matches'], {}), "(shull['Name'], matches)\n", (425, 449), True, 'import numpy as np\n'), ((483, 495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (493, 495), True, 'from matplotlib import pyplot as plt\n'), ((551, 576), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'color': '"""k"""'}), "(x, x, color='k')\n", (559, 576), True, 'from matplotlib import pyplot as plt\n'), ((581, 606), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'refs'}), '(x, y, c=refs)\n', (592, 606), True, 'from matplotlib import pyplot as plt\n'), ((611, 625), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (623, 625), True, 'from matplotlib import pyplot as plt\n'), ((630, 648), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ours"""'], {}), "('ours')\n", (640, 648), True, 'from matplotlib import pyplot as plt\n'), ((653, 672), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""shull"""'], {}), "('shull')\n", (663, 672), True, 'from matplotlib import pyplot as plt\n'), ((677, 693), 'matplotlib.pyplot.title', 'plt.title', (['param'], {}), '(param)\n', (686, 693), True, 'from matplotlib import pyplot as plt\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from collections.abc import Iterable  # 'collections.Iterable' was removed in Python 3.10
mrkr1 = 12        # outer state-marker size
mrkr1_inner = 8   # inner (highlight) marker size
fs = 18           # font size for state labels
# FUNCTION TO TURN NESTED LIST INTO 1D LIST
def flatten(lis):
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
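# Usage sketch (illustrative, not part of the original script): strings are
# kept whole, nested lists are unrolled.
# >>> list(flatten([1, [2, [3, 4]], 'ab']))
# [1, 2, 3, 4, 'ab']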
# FUNCTION TO DRAW TREES
def tree(base, graph, cycle, bias, visits, print_states_hex, docolour):
# find parents
parents = graph[base][0]
for each in cycle:
if each in parents:
parents.remove(each)
# add parents to visits
for a in parents:
visits.append(a)
greycol = (0.4,0.4,0.4)
# add co-ordinates to graph array
l = len(parents)
count = 0
amp = graph[base][2][0]
min_ang = graph[base][2][1]
max_ang = graph[base][2][2]
for b in parents:
graph[b][2][0] = amp + 1
graph[b][2][1] = min_ang + count*(max_ang-min_ang)/l
graph[b][2][2] = min_ang + (count+1)*(max_ang-min_ang)/l
count = count + 1
# draw those on the branches
for c in parents:
mid = (graph[c][2][1] + graph[c][2][2])/2
xco = graph[c][2][0]*np.cos(np.radians(mid))
yco = graph[c][2][0]*np.sin(np.radians(mid)) + bias
graph[c][2][3] = xco
graph[c][2][4] = yco
colo = plt.cm.hsv(c/32)
if c%2==0:
colo2 = plt.cm.flag(c/32.0)
else:
colo2 = plt.cm.prism(c/32.0)
if docolour == False:
colo = 'k'
colo2 = 'k'
#print ('Printing marker for c={0}'.format(c))
plt.plot(xco, yco, 'o', markersize=mrkr1, color=colo)
        text_labels = c in (21, 10, 16, 0)
if text_labels:
if print_states_hex:
tt = plt.text(xco+0.25,yco+0.4, '{:02X}'.format(c), ha='center', fontsize=fs)
else:
tt = plt.text(xco+0.25,yco+0.4, '{:d}'.format(c), ha='center', fontsize=fs)
tt.set_bbox(dict(boxstyle='round,pad=0.0', edgecolor='none', facecolor='white', alpha=0.6))
if c==21 or c==10:
selmarker = 'v'
if docolour == False:
colo2 = 'w'
elif c==16 or c==0:
#print ('Printing star for c={0}'.format(c)) # Note in one case, star is red and BG circle is red.
selmarker = '*'
if docolour == False:
selmarker = 'o'
colo2 = 'w'
else:
if (c==0):
                print('printing selmarker for c={0} with BLUE star'.format(c))
colo2='b'
else:
selmarker = 'o'
        plt.plot(xco, yco, marker=selmarker, markersize=mrkr1_inner, color=colo2)
plt.arrow(xco, yco, graph[base][2][3]-xco, graph[base][2][4]-yco, overhang=0, length_includes_head=True, head_width=0.15, head_length=0.5, fc=greycol, ec=greycol)
for z in parents:
        tree(z, graph, parents, bias, visits, print_states_hex, docolour)
def plot_states (net, ax, print_states_hex=False, kequalsn=True, docolour=True):
# Find where each state leads
targets = []
for i in range(2**5):
state = np.binary_repr(i,5)
# k=n
if kequalsn:
effect = net[int(state,2)] + net[int(state[1:]+state[0:1],2) + 32] + net[int(state[2:]+state[0:2],2) + 64] + net[int(state[3:]+state[0:3],2)+96] + net[int(state[4:]+state[0:4],2)+128]
else:
# k=n-1
effect = net[int(state[1:],2)] + net[int(state[:1]+state[2:],2)+16] + net[int(state[:2]+state[3:],2) + 32] + net[int(state[:3]+state[4],2)+48] + net[int(state[:4],2)+64]
# in decimal form
targets.append(int(effect[4]) + 2*int(effect[3]) + 4*int(effect[2]) + 8*int(effect[1]) + 16*int(effect[0]))
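    # Illustrative example of the rotation indexing above (added note):
    # np.binary_repr(5, 5) == '00101', and state[1:] + state[0:1] == '01010' is
    # that state rotated left by one bit, so each of the five rules reads the
    # ring of cells from its own cell's frame of reference.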
# graph[n] gives the parent nodes, child nodes and co-ordinates for the nth node.
# graph[n][2][0] gives polar amplitude, [1] is min angle, [2] is max angle, [3] is x, [4] is y
graph = [[[],[],[0,0,0,0,0]] for x in range(1024)]
targets = [int(z) for z in targets]
for y in range(32):
graph[y][1] = targets[y] # add child
graph[targets[y]][0].append(y) # add parent
visits = []
greycol = (0.4,0.4,0.4)
plt.xticks([])
plt.yticks([])
bases = []
for x in range(len(targets)):
visits = []
        while x not in visits:
visits.append(x)
x = targets[x]
base = visits[visits.index(x):]
# It's awkward to format the list of bases in hex, so it's not implemented
if not base[0] in list(flatten(bases)):
bases.append(base)
for base in bases:
# find co-ordinates of base nodes
tot = len(base)
count = 0
for x in base:
graph[x][2][0] = 1
graph[x][2][1] = count*180/tot
graph[x][2][2] = (count+1)*180/tot
count = count + 1
# find max y-co for bias for next tree
bias = graph[0][2][4]
for node in graph:
if node[2][4]>bias:
bias = node[2][4]
bias = bias + 2
# draw those on the LC
tt = plt.text(0+0.7,bias-2+0.5,base, ha='center', fontsize=fs)
tt.set_bbox(dict(boxstyle='round,pad=0.0', edgecolor='none', facecolor='white', alpha=0.6))
        circle = plt.Circle((0, bias), 1, color=greycol, fill=False)
ax.add_artist(circle)
for x in base:
mid = (graph[x][2][1] + graph[x][2][2])/2.
graph[x][2][3] = graph[x][2][0]*np.cos(np.radians(mid))
graph[x][2][4] = graph[x][2][0]*np.sin(np.radians(mid)) + bias
colo = plt.cm.hsv(x/32)
if x%2==0:
colo2 = plt.cm.flag(x/32.0)
else:
colo2 = plt.cm.prism(x/32.0)
#plt.plot(graph[x][2][3], graph[x][2][4], 'o', color=(0,0,0), markersize=mrkr1)
#print ('Printing marker for c={0}'.format(x))
if docolour == True:
plt.plot(graph[x][2][3], graph[x][2][4], 'o', color=colo, markersize=mrkr1)
else:
plt.plot(graph[x][2][3], graph[x][2][4], 'o', color='k', markersize=mrkr1)
if docolour == False:
colo2 = 'k'
if x==21 or x==10:
selmarker = 'v'
if docolour == False:
colo2 = 'w'
elif x==16 or x==0:
selmarker = '*'
if docolour == False:
selmarker = 'o'
colo2 = 'w'
else:
if x==0:
                print('printing selmarker for x={0} with BLUE star'.format(x))
colo2='b' # special case
else:
selmarker = 'o'
plt.plot(graph[x][2][3], graph[x][2][4], marker=selmarker, color=colo2, markersize=mrkr1_inner)
for x in base:
        tree(x, graph, base, bias, visits, print_states_hex, docolour)
# do it again for the next set
# find max y and x to get axis right
max_x = graph[0][2][3]
max_y = graph[0][2][4]
min_x = max_x
for node in graph:
if node[2][4] > max_y:
max_y = node[2][4]
if node[2][3] > max_x:
max_x = node[2][3]
#plt.plot(graph[21][2][3], graph[21][2][4],'v',color='k', markersize=mrkr1-2) # final ant
#plt.plot(graph[10][2][3], graph[10][2][4],'v',color='w', markersize=mrkr1-2) # final post
#plt.plot(graph[16][2][3], graph[16][2][4],'*',color='k', markersize=mrkr1-2) # initial ant
#plt.plot(graph[0][2][3], graph[0][2][4],'*',color='w', markersize=mrkr1-2) # initial post
# Modify use of the area inside the graph
ymin,ymax = plt.ylim()
plt.ylim(ymin-4,ymax+1)
xmin,xmax = plt.xlim()
plt.xlim(xmin-0,xmax+0)
|
[
"numpy.radians",
"matplotlib.pyplot.text",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.cm.flag",
"numpy.binary_repr",
"matplotlib.pyplot.cm.hsv",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.arrow",
"matplotlib.pyplot.cm.prism"
] |
[((4339, 4353), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4349, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4358, 4372), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4368, 4372), True, 'import matplotlib.pyplot as plt\n'), ((7847, 7857), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (7855, 7857), True, 'import matplotlib.pyplot as plt\n'), ((7862, 7890), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin - 4)', '(ymax + 1)'], {}), '(ymin - 4, ymax + 1)\n', (7870, 7890), True, 'import matplotlib.pyplot as plt\n'), ((7902, 7912), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (7910, 7912), True, 'import matplotlib.pyplot as plt\n'), ((7917, 7945), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin - 0)', '(xmax + 0)'], {}), '(xmin - 0, xmax + 0)\n', (7925, 7945), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1390), 'matplotlib.pyplot.cm.hsv', 'plt.cm.hsv', (['(c / 32)'], {}), '(c / 32)\n', (1382, 1390), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1696), 'matplotlib.pyplot.plot', 'plt.plot', (['xco', 'yco', '"""o"""'], {'markersize': 'mrkr1', 'color': 'colo'}), "(xco, yco, 'o', markersize=mrkr1, color=colo)\n", (1651, 1696), True, 'import matplotlib.pyplot as plt\n'), ((2744, 2817), 'matplotlib.pyplot.plot', 'plt.plot', (['xco', 'yco'], {'marker': 'selmarker', 'markersize': 'mrkr1_inner', 'color': 'colo2'}), '(xco, yco, marker=selmarker, markersize=mrkr1_inner, color=colo2)\n', (2752, 2817), True, 'import matplotlib.pyplot as plt\n'), ((2828, 3002), 'matplotlib.pyplot.arrow', 'plt.arrow', (['xco', 'yco', '(graph[base][2][3] - xco)', '(graph[base][2][4] - yco)'], {'overhang': '(0)', 'length_includes_head': '(True)', 'head_width': '(0.15)', 'head_length': '(0.5)', 'fc': 'greycol', 'ec': 'greycol'}), '(xco, yco, graph[base][2][3] - xco, graph[base][2][4] - yco,\n overhang=0, length_includes_head=True, head_width=0.15, head_length=0.5,\n fc=greycol, ec=greycol)\n', (2837, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3285), 'numpy.binary_repr', 'np.binary_repr', (['i', '(5)'], {}), '(i, 5)\n', (3279, 3285), True, 'import numpy as np\n'), ((5259, 5324), 'matplotlib.pyplot.text', 'plt.text', (['(0 + 0.7)', '(bias - 2 + 0.5)', 'base'], {'ha': '"""center"""', 'fontsize': 'fs'}), "(0 + 0.7, bias - 2 + 0.5, base, ha='center', fontsize=fs)\n", (5267, 5324), True, 'import matplotlib.pyplot as plt\n'), ((5434, 5485), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, bias)', '(1)'], {'color': 'greycol', 'fill': '(False)'}), '((0, bias), 1, color=greycol, fill=False)\n', (5444, 5485), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1449), 'matplotlib.pyplot.cm.flag', 'plt.cm.flag', (['(c / 32.0)'], {}), '(c / 32.0)\n', (1439, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1504), 'matplotlib.pyplot.cm.prism', 'plt.cm.prism', (['(c / 32.0)'], {}), '(c / 32.0)\n', (1494, 1504), True, 'import matplotlib.pyplot as plt\n'), ((5756, 5774), 'matplotlib.pyplot.cm.hsv', 'plt.cm.hsv', (['(x / 32)'], {}), '(x / 32)\n', (5766, 5774), True, 'import matplotlib.pyplot as plt\n'), ((6905, 7004), 'matplotlib.pyplot.plot', 'plt.plot', (['graph[x][2][3]', 'graph[x][2][4]'], {'marker': 'selmarker', 'color': 'colo2', 'markersize': 'mrkr1_inner'}), '(graph[x][2][3], graph[x][2][4], marker=selmarker, color=colo2,\n markersize=mrkr1_inner)\n', (6913, 7004), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1237), 'numpy.radians', 'np.radians', (['mid'], {}), '(mid)\n', (1232, 1237), True, 'import numpy as np\n'), ((5820, 
5841), 'matplotlib.pyplot.cm.flag', 'plt.cm.flag', (['(x / 32.0)'], {}), '(x / 32.0)\n', (5831, 5841), True, 'import matplotlib.pyplot as plt\n'), ((5882, 5904), 'matplotlib.pyplot.cm.prism', 'plt.cm.prism', (['(x / 32.0)'], {}), '(x / 32.0)\n', (5894, 5904), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6178), 'matplotlib.pyplot.plot', 'plt.plot', (['graph[x][2][3]', 'graph[x][2][4]', '"""o"""'], {'color': 'colo', 'markersize': 'mrkr1'}), "(graph[x][2][3], graph[x][2][4], 'o', color=colo, markersize=mrkr1)\n", (6111, 6178), True, 'import matplotlib.pyplot as plt\n'), ((6213, 6287), 'matplotlib.pyplot.plot', 'plt.plot', (['graph[x][2][3]', 'graph[x][2][4]', '"""o"""'], {'color': '"""k"""', 'markersize': 'mrkr1'}), "(graph[x][2][3], graph[x][2][4], 'o', color='k', markersize=mrkr1)\n", (6221, 6287), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1290), 'numpy.radians', 'np.radians', (['mid'], {}), '(mid)\n', (1285, 1290), True, 'import numpy as np\n'), ((5645, 5660), 'numpy.radians', 'np.radians', (['mid'], {}), '(mid)\n', (5655, 5660), True, 'import numpy as np\n'), ((5713, 5728), 'numpy.radians', 'np.radians', (['mid'], {}), '(mid)\n', (5723, 5728), True, 'import numpy as np\n')]
|
# pdaggerq - A code for bringing strings of creation / annihilation operators to normal order.
# Copyright (C) 2020 <NAME>
#
# This file is part of the pdaggerq package.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
spin-orbital CCSD amplitude equations
"""
import numpy as np
from numpy import einsum
def ccsd_energy(t1, t2, f, g, o, v):
# < 0 | e(-T) H e(T) | 0> :
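    # Added note: o and v are the occupied/virtual orbital slices, so f[o, v] is
    # the ov block of the Fock matrix and einsum('ii', f[o, o]) below is the
    # trace of the occupied-occupied block.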
# 1.0000 f(i,i)
energy = 1.000000000000000 * einsum('ii', f[o, o])
# 1.0000 f(i,a)*t1(a,i)
energy += 1.000000000000000 * einsum('ia,ai', f[o, v], t1)
# -0.5000 <j,i||j,i>
energy += -0.500000000000000 * einsum('jiji', g[o, o, o, o])
# 0.2500 <j,i||a,b>*t2(a,b,j,i)
energy += 0.250000000000000 * einsum('jiab,abji', g[o, o, v, v], t2)
# -0.5000 <j,i||a,b>*t1(a,i)*t1(b,j)
energy += -0.500000000000000 * einsum('jiab,ai,bj', g[o, o, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
return energy
def singles_residual(t1, t2, f, g, o, v):
# < 0 | m* e e(-T) H e(T) | 0> :
# 1.0000 f(e,m)
singles_res = 1.000000000000000 * einsum('em->em', f[v, o])
# -1.0000 f(i,m)*t1(e,i)
singles_res += -1.000000000000000 * einsum('im,ei->em', f[o, o], t1)
# 1.0000 f(e,a)*t1(a,m)
singles_res += 1.000000000000000 * einsum('ea,am->em', f[v, v], t1)
# -1.0000 f(i,a)*t2(a,e,m,i)
singles_res += -1.000000000000000 * einsum('ia,aemi->em', f[o, v], t2)
# -1.0000 f(i,a)*t1(a,m)*t1(e,i)
singles_res += -1.000000000000000 * einsum('ia,am,ei->em', f[o, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <i,e||a,m>*t1(a,i)
singles_res += 1.000000000000000 * einsum('ieam,ai->em', g[o, v, v, o], t1)
# -0.5000 <j,i||a,m>*t2(a,e,j,i)
singles_res += -0.500000000000000 * einsum('jiam,aeji->em', g[o, o, v, o], t2)
# -0.5000 <i,e||a,b>*t2(a,b,m,i)
singles_res += -0.500000000000000 * einsum('ieab,abmi->em', g[o, v, v, v], t2)
# 1.0000 <j,i||a,b>*t1(a,i)*t2(b,e,m,j)
singles_res += 1.000000000000000 * einsum('jiab,ai,bemj->em', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 0.5000 <j,i||a,b>*t1(a,m)*t2(b,e,j,i)
singles_res += 0.500000000000000 * einsum('jiab,am,beji->em', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.5000 <j,i||a,b>*t1(e,i)*t2(a,b,m,j)
singles_res += 0.500000000000000 * einsum('jiab,ei,abmj->em', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 1.0000 <j,i||a,m>*t1(a,i)*t1(e,j)
singles_res += 1.000000000000000 * einsum('jiam,ai,ej->em', g[o, o, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <i,e||a,b>*t1(a,i)*t1(b,m)
singles_res += 1.000000000000000 * einsum('ieab,ai,bm->em', g[o, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <j,i||a,b>*t1(a,i)*t1(b,m)*t1(e,j)
singles_res += 1.000000000000000 * einsum('jiab,ai,bm,ej->em', g[o, o, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
return singles_res
def doubles_residual(t1, t2, f, g, o, v):
# < 0 | m* n* f e e(-T) H e(T) | 0> :
# -1.0000 P(m,n)f(i,n)*t2(e,f,m,i)
contracted_intermediate = -1.000000000000000 * einsum('in,efmi->efmn', f[o, o], t2)
doubles_res = 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 1.0000 P(e,f)f(e,a)*t2(a,f,m,n)
contracted_intermediate = 1.000000000000000 * einsum('ea,afmn->efmn', f[v, v], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -1.0000 P(m,n)f(i,a)*t1(a,n)*t2(e,f,m,i)
contracted_intermediate = -1.000000000000000 * einsum('ia,an,efmi->efmn', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -1.0000 P(e,f)f(i,a)*t1(e,i)*t2(a,f,m,n)
contracted_intermediate = -1.000000000000000 * einsum('ia,ei,afmn->efmn', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# 1.0000 <e,f||m,n>
doubles_res += 1.000000000000000 * einsum('efmn->efmn', g[v, v, o, o])
# 1.0000 P(e,f)<i,e||m,n>*t1(f,i)
contracted_intermediate = 1.000000000000000 * einsum('iemn,fi->efmn', g[o, v, o, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# 1.0000 P(m,n)<e,f||a,n>*t1(a,m)
contracted_intermediate = 1.000000000000000 * einsum('efan,am->efmn', g[v, v, v, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 0.5000 <j,i||m,n>*t2(e,f,j,i)
doubles_res += 0.500000000000000 * einsum('jimn,efji->efmn', g[o, o, o, o], t2)
# 1.0000 P(m,n)*P(e,f)<i,e||a,n>*t2(a,f,m,i)
contracted_intermediate = 1.000000000000000 * einsum('iean,afmi->efmn', g[o, v, v, o], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# 0.5000 <e,f||a,b>*t2(a,b,m,n)
doubles_res += 0.500000000000000 * einsum('efab,abmn->efmn', g[v, v, v, v], t2)
# 1.0000 P(m,n)<j,i||a,n>*t1(a,i)*t2(e,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jian,ai,efmj->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 0.5000 P(m,n)<j,i||a,n>*t1(a,m)*t2(e,f,j,i)
contracted_intermediate = 0.500000000000000 * einsum('jian,am,efji->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -1.0000 P(m,n)*P(e,f)<j,i||a,n>*t1(e,i)*t2(a,f,m,j)
contracted_intermediate = -1.000000000000000 * einsum('jian,ei,afmj->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# 1.0000 P(e,f)<i,e||a,b>*t1(a,i)*t2(b,f,m,n)
contracted_intermediate = 1.000000000000000 * einsum('ieab,ai,bfmn->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -1.0000 P(m,n)*P(e,f)<i,e||a,b>*t1(a,n)*t2(b,f,m,i)
contracted_intermediate = -1.000000000000000 * einsum('ieab,an,bfmi->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# 0.5000 P(e,f)<i,e||a,b>*t1(f,i)*t2(a,b,m,n)
contracted_intermediate = 0.500000000000000 * einsum('ieab,fi,abmn->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -1.0000 <j,i||m,n>*t1(e,i)*t1(f,j)
doubles_res += -1.000000000000000 * einsum('jimn,ei,fj->efmn', g[o, o, o, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(m,n)*P(e,f)<i,e||a,n>*t1(a,m)*t1(f,i)
contracted_intermediate = 1.000000000000000 * einsum('iean,am,fi->efmn', g[o, v, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# -1.0000 <e,f||a,b>*t1(a,n)*t1(b,m)
doubles_res += -1.000000000000000 * einsum('efab,an,bm->efmn', g[v, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# -0.5000 P(m,n)<j,i||a,b>*t2(a,b,n,i)*t2(e,f,m,j)
contracted_intermediate = -0.500000000000000 * einsum('jiab,abni,efmj->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 0.2500 <j,i||a,b>*t2(a,b,m,n)*t2(e,f,j,i)
doubles_res += 0.250000000000000 * einsum('jiab,abmn,efji->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# -0.5000 <j,i||a,b>*t2(a,e,j,i)*t2(b,f,m,n)
doubles_res += -0.500000000000000 * einsum('jiab,aeji,bfmn->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(m,n)<j,i||a,b>*t2(a,e,n,i)*t2(b,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jiab,aeni,bfmj->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -0.5000 <j,i||a,b>*t2(a,e,m,n)*t2(b,f,j,i)
doubles_res += -0.500000000000000 * einsum('jiab,aemn,bfji->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 1.0000 P(m,n)<j,i||a,b>*t1(a,i)*t1(b,n)*t2(e,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jiab,ai,bn,efmj->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 1.0000 P(e,f)<j,i||a,b>*t1(a,i)*t1(e,j)*t2(b,f,m,n)
contracted_intermediate = 1.000000000000000 * einsum('jiab,ai,ej,bfmn->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -0.5000 <j,i||a,b>*t1(a,n)*t1(b,m)*t2(e,f,j,i)
doubles_res += -0.500000000000000 * einsum('jiab,an,bm,efji->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
# 1.0000 P(m,n)*P(e,f)<j,i||a,b>*t1(a,n)*t1(e,i)*t2(b,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jiab,an,ei,bfmj->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# -0.5000 <j,i||a,b>*t1(e,i)*t1(f,j)*t2(a,b,m,n)
doubles_res += -0.500000000000000 * einsum('jiab,ei,fj,abmn->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
# -1.0000 P(m,n)<j,i||a,n>*t1(a,m)*t1(e,i)*t1(f,j)
contracted_intermediate = -1.000000000000000 * einsum('jian,am,ei,fj->efmn', g[o, o, v, o], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -1.0000 P(e,f)<i,e||a,b>*t1(a,n)*t1(b,m)*t1(f,i)
contracted_intermediate = -1.000000000000000 * einsum('ieab,an,bm,fi->efmn', g[o, v, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# 1.0000 <j,i||a,b>*t1(a,n)*t1(b,m)*t1(e,i)*t1(f,j)
doubles_res += 1.000000000000000 * einsum('jiab,an,bm,ei,fj->efmn', g[o, o, v, v], t1, t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 3), (0, 2), (0, 1)])
return doubles_res
def ccsd_iterations(t1, t2, fock, g, o, v, e_ai, e_abij, hf_energy, max_iter=100,
                    e_convergence=1e-8, r_convergence=1e-8, diis_size=None, diis_start_cycle=4):
# initialize diis if diis_size is not None
# else normal scf iterate
if diis_size is not None:
from diis import DIIS
diis_update = DIIS(diis_size, start_iter=diis_start_cycle)
t1_dim = t1.size
old_vec = np.hstack((t1.flatten(), t2.flatten()))
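        # DIIS extrapolates over the stacked (t1, t2) amplitude vector; t1_dim
        # records where to split the extrapolated vector back into t1 and t2.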
fock_e_ai = np.reciprocal(e_ai)
fock_e_abij = np.reciprocal(e_abij)
old_energy = ccsd_energy(t1, t2, fock, g, o, v)
print("")
print(" ==> CCSD amplitude equations <==")
print("")
print(" Iter Energy |dE| |dT|")
for idx in range(max_iter):
residual_singles = singles_residual(t1, t2, fock, g, o, v)
residual_doubles = doubles_residual(t1, t2, fock, g, o, v)
res_norm = np.linalg.norm(residual_singles) + np.linalg.norm(residual_doubles)
singles_res = residual_singles + fock_e_ai * t1
doubles_res = residual_doubles + fock_e_abij * t2
new_singles = singles_res * e_ai
new_doubles = doubles_res * e_abij
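        # Damped (quasi-Newton) update, assuming e_ai/e_abij hold the reciprocal
        # orbital-energy denominators as the names suggest: fock_e_ai = 1/e_ai
        # restores the bare denominators D, so new_t = (R + D*t) / D = t + R / D.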
# diis update
if diis_size is not None:
vectorized_iterate = np.hstack(
(new_singles.flatten(), new_doubles.flatten()))
error_vec = old_vec - vectorized_iterate
new_vectorized_iterate = diis_update.compute_new_vec(vectorized_iterate,
error_vec)
new_singles = new_vectorized_iterate[:t1_dim].reshape(t1.shape)
new_doubles = new_vectorized_iterate[t1_dim:].reshape(t2.shape)
old_vec = new_vectorized_iterate
current_energy = ccsd_energy(new_singles, new_doubles, fock, g, o, v)
delta_e = np.abs(old_energy - current_energy)
print(" {: 5d} {: 20.12f} {: 20.12f} {: 20.12f}".format(idx, current_energy - hf_energy, delta_e, res_norm))
if delta_e < e_convergence and res_norm < r_convergence:
# assign t1 and t2 variables for future use before breaking
t1 = new_singles
t2 = new_doubles
break
else:
# assign t1 and t2 and old_energy for next iteration
t1 = new_singles
t2 = new_doubles
old_energy = current_energy
else:
raise ValueError("CCSD iterations did not converge")
return t1, t2
|
[
"numpy.abs",
"numpy.reciprocal",
"numpy.einsum",
"numpy.linalg.norm",
"diis.DIIS"
] |
[((13236, 13255), 'numpy.reciprocal', 'np.reciprocal', (['e_ai'], {}), '(e_ai)\n', (13249, 13255), True, 'import numpy as np\n'), ((13274, 13295), 'numpy.reciprocal', 'np.reciprocal', (['e_abij'], {}), '(e_abij)\n', (13287, 13295), True, 'import numpy as np\n'), ((959, 980), 'numpy.einsum', 'einsum', (['"""ii"""', 'f[o, o]'], {}), "('ii', f[o, o])\n", (965, 980), False, 'from numpy import einsum\n'), ((1051, 1079), 'numpy.einsum', 'einsum', (['"""ia,ai"""', 'f[o, v]', 't1'], {}), "('ia,ai', f[o, v], t1)\n", (1057, 1079), False, 'from numpy import einsum\n'), ((1146, 1175), 'numpy.einsum', 'einsum', (['"""jiji"""', 'g[o, o, o, o]'], {}), "('jiji', g[o, o, o, o])\n", (1152, 1175), False, 'from numpy import einsum\n'), ((1254, 1292), 'numpy.einsum', 'einsum', (['"""jiab,abji"""', 'g[o, o, v, v]', 't2'], {}), "('jiab,abji', g[o, o, v, v], t2)\n", (1260, 1292), False, 'from numpy import einsum\n'), ((1375, 1464), 'numpy.einsum', 'einsum', (['"""jiab,ai,bj"""', 'g[o, o, v, v]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiab,ai,bj', g[o, o, v, v], t1, t1, optimize=['einsum_path', (0, 1),\n (0, 1)])\n", (1381, 1464), False, 'from numpy import einsum\n'), ((1638, 1663), 'numpy.einsum', 'einsum', (['"""em->em"""', 'f[v, o]'], {}), "('em->em', f[v, o])\n", (1644, 1663), False, 'from numpy import einsum\n'), ((1739, 1771), 'numpy.einsum', 'einsum', (['"""im,ei->em"""', 'f[o, o]', 't1'], {}), "('im,ei->em', f[o, o], t1)\n", (1745, 1771), False, 'from numpy import einsum\n'), ((1847, 1879), 'numpy.einsum', 'einsum', (['"""ea,am->em"""', 'f[v, v]', 't1'], {}), "('ea,am->em', f[v, v], t1)\n", (1853, 1879), False, 'from numpy import einsum\n'), ((1959, 1993), 'numpy.einsum', 'einsum', (['"""ia,aemi->em"""', 'f[o, v]', 't2'], {}), "('ia,aemi->em', f[o, v], t2)\n", (1965, 1993), False, 'from numpy import einsum\n'), ((2077, 2162), 'numpy.einsum', 'einsum', (['"""ia,am,ei->em"""', 'f[o, v]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ia,am,ei->em', f[o, v], t1, t1, optimize=['einsum_path', (0, 1), (0,\n 1)])\n", (2083, 2162), False, 'from numpy import einsum\n'), ((2238, 2278), 'numpy.einsum', 'einsum', (['"""ieam,ai->em"""', 'g[o, v, v, o]', 't1'], {}), "('ieam,ai->em', g[o, v, v, o], t1)\n", (2244, 2278), False, 'from numpy import einsum\n'), ((2362, 2404), 'numpy.einsum', 'einsum', (['"""jiam,aeji->em"""', 'g[o, o, v, o]', 't2'], {}), "('jiam,aeji->em', g[o, o, v, o], t2)\n", (2368, 2404), False, 'from numpy import einsum\n'), ((2488, 2530), 'numpy.einsum', 'einsum', (['"""ieab,abmi->em"""', 'g[o, v, v, v]', 't2'], {}), "('ieab,abmi->em', g[o, v, v, v], t2)\n", (2494, 2530), False, 'from numpy import einsum\n'), ((2622, 2717), 'numpy.einsum', 'einsum', (['"""jiab,ai,bemj->em"""', 'g[o, o, v, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiab,ai,bemj->em', g[o, o, v, v], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (2628, 2717), False, 'from numpy import einsum\n'), ((2805, 2900), 'numpy.einsum', 'einsum', (['"""jiab,am,beji->em"""', 'g[o, o, v, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 2), (0, 1)]"}), "('jiab,am,beji->em', g[o, o, v, v], t1, t2, optimize=['einsum_path',\n (0, 2), (0, 1)])\n", (2811, 2900), False, 'from numpy import einsum\n'), ((2988, 3083), 'numpy.einsum', 'einsum', (['"""jiab,ei,abmj->em"""', 'g[o, o, v, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 2), (0, 1)]"}), "('jiab,ei,abmj->em', g[o, o, v, v], t1, t2, optimize=['einsum_path',\n (0, 2), (0, 1)])\n", (2994, 3083), False, 'from numpy import einsum\n'),
((3167, 3260), 'numpy.einsum', 'einsum', (['"""jiam,ai,ej->em"""', 'g[o, o, v, o]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiam,ai,ej->em', g[o, o, v, o], t1, t1, optimize=['einsum_path', (0,\n 1), (0, 1)])\n", (3173, 3260), False, 'from numpy import einsum\n'), ((3344, 3437), 'numpy.einsum', 'einsum', (['"""ieab,ai,bm->em"""', 'g[o, v, v, v]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ieab,ai,bm->em', g[o, v, v, v], t1, t1, optimize=['einsum_path', (0,\n 1), (0, 1)])\n", (3350, 3437), False, 'from numpy import einsum\n'), ((3529, 3638), 'numpy.einsum', 'einsum', (['"""jiab,ai,bm,ej->em"""', 'g[o, o, v, v]', 't1', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jiab,ai,bm,ej->em', g[o, o, v, v], t1, t1, t1, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (3535, 3638), False, 'from numpy import einsum\n'), ((3851, 3887), 'numpy.einsum', 'einsum', (['"""in,efmi->efmn"""', 'f[o, o]', 't2'], {}), "('in,efmi->efmn', f[o, o], t2)\n", (3857, 3887), False, 'from numpy import einsum\n'), ((4097, 4133), 'numpy.einsum', 'einsum', (['"""ea,afmn->efmn"""', 'f[v, v]', 't2'], {}), "('ea,afmn->efmn', f[v, v], t2)\n", (4103, 4133), False, 'from numpy import einsum\n'), ((4352, 4441), 'numpy.einsum', 'einsum', (['"""ia,an,efmi->efmn"""', 'f[o, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ia,an,efmi->efmn', f[o, v], t1, t2, optimize=['einsum_path', (0, 1),\n (0, 1)])\n", (4358, 4441), False, 'from numpy import einsum\n'), ((4656, 4745), 'numpy.einsum', 'einsum', (['"""ia,ei,afmn->efmn"""', 'f[o, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ia,ei,afmn->efmn', f[o, v], t1, t2, optimize=['einsum_path', (0, 1),\n (0, 1)])\n", (4662, 4745), False, 'from numpy import einsum\n'), ((4927, 4962), 'numpy.einsum', 'einsum', (['"""efmn->efmn"""', 'g[v, v, o, o]'], {}), "('efmn->efmn', g[v, v, o, o])\n", (4933, 4962), False, 'from numpy import einsum\n'), ((5059, 5101), 'numpy.einsum', 'einsum', (['"""iemn,fi->efmn"""', 'g[o, v, o, o]', 't1'], {}), "('iemn,fi->efmn', g[o, v, o, o], t1)\n", (5065, 5101), False, 'from numpy import einsum\n'), ((5312, 5354), 'numpy.einsum', 'einsum', (['"""efan,am->efmn"""', 'g[v, v, v, o]', 't1'], {}), "('efan,am->efmn', g[v, v, v, o], t1)\n", (5318, 5354), False, 'from numpy import einsum\n'), ((5552, 5596), 'numpy.einsum', 'einsum', (['"""jimn,efji->efmn"""', 'g[o, o, o, o]', 't2'], {}), "('jimn,efji->efmn', g[o, o, o, o], t2)\n", (5558, 5596), False, 'from numpy import einsum\n'), ((5704, 5748), 'numpy.einsum', 'einsum', (['"""iean,afmi->efmn"""', 'g[o, v, v, o]', 't2'], {}), "('iean,afmi->efmn', g[o, v, v, o], t2)\n", (5710, 5748), False, 'from numpy import einsum\n'), ((6066, 6110), 'numpy.einsum', 'einsum', (['"""efab,abmn->efmn"""', 'g[v, v, v, v]', 't2'], {}), "('efab,abmn->efmn', g[v, v, v, v], t2)\n", (6072, 6110), False, 'from numpy import einsum\n'), ((6219, 6316), 'numpy.einsum', 'einsum', (['"""jian,ai,efmj->efmn"""', 'g[o, o, v, o]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jian,ai,efmj->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (6225, 6316), False, 'from numpy import einsum\n'), ((6535, 6632), 'numpy.einsum', 'einsum', (['"""jian,am,efji->efmn"""', 'g[o, o, v, o]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jian,am,efji->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (6541, 6632), False, 'from numpy import einsum\n'),
((6858, 6955), 'numpy.einsum', 'einsum', (['"""jian,ei,afmj->efmn"""', 'g[o, o, v, o]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jian,ei,afmj->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (6864, 6955), False, 'from numpy import einsum\n'), ((7294, 7391), 'numpy.einsum', 'einsum', (['"""ieab,ai,bfmn->efmn"""', 'g[o, v, v, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ieab,ai,bfmn->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (7300, 7391), False, 'from numpy import einsum\n'), ((7617, 7714), 'numpy.einsum', 'einsum', (['"""ieab,an,bfmi->efmn"""', 'g[o, v, v, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ieab,an,bfmi->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (7623, 7714), False, 'from numpy import einsum\n'), ((8053, 8150), 'numpy.einsum', 'einsum', (['"""ieab,fi,abmn->efmn"""', 'g[o, v, v, v]', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('ieab,fi,abmn->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (8059, 8150), False, 'from numpy import einsum\n'), ((8348, 8443), 'numpy.einsum', 'einsum', (['"""jimn,ei,fj->efmn"""', 'g[o, o, o, o]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jimn,ei,fj->efmn', g[o, o, o, o], t1, t1, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (8354, 8443), False, 'from numpy import einsum\n'), ((8551, 8646), 'numpy.einsum', 'einsum', (['"""iean,am,fi->efmn"""', 'g[o, v, v, o]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('iean,am,fi->efmn', g[o, v, v, o], t1, t1, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (8557, 8646), False, 'from numpy import einsum\n'), ((8964, 9059), 'numpy.einsum', 'einsum', (['"""efab,an,bm->efmn"""', 'g[v, v, v, v]', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('efab,an,bm->efmn', g[v, v, v, v], t1, t1, optimize=['einsum_path',\n (0, 1), (0, 1)])\n", (8970, 9059), False, 'from numpy import einsum\n'), ((9168, 9268), 'numpy.einsum', 'einsum', (['"""jiab,abni,efmj->efmn"""', 'g[o, o, v, v]', 't2', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiab,abni,efmj->efmn', g[o, o, v, v], t2, t2, optimize=[\n 'einsum_path', (0, 1), (0, 1)])\n", (9174, 9268), False, 'from numpy import einsum\n'), ((9473, 9573), 'numpy.einsum', 'einsum', (['"""jiab,abmn,efji->efmn"""', 'g[o, o, v, v]', 't2', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiab,abmn,efji->efmn', g[o, o, v, v], t2, t2, optimize=[\n 'einsum_path', (0, 1), (0, 1)])\n", (9479, 9573), False, 'from numpy import einsum\n'), ((9664, 9764), 'numpy.einsum', 'einsum', (['"""jiab,aeji,bfmn->efmn"""', 'g[o, o, v, v]', 't2', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiab,aeji,bfmn->efmn', g[o, o, v, v], t2, t2, optimize=[\n 'einsum_path', (0, 1), (0, 1)])\n", (9670, 9764), False, 'from numpy import einsum\n'), ((9872, 9972), 'numpy.einsum', 'einsum', (['"""jiab,aeni,bfmj->efmn"""', 'g[o, o, v, v]', 't2', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 1)]"}), "('jiab,aeni,bfmj->efmn', g[o, o, v, v], t2, t2, optimize=[\n 'einsum_path', (0, 1), (0, 1)])\n", (9878, 9972), False, 'from numpy import einsum\n'), ((10177, 10277), 'numpy.einsum', 'einsum', (['"""jiab,aemn,bfji->efmn"""', 'g[o, o, v, v]', 't2', 't2'], {'optimize': "['einsum_path', (0, 2), (0, 1)]"}), "('jiab,aemn,bfji->efmn', g[o, o, v, v], t2, t2, optimize=[\n 'einsum_path', (0, 2), (0, 1)])\n",
(10183, 10277), False, 'from numpy import einsum\n'), ((10389, 10502), 'numpy.einsum', 'einsum', (['"""jiab,ai,bn,efmj->efmn"""', 'g[o, o, v, v]', 't1', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jiab,ai,bn,efmj->efmn', g[o, o, v, v], t1, t1, t2, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (10395, 10502), False, 'from numpy import einsum\n'), ((10728, 10841), 'numpy.einsum', 'einsum', (['"""jiab,ai,ej,bfmn->efmn"""', 'g[o, o, v, v]', 't1', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jiab,ai,ej,bfmn->efmn', g[o, o, v, v], t1, t1, t2, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (10734, 10841), False, 'from numpy import einsum\n'), ((11050, 11163), 'numpy.einsum', 'einsum', (['"""jiab,an,bm,efji->efmn"""', 'g[o, o, v, v]', 't1', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jiab,an,bm,efji->efmn', g[o, o, v, v], t1, t1, t2, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (11056, 11163), False, 'from numpy import einsum\n'), ((11282, 11395), 'numpy.einsum', 'einsum', (['"""jiab,an,ei,bfmj->efmn"""', 'g[o, o, v, v]', 't1', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jiab,an,ei,bfmj->efmn', g[o, o, v, v], t1, t1, t2, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (11288, 11395), False, 'from numpy import einsum\n'), ((11724, 11837), 'numpy.einsum', 'einsum', (['"""jiab,ei,fj,abmn->efmn"""', 'g[o, o, v, v]', 't1', 't1', 't2'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jiab,ei,fj,abmn->efmn', g[o, o, v, v], t1, t1, t2, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (11730, 11837), False, 'from numpy import einsum\n'), ((11945, 12056), 'numpy.einsum', 'einsum', (['"""jian,am,ei,fj->efmn"""', 'g[o, o, v, o]', 't1', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('jian,am,ei,fj->efmn', g[o, o, v, o], t1, t1, t1, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (11951, 12056), False, 'from numpy import einsum\n'), ((12278, 12389), 'numpy.einsum', 'einsum', (['"""ieab,an,bm,fi->efmn"""', 'g[o, v, v, v]', 't1', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 2), (0, 1)]"}), "('ieab,an,bm,fi->efmn', g[o, v, v, v], t1, t1, t1, optimize=[\n 'einsum_path', (0, 1), (0, 2), (0, 1)])\n", (12284, 12389), False, 'from numpy import einsum\n'), ((12602, 12728), 'numpy.einsum', 'einsum', (['"""jiab,an,bm,ei,fj->efmn"""', 'g[o, o, v, v]', 't1', 't1', 't1', 't1'], {'optimize': "['einsum_path', (0, 1), (0, 3), (0, 2), (0, 1)]"}), "('jiab,an,bm,ei,fj->efmn', g[o, o, v, v], t1, t1, t1, t1, optimize=[\n 'einsum_path', (0, 1), (0, 3), (0, 2), (0, 1)])\n", (12608, 12728), False, 'from numpy import einsum\n'), ((13091, 13135), 'diis.DIIS', 'DIIS', (['diis_size'], {'start_iter': 'diis_start_cycle'}), '(diis_size, start_iter=diis_start_cycle)\n', (13095, 13135), False, 'from diis import DIIS\n'), ((14641, 14676), 'numpy.abs', 'np.abs', (['(old_energy - current_energy)'], {}), '(old_energy - current_energy)\n', (14647, 14676), True, 'import numpy as np\n'), ((3954, 3999), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (3960, 3999), False, 'from numpy import einsum\n'), ((4201, 4246), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (4207, 4246), False, 'from numpy import einsum\n'), ((4505, 4550), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 
'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (4511, 4550), False, 'from numpy import einsum\n'), ((4809, 4854), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (4815, 4854), False, 'from numpy import einsum\n'), ((5169, 5214), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (5175, 5214), False, 'from numpy import einsum\n'), ((5422, 5467), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (5428, 5467), False, 'from numpy import einsum\n'), ((5936, 5981), 'numpy.einsum', 'einsum', (['"""efmn->fenm"""', 'contracted_intermediate'], {}), "('efmn->fenm', contracted_intermediate)\n", (5942, 5981), False, 'from numpy import einsum\n'), ((6380, 6425), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (6386, 6425), False, 'from numpy import einsum\n'), ((6696, 6741), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (6702, 6741), False, 'from numpy import einsum\n'), ((7139, 7184), 'numpy.einsum', 'einsum', (['"""efmn->fenm"""', 'contracted_intermediate'], {}), "('efmn->fenm', contracted_intermediate)\n", (7145, 7184), False, 'from numpy import einsum\n'), ((7455, 7500), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (7461, 7500), False, 'from numpy import einsum\n'), ((7898, 7943), 'numpy.einsum', 'einsum', (['"""efmn->fenm"""', 'contracted_intermediate'], {}), "('efmn->fenm', contracted_intermediate)\n", (7904, 7943), False, 'from numpy import einsum\n'), ((8214, 8259), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (8220, 8259), False, 'from numpy import einsum\n'), ((8830, 8875), 'numpy.einsum', 'einsum', (['"""efmn->fenm"""', 'contracted_intermediate'], {}), "('efmn->fenm', contracted_intermediate)\n", (8836, 8875), False, 'from numpy import einsum\n'), ((9331, 9376), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (9337, 9376), False, 'from numpy import einsum\n'), ((10035, 10080), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (10041, 10080), False, 'from numpy import einsum\n'), ((10565, 10610), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (10571, 10610), False, 'from numpy import einsum\n'), ((10904, 10949), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (10910, 10949), False, 'from numpy import einsum\n'), ((11578, 11623), 'numpy.einsum', 'einsum', (['"""efmn->fenm"""', 'contracted_intermediate'], {}), "('efmn->fenm', contracted_intermediate)\n", (11584, 11623), False, 'from numpy import einsum\n'), ((12119, 12164), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (12125, 12164), False, 'from numpy import einsum\n'), ((12452, 12497), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', 
contracted_intermediate)\n", (12458, 12497), False, 'from numpy import einsum\n'), ((13700, 13732), 'numpy.linalg.norm', 'np.linalg.norm', (['residual_singles'], {}), '(residual_singles)\n', (13714, 13732), True, 'import numpy as np\n'), ((13735, 13767), 'numpy.linalg.norm', 'np.linalg.norm', (['residual_doubles'], {}), '(residual_doubles)\n', (13749, 13767), True, 'import numpy as np\n'), ((5876, 5921), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (5882, 5921), False, 'from numpy import einsum\n'), ((7079, 7124), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (7085, 7124), False, 'from numpy import einsum\n'), ((7838, 7883), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (7844, 7883), False, 'from numpy import einsum\n'), ((8770, 8815), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (8776, 8815), False, 'from numpy import einsum\n'), ((11518, 11563), 'numpy.einsum', 'einsum', (['"""efmn->femn"""', 'contracted_intermediate'], {}), "('efmn->femn', contracted_intermediate)\n", (11524, 11563), False, 'from numpy import einsum\n'), ((5816, 5861), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (5822, 5861), False, 'from numpy import einsum\n'), ((7019, 7064), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (7025, 7064), False, 'from numpy import einsum\n'), ((7778, 7823), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (7784, 7823), False, 'from numpy import einsum\n'), ((8710, 8755), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (8716, 8755), False, 'from numpy import einsum\n'), ((11458, 11503), 'numpy.einsum', 'einsum', (['"""efmn->efnm"""', 'contracted_intermediate'], {}), "('efmn->efnm', contracted_intermediate)\n", (11464, 11503), False, 'from numpy import einsum\n')]
|
"""Module containing definitions of arithmetic functions used by perceptrons"""
from abc import ABC, abstractmethod
import numpy as np
from NaiveNeurals.utils import ErrorAlgorithm
class ActivationFunction(ABC):
"""Abstract function for defining functions"""
label = ''
@staticmethod
@abstractmethod
def function(arg: np.array) -> np.array:
"""Implementation of function
:param arg: float
:return: float
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def prime(cls, arg: np.array) -> np.array:
"""First derivative of implemented function
:param arg: float
:return: float
"""
raise NotImplementedError()
class Sigmoid(ActivationFunction):
"""Represents sigmoid function and its derivative"""
label = 'sigmoid'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate sigmoid(arg)
:param arg: float input value
:return: float sig(arg) value
"""
return 1 / (1 + np.exp(-arg))
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of sigmoid's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return cls.function(arg) * (1 - cls.function(arg))
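    # Spot check (added, illustrative): Sigmoid.function(0.0) == 0.5 and
    # Sigmoid.prime(0.0) == 0.5 * (1 - 0.5) == 0.25.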
class Tanh(ActivationFunction):
"""Represents hyperbolic tangent"""
label = 'tanh'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate tanh(arg)
:param arg: float input value
:return: float tanh(arg) value
"""
return np.tanh(arg)
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of tanh's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return 1 - np.tanh(arg)**2
class Linear(ActivationFunction):
"""Represents linear function"""
label = 'lin'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate lin(arg)
:param arg: float input value
:return: float lin(arg) value
"""
return arg
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of lin's prime derivative for given arg
:param arg: float input value
:return: float value
"""
ones = np.array(arg)
ones[::] = 1.0
return ones
class SoftMax(ActivationFunction):
"""Represents SoftMax function
The ``softmax`` function takes an N-dimensional vector of arbitrary real values and produces
another N-dimensional vector with real values in the range (0, 1) that add up to 1.0.
source: https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
"""
label = 'softmax'
@staticmethod
def function(arg: np.array, beta: int = 20) -> np.array: # pylint: disable=arguments-differ
"""Calculate softmax(arg)
:param arg: float input value
:param beta: scaling parameter
:return: float softmax(arg) value
"""
exps = np.exp(beta * arg - beta * arg.max())
return exps / np.sum(exps)
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of softmax's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return cls.function(arg) * (1 - cls.function(arg))
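    # Added note: this elementwise form is only the diagonal of the true softmax
    # Jacobian, dS_i/dx_j = S_i * (delta_ij - S_j); the off-diagonal terms are
    # dropped here.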
class SoftPlus(ActivationFunction):
"""Represents softplus function"""
label = 'softplus'
@staticmethod
def function(arg: np.array) -> np.array:
"""Calculate softplus(arg)
:param arg: float input value
:return: float softmax(arg) value
"""
return np.log(1 + np.exp(arg))
@classmethod
def prime(cls, arg: np.array) -> np.array:
"""Calculate value of softplus's prime derivative for given arg
:param arg: float input value
:return: float value
"""
return 1/(1 + np.exp(-arg))
def get_activation_function(label: str) -> ActivationFunction:
"""Get activation function by label
:param label: string denoting function
:return: callable function
"""
    if label == 'lin':
        return Linear()
    if label == 'sigmoid':
        return Sigmoid()
    if label == 'tanh':
        return Tanh()
    if label == 'softmax':
        return SoftMax()
    if label == 'softplus':
        return SoftPlus()
    return Sigmoid()  # default fallback for unknown labels
def calculate_error(target: np.array, actual: np.array,
func_type: ErrorAlgorithm = ErrorAlgorithm.SQR) -> np.array:
"""Calculates error for provided actual and targeted data.
:param target: target data
:param actual: actual training data
:param func_type: denotes type of used function for error
:return: calculated error
"""
if func_type == ErrorAlgorithm.SQR:
return np.sum(0.5 * np.power(actual - target, 2), axis=1)
elif func_type == ErrorAlgorithm.CE:
return -1 * np.sum(target * np.log(abs(actual)), axis=1)
raise NotImplementedError()
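# Minimal usage sketch (editorial addition, not part of the module): resolve
# an activation by label, evaluate it and its derivative, then score a
# prediction with the default squared-error measure.
if __name__ == '__main__':
    act = get_activation_function('sigmoid')
    x = np.array([[-1.0, 0.0, 1.0]])
    print(act.function(x))   # values squashed into (0, 1)
    print(act.prime(x))      # elementwise derivative at x
    target = np.array([[0.0, 0.0, 1.0]])
    print(calculate_error(target, act.function(x)))  # per-row squared error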
|
[
"numpy.power",
"numpy.tanh",
"numpy.exp",
"numpy.sum",
"numpy.array"
] |
[((1650, 1662), 'numpy.tanh', 'np.tanh', (['arg'], {}), '(arg)\n', (1657, 1662), True, 'import numpy as np\n'), ((2432, 2445), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (2440, 2445), True, 'import numpy as np\n'), ((3232, 3244), 'numpy.sum', 'np.sum', (['exps'], {}), '(exps)\n', (3238, 3244), True, 'import numpy as np\n'), ((1067, 1079), 'numpy.exp', 'np.exp', (['(-arg)'], {}), '(-arg)\n', (1073, 1079), True, 'import numpy as np\n'), ((1895, 1907), 'numpy.tanh', 'np.tanh', (['arg'], {}), '(arg)\n', (1902, 1907), True, 'import numpy as np\n'), ((3839, 3850), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (3845, 3850), True, 'import numpy as np\n'), ((4091, 4103), 'numpy.exp', 'np.exp', (['(-arg)'], {}), '(-arg)\n', (4097, 4103), True, 'import numpy as np\n'), ((4901, 4929), 'numpy.power', 'np.power', (['(actual - target)', '(2)'], {}), '(actual - target, 2)\n', (4909, 4929), True, 'import numpy as np\n')]
|
from OpenGL import GL
from PIL import Image
from pathlib import Path
import numpy as np
import gc
import os
import ctypes
import dds_loader  # used below for DDSTexture; missing from the original imports (assumed importable)
GL_COMPRESSED_RGBA_S3TC_DXT1_EXT = 0x83F1
VBO = None
VAO = None
TEXTURE = None
SHADER = None
vertexData = [
-1.0, -1.0, 0.0, 0.0, 1.0,
-1.0, 1.0, 0.0, 0.0, 0.0,
1.0, 1.0, 0.0, 1.0, 0.0,
1.0, 1.0, 0.0, 1.0, 0.0,
1.0, -1.0, 0.0, 1.0, 1.0,
-1.0, -1.0, 0.0, 0.0, 1.0]
_filepath = os.path.join(
Path(__file__).parent.parent.parent, "branding/spykeLogo.dds")
tex = dds_loader.DDSTexture()
tex.load(_filepath)
texData = np.fromstring(tex.data, dtype=np.uint8)  # np.fromstring is deprecated; np.frombuffer(tex.data, dtype=np.uint8) is the modern equivalent
texImageSize = tex.real_size
vertSource = """
#version 450 core
layout(location = 0) in vec3 aPosition;
layout(location = 1) in vec2 aTexCoord;
out vec2 vTexCoord;
void main() {
vTexCoord = aTexCoord;
gl_Position = vec4(aPosition, 1.0f);
}
"""
fragSource = """
#version 450 core
in vec2 vTexCoord;
uniform sampler2D uTexture;
out vec4 Color;
void main() {
Color = texture(uTexture, vTexCoord);
}
"""
def __SetupShader():
global SHADER
SHADER = GL.glCreateProgram()
vert = GL.glCreateShader(GL.GL_VERTEX_SHADER)
GL.glShaderSource(vert, vertSource)
GL.glCompileShader(vert)
GL.glAttachShader(SHADER, vert)
frag = GL.glCreateShader(GL.GL_FRAGMENT_SHADER)
GL.glShaderSource(frag, fragSource)
GL.glCompileShader(frag)
GL.glAttachShader(SHADER, frag)
GL.glLinkProgram(SHADER)
GL.glValidateProgram(SHADER)
GL.glDetachShader(SHADER, vert)
GL.glDetachShader(SHADER, frag)
GL.glDeleteShader(vert)
GL.glDeleteShader(frag)
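# Editorial sketch (not in the original module): __SetupShader above assumes
# every compile and link succeeds; a check like this after glCompileShader
# would surface GLSL errors instead of failing silently at draw time.
def __CheckShaderCompiled(shader):
    if GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS) != GL.GL_TRUE:
        raise RuntimeError(GL.glGetShaderInfoLog(shader))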
def __SetupVbo():
global VBO
VBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(vertexData) * ctypes.sizeof(ctypes.c_float),
np.asarray(vertexData, dtype=np.float32), GL.GL_STATIC_DRAW)
def __SetupVao():
global VAO
vertexSize = (3 + 2) * ctypes.sizeof(ctypes.c_float)
VAO = GL.glGenVertexArrays(1)
GL.glBindVertexArray(VAO)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False,
vertexSize, ctypes.c_void_p(0))
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(1, 2, GL.GL_FLOAT, False, vertexSize, ctypes.c_void_p(
3 * ctypes.sizeof(ctypes.c_float)))
GL.glEnableVertexAttribArray(1)
def __SetupTexture():
global TEXTURE
TEXTURE = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, TEXTURE)
GL.glCompressedTexImage2D(
GL.GL_TEXTURE_2D, 0, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, 1024, 1024, texImageSize, texData)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
def CleanupPreview():
global vertexData, texData, vertSource, fragSource
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glDeleteProgram(SHADER)
GL.glDeleteBuffers(1, [VBO])
GL.glDeleteVertexArrays(1, [VAO])
GL.glDeleteTextures(1, [TEXTURE])
err = GL.glGetError()
while err != GL.GL_NO_ERROR:
err = GL.glGetError()
del vertexData
del texData
del vertSource
del fragSource
gc.collect()
def RenderPreview():
global VBO, VAO, TEXTURE, SHADER
__SetupShader()
__SetupVbo()
__SetupVao()
__SetupTexture()
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glUseProgram(SHADER)
GL.glBindVertexArray(VAO)
GL.glBindTexture(GL.GL_TEXTURE_2D, TEXTURE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glDrawArrays(GL.GL_TRIANGLES, 0, 6)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
err = GL.glGetError()
while err != GL.GL_NO_ERROR:
err = GL.glGetError()
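# Minimal usage sketch (editorial; assumes the host application has already
# created a current OpenGL 4.5 context, since every call above talks to the
# bound context directly):
#
#     RenderPreview()      # draws the logo into the bound framebuffer
#     ...                  # swap buffers via the windowing layer
#     CleanupPreview()     # frees the program, buffers and texture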
|
[
"OpenGL.GL.glTexParameter",
"OpenGL.GL.glDeleteProgram",
"ctypes.c_void_p",
"OpenGL.GL.glAttachShader",
"OpenGL.GL.glCreateShader",
"OpenGL.GL.glDrawArrays",
"OpenGL.GL.glDeleteBuffers",
"OpenGL.GL.glGenTextures",
"pathlib.Path",
"OpenGL.GL.glBindVertexArray",
"OpenGL.GL.glGenBuffers",
"OpenGL.GL.glClear",
"numpy.asarray",
"OpenGL.GL.glBindBuffer",
"numpy.fromstring",
"OpenGL.GL.glShaderSource",
"OpenGL.GL.glDetachShader",
"OpenGL.GL.glBindTexture",
"OpenGL.GL.glUseProgram",
"OpenGL.GL.glGenVertexArrays",
"OpenGL.GL.glCompileShader",
"OpenGL.GL.glDeleteTextures",
"OpenGL.GL.glEnable",
"OpenGL.GL.glEnableVertexAttribArray",
"OpenGL.GL.glDeleteShader",
"gc.collect",
"OpenGL.GL.glLinkProgram",
"OpenGL.GL.glDeleteVertexArrays",
"ctypes.sizeof",
"OpenGL.GL.glCompressedTexImage2D",
"OpenGL.GL.glBlendFunc",
"OpenGL.GL.glValidateProgram",
"OpenGL.GL.glCreateProgram",
"OpenGL.GL.glGetError"
] |
[((575, 614), 'numpy.fromstring', 'np.fromstring', (['tex.data'], {'dtype': 'np.uint8'}), '(tex.data, dtype=np.uint8)\n', (588, 614), True, 'import numpy as np\n'), ((1089, 1109), 'OpenGL.GL.glCreateProgram', 'GL.glCreateProgram', ([], {}), '()\n', (1107, 1109), False, 'from OpenGL import GL\n'), ((1122, 1160), 'OpenGL.GL.glCreateShader', 'GL.glCreateShader', (['GL.GL_VERTEX_SHADER'], {}), '(GL.GL_VERTEX_SHADER)\n', (1139, 1160), False, 'from OpenGL import GL\n'), ((1165, 1200), 'OpenGL.GL.glShaderSource', 'GL.glShaderSource', (['vert', 'vertSource'], {}), '(vert, vertSource)\n', (1182, 1200), False, 'from OpenGL import GL\n'), ((1205, 1229), 'OpenGL.GL.glCompileShader', 'GL.glCompileShader', (['vert'], {}), '(vert)\n', (1223, 1229), False, 'from OpenGL import GL\n'), ((1234, 1265), 'OpenGL.GL.glAttachShader', 'GL.glAttachShader', (['SHADER', 'vert'], {}), '(SHADER, vert)\n', (1251, 1265), False, 'from OpenGL import GL\n'), ((1278, 1318), 'OpenGL.GL.glCreateShader', 'GL.glCreateShader', (['GL.GL_FRAGMENT_SHADER'], {}), '(GL.GL_FRAGMENT_SHADER)\n', (1295, 1318), False, 'from OpenGL import GL\n'), ((1323, 1358), 'OpenGL.GL.glShaderSource', 'GL.glShaderSource', (['frag', 'fragSource'], {}), '(frag, fragSource)\n', (1340, 1358), False, 'from OpenGL import GL\n'), ((1363, 1387), 'OpenGL.GL.glCompileShader', 'GL.glCompileShader', (['frag'], {}), '(frag)\n', (1381, 1387), False, 'from OpenGL import GL\n'), ((1392, 1423), 'OpenGL.GL.glAttachShader', 'GL.glAttachShader', (['SHADER', 'frag'], {}), '(SHADER, frag)\n', (1409, 1423), False, 'from OpenGL import GL\n'), ((1429, 1453), 'OpenGL.GL.glLinkProgram', 'GL.glLinkProgram', (['SHADER'], {}), '(SHADER)\n', (1445, 1453), False, 'from OpenGL import GL\n'), ((1458, 1486), 'OpenGL.GL.glValidateProgram', 'GL.glValidateProgram', (['SHADER'], {}), '(SHADER)\n', (1478, 1486), False, 'from OpenGL import GL\n'), ((1492, 1523), 'OpenGL.GL.glDetachShader', 'GL.glDetachShader', (['SHADER', 'vert'], {}), '(SHADER, vert)\n', (1509, 1523), False, 'from OpenGL import GL\n'), ((1528, 1559), 'OpenGL.GL.glDetachShader', 'GL.glDetachShader', (['SHADER', 'frag'], {}), '(SHADER, frag)\n', (1545, 1559), False, 'from OpenGL import GL\n'), ((1565, 1588), 'OpenGL.GL.glDeleteShader', 'GL.glDeleteShader', (['vert'], {}), '(vert)\n', (1582, 1588), False, 'from OpenGL import GL\n'), ((1593, 1616), 'OpenGL.GL.glDeleteShader', 'GL.glDeleteShader', (['frag'], {}), '(frag)\n', (1610, 1616), False, 'from OpenGL import GL\n'), ((1663, 1681), 'OpenGL.GL.glGenBuffers', 'GL.glGenBuffers', (['(1)'], {}), '(1)\n', (1678, 1681), False, 'from OpenGL import GL\n'), ((1686, 1726), 'OpenGL.GL.glBindBuffer', 'GL.glBindBuffer', (['GL.GL_ARRAY_BUFFER', 'VBO'], {}), '(GL.GL_ARRAY_BUFFER, VBO)\n', (1701, 1726), False, 'from OpenGL import GL\n'), ((2001, 2024), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)'], {}), '(1)\n', (2021, 2024), False, 'from OpenGL import GL\n'), ((2029, 2054), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['VAO'], {}), '(VAO)\n', (2049, 2054), False, 'from OpenGL import GL\n'), ((2060, 2100), 'OpenGL.GL.glBindBuffer', 'GL.glBindBuffer', (['GL.GL_ARRAY_BUFFER', 'VBO'], {}), '(GL.GL_ARRAY_BUFFER, VBO)\n', (2075, 2100), False, 'from OpenGL import GL\n'), ((2222, 2253), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['(0)'], {}), '(0)\n', (2250, 2253), False, 'from OpenGL import GL\n'), ((2387, 2418), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['(1)'], {}), '(1)\n', (2415, 2418), False, 'from OpenGL 
import GL\n'), ((2477, 2496), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (2493, 2496), False, 'from OpenGL import GL\n'), ((2501, 2544), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'TEXTURE'], {}), '(GL.GL_TEXTURE_2D, TEXTURE)\n', (2517, 2544), False, 'from OpenGL import GL\n'), ((2550, 2669), 'OpenGL.GL.glCompressedTexImage2D', 'GL.glCompressedTexImage2D', (['GL.GL_TEXTURE_2D', '(0)', 'GL_COMPRESSED_RGBA_S3TC_DXT1_EXT', '(1024)', '(1024)', 'texImageSize', 'texData'], {}), '(GL.GL_TEXTURE_2D, 0,\n GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, 1024, 1024, texImageSize, texData)\n', (2575, 2669), False, 'from OpenGL import GL\n'), ((2679, 2754), 'OpenGL.GL.glTexParameter', 'GL.glTexParameter', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (2696, 2754), False, 'from OpenGL import GL\n'), ((2759, 2834), 'OpenGL.GL.glTexParameter', 'GL.glTexParameter', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)\n', (2776, 2834), False, 'from OpenGL import GL\n'), ((2919, 2953), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (2929, 2953), False, 'from OpenGL import GL\n'), ((2959, 2985), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['SHADER'], {}), '(SHADER)\n', (2977, 2985), False, 'from OpenGL import GL\n'), ((2990, 3018), 'OpenGL.GL.glDeleteBuffers', 'GL.glDeleteBuffers', (['(1)', '[VBO]'], {}), '(1, [VBO])\n', (3008, 3018), False, 'from OpenGL import GL\n'), ((3023, 3056), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[VAO]'], {}), '(1, [VAO])\n', (3046, 3056), False, 'from OpenGL import GL\n'), ((3061, 3094), 'OpenGL.GL.glDeleteTextures', 'GL.glDeleteTextures', (['(1)', '[TEXTURE]'], {}), '(1, [TEXTURE])\n', (3080, 3094), False, 'from OpenGL import GL\n'), ((3106, 3121), 'OpenGL.GL.glGetError', 'GL.glGetError', ([], {}), '()\n', (3119, 3121), False, 'from OpenGL import GL\n'), ((3264, 3276), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3274, 3276), False, 'import gc\n'), ((3418, 3442), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_BLEND'], {}), '(GL.GL_BLEND)\n', (3429, 3442), False, 'from OpenGL import GL\n'), ((3447, 3505), 'OpenGL.GL.glBlendFunc', 'GL.glBlendFunc', (['GL.GL_SRC_ALPHA', 'GL.GL_ONE_MINUS_SRC_ALPHA'], {}), '(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n', (3461, 3505), False, 'from OpenGL import GL\n'), ((3511, 3534), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['SHADER'], {}), '(SHADER)\n', (3526, 3534), False, 'from OpenGL import GL\n'), ((3539, 3564), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['VAO'], {}), '(VAO)\n', (3559, 3564), False, 'from OpenGL import GL\n'), ((3569, 3612), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'TEXTURE'], {}), '(GL.GL_TEXTURE_2D, TEXTURE)\n', (3585, 3612), False, 'from OpenGL import GL\n'), ((3618, 3652), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (3628, 3652), False, 'from OpenGL import GL\n'), ((3657, 3695), 'OpenGL.GL.glDrawArrays', 'GL.glDrawArrays', (['GL.GL_TRIANGLES', '(0)', '(6)'], {}), '(GL.GL_TRIANGLES, 0, 6)\n', (3672, 3695), False, 'from OpenGL import GL\n'), ((3701, 3738), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (3717, 3738), False, 'from OpenGL import GL\n'), ((3750, 3765), 
'OpenGL.GL.glGetError', 'GL.glGetError', ([], {}), '()\n', (3763, 3765), False, 'from OpenGL import GL\n'), ((1836, 1876), 'numpy.asarray', 'np.asarray', (['vertexData'], {'dtype': 'np.float32'}), '(vertexData, dtype=np.float32)\n', (1846, 1876), True, 'import numpy as np\n'), ((1960, 1989), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (1973, 1989), False, 'import ctypes\n'), ((2198, 2216), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(0)'], {}), '(0)\n', (2213, 2216), False, 'import ctypes\n'), ((3169, 3184), 'OpenGL.GL.glGetError', 'GL.glGetError', ([], {}), '()\n', (3182, 3184), False, 'from OpenGL import GL\n'), ((3813, 3828), 'OpenGL.GL.glGetError', 'GL.glGetError', ([], {}), '()\n', (3826, 3828), False, 'from OpenGL import GL\n'), ((1785, 1814), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (1798, 1814), False, 'import ctypes\n'), ((451, 465), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (455, 465), False, 'from pathlib import Path\n'), ((2351, 2380), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (2364, 2380), False, 'import ctypes\n')]
|
import io
import torchvision.transforms as transforms
from PIL import Image
import onnxruntime as ort
import numpy as np
class_map = {
0: "10 Reais Frente",
1: "10 Reais Verso",
2: "20 Reais Frente",
3: "20 Reais Verso",
4: "2 Reais Frente",
5: "2 Reais Verso",
6: "50 Reais Frente",
7: "50 Reais Verso",
8: "5 Reais Frente",
9: "5 Reais Verso"
}
def transform_image(image_bytes):
my_transforms = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image = Image.open(io.BytesIO(image_bytes))
return my_transforms(image).unsqueeze_(0)
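# The Normalize constants above are the standard ImageNet channel means and
# standard deviations that torchvision-pretrained backbones expect.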
def get_prediction(image_bytes, inference_session):
tensor = transform_image(image_bytes=image_bytes)
outputs = inference_session.run(None, {'input.1': tensor.numpy()})
y_hat = np.argmax(outputs[0], axis=1)[0]
return class_map[y_hat]
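# Editorial sketch (illustrative, not part of the original script): if the
# model emits raw logits, a softmax over outputs[0] recovers per-class
# probabilities when a confidence score is wanted alongside the label.
def get_confidence(outputs):
    logits = outputs[0]
    exps = np.exp(logits - logits.max(axis=1, keepdims=True))
    probs = exps / exps.sum(axis=1, keepdims=True)
    return float(probs.max(axis=1)[0])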
if __name__ == "__main__":
ort_session = ort.InferenceSession('app/models/banknote_best.onnx')
filename = [
"data/validation/2reaisFrente/compressed_0_1835891.jpeg",
'data/validation/2reaisVerso/compressed_0_3752849.jpeg',
"data/validation/5reaisFrente/compressed_0_1986857.jpeg",
"data/validation/5reaisVerso/compressed_0_4651610.jpeg",
"data/validation/10reaisFrente/compressed_0_2854543.jpeg",
"data/validation/10reaisVerso/compressed_0_2175135.jpeg",
'data/validation/20reaisFrente/compressed_0_1516768.jpeg',
'data/validation/20reaisVerso/compressed_0_3080811.jpeg',
'data/validation/50reaisFrente/compressed_0_1478513.jpeg',
'data/validation/50reaisVerso/compressed_0_3923784.jpeg']
for img in filename:
with open(img, 'rb') as f:
image_bytes = f.read()
            prediction = get_prediction(image_bytes, ort_session)  # a class label string, not a tensor
            print(prediction)
|
[
"onnxruntime.InferenceSession",
"numpy.argmax",
"io.BytesIO",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] |
[((1044, 1097), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['"""app/models/banknote_best.onnx"""'], {}), "('app/models/banknote_best.onnx')\n", (1064, 1097), True, 'import onnxruntime as ort\n'), ((669, 692), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (679, 692), False, 'import io\n'), ((931, 960), 'numpy.argmax', 'np.argmax', (['outputs[0]'], {'axis': '(1)'}), '(outputs[0], axis=1)\n', (940, 960), True, 'import numpy as np\n'), ((490, 519), 'torchvision.transforms.Resize', 'transforms.Resize', (['[224, 224]'], {}), '([224, 224])\n', (507, 519), True, 'import torchvision.transforms as transforms\n'), ((533, 554), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (552, 554), True, 'import torchvision.transforms as transforms\n'), ((568, 634), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (588, 634), True, 'import torchvision.transforms as transforms\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import vplot
import scipy.signal as sig
#plt.rcParams["text.usetex"]=True
#plt.rcParams["text.latex.unicode"]=True
plt.rcParams.update({'font.size':16,'legend.fontsize':15})
import sys
# Check correct number of arguments
if (len(sys.argv) != 2):
print('ERROR: Incorrect number of arguments.')
print('Usage: '+sys.argv[0]+' <pdf | png>')
exit(1)
if (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):
print('ERROR: Unknown file format: '+sys.argv[1])
print('Options are: pdf, png')
exit(1)
out = vplot.GetOutput()
# Print final state
#print('Final: t=%.3f TUMan=%f TMan=%f TCMB=%f TCore=%f HflowUMan=%.1f HflowCMB=%.1f RadPowerTotal=%f RadPowerMan=%.1f RadPowerCore=%.1f MagMom=%f RIC=%f'%(out.earth.Time[-1],out.earth.TUMan[-1],out.earth.TMan[-1],out.earth.TCMB[-1],out.earth.TCore[-1],out.earth.HflowUMan[-1],out.earth.HflowCMB[-1],out.earth.RadPowerTotal[-1],out.earth.RadPowerMan[-1],out.earth.RadPowerCore[-1],out.earth.MagMom[-1],out.earth.RIC[-1]))
### Uncertainty ranges
TUMan_ra = np.array([1280.,1475.])+273. #[K] Jaupart (2015) Table 4.
TCMB_ra = np.array([3800,4200.]) #[K] Hirose (2013) Table 2.
HflowUMan_ra = np.array([35,41.])   #[TW] Jaupart (2015) Table 12.
HflowCMB_ra = np.array([5,17]) #[TW] Jaupart (2015) Table 12.
ViscUMan_ra = np.array([1.5e19,1.5e22])/3300. #[m^2/s] Paulson (2005) Fig 3.
ViscLMan_ra = np.array([3e19,1.5e22])/5200. #[m^2/s] Paulson (2005) Fig 3.
MeltMassFlux_ra = np.array([0.52,4*.52]) #[1e6 kg/s] Cogne (2004) 5-15 km^3/yr. Li (2015) ~20 km^3/yr
FMeltUMan_ra = np.array([0.07,0.15]) # refs?
### Hi/lo
TUMan_lo = np.abs(TUMan_ra[0]-out.earth.TUMan[-1])
TUMan_hi = np.abs(TUMan_ra[1]-out.earth.TUMan[-1])
TCMB_lo = np.abs(TCMB_ra[0]-out.earth.TCMB[-1])
TCMB_hi = np.abs(TCMB_ra[1]-out.earth.TCMB[-1])
HflowUMan_lo = np.abs(HflowUMan_ra[0]-out.earth.HflowUMan[-1])
HflowUMan_hi = np.abs(HflowUMan_ra[1]-out.earth.HflowUMan[-1])
HflowCMB_lo = np.abs(HflowCMB_ra[0]-out.earth.HflowCMB[-1])
HflowCMB_hi = np.abs(HflowCMB_ra[1]-out.earth.HflowCMB[-1])
ViscUMan_lo = np.abs(ViscUMan_ra[0]-out.earth.ViscUMan[-1])
ViscUMan_hi = np.abs(ViscUMan_ra[1]-out.earth.ViscUMan[-1])
ViscLMan_lo = np.abs(ViscLMan_ra[0]-out.earth.ViscLMan[-1])
ViscLMan_hi = np.abs(ViscLMan_ra[1]-out.earth.ViscLMan[-1])
MeltMassFlux_lo = np.abs(MeltMassFlux_ra[0]-out.earth.MeltMassFluxMan[-1]*1e-6)
MeltMassFlux_hi = np.abs(MeltMassFlux_ra[1]-out.earth.MeltMassFluxMan[-1]*1e-6)
FMeltUMan_lo = np.abs(FMeltUMan_ra[0]-out.earth.FMeltUMan[-1])
FMeltUMan_hi = np.abs(FMeltUMan_ra[1]-out.earth.FMeltUMan[-1])
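# Small helper (illustrative addition, not in the original script): every
# errorbar call below packages its asymmetric bounds as [[lo], [hi]], so a
# one-liner keeps that matplotlib yerr convention in a single place, e.g.
# plt.errorbar(..., yerr=asym_err(TUMan_lo, TUMan_hi), ...).
def asym_err(lo, hi):
    """Return lower/upper offsets in the 2xN shape matplotlib's yerr expects."""
    return [[lo], [hi]]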
# Plots
rows=3
cols=2
# Mantle Figure
nfig=1
fig = plt.figure(nfig, figsize=(10,15))
panel=1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.TMan,color=vplot.colors.red,linestyle='-',label=r'$T_{M}$')
plt.plot(out.earth.Time,out.earth.TUMan,color=vplot.colors.orange,linestyle='-',label=r'$T_{UM}$')
plt.errorbar(out.earth.Time[-1],out.earth.TUMan[-1],yerr=[[TUMan_lo],[TUMan_hi]],color=vplot.colors.orange,fmt='o')
plt.plot(out.earth.Time,out.earth.TLMan,color=vplot.colors.pale_blue,linestyle='-',label=r'$T_{LM}$')
plt.plot(out.earth.Time,out.earth.TCMB,color=vplot.colors.purple,linestyle='-',label=r'$T_{CMB}$')
plt.errorbar(out.earth.Time[-1],out.earth.TCMB[-1],yerr=[[TCMB_lo],[TCMB_hi]],color=vplot.colors.purple,fmt='-o')
plt.plot(out.earth.Time,out.earth.TCore,'k-',label=r'$T_{C}$')
plt.legend(loc='best',ncol=2,frameon=True,columnspacing=1)
plt.ylabel('Temperature (K)')
plt.xlabel('Time (Gyr)')
plt.ylim(0,10000)
plt.xlim(0,4.6)
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.HflowUMan,color=vplot.colors.red,linestyle='-',label=r'$Q_{UMan}$')
plt.errorbar(out.earth.Time[-1],out.earth.HflowUMan[-1],yerr=[[HflowUMan_lo],[HflowUMan_hi]],color=vplot.colors.red,fmt='o')
plt.plot(out.earth.Time,out.earth.HflowCMB,color=vplot.colors.orange,linestyle='-',label=r'$Q_{CMB}$')
plt.errorbar(out.earth.Time[-1],out.earth.HflowCMB[-1],yerr=[[HflowCMB_lo],[HflowCMB_hi]],color=vplot.colors.orange,fmt='o')
plt.plot(out.earth.Time,out.earth.RadPowerMan,color=vplot.colors.pale_blue,linestyle='-',label=r'$Q_{Rad,Man}$')
plt.plot(out.earth.Time,out.earth.RadPowerCore,'k-',label=r'$Q_{Rad,Core}$')
plt.legend(loc='best',frameon=True)
plt.ylabel('Heat Flow (TW)')
plt.xlabel('Time (Gyr)')
plt.ylim(0,150)
plt.xlim(0,4.6)
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.BLUMan,label=r'$\delta_{UM}$',color=vplot.colors.dark_blue)
plt.plot(out.earth.Time,out.earth.BLLMan,label=r'$\delta_{LM}$',color=vplot.colors.orange)
plt.legend(loc='best',frameon=True)
plt.ylabel(r'Boundary Layer Depths (km)')
plt.xlabel('Time (Gyr)')
plt.xlim(0,4.6)
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.semilogy(out.earth.Time,out.earth.ViscUMan,label=r'$\nu_{UM}$',color=vplot.colors.dark_blue)
plt.errorbar(out.earth.Time[-1],out.earth.ViscUMan[-1],yerr=[[ViscUMan_lo],[ViscUMan_hi]],color=vplot.colors.dark_blue,fmt='o')
plt.semilogy(out.earth.Time,out.earth.ViscLMan,label=r'$\nu_{LM}$',color=vplot.colors.orange)
plt.errorbar(out.earth.Time[-1],out.earth.ViscLMan[-1],yerr=[[ViscLMan_lo],[ViscLMan_hi]],color=vplot.colors.orange,fmt='o')
plt.legend(loc='best',frameon=True)
plt.ylabel(r'Mantle Viscosity ($m^2s^{-1}$)')
plt.xlabel('Time (Gyr)')
plt.ylim(1e12,1e19)
plt.xlim(0,4.6)
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.FMeltUMan,color=vplot.colors.dark_blue)
plt.errorbar(out.earth.Time[-1],out.earth.FMeltUMan[-1],yerr=[[FMeltUMan_lo],[FMeltUMan_hi]],color=vplot.colors.dark_blue,fmt='o')
plt.ylabel(r'Melt Fraction Upper Mantle (n.d.)')
plt.xlabel('Time (Gyr)')
plt.xlim(0,4.6)
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.MeltMassFluxMan*1e-6,color=vplot.colors.dark_blue)
plt.errorbar(out.earth.Time[-1],out.earth.MeltMassFluxMan[-1]*1e-6,yerr=[[MeltMassFlux_lo],[MeltMassFlux_hi]],color=vplot.colors.dark_blue,fmt='o')
plt.ylabel(r'Melt Mass Flux Mantle ($\times 10^6$ kg$/$s)')
plt.xlabel('Time (Gyr)')
plt.ylim(0,100)
plt.xlim(0,4.6)
plt.xticks([0,1,2,3,4])
vplot.make_pretty(fig)
if (sys.argv[1] == 'pdf'):
plt.savefig('EarthInterior%d.pdf'%nfig)
if (sys.argv[1] == 'png'):
plt.savefig('EarthInterior%d.png'%nfig)
# Core Plots
rows=2
nfig += 1
fig = plt.figure(nfig, figsize=(10,10))
panel = 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.RIC,label='RIC')
plt.ylim(0,1500)
plt.ylabel(r'Inner Core Radius (km)')
plt.xlabel('Time (Gyr)')
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.CoreBuoyTherm*1e13,label='Thermal')
plt.plot(out.earth.Time,out.earth.CoreBuoyCompo*1e13,label='Compositional')
plt.plot(out.earth.Time,out.earth.CoreBuoyTotal*1e13,label='Total')
plt.legend(loc='best',frameon=True)
plt.ylabel(r'Core Buoyancy Flux ($\times10^{-13}$ m$^2/$s$^3$)')
plt.xlabel('Time (Gyr)')
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.MagMom,label='MagMom')
plt.ylim(0,2)
plt.ylabel('Magnetic Moment (E. Units)')
plt.xlabel('Time (Gyr)')
plt.xticks([0,1,2,3,4])
panel += 1
plt.subplot(rows,cols,panel)
plt.plot(out.earth.Time,out.earth.MagPauseRad)
plt.ylabel(r'Magnetopause Radius (E. Units)')
plt.xlabel('Time (Gyr)')
plt.xticks([0,1,2,3,4])
#panel += 1
#plt.subplot(rows,cols,panel)
#plt.plot(out.earth.Time,out.earth.ChiOC,label='ChiOC')
#plt.plot(out.earth.Time,out.earth.ChiIC,label='ChiIC')
#plt.ylim(0,0.2)
#plt.ylabel(r'Core Light Element Concentration')
#plt.xlabel('Time (Gyr)')
#plt.legend(loc='best',frameon=False)
vplot.make_pretty(fig)
if (sys.argv[1] == 'pdf'):
plt.savefig('EarthInterior%d.pdf'%nfig)
if (sys.argv[1] == 'png'):
plt.savefig('EarthInterior%d.png'%nfig)
plt.close()
|
[
"numpy.abs",
"matplotlib.pyplot.semilogy",
"vplot.make_pretty",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"matplotlib.pyplot.figure",
"vplot.GetOutput",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
] |
[((191, 252), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16, 'legend.fontsize': 15}"], {}), "({'font.size': 16, 'legend.fontsize': 15})\n", (210, 252), True, 'import matplotlib.pyplot as plt\n'), ((595, 612), 'vplot.GetOutput', 'vplot.GetOutput', ([], {}), '()\n', (610, 612), False, 'import vplot\n'), ((1165, 1189), 'numpy.array', 'np.array', (['[3800, 4200.0]'], {}), '([3800, 4200.0])\n', (1173, 1189), True, 'import numpy as np\n'), ((1245, 1265), 'numpy.array', 'np.array', (['[35, 41.0]'], {}), '([35, 41.0])\n', (1253, 1265), True, 'import numpy as np\n'), ((1323, 1343), 'numpy.array', 'np.array', (['[35, 41.0]'], {}), '([35, 41.0])\n', (1331, 1343), True, 'import numpy as np\n'), ((1401, 1418), 'numpy.array', 'np.array', (['[5, 17]'], {}), '([5, 17])\n', (1409, 1418), True, 'import numpy as np\n'), ((1640, 1666), 'numpy.array', 'np.array', (['[0.52, 4 * 0.52]'], {}), '([0.52, 4 * 0.52])\n', (1648, 1666), True, 'import numpy as np\n'), ((1747, 1769), 'numpy.array', 'np.array', (['[0.07, 0.15]'], {}), '([0.07, 0.15])\n', (1755, 1769), True, 'import numpy as np\n'), ((1810, 1851), 'numpy.abs', 'np.abs', (['(TUMan_ra[0] - out.earth.TUMan[-1])'], {}), '(TUMan_ra[0] - out.earth.TUMan[-1])\n', (1816, 1851), True, 'import numpy as np\n'), ((1861, 1902), 'numpy.abs', 'np.abs', (['(TUMan_ra[1] - out.earth.TUMan[-1])'], {}), '(TUMan_ra[1] - out.earth.TUMan[-1])\n', (1867, 1902), True, 'import numpy as np\n'), ((1911, 1950), 'numpy.abs', 'np.abs', (['(TCMB_ra[0] - out.earth.TCMB[-1])'], {}), '(TCMB_ra[0] - out.earth.TCMB[-1])\n', (1917, 1950), True, 'import numpy as np\n'), ((1959, 1998), 'numpy.abs', 'np.abs', (['(TCMB_ra[1] - out.earth.TCMB[-1])'], {}), '(TCMB_ra[1] - out.earth.TCMB[-1])\n', (1965, 1998), True, 'import numpy as np\n'), ((2012, 2061), 'numpy.abs', 'np.abs', (['(HflowUMan_ra[0] - out.earth.HflowUMan[-1])'], {}), '(HflowUMan_ra[0] - out.earth.HflowUMan[-1])\n', (2018, 2061), True, 'import numpy as np\n'), ((2075, 2124), 'numpy.abs', 'np.abs', (['(HflowUMan_ra[1] - out.earth.HflowUMan[-1])'], {}), '(HflowUMan_ra[1] - out.earth.HflowUMan[-1])\n', (2081, 2124), True, 'import numpy as np\n'), ((2137, 2184), 'numpy.abs', 'np.abs', (['(HflowCMB_ra[0] - out.earth.HflowCMB[-1])'], {}), '(HflowCMB_ra[0] - out.earth.HflowCMB[-1])\n', (2143, 2184), True, 'import numpy as np\n'), ((2197, 2244), 'numpy.abs', 'np.abs', (['(HflowCMB_ra[1] - out.earth.HflowCMB[-1])'], {}), '(HflowCMB_ra[1] - out.earth.HflowCMB[-1])\n', (2203, 2244), True, 'import numpy as np\n'), ((2257, 2304), 'numpy.abs', 'np.abs', (['(ViscUMan_ra[0] - out.earth.ViscUMan[-1])'], {}), '(ViscUMan_ra[0] - out.earth.ViscUMan[-1])\n', (2263, 2304), True, 'import numpy as np\n'), ((2317, 2364), 'numpy.abs', 'np.abs', (['(ViscUMan_ra[1] - out.earth.ViscUMan[-1])'], {}), '(ViscUMan_ra[1] - out.earth.ViscUMan[-1])\n', (2323, 2364), True, 'import numpy as np\n'), ((2377, 2424), 'numpy.abs', 'np.abs', (['(ViscLMan_ra[0] - out.earth.ViscLMan[-1])'], {}), '(ViscLMan_ra[0] - out.earth.ViscLMan[-1])\n', (2383, 2424), True, 'import numpy as np\n'), ((2437, 2484), 'numpy.abs', 'np.abs', (['(ViscLMan_ra[1] - out.earth.ViscLMan[-1])'], {}), '(ViscLMan_ra[1] - out.earth.ViscLMan[-1])\n', (2443, 2484), True, 'import numpy as np\n'), ((2501, 2567), 'numpy.abs', 'np.abs', (['(MeltMassFlux_ra[0] - out.earth.MeltMassFluxMan[-1] * 1e-06)'], {}), '(MeltMassFlux_ra[0] - out.earth.MeltMassFluxMan[-1] * 1e-06)\n', (2507, 2567), True, 'import numpy as np\n'), ((2581, 2647), 'numpy.abs', 'np.abs', (['(MeltMassFlux_ra[1] - 
out.earth.MeltMassFluxMan[-1] * 1e-06)'], {}), '(MeltMassFlux_ra[1] - out.earth.MeltMassFluxMan[-1] * 1e-06)\n', (2587, 2647), True, 'import numpy as np\n'), ((2658, 2707), 'numpy.abs', 'np.abs', (['(FMeltUMan_ra[0] - out.earth.FMeltUMan[-1])'], {}), '(FMeltUMan_ra[0] - out.earth.FMeltUMan[-1])\n', (2664, 2707), True, 'import numpy as np\n'), ((2721, 2770), 'numpy.abs', 'np.abs', (['(FMeltUMan_ra[1] - out.earth.FMeltUMan[-1])'], {}), '(FMeltUMan_ra[1] - out.earth.FMeltUMan[-1])\n', (2727, 2770), True, 'import numpy as np\n'), ((2821, 2855), 'matplotlib.pyplot.figure', 'plt.figure', (['nfig'], {'figsize': '(10, 15)'}), '(nfig, figsize=(10, 15))\n', (2831, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2893), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (2874, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2993), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.TMan'], {'color': 'vplot.colors.red', 'linestyle': '"""-"""', 'label': '"""$T_{M}$"""'}), "(out.earth.Time, out.earth.TMan, color=vplot.colors.red, linestyle=\n '-', label='$T_{M}$')\n", (2900, 2993), True, 'import matplotlib.pyplot as plt\n'), ((2986, 3091), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.TUMan'], {'color': 'vplot.colors.orange', 'linestyle': '"""-"""', 'label': '"""$T_{UM}$"""'}), "(out.earth.Time, out.earth.TUMan, color=vplot.colors.orange,\n linestyle='-', label='$T_{UM}$')\n", (2994, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3210), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', 'out.earth.TUMan[-1]'], {'yerr': '[[TUMan_lo], [TUMan_hi]]', 'color': 'vplot.colors.orange', 'fmt': '"""o"""'}), "(out.earth.Time[-1], out.earth.TUMan[-1], yerr=[[TUMan_lo], [\n TUMan_hi]], color=vplot.colors.orange, fmt='o')\n", (3097, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3201, 3309), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.TLMan'], {'color': 'vplot.colors.pale_blue', 'linestyle': '"""-"""', 'label': '"""$T_{LM}$"""'}), "(out.earth.Time, out.earth.TLMan, color=vplot.colors.pale_blue,\n linestyle='-', label='$T_{LM}$')\n", (3209, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3303, 3408), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.TCMB'], {'color': 'vplot.colors.purple', 'linestyle': '"""-"""', 'label': '"""$T_{CMB}$"""'}), "(out.earth.Time, out.earth.TCMB, color=vplot.colors.purple,\n linestyle='-', label='$T_{CMB}$')\n", (3311, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3402, 3525), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', 'out.earth.TCMB[-1]'], {'yerr': '[[TCMB_lo], [TCMB_hi]]', 'color': 'vplot.colors.purple', 'fmt': '"""-o"""'}), "(out.earth.Time[-1], out.earth.TCMB[-1], yerr=[[TCMB_lo], [\n TCMB_hi]], color=vplot.colors.purple, fmt='-o')\n", (3414, 3525), True, 'import matplotlib.pyplot as plt\n'), ((3516, 3580), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.TCore', '"""k-"""'], {'label': '"""$T_{C}$"""'}), "(out.earth.Time, out.earth.TCore, 'k-', label='$T_{C}$')\n", (3524, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3640), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'ncol': '(2)', 'frameon': '(True)', 'columnspacing': '(1)'}), "(loc='best', ncol=2, frameon=True, columnspacing=1)\n", (3589, 3640), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3667), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Temperature (K)"""'], {}), "('Temperature (K)')\n", (3648, 3667), True, 'import matplotlib.pyplot as plt\n'), ((3668, 3692), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (3678, 3692), True, 'import matplotlib.pyplot as plt\n'), ((3693, 3711), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(10000)'], {}), '(0, 10000)\n', (3701, 3711), True, 'import matplotlib.pyplot as plt\n'), ((3711, 3727), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(4.6)'], {}), '(0, 4.6)\n', (3719, 3727), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3754), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (3737, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3762, 3792), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (3773, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3791, 3899), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.HflowUMan'], {'color': 'vplot.colors.red', 'linestyle': '"""-"""', 'label': '"""$Q_{UMan}$"""'}), "(out.earth.Time, out.earth.HflowUMan, color=vplot.colors.red,\n linestyle='-', label='$Q_{UMan}$')\n", (3799, 3899), True, 'import matplotlib.pyplot as plt\n'), ((3893, 4027), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', 'out.earth.HflowUMan[-1]'], {'yerr': '[[HflowUMan_lo], [HflowUMan_hi]]', 'color': 'vplot.colors.red', 'fmt': '"""o"""'}), "(out.earth.Time[-1], out.earth.HflowUMan[-1], yerr=[[\n HflowUMan_lo], [HflowUMan_hi]], color=vplot.colors.red, fmt='o')\n", (3905, 4027), True, 'import matplotlib.pyplot as plt\n'), ((4018, 4127), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.HflowCMB'], {'color': 'vplot.colors.orange', 'linestyle': '"""-"""', 'label': '"""$Q_{CMB}$"""'}), "(out.earth.Time, out.earth.HflowCMB, color=vplot.colors.orange,\n linestyle='-', label='$Q_{CMB}$')\n", (4026, 4127), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4255), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', 'out.earth.HflowCMB[-1]'], {'yerr': '[[HflowCMB_lo], [HflowCMB_hi]]', 'color': 'vplot.colors.orange', 'fmt': '"""o"""'}), "(out.earth.Time[-1], out.earth.HflowCMB[-1], yerr=[[HflowCMB_lo\n ], [HflowCMB_hi]], color=vplot.colors.orange, fmt='o')\n", (4133, 4255), True, 'import matplotlib.pyplot as plt\n'), ((4246, 4366), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.RadPowerMan'], {'color': 'vplot.colors.pale_blue', 'linestyle': '"""-"""', 'label': '"""$Q_{Rad,Man}$"""'}), "(out.earth.Time, out.earth.RadPowerMan, color=vplot.colors.\n pale_blue, linestyle='-', label='$Q_{Rad,Man}$')\n", (4254, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4359, 4437), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.RadPowerCore', '"""k-"""'], {'label': '"""$Q_{Rad,Core}$"""'}), "(out.earth.Time, out.earth.RadPowerCore, 'k-', label='$Q_{Rad,Core}$')\n", (4367, 4437), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4472), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (4446, 4472), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4500), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Heat Flow (TW)"""'], {}), "('Heat Flow (TW)')\n", (4482, 4500), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4525), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (4511, 4525), True, 'import 
matplotlib.pyplot as plt\n'), ((4526, 4542), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(150)'], {}), '(0, 150)\n', (4534, 4542), True, 'import matplotlib.pyplot as plt\n'), ((4542, 4558), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(4.6)'], {}), '(0, 4.6)\n', (4550, 4558), True, 'import matplotlib.pyplot as plt\n'), ((4558, 4585), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (4568, 4585), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4623), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (4604, 4623), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4723), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.BLUMan'], {'label': '"""$\\\\delta_{UM}$"""', 'color': 'vplot.colors.dark_blue'}), "(out.earth.Time, out.earth.BLUMan, label='$\\\\delta_{UM}$', color=\n vplot.colors.dark_blue)\n", (4630, 4723), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4814), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.BLLMan'], {'label': '"""$\\\\delta_{LM}$"""', 'color': 'vplot.colors.orange'}), "(out.earth.Time, out.earth.BLLMan, label='$\\\\delta_{LM}$', color=\n vplot.colors.orange)\n", (4724, 4814), True, 'import matplotlib.pyplot as plt\n'), ((4807, 4843), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (4817, 4843), True, 'import matplotlib.pyplot as plt\n'), ((4843, 4883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Boundary Layer Depths (km)"""'], {}), "('Boundary Layer Depths (km)')\n", (4853, 4883), True, 'import matplotlib.pyplot as plt\n'), ((4885, 4909), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (4895, 4909), True, 'import matplotlib.pyplot as plt\n'), ((4910, 4926), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(4.6)'], {}), '(0, 4.6)\n', (4918, 4926), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4953), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (4936, 4953), True, 'import matplotlib.pyplot as plt\n'), ((4961, 4991), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (4972, 4991), True, 'import matplotlib.pyplot as plt\n'), ((4990, 5094), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['out.earth.Time', 'out.earth.ViscUMan'], {'label': '"""$\\\\nu_{UM}$"""', 'color': 'vplot.colors.dark_blue'}), "(out.earth.Time, out.earth.ViscUMan, label='$\\\\nu_{UM}$', color\n =vplot.colors.dark_blue)\n", (5002, 5094), True, 'import matplotlib.pyplot as plt\n'), ((5087, 5224), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', 'out.earth.ViscUMan[-1]'], {'yerr': '[[ViscUMan_lo], [ViscUMan_hi]]', 'color': 'vplot.colors.dark_blue', 'fmt': '"""o"""'}), "(out.earth.Time[-1], out.earth.ViscUMan[-1], yerr=[[ViscUMan_lo\n ], [ViscUMan_hi]], color=vplot.colors.dark_blue, fmt='o')\n", (5099, 5224), True, 'import matplotlib.pyplot as plt\n'), ((5215, 5316), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['out.earth.Time', 'out.earth.ViscLMan'], {'label': '"""$\\\\nu_{LM}$"""', 'color': 'vplot.colors.orange'}), "(out.earth.Time, out.earth.ViscLMan, label='$\\\\nu_{LM}$', color\n =vplot.colors.orange)\n", (5227, 5316), True, 'import matplotlib.pyplot as plt\n'), ((5309, 5443), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', 'out.earth.ViscLMan[-1]'], {'yerr': '[[ViscLMan_lo], 
[ViscLMan_hi]]', 'color': 'vplot.colors.orange', 'fmt': '"""o"""'}), "(out.earth.Time[-1], out.earth.ViscLMan[-1], yerr=[[ViscLMan_lo\n ], [ViscLMan_hi]], color=vplot.colors.orange, fmt='o')\n", (5321, 5443), True, 'import matplotlib.pyplot as plt\n'), ((5434, 5470), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (5444, 5470), True, 'import matplotlib.pyplot as plt\n'), ((5470, 5514), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mantle Viscosity ($m^2s^{-1}$)"""'], {}), "('Mantle Viscosity ($m^2s^{-1}$)')\n", (5480, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5516, 5540), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (5526, 5540), True, 'import matplotlib.pyplot as plt\n'), ((5541, 5573), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1000000000000.0)', '(1e+19)'], {}), '(1000000000000.0, 1e+19)\n', (5549, 5573), True, 'import matplotlib.pyplot as plt\n'), ((5561, 5577), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(4.6)'], {}), '(0, 4.6)\n', (5569, 5577), True, 'import matplotlib.pyplot as plt\n'), ((5577, 5604), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (5587, 5604), True, 'import matplotlib.pyplot as plt\n'), ((5612, 5642), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (5623, 5642), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5716), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.FMeltUMan'], {'color': 'vplot.colors.dark_blue'}), '(out.earth.Time, out.earth.FMeltUMan, color=vplot.colors.dark_blue)\n', (5649, 5716), True, 'import matplotlib.pyplot as plt\n'), ((5715, 5863), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', '(out.earth.FMeltUMan[-1] * 1e-06)'], {'yerr': '[[FMeltUMan_lo], [FMeltUMan_hi]]', 'color': 'vplot.colors.dark_blue', 'fmt': '"""o"""'}), "(out.earth.Time[-1], out.earth.FMeltUMan[-1] * 1e-06, yerr=[[\n FMeltUMan_lo], [FMeltUMan_hi]], color=vplot.colors.dark_blue, fmt='o')\n", (5727, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5851, 5898), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Melt Fraction Upper Mantle (n.d.)"""'], {}), "('Melt Fraction Upper Mantle (n.d.)')\n", (5861, 5898), True, 'import matplotlib.pyplot as plt\n'), ((5900, 5924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (5910, 5924), True, 'import matplotlib.pyplot as plt\n'), ((5925, 5941), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(4.6)'], {}), '(0, 4.6)\n', (5933, 5941), True, 'import matplotlib.pyplot as plt\n'), ((5941, 5968), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (5951, 5968), True, 'import matplotlib.pyplot as plt\n'), ((5976, 6006), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (5987, 6006), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6099), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', '(out.earth.MeltMassFluxMan * 1e-06)'], {'color': 'vplot.colors.dark_blue'}), '(out.earth.Time, out.earth.MeltMassFluxMan * 1e-06, color=vplot.\n colors.dark_blue)\n', (6013, 6099), True, 'import matplotlib.pyplot as plt\n'), ((6090, 6254), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['out.earth.Time[-1]', '(out.earth.MeltMassFluxMan[-1] * 1e-06)'], {'yerr': '[[MeltMassFlux_lo], [MeltMassFlux_hi]]', 'color': 'vplot.colors.dark_blue', 'fmt': 
'"""o"""'}), "(out.earth.Time[-1], out.earth.MeltMassFluxMan[-1] * 1e-06,\n yerr=[[MeltMassFlux_lo], [MeltMassFlux_hi]], color=vplot.colors.\n dark_blue, fmt='o')\n", (6102, 6254), True, 'import matplotlib.pyplot as plt\n'), ((6238, 6297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Melt Mass Flux Mantle ($\\\\times 10^6$ kg$/$s)"""'], {}), "('Melt Mass Flux Mantle ($\\\\times 10^6$ kg$/$s)')\n", (6248, 6297), True, 'import matplotlib.pyplot as plt\n'), ((6298, 6322), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (6308, 6322), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6339), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (6331, 6339), True, 'import matplotlib.pyplot as plt\n'), ((6339, 6355), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(4.6)'], {}), '(0, 4.6)\n', (6347, 6355), True, 'import matplotlib.pyplot as plt\n'), ((6355, 6382), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (6365, 6382), True, 'import matplotlib.pyplot as plt\n'), ((6380, 6402), 'vplot.make_pretty', 'vplot.make_pretty', (['fig'], {}), '(fig)\n', (6397, 6402), False, 'import vplot\n'), ((6582, 6616), 'matplotlib.pyplot.figure', 'plt.figure', (['nfig'], {'figsize': '(10, 10)'}), '(nfig, figsize=(10, 10))\n', (6592, 6616), True, 'import matplotlib.pyplot as plt\n'), ((6626, 6656), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (6637, 6656), True, 'import matplotlib.pyplot as plt\n'), ((6655, 6707), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.RIC'], {'label': '"""RIC"""'}), "(out.earth.Time, out.earth.RIC, label='RIC')\n", (6663, 6707), True, 'import matplotlib.pyplot as plt\n'), ((6706, 6723), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1500)'], {}), '(0, 1500)\n', (6714, 6723), True, 'import matplotlib.pyplot as plt\n'), ((6723, 6759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Inner Core Radius (km)"""'], {}), "('Inner Core Radius (km)')\n", (6733, 6759), True, 'import matplotlib.pyplot as plt\n'), ((6761, 6785), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (6771, 6785), True, 'import matplotlib.pyplot as plt\n'), ((6797, 6827), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (6808, 6827), True, 'import matplotlib.pyplot as plt\n'), ((6826, 6916), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', '(out.earth.CoreBuoyTherm * 10000000000000.0)'], {'label': '"""Thermal"""'}), "(out.earth.Time, out.earth.CoreBuoyTherm * 10000000000000.0, label=\n 'Thermal')\n", (6834, 6916), True, 'import matplotlib.pyplot as plt\n'), ((6896, 6992), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', '(out.earth.CoreBuoyCompo * 10000000000000.0)'], {'label': '"""Compositional"""'}), "(out.earth.Time, out.earth.CoreBuoyCompo * 10000000000000.0, label=\n 'Compositional')\n", (6904, 6992), True, 'import matplotlib.pyplot as plt\n'), ((6972, 7060), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', '(out.earth.CoreBuoyTotal * 10000000000000.0)'], {'label': '"""Total"""'}), "(out.earth.Time, out.earth.CoreBuoyTotal * 10000000000000.0, label=\n 'Total')\n", (6980, 7060), True, 'import matplotlib.pyplot as plt\n'), ((7040, 7076), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (7050, 7076), True, 'import matplotlib.pyplot as 
plt\n'), ((7076, 7140), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Core Buoyancy Flux ($\\\\times10^{-13}$ m$^2/$s$^3$)"""'], {}), "('Core Buoyancy Flux ($\\\\times10^{-13}$ m$^2/$s$^3$)')\n", (7086, 7140), True, 'import matplotlib.pyplot as plt\n'), ((7141, 7165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (7151, 7165), True, 'import matplotlib.pyplot as plt\n'), ((7166, 7193), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (7176, 7193), True, 'import matplotlib.pyplot as plt\n'), ((7201, 7231), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (7212, 7231), True, 'import matplotlib.pyplot as plt\n'), ((7230, 7288), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.MagMom'], {'label': '"""MagMom"""'}), "(out.earth.Time, out.earth.MagMom, label='MagMom')\n", (7238, 7288), True, 'import matplotlib.pyplot as plt\n'), ((7287, 7301), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(2)'], {}), '(0, 2)\n', (7295, 7301), True, 'import matplotlib.pyplot as plt\n'), ((7301, 7341), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnetic Moment (E. Units)"""'], {}), "('Magnetic Moment (E. Units)')\n", (7311, 7341), True, 'import matplotlib.pyplot as plt\n'), ((7342, 7366), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (7352, 7366), True, 'import matplotlib.pyplot as plt\n'), ((7367, 7394), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (7377, 7394), True, 'import matplotlib.pyplot as plt\n'), ((7402, 7432), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', 'panel'], {}), '(rows, cols, panel)\n', (7413, 7432), True, 'import matplotlib.pyplot as plt\n'), ((7431, 7478), 'matplotlib.pyplot.plot', 'plt.plot', (['out.earth.Time', 'out.earth.MagPauseRad'], {}), '(out.earth.Time, out.earth.MagPauseRad)\n', (7439, 7478), True, 'import matplotlib.pyplot as plt\n'), ((7478, 7522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnetopause Radius (E. Units)"""'], {}), "('Magnetopause Radius (E. 
Units)')\n", (7488, 7522), True, 'import matplotlib.pyplot as plt\n'), ((7524, 7548), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Gyr)"""'], {}), "('Time (Gyr)')\n", (7534, 7548), True, 'import matplotlib.pyplot as plt\n'), ((7549, 7576), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (7559, 7576), True, 'import matplotlib.pyplot as plt\n'), ((7858, 7880), 'vplot.make_pretty', 'vplot.make_pretty', (['fig'], {}), '(fig)\n', (7875, 7880), False, 'import vplot\n'), ((8023, 8034), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8032, 8034), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1116), 'numpy.array', 'np.array', (['[1280.0, 1475.0]'], {}), '([1280.0, 1475.0])\n', (1098, 1116), True, 'import numpy as np\n'), ((1480, 1508), 'numpy.array', 'np.array', (['[1.5e+19, 1.5e+22]'], {}), '([1.5e+19, 1.5e+22])\n', (1488, 1508), True, 'import numpy as np\n'), ((1558, 1584), 'numpy.array', 'np.array', (['[3e+19, 1.5e+22]'], {}), '([3e+19, 1.5e+22])\n', (1566, 1584), True, 'import numpy as np\n'), ((6434, 6475), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('EarthInterior%d.pdf' % nfig)"], {}), "('EarthInterior%d.pdf' % nfig)\n", (6445, 6475), True, 'import matplotlib.pyplot as plt\n'), ((6505, 6546), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('EarthInterior%d.png' % nfig)"], {}), "('EarthInterior%d.png' % nfig)\n", (6516, 6546), True, 'import matplotlib.pyplot as plt\n'), ((7912, 7953), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('EarthInterior%d.pdf' % nfig)"], {}), "('EarthInterior%d.pdf' % nfig)\n", (7923, 7953), True, 'import matplotlib.pyplot as plt\n'), ((7983, 8024), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('EarthInterior%d.png' % nfig)"], {}), "('EarthInterior%d.png' % nfig)\n", (7994, 8024), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import array
import numpy as np
from numcodecs.compat import buffer_tobytes
def test_buffer_tobytes():
bufs = [
b'adsdasdas',
bytes(20),
np.arange(100),
array.array('l', b'qwertyuiqwertyui')
]
for buf in bufs:
b = buffer_tobytes(buf)
assert isinstance(b, bytes)
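# buffer_tobytes accepts anything exposing the buffer protocol (bytes, numpy
# arrays, array.array instances) and returns an immutable bytes copy, which
# is exactly what the assertions above exercise.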
|
[
"numcodecs.compat.buffer_tobytes",
"array.array",
"numpy.arange"
] |
[((260, 274), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (269, 274), True, 'import numpy as np\n'), ((284, 321), 'array.array', 'array.array', (['"""l"""', "b'qwertyuiqwertyui'"], {}), "('l', b'qwertyuiqwertyui')\n", (295, 321), False, 'import array\n'), ((361, 380), 'numcodecs.compat.buffer_tobytes', 'buffer_tobytes', (['buf'], {}), '(buf)\n', (375, 380), False, 'from numcodecs.compat import buffer_tobytes\n')]
|
import tensorflow as tf
import numpy as np
import cv2
import os
import rospy
from timeit import default_timer as timer
from styx_msgs.msg import TrafficLight
CLASS_TRAFFIC_LIGHT = 10
MODEL_DIR = 'light_classification/models/'
IMG_DIR = 'light_classification/img/'
DEBUG_DIR = 'light_classification/result/'
class TLClassifier(object):
def __init__(self):
#TODO load classifier
# object detection: faster_rcnn_inception_v2
# from Tensorflow detection model zoo:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
self.detector = MODEL_DIR + 'faster_rcnn_inception_v2.pb'
self.sess= self.load_graph(self.detector)
detection_graph = self.sess.graph
if not os.path.exists(DEBUG_DIR): #check the result of light detection
os.makedirs(DEBUG_DIR)
# The input placeholder for the image.
# 'get_tensor_by_name' returns the Tensor with the associated name in the Graph.
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        # Warm-up inference: run one detection pass so the graph is fully initialized before the first camera frame
test_image = cv2.imread(IMG_DIR + 'image_test.jpg')
image_np, box_coords, classes, scores = self.detect_tl(test_image)
# Traditional traffic light classifier
pred_image, is_red = self.classify_red_tl(image_np, box_coords, classes, scores)
# rospy.loginfo("DEBUG: stage 4")
if is_red:
rospy.loginfo("Classifier: RED")
else:
rospy.loginfo("Classifier: NOT RED")
cv2.imwrite(IMG_DIR + 'pred_image.png', pred_image)
rospy.loginfo("TensorFlow Initiation: Done")
self.num_image = 1
def load_graph(self, graph_file, use_xla=False):
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
# if use_xla:
# jit_level = tf.OptimizerOptions.ON_1
# config.graph_options.optimizer_options.global_jit_level = jit_level
with tf.Session(graph=tf.Graph(), config=config) as sess:
gd = tf.GraphDef()
with tf.gfile.Open(graph_file, 'rb') as f:
data = f.read()
gd.ParseFromString(data)
tf.import_graph_def(gd, name='')
ops = sess.graph.get_operations()
n_ops = len(ops)
print("number of operations = %d" % n_ops)
return sess
# return sess, ops
def detect_tl(self, image):
trt_image = np.copy(image)
image_np = np.expand_dims(np.asarray(trt_image, dtype=np.uint8), 0)
# Actual detection.
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
feed_dict={self.image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
confidence_cutoff = 0.8
# Filter traffic light boxes with a confidence score less than `confidence_cutoff`
boxes, scores, classes = self.filter_boxes(confidence_cutoff, boxes, scores, classes, keep_classes=[CLASS_TRAFFIC_LIGHT])
# The current box coordinates are normalized to a range between 0 and 1.
# This converts the coordinates actual location on the image.
image_np = np.squeeze(image_np)
width = image_np.shape[1]
height = image_np.shape[0]
box_coords = self.to_image_coords(boxes, height, width)
return image_np, box_coords, classes, scores
# Filter the boxes which detection confidence lower than the threshold
def filter_boxes(self, min_score, boxes, scores, classes, keep_classes):
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
if ((keep_classes is None) or (int(classes[i]) in keep_classes)):
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
# Convert the normalized box coordinates (0~1) to image coordinates
def to_image_coords(self, boxes, height, width):
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width
return box_coords
#Draw bounding box on traffic light, and detect if it is RED
def classify_red_tl(self, image_np, boxes, classes, scores, thickness=5):
for i in range(len(boxes)):
# rospy.loginfo("DEBUG: stage 3.1")
bot, left, top, right = boxes[i, ...]
class_id = int(classes[i])
score = scores[i]
h = top - bot
w = right - left
if h <= 1.5 * w:
                continue # Truncated Traffic Light box
cv2.rectangle(image_np,(left, top), (right, bot), (255, 43, 255), thickness) # BGR format for color
tl_img = image_np[int(bot):int(top), int(left):int(right)]
tl_img_simu = self.select_red_simu(tl_img) # SELECT RED
tl_img_real = self.select_lighton_real(tl_img) # SELECT TL
            # Average the two masks, widening first so uint8 addition cannot overflow
            tl_img = ((tl_img_simu.astype(np.uint16) + tl_img_real) // 2).astype(np.uint8)
gray_tl_img = cv2.cvtColor(tl_img, cv2.COLOR_RGB2GRAY)
nrows, ncols = gray_tl_img.shape[0], gray_tl_img.shape[1]
# compute center of mass of RED points
mean_row = 0
mean_col = 0
npoints = 0
for row in range(nrows):
for col in range(ncols):
if (gray_tl_img[row, col] > 0):
mean_row += row
mean_col += col
npoints += 1
if npoints > 0:
mean_row = float(mean_row / npoints) / nrows
mean_col = float(mean_col / npoints) / ncols
# Get the normalized center of mass of RED points
# Use the location of light to detect the color, RED is in the upper part of the box
if npoints > 10 and mean_row < 0.33:
rospy.loginfo("RED Light Detection Confidance: %.2f", score)
return image_np, True
return image_np, False
    # Select the RED color mask (simulator camera images)
def select_red_simu(self, img): # BGR
lower = np.array([ 0, 0, 200], dtype="uint8")
upper = np.array([ 55, 55, 255], dtype="uint8")
red_mask = cv2.inRange(img, lower, upper)
return cv2.bitwise_and(img, img, mask = red_mask)
    # Select the lit traffic-light area (HLS: high lightness and saturation)
    # in real images, for a camera without a polarization filter
def select_lighton_real(self, img): # HLS for real
hls_img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
lower = np.array([ 50, 150, 150], dtype="uint8")
upper = np.array([ 100, 255, 255], dtype="uint8")
tl_mask = cv2.inRange(hls_img, lower, upper)
return cv2.bitwise_and(img, img, mask = tl_mask)
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
        # Detect traffic lights in the image
image_np, box_coords, classes, scores = self.detect_tl(image)
# light color detection
detected_image, is_red = self.classify_red_tl(image_np, box_coords, classes, scores)
# fimage = DEBUG_DIR + 'detected_img_' + str(self.num_image) + '.png'
# #output the predicted image
# cv2.imwrite(fimage, detected_image)
self.num_image += 1
        # Return RED if a red light was detected
if is_red:
return TrafficLight.RED
else:
return TrafficLight.UNKNOWN
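# Hedged usage sketch (illustrative addition): `TLClassifier` is a stand-in
# name, since the enclosing class statement lies above this fragment.
#     classifier = TLClassifier()
#     state = classifier.get_classification(camera_bgr_image)
#     if state == TrafficLight.RED:
#         rospy.loginfo("Red light - prepare to stop")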
|
[
"cv2.rectangle",
"numpy.array",
"os.path.exists",
"tensorflow.Graph",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.GraphDef",
"tensorflow.ConfigProto",
"numpy.squeeze",
"cv2.cvtColor",
"tensorflow.import_graph_def",
"cv2.imread",
"rospy.loginfo",
"cv2.imwrite",
"numpy.copy",
"tensorflow.gfile.Open",
"os.makedirs",
"cv2.inRange",
"cv2.bitwise_and",
"numpy.zeros_like"
] |
[((1463, 1501), 'cv2.imread', 'cv2.imread', (["(IMG_DIR + 'image_test.jpg')"], {}), "(IMG_DIR + 'image_test.jpg')\n", (1473, 1501), False, 'import cv2\n'), ((1908, 1959), 'cv2.imwrite', 'cv2.imwrite', (["(IMG_DIR + 'pred_image.png')", 'pred_image'], {}), "(IMG_DIR + 'pred_image.png', pred_image)\n", (1919, 1959), False, 'import cv2\n'), ((1968, 2012), 'rospy.loginfo', 'rospy.loginfo', (['"""TensorFlow Initiation: Done"""'], {}), "('TensorFlow Initiation: Done')\n", (1981, 2012), False, 'import rospy\n'), ((2129, 2171), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)'}), '(log_device_placement=False)\n', (2143, 2171), True, 'import tensorflow as tf\n'), ((2237, 2262), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2247, 2262), True, 'import tensorflow as tf\n'), ((2928, 2942), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2935, 2942), True, 'import numpy as np\n'), ((3328, 3345), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3338, 3345), True, 'import numpy as np\n'), ((3363, 3381), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3373, 3381), True, 'import numpy as np\n'), ((3400, 3419), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3410, 3419), True, 'import numpy as np\n'), ((3853, 3873), 'numpy.squeeze', 'np.squeeze', (['image_np'], {}), '(image_np)\n', (3863, 3873), True, 'import numpy as np\n'), ((4812, 4832), 'numpy.zeros_like', 'np.zeros_like', (['boxes'], {}), '(boxes)\n', (4825, 4832), True, 'import numpy as np\n'), ((7078, 7114), 'numpy.array', 'np.array', (['[0, 0, 200]'], {'dtype': '"""uint8"""'}), "([0, 0, 200], dtype='uint8')\n", (7086, 7114), True, 'import numpy as np\n'), ((7134, 7172), 'numpy.array', 'np.array', (['[55, 55, 255]'], {'dtype': '"""uint8"""'}), "([55, 55, 255], dtype='uint8')\n", (7142, 7172), True, 'import numpy as np\n'), ((7193, 7223), 'cv2.inRange', 'cv2.inRange', (['img', 'lower', 'upper'], {}), '(img, lower, upper)\n', (7204, 7223), False, 'import cv2\n'), ((7239, 7279), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'red_mask'}), '(img, img, mask=red_mask)\n', (7254, 7279), False, 'import cv2\n'), ((7469, 7505), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (7481, 7505), False, 'import cv2\n'), ((7522, 7561), 'numpy.array', 'np.array', (['[50, 150, 150]'], {'dtype': '"""uint8"""'}), "([50, 150, 150], dtype='uint8')\n", (7530, 7561), True, 'import numpy as np\n'), ((7581, 7621), 'numpy.array', 'np.array', (['[100, 255, 255]'], {'dtype': '"""uint8"""'}), "([100, 255, 255], dtype='uint8')\n", (7589, 7621), True, 'import numpy as np\n'), ((7641, 7675), 'cv2.inRange', 'cv2.inRange', (['hls_img', 'lower', 'upper'], {}), '(hls_img, lower, upper)\n', (7652, 7675), False, 'import cv2\n'), ((7691, 7730), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'tl_mask'}), '(img, img, mask=tl_mask)\n', (7706, 7730), False, 'import cv2\n'), ((820, 845), 'os.path.exists', 'os.path.exists', (['DEBUG_DIR'], {}), '(DEBUG_DIR)\n', (834, 845), False, 'import os\n'), ((896, 918), 'os.makedirs', 'os.makedirs', (['DEBUG_DIR'], {}), '(DEBUG_DIR)\n', (907, 918), False, 'import os\n'), ((1786, 1818), 'rospy.loginfo', 'rospy.loginfo', (['"""Classifier: RED"""'], {}), "('Classifier: RED')\n", (1799, 1818), False, 'import rospy\n'), ((1845, 1881), 'rospy.loginfo', 'rospy.loginfo', (['"""Classifier: NOT RED"""'], {}), "('Classifier: NOT RED')\n", (1858, 1881), False, 'import rospy\n'), ((2501, 2514), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2512, 2514), True, 'import tensorflow as tf\n'), ((2655, 2687), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['gd'], {'name': '""""""'}), "(gd, name='')\n", (2674, 2687), True, 'import tensorflow as tf\n'), ((2977, 3014), 'numpy.asarray', 'np.asarray', (['trt_image'], {'dtype': 'np.uint8'}), '(trt_image, dtype=np.uint8)\n', (2987, 3014), True, 'import numpy as np\n'), ((5567, 5644), 'cv2.rectangle', 'cv2.rectangle', (['image_np', '(left, top)', '(right, bot)', '(255, 43, 255)', 'thickness'], {}), '(image_np, (left, top), (right, bot), (255, 43, 255), thickness)\n', (5580, 5644), False, 'import cv2\n'), ((5958, 5998), 'cv2.cvtColor', 'cv2.cvtColor', (['tl_img', 'cv2.COLOR_RGB2GRAY'], {}), '(tl_img, cv2.COLOR_RGB2GRAY)\n', (5970, 5998), False, 'import cv2\n'), ((2532, 2563), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['graph_file', '"""rb"""'], {}), "(graph_file, 'rb')\n", (2545, 2563), True, 'import tensorflow as tf\n'), ((2448, 2458), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2456, 2458), True, 'import tensorflow as tf\n'), ((6822, 6882), 'rospy.loginfo', 'rospy.loginfo', (['"""RED Light Detection Confidence: %.2f"""', 'score'], {}), "('RED Light Detection Confidence: %.2f', score)\n", (6835, 6882), False, 'import rospy\n')]
|
"""Created on Sat Oct 01 2015 16:24.
@author: <NAME>
"""
import numpy as np
def coe2mee(COE, mu=1.):
"""
Convert classical orbital elements to modified equinoctial elements.
Parameters
----------
COE : ndarray
mx6 array of elements ordered as [p e i W w nu].
    mu : float
        Standard gravitational parameter. Defaults to canonical units.
        (Not used by this conversion.)
Returns
-------
MEE : ndarray
mx6 array of elements ordered as [p f g h k L].
"""
p = COE[0:, 0:1]
e = COE[0:, 1:2]
i = COE[0:, 2:3]
W = COE[0:, 3:4]
w = COE[0:, 4:5]
nu = COE[0:, 5:6]
# x,y components of eccentricity vector
f = e * np.cos(w + W)
g = e * np.sin(w + W)
# x,y components of ascending node vector
h = np.tan(i/2.) * np.cos(W)
k = np.tan(i/2.) * np.sin(W)
# true longitude
L = np.mod(W+w+nu, 2*np.pi)
return np.concatenate((p, f, g, h, k, L), 1)
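# Worked example (illustrative addition, not part of the original module): a
# circular equatorial orbit has e = i = 0, so f = g = h = k = 0 and the true
# longitude reduces to L = W + w + nu.
if __name__ == "__main__":
    coe = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 4]])
    print(coe2mee(coe))  # -> [[1. 0. 0. 0. 0. 0.78539816]] up to print formatting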
|
[
"numpy.tan",
"numpy.cos",
"numpy.concatenate",
"numpy.sin",
"numpy.mod"
] |
[((852, 881), 'numpy.mod', 'np.mod', (['(W + w + nu)', '(2 * np.pi)'], {}), '(W + w + nu, 2 * np.pi)\n', (858, 881), True, 'import numpy as np\n'), ((888, 925), 'numpy.concatenate', 'np.concatenate', (['(p, f, g, h, k, L)', '(1)'], {}), '((p, f, g, h, k, L), 1)\n', (902, 925), True, 'import numpy as np\n'), ((669, 682), 'numpy.cos', 'np.cos', (['(w + W)'], {}), '(w + W)\n', (675, 682), True, 'import numpy as np\n'), ((695, 708), 'numpy.sin', 'np.sin', (['(w + W)'], {}), '(w + W)\n', (701, 708), True, 'import numpy as np\n'), ((764, 779), 'numpy.tan', 'np.tan', (['(i / 2.0)'], {}), '(i / 2.0)\n', (770, 779), True, 'import numpy as np\n'), ((779, 788), 'numpy.cos', 'np.cos', (['W'], {}), '(W)\n', (785, 788), True, 'import numpy as np\n'), ((797, 812), 'numpy.tan', 'np.tan', (['(i / 2.0)'], {}), '(i / 2.0)\n', (803, 812), True, 'import numpy as np\n'), ((812, 821), 'numpy.sin', 'np.sin', (['W'], {}), '(W)\n', (818, 821), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
import zarr
from magicgui import magic_factory
import pathlib
import napari
def viterbrain_reader(path: str) -> list:
with open(path, "rb") as handle:
viterbi = pickle.load(handle)
layer_labels = zarr.open(viterbi.fragment_path)
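    # The raw image volume sits next to the fragment labels: replace the
    # final 12 characters of the fragment path with the '.zarr' extension.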
image_path = viterbi.fragment_path[:-12] + ".zarr"
layer_image = zarr.open(image_path)
scale = viterbi.resolution
meta_labels = {"name": "fragments", "scale": scale}
meta_image = {"name": "image", "scale": scale}
return [(layer_image, meta_image, "image"), (layer_labels, meta_labels, "labels")]
def napari_get_reader(path: str):
parts = path.split(".")
if parts[-1] == "pickle" or parts[-1] == "pkl":
return viterbrain_reader
else:
return None
@magic_factory(
call_button="Trace", start_comp={"max": 2**20}, end_comp={"max": 2**20}
)
def comp_trace(
v: napari.Viewer,
start_comp: int,
end_comp: int,
filename=pathlib.Path("/some/path.pickle"),
) -> None:
with open(filename, "rb") as handle:
viterbi = pickle.load(handle)
def comp2point(comp: int) -> list:
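        # Map a component id to a representative 3-D point: fragment
        # components use their stored "point1" endpoint, soma components use
        # the integer centroid of their voxel coordinates.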
state = viterbi.comp_to_states[comp][0]
if viterbi.nxGraph.nodes[state]["type"] == "fragment":
return viterbi.nxGraph.nodes[state]["point1"]
else:
coords = viterbi.soma_fragment2coords[comp]
centroid = np.mean(coords, axis=0)
centroid = [int(c) for c in centroid]
return centroid
start_pt = comp2point(start_comp)
end_pt = comp2point(end_comp)
print(f"tracing from {start_pt} to {end_pt}")
path = viterbi.shortest_path(start_pt, end_pt)
v.add_shapes(
path,
shape_type="path",
edge_color="r",
edge_width=1,
name=f"trace {start_comp} to {end_comp}",
scale=viterbi.resolution,
)
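# Hedged usage sketch (illustrative addition; the paths are hypothetical).
# napari_get_reader dispatches purely on the file extension, so this runs
# without the files existing:
if __name__ == "__main__":
    assert napari_get_reader("traces/viterbrain.pkl") is viterbrain_reader
    assert napari_get_reader("volume.zarr") is None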
|
[
"numpy.mean",
"pathlib.Path",
"pickle.load",
"zarr.open",
"magicgui.magic_factory"
] |
[((965, 1060), 'magicgui.magic_factory', 'magic_factory', ([], {'call_button': '"""Trace"""', 'start_comp': "{'max': 2 ** 20}", 'end_comp': "{'max': 2 ** 20}"}), "(call_button='Trace', start_comp={'max': 2 ** 20}, end_comp={\n 'max': 2 ** 20})\n", (978, 1060), False, 'from magicgui import magic_factory\n'), ((419, 451), 'zarr.open', 'zarr.open', (['viterbi.fragment_path'], {}), '(viterbi.fragment_path)\n', (428, 451), False, 'import zarr\n'), ((525, 546), 'zarr.open', 'zarr.open', (['image_path'], {}), '(image_path)\n', (534, 546), False, 'import zarr\n'), ((1149, 1182), 'pathlib.Path', 'pathlib.Path', (['"""/some/path.pickle"""'], {}), "('/some/path.pickle')\n", (1161, 1182), False, 'import pathlib\n'), ((379, 398), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (390, 398), False, 'import pickle\n'), ((1254, 1273), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1265, 1273), False, 'import pickle\n'), ((1576, 1599), 'numpy.mean', 'np.mean', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (1583, 1599), True, 'import numpy as np\n')]
|