code | apis | extract_api |
---|---|---|
# -*- coding: utf-8 -*-
# @Time : 23.03.21 14:42
# @Author : sing_sd
import numpy as np
import matplotlib.pyplot as plt
from rdp import rdp
import pickle
import src.clustering.COStransforms as ct
from frechetdist import frdist
ref = {'lon': 12.0, 'lat': 54.35, 'ECEF': ct.WGS84toECEF(12.0, 54.35)}
colour_array = ["r", "g", "b", "y", "c", "m", "#9475FC", "k"] # an extra k
# pylab.ion()
# pylab.clf()
########## A graph association method using the discrete Frechet distance (frdist)
def assign_to_graph(data1):
ENUcoord, nodes, edges = get_data_nodes_edges(data1)
data = ENUcoord.tolist()
assignments = np.zeros(shape=(len(data), 1), dtype=int)
idx_start = 0
EPSILON = 1000
idx_end = idx_start + data1.shape[0]
ENUcoord_mmsi = ENUcoord[idx_start:idx_end, :]
ENUcoord_subset = rdp(ENUcoord_mmsi, EPSILON)
rdpNodes = np.array(ENUcoord_subset)
cost_matrix = np.zeros((len(rdpNodes) - 1, len(edges)))
start = idx_start
for i in range(1, len(rdpNodes)):
ends = np.where((ENUcoord_mmsi == rdpNodes[i, :]).all(axis=1))[0]
end = ends[np.argmax(ends > start)] + idx_start
for e_idx, e in enumerate(edges):
cost_matrix[i-1, e_idx] = frdist([rdpNodes[i-1, :], rdpNodes[i, :]], [nodes[e[0],:], nodes[e[1],:]])
e_idx = np.argmin(cost_matrix[i-1, :]) # edge id
if min(cost_matrix[i-1, :]) > 25000:
e_idx = len(edges) # out-of-range label, plotted as black
assignments[start:end, 0] = e_idx
start = end
assignments[end, 0] = assignments[end-1, 0]
return assignments #np.array(assignments[:, 0]).reshape(len(assignments),1)
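# --- Illustrative sketch (added, not part of the original pipeline) ---
# frdist compares two polylines of equal length; above it measures how well a
# 2-point RDP segment matches a 2-point graph edge. A minimal, self-contained check:
def _example_frdist_segment_vs_edge():
    segment = [[0.0, 0.0], [1000.0, 0.0]]    # simplified track segment (ENU metres)
    edge = [[0.0, 500.0], [1000.0, 500.0]]   # candidate graph edge, 500 m away
    return frdist(segment, edge)             # discrete Frechet distance, 500.0 here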
def get_data_nodes_edges(data1):
clm_idx = 0
data1 = np.array(data1)
nData1 = data1.shape[0]
lon = np.array(data1[:, clm_idx], dtype=float).reshape([1, nData1])
lat = np.array(data1[:, clm_idx + 1], dtype=float).reshape([1, nData1])
ENUcoord = ct.WGS84toENU(lon, lat, ref)
ENUcoord = np.transpose(ENUcoord)
ENUcoord = np.delete(ENUcoord, np.s_[2], axis=1)
with open("../resources/graph_nodes_refined.pkl", 'rb') as f:
nodesWGS = pickle.load(f)
nodes = ct.WGS84toENU(nodesWGS[:, 0].T, nodesWGS[:, 1].T, ref)
nodes = np.transpose(nodes)
nodes = np.delete(nodes, np.s_[2], axis=1)
with open("../resources/graph_edges_refined.pkl", 'rb') as f:
edges = pickle.load(f)
# i.e., the same format as before: edges = [[1, 2], [0, 1], [0, 3], [0, 4], [1, 4], [4, 6], [0, 5]]
return ENUcoord, nodes, edges
def point2edge(point, edge_start, edge_end):
line_vec = np.subtract(edge_end, edge_start)
pnt_vec = np.subtract(point, edge_start)
line_len = np.linalg.norm(line_vec)
line_unitvec = line_vec / np.linalg.norm(line_vec)
pnt_vec_scaled = np.multiply(pnt_vec, 1.0 / line_len)
t = np.dot(line_unitvec, pnt_vec_scaled)
if t < 0.0:
t = 0.0
elif t > 1.0:
t = 1.0
nearest = np.multiply(line_vec, t)
distance = np.linalg.norm(np.subtract(nearest, pnt_vec))
return distance
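# Illustrative check of point2edge (added example): the distance from (3, 4) to the
# segment (0, 0)-(10, 0) is 4.0; for points whose projection falls outside the segment,
# t is clamped so the distance to the nearest endpoint is returned.
def _example_point2edge():
    return point2edge([3.0, 4.0], [0.0, 0.0], [10.0, 0.0])  # -> 4.0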
def cosine_angle(a, b, c): # angle at b
return np.dot(a-b, c-b) / (np.linalg.norm(a-b) * np.linalg.norm(c-b))
def get_slope_edges(edges, nodes):
n_edges = len(edges)
slope_edges = [0.0]*(n_edges+1) # +1 for outlier edge
for e in range(n_edges):
slope_edges[e] = (nodes[edges[e][0]][1] - nodes[edges[e][1]][1]) / (nodes[edges[e][0]][0] - nodes[edges[e][1]][0])
return np.array(slope_edges)
def get_assignment(data1):
DIST_THR = 7000
ENUcoord, nodes, edges = get_data_nodes_edges(data1)
data = ENUcoord.tolist()
cost_matrix = np.zeros((len(data), len(edges)+1))
cost_matrix[:, len(edges)] = DIST_THR + 1
assignments = []
for ii, points in enumerate(data):
for jj, items in enumerate(edges):
start = nodes[edges[jj][0]][0:2]
end = nodes[edges[jj][1]][0:2]
cost_matrix[ii, jj] = point2edge(points, start, end)
e_idx = np.argmin(cost_matrix[ii, :]) # edge id
if min(cost_matrix[ii, :]) > DIST_THR: # 5000:
e_idx = len(edges) # out-of-range label, plotted as black
assignments.append((ii, e_idx))
assignments = np.array(assignments)
EPSILON = 500 #1000
if data1.shape[1] == 2:
idx_start = 0
idx_end = data1.shape[0]
ENUcoord_mmsi = ENUcoord[idx_start:idx_end, :]
ENUcoord_subset = rdp(ENUcoord_mmsi, EPSILON)
rdpNodes = np.array(ENUcoord_subset)
start = idx_start
slope_edges = get_slope_edges(edges, nodes)
for i in range(1, len(rdpNodes)):
ends = np.where((ENUcoord_mmsi == rdpNodes[i, :]).all(axis=1))[0]
end = ends[np.argmax(ends > start)] + idx_start
unique_labels = np.unique(assignments[start:end, 1]) # numpy unique does not preserve order
max_num = np.inf
slope_rdpnodes = (rdpNodes[i, 1] - rdpNodes[i - 1, 1]) / (rdpNodes[i, 0] - rdpNodes[i - 1, 0])
label = unique_labels[np.argmin(np.abs(slope_rdpnodes - slope_edges[unique_labels]))] # unique_labels[0]
for j in unique_labels:
if max(cost_matrix[start,j], cost_matrix[end, j]) < DIST_THR:
if abs(slope_rdpnodes - slope_edges[j]) < max_num:
# and not (-0.8 < slope_edges[j]*slope_rdpnodes < -1.2):
label = j
max_num = abs(slope_rdpnodes - slope_edges[j])
assignments[start:end, 1] = label
start = end
assignments[end, 1] = assignments[end - 1, 1] # assign the cluster number to last point
else:
print("need to be updated like single vessel data code in Else loop")
exit(0)
angle_thr = 0.2
# mmsi_index = data1.columns.get_loc("mmsi")
for mmsi in data1.mmsi.unique():
idx_start = np.argmax(data1["mmsi"] == mmsi)
idx_end = idx_start + np.sum(data1["mmsi"] == mmsi)
ENUcoord_mmsi = ENUcoord[idx_start:idx_end, :]
ENUcoord_subset = rdp(ENUcoord_mmsi, EPSILON)
rdpNodes = np.array(ENUcoord_subset)
start = idx_start
for i in range(1, len(rdpNodes)):
ends = np.where((ENUcoord_mmsi == rdpNodes[i, :]).all(axis=1))[0]
end = ends[np.argmax(ends>start)] + idx_start
unique_labels = np.unique(assignments[start:end, 1])
label = unique_labels[0]
max_num = 0
for j in unique_labels:
if max_num < sum(assignments[start:end, 1] == j):
label = j
max_num = sum(assignments[start:end, 1] == j)
# if label != len(edges) and \
# (-angle_thr < cosine_angle(rdpNodes[i-1, :], nodes[edges[label][0]][0:2],
# rdpNodes[i, :]) < angle_thr and \
# -angle_thr < cosine_angle(rdpNodes[i-1, :], nodes[edges[label][1]][0:2],
# rdpNodes[i, :]) < angle_thr):
# label = len(edges)
assignments[start:end, 1] = label
start = end
assignments[end, 1] = assignments[end - 1, 1]
print('data associated')
return assignments[:, 1] #np.array(assignments[:, 1]).reshape(len(assignments),1)
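# Hedged usage note (added): `data1` is expected to be a pandas DataFrame whose first
# two columns are longitude and latitude (and, for the multi-vessel branch, an 'mmsi'
# column); get_assignment(data1) returns one edge label per row, where the label
# len(edges) marks points farther than DIST_THR from every edge (plotted black).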
|
[
"src.clustering.COStransforms.WGS84toECEF",
"frechetdist.frdist",
"numpy.multiply",
"numpy.subtract",
"numpy.sum",
"numpy.argmax",
"numpy.abs",
"src.clustering.COStransforms.WGS84toENU",
"numpy.transpose",
"rdp.rdp",
"numpy.argmin",
"pickle.load",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.delete",
"numpy.unique"
] |
[((294, 321), 'src.clustering.COStransforms.WGS84toECEF', 'ct.WGS84toECEF', (['(12.0)', '(54.35)'], {}), '(12.0, 54.35)\n', (308, 321), True, 'import src.clustering.COStransforms as ct\n'), ((813, 840), 'rdp.rdp', 'rdp', (['ENUcoord_mmsi', 'EPSILON'], {}), '(ENUcoord_mmsi, EPSILON)\n', (816, 840), False, 'from rdp import rdp\n'), ((856, 881), 'numpy.array', 'np.array', (['ENUcoord_subset'], {}), '(ENUcoord_subset)\n', (864, 881), True, 'import numpy as np\n'), ((1702, 1717), 'numpy.array', 'np.array', (['data1'], {}), '(data1)\n', (1710, 1717), True, 'import numpy as np\n'), ((1910, 1938), 'src.clustering.COStransforms.WGS84toENU', 'ct.WGS84toENU', (['lon', 'lat', 'ref'], {}), '(lon, lat, ref)\n', (1923, 1938), True, 'import src.clustering.COStransforms as ct\n'), ((1954, 1976), 'numpy.transpose', 'np.transpose', (['ENUcoord'], {}), '(ENUcoord)\n', (1966, 1976), True, 'import numpy as np\n'), ((1992, 2029), 'numpy.delete', 'np.delete', (['ENUcoord', 'np.s_[2]'], {'axis': '(1)'}), '(ENUcoord, np.s_[2], axis=1)\n', (2001, 2029), True, 'import numpy as np\n'), ((2143, 2197), 'src.clustering.COStransforms.WGS84toENU', 'ct.WGS84toENU', (['nodesWGS[:, 0].T', 'nodesWGS[:, 1].T', 'ref'], {}), '(nodesWGS[:, 0].T, nodesWGS[:, 1].T, ref)\n', (2156, 2197), True, 'import src.clustering.COStransforms as ct\n'), ((2210, 2229), 'numpy.transpose', 'np.transpose', (['nodes'], {}), '(nodes)\n', (2222, 2229), True, 'import numpy as np\n'), ((2242, 2276), 'numpy.delete', 'np.delete', (['nodes', 'np.s_[2]'], {'axis': '(1)'}), '(nodes, np.s_[2], axis=1)\n', (2251, 2276), True, 'import numpy as np\n'), ((2572, 2605), 'numpy.subtract', 'np.subtract', (['edge_end', 'edge_start'], {}), '(edge_end, edge_start)\n', (2583, 2605), True, 'import numpy as np\n'), ((2620, 2650), 'numpy.subtract', 'np.subtract', (['point', 'edge_start'], {}), '(point, edge_start)\n', (2631, 2650), True, 'import numpy as np\n'), ((2666, 2690), 'numpy.linalg.norm', 'np.linalg.norm', (['line_vec'], {}), '(line_vec)\n', (2680, 2690), True, 'import numpy as np\n'), ((2767, 2803), 'numpy.multiply', 'np.multiply', (['pnt_vec', '(1.0 / line_len)'], {}), '(pnt_vec, 1.0 / line_len)\n', (2778, 2803), True, 'import numpy as np\n'), ((2812, 2848), 'numpy.dot', 'np.dot', (['line_unitvec', 'pnt_vec_scaled'], {}), '(line_unitvec, pnt_vec_scaled)\n', (2818, 2848), True, 'import numpy as np\n'), ((2929, 2953), 'numpy.multiply', 'np.multiply', (['line_vec', 't'], {}), '(line_vec, t)\n', (2940, 2953), True, 'import numpy as np\n'), ((3434, 3455), 'numpy.array', 'np.array', (['slope_edges'], {}), '(slope_edges)\n', (3442, 3455), True, 'import numpy as np\n'), ((4175, 4196), 'numpy.array', 'np.array', (['assignments'], {}), '(assignments)\n', (4183, 4196), True, 'import numpy as np\n'), ((1304, 1336), 'numpy.argmin', 'np.argmin', (['cost_matrix[i - 1, :]'], {}), '(cost_matrix[i - 1, :])\n', (1313, 1336), True, 'import numpy as np\n'), ((2116, 2130), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2127, 2130), False, 'import pickle\n'), ((2360, 2374), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2371, 2374), False, 'import pickle\n'), ((2721, 2745), 'numpy.linalg.norm', 'np.linalg.norm', (['line_vec'], {}), '(line_vec)\n', (2735, 2745), True, 'import numpy as np\n'), ((2984, 3013), 'numpy.subtract', 'np.subtract', (['nearest', 'pnt_vec'], {}), '(nearest, pnt_vec)\n', (2995, 3013), True, 'import numpy as np\n'), ((3088, 3108), 'numpy.dot', 'np.dot', (['(a - b)', '(c - b)'], {}), '(a - b, c - b)\n', (3094, 3108), True, 'import numpy as np\n'), ((3963, 
3992), 'numpy.argmin', 'np.argmin', (['cost_matrix[ii, :]'], {}), '(cost_matrix[ii, :])\n', (3972, 3992), True, 'import numpy as np\n'), ((4387, 4414), 'rdp.rdp', 'rdp', (['ENUcoord_mmsi', 'EPSILON'], {}), '(ENUcoord_mmsi, EPSILON)\n', (4390, 4414), False, 'from rdp import rdp\n'), ((4434, 4459), 'numpy.array', 'np.array', (['ENUcoord_subset'], {}), '(ENUcoord_subset)\n', (4442, 4459), True, 'import numpy as np\n'), ((1212, 1290), 'frechetdist.frdist', 'frdist', (['[rdpNodes[i - 1, :], rdpNodes[i, :]]', '[nodes[e[0], :], nodes[e[1], :]]'], {}), '([rdpNodes[i - 1, :], rdpNodes[i, :]], [nodes[e[0], :], nodes[e[1], :]])\n', (1218, 1290), False, 'from frechetdist import frdist\n'), ((1756, 1796), 'numpy.array', 'np.array', (['data1[:, clm_idx]'], {'dtype': 'float'}), '(data1[:, clm_idx], dtype=float)\n', (1764, 1796), True, 'import numpy as np\n'), ((1828, 1872), 'numpy.array', 'np.array', (['data1[:, clm_idx + 1]'], {'dtype': 'float'}), '(data1[:, clm_idx + 1], dtype=float)\n', (1836, 1872), True, 'import numpy as np\n'), ((3108, 3129), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (3122, 3129), True, 'import numpy as np\n'), ((3130, 3151), 'numpy.linalg.norm', 'np.linalg.norm', (['(c - b)'], {}), '(c - b)\n', (3144, 3151), True, 'import numpy as np\n'), ((4746, 4782), 'numpy.unique', 'np.unique', (['assignments[start:end, 1]'], {}), '(assignments[start:end, 1])\n', (4755, 4782), True, 'import numpy as np\n'), ((5858, 5890), 'numpy.argmax', 'np.argmax', (["(data1['mmsi'] == mmsi)"], {}), "(data1['mmsi'] == mmsi)\n", (5867, 5890), True, 'import numpy as np\n'), ((6044, 6071), 'rdp.rdp', 'rdp', (['ENUcoord_mmsi', 'EPSILON'], {}), '(ENUcoord_mmsi, EPSILON)\n', (6047, 6071), False, 'from rdp import rdp\n'), ((6095, 6120), 'numpy.array', 'np.array', (['ENUcoord_subset'], {}), '(ENUcoord_subset)\n', (6103, 6120), True, 'import numpy as np\n'), ((1095, 1118), 'numpy.argmax', 'np.argmax', (['(ends > start)'], {}), '(ends > start)\n', (1104, 1118), True, 'import numpy as np\n'), ((5925, 5954), 'numpy.sum', 'np.sum', (["(data1['mmsi'] == mmsi)"], {}), "(data1['mmsi'] == mmsi)\n", (5931, 5954), True, 'import numpy as np\n'), ((6374, 6410), 'numpy.unique', 'np.unique', (['assignments[start:end, 1]'], {}), '(assignments[start:end, 1])\n', (6383, 6410), True, 'import numpy as np\n'), ((4681, 4704), 'numpy.argmax', 'np.argmax', (['(ends > start)'], {}), '(ends > start)\n', (4690, 4704), True, 'import numpy as np\n'), ((5002, 5053), 'numpy.abs', 'np.abs', (['(slope_rdpnodes - slope_edges[unique_labels])'], {}), '(slope_rdpnodes - slope_edges[unique_labels])\n', (5008, 5053), True, 'import numpy as np\n'), ((6307, 6330), 'numpy.argmax', 'np.argmax', (['(ends > start)'], {}), '(ends > start)\n', (6316, 6330), True, 'import numpy as np\n')]
|
import math
from typing import Tuple
import numpy as np
import open3d as o3d
import open3d.core as o3c
import pytest
from dq3d import quat, dualquat
import nnrt
import nnrt.geometry as nnrt_geom
from image_processing import compute_normals
from image_processing.numba_cuda.preprocessing import cuda_compute_normal
from image_processing.numpy_cpu.preprocessing import cpu_compute_normal
def generate_xy_plane_depth_image(resolution: Tuple[int, int], depth: int) -> np.ndarray:
image = np.ones(resolution, dtype=np.uint16) * depth
return image
def generate_xy_plane_color_image(resolution: Tuple[int, int], value: Tuple[int, int, int]) -> np.ndarray:
image = np.ndarray((resolution[0], resolution[1], 3), dtype=np.uint8)
image[:, :] = value
return image
def construct_intrinsic_matrix1_3x3():
intrinsics = np.eye(3, dtype=np.float32)
intrinsics[0, 0] = 100.0
intrinsics[1, 1] = 100.0
intrinsics[0, 2] = 50.0
intrinsics[1, 2] = 50.0
return intrinsics
def construct_intrinsic_matrix1_4x4():
intrinsics = np.eye(4, dtype=np.float32)
intrinsics[0, 0] = 100.0
intrinsics[1, 1] = 100.0
intrinsics[0, 2] = 50.0
intrinsics[1, 2] = 50.0
return intrinsics
def construct_test_volume1(device=o3d.core.Device('cuda:0')):
# initialize volume
voxel_size = 0.01 # 1 cm voxel size
sdf_truncation_distance = 0.02 # truncation distance = 2cm
block_resolution = 8 # 8^3 voxel blocks
initial_block_count = 128 # initially allocated number of voxel blocks
volume = nnrt.geometry.WarpableTSDFVoxelGrid(
{
'tsdf': o3d.core.Dtype.Float32,
'weight': o3d.core.Dtype.UInt16,
'color': o3d.core.Dtype.UInt16
},
voxel_size=voxel_size,
sdf_trunc=sdf_truncation_distance,
block_resolution=block_resolution,
block_count=initial_block_count,
device=device)
# generate image
image_width = 100
image_height = 100
image_resolution = (image_width, image_height)
depth = 50 # mm
depth_image = generate_xy_plane_depth_image(image_resolution, depth)
depth_image_gpu = o3d.t.geometry.Image(o3c.Tensor(depth_image, device=device))
value = (100, 100, 100)
color_image = generate_xy_plane_color_image(image_resolution, value)
color_image_gpu = o3d.t.geometry.Image(o3c.Tensor(color_image, device=device))
# set up matrix parameters
intrinsics = construct_intrinsic_matrix1_3x3()
intrinsics_open3d_gpu = o3c.Tensor(intrinsics, device=device)
extrinsics_open3d_gpu = o3c.Tensor(np.eye(4, dtype=np.float32), device=device)
# integrate volume
volume.integrate(depth_image_gpu, color_image_gpu, intrinsics_open3d_gpu, extrinsics_open3d_gpu, 1000.0, 3.0)
return volume
@pytest.mark.parametrize("device", [o3d.core.Device('cuda:0'), o3d.core.Device('cpu:0')])
def test_integrate_warped_simple_motion_dq(device):
camera_rotation = np.ascontiguousarray(np.eye(3, dtype=np.float32))
camera_translation = np.ascontiguousarray(np.zeros(3, dtype=np.float32))
# we need at least four nodes this time, otherwise the psdf computation will consider the voxel invalid and produce "NaN".
# Make it five.
nodes = np.array([[0.0, 0.0, 0.05],
[0.02, 0.0, 0.05],
[-0.02, 0.0, 0.05],
[0.00, 0.02, 0.05],
[0.00, -0.02, 0.05]],
dtype=np.float32)
volume = construct_test_volume1(device)
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np_originals = voxel_tsdf_and_weights.cpu().numpy()
# the first node moves 1 cm along the negative z axis (towards the camera).
node_dual_quaternions_dq3d = [dualquat(quat.identity(), quat(1.0, 0.0, 0.0, -0.005))] + [dualquat(quat.identity())] * (len(nodes) - 1)
node_dual_quaternions = np.array([np.concatenate((dq.real.data, dq.dual.data)) for dq in node_dual_quaternions_dq3d])
depth = 50 # mm
image_width = 100
image_height = 100
image_resolution = (image_width, image_height)
depth_image = generate_xy_plane_depth_image(image_resolution, depth)
# let's imagine that the central surface point is 1 cm closer to the camera as well, so we alter the depth
# to 40 mm there. Make the motion cease at the other four nodes, i.e. their depth should remain at 50.
# We can make a radial "pinch" in the center of the depth image.
# For our predefined camera (fx = 100 px) at 50 mm depth, 1 px = 0.0005 m, and the nodes sit at a 0.02 m radius,
# which puts our pixel radius at 0.02 / 0.0005 = 40 px
# (a standalone check of this arithmetic is sketched after this test function).
pinch_diameter = 40
pinch_radius = pinch_diameter // 2
pinch_height = 10
y_coordinates = np.linspace(-1, 1, pinch_diameter)[None, :] * pinch_height
x_coordinates = np.linspace(-1, 1, pinch_diameter)[:, None] * pinch_height
delta = -pinch_height + np.sqrt(x_coordinates ** 2 + y_coordinates ** 2)
half_image_width = image_width // 2
half_image_height = image_height // 2
# @formatter:off
depth_image[half_image_height - pinch_radius:half_image_height + pinch_radius,
half_image_width - pinch_radius:half_image_width + pinch_radius] += np.round(delta).astype(np.uint16)
# @formatter:on
# ---- compute normals ----
intrinsic_matrix = construct_intrinsic_matrix1_3x3()
fx, fy, cx, cy = intrinsic_matrix[0, 0], intrinsic_matrix[1, 1], intrinsic_matrix[0, 2], intrinsic_matrix[1, 2]
point_image = nnrt.backproject_depth_ushort(depth_image, fx, fy, cx, cy, 1000.0)
normals = compute_normals(device, point_image)
# ---- compute updates ----
truncation_distance = 0.02 # same value as in construct_test_volume1
node_coverage = 0.05
depth_image_o3d = o3d.t.geometry.Image.from_legacy_image(o3d.geometry.Image(depth_image), device=device)
normals_o3d = o3c.Tensor(normals, dtype=o3c.Dtype.Float32, device=device)
intrinsic_matrix_o3d = o3c.Tensor(intrinsic_matrix, dtype=o3c.Dtype.Float32, device=device)
extrinsic_matrix_o3d = o3c.Tensor.eye(4, dtype=o3c.Dtype.Float32, device=device)
node_dual_quaternions_o3d = o3c.Tensor(node_dual_quaternions, dtype=o3c.Dtype.Float32, device=device)
nodes_o3d = o3c.Tensor(nodes, dtype=o3c.Dtype.Float32, device=device)
node_edges_o3d = o3c.Tensor((1, 1))
cos_voxel_ray_to_normal = volume.integrate_warped_dq(
depth_image_o3d, normals_o3d, intrinsic_matrix_o3d, extrinsic_matrix_o3d,
nodes_o3d, node_edges_o3d, node_dual_quaternions_o3d, node_coverage,
anchor_count=4, minimum_valid_anchor_count=3, depth_scale=1000.0, depth_max=3.0,
compute_anchors_using=nnrt_geom.AnchorComputationMethod.EUCLIDEAN, use_node_distance_thresholding=False)
cos_voxel_ray_to_normal = np.squeeze(cos_voxel_ray_to_normal.cpu().numpy())
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np = voxel_tsdf_and_weights.cpu().numpy()
# voxel in the center of the plane is at 0, 0, 0.05,
# which should coincide with the first and only node
# voxel global position is (0, 0, 5) (in voxels)
# voxel is, presumably, in block 3
# voxel's index in block 0 is 5 * (8*8) = 320
# each block holds 512 voxels
center_plane_voxel_index = 512 + 512 + 512 + 320
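# Worked arithmetic (added, under the assumptions stated above): each 8x8x8 block holds
# 8**3 = 512 voxels, the in-block offset of grid position (x=0, y=0, z=5) is
# z * 8 * 8 = 320, and with three blocks stored before this one the flat index is
# 3 * 512 + 320 = 1856, i.e. the value assigned to center_plane_voxel_index.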
indices_to_test = [center_plane_voxel_index,
center_plane_voxel_index + 1,
center_plane_voxel_index + 8,
center_plane_voxel_index + 16,
center_plane_voxel_index + 64]
# expected values generated with the setup above (the reference implementation).
# Note: if anything about the reference implementation changes, these values need to be re-computed.
# each array row contains:
# u, v, cosine, tsdf, weight
ground_truth_data = np.array([
[50, 50, 0.4970065653324127, 0.0, 0.0],
[71, 50, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 71, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 92, 0.9215041958391356, 0.06362117264804237, 2.0],
[50, 50, 0.4970065653324127, 0.0, 0.0]
])
def check_voxel_at(index, ground_truth):
assert math.isclose(cos_voxel_ray_to_normal[int(ground_truth[0]), int(ground_truth[1])], ground_truth[2], abs_tol=1e-7)
if ground_truth[2] > 0.5:
assert np.allclose(voxel_tsdf_and_weights_np[index], ground_truth[3:])
for index, ground_truth in zip(indices_to_test, ground_truth_data):
check_voxel_at(index, ground_truth)
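# Small standalone check of the pixel-scale arithmetic quoted in the comments above
# (added sketch; assumes the pinhole model of construct_intrinsic_matrix1_3x3 with
# fx = 100 px and the plane at 0.05 m depth):
def _node_radius_in_pixels(node_radius_m: float = 0.02, depth_m: float = 0.05,
                           fx: float = 100.0) -> float:
    metres_per_pixel = depth_m / fx          # 0.0005 m per pixel at this depth
    return node_radius_m / metres_per_pixel  # 40.0 px for the defaults above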
@pytest.mark.parametrize("device", [o3d.core.Device('cuda:0'), o3d.core.Device('cpu:0')])
def test_integrate_warped_simple_motion_mat(device):
camera_rotation = np.ascontiguousarray(np.eye(3, dtype=np.float32))
camera_translation = np.ascontiguousarray(np.zeros(3, dtype=np.float32))
# we need at least four nodes this time, otherwise the psdf computation will consider the voxel invalid and produce "NaN".
# Make it five.
nodes = np.array([[0.0, 0.0, 0.05],
[0.02, 0.0, 0.05],
[-0.02, 0.0, 0.05],
[0.00, 0.02, 0.05],
[0.00, -0.02, 0.05]],
dtype=np.float32)
# voxel size = 0.01 m
volume = construct_test_volume1(device)
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np_originals = voxel_tsdf_and_weights.cpu().numpy()
# the first node moves 1 cm along the negative z axis (towards the camera).
node_dual_quaternions_dq3d = [dualquat(quat.identity(), quat(1.0, 0.0, 0.0, -0.005))] + [dualquat(quat.identity())] * (len(nodes) - 1)
node_rotations_mat = np.array([dq.rotation().to_rotation_matrix().astype(np.float32) for dq in node_dual_quaternions_dq3d])
node_translations_vec = np.array([dq.translation().astype(np.float32) for dq in node_dual_quaternions_dq3d])
depth = 50 # mm
image_width = 100
image_height = 100
image_resolution = (image_width, image_height)
depth_image = generate_xy_plane_depth_image(image_resolution, depth)
color_image = np.zeros((image_height, image_width, 3), dtype=np.uint8)
# let's imagine that the central surface point is 1 cm closer to the camera as well, so we alter the depth
# to 40 mm there. Make the motion cease at the other four nodes, i.e. their depth should remain at 50.
# We can make a radial "pinch" in the center of the depth image.
# For our predefined camera (fx = 100 px) at 50 mm depth, 1 px = 0.0005 m, and the nodes sit at a 0.02 m radius,
# which puts our pixel radius at 0.02 / 0.0005 = 40 px (see the sketch after the first test).
pinch_diameter = 40
pinch_radius = pinch_diameter // 2
pinch_height = 10
y_coordinates = np.linspace(-1, 1, pinch_diameter)[None, :] * pinch_height
x_coordinates = np.linspace(-1, 1, pinch_diameter)[:, None] * pinch_height
delta = -pinch_height + np.sqrt(x_coordinates ** 2 + y_coordinates ** 2)
half_image_width = image_width // 2
half_image_height = image_height // 2
# @formatter:off
depth_image[half_image_height - pinch_radius:half_image_height + pinch_radius,
half_image_width - pinch_radius:half_image_width + pinch_radius] += np.round(delta).astype(np.uint16)
# @formatter:on
# ---- compute normals ----
intrinsic_matrix = construct_intrinsic_matrix1_3x3()
fx, fy, cx, cy = intrinsic_matrix[0, 0], intrinsic_matrix[1, 1], intrinsic_matrix[0, 2], intrinsic_matrix[1, 2]
point_image = nnrt.backproject_depth_ushort(depth_image, fx, fy, cx, cy, 1000.0)
normals = compute_normals(device, point_image)
# ---- compute updates ----
node_coverage = 0.05
depth_image_o3d = o3d.t.geometry.Image.from_legacy_image(o3d.geometry.Image(depth_image), device=device)
color_image_o3d = o3d.t.geometry.Image.from_legacy_image(o3d.geometry.Image(color_image), device=device)
normals_o3d = o3c.Tensor(normals, dtype=o3c.Dtype.Float32, device=device)
intrinsic_matrix_o3d = o3c.Tensor(intrinsic_matrix, dtype=o3c.Dtype.Float32, device=device)
extrinsic_matrix_o3d = o3c.Tensor.eye(4, dtype=o3c.Dtype.Float32, device=device)
node_rotations_o3d = o3c.Tensor(node_rotations_mat, dtype=o3c.Dtype.Float32, device=device)
node_translations_o3d = o3c.Tensor(node_translations_vec, dtype=o3c.Dtype.Float32, device=device)
nodes_o3d = o3c.Tensor(nodes, dtype=o3c.Dtype.Float32, device=device)
edges_o3d = o3c.Tensor((1, 1))
cos_voxel_ray_to_normal = volume.integrate_warped_mat(
depth_image_o3d, color_image_o3d, normals_o3d, intrinsic_matrix_o3d, extrinsic_matrix_o3d,
nodes_o3d, edges_o3d, node_rotations_o3d, node_translations_o3d, node_coverage,
anchor_count=4, minimum_valid_anchor_count=3, depth_scale=1000.0, depth_max=3.0,
compute_anchors_using=nnrt_geom.AnchorComputationMethod.EUCLIDEAN, use_node_distance_thresholding=False)
cos_voxel_ray_to_normal = np.squeeze(cos_voxel_ray_to_normal.cpu().numpy())
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np = voxel_tsdf_and_weights.cpu().numpy()
# voxel in the center of the plane is at 0, 0, 0.05,
# which should coincide with the first and only node
# voxel global position is (0, 0, 5) (in voxels)
# voxel is, presumably, in block 3
# voxel's index in block 0 is 5 * (8*8) = 320
# each block holds 512 voxels
center_plane_voxel_index = 512 + 512 + 512 + 320
indices_to_test = [center_plane_voxel_index,
center_plane_voxel_index + 1, # x + 1
center_plane_voxel_index + 8, # y + 1
center_plane_voxel_index + 16, # y + 2
center_plane_voxel_index + 64] # z + 1
# expected values generated with the setup above (the reference implementation).
# Note: if anything about the reference implementation changes, these values need to be re-computed.
# each array row contains:
# v, u, cosine, tsdf, weight
ground_truth_data = np.array([
[50, 50, 0.4970065653324127, 0.0, 0.0],
[71, 50, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 71, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 92, 0.9215041958391356, 0.06362117264804237, 2.0],
[50, 50, 0.4970065653324127, 0.0, 0.0]
])
def check_voxel_at(index, ground_truth):
assert math.isclose(cos_voxel_ray_to_normal[int(ground_truth[0]), int(ground_truth[1])], ground_truth[2], abs_tol=1e-7)
if ground_truth[2] > 0.5:
assert np.allclose(voxel_tsdf_and_weights_np[index], ground_truth[3:])
for index, ground_truth in zip(indices_to_test, ground_truth_data):
check_voxel_at(index, ground_truth)
|
[
"numpy.allclose",
"numpy.ones",
"numpy.round",
"numpy.ndarray",
"open3d.core.Device",
"open3d.geometry.Image",
"image_processing.compute_normals",
"dq3d.quat",
"numpy.linspace",
"nnrt.backproject_depth_ushort",
"open3d.core.Tensor.eye",
"open3d.core.Tensor",
"dq3d.quat.identity",
"numpy.concatenate",
"numpy.zeros",
"numpy.array",
"numpy.eye",
"nnrt.geometry.WarpableTSDFVoxelGrid",
"numpy.sqrt"
] |
[((676, 737), 'numpy.ndarray', 'np.ndarray', (['(resolution[0], resolution[1], 3)'], {'dtype': 'np.uint8'}), '((resolution[0], resolution[1], 3), dtype=np.uint8)\n', (686, 737), True, 'import numpy as np\n'), ((837, 864), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (843, 864), True, 'import numpy as np\n'), ((1059, 1086), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (1065, 1086), True, 'import numpy as np\n'), ((1259, 1284), 'open3d.core.Device', 'o3d.core.Device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1274, 1284), True, 'import open3d as o3d\n'), ((1551, 1842), 'nnrt.geometry.WarpableTSDFVoxelGrid', 'nnrt.geometry.WarpableTSDFVoxelGrid', (["{'tsdf': o3d.core.Dtype.Float32, 'weight': o3d.core.Dtype.UInt16, 'color':\n o3d.core.Dtype.UInt16}"], {'voxel_size': 'voxel_size', 'sdf_trunc': 'sdf_truncation_distance', 'block_resolution': 'block_resolution', 'block_count': 'initial_block_count', 'device': 'device'}), "({'tsdf': o3d.core.Dtype.Float32,\n 'weight': o3d.core.Dtype.UInt16, 'color': o3d.core.Dtype.UInt16},\n voxel_size=voxel_size, sdf_trunc=sdf_truncation_distance,\n block_resolution=block_resolution, block_count=initial_block_count,\n device=device)\n", (1586, 1842), False, 'import nnrt\n'), ((2512, 2549), 'open3d.core.Tensor', 'o3c.Tensor', (['intrinsics'], {'device': 'device'}), '(intrinsics, device=device)\n', (2522, 2549), True, 'import open3d.core as o3c\n'), ((3233, 3362), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.05], [0.02, 0.0, 0.05], [-0.02, 0.0, 0.05], [0.0, 0.02, 0.05],\n [0.0, -0.02, 0.05]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.05], [0.02, 0.0, 0.05], [-0.02, 0.0, 0.05], [0.0, \n 0.02, 0.05], [0.0, -0.02, 0.05]], dtype=np.float32)\n', (3241, 3362), True, 'import numpy as np\n'), ((5507, 5573), 'nnrt.backproject_depth_ushort', 'nnrt.backproject_depth_ushort', (['depth_image', 'fx', 'fy', 'cx', 'cy', '(1000.0)'], {}), '(depth_image, fx, fy, cx, cy, 1000.0)\n', (5536, 5573), False, 'import nnrt\n'), ((5588, 5624), 'image_processing.compute_normals', 'compute_normals', (['device', 'point_image'], {}), '(device, point_image)\n', (5603, 5624), False, 'from image_processing import compute_normals\n'), ((5884, 5943), 'open3d.core.Tensor', 'o3c.Tensor', (['normals'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(normals, dtype=o3c.Dtype.Float32, device=device)\n', (5894, 5943), True, 'import open3d.core as o3c\n'), ((5971, 6039), 'open3d.core.Tensor', 'o3c.Tensor', (['intrinsic_matrix'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(intrinsic_matrix, dtype=o3c.Dtype.Float32, device=device)\n', (5981, 6039), True, 'import open3d.core as o3c\n'), ((6067, 6124), 'open3d.core.Tensor.eye', 'o3c.Tensor.eye', (['(4)'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(4, dtype=o3c.Dtype.Float32, device=device)\n', (6081, 6124), True, 'import open3d.core as o3c\n'), ((6157, 6230), 'open3d.core.Tensor', 'o3c.Tensor', (['node_dual_quaternions'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(node_dual_quaternions, dtype=o3c.Dtype.Float32, device=device)\n', (6167, 6230), True, 'import open3d.core as o3c\n'), ((6247, 6304), 'open3d.core.Tensor', 'o3c.Tensor', (['nodes'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(nodes, dtype=o3c.Dtype.Float32, device=device)\n', (6257, 6304), True, 'import open3d.core as o3c\n'), ((6326, 6344), 'open3d.core.Tensor', 'o3c.Tensor', (['(1, 1)'], {}), '((1, 1))\n', (6336, 6344), True, 'import open3d.core as o3c\n'), ((7846, 
8124), 'numpy.array', 'np.array', (['[[50, 50, 0.4970065653324127, 0.0, 0.0], [71, 50, 0.9784621335214618, \n 0.06499883711342021, 2.0], [50, 71, 0.9784621335214618, \n 0.06499883711342021, 2.0], [50, 92, 0.9215041958391356, \n 0.06362117264804237, 2.0], [50, 50, 0.4970065653324127, 0.0, 0.0]]'], {}), '([[50, 50, 0.4970065653324127, 0.0, 0.0], [71, 50, \n 0.9784621335214618, 0.06499883711342021, 2.0], [50, 71, \n 0.9784621335214618, 0.06499883711342021, 2.0], [50, 92, \n 0.9215041958391356, 0.06362117264804237, 2.0], [50, 50, \n 0.4970065653324127, 0.0, 0.0]])\n', (7854, 8124), True, 'import numpy as np\n'), ((9004, 9133), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.05], [0.02, 0.0, 0.05], [-0.02, 0.0, 0.05], [0.0, 0.02, 0.05],\n [0.0, -0.02, 0.05]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.05], [0.02, 0.0, 0.05], [-0.02, 0.0, 0.05], [0.0, \n 0.02, 0.05], [0.0, -0.02, 0.05]], dtype=np.float32)\n', (9012, 9133), True, 'import numpy as np\n'), ((10142, 10198), 'numpy.zeros', 'np.zeros', (['(image_height, image_width, 3)'], {'dtype': 'np.uint8'}), '((image_height, image_width, 3), dtype=np.uint8)\n', (10150, 10198), True, 'import numpy as np\n'), ((11510, 11576), 'nnrt.backproject_depth_ushort', 'nnrt.backproject_depth_ushort', (['depth_image', 'fx', 'fy', 'cx', 'cy', '(1000.0)'], {}), '(depth_image, fx, fy, cx, cy, 1000.0)\n', (11539, 11576), False, 'import nnrt\n'), ((11591, 11627), 'image_processing.compute_normals', 'compute_normals', (['device', 'point_image'], {}), '(device, point_image)\n', (11606, 11627), False, 'from image_processing import compute_normals\n'), ((11922, 11981), 'open3d.core.Tensor', 'o3c.Tensor', (['normals'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(normals, dtype=o3c.Dtype.Float32, device=device)\n', (11932, 11981), True, 'import open3d.core as o3c\n'), ((12009, 12077), 'open3d.core.Tensor', 'o3c.Tensor', (['intrinsic_matrix'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(intrinsic_matrix, dtype=o3c.Dtype.Float32, device=device)\n', (12019, 12077), True, 'import open3d.core as o3c\n'), ((12105, 12162), 'open3d.core.Tensor.eye', 'o3c.Tensor.eye', (['(4)'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(4, dtype=o3c.Dtype.Float32, device=device)\n', (12119, 12162), True, 'import open3d.core as o3c\n'), ((12188, 12258), 'open3d.core.Tensor', 'o3c.Tensor', (['node_rotations_mat'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(node_rotations_mat, dtype=o3c.Dtype.Float32, device=device)\n', (12198, 12258), True, 'import open3d.core as o3c\n'), ((12287, 12360), 'open3d.core.Tensor', 'o3c.Tensor', (['node_translations_vec'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(node_translations_vec, dtype=o3c.Dtype.Float32, device=device)\n', (12297, 12360), True, 'import open3d.core as o3c\n'), ((12377, 12434), 'open3d.core.Tensor', 'o3c.Tensor', (['nodes'], {'dtype': 'o3c.Dtype.Float32', 'device': 'device'}), '(nodes, dtype=o3c.Dtype.Float32, device=device)\n', (12387, 12434), True, 'import open3d.core as o3c\n'), ((12451, 12469), 'open3d.core.Tensor', 'o3c.Tensor', (['(1, 1)'], {}), '((1, 1))\n', (12461, 12469), True, 'import open3d.core as o3c\n'), ((14036, 14314), 'numpy.array', 'np.array', (['[[50, 50, 0.4970065653324127, 0.0, 0.0], [71, 50, 0.9784621335214618, \n 0.06499883711342021, 2.0], [50, 71, 0.9784621335214618, \n 0.06499883711342021, 2.0], [50, 92, 0.9215041958391356, \n 0.06362117264804237, 2.0], [50, 50, 0.4970065653324127, 0.0, 0.0]]'], {}), '([[50, 50, 0.4970065653324127, 0.0, 0.0], [71, 50, \n 
0.9784621335214618, 0.06499883711342021, 2.0], [50, 71, \n 0.9784621335214618, 0.06499883711342021, 2.0], [50, 92, \n 0.9215041958391356, 0.06362117264804237, 2.0], [50, 50, \n 0.4970065653324127, 0.0, 0.0]])\n', (14044, 14314), True, 'import numpy as np\n'), ((493, 529), 'numpy.ones', 'np.ones', (['resolution'], {'dtype': 'np.uint16'}), '(resolution, dtype=np.uint16)\n', (500, 529), True, 'import numpy as np\n'), ((2177, 2215), 'open3d.core.Tensor', 'o3c.Tensor', (['depth_image'], {'device': 'device'}), '(depth_image, device=device)\n', (2187, 2215), True, 'import open3d.core as o3c\n'), ((2361, 2399), 'open3d.core.Tensor', 'o3c.Tensor', (['color_image'], {'device': 'device'}), '(color_image, device=device)\n', (2371, 2399), True, 'import open3d.core as o3c\n'), ((2589, 2616), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (2595, 2616), True, 'import numpy as np\n'), ((2976, 3003), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (2982, 3003), True, 'import numpy as np\n'), ((3051, 3080), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (3059, 3080), True, 'import numpy as np\n'), ((4921, 4969), 'numpy.sqrt', 'np.sqrt', (['(x_coordinates ** 2 + y_coordinates ** 2)'], {}), '(x_coordinates ** 2 + y_coordinates ** 2)\n', (4928, 4969), True, 'import numpy as np\n'), ((5818, 5849), 'open3d.geometry.Image', 'o3d.geometry.Image', (['depth_image'], {}), '(depth_image)\n', (5836, 5849), True, 'import open3d as o3d\n'), ((2827, 2852), 'open3d.core.Device', 'o3d.core.Device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2842, 2852), True, 'import open3d as o3d\n'), ((2854, 2878), 'open3d.core.Device', 'o3d.core.Device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (2869, 2878), True, 'import open3d as o3d\n'), ((8747, 8774), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (8753, 8774), True, 'import numpy as np\n'), ((8822, 8851), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (8830, 8851), True, 'import numpy as np\n'), ((10912, 10960), 'numpy.sqrt', 'np.sqrt', (['(x_coordinates ** 2 + y_coordinates ** 2)'], {}), '(x_coordinates ** 2 + y_coordinates ** 2)\n', (10919, 10960), True, 'import numpy as np\n'), ((11747, 11778), 'open3d.geometry.Image', 'o3d.geometry.Image', (['depth_image'], {}), '(depth_image)\n', (11765, 11778), True, 'import open3d as o3d\n'), ((11856, 11887), 'open3d.geometry.Image', 'o3d.geometry.Image', (['color_image'], {}), '(color_image)\n', (11874, 11887), True, 'import open3d as o3d\n'), ((8597, 8622), 'open3d.core.Device', 'o3d.core.Device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (8612, 8622), True, 'import open3d as o3d\n'), ((8624, 8648), 'open3d.core.Device', 'o3d.core.Device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (8639, 8648), True, 'import open3d as o3d\n'), ((3933, 3977), 'numpy.concatenate', 'np.concatenate', (['(dq.real.data, dq.dual.data)'], {}), '((dq.real.data, dq.dual.data))\n', (3947, 3977), True, 'import numpy as np\n'), ((4755, 4789), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'pinch_diameter'], {}), '(-1, 1, pinch_diameter)\n', (4766, 4789), True, 'import numpy as np\n'), ((4834, 4868), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'pinch_diameter'], {}), '(-1, 1, pinch_diameter)\n', (4845, 4868), True, 'import numpy as np\n'), ((5228, 5243), 'numpy.round', 'np.round', (['delta'], {}), '(delta)\n', (5236, 5243), True, 'import numpy as np\n'), ((8378, 8441), 
'numpy.allclose', 'np.allclose', (['voxel_tsdf_and_weights_np[index]', 'ground_truth[3:]'], {}), '(voxel_tsdf_and_weights_np[index], ground_truth[3:])\n', (8389, 8441), True, 'import numpy as np\n'), ((10746, 10780), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'pinch_diameter'], {}), '(-1, 1, pinch_diameter)\n', (10757, 10780), True, 'import numpy as np\n'), ((10825, 10859), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'pinch_diameter'], {}), '(-1, 1, pinch_diameter)\n', (10836, 10859), True, 'import numpy as np\n'), ((11231, 11246), 'numpy.round', 'np.round', (['delta'], {}), '(delta)\n', (11239, 11246), True, 'import numpy as np\n'), ((14568, 14631), 'numpy.allclose', 'np.allclose', (['voxel_tsdf_and_weights_np[index]', 'ground_truth[3:]'], {}), '(voxel_tsdf_and_weights_np[index], ground_truth[3:])\n', (14579, 14631), True, 'import numpy as np\n'), ((3799, 3814), 'dq3d.quat.identity', 'quat.identity', ([], {}), '()\n', (3812, 3814), False, 'from dq3d import quat, dualquat\n'), ((3816, 3843), 'dq3d.quat', 'quat', (['(1.0)', '(0.0)', '(0.0)', '(-0.005)'], {}), '(1.0, 0.0, 0.0, -0.005)\n', (3820, 3843), False, 'from dq3d import quat, dualquat\n'), ((9596, 9611), 'dq3d.quat.identity', 'quat.identity', ([], {}), '()\n', (9609, 9611), False, 'from dq3d import quat, dualquat\n'), ((9613, 9640), 'dq3d.quat', 'quat', (['(1.0)', '(0.0)', '(0.0)', '(-0.005)'], {}), '(1.0, 0.0, 0.0, -0.005)\n', (9617, 9640), False, 'from dq3d import quat, dualquat\n'), ((3858, 3873), 'dq3d.quat.identity', 'quat.identity', ([], {}), '()\n', (3871, 3873), False, 'from dq3d import quat, dualquat\n'), ((9655, 9670), 'dq3d.quat.identity', 'quat.identity', ([], {}), '()\n', (9668, 9670), False, 'from dq3d import quat, dualquat\n')]
|
##############################################################################
#
# Copyright (c) 2016, <NAME>
#
# This file is part of arlpy which is released under Simplified BSD License.
# See file LICENSE or go to http://www.opensource.org/licenses/BSD-3-Clause
# for full license details.
#
##############################################################################
"""DTLA support toolbox."""
import os as _os
import numpy as _np
from scipy import signal as _sig
_fs = 1/(1.6e-6*26)
_framelen = 2*26
_channels = 24
_magic = 0xc0de
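# Frame layout (added note, inferred from check() and get_data() below): each frame is
# _framelen = 52 bytes, i.e. 26 uint16 words -- two magic words (0xc0de) followed by one
# sample for each of the 24 channels -- and frames arrive at _fs = 1/(1.6e-6 * 26),
# roughly 24 kHz per channel.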
def check(filename):
"""Check if a file is likely to be a valid DTLA datafile."""
statinfo = _os.stat(filename)
if statinfo.st_size >= 2*2*_channels:
with open(filename, 'rb') as f:
data = _np.fromfile(f, dtype=_np.uint16, count=_framelen//2)
if data[0] == _magic and data[1] == _magic:
return True
return False
def get_sampling_rate(filename=None):
"""Get the sampling rate in Hz."""
return _fs
def get_channels(filename=None):
"""Get the number of available data channels."""
return _channels
def get_data_length(filename):
"""Get the length of the datafile in samples."""
statinfo = _os.stat(filename)
return statinfo.st_size//_framelen
def get_data(filename, channel=None, start=0, length=None, detrend='linear'):
"""Load selected data from DTLA recording.
:param filename: name of the datafile
:param channel: list of channels to read (base 0, None to read all channels)
:param start: sample index to start from
:param length: number of samples to read (None means read all available samples)
:param detrend: processing to be applied to each channel to remove offset/bias
(supported values: ``'linear'``, ``'constant'``, ``None``)
"""
if channel is None:
channel = range(_channels)
elif isinstance(channel, int):
channel = [channel]
if length is None:
length = get_data_length(filename)-start
with open(filename, 'rb') as f:
f.seek(start*_framelen, _os.SEEK_SET)
data = _np.fromfile(f, dtype=_np.uint16, count=_framelen//2*length)
data = _np.reshape(data, [length,_framelen//2])
data = data[:,2:]
data = _np.take(data, channel, axis=1).astype(float)
if len(channel) == 1:
data = data.ravel()
data = 5*data/65536-2.5
if detrend is not None:
data = _sig.detrend(data, axis=0, type=detrend)
return data
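# Hedged usage sketch (added; the filename below is hypothetical):
# if check('recording.dta'):
#     fs = get_sampling_rate()
#     x = get_data('recording.dta', channel=0, start=0, length=int(fs))  # ~1 s of channel 0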
|
[
"os.stat",
"numpy.fromfile",
"numpy.take",
"numpy.reshape",
"scipy.signal.detrend"
] |
[((645, 663), 'os.stat', '_os.stat', (['filename'], {}), '(filename)\n', (653, 663), True, 'import os as _os\n'), ((1210, 1228), 'os.stat', '_os.stat', (['filename'], {}), '(filename)\n', (1218, 1228), True, 'import os as _os\n'), ((2181, 2224), 'numpy.reshape', '_np.reshape', (['data', '[length, _framelen // 2]'], {}), '(data, [length, _framelen // 2])\n', (2192, 2224), True, 'import numpy as _np\n'), ((2109, 2173), 'numpy.fromfile', '_np.fromfile', (['f'], {'dtype': '_np.uint16', 'count': '(_framelen // 2 * length)'}), '(f, dtype=_np.uint16, count=_framelen // 2 * length)\n', (2121, 2173), True, 'import numpy as _np\n'), ((2430, 2470), 'scipy.signal.detrend', '_sig.detrend', (['data'], {'axis': '(0)', 'type': 'detrend'}), '(data, axis=0, type=detrend)\n', (2442, 2470), True, 'from scipy import signal as _sig\n'), ((765, 819), 'numpy.fromfile', '_np.fromfile', (['f'], {'dtype': '_np.uint16', 'count': '(_framelen / 2)'}), '(f, dtype=_np.uint16, count=_framelen / 2)\n', (777, 819), True, 'import numpy as _np\n'), ((2255, 2286), 'numpy.take', '_np.take', (['data', 'channel'], {'axis': '(1)'}), '(data, channel, axis=1)\n', (2263, 2286), True, 'import numpy as _np\n')]
|
from typing import Collection, Dict, List, Sequence, Type, TypeVar
import numpy as np
import pandas as pd
from athenian.api.controllers.features.metric import Metric, MetricInt
from athenian.api.controllers.features.metric_calculator import AnyMetricCalculator, \
BinnedMetricCalculator, \
MetricCalculator, MetricCalculatorEnsemble, SumMetricCalculator
from athenian.api.controllers.miners.github.developer import developer_changed_lines_column, \
developer_identity_column, DeveloperTopic
from athenian.api.controllers.miners.github.pull_request import ReviewResolution
from athenian.api.models.metadata.github import PullRequest, PullRequestComment, \
PullRequestReview, PullRequestReviewComment, PushCommit, \
Release
metric_calculators: Dict[str, Type[MetricCalculator]] = {}
T = TypeVar("T")
def register_metric(topic: DeveloperTopic):
"""Keep track of the developer metric calculators."""
assert isinstance(topic, DeveloperTopic)
def register_with_name(cls: Type[MetricCalculator]):
metric_calculators[topic.value] = cls
return cls
return register_with_name
class DeveloperMetricCalculatorEnsemble(MetricCalculatorEnsemble):
"""MetricCalculatorEnsemble adapted for developers."""
def __init__(self, *metrics: str, quantiles: Sequence[float], quantile_stride: int):
"""Initialize a new instance of ReleaseMetricCalculatorEnsemble class."""
super().__init__(*metrics,
quantiles=quantiles,
quantile_stride=quantile_stride,
class_mapping=metric_calculators)
class DeveloperBinnedMetricCalculator(BinnedMetricCalculator):
"""BinnedMetricCalculator adapted for developers."""
ensemble_class = DeveloperMetricCalculatorEnsemble
class DeveloperTopicCounter(SumMetricCalculator[int]):
"""Count all `topic` events in each time interval."""
may_have_negative_values = False
metric = MetricInt
timestamp_column: str
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
result = np.full((len(min_times), len(facts)), self.nan, self.dtype)
column = facts[self.timestamp_column].astype(min_times.dtype, copy=False).values
column_in_range = (min_times[:, None] <= column) & (column < max_times[:, None])
result[column_in_range] = 1
return result
class DeveloperTopicSummator(SumMetricCalculator[int]):
"""Sum all `topic` events in each time interval."""
may_have_negative_values = False
metric = MetricInt
topic_column: str
timestamp_column: str
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
result = np.full((len(min_times), len(facts)), self.nan, self.dtype)
topic_column = facts[self.topic_column].values
ts_column = facts[self.timestamp_column].values
column_in_range = (min_times[:, None] <= ts_column) & (ts_column < max_times[:, None])
for result_dim, column_in_range_dim in zip(result, column_in_range):
result_dim[column_in_range_dim] = topic_column[column_in_range_dim]
return result
@register_metric(DeveloperTopic.commits_pushed)
class CommitsPushedCounter(DeveloperTopicCounter):
"""Calculate "dev-commits-pushed" metric."""
timestamp_column = PushCommit.committed_date.name
@register_metric(DeveloperTopic.lines_changed)
class LinesChangedCounter(DeveloperTopicSummator):
"""Calculate "dev-lines-changed" metric."""
topic_column = developer_changed_lines_column
timestamp_column = PushCommit.committed_date.name
@register_metric(DeveloperTopic.active)
class ActiveCounter(MetricCalculator[int]):
"""Calculate "dev-active" metric."""
ACTIVITY_DAYS_THRESHOLD_DENSITY = 0.2
may_have_negative_values = False
metric = MetricInt
def _value(self, samples: np.ndarray) -> Metric[int]:
if len(samples) > 0:
days = samples[0] % 1000000
active = len(np.unique(samples // 1000000))
else:
days = 1
active = 0
assert days > 0
value = int(active / days > self.ACTIVITY_DAYS_THRESHOLD_DENSITY)
return self.metric.from_fields(True, value, None, None)
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
column = facts[PushCommit.committed_date.name].dt.floor(freq="D").values
column_in_range = (min_times[:, None] <= column) & (column < max_times[:, None])
timestamps = np.repeat(column[None, :], len(min_times), axis=0)
result = timestamps.view(int)
lengths = (max_times - min_times).astype("timedelta64[D]").view(int)
result += lengths[:, None]
result[~column_in_range] = self.nan
return result
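# Added note on the packing trick in ActiveCounter (as read from _analyze/_value): each
# in-range sample stores the day-floored commit timestamp viewed as integer nanoseconds
# (always a multiple of 1_000_000) plus the window length in days, so _value() recovers
# the window length with `samples[0] % 1000000` and counts distinct active days via
# `np.unique(samples // 1000000)` before comparing their ratio to the density threshold.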
@register_metric(DeveloperTopic.active0)
class Active0Counter(AnyMetricCalculator[int]):
"""Calculate "dev-active0" metric."""
deps = (ActiveCounter,)
may_have_negative_values = False
metric = MetricInt
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
return self._calcs[0].peek
@register_metric(DeveloperTopic.prs_created)
class PRsCreatedCounter(DeveloperTopicCounter):
"""Calculate "dev-prs-created" metric."""
timestamp_column = PullRequest.created_at.name
@register_metric(DeveloperTopic.prs_merged)
class PRsMergedCounter(DeveloperTopicCounter):
"""Calculate "dev-prs-merged" metric."""
timestamp_column = PullRequest.merged_at.name
@register_metric(DeveloperTopic.releases)
class ReleasesCounter(DeveloperTopicCounter):
"""Calculate "dev-releases" metric."""
timestamp_column = Release.published_at.name
@register_metric(DeveloperTopic.regular_pr_comments)
class RegularPRCommentsCounter(DeveloperTopicCounter):
"""Calculate "dev-regular-pr-comments" metric."""
timestamp_column = PullRequestComment.created_at.name
@register_metric(DeveloperTopic.review_pr_comments)
class ReviewPRCommentsCounter(DeveloperTopicCounter):
"""Calculate "dev-review-pr-comments" metric."""
timestamp_column = PullRequestReviewComment.created_at.name
@register_metric(DeveloperTopic.pr_comments)
class PRCommentsCounter(DeveloperTopicCounter):
"""Calculate "dev-pr-comments" metric."""
timestamp_column = "created_at"
@register_metric(DeveloperTopic.prs_reviewed)
class PRReviewedCounter(SumMetricCalculator[int]):
"""Calculate "dev-prs-reviewed" metric."""
may_have_negative_values = False
metric = MetricInt
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
result = np.full((len(min_times), len(facts)), self.nan, self.dtype)
column = facts[PullRequestReview.submitted_at.name].values
column_in_range = (min_times[:, None] <= column) & (column < max_times[:, None])
duplicated = facts.duplicated([
PullRequestReview.pull_request_node_id.name, developer_identity_column,
]).values
column_in_range[np.broadcast_to(duplicated[None, :], result.shape)] = False
result[column_in_range] = 1
return result
@register_metric(DeveloperTopic.reviews)
class ReviewsCounter(DeveloperTopicCounter):
"""Calculate "dev-reviews" metric."""
timestamp_column = PullRequestReview.submitted_at.name
class ReviewStatesCounter(SumMetricCalculator[int]):
"""Count reviews with the specified outcome in `state`."""
may_have_negative_values = False
metric = MetricInt
state = None
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
result = np.full((len(min_times), len(facts)), self.nan, self.dtype)
column = facts[PullRequestReview.submitted_at.name].values
column_in_range = (min_times[:, None] <= column) & (column < max_times[:, None])
wrong_state = facts[PullRequestReview.state.name].values != self.state.value
column_in_range[np.broadcast_to(wrong_state[None, :], result.shape)] = False
result[column_in_range] = 1
return result
@register_metric(DeveloperTopic.review_approvals)
class ApprovalsCounter(ReviewStatesCounter):
"""Calculate "dev-review-approved" metric."""
state = ReviewResolution.APPROVED
@register_metric(DeveloperTopic.review_rejections)
class RejectionsCounter(ReviewStatesCounter):
"""Calculate "dev-review-rejected" metric."""
state = ReviewResolution.CHANGES_REQUESTED
@register_metric(DeveloperTopic.review_neutrals)
class NeutralReviewsCounter(ReviewStatesCounter):
"""Calculate "dev-review-neutrals" metric."""
state = ReviewResolution.COMMENTED
@register_metric(DeveloperTopic.worked)
class WorkedCounter(AnyMetricCalculator[int]):
"""Calculate "dev-worked" metric."""
deps = (
PRsCreatedCounter,
PRsMergedCounter,
ReleasesCounter,
CommitsPushedCounter,
ReviewsCounter,
RegularPRCommentsCounter,
)
may_have_negative_values = False
metric = MetricInt
def _analyze(self,
facts: pd.DataFrame,
min_times: np.ndarray,
max_times: np.ndarray,
**kwargs) -> np.array:
result = np.full((len(min_times), len(facts)), 0, self.dtype)
for calc in self._calcs:
result |= calc.peek > 0
result[result == 0] = self.nan
return result
def group_actions_by_developers(devs: Sequence[Collection[str]],
df: pd.DataFrame,
) -> List[np.ndarray]:
"""Group developer actions by developer groups."""
indexes = []
identities = df[developer_identity_column].values.astype("S")
for group in devs:
if len(group) == 1:
dev = next(iter(group))
indexes.append(np.nonzero(identities == dev.encode())[0])
continue
if isinstance(group, set):
group = list(group)
indexes.append(np.nonzero(np.in1d(identities, np.array(group, dtype="S")))[0])
return indexes
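# Hedged usage sketch (added; the frame below is illustrative, the real caller passes
# mined developer data):
# df = pd.DataFrame({developer_identity_column: ["alice", "bob", "alice"]})
# group_actions_by_developers([{"alice"}, {"alice", "bob"}], df)
# -> [array([0, 2]), array([0, 1, 2])]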
|
[
"typing.TypeVar",
"numpy.broadcast_to",
"numpy.array",
"numpy.unique"
] |
[((808, 820), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (815, 820), False, 'from typing import Collection, Dict, List, Sequence, Type, TypeVar\n'), ((7516, 7566), 'numpy.broadcast_to', 'np.broadcast_to', (['duplicated[None, :]', 'result.shape'], {}), '(duplicated[None, :], result.shape)\n', (7531, 7566), True, 'import numpy as np\n'), ((8544, 8595), 'numpy.broadcast_to', 'np.broadcast_to', (['wrong_state[None, :]', 'result.shape'], {}), '(wrong_state[None, :], result.shape)\n', (8559, 8595), True, 'import numpy as np\n'), ((4206, 4235), 'numpy.unique', 'np.unique', (['(samples // 1000000)'], {}), '(samples // 1000000)\n', (4215, 4235), True, 'import numpy as np\n'), ((10604, 10630), 'numpy.array', 'np.array', (['group'], {'dtype': '"""S"""'}), "(group, dtype='S')\n", (10612, 10630), True, 'import numpy as np\n')]
|
'''
Test the Lomb-Scargle spectral analysis functions
'''
import numpy as np
from .. import LombScargle as LS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from ..Tools.mode import mode
from ..Tools.DetectGaps import DetectGaps
from ..Spectrogram.SpectrogramPlotter import SpectrogramPlotter
def Spectrum():
#pick two frequencies
f0 = 0.002
f1 = 0.005
#amplitudes
A0 = 2.0
A1 = 1.5
#phases
p0 = np.pi/2.0
p1 = 0.0
#time series
t = np.arange(1000.0)
dt = np.random.rand(t.size)*5
t = t + dt
t.sort()
v0 = A0*np.cos(2*np.pi*f0*t + p0)
v1 = A1*np.cos(2*np.pi*f1*t + p1)
v = v0 + v1
#frequencies
freq = np.arange(2000,dtype='float32')/(np.float32(4000*1.0))
print(freq.max())
#spectrum
power,A,phase,fr,fi = LS.LombScargle(t,v,freq)
#figure
fig = plt
fig.figure(figsize=(8,11))
ax0 = fig.subplot2grid((2,1),(0,0))
ax1 = fig.subplot2grid((2,1),(1,0))
ax0.plot(t,v0,color='red',linestyle='--')
ax0.plot(t,v1,color='orange',linestyle='--')
ax0.plot(t,v,color='black',linestyle='-')
ax1.plot(freq*1000.0,power,color='blue')
ax0.set_xlabel('$t$ (s)')
ax1.set_ylabel('Power')
ax1.set_xlabel('Frequency (mHz)')
fmx = np.min([freq.max(),1.5*np.max([f0,f1])])
ax1.set_xlim(0,fmx*1000)
def Spectrogram():
#pick two frequencies
f0 = 0.002
f1 = 0.005
#amplitudes
A0 = 2.0
A1 = 1.5
#phases
p0 = np.pi/2.0
p1 = 0.0
#time series
t = np.arange(10800.0)
dt = np.random.rand(t.size)*5
t = t + dt
t.sort()
v0 = A0*np.cos(2*np.pi*f0*t + p0)
v1 = A1*np.cos(2*np.pi*f1*t + p1)
v = v0 + v1
wind = 1800
slip = 200
#frequencies
freq = np.arange(900,dtype='float32')/(np.float32(1800*1.0))
#figure
fig = plt
fig.figure(figsize=(8,11))
ax0 = fig.subplot2grid((2,1),(0,0))
ax1 = fig.subplot2grid((2,1),(1,0))
ax0.plot(t,v0,color='red',linestyle='--')
ax0.plot(t,v1,color='orange',linestyle='--')
ax0.plot(t,v,color='black',linestyle='-')
ax0.set_xlabel('Time (s)')
#spectrogram
ax1,Nw,Freq,Spec = LS.PlotSpectrogram(t,v,wind,slip,Freq=freq,FreqAxisUnits='mHz',fig=fig,maps=[1,2,0,1])
fmx = np.min([Freq.max(),1.5*np.max([f0,f1])])
ax1.set_ylim(0,fmx*1000)
def Spectrogram2():
#pick two frequencies
f0 = 0.002
f1 = 0.005
#amplitudes
A0 = 2.0
A1 = 1.5
#phases
p0 = np.pi/2.0
p1 = 0.0
#time series
t = np.arange(10800.0)
dt = np.random.rand(t.size)*5
t = t + dt
t.sort()
v0 = A0*np.cos(2*np.pi*f0*t + p0)
v1 = A1*np.cos(2*np.pi*f1*t + p1)
v = v0 + v1
wind = 1800
slip = 200
#frequencies
freq = np.arange(900,dtype='float32')/(np.float32(1800*1.0))
#figure
fig = plt
fig.figure(figsize=(8,11))
ax0 = fig.subplot2grid((2,1),(0,0))
ax1 = fig.subplot2grid((2,1),(1,0))
ax0.plot(t,v0,color='red',linestyle='--')
ax0.plot(t,v1,color='orange',linestyle='--')
ax0.plot(t,v,color='black',linestyle='-')
ax0.set_xlabel('Time (s)')
Nw,Freq,Spec = LS.Spectrogram(t,v,wind,slip,Freq=freq)
#spectrogram
ax1,Nw,Freq,Spec = LS.PlotSpectrogram(Freq,Spec,FreqAxisUnits='mHz',fig=fig,maps=[1,2,0,1])
fmx = np.min([Freq.max(),1.5*np.max([f0,f1])])
ax1.set_ylim(0,fmx*1000)
def Spectrogram3():
#pick two frequencies
f0 = 0.002
f1 = 0.005
#amplitudes
A0 = 2.0
A1 = 1.5
#phases
p0 = np.pi/2.0
p1 = 0.0
#time series
t = np.arange(10800.0)
dt = np.random.rand(t.size)*5
t = t + dt
t.sort()
v0 = A0*np.cos(2*np.pi*f0*t + p0)
v1 = A1*np.cos(2*np.pi*f1*t + p1)
v = v0 + v1
wind = 1800
slip = 200
#frequencies
freq = np.arange(900,dtype='float32')/(np.float32(1800*1.0))
#figure
fig = plt
fig.figure(figsize=(8,11))
ax0 = fig.subplot2grid((2,1),(0,0))
ax1 = fig.subplot2grid((2,1),(1,0))
ax0.plot(t,v0,color='red',linestyle='--')
ax0.plot(t,v1,color='orange',linestyle='--')
ax0.plot(t,v,color='black',linestyle='-')
ax0.set_xlabel('Time (s)')
Nw,Freq,Spec = LS.Spectrogram(t,v,wind,slip,Freq=freq)
#spectrogram
ax1 = SpectrogramPlotter(Spec.Tspec,Freq*1000,Spec.Pow,fig=fig,maps=[1,2,0,1])
fmx = np.min([Freq.max(),1.5*np.max([f0,f1])])
ax1.set_ylim(0,fmx*1000)
def Spectrogram3D():
#pick some frequencies
fx0 = 0.002
fx1 = 0.007
fy0 = 0.007
fy1 = 0.010
#amplitudes
A0 = 2.0
A1 = 1.5
#phases
p0 = np.pi/2.0
p1 = 0.0
#time series
t = np.arange(10800.0)
dt = np.random.rand(t.size)*5
t = t + dt
t.sort()
x0 = A0*np.cos(2*np.pi*fx0*t + p0)
x1 = A1*np.cos(2*np.pi*fx1*t + p1)
x = x0 + x1
y0 = A0*np.cos(2*np.pi*fy0*t + p0)
y1 = A1*np.cos(2*np.pi*fy1*t + p1)
y = y0 + y1
z = np.zeros(t.size,dtype='float32')
#frequencies
freq = np.arange(900,dtype='float32')/(np.float32(1800*1.0))
#spectrogram
wind = 1800
slip = 200
Nw,Freq,Spec = LS.Spectrogram3D(t,x,y,z,wind,slip,Freq=freq,CombineComps=True)
Nf = Freq.size - 1
S = Spec.xyPow
f = Freq[:Nf+1]*1000.0
ts = Spec.Tspec
xlabel = 'Time (s)'
dt = mode(ts[1:] - ts[:-1])/2.0
scale = [np.nanmin(S),np.nanmax(S)]
norm = colors.Normalize(vmin=scale[0],vmax=scale[1])
cmap = plt.cm.get_cmap('gnuplot')
#find gaps
gaps = np.where(np.isfinite(S[:,1]) == False)[0]
ngd,T0,T1 = DetectGaps(S[:,1])
#figure
fig = plt
fig.figure(figsize=(8,11))
ax0 = fig.subplot2grid((2,1),(0,0))
ax1 = fig.subplot2grid((2,1),(1,0))
ax0.plot(t,x,color='red')
ax0.plot(t,y,color='orange')
sm = None
for i in range(0,ngd):
#select the good (gap-free) portion of the spectrogram
use = np.arange(T0[i],T1[i]+1)
tax = np.append(ts[use]-dt,ts[use[-1]]+dt)
Stmp = S[use]
#mesh the axes
tm,fm = np.meshgrid(tax,f)
#plot the section
sm = ax1.pcolormesh(tm.T,fm.T,Stmp,cmap=cmap,norm=norm)
#colour bar
fig.subplots_adjust(right=0.8)
box = ax1.get_position()
if not sm is None:
cax = plt.axes([0.05*box.width + box.x1,box.y0+0.1*box.height,box.width*0.025,box.height*0.8])
cbar = fig.colorbar(sm,cax=cax)
cbar.set_label('Power')
#axis labels
ax1.set_xlabel(xlabel)
ax1.set_ylabel('$f$ (mHz)')
fmx = np.min([Freq.max(),1.5*np.max([fx0,fx1,fy0,fy1])])
ax1.set_ylim(0,fmx*1000)
return Spec
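# Minimal driver sketch (an illustration added here, not part of the original file; it only
# calls the demo functions defined above):
# if __name__ == "__main__":
#     Spectrogram()           # single-component spectrogram, computed and plotted in one call
#     Spec = Spectrogram3D()  # three-component spectrogram; returns the Spec object for inspection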
|
[
"numpy.meshgrid",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.axes",
"numpy.float32",
"numpy.zeros",
"numpy.isfinite",
"numpy.nanmin",
"numpy.append",
"numpy.max",
"numpy.arange",
"numpy.cos",
"numpy.random.rand",
"matplotlib.pyplot.cm.get_cmap",
"numpy.nanmax"
] |
[((502, 519), 'numpy.arange', 'np.arange', (['(1000.0)'], {}), '(1000.0)\n', (511, 519), True, 'import numpy as np\n'), ((1455, 1473), 'numpy.arange', 'np.arange', (['(10800.0)'], {}), '(10800.0)\n', (1464, 1473), True, 'import numpy as np\n'), ((2364, 2382), 'numpy.arange', 'np.arange', (['(10800.0)'], {}), '(10800.0)\n', (2373, 2382), True, 'import numpy as np\n'), ((3320, 3338), 'numpy.arange', 'np.arange', (['(10800.0)'], {}), '(10800.0)\n', (3329, 3338), True, 'import numpy as np\n'), ((4292, 4310), 'numpy.arange', 'np.arange', (['(10800.0)'], {}), '(10800.0)\n', (4301, 4310), True, 'import numpy as np\n'), ((4541, 4574), 'numpy.zeros', 'np.zeros', (['t.size'], {'dtype': '"""float32"""'}), "(t.size, dtype='float32')\n", (4549, 4574), True, 'import numpy as np\n'), ((4949, 4995), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'scale[0]', 'vmax': 'scale[1]'}), '(vmin=scale[0], vmax=scale[1])\n', (4965, 4995), True, 'import matplotlib.colors as colors\n'), ((5004, 5030), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""gnuplot"""'], {}), "('gnuplot')\n", (5019, 5030), True, 'import matplotlib.pyplot as plt\n'), ((526, 548), 'numpy.random.rand', 'np.random.rand', (['t.size'], {}), '(t.size)\n', (540, 548), True, 'import numpy as np\n'), ((584, 615), 'numpy.cos', 'np.cos', (['(2 * np.pi * f0 * t + p0)'], {}), '(2 * np.pi * f0 * t + p0)\n', (590, 615), True, 'import numpy as np\n'), ((619, 650), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * t + p1)'], {}), '(2 * np.pi * f1 * t + p1)\n', (625, 650), True, 'import numpy as np\n'), ((682, 714), 'numpy.arange', 'np.arange', (['(2000)'], {'dtype': '"""float32"""'}), "(2000, dtype='float32')\n", (691, 714), True, 'import numpy as np\n'), ((715, 737), 'numpy.float32', 'np.float32', (['(4000 * 1.0)'], {}), '(4000 * 1.0)\n', (725, 737), True, 'import numpy as np\n'), ((1480, 1502), 'numpy.random.rand', 'np.random.rand', (['t.size'], {}), '(t.size)\n', (1494, 1502), True, 'import numpy as np\n'), ((1537, 1568), 'numpy.cos', 'np.cos', (['(2 * np.pi * f0 * t + p0)'], {}), '(2 * np.pi * f0 * t + p0)\n', (1543, 1568), True, 'import numpy as np\n'), ((1572, 1603), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * t + p1)'], {}), '(2 * np.pi * f1 * t + p1)\n', (1578, 1603), True, 'import numpy as np\n'), ((1661, 1692), 'numpy.arange', 'np.arange', (['(900)'], {'dtype': '"""float32"""'}), "(900, dtype='float32')\n", (1670, 1692), True, 'import numpy as np\n'), ((1693, 1715), 'numpy.float32', 'np.float32', (['(1800 * 1.0)'], {}), '(1800 * 1.0)\n', (1703, 1715), True, 'import numpy as np\n'), ((2389, 2411), 'numpy.random.rand', 'np.random.rand', (['t.size'], {}), '(t.size)\n', (2403, 2411), True, 'import numpy as np\n'), ((2446, 2477), 'numpy.cos', 'np.cos', (['(2 * np.pi * f0 * t + p0)'], {}), '(2 * np.pi * f0 * t + p0)\n', (2452, 2477), True, 'import numpy as np\n'), ((2481, 2512), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * t + p1)'], {}), '(2 * np.pi * f1 * t + p1)\n', (2487, 2512), True, 'import numpy as np\n'), ((2570, 2601), 'numpy.arange', 'np.arange', (['(900)'], {'dtype': '"""float32"""'}), "(900, dtype='float32')\n", (2579, 2601), True, 'import numpy as np\n'), ((2602, 2624), 'numpy.float32', 'np.float32', (['(1800 * 1.0)'], {}), '(1800 * 1.0)\n', (2612, 2624), True, 'import numpy as np\n'), ((3345, 3367), 'numpy.random.rand', 'np.random.rand', (['t.size'], {}), '(t.size)\n', (3359, 3367), True, 'import numpy as np\n'), ((3402, 3433), 'numpy.cos', 'np.cos', (['(2 * np.pi * f0 * t + p0)'], {}), '(2 * np.pi * f0 * t + p0)\n', 
(3408, 3433), True, 'import numpy as np\n'), ((3437, 3468), 'numpy.cos', 'np.cos', (['(2 * np.pi * f1 * t + p1)'], {}), '(2 * np.pi * f1 * t + p1)\n', (3443, 3468), True, 'import numpy as np\n'), ((3526, 3557), 'numpy.arange', 'np.arange', (['(900)'], {'dtype': '"""float32"""'}), "(900, dtype='float32')\n", (3535, 3557), True, 'import numpy as np\n'), ((3558, 3580), 'numpy.float32', 'np.float32', (['(1800 * 1.0)'], {}), '(1800 * 1.0)\n', (3568, 3580), True, 'import numpy as np\n'), ((4317, 4339), 'numpy.random.rand', 'np.random.rand', (['t.size'], {}), '(t.size)\n', (4331, 4339), True, 'import numpy as np\n'), ((4375, 4407), 'numpy.cos', 'np.cos', (['(2 * np.pi * fx0 * t + p0)'], {}), '(2 * np.pi * fx0 * t + p0)\n', (4381, 4407), True, 'import numpy as np\n'), ((4411, 4443), 'numpy.cos', 'np.cos', (['(2 * np.pi * fx1 * t + p1)'], {}), '(2 * np.pi * fx1 * t + p1)\n', (4417, 4443), True, 'import numpy as np\n'), ((4460, 4492), 'numpy.cos', 'np.cos', (['(2 * np.pi * fy0 * t + p0)'], {}), '(2 * np.pi * fy0 * t + p0)\n', (4466, 4492), True, 'import numpy as np\n'), ((4496, 4528), 'numpy.cos', 'np.cos', (['(2 * np.pi * fy1 * t + p1)'], {}), '(2 * np.pi * fy1 * t + p1)\n', (4502, 4528), True, 'import numpy as np\n'), ((4597, 4628), 'numpy.arange', 'np.arange', (['(900)'], {'dtype': '"""float32"""'}), "(900, dtype='float32')\n", (4606, 4628), True, 'import numpy as np\n'), ((4629, 4651), 'numpy.float32', 'np.float32', (['(1800 * 1.0)'], {}), '(1800 * 1.0)\n', (4639, 4651), True, 'import numpy as np\n'), ((4914, 4926), 'numpy.nanmin', 'np.nanmin', (['S'], {}), '(S)\n', (4923, 4926), True, 'import numpy as np\n'), ((4927, 4939), 'numpy.nanmax', 'np.nanmax', (['S'], {}), '(S)\n', (4936, 4939), True, 'import numpy as np\n'), ((5391, 5418), 'numpy.arange', 'np.arange', (['T0[i]', '(T1[i] + 1)'], {}), '(T0[i], T1[i] + 1)\n', (5400, 5418), True, 'import numpy as np\n'), ((5424, 5465), 'numpy.append', 'np.append', (['(ts[use] - dt)', '(ts[use[-1]] + dt)'], {}), '(ts[use] - dt, ts[use[-1]] + dt)\n', (5433, 5465), True, 'import numpy as np\n'), ((5510, 5529), 'numpy.meshgrid', 'np.meshgrid', (['tax', 'f'], {}), '(tax, f)\n', (5521, 5529), True, 'import numpy as np\n'), ((5708, 5813), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.05 * box.width + box.x1, box.y0 + 0.1 * box.height, box.width * 0.025, \n box.height * 0.8]'], {}), '([0.05 * box.width + box.x1, box.y0 + 0.1 * box.height, box.width *\n 0.025, box.height * 0.8])\n', (5716, 5813), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1263), 'numpy.max', 'np.max', (['[f0, f1]'], {}), '([f0, f1])\n', (1253, 1263), True, 'import numpy as np\n'), ((2157, 2173), 'numpy.max', 'np.max', (['[f0, f1]'], {}), '([f0, f1])\n', (2163, 2173), True, 'import numpy as np\n'), ((3111, 3127), 'numpy.max', 'np.max', (['[f0, f1]'], {}), '([f0, f1])\n', (3117, 3127), True, 'import numpy as np\n'), ((4054, 4070), 'numpy.max', 'np.max', (['[f0, f1]'], {}), '([f0, f1])\n', (4060, 4070), True, 'import numpy as np\n'), ((5061, 5081), 'numpy.isfinite', 'np.isfinite', (['S[:, 1]'], {}), '(S[:, 1])\n', (5072, 5081), True, 'import numpy as np\n'), ((5960, 5988), 'numpy.max', 'np.max', (['[fx0, fx1, fy0, fy1]'], {}), '([fx0, fx1, fy0, fy1])\n', (5966, 5988), True, 'import numpy as np\n')]
|
import numpy as np
from unittest import TestCase
from diffprivlib.mechanisms import Exponential
from diffprivlib.utils import global_seed
class TestExponential(TestCase):
def setup_method(self, method):
if method.__name__.endswith("prob"):
global_seed(314159)
self.mech = Exponential()
def teardown_method(self, method):
del self.mech
def test_not_none(self):
self.assertIsNotNone(self.mech)
def test_class(self):
from diffprivlib.mechanisms import DPMechanism
self.assertTrue(issubclass(Exponential, DPMechanism))
def test_no_params(self):
with self.assertRaises(ValueError):
self.mech.randomise("A")
def test_no_epsilon(self):
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
]
self.mech.set_utility(utility_list)
with self.assertRaises(ValueError):
self.mech.randomise("A")
def test_inf_epsilon(self):
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
]
self.mech.set_utility(utility_list).set_epsilon(float("inf"))
# print(_mech.randomise("A"))
for i in range(1000):
self.assertEqual(self.mech.randomise("A"), "A")
def test_neg_epsilon(self):
with self.assertRaises(ValueError):
self.mech.set_epsilon(-1)
def test_complex_epsilon(self):
with self.assertRaises(TypeError):
self.mech.set_epsilon(1+2j)
def test_string_epsilon(self):
with self.assertRaises(TypeError):
self.mech.set_epsilon("Two")
def test_non_zero_delta(self):
with self.assertRaises(ValueError):
self.mech.set_epsilon_delta(1, 0.5)
def test_no_utility(self):
self.mech.set_epsilon(1)
with self.assertRaises(ValueError):
self.mech.randomise("1")
def test_hierarchy_first(self):
utility_list = [
["A", "B", 1],
["A", "2", 2],
["B", "2", 2]
]
self.mech.set_utility(utility_list)
self.assertIsNotNone(self.mech)
def test_non_string_hierarchy(self):
utility_list = [
["A", "B", 1],
["A", 2, 2],
["B", 2, 2]
]
with self.assertRaises(TypeError):
self.mech.set_utility(utility_list)
def test_missing_utilities(self):
utility_list = [
["A", "B", 1],
["A", "C", 2]
]
with self.assertRaises(ValueError):
self.mech.set_utility(utility_list)
def test_wrong_utilities(self):
utility_list = (
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
)
with self.assertRaises(TypeError):
self.mech.set_utility(utility_list)
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", "2"]
]
with self.assertRaises(TypeError):
self.mech.set_utility(utility_list)
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", -2]
]
with self.assertRaises(ValueError):
self.mech.set_utility(utility_list)
def test_non_string_input(self):
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
]
self.mech.set_epsilon(1).set_utility(utility_list)
with self.assertRaises(TypeError):
self.mech.randomise(2)
def test_outside_domain(self):
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
]
self.mech.set_epsilon(1).set_utility(utility_list)
with self.assertRaises(ValueError):
self.mech.randomise("D")
def test_get_utility_list(self):
self.assertIsNone(self.mech.get_utility_list())
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
]
self.mech.set_epsilon(1).set_utility(utility_list)
_utility_list = self.mech.get_utility_list()
self.assertEqual(len(_utility_list), len(utility_list))
def test_self_in_utility(self):
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2],
["A", "A", 5]
]
self.mech.set_epsilon(1).set_utility(utility_list)
_utility_list = self.mech.get_utility_list()
self.assertEqual(len(_utility_list) + 1, len(utility_list))
self.assertEqual(self.mech._get_utility("A", "A"), 0)
def test_distrib_prob(self):
epsilon = np.log(2)
runs = 20000
utility_list = [
["A", "B", 1],
["A", "C", 2],
["B", "C", 2]
]
self.mech.set_epsilon(epsilon).set_utility(utility_list)
count = [0, 0, 0]
for i in range(runs):
val = self.mech.randomise("A")
if val == "A":
count[0] += 1
elif val == "B":
count[1] += 1
elif val == "C":
count[2] += 1
# print("A: %d, B: %d, C: %d" % (count[0], count[1], count[2]))
self.assertLessEqual(count[0] / runs, np.exp(epsilon) * count[2] / runs + 0.05)
self.assertAlmostEqual(count[0] / count[1], count[1] / count[2], delta=0.1)
def test_repr(self):
repr_ = repr(self.mech.set_epsilon(1))
self.assertIn(".Exponential(", repr_)
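# Standalone usage sketch of the mechanism exercised above (only diffprivlib calls already shown
# in these tests; the utility values are illustrative):
# mech = Exponential().set_utility([["A", "B", 1], ["A", "C", 2], ["B", "C", 2]]).set_epsilon(1)
# print(mech.randomise("A"))  # perturbed output drawn from {"A", "B", "C"}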
|
[
"diffprivlib.utils.global_seed",
"numpy.exp",
"diffprivlib.mechanisms.Exponential",
"numpy.log"
] |
[((309, 322), 'diffprivlib.mechanisms.Exponential', 'Exponential', ([], {}), '()\n', (320, 322), False, 'from diffprivlib.mechanisms import Exponential\n'), ((4765, 4774), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4771, 4774), True, 'import numpy as np\n'), ((268, 287), 'diffprivlib.utils.global_seed', 'global_seed', (['(314159)'], {}), '(314159)\n', (279, 287), False, 'from diffprivlib.utils import global_seed\n'), ((5370, 5385), 'numpy.exp', 'np.exp', (['epsilon'], {}), '(epsilon)\n', (5376, 5385), True, 'import numpy as np\n')]
|
"""
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(mx.autograd.Function):
"""
ShakeDrop function.
Parameters:
----------
p : float
ShakeDrop specific probability (of life) for Bernoulli random variable.
"""
def __init__(self, p):
super(ShakeDrop, self).__init__()
self.p = p
def forward(self, x):
if mx.autograd.is_training():
b = np.random.binomial(n=1, p=self.p)
alpha = mx.nd.random.uniform_like(x.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=-1.0, high=1.0)
y = mx.nd.broadcast_mul(b + alpha - b * alpha, x)
self.save_for_backward(b)
else:
y = self.p * x
return y
def backward(self, dy):
b, = self.saved_tensors
beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=0.0, high=1.0)
return mx.nd.broadcast_mul(b + beta - b * beta, dy)
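# What ShakeDrop computes (read directly from forward/backward above): during training the branch
# input is multiplied by b + alpha - b*alpha, i.e. passed through unchanged when the Bernoulli
# gate b = 1 and scaled by a random alpha in [-1, 1] when b = 0; the backward pass uses an
# independent beta in [0, 1] in place of alpha. At inference the branch is simply scaled by the
# survival probability p, its expected scale factor.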
class ShakeDropResUnit(HybridBlock):
"""
ShakeDrop-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_prob : float
Residual branch life probability.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
life_prob,
**kwargs):
super(ShakeDropResUnit, self).__init__(**kwargs)
self.life_prob = life_prob
self.resize_identity = (in_channels != out_channels) or (strides != 1)
body_class = ResBottleneck if bottleneck else ResBlock
with self.name_scope():
self.body = body_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
# self.shake_drop = ShakeDrop(self.life_prob)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = ShakeDrop(self.life_prob)(x) + identity
# x = self.shake_drop(x) + identity
x = self.activ(x)
return x
class CIFARShakeDropResNet(HybridBlock):
"""
ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_probs : list of float
Residual branch life probability for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
life_probs,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARShakeDropResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
k = 0
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ShakeDropResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
life_prob=life_probs[k]))
in_channels = out_channels
k += 1
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shakedropresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShakeDrop-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
channels_per_layers = [16, 32, 64]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
total_layers = sum(layers)
final_death_prob = 0.5
life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)]
net = CIFARShakeDropResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
life_probs=life_probs,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
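# Worked example of the life_probs schedule above: for blocks=20 without bottleneck there are
# (20 - 2) // 6 = 3 units per stage, so total_layers = 9 and life_prob decays linearly from
# 1 - (1/9)*0.5 ~= 0.944 for the first unit down to 1 - (9/9)*0.5 = 0.5 for the last unit.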
def shakedropresnet20_cifar10(classes=10, **kwargs):
"""
ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_cifar10", **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
"""
ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_cifar100", **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
"""
ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(shakedropresnet20_cifar10, 10),
(shakedropresnet20_cifar100, 100),
(shakedropresnet20_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shakedropresnet20_cifar10 or weight_count == 272474)
assert (model != shakedropresnet20_cifar100 or weight_count == 278324)
assert (model != shakedropresnet20_svhn or weight_count == 272474)
x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
# y = net(x)
with mx.autograd.record():
y = net(x)
y.backward()
assert (y.shape == (14, classes))
if __name__ == "__main__":
_test()
|
[
"numpy.random.binomial",
"mxnet.autograd.is_training",
"mxnet.autograd.record",
"mxnet.gluon.nn.HybridSequential",
"mxnet.gluon.nn.Dense",
"mxnet.gluon.nn.Activation",
"mxnet.nd.zeros",
"mxnet.nd.broadcast_mul",
"mxnet.cpu",
"mxnet.gluon.nn.AvgPool2D",
"os.path.join",
"mxnet.gluon.nn.Flatten",
"numpy.prod"
] |
[((6743, 6748), 'mxnet.cpu', 'cpu', ([], {}), '()\n', (6746, 6748), False, 'from mxnet import cpu\n'), ((6785, 6822), 'os.path.join', 'os.path.join', (['"""~"""', '""".mxnet"""', '"""models"""'], {}), "('~', '.mxnet', 'models')\n", (6797, 6822), False, 'import os\n'), ((834, 859), 'mxnet.autograd.is_training', 'mx.autograd.is_training', ([], {}), '()\n', (857, 859), True, 'import mxnet as mx\n'), ((1378, 1422), 'mxnet.nd.broadcast_mul', 'mx.nd.broadcast_mul', (['(b + beta - b * beta)', 'dy'], {}), '(b + beta - b * beta, dy)\n', (1397, 1422), True, 'import mxnet as mx\n'), ((11422, 11430), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (11428, 11430), True, 'import mxnet as mx\n'), ((12081, 12118), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(14, 3, 32, 32)'], {'ctx': 'ctx'}), '((14, 3, 32, 32), ctx=ctx)\n', (12092, 12118), True, 'import mxnet as mx\n'), ((877, 910), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'self.p'}), '(n=1, p=self.p)\n', (895, 910), True, 'import numpy as np\n'), ((1046, 1091), 'mxnet.nd.broadcast_mul', 'mx.nd.broadcast_mul', (['(b + alpha - b * alpha)', 'x'], {}), '(b + alpha - b * alpha, x)\n', (1065, 1091), True, 'import mxnet as mx\n'), ((3082, 3103), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (3095, 3103), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((4961, 4991), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (4980, 4991), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((6202, 6232), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (6221, 6232), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((11752, 11772), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (11759, 11772), True, 'import numpy as np\n'), ((12153, 12173), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (12171, 12173), True, 'import mxnet as mx\n'), ((6104, 6140), 'mxnet.gluon.nn.AvgPool2D', 'nn.AvgPool2D', ([], {'pool_size': '(8)', 'strides': '(1)'}), '(pool_size=8, strides=1)\n', (6116, 6140), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((6261, 6273), 'mxnet.gluon.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (6271, 6273), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((6303, 6348), 'mxnet.gluon.nn.Dense', 'nn.Dense', ([], {'units': 'classes', 'in_units': 'in_channels'}), '(units=classes, in_units=in_channels)\n', (6311, 6348), False, 'from mxnet.gluon import nn, HybridBlock\n')]
|
import numpy as np
import pandas as pd
import pulp
from laptimize.curve_approximation import CurveApproximator
from laptimize.log import LogFactory
class LAPModel(object):
"""solve the linear approximated LP problem and sub problems"""
def __init__(self, name='nlp_problem'):
self.logger = LogFactory.get_logger()
self.lp_variables = dict()
self.segment = pd.DataFrame()
self.curve = pd.DataFrame()
self.lp_slack = pulp.LpVariable.dict('p_%s', ['p1'], lowBound=0)
self.model = pulp.LpProblem(name, pulp.LpMinimize)
self.objective_expressions = []
self.constraint_expression = []
def initialize(self, segment=pd.DataFrame(), curve=pd.DataFrame(), name='nlp_sub_problem'):
"""
initialize variables for branching sub problems
Parameters
----------
segment: pandas data frame
updated piecewise segment for decision variables
curve: pandas data frame
function values of objective and constraints function for each segment values
name: string
problem name
Returns
-------
self
"""
self.lp_variables = dict()
self.segment = segment
self.curve = curve
self.model = pulp.LpProblem(name, pulp.LpMinimize)
self.objective_expressions = []
self.constraint_expression = []
return self
def generate_variable_names(self, no_of_segments, node_name):
"""
generate weight variables names for approximated lp problem
Parameters
---------
no_of_segments: int
no of piecewise linear segment
node_name: string
non-linear decision variable name
Returns
------
variable_names: list
weight variable names
ex:[x1_1,x1_2,x1_3]
"""
variable_names = []
for i in range(0, no_of_segments):
variable_names.append("%s_%s" % (node_name, i))
return variable_names
def define_weights_for_segment(self, variable_names, name):
"""
create linear problem related variables using piece wise variables
Parameters
---------
variable_names: list
piece wise variable list
name: string
decision variable name
Returns
-------
self.lp_variable : dict
update lp_variable dictionary with weight linear problem variables
"""
self.lp_variables[name] = pulp.LpVariable.dict('l_%s', variable_names, lowBound=0, upBound=1)
return self.lp_variables[name]
def fill_constraint_objective_arrays(self, lp_allocation, constraint):
"""
update objective and constraints expression lists for linear problem
Parameters
---------
lp_allocation: dict
linear problem variables
constraint: pandas data frame
problem data frame
Returns
-------
weights: list
lp weight variables, later used in the weights-sum-to-one constraint
problem_expressions: pandas data frame
collection of objective and constraint expressions, one column per expression
"""
try:
problem_expressions = pd.DataFrame()
for index in constraint.index:
constraint_expression = []
weights = []
for key in lp_allocation:
constraint_expression.append(lp_allocation[key] * self.curve.loc[key][index])
weights.append(lp_allocation[key])
problem_expressions[index] = list(constraint_expression)
return weights, problem_expressions
except Exception as err:
self.logger.info('fill_constraint_objective_arrays method ended with error ')
self.logger.error(str(err))
raise
def add_sub_problem(self, segment_key, k):
"""
add sub problem constraint related to the weight variable
Parameters
----------
segment_key: string
branching variable key
k: list
branching sub variables key ex : [x1_1, x1_2]
Returns
-------
None; the sub-problem constraints are added to `self.model` and the pruned keys are dropped from `self.segment` and `self.curve`
"""
# adding a sub problem
for key in self.lp_variables[segment_key]:
if key in k:
continue
else:
self.model += self.lp_variables[segment_key][key] == 0
self.segment = self.segment.drop([key])
self.curve = self.curve.drop([key])
def add_weights_sum_constraint_to_model(self, weights):
self.model += pulp.lpSum(weights) == 1
def add_model_constraint_and_objective(self, constraints, values):
"""
add constraint and objective function to the pulp lp problem
Parameters
----------
constraints: pandas data frame
problem data frame
values: pandas series
right side values for the constraints
Returns
-------
self
"""
try:
# Add objective function to model.
self.model += pulp.lpSum(constraints.objective) + self.lp_slack['p1']
constraints = constraints.drop(['objective'], axis=1)
for constraint_expression in constraints:
self.model += (pulp.lpSum(constraints[constraint_expression]) + self.lp_slack['p1']) <= values[
constraint_expression]
except Exception as err:
self.logger.info('add_model_constraint_and_objective method ended with error ')
self.logger.error(str(err))
raise
def solve_model(self):
"""
problem solve method for lp problems
"""
try:
solver = pulp.PULP_CBC_CMD(msg=0)
self.model.solve(solver)
except Exception as err:
self.logger.info('solve_model method ended with error ')
self.logger.error(str(err))
raise
def model_solver(self, constraints_df, partition_len):
"""
solve the initial lp problem with piecewise linear variables(weights)
Parameters
----------
constraints_df: pandas data frame
which includes problem related details; a data frame version of the problem dictionary
Returns
-------
lp_variables: dict
pulp solution for the lp weight variables
lp_slack: dict
value of the lp slack variable
segment: pandas data frame
segment values for each decision variable
curve: pandas data frame
function values of objective and constraints function for each segment values
"""
try:
constraint_values = pd.DataFrame()
constraints = constraints_df.drop(['value'])
# Iterate over constraints and build model.
for _, constraint in constraints.iterrows():
# piecewise linear segments.
x_array = np.append(np.arange(constraint.capacity[0], constraint.capacity[1], partition_len),
constraint.capacity[1])
no_of_segments = len(x_array)
constraint = constraint.drop(['capacity'])
variable_names = self.generate_variable_names(no_of_segments, constraint.name)
# lp variable.
lp_allocation = self.define_weights_for_segment(variable_names, constraint.name)
# segment value.
segment = pd.DataFrame({'key': [constraint.name] * len(x_array), 'segment': x_array})
segment.index = variable_names
self.segment = pd.concat([self.segment, segment])
# curve approximation for each segment.
curve = pd.DataFrame(CurveApproximator().get_curve_approximation(constraint, x_array))
curve.index = variable_names
self.curve = pd.concat([self.curve, curve])
weights, problem_values = self.fill_constraint_objective_arrays(lp_allocation, constraint)
constraint_values = pd.concat([constraint_values, problem_values], axis=0)
self.add_weights_sum_constraint_to_model(weights)
self.add_model_constraint_and_objective(constraint_values, constraints_df.loc['value'])
self.solve_model()
return self.lp_variables, self.segment, self.curve
except Exception as err:
self.logger.info('model_solver method ended with error ')
self.logger.error(str(err))
raise
def global_solver(self, segment_key, k, constraints_df):
"""
solve the given sub lp problem with branching rule
Parameters
----------
segment_key: str
branching variable key ex: x1
k: list
branching sub variables key ex : [x1_1, x1_2]
constraints_df: pandas data frame
which includes problem related details; a data frame version of the problem dictionary
Returns
-------
lp_variables: dict
pulp solution for the lp weight variables
lp_slack: dict
value of the lp slack variable
segment: pandas data frame
segment values for each decision variable
curve: pandas data frame
function values of objective and constraints functions for each segment values
"""
# Iterate over constraints and build model.
try:
constraint_values = pd.DataFrame()
constraints = constraints_df.drop(['value'])
for _, constraint in constraints.iterrows():
constraint = constraint.drop(['capacity'])
segment = self.segment[self.segment.key == constraint.name]['segment'].to_dict()
variable_names = list(segment.keys())
lp_allocation = self.define_weights_for_segment(variable_names, constraint.name)
weights, problem_values = self.fill_constraint_objective_arrays(lp_allocation, constraint)
constraint_values = pd.concat([constraint_values, problem_values], axis=0)
self.add_weights_sum_constraint_to_model(weights)
# adding sub problem
self.add_sub_problem(segment_key, k)
self.add_model_constraint_and_objective(constraint_values, constraints_df.loc['value'])
self.solve_model()
return self.lp_variables, self.segment, self.curve
except Exception as err:
self.logger.info('global_solver method ended with error ')
self.logger.error(str(err))
raise
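# Hedged usage sketch (not part of the original module). It assumes a constraints_df shaped as the
# docstrings above describe: one row per decision variable carrying 'capacity' and 'objective'
# entries plus one entry per constraint, and a 'value' row holding the right-hand sides; the exact
# schema is defined by the surrounding laptimize package.
# model = LAPModel(name="nlp_problem")
# lp_vars, segment, curve = model.model_solver(constraints_df, partition_len=1.0)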
|
[
"pandas.DataFrame",
"pulp.lpSum",
"laptimize.curve_approximation.CurveApproximator",
"pulp.LpVariable.dict",
"numpy.arange",
"pulp.LpProblem",
"pulp.PULP_CBC_CMD",
"pandas.concat",
"laptimize.log.LogFactory.get_logger"
] |
[((310, 333), 'laptimize.log.LogFactory.get_logger', 'LogFactory.get_logger', ([], {}), '()\n', (331, 333), False, 'from laptimize.log import LogFactory\n'), ((392, 406), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (404, 406), True, 'import pandas as pd\n'), ((428, 442), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (440, 442), True, 'import pandas as pd\n'), ((467, 515), 'pulp.LpVariable.dict', 'pulp.LpVariable.dict', (['"""p_%s"""', "['p1']"], {'lowBound': '(0)'}), "('p_%s', ['p1'], lowBound=0)\n", (487, 515), False, 'import pulp\n'), ((537, 574), 'pulp.LpProblem', 'pulp.LpProblem', (['name', 'pulp.LpMinimize'], {}), '(name, pulp.LpMinimize)\n', (551, 574), False, 'import pulp\n'), ((689, 703), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (701, 703), True, 'import pandas as pd\n'), ((711, 725), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (723, 725), True, 'import pandas as pd\n'), ((1368, 1405), 'pulp.LpProblem', 'pulp.LpProblem', (['name', 'pulp.LpMinimize'], {}), '(name, pulp.LpMinimize)\n', (1382, 1405), False, 'import pulp\n'), ((2636, 2703), 'pulp.LpVariable.dict', 'pulp.LpVariable.dict', (['"""l_%s"""', 'variable_names'], {'lowBound': '(0)', 'upBound': '(1)'}), "('l_%s', variable_names, lowBound=0, upBound=1)\n", (2656, 2703), False, 'import pulp\n'), ((3334, 3348), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3346, 3348), True, 'import pandas as pd\n'), ((4733, 4752), 'pulp.lpSum', 'pulp.lpSum', (['weights'], {}), '(weights)\n', (4743, 4752), False, 'import pulp\n'), ((5893, 5917), 'pulp.PULP_CBC_CMD', 'pulp.PULP_CBC_CMD', ([], {'msg': '(0)'}), '(msg=0)\n', (5910, 5917), False, 'import pulp\n'), ((6885, 6899), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6897, 6899), True, 'import pandas as pd\n'), ((9694, 9708), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9706, 9708), True, 'import pandas as pd\n'), ((5247, 5280), 'pulp.lpSum', 'pulp.lpSum', (['constraints.objective'], {}), '(constraints.objective)\n', (5257, 5280), False, 'import pulp\n'), ((7826, 7860), 'pandas.concat', 'pd.concat', (['[self.segment, segment]'], {}), '([self.segment, segment])\n', (7835, 7860), True, 'import pandas as pd\n'), ((8094, 8124), 'pandas.concat', 'pd.concat', (['[self.curve, curve]'], {}), '([self.curve, curve])\n', (8103, 8124), True, 'import pandas as pd\n'), ((8269, 8323), 'pandas.concat', 'pd.concat', (['[constraint_values, problem_values]'], {'axis': '(0)'}), '([constraint_values, problem_values], axis=0)\n', (8278, 8323), True, 'import pandas as pd\n'), ((10273, 10327), 'pandas.concat', 'pd.concat', (['[constraint_values, problem_values]'], {'axis': '(0)'}), '([constraint_values, problem_values], axis=0)\n', (10282, 10327), True, 'import pandas as pd\n'), ((7150, 7222), 'numpy.arange', 'np.arange', (['constraint.capacity[0]', 'constraint.capacity[1]', 'partition_len'], {}), '(constraint.capacity[0], constraint.capacity[1], partition_len)\n', (7159, 7222), True, 'import numpy as np\n'), ((5454, 5500), 'pulp.lpSum', 'pulp.lpSum', (['constraints[constraint_expression]'], {}), '(constraints[constraint_expression])\n', (5464, 5500), False, 'import pulp\n'), ((7954, 7973), 'laptimize.curve_approximation.CurveApproximator', 'CurveApproximator', ([], {}), '()\n', (7971, 7973), False, 'from laptimize.curve_approximation import CurveApproximator\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `simu_linear` package."""
import pytest
import numpy as np
from thermo.chemical import Chemical
from pandangas import simu_linear as sim
from pandangas import topology as top
from fixtures import simple_network
def test_solve():
# 3 * x0 + x1 = 9 and x0 + 2 * x1 = 8 <=> x0 = 2, x1 = 3
a = np.array([[3, 1], [1, 2]])
b = np.array([9, 8])
assert np.array_equal(sim.solve(a, b), np.array([2.0, 3.0]))
def test_weird():
a = np.array([1, 0, 0, 1, 0, 1])
waited = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]])
assert np.array_equal(sim.weird(a), waited)
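# The expectation above documents sim.weird's observable behaviour: given a flat 0/1 vector it
# returns one row per non-zero entry, each row one-hot at that entry's position (here positions
# 0, 3 and 5 of the 6-element input).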
def test_create_a(simple_network):
gas = Chemical("natural gas", T=10 + 273.15, P=1.022e5)
net = simple_network
g = top.graphs_by_level_as_dict(net)
graph = g["BP"]
a = sim.create_a(graph, gas)
assert a.shape == (20, 20)
def test_create_k(simple_network):
gas = Chemical("natural gas", T=10 + 273.15, P=1.022e5)
net = simple_network
g = top.graphs_by_level_as_dict(net)
graph = g["BP"]
k = sim.create_k(graph, gas)
assert k.shape == (len(graph.edges),)
for ik in k:
assert int(ik) == 49975
def test_create_b(simple_network):
net = simple_network
loads = sim._scaled_loads_as_dict(net)
p_ops = sim._operating_pressures_as_dict(net)
g = top.graphs_by_level_as_dict(net)
graph = g["BP"]
b = sim.create_b(graph, loads, p_ops)
assert b.shape == (20,)
def test_run_one_level_BP_shape(simple_network):
net = simple_network
g = top.graphs_by_level_as_dict(net)
graph = g["BP"]
p_nodes, m_dot_pipes, m_dot_nodes, gas = sim.run_one_level(net, "BP")
assert p_nodes.shape == (len(graph.nodes),)
assert m_dot_pipes.shape == (len(graph.edges),)
assert m_dot_nodes.shape == (len(graph.nodes),)
def test_run_one_level_BP_values(simple_network):
net = simple_network
g = top.graphs_by_level_as_dict(net)
graph = g["BP"]
p_nodes, m_dot_pipes, m_dot_nodes, gas = sim.run_one_level(net, "BP")
assert p_nodes.round().tolist() == [102200.0, 102190.0, 102188.0, 102193.0, 102190.0, 102200.0]
assert m_dot_pipes.round(5).tolist() == [2.1e-04, 2.4e-04, 3.0e-05, 7.0e-05, -1.4e-04, 7.0e-05, -2.0e-04, 1.0e-05]
assert m_dot_nodes.round(5).tolist() == [-0.00045, 0.00026, 0.00026, 0.0, 0.00026, -0.00034]
|
[
"pandangas.simu_linear.create_b",
"pandangas.simu_linear.solve",
"pandangas.simu_linear.create_a",
"pandangas.simu_linear.run_one_level",
"pandangas.simu_linear._operating_pressures_as_dict",
"numpy.array",
"pandangas.topology.graphs_by_level_as_dict",
"thermo.chemical.Chemical",
"pandangas.simu_linear._scaled_loads_as_dict",
"pandangas.simu_linear.create_k",
"pandangas.simu_linear.weird"
] |
[((363, 389), 'numpy.array', 'np.array', (['[[3, 1], [1, 2]]'], {}), '([[3, 1], [1, 2]])\n', (371, 389), True, 'import numpy as np\n'), ((398, 414), 'numpy.array', 'np.array', (['[9, 8]'], {}), '([9, 8])\n', (406, 414), True, 'import numpy as np\n'), ((508, 536), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 0, 1]'], {}), '([1, 0, 0, 1, 0, 1])\n', (516, 536), True, 'import numpy as np\n'), ((550, 620), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]])\n', (558, 620), True, 'import numpy as np\n'), ((716, 766), 'thermo.chemical.Chemical', 'Chemical', (['"""natural gas"""'], {'T': '(10 + 273.15)', 'P': '(102200.0)'}), "('natural gas', T=10 + 273.15, P=102200.0)\n", (724, 766), False, 'from thermo.chemical import Chemical\n'), ((799, 831), 'pandangas.topology.graphs_by_level_as_dict', 'top.graphs_by_level_as_dict', (['net'], {}), '(net)\n', (826, 831), True, 'from pandangas import topology as top\n'), ((860, 884), 'pandangas.simu_linear.create_a', 'sim.create_a', (['graph', 'gas'], {}), '(graph, gas)\n', (872, 884), True, 'from pandangas import simu_linear as sim\n'), ((963, 1013), 'thermo.chemical.Chemical', 'Chemical', (['"""natural gas"""'], {'T': '(10 + 273.15)', 'P': '(102200.0)'}), "('natural gas', T=10 + 273.15, P=102200.0)\n", (971, 1013), False, 'from thermo.chemical import Chemical\n'), ((1046, 1078), 'pandangas.topology.graphs_by_level_as_dict', 'top.graphs_by_level_as_dict', (['net'], {}), '(net)\n', (1073, 1078), True, 'from pandangas import topology as top\n'), ((1107, 1131), 'pandangas.simu_linear.create_k', 'sim.create_k', (['graph', 'gas'], {}), '(graph, gas)\n', (1119, 1131), True, 'from pandangas import simu_linear as sim\n'), ((1297, 1327), 'pandangas.simu_linear._scaled_loads_as_dict', 'sim._scaled_loads_as_dict', (['net'], {}), '(net)\n', (1322, 1327), True, 'from pandangas import simu_linear as sim\n'), ((1340, 1377), 'pandangas.simu_linear._operating_pressures_as_dict', 'sim._operating_pressures_as_dict', (['net'], {}), '(net)\n', (1372, 1377), True, 'from pandangas import simu_linear as sim\n'), ((1386, 1418), 'pandangas.topology.graphs_by_level_as_dict', 'top.graphs_by_level_as_dict', (['net'], {}), '(net)\n', (1413, 1418), True, 'from pandangas import topology as top\n'), ((1447, 1480), 'pandangas.simu_linear.create_b', 'sim.create_b', (['graph', 'loads', 'p_ops'], {}), '(graph, loads, p_ops)\n', (1459, 1480), True, 'from pandangas import simu_linear as sim\n'), ((1593, 1625), 'pandangas.topology.graphs_by_level_as_dict', 'top.graphs_by_level_as_dict', (['net'], {}), '(net)\n', (1620, 1625), True, 'from pandangas import topology as top\n'), ((1691, 1719), 'pandangas.simu_linear.run_one_level', 'sim.run_one_level', (['net', '"""BP"""'], {}), "(net, 'BP')\n", (1708, 1719), True, 'from pandangas import simu_linear as sim\n'), ((1957, 1989), 'pandangas.topology.graphs_by_level_as_dict', 'top.graphs_by_level_as_dict', (['net'], {}), '(net)\n', (1984, 1989), True, 'from pandangas import topology as top\n'), ((2055, 2083), 'pandangas.simu_linear.run_one_level', 'sim.run_one_level', (['net', '"""BP"""'], {}), "(net, 'BP')\n", (2072, 2083), True, 'from pandangas import simu_linear as sim\n'), ((441, 456), 'pandangas.simu_linear.solve', 'sim.solve', (['a', 'b'], {}), '(a, b)\n', (450, 456), True, 'from pandangas import simu_linear as sim\n'), ((458, 478), 'numpy.array', 'np.array', (['[2.0, 3.0]'], {}), '([2.0, 3.0])\n', (466, 478), True, 'import numpy as np\n'), 
((647, 659), 'pandangas.simu_linear.weird', 'sim.weird', (['a'], {}), '(a)\n', (656, 659), True, 'from pandangas import simu_linear as sim\n')]
|
# Copyright 2019 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.utils.validation import check_is_fitted
from ..utils import import_graph, is_almost_symmetric
from .base import BaseEmbedMulti
from .svd import select_dimension, selectSVD
class MultipleASE(BaseEmbedMulti):
r"""
Multiple Adjacency Spectral Embedding (MASE) embeds arbitrary number of input
graphs with matched vertex sets.
For a population of undirected graphs, MASE assumes that the population of graphs
is sampled from :math:`VR^{(i)}V^T` where :math:`V \in \mathbb{R}^{n\times d}` and
:math:`R^{(i)} \in \mathbb{R}^{d\times d}`. Score matrices, :math:`R^{(i)}`, are
allowed to vary for each graph, but are symmetric. All graphs share a common
latent position matrix :math:`V`.
For a population of directed graphs, MASE assumes that the population is sampled
from :math:`UR^{(i)}V^T` where :math:`U \in \mathbb{R}^{n\times d_1}`,
:math:`V \in \mathbb{R}^{n\times d_2}`, and
:math:`R^{(i)} \in \mathbb{R}^{d_1\times d_2}`. In this case, score matrices
:math:`R^{(i)}` can be asymmetric and non-square, but all graphs still share
common latent position matrices :math:`U` and :math:`V`.
Parameters
----------
n_components : int or None, default = None
Desired dimensionality of output data. If "full",
n_components must be <= min(X.shape). Otherwise, n_components must be
< min(X.shape). If None, then optimal dimensions will be chosen by
:func:`~graspy.embed.select_dimension` using ``n_elbows`` argument.
n_elbows : int, optional, default: 2
If ``n_components=None``, then compute the optimal embedding dimension using
:func:`~graspy.embed.select_dimension`. Otherwise, ignored.
algorithm : {'randomized' (default), 'full', 'truncated'}, optional
SVD solver to use:
- 'randomized'
Computes randomized svd using
:func:`sklearn.utils.extmath.randomized_svd`
- 'full'
Computes full svd using :func:`scipy.linalg.svd`
- 'truncated'
Computes truncated svd using :func:`scipy.sparse.linalg.svds`
n_iter : int, optional (default = 5)
Number of iterations for randomized SVD solver. Not used by 'full' or
'truncated'. The default is larger than the default in randomized_svd
to handle sparse matrices that may have large slowly decaying spectrum.
scaled : bool, optional (default=False)
Whether to scale individual eigenvectors with eigenvalues in first embedding
stage.
Attributes
----------
n_graphs_ : int
Number of graphs
n_vertices_ : int
Number of vertices in each graph
latent_left_ : array, shape (n_samples, n_components)
Estimated left latent positions of the graph.
latent_right_ : array, shape (n_samples, n_components), or None
Estimated right latent positions of the graph. Only computed when an input
graph is directed, or the adjacency matrix is asymmetric. Otherwise, None.
scores_ : array, shape (n_samples, n_components, n_components)
Estimated :math:`\hat{R}` matrices for each input graph.
Notes
-----
When an input graph is directed, `n_components` of `latent_left_` may not be equal
to `n_components` of `latent_right_`.
"""
def __init__(
self,
n_components=None,
n_elbows=2,
algorithm="randomized",
n_iter=5,
scaled=False,
):
if not isinstance(scaled, bool):
msg = "scaled must be a boolean, not {}".format(scaled)
raise TypeError(msg)
super().__init__(
n_components=n_components,
n_elbows=n_elbows,
algorithm=algorithm,
n_iter=n_iter,
)
self.scaled = scaled
def _reduce_dim(self, graphs):
# first embed into log2(n_vertices) for each graph
n_components = int(np.ceil(np.log2(np.min(self.n_vertices_))))
# embed individual graphs
embeddings = [
selectSVD(
graph,
n_components=n_components,
algorithm=self.algorithm,
n_iter=self.n_iter,
)
for graph in graphs
]
Us, Ds, Vs = zip(*embeddings)
# Choose the best embedding dimension for each graphs
if self.n_components is None:
embedding_dimensions = []
for D in Ds:
elbows, _ = select_dimension(D, n_elbows=self.n_elbows)
embedding_dimensions.append(elbows[-1])
# Choose the max of all of best embedding dimension of all graphs
best_dimension = int(np.ceil(np.max(embedding_dimensions)))
else:
best_dimension = self.n_components
if not self.scaled:
Us = np.hstack([U[:, :best_dimension] for U in Us])
Vs = np.hstack([V.T[:, :best_dimension] for V in Vs])
else:
# Equivalent to ASE
Us = np.hstack(
[
U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
for U, D in zip(Us, Ds)
]
)
Vs = np.hstack(
[
V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
for V, D in zip(Vs, Ds)
]
)
# Second SVD for vertices
# The notation is slightly different than the paper
Uhat, _, _ = selectSVD(
Us,
n_components=self.n_components,
n_elbows=self.n_elbows,
algorithm=self.algorithm,
n_iter=self.n_iter,
)
Vhat, _, _ = selectSVD(
Vs,
n_components=self.n_components,
n_elbows=self.n_elbows,
algorithm=self.algorithm,
n_iter=self.n_iter,
)
return Uhat, Vhat
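# _reduce_dim above is the two-stage MASE embedding: each graph is first embedded on its own into
# ceil(log2(n_vertices)) dimensions, the per-graph embeddings are truncated to a common dimension
# (the largest singular-value elbow across graphs unless n_components is given), horizontally
# stacked, and a second SVD of that stack yields the shared latent positions Uhat and Vhat.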
def fit(self, graphs, y=None):
"""
Fit the model with graphs.
Parameters
----------
graphs : list of nx.Graph or ndarray, or ndarray
If list of nx.Graph, each Graph must contain same number of nodes.
If list of ndarray, each array must have shape (n_vertices, n_vertices).
If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).
Returns
-------
self : object
Returns an instance of self.
"""
graphs = self._check_input_graphs(graphs)
# Check if undirected
undirected = all(is_almost_symmetric(g) for g in graphs)
# embed
Uhat, Vhat = self._reduce_dim(graphs)
self.latent_left_ = Uhat
if not undirected:
self.latent_right_ = Vhat
self.scores_ = Uhat.T @ graphs @ Vhat
else:
self.latent_right_ = None
self.scores_ = Uhat.T @ graphs @ Uhat
return self
def fit_transform(self, graphs, y=None):
"""
Fit the model with graphs and apply the embedding on graphs.
n_components is either automatically determined or based on user input.
Parameters
----------
graphs : list of nx.Graph or ndarray, or ndarray
If list of nx.Graph, each Graph must contain same number of nodes.
If list of ndarray, each array must have shape (n_vertices, n_vertices).
If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).
Returns
-------
out : array-like, shape (n_vertices, n_components) if input
graphs were symmetric. If graphs were directed, returns tuple of
two arrays (same shape as above) where the first corresponds to the
left latent positions, and the second to the right latent positions
"""
return self._fit_transform(graphs)
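# Hedged usage sketch (not part of the original module; shapes and values are illustrative):
# import numpy as np
# graphs = np.random.rand(5, 50, 50)
# graphs = (graphs + graphs.transpose(0, 2, 1)) / 2           # make each graph symmetric
# latent = MultipleASE(n_components=3).fit_transform(graphs)  # shared latent positions, shape (50, 3)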
|
[
"numpy.min",
"numpy.sqrt",
"numpy.max",
"numpy.hstack"
] |
[((5492, 5538), 'numpy.hstack', 'np.hstack', (['[U[:, :best_dimension] for U in Us]'], {}), '([U[:, :best_dimension] for U in Us])\n', (5501, 5538), True, 'import numpy as np\n'), ((5556, 5604), 'numpy.hstack', 'np.hstack', (['[V.T[:, :best_dimension] for V in Vs]'], {}), '([V.T[:, :best_dimension] for V in Vs])\n', (5565, 5604), True, 'import numpy as np\n'), ((4595, 4619), 'numpy.min', 'np.min', (['self.n_vertices_'], {}), '(self.n_vertices_)\n', (4601, 4619), True, 'import numpy as np\n'), ((5354, 5382), 'numpy.max', 'np.max', (['embedding_dimensions'], {}), '(embedding_dimensions)\n', (5360, 5382), True, 'import numpy as np\n'), ((5749, 5776), 'numpy.sqrt', 'np.sqrt', (['D[:best_dimension]'], {}), '(D[:best_dimension])\n', (5756, 5776), True, 'import numpy as np\n'), ((5954, 5981), 'numpy.sqrt', 'np.sqrt', (['D[:best_dimension]'], {}), '(D[:best_dimension])\n', (5961, 5981), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.core.function_base import linspace
EPS = 1e-9
class CameraSensor:
"""
Defines the camera sensor properties
"""
def __init__(self, image_size, pitch, RGB=False, name=None, bayer_pattern=None):
self.img_size = np.array([image_size[0], image_size[1]]) # image height, width (in pixels)
self.img_cntr = np.array([int(image_size[0]/2), int(image_size[1]/2)])
if RGB:
self.type = 'RGB'
self.C = 3 # number of channels
if bayer_pattern is not None:
self.bayer_pattern = bayer_pattern
else:
self.bayer_pattern = 'RGGB'
else:
self.type = 'Mono'
self.C = 1
if len(pitch)==1:
self.px_size = np.array([pitch[0], pitch[0]]) # should be in meters
else:
self.px_size = np.array([pitch[0], pitch[1]]) # should be in meters
self.name = name # name of camera sensor (optional)
# create coordinate system for image plane
dh, dw = self.px_size[0], self.px_size[1]
h , w = dh*self.img_size[0], dw*self.img_size[1]
self.x_sensor = np.linspace( -w/2 + dw/2 , +w/2 - dw/2 + EPS, self.img_size[1])
self.y_sensor = np.linspace( -h/2 + dh/2 , +h/2 - dh/2 + EPS, self.img_size[0])
self.X_sensor, self.Y_sensor = np.meshgrid(self.x_sensor, self.y_sensor)
def get_physical_sensor_size(self):
"""
Returns the physical sensor size (in units of mm x mm)
"""
height_mm = float(self.px_size[0]*self.img_size[0])*1000
width_mm = float(self.px_size[1]*self.img_size[1])*1000
return height_mm, width_mm
class Lens:
def __init__(self, f, D=None):
self.f = f
self.D = D # D is set to None if its value is irrelevant
class Mask:
"""
Class for creating an amplitude/phase mask.
"""
def __init__(self, mask_pattern, mask_size):
"""
mask_pattern (numpy.ndarray): 2D array of values (real or complex)
mask_size (list or numpy.array): Physical size of mask (h x w). Units of meters
mask_pattern array values should have magnitude between 0 and 1 for realistic mask patterns.
"""
self.mask = mask_pattern # mask pattern can be a complex-valued as well (numpy 2D array)
self.mask_size = np.array([mask_size[0], mask_size[1]])
self.mask_pitch = np.array([mask_size[0]/mask_pattern.shape[0], mask_size[1]/mask_pattern.shape[1]])
# create coordinate system on mask-plane
h, w = self.mask_size[0], self.mask_size[1]
dh, dw = self.mask_pitch[0], self.mask_pitch[1]
self.x_mask = np.linspace( -w/2 + dw/2 , +w/2 - dw/2 + EPS, num=self.mask.shape[1])
self.y_mask = np.linspace( -h/2 + dh/2 , +h/2 - dh/2 + EPS, num=self.mask.shape[0])
self.X_mask, self.Y_mask = np.meshgrid(self.x_mask, self.y_mask)
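# Hedged usage sketch (not part of the original module; numbers are illustrative, and units follow
# the docstrings above, i.e. pixel pitch and mask size in meters):
# sensor = CameraSensor(image_size=(480, 640), pitch=[3.45e-6], name="mono_demo")
# print(sensor.get_physical_sensor_size())          # ~(1.656, 2.208) mm for this pitch
# mask = Mask(mask_pattern=np.ones((128, 128)), mask_size=[2e-3, 2e-3])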
|
[
"numpy.meshgrid",
"numpy.array",
"numpy.float",
"numpy.linspace"
] |
[((264, 304), 'numpy.array', 'np.array', (['[image_size[0], image_size[1]]'], {}), '([image_size[0], image_size[1]])\n', (272, 304), True, 'import numpy as np\n'), ((1217, 1286), 'numpy.linspace', 'np.linspace', (['(-w / 2 + dw / 2)', '(+w / 2 - dw / 2 + EPS)', 'self.img_size[1]'], {}), '(-w / 2 + dw / 2, +w / 2 - dw / 2 + EPS, self.img_size[1])\n', (1228, 1286), True, 'import numpy as np\n'), ((1305, 1374), 'numpy.linspace', 'np.linspace', (['(-h / 2 + dh / 2)', '(+h / 2 - dh / 2 + EPS)', 'self.img_size[0]'], {}), '(-h / 2 + dh / 2, +h / 2 - dh / 2 + EPS, self.img_size[0])\n', (1316, 1374), True, 'import numpy as np\n'), ((1408, 1449), 'numpy.meshgrid', 'np.meshgrid', (['self.x_sensor', 'self.y_sensor'], {}), '(self.x_sensor, self.y_sensor)\n', (1419, 1449), True, 'import numpy as np\n'), ((2438, 2476), 'numpy.array', 'np.array', (['[mask_size[0], mask_size[1]]'], {}), '([mask_size[0], mask_size[1]])\n', (2446, 2476), True, 'import numpy as np\n'), ((2503, 2594), 'numpy.array', 'np.array', (['[mask_size[0] / mask_pattern.shape[0], mask_size[1] / mask_pattern.shape[1]]'], {}), '([mask_size[0] / mask_pattern.shape[0], mask_size[1] / mask_pattern\n .shape[1]])\n', (2511, 2594), True, 'import numpy as np\n'), ((2765, 2840), 'numpy.linspace', 'np.linspace', (['(-w / 2 + dw / 2)', '(+w / 2 - dw / 2 + EPS)'], {'num': 'self.mask.shape[1]'}), '(-w / 2 + dw / 2, +w / 2 - dw / 2 + EPS, num=self.mask.shape[1])\n', (2776, 2840), True, 'import numpy as np\n'), ((2857, 2932), 'numpy.linspace', 'np.linspace', (['(-h / 2 + dh / 2)', '(+h / 2 - dh / 2 + EPS)'], {'num': 'self.mask.shape[0]'}), '(-h / 2 + dh / 2, +h / 2 - dh / 2 + EPS, num=self.mask.shape[0])\n', (2868, 2932), True, 'import numpy as np\n'), ((2962, 2999), 'numpy.meshgrid', 'np.meshgrid', (['self.x_mask', 'self.y_mask'], {}), '(self.x_mask, self.y_mask)\n', (2973, 2999), True, 'import numpy as np\n'), ((788, 812), 'numpy.array', 'np.array', (['[pitch, pitch]'], {}), '([pitch, pitch])\n', (796, 812), True, 'import numpy as np\n'), ((884, 914), 'numpy.array', 'np.array', (['[pitch[0], pitch[1]]'], {}), '([pitch[0], pitch[1]])\n', (892, 914), True, 'import numpy as np\n'), ((1602, 1646), 'numpy.float', 'np.float', (['(self.px_size[0] * self.img_size[0])'], {}), '(self.px_size[0] * self.img_size[0])\n', (1610, 1646), True, 'import numpy as np\n'), ((1669, 1713), 'numpy.float', 'np.float', (['(self.px_size[1] * self.img_size[1])'], {}), '(self.px_size[1] * self.img_size[1])\n', (1677, 1713), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Segmentation methods to find regions of interest in the time and frequency domain.
"""
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: New BSD License
# =============================================================================
# Load the modules
# =============================================================================
# Import external modules
import numpy as np
from scipy.stats import iqr
from skimage import measure
import pandas as pd
import sys
_MIN_ = sys.float_info.min
# Import internal modules
from maad.util import (plot2d, rand_cmap)
#%%
#****************************************************************************
# private functions
#****************************************************************************
def _double_threshold_rel (im, bin_std=6, bin_per=0.5,
verbose=False, display=False, savefig=None, **kwargs):
"""
Binarize an image based on a double relative threshold.
The values used for the thresholding depends on the values found in the
image. => relative threshold
Parameters
----------
im : 2d ndarray of scalars
Spectrogram (or image)
bin_std : scalar, optional, default is 6
Set the first threshold. This threshold is not an absolute value but
depends on values that are similar to 75th percentile (pseudo_mean) and
a sort of std value of the image.
threshold1 = "pseudo_mean" + "std" * bin_std
Value higher than threshold1 are set to 1, they are the seeds for
the second step. The others are set to 0.
    bin_per: scalar, optional, default is 0.5
Set how much the second threshold is lower than the first
threshold value. From 0 to 1. ex: 0.1 = 10 %.
        threshold2 = threshold1 * (1-bin_per)
Value higher than threshold2 and connected (directly or not) to the
seeds are set to 1, the other remains 0
verbose : boolean, optional, default is False
print messages
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
\*\*kwargs, optional. This parameter is used by plt.plot and savefig functions
- savefilename : str, optional, default :'_spectro_after_noise_subtraction.png'
Postfix of the figure filename
- figsize : tuple of integers, optional, default: (4,10)
width, height in inches.
- title : string, optional, default : 'Spectrogram'
title of the figure
- xlabel : string, optional, default : 'Time [s]'
label of the horizontal axis
- ylabel : string, optional, default : 'Amplitude [AU]'
label of the vertical axis
- cmap : string or Colormap object, optional, default is 'gray'
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
- vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
- extent : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
- dpi : integer, optional, default is 96
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
- format : string, optional, default is 'png'
Format to save the figure
... and more, see matplotlib
Returns
-------
im_out: binary image
References
----------
.. [1] from MATLAB: Threshold estimation (from Oliveira et al, 2015)
Adapted by <NAME> Dec 12, 2017
"""
# test if im is full of zeros
if not im.any() :
im_out = np.zeros(im.shape)
else:
# Compute the qth percentile of the data along the specified axis
val1 = np.percentile(im[np.where(im>0)],75) # value corresponding to the limit between the 75% lowest value and 25% largest value
# The interquartile range (IQR) is the difference between the 75th and
# 25th percentile of the data. It is a measure of the dispersion similar
# to standard deviation or variance, but is much more robust against outliers
val2 = iqr(im[np.where(im>0)])*bin_std
# Threshold : qth percentile + sort of std
h_th = val1 + val2
# Low threshold limit
l_th = (h_th-h_th*bin_per)
if verbose :
print(72 * '_')
print('Double thresholding with values relative to the image...')
print ('**********************************************************')
print (' high threshold value %.2f | low threshold value %.2f' % (h_th, l_th))
print ('**********************************************************')
# binarisation
im_t1 = im > h_th # mask1
im_t2 = im > l_th # mask2
im_t3 = im * im_t1 # selected parts of the image
#find index of regions which meet the criteria
conncomp_t2 = measure.label(im_t2) #Find connected components in binary image
rprops = measure.regionprops(conncomp_t2,im_t3)
rprops_mean_intensity = [region['mean_intensity'] for region in rprops]
rprops_mean_intensity = np.asarray(rprops_mean_intensity)
rprops_label = [region['label'] for region in rprops]
rprops_label = np.asarray(rprops_label)
[ind]=np.where(rprops_mean_intensity>0)
        im_out = np.isin(conncomp_t2, rprops_label[ind]) # test if the index is in the matrix of indices
im_out =im_out*1 # boolean to 0,1 conversion
# Display
if display :
ylabel =kwargs.pop('ylabel','Frequency [Hz]')
xlabel =kwargs.pop('xlabel','Time [sec]')
title =kwargs.pop('title','binary image => MASK')
cmap =kwargs.pop('cmap','gray')
vmin=kwargs.pop('vmin',0)
vmax=kwargs.pop('vmax',1)
extent=kwargs.pop('extent',None)
if extent is None :
xlabel = 'pseudotime [points]'
ylabel = 'pseudofrequency [points]'
_, fig = plot2d (im_out,
extent = extent,
title = title,
ylabel = ylabel,
xlabel = xlabel,
vmin = vmin,
vmax = vmax,
cmap = cmap,
**kwargs)
# SAVE FIGURE
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename=kwargs.pop('filename','_spectro_binary')
filename = savefig+filename+'.'+format
if verbose :
print('\n''save figure : %s' %filename)
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return im_out
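# A minimal usage sketch (assuming `Sxx_dB` is a dB spectrogram already
# loaded as a 2D ndarray):
#     im_mask = _double_threshold_rel(Sxx_dB, bin_std=6, bin_per=0.5)
# im_mask is a 0/1 array of the same shape: pixels above threshold1 act as
# seeds, and connected regions above threshold2 that contain at least one
# seed are kept.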
#%%
def _double_threshold_abs(im, bin_h=0.7, bin_l=0.2,
verbose=False,display=False, savefig=None, **kwargs):
"""
    Binarize an image based on a double absolute threshold.
The values used for the thresholding are independent of the values in the
image => absolute threshold
Parameters
----------
im : 2d ndarray of scalars
Spectrogram (or image)
bin_h : scalar, optional, default is 0.7
Set the first threshold. Value higher than this value are set to 1,
the others are set to 0. They are the seeds for the second step
    bin_l: scalar, optional, default is 0.2
Set the second threshold. Value higher than this value and connected
to the seeds or to other pixels connected to the seeds are set to 1,
the other remains 0
verbose : boolean, optional, default is False
print messages
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
\*\*kwargs, optional. This parameter is used by plt.plot and savefig functions
- savefilename : str, optional, default :'_spectro_after_noise_subtraction.png'
Postfix of the figure filename
- figsize : tuple of integers, optional, default: (4,10)
width, height in inches.
- title : string, optional, default : 'Spectrogram'
title of the figure
- xlabel : string, optional, default : 'Time [s]'
label of the horizontal axis
- ylabel : string, optional, default : 'Amplitude [AU]'
label of the vertical axis
- cmap : string or Colormap object, optional, default is 'gray'
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
- vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
- extent : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
- dpi : integer, optional, default is 96
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
- format : string, optional, default is 'png'
Format to save the figure
... and more, see matplotlib
Returns
-------
im_out: binary image
References
----------
.. [1] <NAME>. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence. 1986; vol. 8, pp.679-698. `DOI: 10.1109/TPAMI.1986.4767851 <https://doi.org/10.1109/TPAMI.1986.4767851>`_
"""
# binarisation
im_t1 = im > bin_h # mask1
im_t2 = im > bin_l # mask2
im_t3 = im * im_t1 # selected parts of the image
#find index of regions which meet the criteria
conncomp_t2 = measure.label(im_t2) #Find connected components in binary image
rprops = measure.regionprops(conncomp_t2,im_t3)
rprops_mean_intensity = [region['mean_intensity'] for region in rprops]
rprops_mean_intensity = np.asarray(rprops_mean_intensity)
rprops_label = [region['label'] for region in rprops]
rprops_label = np.asarray(rprops_label)
[ind]=np.where(rprops_mean_intensity>0)
    im_out = np.isin(conncomp_t2, rprops_label[ind]) # test if the index is in the matrix of indices
im_out =im_out*1 # boolean to 0,1 conversion
if verbose :
print(72 * '_')
print('Double thresholding with absolute values...')
print ('**********************************************************')
        print (' Number of rois %d | Rois cover %.2f%%' % (len(rprops_label),
                                                            im_out.sum()/(im_out.shape[1]*im_out.shape[0])*100))
print ('**********************************************************')
# Display
if display :
ylabel =kwargs.pop('ylabel','Frequency [Hz]')
xlabel =kwargs.pop('xlabel','Time [sec]')
title =kwargs.pop('title','binary image => MASK')
cmap =kwargs.pop('cmap','gray')
vmin=kwargs.pop('vmin',0)
vmax=kwargs.pop('vmax',1)
extent=kwargs.pop('extent',None)
if extent is None :
xlabel = 'pseudotime [points]'
ylabel = 'pseudofrequency [points]'
_, fig = plot2d (im_out,
extent = extent,
title = title,
ylabel = ylabel,
xlabel = xlabel,
vmin = vmin,
vmax = vmax,
cmap = cmap,
**kwargs)
# SAVE FIGURE
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename=kwargs.pop('filename','_spectro_binary')
filename = savefig+filename+'.'+format
if verbose :
print('\n''save figure : %s' %filename)
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return im_out
#%%
# =============================================================================
# public functions
# =============================================================================
def create_mask(im, mode_bin = 'relative',
verbose= False, display = False, savefig = None, **kwargs):
"""
Binarize an image based on a double threshold.
Parameters
----------
im : 2d ndarray of scalars
Spectrogram (or image)
mode_bin : string in {'relative', 'absolute'}, optional, default is 'relative'
if 'absolute' [1]_ , a double threshold with absolute value is performed
with two parameters (see \*\*kwargs section)
if 'relative' [2]_, a relative double threshold is performed with two
parameters (see \*\*kwargs section)
verbose : boolean, optional, default is False
print messages
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
\*\*kwargs, optional. This parameter is used by the maad functions as well
as the plt.plot and savefig functions.
All the input arguments required or optional in the signature of the
functions above can be passed as kwargs :
if 'absolute' [1]_
- bin_h : scalar, optional, default is 0.7
Set the first threshold. Value higher than this value are set to 1,
the others are set to 0. They are the seeds for the second step
        - bin_l: scalar, optional, default is 0.2
Set the second threshold. Value higher than this value and connected
to the seeds or to other pixels connected to the seeds (6-connectivity)
are set to 1, the other remains 0
if 'relative' [2]_ :
- bin_std : scalar, optional, default is 6
bin_std is needed to compute the threshold1.
This threshold is not an absolute value but depends on values that are
similar to 75th percentile (pseudo_mean) and a sort of std value of
the image.
threshold1 = "pseudo_mean" + "std" * bin_std
Value higher than threshold1 are set to 1, they are the seeds for
the second step. The others are set to 0.
        - bin_per: scalar, optional, default is 0.5
Set how much the second threshold is lower than the first
threshold value. From 0 to 1. ex: 0.1 = 10 %.
            threshold2 = threshold1 * (1-bin_per)
Value higher than threshold2 and connected (6-connectivity) to the
seeds are set to 1, the other remains 0
... and more, see matplotlib
Returns
-------
im_bin: binary image
References
----------
.. [1] <NAME>. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence. 1986; vol. 8, pp.679-698. `DOI: 10.1109/TPAMI.1986.4767851 <https://doi.org/10.1109/TPAMI.1986.4767851>`_
.. [2] from MATLAB: Threshold estimation (Oliveira et al, 2015)
Examples
--------
Load audio recording and convert it into spectrogram
>>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
>>> Sxx,tn,fn,ext = maad.sound.spectrogram (s, fs, fcrop=(0,10000))
Convert linear spectrogram into dB
>>> Sxx_dB = maad.util.power2dB(Sxx) +96
Smooth the spectrogram
>>> Sxx_dB_blurred = maad.rois.smooth(Sxx_dB)
Detection of the acoustic signature => creation of a mask
>>> im_bin = maad.rois.create_mask(Sxx_dB_blurred, bin_std=1.5, bin_per=0.25, mode='relative')
Plot spectrograms
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(2, 1)
>>> maad.util.plot2d(Sxx_dB, ax=ax1, extent=ext, title='original', vmin=10, vmax=70)
>>> maad.util.plot2d(im_bin, ax=ax2, extent=ext, title='mask)')
>>> fig.set_size_inches(13,8)
>>> fig.tight_layout()
"""
if mode_bin == 'relative':
bin_std=kwargs.pop('bin_std', 6)
bin_per=kwargs.pop('bin_per', 0.5)
im_bin = _double_threshold_rel(im, bin_std, bin_per,
verbose, display, savefig, **kwargs)
elif mode_bin == 'absolute':
bin_h=kwargs.pop('bin_h', 0.7)
bin_l=kwargs.pop('bin_l', 0.3)
im_bin = _double_threshold_abs(im, bin_h, bin_l,
verbose, display, savefig, **kwargs)
return im_bin
#%%
def select_rois(im_bin, min_roi=None ,max_roi=None,
verbose=False, display=False, savefig = None, **kwargs):
"""
Select regions of interest based on its dimensions.
The input is a binary mask, and the output is an image with labelled pixels.
Parameters
----------
im : 2d ndarray of scalars
Spectrogram (or image)
min_roi, max_roi : scalars, optional, default : None
Define the minimum and the maximum area possible for an ROI. If None,
the minimum ROI area is 1 pixel and the maximum ROI area is the area of
the image
verbose : boolean, optional, default is False
print messages
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
\*\*kwargs, optional. This parameter is used by plt.plot and savefig functions
- savefilename : str, optional, default :'_spectro_after_noise_subtraction.png'
Postfix of the figure filename
- figsize : tuple of integers, optional, default: (4,10)
width, height in inches.
- title : string, optional, default : 'Spectrogram'
title of the figure
- xlabel : string, optional, default : 'Time [s]'
label of the horizontal axis
- ylabel : string, optional, default : 'Amplitude [AU]'
label of the vertical axis
- cmap : string or Colormap object, optional, default is 'gray'
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
- vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
- extent : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
- dpi : integer, optional, default is 96
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
- format : string, optional, default is 'png'
Format to save the figure
... and more, see matplotlib
Returns
-------
im_rois: 2d ndarray
image with labels as values
rois: pandas DataFrame
        Regions of interest; their descriptors will be computed later.
        The DataFrame has the column names: ``labelID``, ``label``, ``min_y``, ``min_x``,
        ``max_y``, ``max_x``.
        Use the function ``maad.util.format_features`` before using
        centroid_features in order to format the ``rois`` DataFrame
        correctly.
Examples
--------
Load audio recording compute the spectrogram in dB.
>>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
>>> Sxx,tn,fn,ext = maad.sound.spectrogram (s, fs, fcrop=(0,20000), display=True)
>>> Sxx_dB = maad.util.power2dB(Sxx) +96
Smooth the spectrogram
>>> Sxx_dB_blurred = maad.sound.smooth(Sxx_dB)
Using image binarization, detect isolated region in the time-frequency domain with high density of energy, i.e. regions of interest (ROIs).
>>> im_bin = maad.rois.create_mask(Sxx_dB_blurred, bin_std=1.5, bin_per=0.5, mode='relative')
Select ROIs from the binary mask.
>>> im_rois, df_rois = maad.rois.select_rois(im_bin, display=True)
    We detected the background noise as an ROI, and multiple ROIs are mixed in a single region. For better results, it is advised to preprocess the spectrogram to remove the background noise before creating the mask.
>>> Sxx_noNoise = maad.sound.median_equalizer(Sxx)
>>> Sxx_noNoise_dB = maad.util.power2dB(Sxx_noNoise)
>>> Sxx_noNoise_dB_blurred = maad.sound.smooth(Sxx_noNoise_dB)
>>> im_bin2 = maad.rois.create_mask(Sxx_noNoise_dB_blurred, bin_std=6, bin_per=0.5, mode='relative')
>>> im_rois2, df_rois2 = maad.rois.select_rois(im_bin2, display=True)
"""
# test if max_roi and min_roi are defined
if max_roi is None:
        # the maximum ROI area is set to the area of the image
max_roi=im_bin.shape[0]*im_bin.shape[1]
if min_roi is None:
# the min ROI area is set to 1 pixel
min_roi = 1
if verbose :
print(72 * '_')
print('Automatic ROIs selection in progress...')
print ('**********************************************************')
print (' Min ROI area %d pix² | Max ROI area %d pix²' % (min_roi, max_roi))
print ('**********************************************************')
labels = measure.label(im_bin) #Find connected components in binary image
rprops = measure.regionprops(labels)
rois_bbox = []
rois_label = []
for roi in rprops:
# select the rois depending on their size
if (roi.area >= min_roi) & (roi.area <= max_roi):
# get the label
rois_label.append(roi.label)
            # get rectangle coordinates
rois_bbox.append (roi.bbox)
    im_rois = np.isin(labels, rois_label) # test if the index is in the matrix of indices
im_rois = im_rois* labels
# create a list with labelID and labelName (None in this case)
rois_label = list(zip(rois_label,['unknown']*len(rois_label)))
# test if there is a roi
if len(rois_label)>0 :
        # create a dataframe rois containing the coordinates and the label
rois = np.concatenate((np.asarray(rois_label), np.asarray(rois_bbox)), axis=1)
rois = pd.DataFrame(rois, columns = ['labelID', 'label', 'min_y','min_x','max_y', 'max_x'])
# force type to integer
rois = rois.astype({'label': str,'min_y':int,'min_x':int,'max_y':int, 'max_x':int})
# compensate half-open interval of bbox from skimage
rois.max_y -= 1
rois.max_x -= 1
else :
rois = []
rois = pd.DataFrame(rois, columns = ['labelID', 'label', 'min_y','min_x','max_y', 'max_x'])
rois = rois.astype({'label': str,'min_y':int,'min_x':int,'max_y':int, 'max_x':int})
# Display
if display :
ylabel =kwargs.pop('ylabel','Frequency [Hz]')
xlabel =kwargs.pop('xlabel','Time [sec]')
title =kwargs.pop('title','Selected ROIs')
extent=kwargs.pop('extent',None)
if extent is None :
xlabel = 'pseudotime [points]'
ylabel = 'pseudofrequency [points]'
# randcmap = rand_cmap(len(rois_label))
# cmap =kwargs.pop('cmap',randcmap)
cmap =kwargs.pop('cmap','tab20')
_, fig = plot2d (im_rois,
extent = extent,
title = title,
ylabel = ylabel,
xlabel = xlabel,
cmap = cmap,
**kwargs)
# SAVE FIGURE
if savefig is not None :
dpi =kwargs.pop('dpi',96)
format=kwargs.pop('format','png')
filename=kwargs.pop('filename','_spectro_selectrois')
filename = savefig+filename+'.'+format
fig.savefig(filename, bbox_inches='tight', dpi=dpi, format=format,
**kwargs)
return im_rois, rois
#%%
def rois_to_imblobs(im_zeros, rois):
"""
Take a matrix full of zeros and add ones in delimited regions defined by rois.
Parameters
----------
im_zeros : ndarray
matrix full of zeros with the size to the image where the rois come from.
rois : DataFrame
rois must have the columns names:((min_y, min_x, max_y, max_x) which
correspond to the bounding box coordinates
Returns
-------
im_blobs : ndarray
matrix with 1 corresponding to the rois and 0 elsewhere
Examples
--------
>>> from maad import rois, util
>>> import pandas as pd
>>> import numpy as np
>>> im_zeros = np.zeros((100,300))
>>> df_rois = pd.DataFrame({'min_y': [10, 40], 'min_x': [10, 200], 'max_y': [60, 80], 'max_x': [110, 250]})
>>> im_blobs = rois.rois_to_imblobs(im_zeros, df_rois)
>>> util.plot2d(im_blobs)
"""
# Check format of the input data
if type(rois) is not pd.core.frame.DataFrame :
raise TypeError('Rois must be of type pandas DataFrame')
    if not all(col in rois for col in ('min_y', 'min_x', 'max_y', 'max_x')) :
raise TypeError('Array must be a Pandas DataFrame with column names:((min_y, min_x, max_y, max_x). Check example in documentation.')
# select the columns
rois_bbox = rois[['min_y', 'min_x', 'max_y', 'max_x']]
# roi to image blob
for min_y, min_x, max_y, max_x in rois_bbox.values:
im_zeros[int(min_y):int(max_y+1), int(min_x):int(max_x+1)] = 1
im_blobs = im_zeros.astype(int)
return im_blobs
|
[
"pandas.DataFrame",
"numpy.isin",
"numpy.asarray",
"numpy.zeros",
"maad.util.plot2d",
"skimage.measure.label",
"numpy.where",
"skimage.measure.regionprops"
] |
[((12272, 12292), 'skimage.measure.label', 'measure.label', (['im_t2'], {}), '(im_t2)\n', (12285, 12292), False, 'from skimage import measure\n'), ((12354, 12393), 'skimage.measure.regionprops', 'measure.regionprops', (['conncomp_t2', 'im_t3'], {}), '(conncomp_t2, im_t3)\n', (12373, 12393), False, 'from skimage import measure\n'), ((12511, 12544), 'numpy.asarray', 'np.asarray', (['rprops_mean_intensity'], {}), '(rprops_mean_intensity)\n', (12521, 12544), True, 'import numpy as np\n'), ((12634, 12658), 'numpy.asarray', 'np.asarray', (['rprops_label'], {}), '(rprops_label)\n', (12644, 12658), True, 'import numpy as np\n'), ((12682, 12717), 'numpy.where', 'np.where', (['(rprops_mean_intensity > 0)'], {}), '(rprops_mean_intensity > 0)\n', (12690, 12717), True, 'import numpy as np\n'), ((12738, 12777), 'numpy.isin', 'np.isin', (['conncomp_t2', 'rprops_label[ind]'], {}), '(conncomp_t2, rprops_label[ind])\n', (12745, 12777), True, 'import numpy as np\n'), ((25235, 25256), 'skimage.measure.label', 'measure.label', (['im_bin'], {}), '(im_bin)\n', (25248, 25256), False, 'from skimage import measure\n'), ((25318, 25345), 'skimage.measure.regionprops', 'measure.regionprops', (['labels'], {}), '(labels)\n', (25337, 25345), False, 'from skimage import measure\n'), ((25759, 25786), 'numpy.isin', 'np.isin', (['labels', 'rois_label'], {}), '(labels, rois_label)\n', (25766, 25786), True, 'import numpy as np\n'), ((4785, 4803), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (4793, 4803), True, 'import numpy as np\n'), ((6188, 6208), 'skimage.measure.label', 'measure.label', (['im_t2'], {}), '(im_t2)\n', (6201, 6208), False, 'from skimage import measure\n'), ((6274, 6313), 'skimage.measure.regionprops', 'measure.regionprops', (['conncomp_t2', 'im_t3'], {}), '(conncomp_t2, im_t3)\n', (6293, 6313), False, 'from skimage import measure\n'), ((6443, 6476), 'numpy.asarray', 'np.asarray', (['rprops_mean_intensity'], {}), '(rprops_mean_intensity)\n', (6453, 6476), True, 'import numpy as np\n'), ((6578, 6602), 'numpy.asarray', 'np.asarray', (['rprops_label'], {}), '(rprops_label)\n', (6588, 6602), True, 'import numpy as np\n'), ((6634, 6669), 'numpy.where', 'np.where', (['(rprops_mean_intensity > 0)'], {}), '(rprops_mean_intensity > 0)\n', (6642, 6669), True, 'import numpy as np\n'), ((6698, 6737), 'numpy.isin', 'np.isin', (['conncomp_t2', 'rprops_label[ind]'], {}), '(conncomp_t2, rprops_label[ind])\n', (6705, 6737), True, 'import numpy as np\n'), ((13912, 14031), 'maad.util.plot2d', 'plot2d', (['im_out'], {'extent': 'extent', 'title': 'title', 'ylabel': 'ylabel', 'xlabel': 'xlabel', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': 'cmap'}), '(im_out, extent=extent, title=title, ylabel=ylabel, xlabel=xlabel,\n vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n', (13918, 14031), False, 'from maad.util import plot2d, rand_cmap\n'), ((26263, 26351), 'pandas.DataFrame', 'pd.DataFrame', (['rois'], {'columns': "['labelID', 'label', 'min_y', 'min_x', 'max_y', 'max_x']"}), "(rois, columns=['labelID', 'label', 'min_y', 'min_x', 'max_y',\n 'max_x'])\n", (26275, 26351), True, 'import pandas as pd\n'), ((26653, 26741), 'pandas.DataFrame', 'pd.DataFrame', (['rois'], {'columns': "['labelID', 'label', 'min_y', 'min_x', 'max_y', 'max_x']"}), "(rois, columns=['labelID', 'label', 'min_y', 'min_x', 'max_y',\n 'max_x'])\n", (26665, 26741), True, 'import pandas as pd\n'), ((27407, 27505), 'maad.util.plot2d', 'plot2d', (['im_rois'], {'extent': 'extent', 'title': 'title', 'ylabel': 'ylabel', 'xlabel': 'xlabel', 'cmap': 'cmap'}), '(im_rois, extent=extent, title=title, ylabel=ylabel, xlabel=xlabel,\n cmap=cmap, **kwargs)\n', (27413, 27505), False, 'from maad.util import plot2d, rand_cmap\n'), ((7466, 7585), 'maad.util.plot2d', 'plot2d', (['im_out'], {'extent': 'extent', 'title': 'title', 'ylabel': 'ylabel', 'xlabel': 'xlabel', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': 'cmap'}), '(im_out, extent=extent, title=title, ylabel=ylabel, xlabel=xlabel,\n vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n', (7472, 7585), False, 'from maad.util import plot2d, rand_cmap\n'), ((4933, 4949), 'numpy.where', 'np.where', (['(im > 0)'], {}), '(im > 0)\n', (4941, 4949), True, 'import numpy as np\n'), ((26190, 26212), 'numpy.asarray', 'np.asarray', (['rois_label'], {}), '(rois_label)\n', (26200, 26212), True, 'import numpy as np\n'), ((26214, 26235), 'numpy.asarray', 'np.asarray', (['rois_bbox'], {}), '(rois_bbox)\n', (26224, 26235), True, 'import numpy as np\n'), ((5330, 5346), 'numpy.where', 'np.where', (['(im > 0)'], {}), '(im > 0)\n', (5338, 5346), True, 'import numpy as np\n')]
|
import logging
import os
import numpy as np
import argparse
import open3d as o3d
from urllib.request import urlretrieve
from util.visualization import get_colored_point_cloud_feature
from util.misc import extract_features
from model.resunet import ResUNetBN2C
import torch
if not os.path.isfile('ResUNetBN2C-16feat-3conv.pth'):
print('Downloading weights...')
urlretrieve(
"https://node1.chrischoy.org/data/publications/fcgf/2019-09-18_14-15-59.pth",
'ResUNetBN2C-16feat-3conv.pth')
if not os.path.isfile('redkitchen-20.ply'):
print('Downloading a mesh...')
urlretrieve("https://node1.chrischoy.org/data/publications/fcgf/redkitchen-20.ply",
'redkitchen-20.ply')
def demo(config):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load(config.model)
model = ResUNetBN2C(1, 16, normalize_feature=True, conv1_kernel_size=3, D=3)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
model = model.to(device)
pcd = o3d.io.read_point_cloud(config.input)
xyz_down, feature = extract_features(
model,
xyz=np.array(pcd.points),
voxel_size=config.voxel_size,
device=device,
skip_check=True)
vis_pcd = o3d.geometry.PointCloud()
vis_pcd.points = o3d.utility.Vector3dVector(xyz_down)
vis_pcd = get_colored_point_cloud_feature(vis_pcd,
feature.detach().cpu().numpy(),
config.voxel_size)
#o3d.visualization.draw_geometries([vis_pcd])
#o3d.io.write_triangle_mesh('/home/curnis/result/fcgf/mesh.obj', vis_pcd)
print(type(vis_pcd))
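  # A hedged note on the objects above: the backbone is built as
  # ResUNetBN2C(1, 16, ...), so `feature` presumably holds one 16-dimensional
  # descriptor per voxel-downsampled point, `xyz_down` the matching Nx3
  # coordinates, and get_colored_point_cloud_feature colors the point cloud
  # from those descriptors for inspection.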
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--input',
default='redkitchen-20.ply',
type=str,
help='path to a pointcloud file')
parser.add_argument(
'-m',
'--model',
default='ResUNetBN2C-16feat-3conv.pth',
type=str,
help='path to latest checkpoint (default: None)')
parser.add_argument(
'--voxel_size',
default=0.025,
type=float,
help='voxel size to preprocess point cloud')
config = parser.parse_args()
demo(config)
|
[
"argparse.ArgumentParser",
"torch.load",
"open3d.io.read_point_cloud",
"open3d.geometry.PointCloud",
"model.resunet.ResUNetBN2C",
"urllib.request.urlretrieve",
"os.path.isfile",
"torch.cuda.is_available",
"numpy.array",
"open3d.utility.Vector3dVector"
] |
[((283, 329), 'os.path.isfile', 'os.path.isfile', (['"""ResUNetBN2C-16feat-3conv.pth"""'], {}), "('ResUNetBN2C-16feat-3conv.pth')\n", (297, 329), False, 'import os\n'), ((367, 498), 'urllib.request.urlretrieve', 'urlretrieve', (['"""https://node1.chrischoy.org/data/publications/fcgf/2019-09-18_14-15-59.pth"""', '"""ResUNetBN2C-16feat-3conv.pth"""'], {}), "(\n 'https://node1.chrischoy.org/data/publications/fcgf/2019-09-18_14-15-59.pth'\n , 'ResUNetBN2C-16feat-3conv.pth')\n", (378, 498), False, 'from urllib.request import urlretrieve\n'), ((510, 545), 'os.path.isfile', 'os.path.isfile', (['"""redkitchen-20.ply"""'], {}), "('redkitchen-20.ply')\n", (524, 545), False, 'import os\n'), ((582, 695), 'urllib.request.urlretrieve', 'urlretrieve', (['"""https://node1.chrischoy.org/data/publications/fcgf/redkitchen-20.ply"""', '"""redkitchen-20.ply"""'], {}), "(\n 'https://node1.chrischoy.org/data/publications/fcgf/redkitchen-20.ply',\n 'redkitchen-20.ply')\n", (593, 695), False, 'from urllib.request import urlretrieve\n'), ((809, 833), 'torch.load', 'torch.load', (['config.model'], {}), '(config.model)\n', (819, 833), False, 'import torch\n'), ((844, 912), 'model.resunet.ResUNetBN2C', 'ResUNetBN2C', (['(1)', '(16)'], {'normalize_feature': '(True)', 'conv1_kernel_size': '(3)', 'D': '(3)'}), '(1, 16, normalize_feature=True, conv1_kernel_size=3, D=3)\n', (855, 912), False, 'from model.resunet import ResUNetBN2C\n'), ((1015, 1052), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['config.input'], {}), '(config.input)\n', (1038, 1052), True, 'import open3d as o3d\n'), ((1231, 1256), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1254, 1256), True, 'import open3d as o3d\n'), ((1276, 1312), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['xyz_down'], {}), '(xyz_down)\n', (1302, 1312), True, 'import open3d as o3d\n'), ((1693, 1718), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1716, 1718), False, 'import argparse\n'), ((755, 780), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (778, 780), False, 'import torch\n'), ((1116, 1136), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (1124, 1136), True, 'import numpy as np\n')]
|
import numpy as np
import sys
import os, psutil
from collections import deque
def mem() :
process = psutil.Process(os.getpid())
print(process.memory_info().rss / 1000000, "Mb", file=sys.stderr)
class Graph:
def neighbors(self, node):
        return self.neighbors_array[self.index[node]:self.index[node]+self.deg[node]] # numpy basic slicing returns a view here, not a copy
def __init__(self, left, right, number_nodes):
self.nb_nodes = number_nodes
self.nb_edges = len(left)
        uniques, counts = np.unique(np.concatenate((left, right)), return_counts=True)
        self.deg = np.zeros(self.nb_nodes, dtype = np.int32)
        for unique, count in zip(uniques, counts):
            self.deg[unique] = count
self.index = np.zeros(self.nb_nodes, dtype = np.int32)
for i in range(1, self.nb_nodes):
self.index[i] = self.index[i-1]+self.deg[i-1]
mutable_index = np.copy(self.index)
self.neighbors_array = np.zeros(self.index[self.nb_nodes-1]+self.deg[self.nb_nodes-1], dtype = np.int32) # memory of size sum number of degrees
for a, b in zip(left, right):
self.neighbors_array[mutable_index[a]] = b
self.neighbors_array[mutable_index[b]] = a
mutable_index[a]+=1
mutable_index[b]+=1
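# A minimal sketch of the CSR-like adjacency layout built above, assuming a
# 3-node graph with edges (0,1) and (1,2): deg = [1, 2, 1], index = [0, 1, 3],
# neighbors_array = [1, 0, 2, 1], and Graph.neighbors(1) is the view [0, 2].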
if __name__ == "__main__":
argv = sys.argv[1:]
estimNbAretes = int(argv[1])
    # read the file and build the edge arrays
l1 = np.zeros(estimNbAretes, dtype=np.int32)
l2 = np.zeros(estimNbAretes, dtype=np.int32)
with open(argv[0], 'r') as f:
count=0
for line in f:
if line[0]!='#':
newline=line.split()
a = int(newline[0],10)
b = int(newline[1],10)
l1[count]=a
l2[count]=b
count+=1
maxIdx = max(np.max(l1),np.max(l2))
l1 = l1[:count]
l2 = l2[:count]
G = Graph(l1, l2, maxIdx+1)
del l1
del l2
mem()
    # print the number of nodes and edges
print("n="+str(G.nb_nodes))
print("m="+str(G.nb_edges))
    # compute and print the maximum degree
degMax=np.max(G.deg)
print("degmax="+str(degMax))
    # compute and print the distance between u and v
u=int(argv[2])
v=int(argv[3])
res = -1
    # run a BFS from u, using a queue (to_visit) for the traversal and keeping track of visited nodes (seen) and their distances (dist)
if u==v:
res = 0
else:
seen=np.zeros(maxIdx+1, dtype=np.int32)
dist=np.zeros(maxIdx+1, dtype=np.int32)
seen[u]=1
to_visit=deque([])
for w in G.neighbors(u):
seen[w]=1
dist[w]=1
to_visit.append(w)
while to_visit:
w=to_visit.popleft()
if w==v:
res = dist[w]
break
else:
for z in G.neighbors(w):
if not seen[z]:
to_visit.append(z)
seen[z]=1
dist[z]=dist[w]+1
mem()
if res == -1:
print("dist="+str(float('inf')))
else:
print("dist="+str(res))
|
[
"os.getpid",
"numpy.concatenate",
"numpy.copy",
"numpy.zeros",
"numpy.max",
"collections.deque"
] |
[((1543, 1582), 'numpy.zeros', 'np.zeros', (['estimNbAretes'], {'dtype': 'np.int32'}), '(estimNbAretes, dtype=np.int32)\n', (1551, 1582), True, 'import numpy as np\n'), ((1592, 1631), 'numpy.zeros', 'np.zeros', (['estimNbAretes'], {'dtype': 'np.int32'}), '(estimNbAretes, dtype=np.int32)\n', (1600, 1631), True, 'import numpy as np\n'), ((2238, 2251), 'numpy.max', 'np.max', (['G.deg'], {}), '(G.deg)\n', (2244, 2251), True, 'import numpy as np\n'), ((118, 129), 'os.getpid', 'os.getpid', ([], {}), '()\n', (127, 129), False, 'import os, psutil\n'), ((545, 584), 'numpy.zeros', 'np.zeros', (['self.nb_nodes'], {'dtype': 'np.int32'}), '(self.nb_nodes, dtype=np.int32)\n', (553, 584), True, 'import numpy as np\n'), ((686, 722), 'numpy.zeros', 'np.zeros', (['(maxIdx + 1)'], {'dtype': 'np.int32'}), '(maxIdx + 1, dtype=np.int32)\n', (694, 722), True, 'import numpy as np\n'), ((831, 870), 'numpy.zeros', 'np.zeros', (['self.nb_nodes'], {'dtype': 'np.int32'}), '(self.nb_nodes, dtype=np.int32)\n', (839, 870), True, 'import numpy as np\n'), ((997, 1016), 'numpy.copy', 'np.copy', (['self.index'], {}), '(self.index)\n', (1004, 1016), True, 'import numpy as np\n'), ((1048, 1138), 'numpy.zeros', 'np.zeros', (['(self.index[self.nb_nodes - 1] + self.deg[self.nb_nodes - 1])'], {'dtype': 'np.int32'}), '(self.index[self.nb_nodes - 1] + self.deg[self.nb_nodes - 1], dtype\n =np.int32)\n', (1056, 1138), True, 'import numpy as np\n'), ((1947, 1957), 'numpy.max', 'np.max', (['l1'], {}), '(l1)\n', (1953, 1957), True, 'import numpy as np\n'), ((1958, 1968), 'numpy.max', 'np.max', (['l2'], {}), '(l2)\n', (1964, 1968), True, 'import numpy as np\n'), ((2575, 2611), 'numpy.zeros', 'np.zeros', (['(maxIdx + 1)'], {'dtype': 'np.int32'}), '(maxIdx + 1, dtype=np.int32)\n', (2583, 2611), True, 'import numpy as np\n'), ((2621, 2657), 'numpy.zeros', 'np.zeros', (['(maxIdx + 1)'], {'dtype': 'np.int32'}), '(maxIdx + 1, dtype=np.int32)\n', (2629, 2657), True, 'import numpy as np\n'), ((2688, 2697), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (2693, 2697), False, 'from collections import deque\n'), ((622, 646), 'numpy.concatenate', 'np.concatenate', (['(l1, l2)'], {}), '((l1, l2))\n', (636, 646), True, 'import numpy as np\n')]
|
from keras import backend as K
from keras import optimizers
from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda,Dropout
from keras.models import Model
from models.common import sse, bce, mmd, sampling, kl_regu
from keras.losses import mean_squared_error,binary_crossentropy
import numpy as np
from tensorflow import set_random_seed
class XVAE:
def __init__(self, args):
self.args = args
self.vae = None
self.encoder = None
def build_model(self):
np.random.seed(42)
set_random_seed(42)
# Build the encoder network
# ------------ Input -----------------
s1_inp = Input(shape=(self.args.s1_input_size,))
s2_inp = Input(shape=(self.args.s2_input_size,))
inputs = [s1_inp, s2_inp]
# ------------ Concat Layer -----------------
x1 = Dense(self.args.ds, activation=self.args.act)(s1_inp)
x1 = BN()(x1)
x2 = Dense(self.args.ds, activation=self.args.act)(s2_inp)
x2 = BN()(x2)
x = Concatenate(axis=-1)([x1, x2])
x = Dense(self.args.ds, activation=self.args.act)(x)
x = BN()(x)
# ------------ Embedding Layer --------------
z_mean = Dense(self.args.ls, name='z_mean')(x)
z_log_sigma = Dense(self.args.ls, name='z_log_sigma', kernel_initializer='zeros')(x)
z = Lambda(sampling, output_shape=(self.args.ls,), name='z')([z_mean, z_log_sigma])
self.encoder = Model(inputs, [z_mean, z_log_sigma, z], name='encoder')
self.encoder.summary()
# Build the decoder network
# ------------ Dense out -----------------
latent_inputs = Input(shape=(self.args.ls,), name='z_sampling')
x = latent_inputs
x = Dense(self.args.ds, activation=self.args.act)(x)
x = BN()(x)
x=Dropout(self.args.dropout)(x)
# ------------ Dense branches ------------
x1 = Dense(self.args.ds, activation=self.args.act)(x)
x1 = BN()(x1)
x2 = Dense(self.args.ds, activation=self.args.act)(x)
x2 = BN()(x2)
# ------------ Out -----------------------
s1_out = Dense(self.args.s1_input_size, activation='sigmoid')(x1)
if self.args.integration == 'Clin+CNA':
s2_out = Dense(self.args.s2_input_size,activation='sigmoid')(x2)
else:
s2_out = Dense(self.args.s2_input_size)(x2)
decoder = Model(latent_inputs, [s1_out, s2_out], name='decoder')
decoder.summary()
outputs = decoder(self.encoder(inputs)[2])
self.vae = Model(inputs, outputs, name='vae_x')
if self.args.distance == "mmd":
true_samples = K.random_normal(K.stack([self.args.bs, self.args.ls]))
distance = mmd(true_samples, z)
if self.args.distance == "kl":
distance = kl_regu(z_mean,z_log_sigma)
s1_loss= binary_crossentropy(inputs[0], outputs[0])
if self.args.integration == 'Clin+CNA':
s2_loss =binary_crossentropy(inputs[1], outputs[1])
else:
s2_loss =mean_squared_error(inputs[1], outputs[1])
reconstruction_loss = s1_loss+s2_loss
vae_loss = K.mean(reconstruction_loss + self.args.beta * distance)
self.vae.add_loss(vae_loss)
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, amsgrad=False, decay=0.001)
self.vae.compile(optimizer=adam, metrics=[mean_squared_error, mean_squared_error])
self.vae.summary()
def train(self, s1_train, s2_train, s1_test, s2_test):
self.vae.fit([s1_train, s2_train], epochs=self.args.epochs, batch_size=self.args.bs, shuffle=True,
validation_data=([s1_test, s2_test], None))
if self.args.save_model:
self.vae.save_weights('./models/vae_xvae.h5')
def predict(self, s1_data, s2_data):
return self.encoder.predict([s1_data, s2_data], batch_size=self.args.bs)[0]
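# A minimal usage sketch, assuming an `args` namespace carrying the fields
# referenced above (s1_input_size, s2_input_size, ds, ls, act, dropout, bs,
# epochs, beta, distance, integration, save_model):
#     vae = XVAE(args)
#     vae.build_model()
#     vae.train(s1_train, s2_train, s1_test, s2_test)
#     z = vae.predict(s1_train, s2_train)  # latent means, shape (n_samples, args.ls)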
|
[
"keras.backend.stack",
"numpy.random.seed",
"keras.losses.binary_crossentropy",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"keras.models.Model",
"tensorflow.set_random_seed",
"keras.layers.Concatenate",
"keras.layers.Dense",
"keras.backend.mean",
"keras.layers.Lambda",
"models.common.kl_regu",
"keras.layers.Input",
"models.common.mmd",
"keras.layers.BatchNormalization",
"keras.losses.mean_squared_error"
] |
[((531, 549), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (545, 549), True, 'import numpy as np\n'), ((558, 577), 'tensorflow.set_random_seed', 'set_random_seed', (['(42)'], {}), '(42)\n', (573, 577), False, 'from tensorflow import set_random_seed\n'), ((678, 717), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.s1_input_size,)'}), '(shape=(self.args.s1_input_size,))\n', (683, 717), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((735, 774), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.s2_input_size,)'}), '(shape=(self.args.s2_input_size,))\n', (740, 774), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1488, 1543), 'keras.models.Model', 'Model', (['inputs', '[z_mean, z_log_sigma, z]'], {'name': '"""encoder"""'}), "(inputs, [z_mean, z_log_sigma, z], name='encoder')\n", (1493, 1543), False, 'from keras.models import Model\n'), ((1687, 1734), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.ls,)', 'name': '"""z_sampling"""'}), "(shape=(self.args.ls,), name='z_sampling')\n", (1692, 1734), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2459, 2513), 'keras.models.Model', 'Model', (['latent_inputs', '[s1_out, s2_out]'], {'name': '"""decoder"""'}), "(latent_inputs, [s1_out, s2_out], name='decoder')\n", (2464, 2513), False, 'from keras.models import Model\n'), ((2611, 2647), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {'name': '"""vae_x"""'}), "(inputs, outputs, name='vae_x')\n", (2616, 2647), False, 'from keras.models import Model\n'), ((2951, 2993), 'keras.losses.binary_crossentropy', 'binary_crossentropy', (['inputs[0]', 'outputs[0]'], {}), '(inputs[0], outputs[0])\n', (2970, 2993), False, 'from keras.losses import mean_squared_error, binary_crossentropy\n'), ((3267, 3322), 'keras.backend.mean', 'K.mean', (['(reconstruction_loss + self.args.beta * distance)'], {}), '(reconstruction_loss + self.args.beta * distance)\n', (3273, 3322), True, 'from keras import backend as K\n'), ((3375, 3473), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': 'None', 'amsgrad': '(False)', 'decay': '(0.001)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, amsgrad=\n False, decay=0.001)\n', (3390, 3473), False, 'from keras import optimizers\n'), ((877, 922), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (882, 922), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((944, 948), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (946, 948), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((967, 1012), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (972, 1012), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1034, 1038), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (1036, 1038), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1056, 1076), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1067, 1076), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1100, 1145), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (1105, 1145), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1161, 1165), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (1163, 1165), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1241, 1275), 'keras.layers.Dense', 'Dense', (['self.args.ls'], {'name': '"""z_mean"""'}), "(self.args.ls, name='z_mean')\n", (1246, 1275), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1301, 1368), 'keras.layers.Dense', 'Dense', (['self.args.ls'], {'name': '"""z_log_sigma"""', 'kernel_initializer': '"""zeros"""'}), "(self.args.ls, name='z_log_sigma', kernel_initializer='zeros')\n", (1306, 1368), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1384, 1440), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(self.args.ls,)', 'name': '"""z"""'}), "(sampling, output_shape=(self.args.ls,), name='z')\n", (1390, 1440), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1773, 1818), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (1778, 1818), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1834, 1838), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (1836, 1838), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1861, 1887), 'keras.layers.Dropout', 'Dropout', (['self.args.dropout'], {}), '(self.args.dropout)\n', (1868, 1887), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1955, 2000), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (1960, 2000), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2017, 2021), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (2019, 2021), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2039, 2084), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (2044, 2084), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2101, 2105), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (2103, 2105), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2179, 2231), 'keras.layers.Dense', 'Dense', (['self.args.s1_input_size'], {'activation': '"""sigmoid"""'}), "(self.args.s1_input_size, activation='sigmoid')\n", (2184, 2231), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2794, 2814), 'models.common.mmd', 'mmd', (['true_samples', 'z'], {}), '(true_samples, z)\n', (2797, 2814), False, 'from models.common import sse, bce, mmd, sampling, kl_regu\n'), ((2877, 2905), 'models.common.kl_regu', 'kl_regu', (['z_mean', 'z_log_sigma'], {}), '(z_mean, z_log_sigma)\n', (2884, 2905), False, 'from models.common import sse, bce, mmd, sampling, kl_regu\n'), ((3064, 3106), 'keras.losses.binary_crossentropy', 'binary_crossentropy', (['inputs[1]', 'outputs[1]'], {}), '(inputs[1], outputs[1])\n', (3083, 3106), False, 'from keras.losses import mean_squared_error, binary_crossentropy\n'), ((3142, 3183), 'keras.losses.mean_squared_error', 'mean_squared_error', (['inputs[1]', 'outputs[1]'], {}), '(inputs[1], outputs[1])\n', (3160, 3183), False, 'from keras.losses import mean_squared_error, binary_crossentropy\n'), ((2314, 2366), 'keras.layers.Dense', 'Dense', (['self.args.s2_input_size'], {'activation': '"""sigmoid"""'}), "(self.args.s2_input_size, activation='sigmoid')\n", (2319, 2366), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2405, 2435), 'keras.layers.Dense', 'Dense', (['self.args.s2_input_size'], {}), '(self.args.s2_input_size)\n', (2410, 2435), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2732, 2769), 'keras.backend.stack', 'K.stack', (['[self.args.bs, self.args.ls]'], {}), '([self.args.bs, self.args.ls])\n', (2739, 2769), True, 'from keras import backend as K\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from DictLearner import DictLearner
import scipy.sparse.linalg
"""The inference code was adapted from <NAME>'s sparsenet implementation,
available on github."""
class FISTALearner(DictLearner):
def __init__(self, data, learnrate, nunits, lam = 0.4, niter=100, **kwargs):
        self.lam = lam
self.niter = niter
super().__init__(data, learnrate, nunits, **kwargs)
def infer(self, data, max_iterations=None, display=False):
""" FISTA Inference for Lasso (l1) Problem
data: Batches of data (dim x batch)
Phi: Dictionary (dictionary element x dim) (nparray or sparse array)
lambdav: Sparsity penalty
max_iterations: Maximum number of iterations
"""
lambdav=self.lam
def proxOp(x,t):
""" L1 Proximal Operator """
return np.fmax(x-t, 0) + np.fmin(x+t, 0)
x = np.zeros((self.Q.shape[0], data.shape[1]))
c = self.Q.dot(self.Q.T)
b = -2*self.Q.dot(data)
L = scipy.sparse.linalg.eigsh(2*c, 1, which='LM')[0]
invL = 1/float(L)
y = x
t = 1
max_iterations = max_iterations or self.niter
for i in range(max_iterations):
g = 2*c.dot(y) + b
x2 = proxOp(y-invL*g,invL*lambdav)
t2 = (1+np.sqrt(1+4*(t**2)))/2.0
y = x2 + ((t-1)/t2)*(x2-x)
x = x2
t = t2
if display == True:
print ("L1 Objective " + str(np.sum((data-self.Q.T.dot(x2))**2) + lambdav*np.sum(np.abs(x2))))
return x2, 0, 0
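# A brief note on the proximal step above: proxOp is the soft-thresholding
# operator prox_{t*||.||_1}(x) = sign(x) * max(|x| - t, 0). For example, with
# t = 0.3, proxOp(np.array([1.0, -0.2, -0.9]), 0.3) returns
# array([ 0.7,  0. , -0.6]).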
|
[
"numpy.fmin",
"numpy.fmax",
"numpy.abs",
"numpy.zeros",
"numpy.sqrt"
] |
[((940, 982), 'numpy.zeros', 'np.zeros', (['(self.Q.shape[0], data.shape[1])'], {}), '((self.Q.shape[0], data.shape[1]))\n', (948, 982), True, 'import numpy as np\n'), ((889, 906), 'numpy.fmax', 'np.fmax', (['(x - t)', '(0)'], {}), '(x - t, 0)\n', (896, 906), True, 'import numpy as np\n'), ((907, 924), 'numpy.fmin', 'np.fmin', (['(x + t)', '(0)'], {}), '(x + t, 0)\n', (914, 924), True, 'import numpy as np\n'), ((1356, 1379), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * t ** 2)'], {}), '(1 + 4 * t ** 2)\n', (1363, 1379), True, 'import numpy as np\n'), ((1571, 1581), 'numpy.abs', 'np.abs', (['x2'], {}), '(x2)\n', (1577, 1581), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import rospy
from math import cos, sin, atan, pi
import numpy as np
import yaml
import sys
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float64
import pdb
pub = rospy.Publisher('pid_error', Float64, queue_size=10)
# You can define constants in Python as uppercase global names like these.
MIN_DISTANCE = 0.1
MAX_DISTANCE = 30.0
MIN_ANGLE = -45.0
MAX_ANGLE = 225.0
a = 0.0
b = 0.0
al = 0.0
bl = 0.0
ar = 0.0
br = 0.0
# data: single message from topic /scan
# angle: between -45 to 225 degrees, where 0 degrees is directly to the right
# Outputs length in meters to object with angle in lidar scan field of view
def getRange(data, angle):
# TODO: implement
ranges = np.asarray(data.ranges)
  angle_index = int((angle + 45) * 4)  # Hokuyo UST-10LX angular resolution is 0.25 degrees
output_range = ranges[angle_index]
return output_range
# data: single message from topic /scan
# desired_distance: desired distance to the left wall [meters]
# Outputs the PID error required to make the car follow the left wall.
def followLeft(data, desired_distance):
# TODO: implement
global a, b
L = 0.015 #old: 0.025
desired_distance = desired_distance
a = getRange(data, 135)
b = getRange(data, 180)
theta = 45 * pi / 180
alpha = atan((a * cos(theta) - b) / (a * sin(theta)))
current_dist = b * cos(alpha)
next_dist = current_dist + L * sin(alpha)
error_t = -(current_dist - desired_distance + L * sin(alpha))
# pass the error_t term into some function and output the next_angle and velocity
return error_t
# data: single message from topic /scan
# desired_distance: desired distance to the right wall [meters]
# Outputs the PID error required to make the car follow the right wall.
def followRight(data, desired_distance):
# TODO: implement
global a, b
L = 0.025
desired_distance = desired_distance
a = getRange(data, 45)
b = getRange(data, 0)
theta = 45 * pi / 180
alpha = atan((a * cos(theta) - b) / (a * sin(theta)))
current_dist = b * cos(alpha)
next_dist = current_dist + L * sin(alpha)
error_t = -(current_dist - desired_distance + L * sin(alpha))
# pass the error_t term into some function and output the next_angle and velocity
return error_t
# data: single message from topic /scan
# Outputs the PID error required to make the car drive in the middle
# of the hallway.
def followCenter(data):
# TODO: implement
global al, bl, ar, br
L = 0.025
al = getRange(data, 135)
bl = getRange(data, 180)
ar = getRange(data, 0)
br = getRange(data, 45)
theta = 45 * pi / 180
alpha_l = atan((al * cos(theta) - bl) / (al * sin(theta)))
alpha_r = atan((ar * cos(theta) - br) / (ar * sin(theta)))
left_dist = bl * cos(alpha_l)
right_dist = br * cos(alpha_r)
desired_distance = (left_dist + right_dist) / 2.0
error_t = -(right_dist - desired_distance + L * sin(alpha_r))
# pass the error_t term into some function and output the next_angle and velocity
return error_t
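# A hedged worked example of the geometry above: with a = 1.0 m on the 45-degree
# ray and b = 0.8 m straight at the wall, alpha = atan((a*cos(theta) - b) / (a*sin(theta)))
# is about -0.13 rad, so the perpendicular distance to the wall is b*cos(alpha) ~ 0.79 m;
# the PID error is the desired distance minus the projected distance
# current_dist + L*sin(alpha).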
# Callback for receiving LIDAR data on the /scan topic.
# data: the LIDAR data, published as a list of distances to the wall.
def scan_callback(data):
error = followCenter(data) # TODO: replace with followLeft, followRight, or followCenter
msg = Float64()
msg.data = error
pub.publish(msg)
# Boilerplate code to start this ROS node.
# DO NOT MODIFY!
if __name__ == '__main__':
rospy.init_node('pid_error_node', anonymous = True)
rospy.Subscriber("scan", LaserScan, scan_callback)
rospy.spin()
|
[
"rospy.Subscriber",
"numpy.asarray",
"rospy.Publisher",
"math.sin",
"std_msgs.msg.Float64",
"rospy.init_node",
"math.cos",
"rospy.spin"
] |
[((203, 255), 'rospy.Publisher', 'rospy.Publisher', (['"""pid_error"""', 'Float64'], {'queue_size': '(10)'}), "('pid_error', Float64, queue_size=10)\n", (218, 255), False, 'import rospy\n'), ((712, 735), 'numpy.asarray', 'np.asarray', (['data.ranges'], {}), '(data.ranges)\n', (722, 735), True, 'import numpy as np\n'), ((3221, 3230), 'std_msgs.msg.Float64', 'Float64', ([], {}), '()\n', (3228, 3230), False, 'from std_msgs.msg import Float64\n'), ((3358, 3407), 'rospy.init_node', 'rospy.init_node', (['"""pid_error_node"""'], {'anonymous': '(True)'}), "('pid_error_node', anonymous=True)\n", (3373, 3407), False, 'import rospy\n'), ((3411, 3461), 'rospy.Subscriber', 'rospy.Subscriber', (['"""scan"""', 'LaserScan', 'scan_callback'], {}), "('scan', LaserScan, scan_callback)\n", (3427, 3461), False, 'import rospy\n'), ((3463, 3475), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3473, 3475), False, 'import rospy\n'), ((1330, 1340), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (1333, 1340), False, 'from math import cos, sin, atan, pi\n'), ((2004, 2014), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (2007, 2014), False, 'from math import cos, sin, atan, pi\n'), ((2704, 2716), 'math.cos', 'cos', (['alpha_l'], {}), '(alpha_l)\n', (2707, 2716), False, 'from math import cos, sin, atan, pi\n'), ((2737, 2749), 'math.cos', 'cos', (['alpha_r'], {}), '(alpha_r)\n', (2740, 2749), False, 'from math import cos, sin, atan, pi\n'), ((1374, 1384), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (1377, 1384), False, 'from math import cos, sin, atan, pi\n'), ((2048, 2058), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (2051, 2058), False, 'from math import cos, sin, atan, pi\n'), ((1296, 1306), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1299, 1306), False, 'from math import cos, sin, atan, pi\n'), ((1437, 1447), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (1440, 1447), False, 'from math import cos, sin, atan, pi\n'), ((1970, 1980), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1973, 1980), False, 'from math import cos, sin, atan, pi\n'), ((2111, 2121), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (2114, 2121), False, 'from math import cos, sin, atan, pi\n'), ((2611, 2621), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2614, 2621), False, 'from math import cos, sin, atan, pi\n'), ((2672, 2682), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2675, 2682), False, 'from math import cos, sin, atan, pi\n'), ((2852, 2864), 'math.sin', 'sin', (['alpha_r'], {}), '(alpha_r)\n', (2855, 2864), False, 'from math import cos, sin, atan, pi\n'), ((1273, 1283), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1276, 1283), False, 'from math import cos, sin, atan, pi\n'), ((1947, 1957), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1950, 1957), False, 'from math import cos, sin, atan, pi\n'), ((2586, 2596), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2589, 2596), False, 'from math import cos, sin, atan, pi\n'), ((2647, 2657), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2650, 2657), False, 'from math import cos, sin, atan, pi\n')]
|
from inferelator import amusr_workflow
from inferelator import workflow
from inferelator.regression.base_regression import _RegressionWorkflowMixin
from inferelator.postprocessing.results_processor import ResultsProcessor
from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE
from inferelator.utils import InferelatorData
import pandas as pd
import numpy as np
class NoOutputRP(ResultsProcessor):
def summarize_network(self, output_dir, gold_standard, priors):
return super(NoOutputRP, self).summarize_network(None, gold_standard, priors)
# Factory method to spit out a puppet workflow
def create_puppet_workflow(regression_class=_RegressionWorkflowMixin,
base_class=workflow.WorkflowBase,
result_processor_class=NoOutputRP):
puppet_parent = workflow._factory_build_inferelator(regression=regression_class, workflow=base_class)
class PuppetClass(puppet_parent):
"""
Standard workflow except it takes all the data as references to __init__ instead of as filenames on disk or
as environment variables, and returns the model AUPR and edge counts without writing files (unless told to)
"""
write_network = True
network_file_name = None
pr_curve_file_name = None
initialize_mp = False
def __init__(self, data, prior_data, gs_data):
self.data = data
self.priors_data = prior_data
self.gold_standard = gs_data
super(PuppetClass, self).__init__()
def startup_run(self):
# Skip all of the data loading
self.process_priors_and_gold_standard()
def create_output_dir(self, *args, **kwargs):
pass
return PuppetClass
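# A minimal usage sketch of the factory above (illustrative only; `expr_data`, `priors`
# and `gold_standard` are hypothetical placeholders for the data objects passed in):
#   PuppetWorkflow = create_puppet_workflow()
#   puppet = PuppetWorkflow(expr_data, priors, gold_standard)
#   puppet.startup_run()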
class TaskDataStub(amusr_workflow.create_task_data_class(workflow_class="single-cell")):
priors_data = TestDataSingleCellLike.priors_data
tf_names = TestDataSingleCellLike.tf_names
meta_data_task_column = "Condition"
tasks_from_metadata = True
task_name = "TestStub"
task_workflow_type = "single-cell"
def __init__(self, sparse=False):
self.data = TEST_DATA.copy() if not sparse else TEST_DATA_SPARSE.copy()
super(TaskDataStub, self).__init__()
def get_data(self):
if self.tasks_from_metadata:
return self.separate_tasks_by_metadata()
else:
return [self]
class FakeDRD:
def __init__(self, *args, **kwargs):
pass
def run(self, expr, meta):
return expr, expr, expr
def validate_run(self, meta):
return True
class FakeWriter(object):
def writerow(self, *args, **kwargs):
pass
class FakeRegressionMixin(_RegressionWorkflowMixin):
def run_regression(self):
beta = [pd.DataFrame(np.array([[0, 1], [0.5, 0.05]]), index=['gene1', 'gene2'], columns=['tf1', 'tf2'])]
beta_resc = [pd.DataFrame(np.array([[0, 1], [1, 0.05]]), index=['gene1', 'gene2'], columns=['tf1', 'tf2'])]
return beta, beta_resc
def run_bootstrap(self, bootstrap):
return True
class FakeResultProcessor:
network_data = None
def __init__(self, *args, **kwargs):
pass
def summarize_network(self, *args, **kwargs):
return 1, 0, 0
|
[
"inferelator.amusr_workflow.create_task_data_class",
"inferelator.tests.artifacts.test_data.TEST_DATA_SPARSE.copy",
"inferelator.tests.artifacts.test_data.TEST_DATA.copy",
"numpy.array",
"inferelator.workflow._factory_build_inferelator"
] |
[((1835, 1902), 'inferelator.amusr_workflow.create_task_data_class', 'amusr_workflow.create_task_data_class', ([], {'workflow_class': '"""single-cell"""'}), "(workflow_class='single-cell')\n", (1872, 1902), False, 'from inferelator import amusr_workflow\n'), ((867, 957), 'inferelator.workflow._factory_build_inferelator', 'workflow._factory_build_inferelator', ([], {'regression': 'regression_class', 'workflow': 'base_class'}), '(regression=regression_class, workflow=\n base_class)\n', (902, 957), False, 'from inferelator import workflow\n'), ((2203, 2219), 'inferelator.tests.artifacts.test_data.TEST_DATA.copy', 'TEST_DATA.copy', ([], {}), '()\n', (2217, 2219), False, 'from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE\n'), ((2239, 2262), 'inferelator.tests.artifacts.test_data.TEST_DATA_SPARSE.copy', 'TEST_DATA_SPARSE.copy', ([], {}), '()\n', (2260, 2262), False, 'from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE\n'), ((2850, 2881), 'numpy.array', 'np.array', (['[[0, 1], [0.5, 0.05]]'], {}), '([[0, 1], [0.5, 0.05]])\n', (2858, 2881), True, 'import numpy as np\n'), ((2968, 2997), 'numpy.array', 'np.array', (['[[0, 1], [1, 0.05]]'], {}), '([[0, 1], [1, 0.05]])\n', (2976, 2997), True, 'import numpy as np\n')]
|
from solvers.rigidity_solver.models import *
import numpy as np
_scale = lambda arr: arr * 15
v = lambda x, y, z: np.array([x, y, z], dtype=np.double)
p = lambda x, y, z: (_scale(np.array([x, y, z], dtype=np.double)))
def lerp(p, q, weight):
return p + (q - p) * weight
def define(stage):
_p = {
"a": p(0, 0, 0),
"b": p(1, 0, 0),
"c": p(1 / 2, np.sqrt(3) / 2, 0),
"A-u": p(3 / 2, np.sqrt(3) / 2, 1),
"A-d": p(3 / 2, np.sqrt(3) / 2, -1),
"B-u": p(-1 / 2, np.sqrt(3) / 2, 1),
"B-d": p(-1 / 2, np.sqrt(3) / 2, -1),
"C-u": p(1 / 2, -np.sqrt(3) / 2, 1),
"C-d": p(1 / 2, -np.sqrt(3) / 2, -1),
}
_p.update({
"ab-mid": lerp(_p["A-u"], _p["B-u"], 0.5),
"bc-mid": lerp(_p["B-u"], _p["C-u"], 0.5),
"ca-mid": lerp(_p["C-u"], _p["A-u"], 0.5),
"ab-0.1": lerp(_p["A-u"], _p["B-u"], 0.1),
"bc-0.1": lerp(_p["B-u"], _p["C-u"], 0.1),
"ca-0.1": lerp(_p["C-u"], _p["A-u"], 0.1),
"ba-0.1": lerp(_p["B-u"], _p["A-u"], 0.1),
"cb-0.1": lerp(_p["C-u"], _p["B-u"], 0.1),
"ac-0.1": lerp(_p["A-u"], _p["C-u"], 0.1),
"ab-0.9": lerp(_p["A-u"], _p["B-u"], 0.9),
"bc-0.9": lerp(_p["B-u"], _p["C-u"], 0.9),
"ca-0.9": lerp(_p["C-u"], _p["A-u"], 0.9),
"ba-0.9": lerp(_p["B-u"], _p["A-u"], 0.9),
"cb-0.9": lerp(_p["C-u"], _p["B-u"], 0.9),
"ac-0.9": lerp(_p["A-u"], _p["C-u"], 0.9),
})
def beam_init(p, q, density=0.5):
return Beam.tetra(p, q, density=density, thickness=1)
stage_2_frac = 0.25
stage_3_frac = 0.7
normalize = lambda x: x / np.linalg.norm(x)
_da = normalize(_p["c"] - _p["b"])
_db = normalize(_p["a"] - _p["c"])
_dc = normalize(_p["b"] - _p["a"])
_dz = v(0, 0, 1)
model = Model()
_bmap = {
"top-A": beam_init(_p["B-u"], _p["C-u"]),
"top-B": beam_init(_p["C-u"], _p["A-u"]),
"top-C": beam_init(_p["A-u"], _p["B-u"]),
# "top-ab-bc": beam_init(_p["ab-mid"], _p["bc-mid"]),
# "top-bc-ca": beam_init(_p["bc-mid"], _p["ca-mid"]),
# "top-ca-ab": beam_init(_p["ca-mid"], _p["ab-mid"]),
#
# "core-ab": beam_init(_p['a'], _p["b"]),
# "core-bc": beam_init(_p["b"], _p["c"]),
# "core-ca": beam_init(_p["c"], _p["a"]),
#
"A-c": beam_init(_p["ca-0.9"], _p["C-d"]),
"A-b": beam_init(_p["ab-0.1"], _p["B-d"]),
"B-a": beam_init(_p["ab-0.9"], _p["A-d"]),
"B-c": beam_init(_p["bc-0.1"], _p["C-d"]),
"C-b": beam_init(_p["bc-0.9"], _p["B-d"]),
"C-a": beam_init(_p["ca-0.1"], _p["A-d"]),
}
joints = [
Joint(_bmap["B-a"], _bmap["C-a"], pivot=_p["A-d"], rotation_axes=_da),
Joint(_bmap["C-b"], _bmap["A-b"], pivot=_p["B-d"], rotation_axes=_db),
Joint(_bmap["A-c"], _bmap["B-c"], pivot=_p["C-d"], rotation_axes=_dc),
Joint(_bmap["top-C"], _bmap["top-A"], pivot=_p["B-u"], rotation_axes=-v(0, 0, 1)),
Joint(_bmap["top-A"], _bmap["top-B"], pivot=_p["C-u"], rotation_axes=-v(0, 0, 1)),
Joint(_bmap["top-B"], _bmap["top-C"], pivot=_p["A-u"], rotation_axes=-v(0, 0, 1)),
Joint(_bmap["top-B"], _bmap["A-b"], pivot=_p["ab-0.1"], rotation_axes=_da),
Joint(_bmap["top-C"], _bmap["A-c"], pivot=_p["ca-0.9"], rotation_axes=_da),
Joint(_bmap["top-C"], _bmap["B-c"], pivot=_p["bc-0.1"], rotation_axes=_db),
Joint(_bmap["top-A"], _bmap["B-a"], pivot=_p["ab-0.9"], rotation_axes=_db),
Joint(_bmap["top-A"], _bmap["C-a"], pivot=_p["ca-0.1"], rotation_axes=_dc),
Joint(_bmap["top-B"], _bmap["C-b"], pivot=_p["bc-0.9"], rotation_axes=_dc),
Joint(_bmap["A-b"], _bmap["B-a"],
pivot=(_p["ab-0.1"] + _p["ab-0.9"] + _p["A-d"] + _p["B-d"]) / 4,
rotation_axes=np.cross(_dc, _dz)),
Joint(_bmap["B-c"], _bmap["C-b"],
pivot=(_p["bc-0.1"] + _p["bc-0.9"] + _p["B-d"] + _p["C-d"]) / 4,
rotation_axes=np.cross(_da, _dz)),
Joint(_bmap["C-a"], _bmap["A-c"],
pivot=(_p["ca-0.1"] + _p["ca-0.9"] + _p["C-d"] + _p["A-d"]) / 4,
rotation_axes=np.cross(_db, _dz)),
]
ax_z = v(0, 0, 1)
if stage >= 2:
_stage_2_points = {
f"{a}-u-{b}-d-{stage_2_frac}": lerp(_p[f"{a.lower()}{b.lower()}-0.1"], _p[f"{b}-d"], stage_2_frac)
for a in "ABC" for b in "ABC" if a != b
}
_p.update(_stage_2_points)
_stage_2_beam = {
f"s2-{a}{b}": beam_init(_p[f"{a}-u-{b}-d-{stage_2_frac}"], _p[f"{b}-u-{a}-d-{stage_2_frac}"])
for a, b in ("AB", "BC", "CA")
}
_bmap.update(_stage_2_beam)
_stage_2_joint = [
Joint(_bmap[f"s2-{a}{b}"], _bmap[f"{a}-{b.lower()}"], pivot=_p[f"{a}-u-{b}-d-{stage_2_frac}"], rotation_axes=ax_z)
for a, b in ("AB", "BC", "CA")
] + [
Joint(_bmap[f"s2-{a}{b}"], _bmap[f"{b}-{a.lower()}"], pivot=_p[f"{b}-u-{a}-d-{stage_2_frac}"], rotation_axes=ax_z)
for a, b in ("AB", "BC", "CA")
]
joints.extend(_stage_2_joint)
if stage >= 3:
_stage_3_points = {
f"{a}-u-{b}-d-{stage_3_frac}": lerp(_p[f"{a}-u"], _p[f"{b}-d"], stage_3_frac)
for a in "ABC" for b in "ABC" if a != b
}
_p.update(_stage_3_points)
_stage_3_beam = {
f"s3-{a}{b}": beam_init(_p[f"{a}-u-{b}-d-{stage_3_frac}"], _p[f"{b}-u-{a}-d-{stage_3_frac}"])
for a, b in ("AB", "BC", "CA")
}
_bmap.update(_stage_3_beam)
_stage_3_joint = [
Joint(_bmap[f"s3-{a}{b}"], _bmap[f"{a}-{b.lower()}"], pivot=_p[f"{a}-u-{b}-d-{stage_3_frac}"], rotation_axes=ax_z)
for a, b in ("AB", "BC", "CA")
] + [
Joint(_bmap[f"s3-{a}{b}"], _bmap[f"{b}-{a.lower()}"], pivot=_p[f"{b}-u-{a}-d-{stage_3_frac}"], rotation_axes=ax_z)
for a, b in ("AB", "BC", "CA")
]
joints.extend(_stage_3_joint)
if stage >= 4:
_indices = ["AB", "BC", "CA"]
_stage_4_points = {
f"s4-{_indices[i % 3]}": lerp(_p[f"{a}-u-{b}-d-{stage_2_frac}"], _p[f"{b}-u-{a}-d-{stage_2_frac}"], 0.5)
for i, (a, b) in enumerate(_indices)
}
_p.update(_stage_4_points)
_stage_4_beam = {
f"s4-{_indices[i % 3]}": beam_init(_p[f"s4-{_indices[i]}"], _p[f"{a.lower()}{b.lower()}-mid"])
for i, (a, b) in enumerate(_indices)
}
_bmap.update(_stage_4_beam)
_stage_4_joint = [
Joint(_bmap[f"s4-{_indices[i % 3]}"], _bmap[f"s2-{_indices[i % 3]}"],
pivot=_p[f"s4-{_indices[i]}"],
rotation_axes=np.cross((_dc, _da, _db)[i], v(0, 0, 1))
)
for i, (a, b) in enumerate(_indices)
] + [
Joint(_bmap[f"s4-{_indices[i % 3]}"], _bmap[f"top-{'CAB'[i]}"],
pivot=_p[f"{a.lower()}{b.lower()}-mid"],
rotation_axes=np.cross((_dc, _da, _db)[i], v(0, 0, 1))
)
for i, (a, b) in enumerate(_indices)
]
joints.extend(_stage_4_joint)
beams = list(_bmap.values())
model.add_beams(beams)
model.add_joints(joints)
return locals()
if __name__ == "__main__":
model = define(1)["model"]
model.visualize(show_hinge=True)
points = model.point_matrix()
edges = model.edge_matrix()
    stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
constraints = model.constraint_matrix()
new_stiffness, B = generalized_courant_fischer(
stiffness,
constraints
)
pairs = model.eigen_solve(num_pairs=20)
print([e for e, v in pairs])
for stage in range(1, 4 + 1):
model = define(stage)["model"]
model.save_json(f"output/table-stage{stage}.json")
|
[
"numpy.linalg.norm",
"numpy.cross",
"numpy.array",
"numpy.sqrt"
] |
[((115, 151), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.double'}), '([x, y, z], dtype=np.double)\n', (123, 151), True, 'import numpy as np\n'), ((180, 216), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.double'}), '([x, y, z], dtype=np.double)\n', (188, 216), True, 'import numpy as np\n'), ((1649, 1666), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (1663, 1666), True, 'import numpy as np\n'), ((379, 389), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (386, 389), True, 'import numpy as np\n'), ((424, 434), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (431, 434), True, 'import numpy as np\n'), ((468, 478), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (475, 478), True, 'import numpy as np\n'), ((515, 525), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (522, 525), True, 'import numpy as np\n'), ((560, 570), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (567, 570), True, 'import numpy as np\n'), ((3840, 3858), 'numpy.cross', 'np.cross', (['_dc', '_dz'], {}), '(_dc, _dz)\n', (3848, 3858), True, 'import numpy as np\n'), ((4010, 4028), 'numpy.cross', 'np.cross', (['_da', '_dz'], {}), '(_da, _dz)\n', (4018, 4028), True, 'import numpy as np\n'), ((4180, 4198), 'numpy.cross', 'np.cross', (['_db', '_dz'], {}), '(_db, _dz)\n', (4188, 4198), True, 'import numpy as np\n'), ((607, 617), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (614, 617), True, 'import numpy as np\n'), ((652, 662), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (659, 662), True, 'import numpy as np\n')]
|
import autoCorrection
import numpy as np
import unittest
class TestEndToEnd(unittest.TestCase):
def test_end_to_end(self):
counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
sf = np.ones((10, 8))
corrector = autoCorrection.correctors.AECorrector()
correction = corrector.correct(counts=counts, size_factors=sf)
self.assertEqual(counts.shape, correction.shape)
class TestSavingAndLoading(unittest.TestCase):
def test_loading(self):
self.test_saving()
counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
sf = np.ones((10, 8))
corrector = autoCorrection.correctors.AECorrector(model_name='test1', model_directory=".")
correction = corrector.correct(counts, sf, only_predict=True)
self.assertEqual(counts.shape, correction.shape)
def test_saving(self):
counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
sf = np.ones((10, 8))
corrector = autoCorrection.correctors.AECorrector(model_name='test1', model_directory=".", save_model=True)
correction = corrector.correct(counts, sf)
self.assertEqual(counts.shape, correction.shape)
class TestSetSeed(unittest.TestCase):
def test_setSeed(self):
# generate data
nsamples = 15
ngenes = 20
counts = np.random.negative_binomial(n=20, p=0.2, size=(ngenes, nsamples))
sf = np.random.uniform(0.8, 1.2, size=(ngenes, nsamples))
        # Run the autocorrection twice with a fixed seed and once without; the unseeded run should deviate.
ac = autoCorrection.correctors
correct1 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0).correct(counts, sf)
correct2 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0, seed=42).correct(counts, sf)
correct3 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0, seed=42).correct(counts, sf)
        # Check whether the results are similar. Due to randomness in the numbers we still expect small differences.
#self.assertTrue(sum(sum(np.round(correct2) == np.round(correct3))) > 0.9 * nsamples * ngenes)
self.assertTrue(sum(sum(np.round(correct1) == np.round(correct2))) < 0.3 * nsamples * ngenes)
self.assertTrue(sum(sum(np.round(correct1) == np.round(correct3))) < 0.3 * nsamples * ngenes)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.random.uniform",
"numpy.random.negative_binomial",
"numpy.ones",
"autoCorrection.correctors.AECorrector",
"numpy.round"
] |
[((2475, 2490), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2488, 2490), False, 'import unittest\n'), ((147, 201), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(10, 8)'}), '(n=20, p=0.2, size=(10, 8))\n', (174, 201), True, 'import numpy as np\n'), ((215, 231), 'numpy.ones', 'np.ones', (['(10, 8)'], {}), '((10, 8))\n', (222, 231), True, 'import numpy as np\n'), ((252, 291), 'autoCorrection.correctors.AECorrector', 'autoCorrection.correctors.AECorrector', ([], {}), '()\n', (289, 291), False, 'import autoCorrection\n'), ((542, 596), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(10, 8)'}), '(n=20, p=0.2, size=(10, 8))\n', (569, 596), True, 'import numpy as np\n'), ((610, 626), 'numpy.ones', 'np.ones', (['(10, 8)'], {}), '((10, 8))\n', (617, 626), True, 'import numpy as np\n'), ((647, 725), 'autoCorrection.correctors.AECorrector', 'autoCorrection.correctors.AECorrector', ([], {'model_name': '"""test1"""', 'model_directory': '"""."""'}), "(model_name='test1', model_directory='.')\n", (684, 725), False, 'import autoCorrection\n'), ((898, 952), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(10, 8)'}), '(n=20, p=0.2, size=(10, 8))\n', (925, 952), True, 'import numpy as np\n'), ((966, 982), 'numpy.ones', 'np.ones', (['(10, 8)'], {}), '((10, 8))\n', (973, 982), True, 'import numpy as np\n'), ((1003, 1103), 'autoCorrection.correctors.AECorrector', 'autoCorrection.correctors.AECorrector', ([], {'model_name': '"""test1"""', 'model_directory': '"""."""', 'save_model': '(True)'}), "(model_name='test1', model_directory=\n '.', save_model=True)\n", (1040, 1103), False, 'import autoCorrection\n'), ((1371, 1436), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(ngenes, nsamples)'}), '(n=20, p=0.2, size=(ngenes, nsamples))\n', (1398, 1436), True, 'import numpy as np\n'), ((1450, 1502), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)'], {'size': '(ngenes, nsamples)'}), '(0.8, 1.2, size=(ngenes, nsamples))\n', (1467, 1502), True, 'import numpy as np\n'), ((2266, 2284), 'numpy.round', 'np.round', (['correct1'], {}), '(correct1)\n', (2274, 2284), True, 'import numpy as np\n'), ((2288, 2306), 'numpy.round', 'np.round', (['correct2'], {}), '(correct2)\n', (2296, 2306), True, 'import numpy as np\n'), ((2368, 2386), 'numpy.round', 'np.round', (['correct1'], {}), '(correct1)\n', (2376, 2386), True, 'import numpy as np\n'), ((2390, 2408), 'numpy.round', 'np.round', (['correct3'], {}), '(correct3)\n', (2398, 2408), True, 'import numpy as np\n')]
|
# Import all packages required
# Type annotation imports
from typing import Union
from typing import Tuple
# Other imports
import os
import glob
import h5py
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import numpy as np
import pandas as pd
from scipy import interpolate
#-------------------------------------------------------------------------------------------------------#
# CREAM Data Utility class. Please refer to the docstring for details!
#-------------------------------------------------------------------------------------------------------#
class CREAM_Day():
"""
A class representing one particular day of the CREAM dataset.
The CREAM dataset has the following file structure:
|-CREAM
|------- 2018-08-23
| |--------- *.hdf5
| |--------- *.hdf5
| |--------- *.hdf5
| |--------- ......
|
|------- 2018-08-24
.......
    This class corresponds to one of the subfolders, i.e. one of the folders representing a particular day, such as,
    for example, the first folder "2018-08-23". You have to create one CREAM_Day object per day folder in order to use
    the data of the full dataset.
During initialization, the following attributes are set.
files_metadata_df (pandas.DataFrame): columns: Start_timestamp, End_timestamp, Filename to store start
end times of each file in this day
files (list): full path to every file in this day
minimum_request_timestamp (datetime.datetime): First timestamp of the day
maximum_request_timestamp (datetime.datetime): Last timestamp of the day
file_cache (dict): file cache for buffering already loaded files
day_date (datetime.datetime): day and date of the current object
This class also provides convenience functions to load the files of the CREAM dataset.
To load an arbitrary CREAM file, use the load_file method.
To load an arbitrary data window, based on the start_timestamp of the window to load, use the load_time_frame method.
To load the maintenance or product events as a pandas.DataFrame, use the load_machine_events method.
Via a parameter, one can also load the raw files that were generated by the coffee maker (they can be found in the
raw_coffee_maker_logs subfolder of the CREAM dataset).
To load the component events as a pandas.DataFrame, use the load_component_events_method.
    To load information on whether a specific day is a working day (German working day in the dataset), use the
    get_weekday_information method.
Other self-explaining convenience functions are:
- get_datetime_from_filepath
- get_index_from_timestamp
- get_timestamp_from_index
Functions starting with an "_" underscore are private functions and are not intended for user usage.
"""
def __init__(self, cream_day_location: str, use_buffer : bool =False, buffer_size_files : int =5):
"""
Initialize the CREAM_Day object
Parameters
----------
cream_day_location (str): location of the root folder of the respective day in the CREAM dataset. Specify
a path to the respective day, not to the root of the overall CREAM datset!
use_buffer (boolean): default=False. In case it is set to True, files loaded via the load_file, or load_time_frame
method are stored in the cache of the CREAM_Day object. This speeds up streaming the dataset.
                              In case no buffer_size_files is provided, a default buffer_size_files of 5 is used.
Hence, the recent 5 files are stored in the cache. Old files are automatically removed from
the cache in case the buffer_size_files limit is exceeded.
buffer_size_files (int): Size of the file cache of the CREAM_Day object. Functionality of the cache is documented
in the use_buffer parameter description right above.
"""
self.dataset_location = cream_day_location
self.use_buffer = use_buffer
self.buffer_size_files = buffer_size_files
if self.buffer_size_files == 5 and use_buffer is True:
raise Warning("Buffer size was specified with size 5 (default value): a minimum buffer size of 5 files was set therefore")
# Initiate the file buffer dictionary
self.file_cache = {}
# Get all the files of the respective day
self.files = glob.glob(os.path.join(self.dataset_location, "*.hdf5"))
self.files.sort()
# We use the first file and the timestamps in the filenames in the dataset (of this day) to get the metadata information
# Get the timezone information from the filename timestamp
# Load Metadata from the first file of the respective device --> same for all of the device --> STATIC METADATA
with h5py.File(self.files[0], 'r', driver='core') as f:
self.sampling_rate = int(f.attrs['frequency']) # get the sampling rate
self.samples_per_file = len(f["voltage"]) # get the length of the signal
            # get the start timestamp
start_timestamp = datetime(
year=int(f.attrs['year']),
month=int(f.attrs['month']),
day=int(f.attrs['day']),
hour=int(f.attrs['hours']),
minute=int(f.attrs['minutes']),
second=int(f.attrs['seconds']),
microsecond=int(f.attrs['microseconds']),
tzinfo=timezone(timedelta(hours=int(f.attrs['timezone'][1:4]), minutes=int(f.attrs['timezone'][4:]))))
self.file_duration_sec = 60 * 60 # each file, one hour --> seconds per file
self.number_of_files = len(self.files)
# Some file metadata for every file
file_start_times = [self.get_datetime_from_filepath(f) for f in self.files]
file_end_times = [timedelta(seconds=self.file_duration_sec) + ts for ts in file_start_times]
self.files_metadata_df = pd.DataFrame({"Start_timestamp": file_start_times,
"Filename": self.files,
"End_timestamp": file_end_times})
self.dataset_name = "CREAM"
# Compute the minimum and maximum time for this day, and the respective differences to the day before
self.minimum_request_timestamp = self.files_metadata_df.iloc[0].Start_timestamp
self.maximum_request_timestamp = self.files_metadata_df.iloc[-1].Start_timestamp + timedelta(seconds=self.file_duration_sec)
# Find the day of the dataset
folder_path = os.path.basename(os.path.normpath(self.dataset_location)) # name of the folder
date = folder_path.split("-")
self.day_date = datetime(year=int(date[0]), month=int(date[1]), day=int(date[2]))
# Initialize weekday information
self.weekday_information_df = None
def load_machine_events(self, file_path: str = None, filter_day : bool = False, raw_file=True) -> pd.DataFrame:
"""
        Load the machine events file (maintenance or product events). The events are sorted by the time they occur.
Parameters
----------
file_path (str): path to the component events file (.csv) file
filter_day (boolean): default=False. If set to True, the DataFrame is filtered for the events belonging
to the CREAM_Day object
raw_file (boolean): default=True. If set to True, the user has to provide the path to the raw events file that
were generated by the coffee maker. They can be found in the raw_coffee_maker_logs subfolder
of the dataset.
Returns
-------
data (pd.DataFrame):
if raw_file=True: pd.DataFrame with columns "Timestamp", "Activity" (maintenance file) or
"Timestamp", "Product" (product file)
If raw_file=False: pd.DataFrame with columns
'Start_Timestamp', 'Automatic_Timestamp', 'Event_Type', 'End_Timestamp', 'Event_Duration_Seconds', 'Date',
Sorted descending by 'Start_Timestamp'.
"""
if file_path is None:
raise ValueError("Specify a file_path, containing the events file.")
if raw_file is True and "raw" not in file_path:
raise ValueError("In case you intend to load a raw_file, you also need to pass a path to a raw file to the "
"function!")
data = pd.read_csv(file_path)
        # The timezones of the timestamps need to be of the same type.
        # We use the first file of the day_object to get the timezone.
timezone = self.get_datetime_from_filepath(self.files[0]).tzinfo
if raw_file is True: # In case the raw product file is used
data.Timestamp = pd.to_datetime(data.Timestamp)
data = self._convert_timezone(data, "Timestamp", target_timezone=timezone)
data.sort_values("Timestamp", inplace=True)
data["Date"] = data.Timestamp.apply(lambda x: x.date())
else: # the manually adjusted and pre-processed product file is used
for column in data.columns:
# Convert all timestamp columns
if "Timestamp" in column:
data[column] = pd.to_datetime(data[column])
data = self._convert_timezone(data, column, target_timezone=timezone)
data["Date"] = data.End_Timestamp.apply(lambda x: x.date())
data.sort_values("Start_Timestamp", inplace=True)
if filter_day is True: # only return the event of the corresponding CREAM day
data = data[data["Date"] == self.day_date.date()]
return data
def load_component_events(self, file_path: str = None, filter_day : bool = False) -> pd.DataFrame:
"""
Load the labeled electrical events, i.e. the components events, file. The events are sorted by the time they occur.
Parameters
----------
file_path (str): path to the component events file (.csv) file
filter_day (boolean): default=False, if set to True, the DataFrame is filtered for the events belonging
to the CREAM_Day object
Returns
-------
data (pd.DataFrame): pd.DataFrame with columns:
'Start_Timestamp', 'Automatic_Timestamp', 'Event_Type', 'End_Timestamp', 'Event_Duration_Seconds', 'Date',
Sorted descending by 'Start_Timestamp'.
"""
if file_path is None:
raise ValueError("Specify a file_path, containing the events file.")
data = pd.read_csv(file_path)
        # The timezones of the timestamps need to be of the same type.
        # We use the first file of the day_object to get the timezone.
timezone = self.get_datetime_from_filepath(self.files[0]).tzinfo
for column in data.columns:
# Convert all timestamp columns
if "Timestamp" in column:
data[column] = pd.to_datetime(data[column])
data = self._convert_timezone(data, column, target_timezone=timezone)
data["Date"] = data.Timestamp.apply(lambda x: x.date())
data.sort_values("Timestamp", inplace=True)
if filter_day is True: # only return the event of the corresponding CREAM day
data = data[data["Date"] == self.day_date.date()]
return data
def load_file(self, file_path: str, return_noise: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""
Load a file of the CREAM dataset
If return_noise is specified, the noise channel is also returned. The current is 2-dimensional then.
The signals get pre-processed before they are returned by this function:
1. y-direction calibration: we center the signal around zero
2. calibration_factor: we calibrate the signal by the measurement device specific calibration_factor.
This calibration_factor is included in the metadata of the files.
Parameters
----------
file_path (string): path to the file to be loaded
return_noise (boolean): default=False. If set to True, the current of the noise socket is also returned.
Returns
-------
voltage (ndarray): voltage signal with shape=(1, file_length,). In case of an empty file None is returned.
current (ndarray): current signal either with shape (1, file_length) or (2, file_length)
In case of an empty file None is returned
"""
voltage = None
current = None
# Check if the file is already in the file cache
if self.use_buffer is True and file_path in self.file_cache:
voltage = self.file_cache[file_path]["voltage"]
current = self.file_cache[file_path]["current"]
return voltage, current
else:
# Check if the file is empty (zero bytes): if so return and empty current and voltage array
if os.stat(file_path).st_size > 0: # if not empty
with h5py.File(file_path, 'r', driver='core') as f:
voltage_offset, current_offset = self._adjust_amplitude_offset(f) # y value offset adjustment
for name in list(f):
signal = f[name][:] * 1.0
if name == 'voltage' and voltage_offset is not None: # the voltage signal
voltage = signal - voltage_offset
calibration_factor = f[name].attrs['calibration_factor']
voltage = np.multiply(voltage, calibration_factor)
elif "current1" in name and current_offset is not None: # the current signal of the coffee maker
current = signal - current_offset
calibration_factor = f[name].attrs['calibration_factor']
current = np.multiply(current, calibration_factor)
elif return_noise == True and "current6" in name and current_offset is not None: # the current signal of the noise channel
current_noise = signal - current_offset
calibration_factor = f[name].attrs['calibration_factor']
current_noise = np.multiply(current_noise, calibration_factor)
if return_noise is True:
current = np.array([current, current_noise])
voltage = np.array(voltage)
else:
current = np.array(current)
voltage = np.array(voltage)
# Before returning, check if we store the file in the cache and if we need to delete one instead from the cache
if self.use_buffer is True:
if len(self.file_cache) < self.buffer_size_files:
self.file_cache[file_path] = {"voltage" : np.array(voltage), "current": np.array(current)}
else:
sorted_filenames = list(self.file_cache.keys())
sorted_filenames.sort()
del self.file_cache[sorted_filenames[0]] #delete the oldest file
return np.array(voltage), np.array(current)
else: # if empty
return None, None
def load_file_metadata(self, file_path: str, attribute_list: list = []) -> dict:
"""
        Load the file metadata for a specific file.
The metadata is stored in the HDF5 attributes, details are documented in the data descriptor.
The following attributes are available:
["name", "first_trigger_id", "last_trigger_id", "sequence", "frequency", "year", "month", "day",
"hours", "minutes", "seconds", "microseconds", "timezone", "calibration_factor", "removed_offset"]
Parameters
----------
        file_path (str): path to the file to be loaded. Needs to be the full path, as provided by the "files"
attribute of the CREAM_Day object.
        attribute_list (list): default=[], specify specific attribute names to be loaded. If no
dedicated attributes are specified, all attributes are returned
Returns
-------
        attributes_dict (dict): dictionary with all HDF5 attributes of a specific file.
"""
if file_path is None:
raise ValueError("Specify a file path!")
all_attributes = ["name", "first_trigger_id", "last_trigger_id", "sequence", "frequency", "year", "month", "day",
"hours", "minutes", "seconds", "microseconds", "timezone", "calibration_factor", "removed_offset"]
        if len(attribute_list) == 0:  # use all attributes if none are specified
attribute_list = all_attributes
else:
# Check if user specified attributes exist in the metadata
for attr in attribute_list:
if attr not in all_attributes:
raise ValueError("The atttribute %s is not available!")
attributes_dict = {}
with h5py.File(file_path, 'r', driver='core') as f:
for attr in attribute_list:
if attr in ["calibration_factor", "removed_offset"]: #not in the attribute root of the hdf5 file
attributes_dict[attr] = f["voltage"].attrs[attr]
else: #attributes in the root of the hdf5 file
attributes_dict[attr] = f.attrs[attr]
return attributes_dict
def load_time_frame(self, start_datetime: datetime, duration : float, return_noise: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""
        Loads an arbitrary time frame of the CREAM dataset. It can also be used for fast streaming of the data if
        the caching parameter is enabled in the CREAM_Day object. Otherwise, the files will be reloaded every time
        this method is called, thus slowing down the data retrieval.
Parameters
----------
start_datetime (datetime.datetime): start timestamp of the window to load
duration (float): duration of the window to load (window size) in seconds. ATTENTION: if not provided in seconds,
wrong results are returned!
return_noise (boolean): default: False. If set to True, also returns the signal of the noise channel recorded in
CREAM dataset (from socket 6)
Returns
-------
voltage (numpy.ndarray): voltage signal of the window
        current (numpy.ndarray): current signal of the window. One dimensional if return_noise=False, two dimensional
                                 if return_noise=True. The first element is the coffee-maker signal, the second element
the noise signal.
"""
# Perform initial checks
if start_datetime < self.minimum_request_timestamp:
raise ValueError(
"The requested Time window is smaller then the minimum_request_timestamp of the day object")
end_datetime = start_datetime + timedelta(seconds=duration)
if end_datetime > self.maximum_request_timestamp:
raise ValueError("The requested Time window is bigger then the maximum_request_timestamp of the day object")
# determine all the files that are relevant for the requested time window
        # The index of the first relevant_file: i.e. the last file whose Start_timestamp is not later than the start_datetime
first_file_idx = self.files_metadata_df[self.files_metadata_df.Start_timestamp <= start_datetime].index[-1]
        # The last relevant_file: i.e. the first file that has an End_timestamp that is bigger than the one we need
last_file_idx = self.files_metadata_df[self.files_metadata_df.End_timestamp >= end_datetime].index[0]
# Get all the files in between the first and the last file needed
relevant_files_df = self.files_metadata_df.loc[first_file_idx:last_file_idx]
if len(relevant_files_df) == 0:
raise ValueError("The timeframe requested does not lie within the current day!")
relevant_voltage = []
relevant_current = []
relevant_current_noise = []
for i, row in relevant_files_df.iterrows():
voltage, current = self.load_file(row.Filename, return_noise=return_noise)
relevant_voltage.append(voltage)
relevant_current.append(current)
if return_noise is True:
relevant_current.append(current[0])
relevant_current_noise.append(current[1])
# now stack together the relevant signals
relevant_voltage = np.concatenate(relevant_voltage, axis=-1)
relevant_current = np.concatenate(relevant_current, axis=-1)
if return_noise is True and len(relevant_current_noise) > 0:
relevant_current_noise = np.concatenate(relevant_current_noise, axis=-1)
# Compute the start_index
# 1.1 Compute the offset in the first file
start_index = int(self.get_index_from_timestamp(relevant_files_df.iloc[0].Start_timestamp, start_datetime))
end_index = int(self.get_index_from_timestamp(relevant_files_df.iloc[0].Start_timestamp, end_datetime))
# Get the voltage and current window
voltage = relevant_voltage[start_index:end_index] #there is only one voltage channel
if return_noise is True and len(relevant_current_noise) > 0:
current = [relevant_current[start_index:end_index], relevant_current_noise[start_index:end_index]]
else:
current = relevant_current[start_index:end_index]
voltage = np.array(voltage)
current = np.array(current)
return voltage, current
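    # Illustrative call of load_time_frame (assuming `day` is a CREAM_Day instance; the
    # 30-second offset and 1-second window are arbitrary example values):
    #   start = day.minimum_request_timestamp + timedelta(seconds=30)
    #   v, i = day.load_time_frame(start, duration=1.0, return_noise=False)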
def compute_average_sampling_rate(self) -> float:
"""
Estimate the average sampling rate per day.
        Load every file of the current day.
Per file (one hour files), we compute the actual sampling rate.
We then average this number over all files of this day, resulting in the average sampling rate.
Calculate the difference between the first and last sample of a day based on
the timestamps of the files.
Sets the average_sampling_rate attribute of the CREAM_Day object.
One can compare the average_sampling_rate to the nominal one of 6400.
Parameters
----------
Returns
-------
average_sampling_rate (float): average sampling rate per day (computed over the files)
"""
FILE_LENGTH_SEC = 60 * 60 #one hour files
actual_sampling_rates = []
for file in self.files:
voltage, current = self.load_file(file_path=file)
samples_per_file = len(voltage)
actual_sampling_rate = samples_per_file / FILE_LENGTH_SEC
actual_sampling_rates.append(actual_sampling_rate)
self.average_sampling_rate = np.mean(actual_sampling_rates)
return self.average_sampling_rate
def get_datetime_from_filepath(self, filepath: str) -> datetime:
"""
Extracts the datetime from a filename of a CREAM file.
Parameters
----------
filepath (str): path to a CREAM file
Returns
-------
start_timestamp (datetime): start timestamp of the file, extracted from the filename
"""
filename = os.path.basename(filepath) # get the filename
string_timestamp = "-".join(filename.split("-")[2:-1])
datetime_object = datetime.strptime(string_timestamp, '%Y-%m-%dT%H-%M-%S.%fT%z') # string parse time
return datetime_object
def get_index_from_timestamp(self, start_timestamp: datetime, event_timestamp: datetime) -> int:
"""
        Returns the index of the event, represented by the event_timestamp, relative to the start_timestamp (e.g. the start timestamp of the file of interest).
Parameters
----------
start_timestamp (datetime.datetime): start timestamp of the window the event is located at
event_timestamp (datetime.datetime): timestamp of the event of interest
Returns
-------
event_index (int): The resulting event index
"""
sec_since_start = event_timestamp - start_timestamp
        event_index = sec_since_start.total_seconds() * self.sampling_rate  # multiply by samples per second
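        # Worked example (assuming the nominal sampling rate of 6400 S/s): an event occurring
        # 0.5 s after start_timestamp maps to index int(0.5 * 6400) = 3200.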
return int(event_index)
def get_timestamp_from_index(self, start_timestamp: datetime, event_index: int) -> datetime:
"""
Returns the timestamp for an event index. The event index has to be relative to a start_timestamp of a window.
Parameters
----------
start_timestamp (datetime.datetime): start timestamp of the window.
event_index (int): Index of the event of interest, has to be relative to the start_timestamp provided.
Returns
-------
event_timestamp (datetime.datetime): The resulting timestamp
"""
seconds_per_sample = 1 / self.sampling_rate # 1 second / samples = seconds per sample
time_since_start = event_index * seconds_per_sample
event_ts = start_timestamp + timedelta(seconds=time_since_start)
return event_ts
def _adjust_amplitude_offset(self, file: h5py.File) -> Tuple[int, int]:
"""
Resembles the pre-processing functionality in the BLOND repository (one_second_data_summary_functions.py) by
<NAME>.
Computes the mean per period to get an estimate for the offset in each period.
This is done for the voltage signal.
        The period length is computed using the nominal sampling rate. This can deviate from the
actual period length. Therefore, we zero pad the voltage signal to get full periods again before computing
the mean.
Then we use the estimate per period, to linearly interpolate the mean values per period, to get an offset value
per sample point in the signal. We then use the offset of the voltage to compute the offset of the current by multiplying
        it by the crest-coefficient of 1/sqrt(2), i.e., approx. 0.7.
Parameters
----------
file (h5py.File): a h5py CREAM file.
Returns
-------
voltage_offset (int): the voltage offset to adjust for
current_offset (int): the current offset to adjust for
"""
length = len(file['voltage'])
# Compute the average period_length, using the nominal sampling rate
period_length = round(self.sampling_rate / 50)
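        # For illustration: with the nominal sampling rate of 6400 S/s and 50 Hz mains,
        # period_length = round(6400 / 50) = 128 samples per period.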
# Get the missing samples, opposed to the optimal number of periods in the signal
remainder = divmod(length, period_length)[1]
voltage = np.pad(file['voltage'][:], (0, period_length - remainder), 'constant',
constant_values=0) # zero padding
voltage = voltage.reshape(-1, period_length) # the single periods, period wise reshape
mean_values_per_period = voltage.mean(axis=1) # compute the mean per period
# Create x values for the interpolation
x_per_period = np.linspace(1, length, len(mean_values_per_period), dtype=np.int) # number of periods
x_original = np.linspace(1, length, length, dtype=np.int)
# build a linear interpolation, that interpolates for each period witch offset it should have
# for each of the datapoints, interpolate the offset
voltage_offset = interpolate.interp1d(x_per_period, mean_values_per_period)(x_original)
current_offset = voltage_offset * 1 / np.sqrt(2) # roughly * 0.7
return voltage_offset, current_offset
def _convert_timezone(self, dataframe: pd.DataFrame, column_name : str, target_timezone:str) -> pd.DataFrame:
"""
Converts timezone in column_name column in dataframe to target_timezone
Parameters
----------
dataframe (pandas.DataFrame): DataFrame object, containing some time columns
column_name (str): Name of the column of interest, i.e. the name of a time column
target_timezone (str): datetime.datetime.tzinfo timezone information as a string. This is the target timezone.
Returns
-------
dataframe (pandas.DataFrame): DataFrame object, with the column_name column converted to the target_timezone
"""
ts_array = []
for i, row in dataframe.iterrows():
ts = row[column_name].tz_convert(target_timezone)
ts_array.append(ts)
dataframe[column_name] = ts_array
return dataframe
def get_weekday_information(self, date : Union[list, np.ndarray], file_path : str = None) -> pd.DataFrame:
"""
For a certain date, get the day related information from the file provided with the dataset.
Parameters
----------
date (list, np.ndarray): list of string dates to be checked, format: year-month-day
        file_path (string): default=None. If the path is not provided, the default location of the file is assumed
Returns
-------
day_information_df (pd.DataFrame): DataFrame with columns:
Date (string, date format year-month-day), WorkingDay (boolean), Weekday (string)
"""
if file_path is None:
file_path = os.path.abspath(self.dataset_location + "/../" + "day_information.csv")
day_information_df = None
if self.weekday_information_df is None: # if not initialized yet
self.weekday_information_df = pd.read_csv(file_path)
if type(date) in [list, np.ndarray]:
if not all(isinstance(n, str) for n in date): # if not all dates are strings, convert them
date = [str(n) for n in date]
day_information_df = self.weekday_information_df[self.weekday_information_df.Date.isin(date)]
day_information_df.Date = day_information_df.Date.apply(lambda x: pd.to_datetime(x, format='%Y-%m-%d')).dt.date
return day_information_df
|
[
"pandas.read_csv",
"numpy.mean",
"scipy.interpolate.interp1d",
"os.path.join",
"pandas.DataFrame",
"numpy.pad",
"os.path.abspath",
"numpy.multiply",
"datetime.timedelta",
"os.path.normpath",
"numpy.linspace",
"h5py.File",
"os.stat",
"os.path.basename",
"datetime.datetime.strptime",
"pandas.to_datetime",
"numpy.concatenate",
"numpy.array",
"numpy.sqrt"
] |
[((6149, 6261), 'pandas.DataFrame', 'pd.DataFrame', (["{'Start_timestamp': file_start_times, 'Filename': self.files,\n 'End_timestamp': file_end_times}"], {}), "({'Start_timestamp': file_start_times, 'Filename': self.files,\n 'End_timestamp': file_end_times})\n", (6161, 6261), True, 'import pandas as pd\n'), ((8696, 8718), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (8707, 8718), True, 'import pandas as pd\n'), ((10855, 10877), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (10866, 10877), True, 'import pandas as pd\n'), ((21013, 21054), 'numpy.concatenate', 'np.concatenate', (['relevant_voltage'], {'axis': '(-1)'}), '(relevant_voltage, axis=-1)\n', (21027, 21054), True, 'import numpy as np\n'), ((21082, 21123), 'numpy.concatenate', 'np.concatenate', (['relevant_current'], {'axis': '(-1)'}), '(relevant_current, axis=-1)\n', (21096, 21123), True, 'import numpy as np\n'), ((22009, 22026), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (22017, 22026), True, 'import numpy as np\n'), ((22045, 22062), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (22053, 22062), True, 'import numpy as np\n'), ((23299, 23329), 'numpy.mean', 'np.mean', (['actual_sampling_rates'], {}), '(actual_sampling_rates)\n', (23306, 23329), True, 'import numpy as np\n'), ((23759, 23785), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (23775, 23785), False, 'import os\n'), ((23896, 23958), 'datetime.datetime.strptime', 'datetime.strptime', (['string_timestamp', '"""%Y-%m-%dT%H-%M-%S.%fT%z"""'], {}), "(string_timestamp, '%Y-%m-%dT%H-%M-%S.%fT%z')\n", (23913, 23958), False, 'from datetime import datetime\n'), ((27131, 27224), 'numpy.pad', 'np.pad', (["file['voltage'][:]", '(0, period_length - remainder)', '"""constant"""'], {'constant_values': '(0)'}), "(file['voltage'][:], (0, period_length - remainder), 'constant',\n constant_values=0)\n", (27137, 27224), True, 'import numpy as np\n'), ((27628, 27672), 'numpy.linspace', 'np.linspace', (['(1)', 'length', 'length'], {'dtype': 'np.int'}), '(1, length, length, dtype=np.int)\n', (27639, 27672), True, 'import numpy as np\n'), ((4593, 4638), 'os.path.join', 'os.path.join', (['self.dataset_location', '"""*.hdf5"""'], {}), "(self.dataset_location, '*.hdf5')\n", (4605, 4638), False, 'import os\n'), ((4997, 5041), 'h5py.File', 'h5py.File', (['self.files[0]', '"""r"""'], {'driver': '"""core"""'}), "(self.files[0], 'r', driver='core')\n", (5006, 5041), False, 'import h5py\n'), ((6681, 6722), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.file_duration_sec'}), '(seconds=self.file_duration_sec)\n', (6690, 6722), False, 'from datetime import timedelta\n'), ((6801, 6840), 'os.path.normpath', 'os.path.normpath', (['self.dataset_location'], {}), '(self.dataset_location)\n', (6817, 6840), False, 'import os\n'), ((9021, 9051), 'pandas.to_datetime', 'pd.to_datetime', (['data.Timestamp'], {}), '(data.Timestamp)\n', (9035, 9051), True, 'import pandas as pd\n'), ((17398, 17438), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {'driver': '"""core"""'}), "(file_path, 'r', driver='core')\n", (17407, 17438), False, 'import h5py\n'), ((19419, 19446), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration'}), '(seconds=duration)\n', (19428, 19446), False, 'from datetime import timedelta\n'), ((21231, 21278), 'numpy.concatenate', 'np.concatenate', (['relevant_current_noise'], {'axis': '(-1)'}), '(relevant_current_noise, axis=-1)\n', (21245, 21278), True, 'import numpy as 
np\n'), ((25572, 25607), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'time_since_start'}), '(seconds=time_since_start)\n', (25581, 25607), False, 'from datetime import timedelta\n'), ((27862, 27920), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x_per_period', 'mean_values_per_period'], {}), '(x_per_period, mean_values_per_period)\n', (27882, 27920), False, 'from scipy import interpolate\n'), ((27979, 27989), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (27986, 27989), True, 'import numpy as np\n'), ((29719, 29790), 'os.path.abspath', 'os.path.abspath', (["(self.dataset_location + '/../' + 'day_information.csv')"], {}), "(self.dataset_location + '/../' + 'day_information.csv')\n", (29734, 29790), False, 'import os\n'), ((29942, 29964), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (29953, 29964), True, 'import pandas as pd\n'), ((6041, 6082), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.file_duration_sec'}), '(seconds=self.file_duration_sec)\n', (6050, 6082), False, 'from datetime import timedelta\n'), ((11230, 11258), 'pandas.to_datetime', 'pd.to_datetime', (['data[column]'], {}), '(data[column])\n', (11244, 11258), True, 'import pandas as pd\n'), ((9512, 9540), 'pandas.to_datetime', 'pd.to_datetime', (['data[column]'], {}), '(data[column])\n', (9526, 9540), True, 'import pandas as pd\n'), ((13240, 13258), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (13247, 13258), False, 'import os\n'), ((13310, 13350), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {'driver': '"""core"""'}), "(file_path, 'r', driver='core')\n", (13319, 13350), False, 'import h5py\n'), ((14707, 14741), 'numpy.array', 'np.array', (['[current, current_noise]'], {}), '([current, current_noise])\n', (14715, 14741), True, 'import numpy as np\n'), ((14772, 14789), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (14780, 14789), True, 'import numpy as np\n'), ((14842, 14859), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (14850, 14859), True, 'import numpy as np\n'), ((14890, 14907), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (14898, 14907), True, 'import numpy as np\n'), ((15526, 15543), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (15534, 15543), True, 'import numpy as np\n'), ((15545, 15562), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (15553, 15562), True, 'import numpy as np\n'), ((13850, 13890), 'numpy.multiply', 'np.multiply', (['voltage', 'calibration_factor'], {}), '(voltage, calibration_factor)\n', (13861, 13890), True, 'import numpy as np\n'), ((15217, 15234), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (15225, 15234), True, 'import numpy as np\n'), ((15247, 15264), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (15255, 15264), True, 'import numpy as np\n'), ((30345, 30381), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'format': '"""%Y-%m-%d"""'}), "(x, format='%Y-%m-%d')\n", (30359, 30381), True, 'import pandas as pd\n'), ((14200, 14240), 'numpy.multiply', 'np.multiply', (['current', 'calibration_factor'], {}), '(current, calibration_factor)\n', (14211, 14240), True, 'import numpy as np\n'), ((14588, 14634), 'numpy.multiply', 'np.multiply', (['current_noise', 'calibration_factor'], {}), '(current_noise, calibration_factor)\n', (14599, 14634), True, 'import numpy as np\n')]
|
import json
import coreapi
import coreschema
from django.db.utils import IntegrityError
from django.shortcuts import get_object_or_404
from django.utils.datastructures import MultiValueDictKeyError
from numpydoc import docscrape
from rest_framework import status, schemas
from rest_framework.generics import ListAPIView, RetrieveAPIView, \
ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, \
_convert_clf_param, ATGridSearchCV
from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, \
CVResultScore
from AnyTimeGridSearchCV.grids.serializers import GridSearchSerializer, \
CVResultSerializer, DatasetSerializer
class EstimatorsListView(APIView):
"""
Returns a list of all available scikit-learn classifiers.
"""
def get(self, request, *args, **kwargs):
return Response(list(ESTIMATORS_DICT.keys()), status=status.HTTP_200_OK)
class EstimatorDetailView(APIView):
"""
Returns a detailed view of a scikit-learn classifier - all available arguments for the classifier.
"""
schema = schemas.AutoSchema(manual_fields=[
coreapi.Field(
'clf',
required=True,
location='path',
schema=coreschema.String(
description='scikit-learn Estimator name'
)
),
])
def get(self, request, *args, **kwargs):
try:
clf = ESTIMATORS_DICT[kwargs.get('clf',
'Not a valid scikit-learn estimator name')]
except KeyError:
return Response({'name': '', 'type': '', 'desc': ''},
status=status.HTTP_200_OK)
return Response([{'name': arg_name, 'type': arg_type, 'desc': arg_desc}
for arg_name, arg_type, arg_desc in docscrape.ClassDoc(clf)['Parameters']],
status=status.HTTP_200_OK)
class GridsListView(ListCreateAPIView):
"""
get:
Returns a list of all available grid searches.
post:
Creates a new grid search.
"""
queryset = GridSearch.objects.all()
serializer_class = GridSearchSerializer
def post(self, request, *args, **kwargs):
return ListCreateAPIView.post(self, request, *args, **kwargs)
class GridDetailView(RetrieveAPIView):
"""
Returns the specified grid (uuid, dataset name and scikit-learn classifier name).
"""
queryset = GridSearch.objects.all()
serializer_class = GridSearchSerializer
lookup_field = 'uuid'
class GridResultsListSchema(schemas.AutoSchema):
def get_manual_fields(self, path, method):
manual_fields = schemas.AutoSchema.get_manual_fields(self, path, method)
if method == 'GET':
return manual_fields
elif method == 'POST':
return manual_fields + [coreapi.Field('cv_data', required=True, location='form',
schema=coreschema.Object(description='Cross validation result'))]
class GridResultsList(ListCreateAPIView):
"""
get:
Returns a list of all the results (CV classifications) for given grid.
post:
Creates a new result instance for specified grid.
"""
queryset = CVResult.objects.all()
serializer_class = CVResultSerializer
schema = GridResultsListSchema(manual_fields=[
coreapi.Field(
'uuid',
required=True,
location='path',
schema=coreschema.String(
description='GridSearch UUID'
)
),
])
def get_queryset(self):
_gs = get_object_or_404(GridSearch, uuid=self.kwargs['uuid'])
return _gs.results.all()
def post(self, request, *args, **kwargs):
import numpy
_gs = get_object_or_404(GridSearch, uuid=self.kwargs['uuid'])
multimetric_scores = json.loads(request.data['cv_data'])
scorers = set(map(lambda j: j.split('_')[-1],
filter(lambda i: i != 'fit_time' and i != 'score_time',
multimetric_scores)))
cv_result, _ = CVResult.objects.get_or_create(gridsearch=_gs,
params=json.loads(request.data['params']))
cv_result.fit_time = multimetric_scores['fit_time']
cv_result.score_time = multimetric_scores['score_time']
cv_result.save()
CVResultScore.objects.bulk_create([CVResultScore(scorer=scorer, train_scores=multimetric_scores['train_%s' % scorer],
test_scores=multimetric_scores['test_%s' % scorer],
score=round(numpy.array(multimetric_scores[
'test_%s' % scorer]).mean(), 6),
cv_result=cv_result) for scorer in scorers])
return Response(CVResultSerializer(cv_result).data, status=status.HTTP_201_CREATED)
class DataSetsList(ListCreateAPIView):
"""
get:
Returns a list of all the existing Datasets.
post:
Creates a new Dataset instance.
"""
queryset = DataSet.objects.all()
serializer_class = DatasetSerializer
def post(self, request, *args, **kwargs):
import numpy
try:
name = request.data['name']
except MultiValueDictKeyError:
return Response('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)
if not name:
return Response('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)
try:
examples, labels = request.FILES['examples'], request.FILES['labels']
except MultiValueDictKeyError:
return Response('Missing dataset files', status=status.HTTP_400_BAD_REQUEST)
if examples.name != 'examples.csv':
return Response('Bad name of examples file', status=status.HTTP_400_BAD_REQUEST)
if labels.name != 'labels.csv':
return Response('Bad name of labels file', status=status.HTTP_400_BAD_REQUEST)
if len(numpy.genfromtxt(examples, delimiter=',')) != len(numpy.genfromtxt(labels, delimiter=',')):
return Response('Examples and labels are not the same length', status=status.HTTP_400_BAD_REQUEST)
try:
return Response(DatasetSerializer(DataSet.objects.create(name=name,
examples=examples,
labels=labels)).data,
status=status.HTTP_201_CREATED)
except IntegrityError:
return Response('Name already exists', status=status.HTTP_400_BAD_REQUEST)
class DataSetGridsListView(ListAPIView):
"""
Returns all grid searches on the given Dataset.
"""
queryset = GridSearch.objects.all()
serializer_class = GridSearchSerializer
schema = schemas.AutoSchema(manual_fields=[
coreapi.Field(
'name',
required=True,
location='path',
schema=coreschema.String(
description='Dataset name'
)
),
])
def get_queryset(self):
_ds = get_object_or_404(DataSet, name=self.kwargs['name'])
return _ds.grid_searches.all()
class ATGridSearchCreateView(APIView):
"""
Creates a new ATGridSearch instance (with the grid specified in the request) and starts it.
"""
schema = schemas.AutoSchema(manual_fields=[
coreapi.Field(
'dataset',
required=True,
location='form',
schema=coreschema.String(description='Dataset name')
),
coreapi.Field(
'clf',
required=True,
location='form',
schema=coreschema.String(description='scikit-learn estimator name')
),
coreapi.Field(
'args',
required=True,
location='form',
schema=coreschema.Object(description='Grid to search'),
),
])
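    # Illustrative request body (all values are hypothetical examples; every value in `args`
    # is passed through _convert_clf_param before the grid is built):
    #   {"dataset": "some_dataset", "clf": "DecisionTreeClassifier",
    #    "args": {"max_depth": [1, 2, 3], "criterion": ["gini", "entropy"]}}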
def post(self, request, *args, **kwargs):
try:
ds = DataSet.objects.get(name=request.data['dataset'])
except DataSet.DoesNotExist:
return Response('No DataSet named {}'.format(request.data['dataset']), status=status.HTTP_400_BAD_REQUEST)
try:
classifier = ESTIMATORS_DICT[request.data['clf']]
except KeyError:
return Response('No sklearn classifier named {}'.format(request.data['clf']), status=status.HTTP_400_BAD_REQUEST)
clf_params = {k: _convert_clf_param(v) for k, v in request.data['args'].items()}
gs = ATGridSearchCV(classifier(), clf_params, dataset=ds.pk)
gs.fit()
return Response(gs._uuid, status=status.HTTP_201_CREATED)
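# Example JSON payload (hypothetical values; 'clf' must be a key of ESTIMATORS_DICT and
# 'dataset' the name of an existing DataSet):
#   {"dataset": "iris",
#    "clf": "DecisionTreeClassifier",
#    "args": {"max_depth": "[3, 5, 10]", "criterion": "['gini', 'entropy']"}}
# Each value in "args" is passed through _convert_clf_param before being handed to
# ATGridSearchCV, which is fitted immediately; the view returns the search uuid with HTTP 201.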
|
[
"AnyTimeGridSearchCV.grids.models.DataSet.objects.get",
"json.loads",
"AnyTimeGridSearchCV.grids.models.GridSearch.objects.all",
"AnyTimeGridSearchCV.grids.anytime_search._convert_clf_param",
"AnyTimeGridSearchCV.grids.anytime_search.ESTIMATORS_DICT.keys",
"numpydoc.docscrape.ClassDoc",
"AnyTimeGridSearchCV.grids.models.DataSet.objects.create",
"AnyTimeGridSearchCV.grids.serializers.CVResultSerializer",
"numpy.genfromtxt",
"AnyTimeGridSearchCV.grids.models.CVResult.objects.all",
"rest_framework.generics.ListCreateAPIView.post",
"django.shortcuts.get_object_or_404",
"rest_framework.response.Response",
"numpy.array",
"coreschema.Object",
"AnyTimeGridSearchCV.grids.models.DataSet.objects.all",
"rest_framework.schemas.AutoSchema.get_manual_fields",
"coreschema.String"
] |
[((2201, 2225), 'AnyTimeGridSearchCV.grids.models.GridSearch.objects.all', 'GridSearch.objects.all', ([], {}), '()\n', (2223, 2225), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((2546, 2570), 'AnyTimeGridSearchCV.grids.models.GridSearch.objects.all', 'GridSearch.objects.all', ([], {}), '()\n', (2568, 2570), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((3347, 3369), 'AnyTimeGridSearchCV.grids.models.CVResult.objects.all', 'CVResult.objects.all', ([], {}), '()\n', (3367, 3369), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((5331, 5352), 'AnyTimeGridSearchCV.grids.models.DataSet.objects.all', 'DataSet.objects.all', ([], {}), '()\n', (5350, 5352), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((7037, 7061), 'AnyTimeGridSearchCV.grids.models.GridSearch.objects.all', 'GridSearch.objects.all', ([], {}), '()\n', (7059, 7061), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((2332, 2386), 'rest_framework.generics.ListCreateAPIView.post', 'ListCreateAPIView.post', (['self', 'request', '*args'], {}), '(self, request, *args, **kwargs)\n', (2354, 2386), False, 'from rest_framework.generics import ListAPIView, RetrieveAPIView, ListCreateAPIView\n'), ((2764, 2820), 'rest_framework.schemas.AutoSchema.get_manual_fields', 'schemas.AutoSchema.get_manual_fields', (['self', 'path', 'method'], {}), '(self, path, method)\n', (2800, 2820), False, 'from rest_framework import status, schemas\n'), ((3722, 3777), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['GridSearch'], {'uuid': "self.kwargs['uuid']"}), "(GridSearch, uuid=self.kwargs['uuid'])\n", (3739, 3777), False, 'from django.shortcuts import get_object_or_404\n'), ((3893, 3948), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['GridSearch'], {'uuid': "self.kwargs['uuid']"}), "(GridSearch, uuid=self.kwargs['uuid'])\n", (3910, 3948), False, 'from django.shortcuts import get_object_or_404\n'), ((3978, 4013), 'json.loads', 'json.loads', (["request.data['cv_data']"], {}), "(request.data['cv_data'])\n", (3988, 4013), False, 'import json\n'), ((7410, 7462), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DataSet'], {'name': "self.kwargs['name']"}), "(DataSet, name=self.kwargs['name'])\n", (7427, 7462), False, 'from django.shortcuts import get_object_or_404\n'), ((8956, 9006), 'rest_framework.response.Response', 'Response', (['gs._uuid'], {'status': 'status.HTTP_201_CREATED'}), '(gs._uuid, status=status.HTTP_201_CREATED)\n', (8964, 9006), False, 'from rest_framework.response import Response\n'), ((5682, 5750), 'rest_framework.response.Response', 'Response', (['"""Missing dataset name"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)\n", (5690, 5750), False, 'from rest_framework.response import Response\n'), ((6037, 6110), 'rest_framework.response.Response', 'Response', (['"""Bad name of examples file"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Bad name of examples file', status=status.HTTP_400_BAD_REQUEST)\n", (6045, 6110), False, 'from rest_framework.response import Response\n'), ((6170, 6241), 'rest_framework.response.Response', 'Response', (['"""Bad name of labels file"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Bad name of labels file', 
status=status.HTTP_400_BAD_REQUEST)\n", (6178, 6241), False, 'from rest_framework.response import Response\n'), ((6368, 6464), 'rest_framework.response.Response', 'Response', (['"""Examples and labels are not the same length"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Examples and labels are not the same length', status=status.\n HTTP_400_BAD_REQUEST)\n", (6376, 6464), False, 'from rest_framework.response import Response\n'), ((8334, 8383), 'AnyTimeGridSearchCV.grids.models.DataSet.objects.get', 'DataSet.objects.get', ([], {'name': "request.data['dataset']"}), "(name=request.data['dataset'])\n", (8353, 8383), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((8791, 8812), 'AnyTimeGridSearchCV.grids.anytime_search._convert_clf_param', '_convert_clf_param', (['v'], {}), '(v)\n', (8809, 8812), False, 'from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, _convert_clf_param, ATGridSearchCV\n'), ((963, 985), 'AnyTimeGridSearchCV.grids.anytime_search.ESTIMATORS_DICT.keys', 'ESTIMATORS_DICT.keys', ([], {}), '()\n', (983, 985), False, 'from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, _convert_clf_param, ATGridSearchCV\n'), ((1691, 1764), 'rest_framework.response.Response', 'Response', (["{'name': '', 'type': '', 'desc': ''}"], {'status': 'status.HTTP_200_OK'}), "({'name': '', 'type': '', 'desc': ''}, status=status.HTTP_200_OK)\n", (1699, 1764), False, 'from rest_framework.response import Response\n'), ((4336, 4370), 'json.loads', 'json.loads', (["request.data['params']"], {}), "(request.data['params'])\n", (4346, 4370), False, 'import json\n'), ((5085, 5114), 'AnyTimeGridSearchCV.grids.serializers.CVResultSerializer', 'CVResultSerializer', (['cv_result'], {}), '(cv_result)\n', (5103, 5114), False, 'from AnyTimeGridSearchCV.grids.serializers import GridSearchSerializer, CVResultSerializer, DatasetSerializer\n'), ((5573, 5641), 'rest_framework.response.Response', 'Response', (['"""Missing dataset name"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)\n", (5581, 5641), False, 'from rest_framework.response import Response\n'), ((5904, 5973), 'rest_framework.response.Response', 'Response', (['"""Missing dataset files"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing dataset files', status=status.HTTP_400_BAD_REQUEST)\n", (5912, 5973), False, 'from rest_framework.response import Response\n'), ((6257, 6298), 'numpy.genfromtxt', 'numpy.genfromtxt', (['examples'], {'delimiter': '""","""'}), "(examples, delimiter=',')\n", (6273, 6298), False, 'import numpy\n'), ((6307, 6346), 'numpy.genfromtxt', 'numpy.genfromtxt', (['labels'], {'delimiter': '""","""'}), "(labels, delimiter=',')\n", (6323, 6346), False, 'import numpy\n'), ((6842, 6909), 'rest_framework.response.Response', 'Response', (['"""Name already exists"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Name already exists', status=status.HTTP_400_BAD_REQUEST)\n", (6850, 6909), False, 'from rest_framework.response import Response\n'), ((1338, 1398), 'coreschema.String', 'coreschema.String', ([], {'description': '"""scikit-learn Estimator name"""'}), "(description='scikit-learn Estimator name')\n", (1355, 1398), False, 'import coreschema\n'), ((1934, 1957), 'numpydoc.docscrape.ClassDoc', 'docscrape.ClassDoc', (['clf'], {}), '(clf)\n', (1952, 1957), False, 'from numpydoc import docscrape\n'), ((3582, 3630), 'coreschema.String', 'coreschema.String', ([], {'description': 
'"""GridSearch UUID"""'}), "(description='GridSearch UUID')\n", (3599, 3630), False, 'import coreschema\n'), ((6519, 6586), 'AnyTimeGridSearchCV.grids.models.DataSet.objects.create', 'DataSet.objects.create', ([], {'name': 'name', 'examples': 'examples', 'labels': 'labels'}), '(name=name, examples=examples, labels=labels)\n', (6541, 6586), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((7273, 7318), 'coreschema.String', 'coreschema.String', ([], {'description': '"""Dataset name"""'}), "(description='Dataset name')\n", (7290, 7318), False, 'import coreschema\n'), ((7825, 7870), 'coreschema.String', 'coreschema.String', ([], {'description': '"""Dataset name"""'}), "(description='Dataset name')\n", (7842, 7870), False, 'import coreschema\n'), ((7999, 8059), 'coreschema.String', 'coreschema.String', ([], {'description': '"""scikit-learn estimator name"""'}), "(description='scikit-learn estimator name')\n", (8016, 8059), False, 'import coreschema\n'), ((8189, 8236), 'coreschema.Object', 'coreschema.Object', ([], {'description': '"""Grid to search"""'}), "(description='Grid to search')\n", (8206, 8236), False, 'import coreschema\n'), ((3063, 3119), 'coreschema.Object', 'coreschema.Object', ([], {'description': '"""Cross validation result"""'}), "(description='Cross validation result')\n", (3080, 3119), False, 'import coreschema\n'), ((4825, 4876), 'numpy.array', 'numpy.array', (["multimetric_scores['test_%s' % scorer]"], {}), "(multimetric_scores['test_%s' % scorer])\n", (4836, 4876), False, 'import numpy\n')]
|
import os
import sys
import json
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import models
import worlds
class Executor(object):
STOP = 0
def __init__(self, config):
self.config = config
self.device = config.device
self.vocab = config.vocab
self.world = worlds.load(config)
model_config = config.executor.model
model_config.device = config.device
model_config.vocab_size = len(self.vocab)
model_config.loc_embed_size = config.world.loc_embed_size
model_config.max_instruction_length = config.executor.max_instruction_length
model_config.pad_idx = self.vocab['<PAD>']
self.model = models.load(model_config).to(self.device)
logging.info('model: ' + str(self.model))
self.optim = torch.optim.Adam(
self.model.parameters(), lr=model_config.learning_rate)
if hasattr(model_config, 'load_from'):
self.load(model_config.load_from)
self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
self.max_instruction_length = config.executor.max_instruction_length
def _to_tensor(self, x):
return torch.tensor(x).to(self.device)
def _to_tensor_from_numpy(self, x):
return torch.from_numpy(x).to(self.device)
def _index_and_pad(self, xs, vocab, reverse=True):
encodings = []
masks = []
for x in xs:
x = x[:self.max_instruction_length] + ['<EOS>']
encodings.append([vocab[w] for w in x])
if reverse:
encodings[-1] = list(reversed(encodings[-1]))
masks.append([0] * len(encodings[-1]))
# Padding
max_len = max([len(encoding) for encoding in encodings])
for i, encoding in enumerate(encodings):
encoding.extend([vocab['<PAD>']] * (max_len - len(encoding)))
for mask in masks:
mask.extend([1] * (max_len - len(mask)))
encodings = self._to_tensor(encodings).long()
masks = self._to_tensor(masks).bool()
return encodings, masks
def _nav_action_variable(self, states):
max_num_a = max(len(state.adj_loc_list) for state in states)
invalid = np.zeros((self.batch_size, max_num_a), np.uint8)
action_embed_size = states[0].action_embeddings.shape[-1]
action_embeds = np.zeros(
(self.batch_size, max_num_a, action_embed_size), dtype=np.float32)
for i, state in enumerate(states):
num_a = len(state.adj_loc_list)
invalid[i, num_a:] = 1
action_embeds[i, :num_a, :] = state.action_embeddings
action_embeds = self._to_tensor_from_numpy(action_embeds).float()
invalid = self._to_tensor_from_numpy(invalid).bool()
return action_embeds, invalid
def init(self, init_poses, instructions, is_eval):
if is_eval:
self.model.eval()
else:
self.model.train()
self.is_eval = is_eval
self.batch_size = len(instructions)
self.state_seqs = []
self.pred_action_seqs = [[] for _ in range(self.batch_size)]
self.teacher_action_seqs = []
self.action_logit_seqs = []
self.logit_mask_seqs = []
self.terminated = [False] * self.batch_size
instr_encodings, instr_masks = self._index_and_pad(
instructions, self.vocab)
self.text_dec_h, self.state_dec_h, self.dec_time, self.instructions = \
self.model.encode(instr_encodings, instr_masks)
self.instruction_masks = instr_masks
self.prev_action_embeds = self.model.init_action(self.batch_size)
self.timer = self.config.executor.max_timesteps
init_states = self.world.init(init_poses)
return init_states
def act(self, states, teacher_actions=None, bc=False):
curr_view_features = [state.curr_view_features for state in states]
curr_view_features = self._to_tensor_from_numpy(
np.stack(curr_view_features))
all_action_embeds, logit_masks = self._nav_action_variable(states)
self.text_dec_h, self.state_dec_h, self.dec_time, action_logits = \
self.model.decode(
self.text_dec_h,
self.state_dec_h,
self.dec_time,
self.prev_action_embeds,
all_action_embeds,
self.instructions,
self.instruction_masks,
curr_view_features,
logit_masks
)
self.action_logit_seqs.append(action_logits)
self.logit_mask_seqs.append(logit_masks)
self.state_seqs.append(states)
if self.is_eval:
pred_actions = action_logits.max(dim=1)[1].tolist()
self.prev_actions = pred_actions
for i in range(self.batch_size):
if not self.terminated[i]:
self.pred_action_seqs[i].append(pred_actions[i])
else:
if bc:
pred_actions = teacher_actions
else:
pred_actions = D.Categorical(logits=action_logits).sample().tolist()
self.prev_actions = pred_actions
teacher_actions = self._to_tensor(teacher_actions).long()
for i in range(self.batch_size):
if self.terminated[i]:
teacher_actions[i] = -1
self.teacher_action_seqs.append(teacher_actions)
self.timer -= 1
for i in range(self.batch_size):
self.terminated[i] |= self.timer <= 0
self.terminated[i] |= self.prev_actions[i] == self.STOP
        # Remember the embedding of the action chosen in each environment for the next decode step
        self.prev_action_embeds = all_action_embeds[np.arange(self.batch_size), pred_actions, :].detach()
return self.prev_actions
def has_terminated(self):
return all(self.terminated)
def get_action_seqs(self):
return self.pred_action_seqs
def predict(self, init_poses, instructions):
with torch.no_grad():
states = self.init(init_poses, instructions, True)
paths = [[state.viewpoint] for state in states]
poses = [[pose] for pose in init_poses]
while not self.has_terminated():
pred_actions = self.act(states)
states = states.step(pred_actions)
for i, state in enumerate(states):
pose = (state.scan, state.viewpoint, state.heading, state.elevation)
if not self.terminated[i]:
poses[i].append(pose)
if state.viewpoint != paths[i][-1]:
paths[i].append(states[i].viewpoint)
return paths, poses
def compute_loss(self):
assert len(self.teacher_action_seqs) == len(self.action_logit_seqs)
loss = 0
zipped_info = zip(self.action_logit_seqs, self.teacher_action_seqs)
for logits, refs in zipped_info:
loss += self.loss_fn(logits, refs)
return loss
def learn(self):
loss = self.compute_loss()
self.optim.zero_grad()
loss.backward()
self.optim.step()
return loss.item() / len(self.teacher_action_seqs)
def save(self, name, trajectories=None):
file_path = os.path.join(self.config.experiment_dir, name + '.ckpt')
ckpt = { 'model_state_dict': self.model.state_dict(),
'optim_state_dict': self.optim.state_dict() }
torch.save(ckpt, file_path)
logging.info('Saved %s model to %s' % (name, file_path))
def load(self, file_path):
ckpt = torch.load(file_path, map_location=self.device)
self.model.load_state_dict(ckpt['model_state_dict'])
self.optim.load_state_dict(ckpt['optim_state_dict'])
logging.info('Loaded model from %s' % file_path)
|
[
"numpy.stack",
"torch.distributions.Categorical",
"torch.load",
"torch.nn.CrossEntropyLoss",
"numpy.zeros",
"models.load",
"torch.save",
"logging.info",
"worlds.load",
"numpy.arange",
"torch.no_grad",
"os.path.join",
"torch.tensor",
"torch.from_numpy"
] |
[((391, 410), 'worlds.load', 'worlds.load', (['config'], {}), '(config)\n', (402, 410), False, 'import worlds\n'), ((1095, 1131), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (1114, 1131), True, 'import torch.nn as nn\n'), ((2302, 2350), 'numpy.zeros', 'np.zeros', (['(self.batch_size, max_num_a)', 'np.uint8'], {}), '((self.batch_size, max_num_a), np.uint8)\n', (2310, 2350), True, 'import numpy as np\n'), ((2441, 2516), 'numpy.zeros', 'np.zeros', (['(self.batch_size, max_num_a, action_embed_size)'], {'dtype': 'np.float32'}), '((self.batch_size, max_num_a, action_embed_size), dtype=np.float32)\n', (2449, 2516), True, 'import numpy as np\n'), ((7367, 7423), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', "(name + '.ckpt')"], {}), "(self.config.experiment_dir, name + '.ckpt')\n", (7379, 7423), False, 'import os\n'), ((7557, 7584), 'torch.save', 'torch.save', (['ckpt', 'file_path'], {}), '(ckpt, file_path)\n', (7567, 7584), False, 'import torch\n'), ((7593, 7649), 'logging.info', 'logging.info', (["('Saved %s model to %s' % (name, file_path))"], {}), "('Saved %s model to %s' % (name, file_path))\n", (7605, 7649), False, 'import logging\n'), ((7697, 7744), 'torch.load', 'torch.load', (['file_path'], {'map_location': 'self.device'}), '(file_path, map_location=self.device)\n', (7707, 7744), False, 'import torch\n'), ((7875, 7923), 'logging.info', 'logging.info', (["('Loaded model from %s' % file_path)"], {}), "('Loaded model from %s' % file_path)\n", (7887, 7923), False, 'import logging\n'), ((4080, 4108), 'numpy.stack', 'np.stack', (['curr_view_features'], {}), '(curr_view_features)\n', (4088, 4108), True, 'import numpy as np\n'), ((6068, 6083), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6081, 6083), False, 'import torch\n'), ((776, 801), 'models.load', 'models.load', (['model_config'], {}), '(model_config)\n', (787, 801), False, 'import models\n'), ((1254, 1269), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (1266, 1269), False, 'import torch\n'), ((1342, 1361), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1358, 1361), False, 'import torch\n'), ((5780, 5806), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (5789, 5806), True, 'import numpy as np\n'), ((5184, 5219), 'torch.distributions.Categorical', 'D.Categorical', ([], {'logits': 'action_logits'}), '(logits=action_logits)\n', (5197, 5219), True, 'import torch.distributions as D\n')]
|
from typing import Protocol, Union
import dataclasses
import numpy as np
class Motion(Protocol):
"""Protocol of a 1D motion."""
"""Shift of motion along time axis."""
offset: float
def at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Returns the position at time(s)."""
...
def d_at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Returns the velocity at time(s)."""
...
@dataclasses.dataclass
class PolynomialMotion(Motion):
"""One-dimensional motion represented by a polynomial of degree N.
Args:
offset: Global time offset of this motion
coeffs: N+1 polynomial coefficients starting with the highest term.
"""
offset: float
coeffs: np.ndarray
degree: int = dataclasses.field(init=False)
def __post_init__(self):
self.degree = len(self.coeffs) - 1
self.coeffs = np.asarray(self.coeffs).reshape(-1, 1)
def at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Returns the position at time(s)."""
scalar = np.isscalar(t)
t = np.atleast_1d(t)
v = np.vander(t - self.offset, self.degree + 1) # Nx(D+1)
x = v @ self.coeffs # Nx1
if scalar:
return x.item()
else:
return x.squeeze(-1)
def d_at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Returns the velocity at time(s)."""
scalar = np.isscalar(t)
t = np.atleast_1d(t) - self.offset
dv = np.array(
[i * t ** (i - 1) for i in reversed(range(1, self.degree + 1))]
) # NxD
dx = dv.T @ self.coeffs[:-1]
if scalar:
return dx.item()
else:
return dx.squeeze(-1)
def poly_blend_3(m1: Motion, m2: Motion, tnow: float, h: float) -> PolynomialMotion:
"""Returns a third-degree polynomial function that blends two motions.
Args:
m1: First motion
m2: Second motion
tnow: Start of blend
h: Horizon of blend
Returns:
mblend: Polynomial motion blending m1 and m2 in segment [tnow, tnow+h].
"""
if h <= 0.0:
raise ValueError("Horizon has to be > 0.0")
A = np.zeros((4, 4))
b = np.zeros(4)
# Position at start (tnow) should match m1
# Note, the offset (shift) of blended motion will be tnow
A[0, 0] = 0
A[0, 1] = 0
A[0, 2] = 0
A[0, 3] = 1
b[0] = m1.at(tnow)
# Position at end of horizon should match m2
A[1, 0] = h ** 3
A[1, 1] = h ** 2
A[1, 2] = h
A[1, 3] = 1
b[1] = m2.at(tnow + h)
# Velocity at start should match m1
A[2, 0] = 0
A[2, 1] = 0
A[2, 2] = 1
A[2, 3] = 0
b[2] = m1.d_at(tnow)
# Velocity at end should match m2
A[3, 0] = 3 * h ** 2
A[3, 1] = 2 * h
A[3, 2] = 1
A[3, 3] = 0
b[3] = m2.d_at(tnow + h)
coeffs = np.linalg.solve(A, b) # TODO: handle singularities
return PolynomialMotion(tnow, coeffs)
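# Minimal usage sketch (hypothetical values): blend a constant-velocity motion into a
# stationary one over a 2 s horizon starting at t = 1.  By construction the blend matches
# m1's position and velocity at t = 1 and m2's at t = 3 (up to floating point error):
#   m1 = PolynomialMotion(offset=0.0, coeffs=[1.0, 0.0])   # x(t) = t
#   m2 = PolynomialMotion(offset=0.0, coeffs=[2.0])        # x(t) = 2
#   blend = poly_blend_3(m1, m2, tnow=1.0, h=2.0)
#   blend.at(1.0), blend.d_at(1.0)   # ~ (1.0, 1.0)
#   blend.at(3.0), blend.d_at(3.0)   # ~ (2.0, 0.0)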
@dataclasses.dataclass
class PolynomialMotionBlend(Motion):
"""A piecewise blended motion with C1 smoothness.
The blended motion consists of three pieces
- m1 when t < start
- blend when start <= t <= end of blending
- m2 when end < t
At joint points the positions and first order derivatives match up.
If `flatten` is True, m1 and m2 will be simplified assuming that t is
monotonically increasing and values of `t < start` are not of interest.
Otherwise, recursive blending may lead to memory overflow.
"""
m1: Motion
m2: Motion
offset: float
horizon: float
blend: Motion = dataclasses.field(init=False)
flatten: dataclasses.InitVar[bool] = False
def __post_init__(self, flatten: bool):
if flatten:
self.m1 = _flatten(self.m1, self.offset)
self.m2 = _flatten(self.m2, self.offset)
self.blend = poly_blend_3(self.m1, self.m2, self.offset, self.horizon)
def at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return self._compute(t, "at")
def d_at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
return self._compute(t, "d_at")
@property
def range(self):
return (self.offset, self.offset + self.horizon)
def _compute(
self, t: Union[float, np.ndarray], attr: str
) -> Union[float, np.ndarray]:
scalar = np.isscalar(t)
t = np.atleast_1d(t)
low, high = self.range
x = np.empty_like(t)
mask = t < low
x[mask] = getattr(self.m1, attr)(t[mask])
mask = t > high
x[mask] = getattr(self.m2, attr)(t[mask])
mask = np.logical_and(t >= low, t <= high)
x[mask] = getattr(self.blend, attr)(t[mask])
if scalar:
return x.item()
else:
return x
def _flatten(m: Motion, offset: float) -> Motion:
"""Recursively simplify older motions to avoid stacking of blends.
    The resulting motion is identical for `t>=offset`, but may change for
values less than offset.
"""
if isinstance(m, PolynomialMotionBlend):
if m.range[1] < offset:
return m.m2
elif m.range[0] < offset:
return m.blend
else:
return _flatten(m.m1, offset)
else:
return m
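# Hedged example of repeated retargeting (hypothetical motions m1, m2, m3): each new blend
# wraps the previous one, and flatten=True prunes pieces that lie entirely before the new
# blend window so the recursion stays shallow.
#   b1 = PolynomialMotionBlend(m1, m2, offset=1.0, horizon=2.0)
#   b2 = PolynomialMotionBlend(b1, m3, offset=4.0, horizon=2.0, flatten=True)
#   # b2.m1 is m2 (not b1), because b1's blend window [1.0, 3.0] ends before t = 4.0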
|
[
"numpy.vander",
"numpy.logical_and",
"numpy.isscalar",
"numpy.asarray",
"numpy.zeros",
"numpy.empty_like",
"dataclasses.field",
"numpy.atleast_1d",
"numpy.linalg.solve"
] |
[((802, 831), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (819, 831), False, 'import dataclasses\n'), ((2260, 2276), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (2268, 2276), True, 'import numpy as np\n'), ((2285, 2296), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2293, 2296), True, 'import numpy as np\n'), ((2934, 2955), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2949, 2955), True, 'import numpy as np\n'), ((3672, 3701), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (3689, 3701), False, 'import dataclasses\n'), ((1106, 1120), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (1117, 1120), True, 'import numpy as np\n'), ((1133, 1149), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (1146, 1149), True, 'import numpy as np\n'), ((1163, 1206), 'numpy.vander', 'np.vander', (['(t - self.offset)', '(self.degree + 1)'], {}), '(t - self.offset, self.degree + 1)\n', (1172, 1206), True, 'import numpy as np\n'), ((1490, 1504), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (1501, 1504), True, 'import numpy as np\n'), ((4448, 4462), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (4459, 4462), True, 'import numpy as np\n'), ((4475, 4491), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (4488, 4491), True, 'import numpy as np\n'), ((4536, 4552), 'numpy.empty_like', 'np.empty_like', (['t'], {}), '(t)\n', (4549, 4552), True, 'import numpy as np\n'), ((4715, 4750), 'numpy.logical_and', 'np.logical_and', (['(t >= low)', '(t <= high)'], {}), '(t >= low, t <= high)\n', (4729, 4750), True, 'import numpy as np\n'), ((1517, 1533), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (1530, 1533), True, 'import numpy as np\n'), ((927, 950), 'numpy.asarray', 'np.asarray', (['self.coeffs'], {}), '(self.coeffs)\n', (937, 950), True, 'import numpy as np\n')]
|
import scipy.sparse as sp
import pandas as pd
import numpy as np
import torch
import h5py
def get_adj(num_rows, num_cols, row_idx, col_idx, device):
adj = torch.zeros((num_rows, num_cols), dtype=torch.float32, device=device)
adj[row_idx, col_idx] = 1.
adj = adj / adj.sum(dim=1, keepdim=True)
adj.masked_fill_(torch.isnan(adj), 0)
return adj
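# Toy example (hypothetical sizes): two observed (row, col) pairs in a 3x4 matrix.
# Row 0 gets two entries of 0.5 after row-normalisation; rows with no observations
# would give 0/0 = NaN and are therefore zeroed by masked_fill_ above.
#   adj = get_adj(3, 4, torch.tensor([0, 0]), torch.tensor([1, 3]), 'cpu')
#   adj[0]  # tensor([0.0, 0.5, 0.0, 0.5])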
def load_matlab_file(path_file, name_field):
db = h5py.File(path_file, 'r')
ds = db[name_field]
try:
if 'ir' in ds.keys():
data = np.asarray(ds['data'])
ir = np.asarray(ds['ir'])
jc = np.asarray(ds['jc'])
out = sp.csc_matrix((data, ir, jc))
except AttributeError:
out = np.asarray(ds).T
db.close()
    return out.astype(int)  # np.int was removed from NumPy; the builtin int keeps the same behaviour
def matrix2data(matrix, rating):
idx = np.argwhere(matrix > 0)
rows = idx[:, 0]
columns = idx[:, 1]
ratings = rating[rows, columns].reshape(-1, 1)
data = np.concatenate([idx, ratings], axis=1)
data = pd.DataFrame(data, columns=('user', 'movie', 'rating'))
return data
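# Usage sketch (hypothetical file and field names): load a rating matrix stored in a
# MATLAB v7.3 file and flatten its observed entries into a (user, movie, rating) frame.
#   mask = load_matlab_file('ratings.mat', 'Otraining')   # 0/1 observation mask
#   full = load_matlab_file('ratings.mat', 'M')           # full rating matrix
#   df = matrix2data(mask, full)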
|
[
"pandas.DataFrame",
"h5py.File",
"numpy.asarray",
"numpy.argwhere",
"scipy.sparse.csc_matrix",
"torch.zeros",
"torch.isnan",
"numpy.concatenate"
] |
[((162, 231), 'torch.zeros', 'torch.zeros', (['(num_rows, num_cols)'], {'dtype': 'torch.float32', 'device': 'device'}), '((num_rows, num_cols), dtype=torch.float32, device=device)\n', (173, 231), False, 'import torch\n'), ((424, 449), 'h5py.File', 'h5py.File', (['path_file', '"""r"""'], {}), "(path_file, 'r')\n", (433, 449), False, 'import h5py\n'), ((831, 854), 'numpy.argwhere', 'np.argwhere', (['(matrix > 0)'], {}), '(matrix > 0)\n', (842, 854), True, 'import numpy as np\n'), ((964, 1002), 'numpy.concatenate', 'np.concatenate', (['[idx, ratings]'], {'axis': '(1)'}), '([idx, ratings], axis=1)\n', (978, 1002), True, 'import numpy as np\n'), ((1014, 1069), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "('user', 'movie', 'rating')"}), "(data, columns=('user', 'movie', 'rating'))\n", (1026, 1069), True, 'import pandas as pd\n'), ((330, 346), 'torch.isnan', 'torch.isnan', (['adj'], {}), '(adj)\n', (341, 346), False, 'import torch\n'), ((533, 555), 'numpy.asarray', 'np.asarray', (["ds['data']"], {}), "(ds['data'])\n", (543, 555), True, 'import numpy as np\n'), ((573, 593), 'numpy.asarray', 'np.asarray', (["ds['ir']"], {}), "(ds['ir'])\n", (583, 593), True, 'import numpy as np\n'), ((611, 631), 'numpy.asarray', 'np.asarray', (["ds['jc']"], {}), "(ds['jc'])\n", (621, 631), True, 'import numpy as np\n'), ((650, 679), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ir, jc)'], {}), '((data, ir, jc))\n', (663, 679), True, 'import scipy.sparse as sp\n'), ((721, 735), 'numpy.asarray', 'np.asarray', (['ds'], {}), '(ds)\n', (731, 735), True, 'import numpy as np\n')]
|
import numpy as np
from ...dimensions.dim_linear import DimLinear
from ...dimensions.dim_angular import DimAngular
from ...dimensions import DimRadian
from ..cross_sect_base import CrossSectBase, CrossSectToken
__all__ = ['CrossSectParallelogram']
class CrossSectParallelogram(CrossSectBase):
def __init__(self, **kwargs: any) -> None:
'''
        Initialization function for the Parallelogram class. This function takes in
        arguments and saves the information passed to private variables to make
        them read-only.
Parameters
----------
**kwargs : any
            Keyword arguments provided to the initialization function.
The following argument names have to be included in order for the code
to execute: name, dim_l, dim_t, dim_theta, location.
Returns
-------
None
'''
self._create_attr(kwargs)
super()._validate_attr()
self._validate_attr()
@property
def dim_l(self):
return self._dim_l
@property
def dim_t(self):
return self._dim_t
@property
def dim_theta(self):
return self._dim_theta
def draw(self, drawer):
l = self.dim_l # height of the parallelogram
t = self.dim_t # width of the parallelogram
theta = DimRadian(self.dim_theta) # angle of the parallelogram
x = [0, l * np.cos(theta), l * np.cos(theta) + t / np.sin(theta), t / np.sin(theta)]
        y = [0, l * np.sin(theta), l * np.sin(theta), 0]
z = np.array([x, y])
coords = np.transpose(z)
points = self.location.transform_coords(coords)
# draw parallelogram
side_1 = drawer.draw_line(points[0], points[1])
side_2 = drawer.draw_line(points[1], points[2])
side_3 = drawer.draw_line(points[2], points[3])
side_4 = drawer.draw_line(points[3], points[0])
x_coord = (l * np.cos(theta) + t / np.sin(theta)) / 2
y_coord = l * np.sin(theta) / 2
ic = np.array([[x_coord, y_coord]])
inner_coord = self.location.transform_coords(ic)
segments = [side_1, side_2, side_3, side_4]
cs_token = CrossSectToken(inner_coord[0], segments)
return cs_token
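    # Worked check of the local geometry: for dim_theta = 90 deg the four vertices above
    # reduce to an axis-aligned rectangle of height dim_l and width dim_t, i.e.
    # (0, 0), (0, l), (t, l), (t, 0), before location.transform_coords is applied.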
def _validate_attr(self):
if not isinstance(self._dim_l, DimLinear):
raise TypeError('dim_l is not of DimLinear')
if not isinstance(self._dim_t, DimLinear):
raise TypeError('dim_t is not of DimLinear')
if not isinstance(self._dim_theta, DimAngular):
raise TypeError('dim_theta is not of DimAngular')
|
[
"numpy.transpose",
"numpy.sin",
"numpy.array",
"numpy.cos"
] |
[((1556, 1572), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1564, 1572), True, 'import numpy as np\n'), ((1591, 1606), 'numpy.transpose', 'np.transpose', (['z'], {}), '(z)\n', (1603, 1606), True, 'import numpy as np\n'), ((2036, 2066), 'numpy.array', 'np.array', (['[[x_coord, y_coord]]'], {}), '([[x_coord, y_coord]])\n', (2044, 2066), True, 'import numpy as np\n'), ((1411, 1424), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1417, 1424), True, 'import numpy as np\n'), ((1469, 1482), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1475, 1482), True, 'import numpy as np\n'), ((1505, 1518), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1511, 1518), True, 'import numpy as np\n'), ((1524, 1537), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1530, 1537), True, 'import numpy as np\n'), ((2004, 2017), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2010, 2017), True, 'import numpy as np\n'), ((1430, 1443), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1436, 1443), True, 'import numpy as np\n'), ((1450, 1463), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1456, 1463), True, 'import numpy as np\n'), ((1943, 1956), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1949, 1956), True, 'import numpy as np\n'), ((1963, 1976), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1969, 1976), True, 'import numpy as np\n')]
|
from pyraf import iraf
import glob, os
import numpy as np
import pylab as py
import math, datetime
import pyfits
from gcwork import objects
from . import dar
def diffDarOnOff(cleanDir1, cleanDir2):
files1tmp = glob.glob(cleanDir1 + '/c????.fits')
files2tmp = glob.glob(cleanDir2 + '/c????.fits')
for f1 in files1tmp:
cname1 = f1.split('/')[-1]
for f2 in files2tmp:
cname2 = f2.split('/')[-1]
if (cname1 == cname2):
outname = cname1.replace('c', 'diff')
print('IMARITH: %s - %s = %s' % (cname1, cname2, outname))
if (os.path.exists(outname)):
iraf.imdelete(outname)
iraf.imarith(f1, '-', f2, outname)
def plotScalePosangOverNight(alignRoot, imgDir):
# Read in the list of images used in the alignment
listFile = open(alignRoot+'.list', 'r')
parang = []
for line in listFile:
_data = line.split()
lisFile = _data[0].split('/')[-1]
if (lisFile.startswith('mag')):
continue
fitsFile = imgDir + lisFile.split('_')[0] + '.fits'
# Get header info
hdr = pyfits.getheader( fitsFile )
parang.append( hdr['PARANG'] )
parang = np.array(parang)
numEpochs = len(parang)
# Load scales/angles
scale = np.zeros(numEpochs, float)
angle = np.zeros(numEpochs, float)
sgrax = np.zeros(numEpochs, float)
sgray = np.zeros(numEpochs, float)
scaleErr = np.zeros(numEpochs, float)
angleErr = np.zeros(numEpochs, float)
sgraxErr = np.zeros(numEpochs, float)
sgrayErr = np.zeros(numEpochs, float)
imgPA = np.zeros(numEpochs, float)
for e in range(numEpochs):
trans = objects.Transform()
trans.loadFromAbsolute(root='./', align=alignRoot + '.trans', idx=e+1)
trans.linearToSpherical(silent=1, override=False)
scale[e] = trans.scale
angle[e] = math.degrees(trans.angle)
    scale *= 9.96  # convert relative scale to an absolute plate scale in mas/pix
py.clf()
py.subplot(2, 1, 1)
py.plot(parang, scale, 'k.')
py.ylabel('Plate Scale (mas/pix)')
py.xlabel('Parallactic Angle (deg)')
py.title('Relative Transformation')
py.subplot(2, 1, 2)
py.plot(parang, angle, 'k.')
py.ylabel('Position Angle (deg)')
py.xlabel('Parallactic Angle (deg)')
py.savefig('plots/scale_pa_vs_parang.png')
def plotDarCoeffsVsZenith():
effWave = 2.12 # microns
utc = datetime.datetime(2008, 6, 15, 0, 0, 0)
utc2hst = datetime.timedelta(hours=-10)
hst = utc + utc2hst
(refA, refB) = dar.keckDARcoeffs(effWave, hst.year, hst.month, hst.day,
hst.hour, hst.minute)
elevation = np.arange(30.0, 90.0, 1.0)
tanz = np.tan((90.0 - elevation) * math.pi / 180.0)
tmp = 1.0 + tanz**2
darCoeffL = tmp * (refA + 3.0 * refB * tanz**2)
darCoeffQ = -tmp * (refA*tanz +
3.0 * refB * (tanz + 2.0*tanz**3))
    # Convert DAR coefficients for separations given in arcseconds (1 rad = 206265 arcsec; the linear term is dimensionless)
darCoeffL *= 1.0
darCoeffQ *= 1.0 / 206265.0
# 1" sep
linear1 = darCoeffL * 1.0 * 10**3 # in mas
quadra1 = darCoeffQ * 1.0**2 * 10**3 # in mas
# 10" sep
linear2 = darCoeffL * 10.0 * 10**3 # in mas
quadra2 = darCoeffQ * 10.0**2 * 10**3 # in mas
# 60" sep
linear3 = darCoeffL * 60.0 * 10**3 # in mas
quadra3 = darCoeffQ * 60.0**2 * 10**3 # in mas
    print(' Linear(mas) Quadratic(mas)')
print('1" sep %12.7f %12.7f' % (linear1.mean(), quadra1.mean()))
print('10" sep %12.7f %12.7f' % (linear2.mean(), quadra2.mean()))
print('60" sep %12.7f %12.7f' % (linear3.mean(), quadra3.mean()))
py.clf()
py.semilogy(elevation, linear1, 'r-')
py.semilogy(elevation, -quadra1, 'r--')
py.semilogy(elevation, linear2, 'b-')
py.semilogy(elevation, -quadra2, 'b--')
py.semilogy(elevation, linear3, 'g-')
py.semilogy(elevation, -quadra3, 'g--')
py.legend(('1" lin', '1" quad',
'10" lin', '10" quad', '60" lin', '60" quad'), loc='lower left')
py.xlabel('Elevation (deg)')
py.ylabel('Delta-R (mas)')
py.savefig('dar_linear_vs_quad_terms.png')
py.savefig('dar_linear_vs_quad_terms.eps')
|
[
"pyraf.iraf.imarith",
"numpy.arange",
"glob.glob",
"pylab.title",
"pylab.ylabel",
"os.path.exists",
"numpy.tan",
"datetime.timedelta",
"pylab.xlabel",
"pylab.legend",
"gcwork.objects.Transform",
"datetime.datetime",
"pylab.subplot",
"pylab.savefig",
"math.degrees",
"pyraf.iraf.imdelete",
"pylab.semilogy",
"numpy.zeros",
"pyfits.getheader",
"numpy.array",
"pylab.clf",
"pylab.plot"
] |
[((215, 251), 'glob.glob', 'glob.glob', (["(cleanDir1 + '/c????.fits')"], {}), "(cleanDir1 + '/c????.fits')\n", (224, 251), False, 'import glob, os\n'), ((268, 304), 'glob.glob', 'glob.glob', (["(cleanDir2 + '/c????.fits')"], {}), "(cleanDir2 + '/c????.fits')\n", (277, 304), False, 'import glob, os\n'), ((1292, 1308), 'numpy.array', 'np.array', (['parang'], {}), '(parang)\n', (1300, 1308), True, 'import numpy as np\n'), ((1375, 1401), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1383, 1401), True, 'import numpy as np\n'), ((1414, 1440), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1422, 1440), True, 'import numpy as np\n'), ((1453, 1479), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1461, 1479), True, 'import numpy as np\n'), ((1492, 1518), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1500, 1518), True, 'import numpy as np\n'), ((1534, 1560), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1542, 1560), True, 'import numpy as np\n'), ((1576, 1602), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1584, 1602), True, 'import numpy as np\n'), ((1618, 1644), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1626, 1644), True, 'import numpy as np\n'), ((1660, 1686), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1668, 1686), True, 'import numpy as np\n'), ((1699, 1725), 'numpy.zeros', 'np.zeros', (['numEpochs', 'float'], {}), '(numEpochs, float)\n', (1707, 1725), True, 'import numpy as np\n'), ((2044, 2052), 'pylab.clf', 'py.clf', ([], {}), '()\n', (2050, 2052), True, 'import pylab as py\n'), ((2057, 2076), 'pylab.subplot', 'py.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2067, 2076), True, 'import pylab as py\n'), ((2081, 2109), 'pylab.plot', 'py.plot', (['parang', 'scale', '"""k."""'], {}), "(parang, scale, 'k.')\n", (2088, 2109), True, 'import pylab as py\n'), ((2114, 2148), 'pylab.ylabel', 'py.ylabel', (['"""Plate Scale (mas/pix)"""'], {}), "('Plate Scale (mas/pix)')\n", (2123, 2148), True, 'import pylab as py\n'), ((2153, 2189), 'pylab.xlabel', 'py.xlabel', (['"""Parallactic Angle (deg)"""'], {}), "('Parallactic Angle (deg)')\n", (2162, 2189), True, 'import pylab as py\n'), ((2194, 2229), 'pylab.title', 'py.title', (['"""Relative Transformation"""'], {}), "('Relative Transformation')\n", (2202, 2229), True, 'import pylab as py\n'), ((2235, 2254), 'pylab.subplot', 'py.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2245, 2254), True, 'import pylab as py\n'), ((2259, 2287), 'pylab.plot', 'py.plot', (['parang', 'angle', '"""k."""'], {}), "(parang, angle, 'k.')\n", (2266, 2287), True, 'import pylab as py\n'), ((2292, 2325), 'pylab.ylabel', 'py.ylabel', (['"""Position Angle (deg)"""'], {}), "('Position Angle (deg)')\n", (2301, 2325), True, 'import pylab as py\n'), ((2330, 2366), 'pylab.xlabel', 'py.xlabel', (['"""Parallactic Angle (deg)"""'], {}), "('Parallactic Angle (deg)')\n", (2339, 2366), True, 'import pylab as py\n'), ((2372, 2414), 'pylab.savefig', 'py.savefig', (['"""plots/scale_pa_vs_parang.png"""'], {}), "('plots/scale_pa_vs_parang.png')\n", (2382, 2414), True, 'import pylab as py\n'), ((2485, 2524), 'datetime.datetime', 'datetime.datetime', (['(2008)', '(6)', '(15)', '(0)', '(0)', '(0)'], {}), '(2008, 6, 15, 0, 0, 0)\n', (2502, 2524), False, 'import math, datetime\n'), ((2539, 2568), 
'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(-10)'}), '(hours=-10)\n', (2557, 2568), False, 'import math, datetime\n'), ((2746, 2772), 'numpy.arange', 'np.arange', (['(30.0)', '(90.0)', '(1.0)'], {}), '(30.0, 90.0, 1.0)\n', (2755, 2772), True, 'import numpy as np\n'), ((2784, 2828), 'numpy.tan', 'np.tan', (['((90.0 - elevation) * math.pi / 180.0)'], {}), '((90.0 - elevation) * math.pi / 180.0)\n', (2790, 2828), True, 'import numpy as np\n'), ((3753, 3761), 'pylab.clf', 'py.clf', ([], {}), '()\n', (3759, 3761), True, 'import pylab as py\n'), ((3766, 3803), 'pylab.semilogy', 'py.semilogy', (['elevation', 'linear1', '"""r-"""'], {}), "(elevation, linear1, 'r-')\n", (3777, 3803), True, 'import pylab as py\n'), ((3808, 3847), 'pylab.semilogy', 'py.semilogy', (['elevation', '(-quadra1)', '"""r--"""'], {}), "(elevation, -quadra1, 'r--')\n", (3819, 3847), True, 'import pylab as py\n'), ((3853, 3890), 'pylab.semilogy', 'py.semilogy', (['elevation', 'linear2', '"""b-"""'], {}), "(elevation, linear2, 'b-')\n", (3864, 3890), True, 'import pylab as py\n'), ((3895, 3934), 'pylab.semilogy', 'py.semilogy', (['elevation', '(-quadra2)', '"""b--"""'], {}), "(elevation, -quadra2, 'b--')\n", (3906, 3934), True, 'import pylab as py\n'), ((3940, 3977), 'pylab.semilogy', 'py.semilogy', (['elevation', 'linear3', '"""g-"""'], {}), "(elevation, linear3, 'g-')\n", (3951, 3977), True, 'import pylab as py\n'), ((3982, 4021), 'pylab.semilogy', 'py.semilogy', (['elevation', '(-quadra3)', '"""g--"""'], {}), "(elevation, -quadra3, 'g--')\n", (3993, 4021), True, 'import pylab as py\n'), ((4027, 4127), 'pylab.legend', 'py.legend', (['(\'1" lin\', \'1" quad\', \'10" lin\', \'10" quad\', \'60" lin\', \'60" quad\')'], {'loc': '"""lower left"""'}), '((\'1" lin\', \'1" quad\', \'10" lin\', \'10" quad\', \'60" lin\',\n \'60" quad\'), loc=\'lower left\')\n', (4036, 4127), True, 'import pylab as py\n'), ((4144, 4172), 'pylab.xlabel', 'py.xlabel', (['"""Elevation (deg)"""'], {}), "('Elevation (deg)')\n", (4153, 4172), True, 'import pylab as py\n'), ((4177, 4203), 'pylab.ylabel', 'py.ylabel', (['"""Delta-R (mas)"""'], {}), "('Delta-R (mas)')\n", (4186, 4203), True, 'import pylab as py\n'), ((4209, 4251), 'pylab.savefig', 'py.savefig', (['"""dar_linear_vs_quad_terms.png"""'], {}), "('dar_linear_vs_quad_terms.png')\n", (4219, 4251), True, 'import pylab as py\n'), ((4256, 4298), 'pylab.savefig', 'py.savefig', (['"""dar_linear_vs_quad_terms.eps"""'], {}), "('dar_linear_vs_quad_terms.eps')\n", (4266, 4298), True, 'import pylab as py\n'), ((1210, 1236), 'pyfits.getheader', 'pyfits.getheader', (['fitsFile'], {}), '(fitsFile)\n', (1226, 1236), False, 'import pyfits\n'), ((1774, 1793), 'gcwork.objects.Transform', 'objects.Transform', ([], {}), '()\n', (1791, 1793), False, 'from gcwork import objects\n'), ((1994, 2019), 'math.degrees', 'math.degrees', (['trans.angle'], {}), '(trans.angle)\n', (2006, 2019), False, 'import math, datetime\n'), ((621, 644), 'os.path.exists', 'os.path.exists', (['outname'], {}), '(outname)\n', (635, 644), False, 'import glob, os\n'), ((706, 740), 'pyraf.iraf.imarith', 'iraf.imarith', (['f1', '"""-"""', 'f2', 'outname'], {}), "(f1, '-', f2, outname)\n", (718, 740), False, 'from pyraf import iraf\n'), ((667, 689), 'pyraf.iraf.imdelete', 'iraf.imdelete', (['outname'], {}), '(outname)\n', (680, 689), False, 'from pyraf import iraf\n')]
|
from jupyterthemes import jtplot
import numpy as np
import os
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.ndimage import filters
from textwrap import wrap
import torch
import vectorized_agents as va
import vectorized_env as ve
jtplot.style()
DEVICE = torch.device('cuda')
if DEVICE == torch.device('cpu'):
os.environ['OMP_NUM_THREADS'] = '4'
n_envs = 50
else:
os.environ['OMP_NUM_THREADS'] = '8'
n_envs = 200
ENV_KWARGS = dict(
n_envs=n_envs,
env_device=DEVICE,
out_device=DEVICE,
reward_type=ve.EVERY_STEP_EV_ZEROSUM
)
all_ensemble_names = ['a3c_agent_small_8_32', 'awac_agent_small_8_64_32_1_norm', 'a3c_agent_small_8_64_32_2']
PLAYER_1s = [
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[:2], weight_logits=False, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble([all_ensemble_names[0], all_ensemble_names[2]], weight_logits=False, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[-2:], weight_logits=False, deterministic_policy=True),
va.SavedRLAgentMultiObsEnsemble(all_ensemble_names, weight_logits=False, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[:2], weight_logits=True, deterministic_policy=True),
#va.SavedRLAgentMultiObsEnsemble([all_ensemble_names[0], all_ensemble_names[2]], weight_logits=True, deterministic_policy=True),
### LEFT OFF HERE:
#va.SavedRLAgentMultiObsEnsemble(all_ensemble_names[-2:], weight_logits=True, deterministic_policy=True),
va.SavedRLAgentMultiObsEnsemble(all_ensemble_names, weight_logits=True, deterministic_policy=True),
# va.SavedRLAgentEnsemble('a3c_agent_small_8_64_32_2', weight_logits=True, device=DEVICE, deterministic_policy=True),
# va.SavedRLAgentEnsemble('awac_agent_small_8_64_32_1_norm', weight_logits=False, device=DEVICE, deterministic_policy=True),
# va.SavedRLAgentEnsemble('a3c_agent_small_8_32', weight_logits=True, device=DEVICE, deterministic_policy=True),
# va.SavedRLAgent('awac_agent_small_8_64_32_1_norm_v1-230', device=DEVICE, deterministic_policy=True),
# va.SavedRLAgent('a3c_agent_small_8_32-790', device=DEVICE, deterministic_policy=True),
# va.SavedRLAgent('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE, deterministic_policy=False)
]
PLAYER_2s = [
va.BasicThompsonSampling(),
va.PullVegasSlotMachines(),
va.PullVegasSlotMachinesImproved(),
va.SavedRLAgent('a3c_agent_small_8_32-790', device=DEVICE, deterministic_policy=True),
va.SavedRLAgent('awac_agent_small_8_64_32_1_norm_v1-230', deterministic_policy=True),
va.SavedRLAgent('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE, deterministic_policy=False),
#va.SavedRLAgentEnsemble('a3c_agent_small_8_32', weight_logits=True, device=DEVICE, deterministic_policy=True),
va.SavedRLAgentEnsemble('a3c_agent_small_8_64_32_2', weight_logits=True, device=DEVICE, deterministic_policy=True),
#va.SavedRLAgentEnsemble('awac_agent_small_8_64_32_1_norm', weight_logits=False, device=DEVICE, deterministic_policy=True),
]
def wrap_title(title):
return '\n'.join(wrap(title, 55, break_long_words=True))
if __name__ == '__main__':
for player_1 in PLAYER_1s:
for player_2 in PLAYER_2s:
if player_1 == player_2:
continue
p1_score, rewards_over_time = va.run_vectorized_vs(player_1, player_2, display_out=True, **ENV_KWARGS)
rewards_over_time = rewards_over_time.cpu().numpy().squeeze()
cumulative_ymax = 10
expected_ymax = 0.10
q = np.linspace(0., 100., 11)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
n_rows = 2
n_cols = 2
fig, axes = plt.subplots(n_rows, n_cols, figsize=(8 * n_cols, 8 * n_rows))
fig_title = (f'{player_1.name} -vs- {player_2.name}\n'
f'{p1_score * 100.:.2f}% winrate over {rewards_over_time.shape[1]} games')
fig.suptitle(wrap_title(fig_title))
axes = axes.ravel()
axes[0].plot(np.cumsum(rewards_over_time.mean(axis=1)))
axes[0].set_ylim((-cumulative_ymax, cumulative_ymax))
axes[0].set_title(wrap_title(f"{player_1.name} cumulative expected advantage"))
window_size = 50
axes[1].plot(filters.uniform_filter1d(rewards_over_time.mean(axis=1), window_size, mode='constant'))
axes[1].set_ylim((-expected_ymax, expected_ymax))
axes[1].set_title(wrap_title(f"{player_1.name} per-step expected advantage"))
for i, quantile, val in zip(
range(len(q)),
q,
np.percentile(np.cumsum(rewards_over_time,
axis=0),
q, axis=1)):
color_idx = int(abs((len(q) - 1.) / 2. - i))
axes[2].plot(val, label=f'Percentile: {quantile:.0f}',
color=colors[color_idx],
alpha=1. / (color_idx + 1),
# linewidth=3./(color_idx+1)
)
axes[2].set_ylim((-cumulative_ymax * 5, cumulative_ymax * 5))
if len(q) <= 5:
axes[2].legend()
axes[2].set_title(wrap_title(f"{player_1.name} cumulative expected advantage (percentiles)"))
for i, quantile, val in zip(
range(len(q)),
q,
np.percentile(filters.uniform_filter1d(rewards_over_time,
window_size * 5,
mode='reflect',
axis=0),
q, axis=1)):
color_idx = int(abs((len(q) - 1.) / 2. - i))
axes[3].plot(val, label=f'Percentile: {quantile:.0f}',
color=colors[color_idx],
alpha=1. / (color_idx + 1),
# linewidth=3./(color_idx+1)
)
axes[3].set_ylim((-expected_ymax, expected_ymax))
if len(q) <= 5:
axes[3].legend()
axes[3].set_title(wrap_title(f"{player_1.name} per-step expected advantage over time (percentiles)"))
plt.tight_layout(rect=[0., 0., 1., 0.9])
p_names_abbrev = []
for p in (player_1, player_2):
if type(p) == va.SavedRLAgent:
p_names_abbrev.append(p.agent_name)
if p.name.endswith('_deterministic'):
p_names_abbrev[-1] += '_deterministic'
else:
p_names_abbrev[-1] += '_stochastic'
elif type(p) in (va.SavedRLAgentEnsemble, va.SavedRLAgentMultiObsEnsemble):
if type(p) == va.SavedRLAgentEnsemble:
p_names_abbrev.append(f'ensemble_{p.ensemble_name}')
else:
p_names_abbrev.append(f'multiObsEnsemble_{p.ensemble_name}')
if p.ensemble_model.weight_logits:
p_names_abbrev[-1] += '_weight_logits'
else:
p_names_abbrev[-1] += '_weight_probs'
if p.name.endswith('_deterministic'):
p_names_abbrev[-1] += '_deterministic'
else:
p_names_abbrev[-1] += '_stochastic'
else:
p_names_abbrev.append(p.name)
save_fig_title = f'{p_names_abbrev[0]}__{p_names_abbrev[1]}'
if type(player_1) in (va.SavedRLAgent, va.SavedRLAgentEnsemble, va.SavedRLAgentMultiObsEnsemble):
save_fig_folder = f'saved_figures/{p_names_abbrev[0]}'
else:
save_fig_folder = 'saved_figures'
Path(save_fig_folder).mkdir(exist_ok=True)
fig.savefig(f'{save_fig_folder}/{save_fig_title}.png', dpi=100)
plt.close(fig)
|
[
"matplotlib.pyplot.tight_layout",
"jupyterthemes.jtplot.style",
"vectorized_agents.SavedRLAgent",
"textwrap.wrap",
"vectorized_agents.run_vectorized_vs",
"matplotlib.pyplot.close",
"vectorized_agents.PullVegasSlotMachines",
"matplotlib.pyplot.subplots",
"numpy.cumsum",
"pathlib.Path",
"scipy.ndimage.filters.uniform_filter1d",
"vectorized_agents.SavedRLAgentMultiObsEnsemble",
"torch.device",
"numpy.linspace",
"vectorized_agents.SavedRLAgentEnsemble",
"vectorized_agents.PullVegasSlotMachinesImproved",
"vectorized_agents.BasicThompsonSampling"
] |
[((253, 267), 'jupyterthemes.jtplot.style', 'jtplot.style', ([], {}), '()\n', (265, 267), False, 'from jupyterthemes import jtplot\n'), ((278, 298), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (290, 298), False, 'import torch\n'), ((312, 331), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (324, 331), False, 'import torch\n'), ((1064, 1167), 'vectorized_agents.SavedRLAgentMultiObsEnsemble', 'va.SavedRLAgentMultiObsEnsemble', (['all_ensemble_names'], {'weight_logits': '(False)', 'deterministic_policy': '(True)'}), '(all_ensemble_names, weight_logits=False,\n deterministic_policy=True)\n', (1095, 1167), True, 'import vectorized_agents as va\n'), ((1545, 1647), 'vectorized_agents.SavedRLAgentMultiObsEnsemble', 'va.SavedRLAgentMultiObsEnsemble', (['all_ensemble_names'], {'weight_logits': '(True)', 'deterministic_policy': '(True)'}), '(all_ensemble_names, weight_logits=True,\n deterministic_policy=True)\n', (1576, 1647), True, 'import vectorized_agents as va\n'), ((2333, 2359), 'vectorized_agents.BasicThompsonSampling', 'va.BasicThompsonSampling', ([], {}), '()\n', (2357, 2359), True, 'import vectorized_agents as va\n'), ((2365, 2391), 'vectorized_agents.PullVegasSlotMachines', 'va.PullVegasSlotMachines', ([], {}), '()\n', (2389, 2391), True, 'import vectorized_agents as va\n'), ((2397, 2431), 'vectorized_agents.PullVegasSlotMachinesImproved', 'va.PullVegasSlotMachinesImproved', ([], {}), '()\n', (2429, 2431), True, 'import vectorized_agents as va\n'), ((2437, 2526), 'vectorized_agents.SavedRLAgent', 'va.SavedRLAgent', (['"""a3c_agent_small_8_32-790"""'], {'device': 'DEVICE', 'deterministic_policy': '(True)'}), "('a3c_agent_small_8_32-790', device=DEVICE,\n deterministic_policy=True)\n", (2452, 2526), True, 'import vectorized_agents as va\n'), ((2528, 2616), 'vectorized_agents.SavedRLAgent', 'va.SavedRLAgent', (['"""awac_agent_small_8_64_32_1_norm_v1-230"""'], {'deterministic_policy': '(True)'}), "('awac_agent_small_8_64_32_1_norm_v1-230',\n deterministic_policy=True)\n", (2543, 2616), True, 'import vectorized_agents as va\n'), ((2618, 2715), 'vectorized_agents.SavedRLAgent', 'va.SavedRLAgent', (['"""a3c_agent_small_8_64_32_2_v2-30"""'], {'device': 'DEVICE', 'deterministic_policy': '(False)'}), "('a3c_agent_small_8_64_32_2_v2-30', device=DEVICE,\n deterministic_policy=False)\n", (2633, 2715), True, 'import vectorized_agents as va\n'), ((2833, 2951), 'vectorized_agents.SavedRLAgentEnsemble', 'va.SavedRLAgentEnsemble', (['"""a3c_agent_small_8_64_32_2"""'], {'weight_logits': '(True)', 'device': 'DEVICE', 'deterministic_policy': '(True)'}), "('a3c_agent_small_8_64_32_2', weight_logits=True,\n device=DEVICE, deterministic_policy=True)\n", (2856, 2951), True, 'import vectorized_agents as va\n'), ((3125, 3163), 'textwrap.wrap', 'wrap', (['title', '(55)'], {'break_long_words': '(True)'}), '(title, 55, break_long_words=True)\n', (3129, 3163), False, 'from textwrap import wrap\n'), ((3364, 3436), 'vectorized_agents.run_vectorized_vs', 'va.run_vectorized_vs', (['player_1', 'player_2'], {'display_out': '(True)'}), '(player_1, player_2, display_out=True, **ENV_KWARGS)\n', (3384, 3436), True, 'import vectorized_agents as va\n'), ((3594, 3621), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(11)'], {}), '(0.0, 100.0, 11)\n', (3605, 3621), True, 'import numpy as np\n'), ((3762, 3824), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {'figsize': '(8 * n_cols, 8 * n_rows)'}), '(n_rows, n_cols, figsize=(8 * n_cols, 8 * n_rows))\n', (3774, 
3824), True, 'import matplotlib.pyplot as plt\n'), ((6456, 6499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0.0, 0.0, 1.0, 0.9]'}), '(rect=[0.0, 0.0, 1.0, 0.9])\n', (6472, 6499), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8186), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8181, 8186), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4764), 'numpy.cumsum', 'np.cumsum', (['rewards_over_time'], {'axis': '(0)'}), '(rewards_over_time, axis=0)\n', (4737, 4764), True, 'import numpy as np\n'), ((5564, 5652), 'scipy.ndimage.filters.uniform_filter1d', 'filters.uniform_filter1d', (['rewards_over_time', '(window_size * 5)'], {'mode': '"""reflect"""', 'axis': '(0)'}), "(rewards_over_time, window_size * 5, mode='reflect',\n axis=0)\n", (5588, 5652), False, 'from scipy.ndimage import filters\n'), ((8041, 8062), 'pathlib.Path', 'Path', (['save_fig_folder'], {}), '(save_fig_folder)\n', (8045, 8062), False, 'from pathlib import Path\n')]
|
from unittest import TestCase
import numpy as np
import xarray as xr
from xarray.testing import assert_equal, assert_allclose
import numpy.testing as npt
from sklearn_xarray import wrap
from sklearn.base import clone
from sklearn.preprocessing import StandardScaler, KernelCenterer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVC
from tests.mocks import (
DummyEstimator,
DummyTransformer,
ReshapingEstimator,
)
class EstimatorWrapperTests(TestCase):
def setUp(self):
self.X = xr.Dataset(
{
"var_2d": (["sample", "feat_1"], np.random.random((100, 10))),
"var_3d": (
["sample", "feat_1", "feat_2"],
np.random.random((100, 10, 10)),
),
},
{
"sample": range(100),
"feat_1": range(10),
"feat_2": range(10),
"dummy": (["sample", "feat_1"], np.random.random((100, 10))),
},
)
def test_update_restore_dims(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 0, 5)),
reshapes={"feature": ["feat_1", "feat_2"]},
)
X = self.X.var_3d
estimator.fit(X)
X_out = estimator.estimator_.transform(X.values)
dims_new = estimator._update_dims(X, X_out)
Xt = xr.DataArray(X_out, dims=dims_new)
assert dims_new == ["sample", "feature"]
Xr_out = estimator.estimator_.inverse_transform(X_out)
dims_old = estimator._restore_dims(Xt, Xr_out)
assert dims_old == ["sample", "feat_1", "feat_2"]
def test_update_coords(self):
pass
def test_params(self):
estimator = StandardScaler(with_mean=False)
params = estimator.get_params()
params.update(
{"estimator": estimator, "reshapes": None, "sample_dim": None}
)
# check params set in constructor
wrapper = wrap(estimator)
self.assertEqual(wrapper.get_params(), params)
self.assertEqual(wrapper.with_mean, False)
# check params set by attribute
wrapper.with_std = False
params.update({"with_std": False})
self.assertEqual(wrapper.get_params(), params)
# check params set with set_params
wrapper.set_params(copy=False)
params.update({"copy": False})
self.assertEqual(wrapper.get_params(), params)
def test_attributes(self):
estimator = wrap(StandardScaler())
# check pass-through wrapper
estimator.fit(self.X.var_2d.values)
npt.assert_allclose(estimator.mean_, estimator.estimator_.mean_)
# check DataArray wrapper
estimator.fit(self.X.var_2d)
npt.assert_allclose(estimator.mean_, estimator.estimator_.mean_)
# check Dataset wrapper
estimator.fit(self.X.var_2d.to_dataset())
npt.assert_allclose(
estimator.mean_["var_2d"],
estimator.estimator_dict_["var_2d"].mean_,
)
class PublicInterfaceTests(TestCase):
def setUp(self):
self.X = xr.Dataset(
{
"var_2d": (["sample", "feat_1"], np.random.random((100, 10))),
"var_3d": (
["sample", "feat_1", "feat_2"],
np.random.random((100, 10, 10)),
),
},
{
"sample": range(100),
"feat_1": range(10),
"feat_2": range(10),
"dummy": (["sample", "feat_1"], np.random.random((100, 10))),
},
)
def test_dummy_estimator(self):
estimator = wrap(DummyEstimator())
# test DataArray
X_da = self.X.var_2d
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_equal(yp, X_da)
# test Dataset
X_ds = self.X
estimator.fit(X_ds)
yp = estimator.predict(X_ds)
assert_equal(yp, X_ds)
def test_dummy_transformer(self):
estimator = wrap(DummyTransformer())
# test DataArray
X_da = self.X.var_2d
estimator.fit(X_da)
yp = estimator.transform(X_da)
assert_equal(yp, X_da)
# test Dataset
X_ds = self.X
estimator.fit(X_ds)
yp = estimator.transform(X_ds)
assert_equal(yp, X_ds)
def test_wrapped_transformer(self):
estimator = wrap(StandardScaler())
# test DataArray
X_da = self.X.var_2d
estimator.partial_fit(X_da)
assert_allclose(
X_da, estimator.inverse_transform(estimator.transform(X_da))
)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
estimator.fit(X_ds)
assert_allclose(
X_ds, estimator.inverse_transform(estimator.transform(X_ds))
)
def test_ndim_dummy_estimator(self):
estimator = wrap(DummyEstimator())
# test DataArray
X_da = self.X.var_3d
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_equal(yp, X_da)
# test Dataset
X_ds = self.X
estimator.fit(X_ds)
yp = estimator.predict(X_ds)
assert_equal(yp, X_ds)
def test_reshaping_estimator(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 2)), reshapes="feat_1"
)
# test DataArray
X_da = self.X.var_2d
y = X_da[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_allclose(yp, y)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
y = X_ds.var_2d[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_ds)
yp = estimator.predict(X_ds).var_2d
assert_allclose(yp, y)
def test_reshaping_transformer(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 2)), reshapes="feat_1"
)
# test DataArray
X_da = self.X.var_3d
y = X_da[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_da)
yp = estimator.transform(X_da)
assert_allclose(yp, y)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
y = X_ds.var_2d[:, :2].drop("feat_1")
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_ds)
yp = estimator.transform(X_ds).var_2d
assert_allclose(yp, y)
def test_reshaping_estimator_singleton(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 0)), reshapes="feat_1"
)
# test DataArray
X_da = self.X.var_2d
y = X_da[:, 0].drop("feat_1")
estimator.fit(X_da)
yp = estimator.predict(X_da)
assert_allclose(yp, y)
# test Dataset
X_ds = self.X
y = X_ds.var_2d[:, 0].drop("feat_1")
estimator.fit(X_ds)
yp = estimator.predict(X_ds).var_2d
assert_allclose(yp, y)
def test_ndim_reshaping_estimator(self):
estimator = wrap(
ReshapingEstimator(new_shape=(-1, 5, 0)),
reshapes={"feature": ["feat_1", "feat_2"]},
)
# test DataArray
X_da = self.X.var_3d
Xt = (
X_da[:, :5, 0]
.drop(["feat_1", "feat_2"])
.rename({"feat_1": "feature"})
)
Xt["dummy"] = Xt.dummy[:, 0]
estimator.fit(X_da)
Xt_da = estimator.transform(X_da)
estimator.inverse_transform(Xt_da)
assert_allclose(Xt_da, Xt)
# test Dataset
X_ds = self.X.var_3d.to_dataset()
y = X_ds.var_3d[:, :5, 0].drop(["feat_1", "feat_2"])
y = y.rename({"feat_1": "feature"})
y["dummy"] = y.dummy[:, 0]
estimator.fit(X_ds)
yp = estimator.predict(X_ds).var_3d
assert_allclose(yp, y)
def test_sample_dim(self):
from sklearn.decomposition import PCA
estimator = wrap(
PCA(n_components=5), reshapes="feat_1", sample_dim="sample"
)
# test DataArray
X_da = self.X.var_2d
Xt_da = estimator.fit_transform(X_da)
Xr_da = estimator.inverse_transform(Xt_da)
npt.assert_equal(Xt_da.shape, (100, 5))
npt.assert_equal(Xr_da.shape, (100, 10))
# test Dataset
X_ds = self.X.var_2d.to_dataset()
Xt = estimator.fit_transform(X_ds)
npt.assert_equal(Xt.var_2d.shape, (100, 5))
def test_score(self):
from sklearn.linear_model import LinearRegression
estimator = wrap(LinearRegression, reshapes="feat_1")
# test DataArray
X_da = self.X.var_2d
y = np.random.random(100)
estimator.fit(X_da, y)
estimator.score(X_da, y)
# test Dataset
X_ds = self.X.var_2d.to_dataset()
wrapper = estimator.fit(X_ds, y)
wrapper.score(X_ds, y)
def test_partial_fit(self):
estimator = wrap(StandardScaler())
# check pass-through wrapper
estimator.partial_fit(self.X.var_2d.values)
assert hasattr(estimator, "mean_")
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X)
# check DataArray wrapper
estimator = clone(estimator)
estimator.partial_fit(self.X.var_2d)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d.values)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X)
assert hasattr(estimator, "mean_")
# check Dataset wrapper
estimator = clone(estimator)
estimator.partial_fit(self.X.var_2d.to_dataset())
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d.values)
with self.assertRaises(ValueError):
estimator.partial_fit(self.X.var_2d)
assert hasattr(estimator, "mean_")
def test_classifier():
lr = wrap(LogisticRegression)
# wrappers don't pass check_estimator anymore because estimators
# "should not set any attribute apart from parameters during init"
assert hasattr(lr, "predict")
assert hasattr(lr, "decision_function")
lr = wrap(LogisticRegression)
assert hasattr(lr, "C")
svc_proba = wrap(SVC(probability=True))
# check_estimator(svc_proba) fails because the wrapper is not excluded
# from tests that are known to fail for SVC...
assert hasattr(svc_proba, "predict_proba")
assert hasattr(svc_proba, "predict_log_proba")
def test_regressor():
lr = wrap(LinearRegression, compat=True)
assert hasattr(lr, "predict")
assert hasattr(lr, "score")
lr = wrap(LinearRegression)
assert hasattr(lr, "normalize")
def test_transformer():
wrap(KernelCenterer, compat=True)
tr = wrap(KernelCenterer)
assert hasattr(tr, "transform")
ss = wrap(StandardScaler)
# check_estimator(ss) fails because the wrapper is not excluded
# from tests that are known to fail for StandardScaler...
assert hasattr(ss, "partial_fit")
assert hasattr(ss, "inverse_transform")
assert hasattr(ss, "fit_transform")
|
[
"sklearn.base.clone",
"xarray.testing.assert_equal",
"sklearn.preprocessing.StandardScaler",
"tests.mocks.ReshapingEstimator",
"tests.mocks.DummyEstimator",
"xarray.testing.assert_allclose",
"numpy.random.random",
"xarray.DataArray",
"numpy.testing.assert_equal",
"sklearn.svm.SVC",
"sklearn.decomposition.PCA",
"numpy.testing.assert_allclose",
"tests.mocks.DummyTransformer",
"sklearn_xarray.wrap"
] |
[((10179, 10203), 'sklearn_xarray.wrap', 'wrap', (['LogisticRegression'], {}), '(LogisticRegression)\n', (10183, 10203), False, 'from sklearn_xarray import wrap\n'), ((10432, 10456), 'sklearn_xarray.wrap', 'wrap', (['LogisticRegression'], {}), '(LogisticRegression)\n', (10436, 10456), False, 'from sklearn_xarray import wrap\n'), ((10788, 10823), 'sklearn_xarray.wrap', 'wrap', (['LinearRegression'], {'compat': '(True)'}), '(LinearRegression, compat=True)\n', (10792, 10823), False, 'from sklearn_xarray import wrap\n'), ((10900, 10922), 'sklearn_xarray.wrap', 'wrap', (['LinearRegression'], {}), '(LinearRegression)\n', (10904, 10922), False, 'from sklearn_xarray import wrap\n'), ((10990, 11023), 'sklearn_xarray.wrap', 'wrap', (['KernelCenterer'], {'compat': '(True)'}), '(KernelCenterer, compat=True)\n', (10994, 11023), False, 'from sklearn_xarray import wrap\n'), ((11034, 11054), 'sklearn_xarray.wrap', 'wrap', (['KernelCenterer'], {}), '(KernelCenterer)\n', (11038, 11054), False, 'from sklearn_xarray import wrap\n'), ((11101, 11121), 'sklearn_xarray.wrap', 'wrap', (['StandardScaler'], {}), '(StandardScaler)\n', (11105, 11121), False, 'from sklearn_xarray import wrap\n'), ((1423, 1457), 'xarray.DataArray', 'xr.DataArray', (['X_out'], {'dims': 'dims_new'}), '(X_out, dims=dims_new)\n', (1435, 1457), True, 'import xarray as xr\n'), ((1784, 1815), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (1798, 1815), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((2025, 2040), 'sklearn_xarray.wrap', 'wrap', (['estimator'], {}), '(estimator)\n', (2029, 2040), False, 'from sklearn_xarray import wrap\n'), ((2662, 2726), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['estimator.mean_', 'estimator.estimator_.mean_'], {}), '(estimator.mean_, estimator.estimator_.mean_)\n', (2681, 2726), True, 'import numpy.testing as npt\n'), ((2807, 2871), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['estimator.mean_', 'estimator.estimator_.mean_'], {}), '(estimator.mean_, estimator.estimator_.mean_)\n', (2826, 2871), True, 'import numpy.testing as npt\n'), ((2963, 3057), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (["estimator.mean_['var_2d']", "estimator.estimator_dict_['var_2d'].mean_"], {}), "(estimator.mean_['var_2d'], estimator.estimator_dict_[\n 'var_2d'].mean_)\n", (2982, 3057), True, 'import numpy.testing as npt\n'), ((3879, 3901), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_da'], {}), '(yp, X_da)\n', (3891, 3901), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((4023, 4045), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_ds'], {}), '(yp, X_ds)\n', (4035, 4045), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((4263, 4285), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_da'], {}), '(yp, X_da)\n', (4275, 4285), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((4409, 4431), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_ds'], {}), '(yp, X_ds)\n', (4421, 4431), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5138, 5160), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_da'], {}), '(yp, X_da)\n', (5150, 5160), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5282, 5304), 'xarray.testing.assert_equal', 'assert_equal', (['yp', 'X_ds'], {}), '(yp, X_ds)\n', (5294, 5304), False, 'from xarray.testing import assert_equal, 
assert_allclose\n'), ((5657, 5679), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (5672, 5679), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((5910, 5932), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (5925, 5932), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((6289, 6311), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (6304, 6311), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((6544, 6566), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (6559, 6566), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((6892, 6914), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (6907, 6914), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((7089, 7111), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (7104, 7111), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((7656, 7682), 'xarray.testing.assert_allclose', 'assert_allclose', (['Xt_da', 'Xt'], {}), '(Xt_da, Xt)\n', (7671, 7682), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((7972, 7994), 'xarray.testing.assert_allclose', 'assert_allclose', (['yp', 'y'], {}), '(yp, y)\n', (7987, 7994), False, 'from xarray.testing import assert_equal, assert_allclose\n'), ((8345, 8384), 'numpy.testing.assert_equal', 'npt.assert_equal', (['Xt_da.shape', '(100, 5)'], {}), '(Xt_da.shape, (100, 5))\n', (8361, 8384), True, 'import numpy.testing as npt\n'), ((8393, 8433), 'numpy.testing.assert_equal', 'npt.assert_equal', (['Xr_da.shape', '(100, 10)'], {}), '(Xr_da.shape, (100, 10))\n', (8409, 8433), True, 'import numpy.testing as npt\n'), ((8553, 8596), 'numpy.testing.assert_equal', 'npt.assert_equal', (['Xt.var_2d.shape', '(100, 5)'], {}), '(Xt.var_2d.shape, (100, 5))\n', (8569, 8596), True, 'import numpy.testing as npt\n'), ((8704, 8745), 'sklearn_xarray.wrap', 'wrap', (['LinearRegression'], {'reshapes': '"""feat_1"""'}), "(LinearRegression, reshapes='feat_1')\n", (8708, 8745), False, 'from sklearn_xarray import wrap\n'), ((8814, 8835), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (8830, 8835), True, 'import numpy as np\n'), ((9487, 9503), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (9492, 9503), False, 'from sklearn.base import clone\n'), ((9832, 9848), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (9837, 9848), False, 'from sklearn.base import clone\n'), ((10507, 10528), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (10510, 10528), False, 'from sklearn.svm import SVC\n'), ((1139, 1179), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 0, 5)'}), '(new_shape=(-1, 0, 5))\n', (1157, 1179), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((2554, 2570), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2568, 2570), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((3731, 3747), 'tests.mocks.DummyEstimator', 'DummyEstimator', ([], {}), '()\n', (3745, 3747), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((4111, 4129), 'tests.mocks.DummyTransformer', 'DummyTransformer', ([], {}), '()\n', (4127, 4129), False, 'from 
tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((4499, 4515), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4513, 4515), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((4990, 5006), 'tests.mocks.DummyEstimator', 'DummyEstimator', ([], {}), '()\n', (5004, 5006), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((5385, 5422), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 2)'}), '(new_shape=(-1, 2))\n', (5403, 5422), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((6015, 6052), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 2)'}), '(new_shape=(-1, 2))\n', (6033, 6052), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((6657, 6694), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 0)'}), '(new_shape=(-1, 0))\n', (6675, 6694), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((7197, 7237), 'tests.mocks.ReshapingEstimator', 'ReshapingEstimator', ([], {'new_shape': '(-1, 5, 0)'}), '(new_shape=(-1, 5, 0))\n', (7215, 7237), False, 'from tests.mocks import DummyEstimator, DummyTransformer, ReshapingEstimator\n'), ((8113, 8132), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(5)'}), '(n_components=5)\n', (8116, 8132), False, 'from sklearn.decomposition import PCA\n'), ((9101, 9117), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9115, 9117), False, 'from sklearn.preprocessing import StandardScaler, KernelCenterer\n'), ((633, 660), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (649, 660), True, 'import numpy as np\n'), ((763, 794), 'numpy.random.random', 'np.random.random', (['(100, 10, 10)'], {}), '((100, 10, 10))\n', (779, 794), True, 'import numpy as np\n'), ((1004, 1031), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (1020, 1031), True, 'import numpy as np\n'), ((3242, 3269), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (3258, 3269), True, 'import numpy as np\n'), ((3372, 3403), 'numpy.random.random', 'np.random.random', (['(100, 10, 10)'], {}), '((100, 10, 10))\n', (3388, 3403), True, 'import numpy as np\n'), ((3613, 3640), 'numpy.random.random', 'np.random.random', (['(100, 10)'], {}), '((100, 10))\n', (3629, 3640), True, 'import numpy as np\n')]
|
#!/usr/bin/env pythonw
import numpy as np
from astropy.visualization import stretch, interval
from astropy.io import fits
from astropy import wcs
from reproject import reproject_interp
from matplotlib import pyplot as plt
def scaleImage(image, a=1, stretch_type='asinh'):
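    # Clip to the 10th-99.95th percentile range, then apply the chosen non-linear
    # stretch (asinh/log/sqrt) so faint structure stays visible next to bright cores.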
    region = interval.AsymmetricPercentileInterval(10., 99.95)
    vmin, vmax = region.get_limits(image)
if stretch_type == 'log':
scale = stretch.LogStretch(a=a)
elif stretch_type == 'asinh':
scale = stretch.AsinhStretch(a=a)
elif stretch_type == 'sqrt':
scale = stretch.SqrtStretch()
    image_scaled = (scale + region)(image)
return image_scaled
def removeNaN(data):
bdx = ~np.isfinite(data)
data[bdx] = 0
def make_images(base, index_cut=1300, filters='gri', gzip=False, **kwargs):
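    # Read one FITS frame per filter, crop each frame by index_cut on every edge,
    # reproject later filters onto the first filter's WCS, save a scaled grayscale PNG
    # per filter, and finally compose the three bands into an RGB false-colour PNG.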
hdus = []
images_scaled = []
for fdx, filt in enumerate(filters):
file_name = '{0}-{1}.fits'.format(base, filt)
if gzip:
file_name += '.gz'
hdu = fits.open(file_name)
w = wcs.WCS(hdu[0].header)
newf = fits.PrimaryHDU()
newf.data = hdu[0].data[index_cut:-index_cut, index_cut:-index_cut]
newf.header = hdu[0].header
newf.header.update(w[index_cut:-index_cut, index_cut:-index_cut].to_header())
hdus.append(newf)
if fdx > 0:
            # reproject onto the WCS of the first filter so all bands are aligned
            scidata, footprint = reproject_interp(newf, hdus[0].header)
        else:
            scidata = newf.data
scidata[scidata < 0] = 0
image = scaleImage(scidata, **kwargs)
removeNaN(image)
images_scaled.append(image)
plt.imsave('{0}_{1}_{2}.png'.format(base, filt, kwargs.get('stretch_type', 'asinh')), image, cmap='Greys_r', origin='lower')
RGB_image = np.zeros([images_scaled[0].shape[0], images_scaled[0].shape[1], 3])
RGB_image[:, :, 0] = images_scaled[2]
RGB_image[:, :, 1] = images_scaled[1]
RGB_image[:, :, 2] = images_scaled[0]
RGB_image[RGB_image > 1] = 1
RGB_image[RGB_image < 0] = 0
plt.imsave('{0}_{1}_{2}.png'.format(base, filters, kwargs.get('stretch_type', 'asinh')), RGB_image, origin='lower')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Create single band and false color images from fits files'
)
parser.add_argument(
'base_name',
type=str,
help='the base name of the fits files (note: all files must be named `{base_name}-{filter_letter}`)'
)
parser.add_argument(
'-c',
'--crop',
type=int,
default=1,
        help='an integer used to crop the fits images (by index of array)'
)
parser.add_argument(
'-f',
'--filters',
type=str,
default='gri',
choices=['gri', 'rbi', 'ugr'],
help='a three letter string representing the filters contained in each fits file'
)
parser.add_argument(
'-a',
type=float,
default=0.1,
        help='the `a` parameter used in the stretch function'
)
parser.add_argument(
'-s',
'--stretch',
type=str,
default='asinh',
choices=['asinh', 'log', 'sqrt'],
help='the type of stretch to use for the fits image'
)
parser.add_argument(
'-g',
'--gzip',
action='store_true',
help='use this flag if the input files are gzipped'
)
args = parser.parse_args()
make_images(
args.base_name,
index_cut=args.crop,
filters=args.filters,
gzip=args.gzip,
a=args.a,
stretch_type=args.stretch
)
|
[
"astropy.visualization.stretch.LogStretch",
"astropy.visualization.interval.AsymmetricPercentileInterval",
"argparse.ArgumentParser",
"astropy.io.fits.PrimaryHDU",
"numpy.zeros",
"numpy.isfinite",
"astropy.visualization.stretch.AsinhStretch",
"astropy.wcs.WCS",
"reproject.reproject_interp",
"astropy.io.fits.open",
"astropy.visualization.stretch.SqrtStretch"
] |
[((288, 338), 'astropy.visualization.interval.AsymmetricPercentileInterval', 'interval.AsymmetricPercentileInterval', (['(10.0)', '(99.95)'], {}), '(10.0, 99.95)\n', (325, 338), False, 'from astropy.visualization import stretch, interval\n'), ((1729, 1796), 'numpy.zeros', 'np.zeros', (['[images_scaled[0].shape[0], images_scaled[0].shape[1], 3]'], {}), '([images_scaled[0].shape[0], images_scaled[0].shape[1], 3])\n', (1737, 1796), True, 'import numpy as np\n'), ((2171, 2272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create single band and false color images from fits files"""'}), "(description=\n 'Create single band and false color images from fits files')\n", (2194, 2272), False, 'import argparse\n'), ((426, 449), 'astropy.visualization.stretch.LogStretch', 'stretch.LogStretch', ([], {'a': 'a'}), '(a=a)\n', (444, 449), False, 'from astropy.visualization import stretch, interval\n'), ((698, 715), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (709, 715), True, 'import numpy as np\n'), ((1006, 1026), 'astropy.io.fits.open', 'fits.open', (['file_name'], {}), '(file_name)\n', (1015, 1026), False, 'from astropy.io import fits\n'), ((1039, 1061), 'astropy.wcs.WCS', 'wcs.WCS', (['hdu[0].header'], {}), '(hdu[0].header)\n', (1046, 1061), False, 'from astropy import wcs\n'), ((1077, 1094), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (1092, 1094), False, 'from astropy.io import fits\n'), ((500, 525), 'astropy.visualization.stretch.AsinhStretch', 'stretch.AsinhStretch', ([], {'a': 'a'}), '(a=a)\n', (520, 525), False, 'from astropy.visualization import stretch, interval\n'), ((1372, 1410), 'reproject.reproject_interp', 'reproject_interp', (['newf', 'hdus[0].header'], {}), '(newf, hdus[0].header)\n', (1388, 1410), False, 'from reproject import reproject_interp\n'), ((575, 596), 'astropy.visualization.stretch.SqrtStretch', 'stretch.SqrtStretch', ([], {}), '()\n', (594, 596), False, 'from astropy.visualization import stretch, interval\n')]
|
import math
import pytz
import sys
import time
from datetime import date
from . import wait_times, util, arrival_history, trip_times, errors, constants, timetables, routeconfig
import pandas as pd
import numpy as np
# Represents a range of days with a time range within each day.
# RouteMetrics can calculate various statistics over a range.
class Range:
def __init__(self, dates: list, start_time_str: str, end_time_str: str, tz: pytz.timezone):
self.dates = dates # list of datetime.date objects
self.start_time_str = start_time_str # if None, no start time filter
self.end_time_str = end_time_str # if None, no end time filter
self.tz = tz
# RouteMetrics allows computing various metrics for a particular route,
# such as headways, wait times, and trip times,
# including over various date and time ranges.
#
# It caches the arrival history and data frames so that the different
# metrics calculations can reuse the same arrivals data without
# needing to reload it from disk each time.
#
class RouteMetrics:
def __init__(self, agency_id, route_id):
self.agency_id = agency_id
self.route_id = route_id
self.arrival_histories = {}
self.data_frames = {}
self.timetables = {}
def get_arrival_history(self, d):
if d in self.arrival_histories:
return self.arrival_histories[d]
print(f'loading arrival history for route {self.route_id} on {d}', file=sys.stderr)
try:
self.arrival_histories[d] = history = arrival_history.get_by_date(self.agency_id, self.route_id, d)
except FileNotFoundError as ex:
print(f'Arrival history not found for route {self.route_id} on {d}', file=sys.stderr)
            history = arrival_history.ArrivalHistory(self.agency_id, self.route_id, {})
return history
def get_history_data_frame(self, d, direction_id=None, stop_id=None):
key = f'history_{str(d)}_{stop_id}_{direction_id}'
if key in self.data_frames:
return self.data_frames[key]
history = self.get_arrival_history(d)
print(f'loading data frame {key} for route {self.route_id}', file=sys.stderr)
df = history.get_data_frame(stop_id=stop_id, direction_id=direction_id)
self.data_frames[key] = df
return df
def get_timetable(self, d):
if d not in self.timetables.keys():
self.timetables[d] = timetables.get_by_date(self.agency_id, self.route_id, d)
return self.timetables[d]
def get_timetable_data_frame(self, d, direction_id=None, stop_id=None):
timetable = self.get_timetable(d)
timetable_key = f'timetable_{str(d)}_{stop_id}_{direction_id}'
if timetable_key not in self.data_frames:
self.data_frames[timetable_key] = timetable.get_data_frame(stop_id=stop_id, direction_id=direction_id)
return self.data_frames[timetable_key]
def get_wait_time_stats(self, direction_id, stop_id, rng: Range):
return self._get_wait_time_stats(direction_id, stop_id, rng, self.get_history_data_frame)
def get_scheduled_wait_time_stats(self, direction_id, stop_id, rng: Range):
return self._get_wait_time_stats(direction_id, stop_id, rng, self.get_timetable_data_frame)
def _get_wait_time_stats(self, direction_id, stop_id, rng: Range, get_data_frame):
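        # For each date in the range, compute wait-time statistics from the sorted
        # departure times within the start/end window, then combine the per-day results.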
wait_stats_arr = []
for d in rng.dates:
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
df = get_data_frame(d, stop_id=stop_id, direction_id=direction_id)
departure_time_values = np.sort(df['DEPARTURE_TIME'].values)
wait_stats = wait_times.get_stats(departure_time_values, start_time, end_time)
wait_stats_arr.append(wait_stats)
if len(wait_stats_arr) == 1:
return wait_stats_arr[0]
else:
return wait_times.combine_stats(wait_stats_arr)
def get_arrivals(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_history_data_frame, 'TIME')
def get_departures(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_history_data_frame, 'DEPARTURE_TIME')
def get_scheduled_arrivals(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_timetable_data_frame, 'TIME')
def get_scheduled_departures(self, direction_id, stop_id, rng: Range):
return self._get_count(direction_id, stop_id, rng, self.get_timetable_data_frame, 'DEPARTURE_TIME')
def _get_count(self, direction_id, stop_id, rng: Range, get_data_frame, time_field):
if stop_id is None:
return None
count = 0
for d in rng.dates:
df = get_data_frame(d, direction_id=direction_id, stop_id=stop_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
df = df[df[time_field] >= start_time]
if end_time is not None:
df = df[df[time_field] < end_time]
count += len(df)
return count
def get_departure_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range):
return self._get_schedule_adherence(direction_id, stop_id, early_sec, late_sec, rng, 'DEPARTURE_TIME')
def get_arrival_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range):
return self._get_schedule_adherence(direction_id, stop_id, early_sec, late_sec, rng, 'TIME')
def _get_schedule_adherence(self, direction_id, stop_id, early_sec, late_sec, rng: Range, time_field):
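        # For each date, match scheduled times to the closest observed times within the
        # early/late tolerance, drop scheduled times still in the future, restrict to the
        # requested window, and concatenate the per-day comparisons.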
if stop_id is None:
return None
compared_timetable_arr = []
now = time.time()
for d in rng.dates:
stop_timetable = self.get_timetable_data_frame(d, direction_id=direction_id, stop_id=stop_id)
stop_arrivals = self.get_history_data_frame(d, direction_id=direction_id, stop_id=stop_id)
scheduled_time_values = np.sort(stop_timetable[time_field].values)
actual_time_values = np.sort(stop_arrivals[time_field].values)
comparison_df = timetables.match_schedule_to_actual_times(
scheduled_time_values,
actual_time_values,
early_sec = early_sec,
late_sec = late_sec,
)
comparison_df[time_field] = scheduled_time_values
if len(comparison_df) and comparison_df[time_field].iloc[-1] >= now:
comparison_df = comparison_df[comparison_df[time_field] < now]
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
comparison_df = comparison_df[comparison_df[time_field] >= start_time]
if end_time is not None:
comparison_df = comparison_df[comparison_df[time_field] < end_time]
compared_timetable_arr.append(comparison_df)
return pd.concat(compared_timetable_arr)
def get_headway_schedule_deltas(self, direction_id, stop_id, rng: Range):
headway_delta_arr = []
now = time.time()
for d in rng.dates:
timetable_df = self.get_timetable_data_frame(d, direction_id=direction_id, stop_id=stop_id)
history_df = self.get_history_data_frame(d, direction_id=direction_id, stop_id=stop_id)
departure_time_values = np.sort(history_df['DEPARTURE_TIME'].values)
scheduled_departure_time_values = np.sort(timetable_df['DEPARTURE_TIME'].values)
comparison_df = timetables.match_actual_times_to_schedule(
departure_time_values,
scheduled_departure_time_values
)
comparison_df['DEPARTURE_TIME'] = departure_time_values
comparison_df['headway'] = np.r_[np.nan, compute_headway_minutes(departure_time_values)]
comparison_df = comparison_df[np.isfinite(comparison_df['headway'].values) & np.isfinite(comparison_df['closest_scheduled_headway'].values)]
if len(comparison_df) and comparison_df['DEPARTURE_TIME'].iloc[-1] >= now:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] < now]
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] >= start_time]
if end_time is not None:
comparison_df = comparison_df[comparison_df['DEPARTURE_TIME'] < end_time]
headway_delta = comparison_df['headway'].values - comparison_df['closest_scheduled_headway'].values
headway_delta_arr.append(headway_delta)
return np.concatenate(headway_delta_arr)
def get_scheduled_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range):
return self._get_trip_times(direction_id, start_stop_id, end_stop_id, rng, self.get_timetable_data_frame)
def get_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range):
return self._get_trip_times(direction_id, start_stop_id, end_stop_id, rng, self.get_history_data_frame)
def _get_trip_times(self, direction_id, start_stop_id, end_stop_id, rng: Range, get_data_frame):
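        # Determine from the route config whether this direction is a loop, then for each
        # date pair departures at start_stop_id with arrivals of the same trip at
        # end_stop_id to obtain completed trip times.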
completed_trips_arr = []
if end_stop_id is None:
return None
is_loop = False
route_config = routeconfig.get_route_config(self.agency_id, self.route_id)
if route_config is not None:
if direction_id is not None:
dir_info = route_config.get_direction_info(direction_id)
else:
direction_ids = route_config.get_directions_for_stop(start_stop_id)
dir_info = route_config.get_direction_info(direction_ids[0]) if len(direction_ids) > 0 else None
if dir_info is not None:
is_loop = dir_info.is_loop()
for d in rng.dates:
s1_df = get_data_frame(d, stop_id=start_stop_id, direction_id=direction_id)
s2_df = get_data_frame(d, stop_id=end_stop_id, direction_id=direction_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
if start_time is not None:
s1_df = s1_df[s1_df['DEPARTURE_TIME'] >= start_time]
if end_time is not None:
s1_df = s1_df[s1_df['DEPARTURE_TIME'] < end_time]
completed_trip_times = trip_times.get_completed_trip_times(
s1_df['TRIP'].values,
s1_df['DEPARTURE_TIME'].values,
s2_df['TRIP'].values,
s2_df['TIME'].values,
is_loop = is_loop
)
completed_trips_arr.append(completed_trip_times)
return np.concatenate(completed_trips_arr)
def get_headways(self, direction_id, stop_id, rng: Range):
return self._get_headways(direction_id, stop_id, rng, self.get_history_data_frame)
def get_scheduled_headways(self, direction_id, stop_id, rng: Range):
return self._get_headways(direction_id, stop_id, rng, self.get_timetable_data_frame)
def _get_headways(self, direction_id, stop_id, rng: Range, get_data_frame):
headway_min_arr = []
for d in rng.dates:
df = get_data_frame(d, direction_id=direction_id, stop_id=stop_id)
start_time = util.get_timestamp_or_none(d, rng.start_time_str, rng.tz)
end_time = util.get_timestamp_or_none(d, rng.end_time_str, rng.tz)
departure_time_values = np.sort(df['DEPARTURE_TIME'].values)
headway_min = compute_headway_minutes(departure_time_values, start_time, end_time)
headway_min_arr.append(headway_min)
return np.concatenate(headway_min_arr)
def compute_headway_minutes(time_values, start_time=None, end_time=None):
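    # Headways are the gaps (in minutes) between consecutive departure times restricted
    # to [start_time, end_time); the departure just before start_time is the baseline
    # for the first headway.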
if start_time is not None:
start_index = np.searchsorted(time_values, start_time, 'left')
else:
start_index = 0
if end_time is not None:
end_index = np.searchsorted(time_values, end_time, 'left')
else:
end_index = len(time_values)
if start_index == 0:
start_index = 1
if start_index > end_index:
end_index = start_index
return (time_values[start_index:end_index] - time_values[start_index - 1 : end_index - 1]) / 60
|
[
"numpy.searchsorted",
"numpy.isfinite",
"time.time",
"numpy.sort",
"pandas.concat",
"numpy.concatenate"
] |
[((6056, 6067), 'time.time', 'time.time', ([], {}), '()\n', (6065, 6067), False, 'import time\n'), ((7407, 7440), 'pandas.concat', 'pd.concat', (['compared_timetable_arr'], {}), '(compared_timetable_arr)\n', (7416, 7440), True, 'import pandas as pd\n'), ((7567, 7578), 'time.time', 'time.time', ([], {}), '()\n', (7576, 7578), False, 'import time\n'), ((9264, 9297), 'numpy.concatenate', 'np.concatenate', (['headway_delta_arr'], {}), '(headway_delta_arr)\n', (9278, 9297), True, 'import numpy as np\n'), ((11392, 11427), 'numpy.concatenate', 'np.concatenate', (['completed_trips_arr'], {}), '(completed_trips_arr)\n', (11406, 11427), True, 'import numpy as np\n'), ((12366, 12397), 'numpy.concatenate', 'np.concatenate', (['headway_min_arr'], {}), '(headway_min_arr)\n', (12380, 12397), True, 'import numpy as np\n'), ((12526, 12574), 'numpy.searchsorted', 'np.searchsorted', (['time_values', 'start_time', '"""left"""'], {}), "(time_values, start_time, 'left')\n", (12541, 12574), True, 'import numpy as np\n'), ((12659, 12705), 'numpy.searchsorted', 'np.searchsorted', (['time_values', 'end_time', '"""left"""'], {}), "(time_values, end_time, 'left')\n", (12674, 12705), True, 'import numpy as np\n'), ((3744, 3780), 'numpy.sort', 'np.sort', (["df['DEPARTURE_TIME'].values"], {}), "(df['DEPARTURE_TIME'].values)\n", (3751, 3780), True, 'import numpy as np\n'), ((6343, 6385), 'numpy.sort', 'np.sort', (['stop_timetable[time_field].values'], {}), '(stop_timetable[time_field].values)\n', (6350, 6385), True, 'import numpy as np\n'), ((6419, 6460), 'numpy.sort', 'np.sort', (['stop_arrivals[time_field].values'], {}), '(stop_arrivals[time_field].values)\n', (6426, 6460), True, 'import numpy as np\n'), ((7849, 7893), 'numpy.sort', 'np.sort', (["history_df['DEPARTURE_TIME'].values"], {}), "(history_df['DEPARTURE_TIME'].values)\n", (7856, 7893), True, 'import numpy as np\n'), ((7941, 7987), 'numpy.sort', 'np.sort', (["timetable_df['DEPARTURE_TIME'].values"], {}), "(timetable_df['DEPARTURE_TIME'].values)\n", (7948, 7987), True, 'import numpy as np\n'), ((12168, 12204), 'numpy.sort', 'np.sort', (["df['DEPARTURE_TIME'].values"], {}), "(df['DEPARTURE_TIME'].values)\n", (12175, 12204), True, 'import numpy as np\n'), ((8374, 8418), 'numpy.isfinite', 'np.isfinite', (["comparison_df['headway'].values"], {}), "(comparison_df['headway'].values)\n", (8385, 8418), True, 'import numpy as np\n'), ((8421, 8483), 'numpy.isfinite', 'np.isfinite', (["comparison_df['closest_scheduled_headway'].values"], {}), "(comparison_df['closest_scheduled_headway'].values)\n", (8432, 8483), True, 'import numpy as np\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
PRECISION = 8  # digits after the decimal point
def objective_file_name(output_prefix, input_basename, module_basename):
return output_prefix + input_basename + "_F_" + module_basename + ".txt"
def jacobian_file_name(output_prefix, input_basename, module_basename):
return output_prefix + input_basename + "_J_" + module_basename + ".txt"
def time_to_string(objective_time, derivative_time):
obj_time_str = np.format_float_scientific(
objective_time,
unique=False,
precision=PRECISION
)
der_time_str = np.format_float_scientific(
derivative_time,
unique=False,
precision=PRECISION
)
return f"{obj_time_str}\n{der_time_str}"
def save_time_to_file(filepath, objective_time, derivative_time):
# open file in write mode or create new one if it does not exist
out = open(filepath,"w")
out.write(time_to_string(objective_time, derivative_time))
out.close()
def value_to_string(value):
return np.format_float_scientific(value, unique=False, precision=PRECISION)
def save_value_to_file(filepath, value):
out = open(filepath,"w")
out.write(value_to_string(value))
out.close()
def save_vector_to_file(filepath, gradient):
out = open(filepath,"w")
for value in gradient:
out.write(value_to_string(value) + '\n')
out.close()
def save_jacobian_to_file(filepath, jacobian):
out = open(filepath,"w")
# output row-major matrix
for row in jacobian:
out.write(value_to_string(row[0]))
for value in row[1:]:
out.write('\t' + value_to_string(value))
out.write('\n')
out.close()
def save_errors_to_file(filepath, reprojection_error, zach_weight_error):
out = open(filepath,"w")
out.write("Reprojection error:\n")
for value in reprojection_error:
out.write(value_to_string(value) + '\n')
out.write("Zach weight error:\n")
for value in zach_weight_error:
out.write(value_to_string(value) + '\n')
out.close()
def save_sparse_j_to_file(filepath, J):
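    # Sparse Jacobian text format: matrix dimensions, then the row index array and the
    # column index array (each preceded by its length), then the nonzero values.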
out = open(filepath,"w")
out.write(f"{J.nrows} {J.ncols}\n")
out.write(f"{len(J.rows)}\n")
for row in J.rows:
out.write(f"{row} ")
out.write('\n')
out.write(f"{len(J.cols)}\n")
for column in J.cols:
out.write(f"{column} ")
out.write('\n')
for value in J.vals:
out.write(value_to_string(value) + ' ')
out.close()
|
[
"numpy.format_float_scientific"
] |
[((507, 584), 'numpy.format_float_scientific', 'np.format_float_scientific', (['objective_time'], {'unique': '(False)', 'precision': 'PRECISION'}), '(objective_time, unique=False, precision=PRECISION)\n', (533, 584), True, 'import numpy as np\n'), ((635, 713), 'numpy.format_float_scientific', 'np.format_float_scientific', (['derivative_time'], {'unique': '(False)', 'precision': 'PRECISION'}), '(derivative_time, unique=False, precision=PRECISION)\n', (661, 713), True, 'import numpy as np\n'), ((1080, 1148), 'numpy.format_float_scientific', 'np.format_float_scientific', (['value'], {'unique': '(False)', 'precision': 'PRECISION'}), '(value, unique=False, precision=PRECISION)\n', (1106, 1148), True, 'import numpy as np\n')]
|
#%%
import numpy as np
import sys
sys.path.append('..')
from utils.tester import Tester
import pickle
import os
import matplotlib.pyplot as plt
import math
import networkx as nx
import random
city_name = 'Phoenix'
save_file_name = '2021-04-23_14-02-29'
seed = 45
# city_name = 'Seattle'
# save_file_name = '2021-03-21_23-18-05'
# seed = 10
# city_name = 'Dallas'
# save_file_name = '2021-04-09_21-11-28'
fontsize = 20
legend_fontsize = 20
# %%
# Load results data
base_directory = os.getcwd()
base_directory = base_directory[0:base_directory.find('src')+3]
file_path = os.path.join(base_directory, 'optimization', 'save', save_file_name)
with open(file_path,'rb') as f:
tester = pickle.load(f)
if city_name == 'Phoenix':
data_folder_name = 'Phoenix'
if city_name == 'Seattle':
data_folder_name = 'IntercityFlow_Seattle'
if city_name == 'Dallas':
data_folder_name = 'Intercity_Dallas'
# Load city data
city_data_file_path = os.path.join(base_directory, '..', 'data', data_folder_name, 'data_processing_outputs', 'city_data.p')
with open(city_data_file_path,'rb') as f:
city_data = pickle.load(f)
city_list = list(city_data.keys())
num_cities = len(city_list)
num_city = tester.params['m']
num_time = tester.params['n']
num_entity = tester.params['num_entity']
phi_val = np.array(tester.results['phi_best'])
scale_frac = tester.params['scale_frac']
phi_average = np.zeros((num_city, num_city), dtype=np.float)
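# Average each origin-destination flow over time, rescale by the origin totals (Ntot)
# and scale_frac to get absolute trip counts, then log-transform and normalize by the
# maximum so the edge weights are comparable across city pairs.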
for i in range(num_city):
for j in range(num_city):
if not (i == j) and np.average(phi_val[:,i,j]) > 0.0:
phi_average[i,j] = np.average(phi_val[:, i, j])
for city_ind in range(num_city):
phi_average[city_ind,:] = phi_average[city_ind,:] * tester.problem_data['Ntot'][city_ind] * scale_frac
phi_average[:,:] = np.log(phi_average[:,:]+1e-3)
max_val = np.max(phi_average[:,:])
phi_average = phi_average / max_val
# print(phi_average)
# %%
edge_weight_list = []
# Visualize the resulting adjacency matrix
G = nx.DiGraph()
for i in range(num_cities):
G.add_node(city_list[i])
for j in range(num_cities):
if phi_average[i,j] > 0.0:
G.add_edge(city_list[i], city_list[j], weight=phi_average[i,j])
edge_weight_list.append(phi_average[i,j])
if city_name == 'Dallas':
city_data['Johnson']['y_loc'] = 32.385655
city_data['Johnson']['x_loc'] = -97.335191
city_data['Ellis']['y_loc'] = 32.362181
city_data['Ellis']['x_loc'] = -96.803901
city_data['Kaufman']['y_loc'] = 32.613997
city_data['Kaufman']['x_loc'] = -96.283543
city_data['Parker']['y_loc'] = 32.783855
city_data['Parker']['x_loc'] = -97.802077
city_data['Rockwall']['y_loc'] = 32.900920
city_data['Rockwall']['x_loc'] = -96.404271
city_data['Collin']['y_loc'] = 33.20671
city_data['Collin']['x_loc'] = -96.587485
city_data['Denton']['y_loc'] = 33.199884
city_data['Denton']['x_loc'] = -97.089478
city_data['Wise']['y_loc'] = 33.219515
city_data['Wise']['x_loc'] = -97.647529
city_data['Tarrant']['y_loc'] = 32.770195
city_data['Tarrant']['x_loc'] = -97.264026
city_data['Dallas']['y_loc'] = 32.77
city_data['Dallas']['x_loc'] = -96.79
pos = dict()
for i in range(num_cities):
city = city_list[i]
x_loc = city_data[city]['x_loc']
y_loc = city_data[city]['y_loc']
pos[city] = np.array([x_loc, y_loc])
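# Convert the log-scaled weights into drawing widths: exponentiate, normalize to the
# largest edge, and scale so the widest edge is drawn with width 5.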
edge_width_list = np.array(edge_weight_list)
edge_width_list = np.exp(edge_width_list)
edge_width_list = edge_width_list / np.max(edge_width_list)
edge_width_list = edge_width_list * 5
options = {
"node_color": "#A0CBE2",
"edge_color": edge_weight_list,
"node_size": tester.problem_data['Ntot'],
"width": edge_width_list,
"edge_cmap": plt.cm.Blues,
"with_labels": False,
"edge_vmin": 0.0,
# "edge_vmax": 100.0
}
print(phi_average[1,:])
print(edge_weight_list)
random.seed(seed)
np.random.seed(seed=seed)
pos = nx.spring_layout(G)
# pos = nx.spectral_layout(G)
#print(city_data['Dallas']['population'])
# %%
plt.figure(figsize=(20,10))
# nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_labels(G, pos)
# nx.draw_networkx_edges(G_fully_connected, pos, edge_color='red')
nx.draw(G, pos, **options)
save_location = os.path.join(base_directory, 'plotting', city_name, 'saved_plots')
filename = os.path.join(save_location, '{}scale_cost_by_pop_phi_graph.png'.format(save_file_name))
plt.savefig(filename, bbox_inches='tight')
plt.show()
#plt.title('Adjacency Matrix with Scaled Demand Threshold {},\n Total Number of Edges: {}'.format(0.02, np.sum(adj_mat)), fontsize=15)
# %%
|
[
"numpy.random.seed",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.exp",
"networkx.draw_networkx_labels",
"os.path.join",
"sys.path.append",
"numpy.max",
"random.seed",
"matplotlib.pyplot.show",
"numpy.average",
"networkx.draw",
"networkx.DiGraph",
"numpy.log",
"os.getcwd",
"numpy.zeros",
"networkx.spring_layout",
"numpy.array",
"matplotlib.pyplot.savefig"
] |
[((34, 55), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (49, 55), False, 'import sys\n'), ((488, 499), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (497, 499), False, 'import os\n'), ((577, 645), 'os.path.join', 'os.path.join', (['base_directory', '"""optimization"""', '"""save"""', 'save_file_name'], {}), "(base_directory, 'optimization', 'save', save_file_name)\n", (589, 645), False, 'import os\n'), ((949, 1055), 'os.path.join', 'os.path.join', (['base_directory', '""".."""', '"""data"""', 'data_folder_name', '"""data_processing_outputs"""', '"""city_data.p"""'], {}), "(base_directory, '..', 'data', data_folder_name,\n 'data_processing_outputs', 'city_data.p')\n", (961, 1055), False, 'import os\n'), ((1300, 1336), 'numpy.array', 'np.array', (["tester.results['phi_best']"], {}), "(tester.results['phi_best'])\n", (1308, 1336), True, 'import numpy as np\n'), ((1393, 1439), 'numpy.zeros', 'np.zeros', (['(num_city, num_city)'], {'dtype': 'np.float'}), '((num_city, num_city), dtype=np.float)\n', (1401, 1439), True, 'import numpy as np\n'), ((1779, 1812), 'numpy.log', 'np.log', (['(phi_average[:, :] + 0.001)'], {}), '(phi_average[:, :] + 0.001)\n', (1785, 1812), True, 'import numpy as np\n'), ((1820, 1845), 'numpy.max', 'np.max', (['phi_average[:, :]'], {}), '(phi_average[:, :])\n', (1826, 1845), True, 'import numpy as np\n'), ((1978, 1990), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1988, 1990), True, 'import networkx as nx\n'), ((3377, 3403), 'numpy.array', 'np.array', (['edge_weight_list'], {}), '(edge_weight_list)\n', (3385, 3403), True, 'import numpy as np\n'), ((3422, 3445), 'numpy.exp', 'np.exp', (['edge_width_list'], {}), '(edge_width_list)\n', (3428, 3445), True, 'import numpy as np\n'), ((3855, 3872), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3866, 3872), False, 'import random\n'), ((3873, 3898), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (3887, 3898), True, 'import numpy as np\n'), ((3905, 3924), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (3921, 3924), True, 'import networkx as nx\n'), ((4005, 4033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (4015, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4066, 4097), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {}), '(G, pos)\n', (4089, 4097), True, 'import networkx as nx\n'), ((4165, 4191), 'networkx.draw', 'nx.draw', (['G', 'pos'], {}), '(G, pos, **options)\n', (4172, 4191), True, 'import networkx as nx\n'), ((4209, 4275), 'os.path.join', 'os.path.join', (['base_directory', '"""plotting"""', 'city_name', '"""saved_plots"""'], {}), "(base_directory, 'plotting', city_name, 'saved_plots')\n", (4221, 4275), False, 'import os\n'), ((4375, 4417), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""'}), "(filename, bbox_inches='tight')\n", (4386, 4417), True, 'import matplotlib.pyplot as plt\n'), ((4418, 4428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((691, 705), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (702, 705), False, 'import pickle\n'), ((1110, 1124), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1121, 1124), False, 'import pickle\n'), ((3333, 3357), 'numpy.array', 'np.array', (['[x_loc, y_loc]'], {}), '([x_loc, y_loc])\n', (3341, 3357), True, 'import numpy as np\n'), ((3482, 3505), 'numpy.max', 'np.max', 
(['edge_width_list'], {}), '(edge_width_list)\n', (3488, 3505), True, 'import numpy as np\n'), ((1589, 1617), 'numpy.average', 'np.average', (['phi_val[:, i, j]'], {}), '(phi_val[:, i, j])\n', (1599, 1617), True, 'import numpy as np\n'), ((1524, 1552), 'numpy.average', 'np.average', (['phi_val[:, i, j]'], {}), '(phi_val[:, i, j])\n', (1534, 1552), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import random
import math
import os
import time
from sklearn.metrics import average_precision_score
# ------------------------------------------------------ loading libraries ----
# --- setting random seed -----------------------------------------------------
seed_n = 42
np.random.seed(seed_n)
random.seed(seed_n)
tf.random.set_seed(seed_n)
combination = 3057
# loading model
model = tf.keras.models.load_model('/project/M-ABeICU176709/delirium/data/outputs/models/{:06d}/model.hdf5'.format(combination))
# loading data
X_adm_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_adm5y_validation.pickle', 'rb'))
X_temp_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_temp_validation.pickle', 'rb'))
y_12h_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_12h_validation.pickle', 'rb'))
y_24h_val = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_24h_validation.pickle', 'rb'))
# loading data
X_adm_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_adm5y_train.pickle', 'rb'))
X_temp_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/X_temp_train.pickle', 'rb'))
y_12h_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_12h_train.pickle', 'rb'))
y_24h_train = pickle.load(open('/project/M-ABeICU176709/delirium/data/inputs/preprocessed/y_24h_train.pickle', 'rb'))
# -----------------------------------------------------------------------------
for set in [('train', X_adm_train, X_temp_train, y_12h_train, y_24h_train), ('validation', X_adm_val, X_temp_val, y_12h_val, y_24h_val)]:
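    # Evaluate the saved model on this data split: predict both horizons and report the
    # area under the precision-recall curve (AUPRC) for the 12h and 24h targets and their mean.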
# Predicting y_12h and y_24h
results = model.predict(x = [set[1], set[2]],
verbose = 0)
y_12h_hat = results[0]
y_24h_hat = results[1]
AUPRC_12h = average_precision_score(set[3], y_12h_hat)
AUPRC_24h = average_precision_score(set[4], y_24h_hat)
AUPRC_mean = (AUPRC_12h + AUPRC_24h) / 2
print(f'set: {set[0]}, AUPRC_12h: {AUPRC_12h}, AUPRC_24h: {AUPRC_24h}, AUPRC_mean: {AUPRC_mean}')
|
[
"tensorflow.random.set_seed",
"sklearn.metrics.average_precision_score",
"random.seed",
"numpy.random.seed"
] |
[((353, 375), 'numpy.random.seed', 'np.random.seed', (['seed_n'], {}), '(seed_n)\n', (367, 375), True, 'import numpy as np\n'), ((376, 395), 'random.seed', 'random.seed', (['seed_n'], {}), '(seed_n)\n', (387, 395), False, 'import random\n'), ((396, 422), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed_n'], {}), '(seed_n)\n', (414, 422), True, 'import tensorflow as tf\n'), ((2012, 2054), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['set[3]', 'y_12h_hat'], {}), '(set[3], y_12h_hat)\n', (2035, 2054), False, 'from sklearn.metrics import average_precision_score\n'), ((2071, 2113), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['set[4]', 'y_24h_hat'], {}), '(set[4], y_24h_hat)\n', (2094, 2113), False, 'from sklearn.metrics import average_precision_score\n')]
|
import numpy as np
def quotient(rri):
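    # Quotient filter: remove RR intervals whose ratio to either neighbouring interval
    # falls outside [0.8, 1.2], i.e. beat-to-beat changes of more than ~20%.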
rri = np.array(rri)
L = len(rri) - 1
indices = np.where((rri[:L - 1] / rri[1:L] < 0.8) |
(rri[:L - 1] / rri[1:L] > 1.2) |
(rri[1:L] / rri[:L - 1] < 0.8) |
(rri[1:L] / rri[:L - 1] > 1.2))
return np.delete(rri, indices)
def moving_average(rri, order=3):
return _moving_function(rri, order, np.mean)
def moving_median(rri, order=3):
return _moving_function(rri, order, np.median)
def _moving_function(rri, order, func):
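    # Apply `func` over a centred window of `order` samples; the first and last
    # `order // 2` samples are left unfiltered.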
offset = int(order / 2)
filt_rri = np.array(rri.copy(), dtype=np.float64)
for i in range(offset, len(rri) - offset, 1):
filt_rri[i] = func(rri[i - offset:i + offset + 1])
return filt_rri
|
[
"numpy.where",
"numpy.array",
"numpy.delete"
] |
[((50, 63), 'numpy.array', 'np.array', (['rri'], {}), '(rri)\n', (58, 63), True, 'import numpy as np\n'), ((100, 243), 'numpy.where', 'np.where', (['((rri[:L - 1] / rri[1:L] < 0.8) | (rri[:L - 1] / rri[1:L] > 1.2) | (rri[1:L\n ] / rri[:L - 1] < 0.8) | (rri[1:L] / rri[:L - 1] > 1.2))'], {}), '((rri[:L - 1] / rri[1:L] < 0.8) | (rri[:L - 1] / rri[1:L] > 1.2) |\n (rri[1:L] / rri[:L - 1] < 0.8) | (rri[1:L] / rri[:L - 1] > 1.2))\n', (108, 243), True, 'import numpy as np\n'), ((321, 344), 'numpy.delete', 'np.delete', (['rri', 'indices'], {}), '(rri, indices)\n', (330, 344), True, 'import numpy as np\n')]
|
import itertools
import os
import re
import sys
# scans files to construct an empirical prior
from bifs import BIFS
# numpy >= 1.17
from numpy.random import Generator, PCG64
import numpy as np
class RunningMean:
"""Accepts values one at a time and computes the mean and sd of all values seen so far.
The inputs are arrays, which must all have the same shape. Mean and sd are accumulated
separately for each cell in the array.
"""
def __init__(self, sd=True):
"""If sd is false do not accumulate second moment.
Clients should not request information related to the sd in that case.
"""
self.n = 0
self._second = sd # as in second moment
def observation(self, x):
"x is an array-like object which is considered a single observation"
self.n += 1
if self.n == 1:
self._mns = x
if self._second:
# ss will turn into a matrix later
self._ss = 0.0
else:
lastdelta = x-self._mns
self._mns += (lastdelta)/self.n
if self._second:
# element by element multiplication in next line
self._ss += lastdelta*(x-self._mns)
def mean(self):
"return array of means so far"
return self._mns
def sd(self):
"return array of sd so far"
# element by element square root
return np.sqrt(self._ss/(self.n-1))
class AbstractEmpiricalScanner:
""" This class consumes a list of images and computes statistics on them. Each statistic is computed separately for
each voxel, i.e. the result in the (2, 5) cell refers to all the (2, 5) cells in all the images (or their Fourier counterparts).
All images must have the same dimensions, and they should be aligned with each other
for the results to be meaningful.
The mean and sd of the modulus is always accumulated; values for the phase can be requested as well, as can the correlations between the
phase and modulus (again, at each point in Fourier space).
Finally, one can request a sample of the original voxels in image space.
Concrete classes provide particular ways to get images. They then pass the images to _statsAccumulate and,
optionally, _voxAccumulate (possibly different images for each) and call
_post when done. At that point, and only that point, are results available from self.modulus() and, if requested,
self.phase(), self.corr(), and self.voxels().
    For backward compatibility, the self.mns, self.sds, and self.vox accessors return the mean and sd of self.modulus() and the sampled voxels.
Don't rely on that in new code.
image_mask optionally indicates which areas of the image to ignore.
It must be a boolean array with the same shape as image files.
All voxels selected by image_mask are set to zero before doing BIFS processing.
    The mask applies to the original image, NOT to the Fourier-space version, which will
generally have non-0 values in the image_mask region.
It is the subclass responsibility to implement these semantics.
Note the "mask" here is not a mask in the numpy sense of a masked array, which
concerns missing values.
voxel sampling only considers the non-ignored regions, but the number sampled will be based on
the total voxel count before masking.
"""
def __init__(self, sampleFraction=0, seed=85792359, image_mask=None, phase=False, corr=False):
"""Setup for scan of images
if sampleFraction is >0 (and it should be <=1) then that fraction of the image voxels will be retained.
In that case, seed is used to set the random number generator.
If phase is true, accumulate statistics on the phase as well as the modulus.
If corr is true, accumulate statistics on the phase and its covariance with the modulus.
Covariance is on a cell by cell basis.
"""
self.sampleFraction = sampleFraction
self._modulus = RunningMean()
        # flags are always set so _do_one can test them even when phase/corr are False
        self._getPhase = phase or corr
        if self._getPhase:
            self._phase = RunningMean()
        self._getcorr = corr
        if corr:
            self._xy = RunningMean(sd=False)
if sampleFraction>0:
self._voxels = []
self._rg = Generator(PCG64(seed))
self.masking = (image_mask is not None)
if self.masking:
self.image_mask = image_mask
self.image_keep = np.logical_not(image_mask)
self._benchmarkHdr = None # checks for consistent headers
self._mismatch = set() # holds keys that had a mismatch
self._bifs = BIFS()
def modulus(self)->RunningMean:
return self._modulus
def phase(self)->RunningMean:
return self._phase
def corr(self):
"Note we return the correlation matrix itself, not an accumulator"
return (self._xy.mean()-self._modulus.mean()*self._phase.mean())/ \
(self._modulus.sd()*self._phase.sd())
def voxels(self):
"return 1-d array sorted by intensity"
return self._voxels
def __getattr__(self, name):
## backward compatibility only
if name == "mns":
return self.modulus().mean()
if name == "sds":
return self.modulus().sd()
if name == "vox":
return self.voxels()
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
def _do_one(self, file):
"file is a path-like object. Read it in and accumulate information"
self._bifs.load_image_file(file)
if self.masking:
# dirty trick. But doesn't invalidate anything else in _bifs.
self._bifs._init_image[self.image_mask] = 0.0
self._modulus.observation(self._bifs.mod_image())
if self._getPhase:
self._phase.observation(self._bifs.phase_image())
if self._getcorr:
# next multiplication is element by element
self._xy.observation(self._bifs.phase_image()*self._bifs.mod_image())
if self.sampleFraction>0:
self._voxAccumulate(self._bifs.init_image())
hdr = self._bifs.read_imfile.header
if not self._benchmarkHdr:
# first header encountered
self._benchmarkHdr = hdr
# could not delete the following key
# it actually doesn't appear in the objects attributes
#del benchmarkHdr.__dict__['db_name'] # differences expected and no concern
else:
for key in self._benchmarkHdr:
if key == 'db_name':
continue
if key.startswith("scl_"):
# values were array(nan, dtype=float32) and I had no luck testing for them
# in various ways
continue
v1 = self._benchmarkHdr[key]
v2 = hdr[key]
if (v1 != v2).any():
self._mismatch.add(key)
def _voxAccumulate(self, m):
"""accumulate voxel values.
In the most likely case, the voxels are from image space while the empirical prior
        is from k-space. So we provide separate functions for the two values.
Calling this is pointless unless sampleFraction>0.
"""
# always base number sampled on the complete image size
nSamp = int(m.size*self.sampleFraction)
if self.masking:
self._voxels.append(self._rg.choice(m[self.image_keep], nSamp))
else:
# m.ravel is not an acceptable first argument to choice
# actually, it should have been np.ravel(m)
# m.flatten and the mask selection above both create copies, unfortunately
self._voxels.append(self._rg.choice(m.flatten(), nSamp))
def _statsPost(self):
"""
Finalize computation of voxel by voxel statistics for all images.
Call after all images have been seen.
Results returned as arrays self.mns and self.sds.
"""
# currently handled by RunningMean instances automatically
pass
def _voxPost(self):
"""
Finalize accumulated voxels.
"""
if self.sampleFraction>0:
self._voxels = np.concatenate(self._voxels)
self._voxels.sort()
def _post(self):
"wrap up all processing"
self._statsPost()
self._voxPost()
def nImages(self) -> int:
"number of images processed so far = number of files read unless error"
return self._modulus.n
class EmpiricalScanner(AbstractEmpiricalScanner):
"""Scan selected images on disk, ensuring they are alll compatible.
topDir path like object indicating where in the file system the scan should start
all subdirectories will be scanned recursively unless they are excluded.
matchFile <String> regular expression for the file name of image files we want.
Matching is on the file name only, not its full path.
exclude <String> optional regular expression. Any directory matching this pattern is excluded.
Any file that satisfies matchFile is excluded if it also matches exclude.
    ostr A stream-like object that will receive notices of skipped files and statistics.
See AbstractEmpiricalScanner for sampleFraction, seed and image_mask.
The files are read in and converted to k-space. We compute the mean and sd of the k-space images,
and optionally accumulate voxels from the original image.
    We also check that the headers are consistent. This works for .nii files, and may or may not work for others.
"""
def __init__(self, sampleFraction=0, seed=85792359, topDir=".", matchFile="", exclude=None, image_mask=None, phase=False, corr=False, ostr=sys.stdout):
super().__init__(sampleFraction, seed, image_mask, phase, corr)
self._topDir = topDir
self._matchRE = re.compile(matchFile, re.I)
if exclude:
self._excludeRE = re.compile(exclude, re.I)
else:
self._excludeRE = None
self.go(ostr=ostr)
def go(self, ostr=sys.stdout):
"""Actually perform the scan.
Note this is triggered by object initialization.
Repeated calls may not work.
ostr is an output stream
"""
for root, dirs, files in os.walk(self._topDir):
if self._excludeRE:
                # skip directories matching the exclude pattern, e.g. those holding the target cases we are trying to predict
iKill = [ i for i, d in zip(itertools.count(), dirs) if self._excludeRE.search(d)]
if iKill:
nKill = 0
for i in iKill:
i -= nKill
print("Skipping {}".format(dirs[i]), file=ostr)
                        del dirs[i]  # i was already adjusted by nKill above
nKill += 1
# look for files to import
if files:
for f in files:
if not self._matchRE.search(f):
continue
if self._excludeRE:
if self._excludeRE.search(f):
print("Skipping {}".format(f), file=ostr)
continue
self._do_one(os.path.join(root, f))
self._post()
class FeedScanner(AbstractEmpiricalScanner):
"""A scanner that accepts anything iterable as a list of file names to scan"""
def __init__(self, files, sampleFraction=0, seed=85792359, image_mask=None, phase=False, corr=False, ostr=sys.stdout):
super().__init__(sampleFraction, seed, image_mask, phase, corr)
self._files = files
self.go(ostr=ostr)
    def go(self, ostr=sys.stdout):
for f in self._files:
self._do_one(f)
self._post()
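# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the scanners above. The directory layout,
# file pattern and sample fraction are assumptions made for illustration only.
if __name__ == "__main__":
    # Scan every *.nii file below ./images, skipping any "holdout" directories,
    # and keep 1% of the voxels of each original image.
    scanner = EmpiricalScanner(sampleFraction=0.01,
                               topDir="./images",
                               matchFile=r"\.nii$",
                               exclude="holdout")
    print("images processed:", scanner.nImages())
    print("voxels sampled:", len(scanner.voxels()))
    # FeedScanner performs the same accumulation for an explicit list of files:
    # FeedScanner(["scan1.nii", "scan2.nii"], sampleFraction=0.01)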
|
[
"numpy.random.PCG64",
"numpy.logical_not",
"os.walk",
"itertools.count",
"bifs.BIFS",
"numpy.sqrt",
"os.path.join",
"numpy.concatenate",
"re.compile"
] |
[((1426, 1458), 'numpy.sqrt', 'np.sqrt', (['(self._ss / (self.n - 1))'], {}), '(self._ss / (self.n - 1))\n', (1433, 1458), True, 'import numpy as np\n'), ((4677, 4683), 'bifs.BIFS', 'BIFS', ([], {}), '()\n', (4681, 4683), False, 'from bifs import BIFS\n'), ((10015, 10042), 're.compile', 're.compile', (['matchFile', 're.I'], {}), '(matchFile, re.I)\n', (10025, 10042), False, 'import re\n'), ((10442, 10463), 'os.walk', 'os.walk', (['self._topDir'], {}), '(self._topDir)\n', (10449, 10463), False, 'import os\n'), ((4497, 4523), 'numpy.logical_not', 'np.logical_not', (['image_mask'], {}), '(image_mask)\n', (4511, 4523), True, 'import numpy as np\n'), ((8336, 8364), 'numpy.concatenate', 'np.concatenate', (['self._voxels'], {}), '(self._voxels)\n', (8350, 8364), True, 'import numpy as np\n'), ((10093, 10118), 're.compile', 're.compile', (['exclude', 're.I'], {}), '(exclude, re.I)\n', (10103, 10118), False, 'import re\n'), ((4340, 4351), 'numpy.random.PCG64', 'PCG64', (['seed'], {}), '(seed)\n', (4345, 4351), False, 'from numpy.random import Generator, PCG64\n'), ((11376, 11397), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (11388, 11397), False, 'import os\n'), ((10632, 10649), 'itertools.count', 'itertools.count', ([], {}), '()\n', (10647, 10649), False, 'import itertools\n')]
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import sys
import os
from autoaim import helpers
class Camera():
def __init__(self, source):
self.source = source
self.capture = cv2.VideoCapture(source)
        # True when reading from a live camera (integer device id), False for a video file
        self.__camera = type(source) is int
def snapshot(self, start, stop, interval, save_to, width=1024, height=768):
        '''
        start: "hour:minute:second"
        stop : "hour:minute:second"
        interval: time between captured frames, in milliseconds (e.g. 1000)
        save_to: output path prefix for the saved .jpeg frames
        '''
capture = self.capture
if self.__camera:
capture.set(cv2.CAP_PROP_FPS, 30)
capture.set(3, width)
capture.set(4, height)
start = self.__parse_time(start)
stop = self.__parse_time(stop)
for i in range(int((stop-start)*1000/interval)):
success, img = capture.read()
if success:
helpers.showoff(img, timeout=interval, update=True)
cv2.imwrite(save_to+str(i)+'.jpeg', img)
else:
fps = round(capture.get(cv2.CAP_PROP_FPS))
start = self.__parse_time(start) * fps
stop = self.__parse_time(stop) * fps
step = int(interval / 1000 * fps)
for i in range(start, stop, step):
capture.set(cv2.CAP_PROP_POS_FRAMES, i)
success, img = capture.read()
if success:
helpers.showoff(img, timeout=interval, update=True)
cv2.imwrite(save_to+str(i)+'.jpeg', img)
def release(self):
self.capture.release()
    def __parse_time(self, time_str):
        # avoid shadowing the built-in str
        t = np.array([int(x) for x in time_str.split(':')])
w = np.array([3600, 60, 1])
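        # e.g. "01:02:03" -> t = [1, 2, 3], t.dot(w) = 1*3600 + 2*60 + 3 = 3723 seconds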
return t.dot(w).item(0)
if __name__ == '__main__':
cam = Camera(0)
cam.snapshot('00:00:00', '00:01:00', 200, 'data/capture/')
|
[
"cv2.VideoCapture",
"autoaim.helpers.showoff",
"numpy.array"
] |
[((205, 229), 'cv2.VideoCapture', 'cv2.VideoCapture', (['source'], {}), '(source)\n', (221, 229), False, 'import cv2\n'), ((1730, 1753), 'numpy.array', 'np.array', (['[3600, 60, 1]'], {}), '([3600, 60, 1])\n', (1738, 1753), True, 'import numpy as np\n'), ((936, 987), 'autoaim.helpers.showoff', 'helpers.showoff', (['img'], {'timeout': 'interval', 'update': '(True)'}), '(img, timeout=interval, update=True)\n', (951, 987), False, 'from autoaim import helpers\n'), ((1461, 1512), 'autoaim.helpers.showoff', 'helpers.showoff', (['img'], {'timeout': 'interval', 'update': '(True)'}), '(img, timeout=interval, update=True)\n', (1476, 1512), False, 'from autoaim import helpers\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
""" model.py: A custom model for CityPersons. """
import os
import numpy as np
import torch
import torch.utils.data
import torchvision
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
import data
def get_model():
    ''' Returns a pretrained model for fine-tuning on CityPersons. '''
    # load a model pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# replace the classifier with a new one, that has
# num_classes which is user-defined
num_classes = 2 # 1 class (person) + background
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
def get_transform(train):
    ''' Builds the transform pipeline: converts a PIL image into a PyTorch tensor and, for training, adds random horizontal flipping. '''
transforms = []
transforms.append(T.ToTensor())
if train:
# during training, randomly flip the training images
# and ground-truth for data augmentation
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def save_model(model, path="./models/entire_model.pt"):
torch.save(model, path)
print('Model saved to ' + path)
def load_model(path="./models/entire_model.pt"):
if torch.cuda.is_available():
return torch.load(path)
else:
return torch.load(path, map_location=torch.device('cpu'))
def convert(img, img_raw):
'''
Converts the image from dataset back to the raw format:
* rescales it from [0,1] back to [0,255] range;
* flips the channels back to [height,width,3] format;
* converts from tensor to numpy array;
* converts from numpy array to PIL Image;
* checks if the image was augmented - flipped horizontally
'''
img = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())
img = np.array(img)
print('img shape: %d x %d x %d' % img.shape)
img = Image.fromarray(np.uint8(img)).convert('RGB')
img_flipped = np.array(img.transpose(Image.FLIP_LEFT_RIGHT))
img_raw = np.array(img_raw)
img_was_flipped = np.sum(img_flipped.flatten() == img_raw.flatten()) == img_flipped.shape[0] * img_flipped.shape[1] * img_flipped.shape[2]
print('Image was flipped: %r' % img_was_flipped)
return img
## testing on images from Hambrug
if __name__ == "__main__":
img_path = './datasets/citypersons/hamburg/'
anno_path = './datasets/citypersons/CityPersons/annotations/'
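    # NOTE (assumptions): the original script references `anno_dict`, `imgs`, `train`
    # and `num_epochs` without defining them. The placeholders below only make the
    # script self-contained; `anno_dict` must really be built from the annotation
    # files under anno_path, and `imgs` from the images that were actually annotated.
    anno_dict = {}                         # hypothetical: image file name -> annotations
    imgs = sorted(os.listdir(img_path))    # assumed: image file names inside img_path
    train = True                           # True: train a new model, False: load a saved one
    num_epochs = 10                        # matches the "10 epochs" comment below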
# split dataset into train and test
dataset = data.HamburgDataset(img_path, anno_dict, get_transform(train=True))
dataset_test = data.HamburgDataset(img_path, anno_dict, get_transform(train=False))
# permute the indices
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()
# train: 248 - 50 examples
# test: 50 examples
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
if train:
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=2, shuffle=True, num_workers=4,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, shuffle=False, num_workers=4,
collate_fn=utils.collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(device)
model = get_model()
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
# Let's train the model for 10 epochs, evaluating at the end of every epoch.
for epoch in range(num_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
        save_model(model)
    else:
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        model = load_model()
        model.to(device)
## error analysis
# raw image
img_raw = Image.open(img_path + imgs[0])
anno_raw = anno_dict[imgs[0]]
# same image from the dataset
idx = indices.index(0)
img, anno = dataset[idx]
    img = convert(img, img_raw)
# put the model in evaluation mode
model.eval()
with torch.no_grad():
prediction = model([img.to(device)])[0]
preds = prediction['boxes'] # predicted bboxes
preds = preds.cpu().data.numpy() # to numpy array
scores = prediction['scores'] # scores of predicted bboxes
scores = scores.cpu().data.numpy()
# keep only bboxes where score > threshold:
threshold = .3
highs = list(np.where(scores > threshold)[0])
# transform the bboxes from tensor to list and back to [x, y, w, h] format
bboxes_x0x1y0y1 = []
for high in highs:
bboxes_x0x1y0y1.append(list(preds[high]))
bboxes = []
for bbox in bboxes_x0x1y0y1:
bbox = list(bbox)
x0, y0 = bbox[0], bbox[1]
x1, y1 = bbox[2], bbox[3]
bboxes.append([x0, y0, x1 - x0, y1 - y0])
# draw the predicted bounding boxes
# TODO: add ground truth bboxes in green
plt.rcParams['figure.figsize'] = [12, 8]
fig, ax = plt.subplots()
    ax.imshow(img)
for bbox in bboxes:
rect = patches.Rectangle(
(bbox[0], bbox[1]), bbox[2], bbox[3],
linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
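    # Ground-truth boxes in green (assumption: `anno` follows the standard torchvision
    # detection format, with `anno['boxes']` holding [x0, y0, x1, y1] tensors).
    if isinstance(anno, dict) and 'boxes' in anno:
        for gt in anno['boxes']:
            x0, y0, x1, y1 = [float(v) for v in gt]
            ax.add_patch(patches.Rectangle(
                (x0, y0), x1 - x0, y1 - y0,
                linewidth=1, edgecolor='g', facecolor='none'))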
    plt.title(imgs[0])
plt.show()
|
[
"torchvision.models.detection.faster_rcnn.FastRCNNPredictor",
"torch.optim.lr_scheduler.StepLR",
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.load",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"engine.train_one_epoch",
"numpy.uint8",
"torch.manual_seed",
"transforms.RandomHorizontalFlip",
"torch.cuda.is_available",
"torch.utils.data.Subset",
"torch.save",
"engine.evaluate",
"transforms.Compose",
"numpy.where",
"numpy.array",
"transforms.ToTensor",
"torch.optim.SGD"
] |
[((496, 565), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'torchvision.models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (548, 565), False, 'import torchvision\n'), ((926, 969), 'torchvision.models.detection.faster_rcnn.FastRCNNPredictor', 'FastRCNNPredictor', (['in_features', 'num_classes'], {}), '(in_features, num_classes)\n', (943, 969), False, 'from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n'), ((1317, 1338), 'transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (1326, 1338), True, 'import transforms as T\n'), ((1400, 1423), 'torch.save', 'torch.save', (['model', 'path'], {}), '(model, path)\n', (1410, 1423), False, 'import torch\n'), ((1517, 1542), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1540, 1542), False, 'import torch\n'), ((2107, 2120), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2115, 2120), True, 'import numpy as np\n'), ((2306, 2323), 'numpy.array', 'np.array', (['img_raw'], {}), '(img_raw)\n', (2314, 2323), True, 'import numpy as np\n'), ((2955, 2975), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (2972, 2975), False, 'import torch\n'), ((3098, 3145), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset', 'indices[:-50]'], {}), '(dataset, indices[:-50])\n', (3121, 3145), False, 'import torch\n'), ((3165, 3217), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset_test', 'indices[-50:]'], {}), '(dataset_test, indices[-50:])\n', (3188, 3217), False, 'import torch\n'), ((1112, 1124), 'transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1122, 1124), True, 'import transforms as T\n'), ((1559, 1575), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1569, 1575), False, 'import torch\n'), ((3309, 3421), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(2)', 'shuffle': '(True)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset, batch_size=2, shuffle=True,\n num_workers=4, collate_fn=utils.collate_fn)\n', (3336, 3421), False, 'import torch\n'), ((3471, 3589), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_test'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset_test, batch_size=1, shuffle=False,\n num_workers=4, collate_fn=utils.collate_fn)\n', (3498, 3589), False, 'import torch\n'), ((3901, 3969), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.005)', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n', (3916, 3969), False, 'import torch\n'), ((4135, 4201), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(3)', 'gamma': '(0.1)'}), '(optimizer, step_size=3, gamma=0.1)\n', (4166, 4201), False, 'import torch\n'), ((5138, 5153), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5151, 5153), False, 'import torch\n'), ((1276, 1303), 'transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (1298, 1303), True, 'import transforms as T\n'), ((3653, 3678), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3676, 3678), False, 'import torch\n'), ((3629, 3649), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3641, 3649), False, 'import torch\n'), ((3684, 3703), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3696, 3703), False, 
'import torch\n'), ((4514, 4590), 'engine.train_one_epoch', 'train_one_epoch', (['model', 'optimizer', 'data_loader', 'device', 'epoch'], {'print_freq': '(10)'}), '(model, optimizer, data_loader, device, epoch, print_freq=10)\n', (4529, 4590), False, 'from engine import train_one_epoch, evaluate\n'), ((4717, 4765), 'engine.evaluate', 'evaluate', (['model', 'data_loader_test'], {'device': 'device'}), '(model, data_loader_test, device=device)\n', (4725, 4765), False, 'from engine import train_one_epoch, evaluate\n'), ((5497, 5525), 'numpy.where', 'np.where', (['(scores > threshold)'], {}), '(scores > threshold)\n', (5505, 5525), True, 'import numpy as np\n'), ((1631, 1650), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1643, 1650), False, 'import torch\n'), ((2196, 2209), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (2204, 2209), True, 'import numpy as np\n')]
|
# Code adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
from __future__ import division
from pytorch3d.ops.knn import knn_points
import torch
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import helper_functions
FLOAT_EPS = np.finfo(float).eps  # np.float was removed in recent NumPy versions; plain float is equivalent here
pixel_coords = None
import kornia
from scipy.spatial.transform import Rotation as R
def preprocess_depth_output_2_point_cloud_all(depth_maps, masks, intrinsics):
'''
    Pre-processes data for the pose network.
    Mean-subtracts each view's point cloud to bring it to the origin and downsamples it to 2048 points.
'''
batch_size, num_views, height, width = depth_maps.size()
depth_maps = helper_functions.sigmoid_2_depth(depth_maps)
point_cloud_list_all_views = []
rotated_point_cloud_list_all_views = []
for view in range(num_views):
src_camera_coords = pixel2cam(depth_maps[:, view].unsqueeze(0), intrinsics.inverse())
src_camera_coords = src_camera_coords.reshape(batch_size, 3, height*width) # [B 3 H*W]
if torch.cuda.is_available():
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).cuda().float() # [B 3 3]
else:
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).float() # [B 3 3]
point_cloud_list = []
rotated_point_cloud_list = []
masks_batch = masks[:, view]
for i in range(batch_size):
src_camera_coords_view = src_camera_coords[i] # [3 H*W]
mask = masks_batch[i] # [H W]
mask = mask.reshape(1, -1).squeeze() # [H*W]
# Extracting the points only within mask region
src_camera_coords_view = src_camera_coords_view[:, (mask == 1.0)]
# Mean center value
src_camera_coords_view = src_camera_coords_view - src_camera_coords_view.mean(axis = 1).unsqueeze(1).repeat(1, src_camera_coords_view.size(1)) #[3 masksize]
# Downsample to 2048 points
src_camera_coords_view = torch.nn.functional.interpolate(src_camera_coords_view.unsqueeze(0), size = 2048).squeeze(0)
point_cloud_list.append(src_camera_coords_view)
src_camera_coords_downsampled = torch.stack(point_cloud_list) # [B 3 2048]
rot_src_camera_coords = random_rotation @ src_camera_coords_downsampled # [B 3 2048]
point_cloud_list_all_views.append(src_camera_coords_downsampled)
rotated_point_cloud_list_all_views.append(rot_src_camera_coords)
camera_point_clouds_downsampled = torch.stack(point_cloud_list_all_views, dim = 1) # [B views 2048]
rotated_camera_point_clouds_downsampled = torch.stack(rotated_point_cloud_list_all_views, dim = 1) # [B views 2048]
return camera_point_clouds_downsampled, rotated_camera_point_clouds_downsampled
def preprocess_depth_output_2_point_cloud(depth_maps, masks_batch, intrinsics):
'''
    Pre-processes data for the pose network.
    Mean-subtracts the point cloud to bring it to the origin and downsamples it to 2048 points.
'''
batch_size, _, height, width = depth_maps.size()
depth_maps = helper_functions.sigmoid_2_depth(depth_maps)
src_camera_coords = pixel2cam(depth_maps[:, 0].unsqueeze(0), intrinsics.inverse())
src_camera_coords = src_camera_coords.reshape(batch_size, 3, height*width) # [B 3 H*W]
if torch.cuda.is_available():
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).cuda().float() # [B 3 3]
else:
random_rotation = torch.from_numpy(R.random(batch_size, random_state=1024).as_matrix()).float() # [B 3 3]
point_cloud_list = []
rotated_point_cloud_list = []
for i in range(batch_size):
src_camera_coords_view = src_camera_coords[i] # [3 H*W]
mask = masks_batch[i] # [H W]
mask = mask.reshape(1, -1).squeeze() # [H*W]
# Extracting the points only within mask region
src_camera_coords_view = src_camera_coords_view[:, (mask == 1.0)]
# mean center value
src_camera_coords_view = src_camera_coords_view - src_camera_coords_view.mean(axis = 1).unsqueeze(1).repeat(1, src_camera_coords_view.size(1)) #[3 masksize]
# Downsample to 2048 points
src_camera_coords_view = torch.nn.functional.interpolate(src_camera_coords_view.unsqueeze(0), size = 2048).squeeze(0)
point_cloud_list.append(src_camera_coords_view)
src_camera_coords_downsampled = torch.stack(point_cloud_list) # [B 3 2048]
rot_src_camera_coords = random_rotation @ src_camera_coords_downsampled # [B 3 2048]
return src_camera_coords_downsampled, rot_src_camera_coords
def depth_decode(depth_image):
    # the first two channels together encode a 16-bit depth value:
    # R holds the 8 LSB and G the 8 MSB
depth_image_16 = depth_image[:,:,[1, 0]]
# B are 8-bit version
depth_image_8 = depth_image[:,:,2]
# last 8 are empty
depth_single_channel = np.zeros((depth_image_16.shape[0], depth_image_16.shape[1]))
# convert 16 bit to actual depth values
for i in range(depth_single_channel.shape[0]):
for j in range(depth_single_channel.shape[1]):
bit_str = '{0:08b}'.format(depth_image_16[i, j, 0]) + '{0:08b}'.format(depth_image_16[i, j, 1])
depth_single_channel[i, j] = int(bit_str, 2)
return depth_single_channel
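# Worked example: a pixel with G == 0x12 and R == 0x34 decodes to
# int('00010010' + '00110100', 2) == 0x1234 == 4660, i.e. depth = G*256 + R.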
def set_id_grid(depth):
global pixel_coords
b, _, h, w = depth.size()
i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]
j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]
ones = torch.ones(1,h,w).type_as(depth)
#print("i_range",i_range.device)
#print("j_range",j_range.device)
#print("ones",ones.device)
pixel_coords = torch.stack((j_range, i_range, ones), dim=1).type_as(depth) # [1, 3, H, W]
pixel_coords.to(depth.device)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr):
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.reshape(b, 3, -1) # [B, 3, H*W]
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.float() @ cam_coords_flat
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr.float() # [B, 3, H*W]
X = pcoords[:, 0]
Y = pcoords[:, 1]
Z = pcoords[:, 2].clamp(min=1e-4)
X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]
pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]
# print(pixel_coords.reshape(b,h,w,2).shape)
return pixel_coords.reshape(b,h,w,2)
def pixel2cam(depth, intrinsics_inv):
global pixel_coords
b, _, h, w = depth.size()
if (pixel_coords is None) or pixel_coords.size(2) < h:
set_id_grid(depth)
pixel_coords = pixel_coords.to(depth.device)
current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).reshape(b, 3, -1) # [B, 3, H*W]
#print("-"*10)
#print("Pixel", pixel_coords.device)
#print("Depth", depth.device)
#print("intrinsics_inv",intrinsics_inv.device)
#print("current_pixel_coords",current_pixel_coords.device)
#print("-"*10)
cam_coords = (intrinsics_inv.float() @ current_pixel_coords.float())
cam_coords = cam_coords.reshape(b, 3, h, w)
return cam_coords * depth.clamp(min=1e-1)
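# pixel2cam is the usual pinhole back-projection: for a pixel (u, v) with depth d,
# the camera-frame point is d * K^{-1} [u, v, 1]^T (with d clamped to at least 0.1 above).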
def quat2mat(quat):
x, y, z, w = quat[:,0], quat[:,1], quat[:,2], quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
n = w2 + x2 + y2 + z2
x = x / n
y = y / n
z = z / n
w = w / n
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([1 - 2*y2 - 2*z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, 1 - 2*x2 - 2*z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, 1 - 2*x2 - 2*y2], dim=1).reshape(B, 3, 3)
return rotMat
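# Sanity check: the identity quaternion (x, y, z, w) = (0, 0, 0, 1) maps to the 3x3
# identity rotation, e.g. quat2mat(torch.tensor([[0., 0., 0., 1.]]))[0] equals torch.eye(3).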
def pose_vec2mat(vec):
size_list = list(vec.size())
if len(size_list) == 3:
# if dimension is [B 4 4] for multiview blender dataset
return vec
else:
# If dimension is [B 7] for multiview nocs dataset
b = vec.size(0)
translation = vec[:, :3].unsqueeze(-1) # [B, 3, 1]
rot = vec[:,3:]
rot_mat = quat2mat(rot) # [B, 3, 3]
invert_mat = torch.eye(4)
invert_mat[0, 0] *= -1
invert_mat[1, 1] *= -1
# Adding 0.5 offset for dataset
transform_mat = torch.cat([rot_mat, (translation) + 0.5], dim=2) # [B, 3, 4]
transform_mat = torch.cat([transform_mat, torch.tensor([[0,0,0,1]]).unsqueeze(0).expand(1,1,4).type_as(transform_mat).repeat(b, 1, 1)], dim=1) # [B, 4, 4]
return transform_mat @ invert_mat.type_as(transform_mat)
def inverse_warp(tgt_image, depth, intrinsics, src_pose, tgt_pose):
src_camera_coords = pixel2cam(depth, intrinsics.inverse())
src_pose_mat = pose_vec2mat(src_pose)
tgt_pose_mat = pose_vec2mat(tgt_pose)
src_cam_to_tgt_cam = tgt_pose_mat.inverse() @ src_pose_mat
tgt_cam_2_proj = intrinsics @ src_cam_to_tgt_cam[:, :3, :] # Bx3x3 Bx3x4
rot, tr = tgt_cam_2_proj[:,:,:3], tgt_cam_2_proj[:,:,-1:]
tgt_pix_coords = cam2pixel(src_camera_coords, rot, tr)
tgt_image = tgt_image.type_as(tgt_pix_coords)
projected_img = F.grid_sample(tgt_image, tgt_pix_coords, padding_mode='zeros', align_corners=False)
valid_points = tgt_pix_coords.abs().max(dim=-1)[0] <= 1
return projected_img, valid_points
def inverse_warp_2(tgt_image, depth, intrinsics, src_pose, tgt_pose):
'''
Inverse warp function using Kornia
'''
src_pose_mat = pose_vec2mat(src_pose)
tgt_pose_mat = pose_vec2mat(tgt_pose)
b = tgt_image.size(0)
h = torch.tensor(tgt_image.size(2)).repeat(b)
w = torch.tensor(tgt_image.size(3)).repeat(b)
intrinsics = torch.cat([intrinsics.float(), torch.tensor([[0, 0, 0]]).unsqueeze(2).expand(1, 3, 1).type_as(intrinsics).repeat(b, 1, 1).float()], dim = 2)
intrinsics = torch.cat([intrinsics, torch.tensor([[0, 0, 0, 1]]).expand(1, 1, 4).type_as(intrinsics).repeat(b, 1, 1).float() ], dim = 1)
pinhole_tgt = kornia.geometry.PinholeCamera(intrinsics, tgt_pose_mat.float(), h, w)
pinhole_src = kornia.geometry.PinholeCamera(intrinsics, src_pose_mat.float(), h, w)
image_src = kornia.geometry.depth_warp(pinhole_tgt, pinhole_src, depth.float(), tgt_image.float(), tgt_image.size(2), tgt_image.size(3))
return image_src, image_src
def project_depth_point_cloud(depth, intrinsics, src_pose, tgt_pose):
'''
Project point cloud from src to tgt pose
'''
src_camera_coords = pixel2cam(depth, intrinsics.inverse()) # [B, 3, H, W]
b, _, h, w = src_camera_coords.size()
src_pose_mat = pose_vec2mat(src_pose)
tgt_pose_mat = pose_vec2mat(tgt_pose)
# source camera coordinates
src_camera_coords = src_camera_coords.reshape(b, 3, h*w)
src_cam_to_tgt_cam = tgt_pose_mat.inverse() @ src_pose_mat
ones = torch.ones((b, 1, h*w), device=src_camera_coords.device)
#print("ones",ones.device)
#print("src_camera_coords",src_camera_coords.device)
src_camera_coords_homogeneous = torch.cat([src_camera_coords, ones], dim = 1) # [B, 4, H*W]
# destination camera coordinates
projected_coords = src_cam_to_tgt_cam.float() @ src_camera_coords_homogeneous.float() # [B, 4, H*W]
projected_coords = projected_coords[:, :3, :]
return src_camera_coords, projected_coords
def NOCS_map_2_point_cloud(nocs_image_tensor, mask):
'''
Convert NOCS maps to point cloud
Input:
nocs_image_tensor - [B, 3, H, W] - torch tensor
mask - [B, H, W] - torch tensor
Returns:
nocs_point_cloud_list - B element list - [3, masked dims]
indices_list - B element list - [2, masked dims]
'''
indices_list = []
nocs_point_cloud_list = []
B, views, H, W = nocs_image_tensor.shape
for i in range(nocs_image_tensor.shape[0]):
ind = torch.from_numpy(((mask[i, :, :] > 0.5).nonzero().cpu()).numpy())
h = ind[:, 0]
w = ind[:, 1]
#torch.sigmoid((mask[i, :, :] - 0.5)* 100)
#h = h.detach()
#w = w.detach()
#print(h.max(), w.max(), h.min(), w.min())
nocs_point_cloud = nocs_image_tensor[i, :, h, w] # [3, mask]
nocs_point_cloud.detach_()
nocs_point_cloud_list.append(nocs_point_cloud)
indices_list.append(torch.stack([h, w]).detach()) # [2, mask]
return nocs_point_cloud_list, indices_list
def get_NOCS_correspondences(nocs_image_tensor_source, mask_source, nocs_image_tensor_target, mask_target):
'''
Get NOCS correspondences
Input:
nocs_image_tensor_source - [B, 3, H, W]
mask_source - [B, H, W]
nocs_image_tensor_target - [B, 3, H, W]
mask_target - [B, H, W]
Returns:
indices_depth_list - list of tensors with indices of shape [2, masked_dim]
'''
B, views, H, W = nocs_image_tensor_source.shape
indices_depth_list_target = []
indices_depth_list_source = []
for i in range(B):
nocs_point_cloud_list_source, indices_list_source = NOCS_map_2_point_cloud(nocs_image_tensor_source[i, :, :, :].unsqueeze(0), mask_source[i, 0, :, :].unsqueeze(0))
nocs_point_cloud_list_target, indices_list_target = NOCS_map_2_point_cloud(nocs_image_tensor_target[i, :, :, :].unsqueeze(0), mask_target[i, 0, :, :].unsqueeze(0))
pc_1, ind_1 = nocs_point_cloud_list_source[0], indices_list_source[0] # [3, mask_size], [2, mask_size]
pc_2, ind_2 = nocs_point_cloud_list_target[0], indices_list_target[0] # [3, mask_size]
# Perform NOCS KNN matching
out = knn_points(pc_1.transpose(0, 1).unsqueeze(0), pc_2.transpose(0, 1).unsqueeze(0)) # [1, masked_dim, 3]
corresponding_idx = out.idx[0, :, 0] # [masked_dim]
corresponding_idx = ind_2[:, corresponding_idx]
indices_depth_list_source.append(ind_1)
indices_depth_list_target.append(corresponding_idx)
return indices_depth_list_source, indices_depth_list_target
if __name__ == "__main__":
src_pose = torch.tensor([[1663.45703125, 46.258087158203128, -2127.346435546875, 0.008096654899418354, -0.3257482051849365, 0.0027897413820028307, 0.9454177618026733]])
tgt_pose = torch.tensor([[1889.214599609375, 221.49795532226563, -1699.667724609375, 0.039696164429187778, -0.4065377712249756, 0.01768353208899498, 0.9125999212265015]])
src_pose_2 = torch.tensor([[2011.62060546875, 374.8108215332031, -1255.8643798828125,0.06847226619720459, -0.48349833488464358, 0.03797297552227974, 0.8718366026878357]])
depth = Image.open('./test-images/depth.png')
depth = np.array(depth)
depth = depth_decode(depth)
depth = torch.tensor(depth).unsqueeze(0).unsqueeze(1).float()
# print(depth)
# plt.imshow(depth[0][0])
# plt.show()
tgt_image = cv2.imread('./test-images/rgb.png')
tgt_image = torch.tensor(tgt_image).unsqueeze(0).permute(0, 3, 1, 2).float() / 255.0
intrinsics = torch.tensor([
[617.1,0.0,320.0],
[0.0,617.1,240.0],
[0.0,0.0,1.0],
])
scale_factor = 1
src_pose[0, :3] = src_pose[0, :3] / scale_factor
tgt_pose[0, :3] = tgt_pose[0, :3] / scale_factor
src_pose_2[0, :3] = src_pose_2[0, :3] / scale_factor
x_factor = -1
src_pose[0, 0] = src_pose[0, 0] * x_factor
tgt_pose[0, 0] = tgt_pose[0, 0] * x_factor
src_pose_2[0, 0] = src_pose_2[0, 0] * x_factor
src_pose[0, 4:6] = src_pose[0, 4:6] * -1
tgt_pose[0, 4:6] = tgt_pose[0, 4:6] * -1
src_pose_2[0, 4:6] = src_pose_2[0, 4:6] * -1
intrinsics = intrinsics.unsqueeze(0)
warp=inverse_warp(tgt_image, depth, intrinsics, tgt_pose, src_pose)
warp=warp[0].permute(0,2,3,1)
plt.imshow(warp[0])
plt.show()
|
[
"torch.ones",
"matplotlib.pyplot.show",
"torch.stack",
"torch.nn.functional.grid_sample",
"torch.eye",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"torch.cat",
"scipy.spatial.transform.Rotation.random",
"PIL.Image.open",
"helper_functions.sigmoid_2_depth",
"numpy.finfo",
"cv2.imread",
"torch.cuda.is_available",
"numpy.array",
"torch.arange",
"torch.tensor"
] |
[((339, 357), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (347, 357), True, 'import numpy as np\n'), ((762, 806), 'helper_functions.sigmoid_2_depth', 'helper_functions.sigmoid_2_depth', (['depth_maps'], {}), '(depth_maps)\n', (794, 806), False, 'import helper_functions\n'), ((2657, 2703), 'torch.stack', 'torch.stack', (['point_cloud_list_all_views'], {'dim': '(1)'}), '(point_cloud_list_all_views, dim=1)\n', (2668, 2703), False, 'import torch\n'), ((2769, 2823), 'torch.stack', 'torch.stack', (['rotated_point_cloud_list_all_views'], {'dim': '(1)'}), '(rotated_point_cloud_list_all_views, dim=1)\n', (2780, 2823), False, 'import torch\n'), ((3238, 3282), 'helper_functions.sigmoid_2_depth', 'helper_functions.sigmoid_2_depth', (['depth_maps'], {}), '(depth_maps)\n', (3270, 3282), False, 'import helper_functions\n'), ((3469, 3494), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3492, 3494), False, 'import torch\n'), ((4573, 4602), 'torch.stack', 'torch.stack', (['point_cloud_list'], {}), '(point_cloud_list)\n', (4584, 4602), False, 'import torch\n'), ((5065, 5125), 'numpy.zeros', 'np.zeros', (['(depth_image_16.shape[0], depth_image_16.shape[1])'], {}), '((depth_image_16.shape[0], depth_image_16.shape[1]))\n', (5073, 5125), True, 'import numpy as np\n'), ((6664, 6700), 'torch.stack', 'torch.stack', (['[X_norm, Y_norm]'], {'dim': '(2)'}), '([X_norm, Y_norm], dim=2)\n', (6675, 6700), False, 'import torch\n'), ((9546, 9633), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['tgt_image', 'tgt_pix_coords'], {'padding_mode': '"""zeros"""', 'align_corners': '(False)'}), "(tgt_image, tgt_pix_coords, padding_mode='zeros',\n align_corners=False)\n", (9559, 9633), True, 'import torch.nn.functional as F\n'), ((11230, 11288), 'torch.ones', 'torch.ones', (['(b, 1, h * w)'], {'device': 'src_camera_coords.device'}), '((b, 1, h * w), device=src_camera_coords.device)\n', (11240, 11288), False, 'import torch\n'), ((11411, 11454), 'torch.cat', 'torch.cat', (['[src_camera_coords, ones]'], {'dim': '(1)'}), '([src_camera_coords, ones], dim=1)\n', (11420, 11454), False, 'import torch\n'), ((14435, 14602), 'torch.tensor', 'torch.tensor', (['[[1663.45703125, 46.258087158203125, -2127.346435546875, \n 0.008096654899418354, -0.3257482051849365, 0.0027897413820028305, \n 0.9454177618026733]]'], {}), '([[1663.45703125, 46.258087158203125, -2127.346435546875, \n 0.008096654899418354, -0.3257482051849365, 0.0027897413820028305, \n 0.9454177618026733]])\n', (14447, 14602), False, 'import torch\n'), ((14608, 14777), 'torch.tensor', 'torch.tensor', (['[[1889.214599609375, 221.49795532226562, -1699.667724609375, \n 0.039696164429187775, -0.4065377712249756, 0.01768353208899498, \n 0.9125999212265015]]'], {}), '([[1889.214599609375, 221.49795532226562, -1699.667724609375, \n 0.039696164429187775, -0.4065377712249756, 0.01768353208899498, \n 0.9125999212265015]])\n', (14620, 14777), False, 'import torch\n'), ((14785, 14953), 'torch.tensor', 'torch.tensor', (['[[2011.62060546875, 374.8108215332031, -1255.8643798828125, \n 0.06847226619720459, -0.48349833488464355, 0.03797297552227974, \n 0.8718366026878357]]'], {}), '([[2011.62060546875, 374.8108215332031, -1255.8643798828125, \n 0.06847226619720459, -0.48349833488464355, 0.03797297552227974, \n 0.8718366026878357]])\n', (14797, 14953), False, 'import torch\n'), ((14957, 14994), 'PIL.Image.open', 'Image.open', (['"""./test-images/depth.png"""'], {}), "('./test-images/depth.png')\n", (14967, 14994), False, 'from PIL import Image\n'), 
((15007, 15022), 'numpy.array', 'np.array', (['depth'], {}), '(depth)\n', (15015, 15022), True, 'import numpy as np\n'), ((15205, 15240), 'cv2.imread', 'cv2.imread', (['"""./test-images/rgb.png"""'], {}), "('./test-images/rgb.png')\n", (15215, 15240), False, 'import cv2\n'), ((15347, 15420), 'torch.tensor', 'torch.tensor', (['[[617.1, 0.0, 320.0], [0.0, 617.1, 240.0], [0.0, 0.0, 1.0]]'], {}), '([[617.1, 0.0, 320.0], [0.0, 617.1, 240.0], [0.0, 0.0, 1.0]])\n', (15359, 15420), False, 'import torch\n'), ((16089, 16108), 'matplotlib.pyplot.imshow', 'plt.imshow', (['warp[0]'], {}), '(warp[0])\n', (16099, 16108), True, 'import matplotlib.pyplot as plt\n'), ((16113, 16123), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16121, 16123), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1150), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1148, 1150), False, 'import torch\n'), ((2335, 2364), 'torch.stack', 'torch.stack', (['point_cloud_list'], {}), '(point_cloud_list)\n', (2346, 2364), False, 'import torch\n'), ((8564, 8576), 'torch.eye', 'torch.eye', (['(4)'], {}), '(4)\n', (8573, 8576), False, 'import torch\n'), ((8704, 8750), 'torch.cat', 'torch.cat', (['[rot_mat, translation + 0.5]'], {'dim': '(2)'}), '([rot_mat, translation + 0.5], dim=2)\n', (8713, 8750), False, 'import torch\n'), ((5742, 5761), 'torch.ones', 'torch.ones', (['(1)', 'h', 'w'], {}), '(1, h, w)\n', (5752, 5761), False, 'import torch\n'), ((5900, 5944), 'torch.stack', 'torch.stack', (['(j_range, i_range, ones)'], {'dim': '(1)'}), '((j_range, i_range, ones), dim=1)\n', (5911, 5944), False, 'import torch\n'), ((7911, 8104), 'torch.stack', 'torch.stack', (['[1 - 2 * y2 - 2 * z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, 1 -\n 2 * x2 - 2 * z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, 1 -\n 2 * x2 - 2 * y2]'], {'dim': '(1)'}), '([1 - 2 * y2 - 2 * z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz +\n 2 * xy, 1 - 2 * x2 - 2 * z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx +\n 2 * yz, 1 - 2 * x2 - 2 * y2], dim=1)\n', (7922, 8104), False, 'import torch\n'), ((12701, 12720), 'torch.stack', 'torch.stack', (['[h, w]'], {}), '([h, w])\n', (12712, 12720), False, 'import torch\n'), ((3670, 3709), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (3678, 3709), True, 'from scipy.spatial.transform import Rotation as R\n'), ((5567, 5585), 'torch.arange', 'torch.arange', (['(0)', 'h'], {}), '(0, h)\n', (5579, 5585), False, 'import torch\n'), ((5656, 5674), 'torch.arange', 'torch.arange', (['(0)', 'w'], {}), '(0, w)\n', (5668, 5674), False, 'import torch\n'), ((15068, 15087), 'torch.tensor', 'torch.tensor', (['depth'], {}), '(depth)\n', (15080, 15087), False, 'import torch\n'), ((1338, 1377), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (1346, 1377), True, 'from scipy.spatial.transform import Rotation as R\n'), ((15257, 15280), 'torch.tensor', 'torch.tensor', (['tgt_image'], {}), '(tgt_image)\n', (15269, 15280), False, 'import torch\n'), ((3539, 3578), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (3547, 3578), True, 'from scipy.spatial.transform import Rotation as R\n'), ((1199, 1238), 'scipy.spatial.transform.Rotation.random', 'R.random', (['batch_size'], {'random_state': '(1024)'}), '(batch_size, random_state=1024)\n', (1207, 
1238), True, 'from scipy.spatial.transform import Rotation as R\n'), ((10268, 10296), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (10280, 10296), False, 'import torch\n'), ((8818, 8846), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (8830, 8846), False, 'import torch\n'), ((10118, 10143), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (10130, 10143), False, 'import torch\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
def many2many(n_gestures=2, n_frames=300, n_features=21, rnn_units=32):
"""Model for predicting labels for a sequence of multiple gestures
Arguments:
n_gestures -- int, size of gesture vocabulary
n_frames -- int, number of frames per training example
n_features -- int, number of features
rnn_units -- int, size of LSTM hidden state
Note:
Not bidirectional
"""
inputs = tf.keras.Input(shape=(n_frames,n_features))
# x = layers.Bidirectional(layers.LSTM(rnn_units, return_sequences=False))(x)
# x = layers.BatchNormalization()(x)
x = layers.LSTM(rnn_units, return_sequences=True)(inputs)
x = layers.Dense(n_gestures, activation='softmax')(x)
outputs = x
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name='many2many')
model.summary()
return model
def many2one_model(n_gestures=2, n_frames=35, n_features=21, rnn_units=4, bidirectional=True, n_lstm_layers=1, n_dense_layers=1, dense_size=4, recurrent_dropout=0.0):
"""Model for predicting labels for a single gesture
Arguments:
n_gestures -- int, size of gesture vocabulary. 2 indicates gesture/non gesture only
n_frames -- int, number of frames per training example
n_features -- int, number of features
rnn_units -- int, size of LSTM hidden state
        n_lstm_layers -- int, number of LSTM layers
        n_dense_layers -- int, number of dense layers after the LSTM stack
        dense_size -- int, width of the intermediate dense layers
Note:
Bidirectional
"""
inputs = tf.keras.Input(shape=(n_frames,n_features))
x = inputs
for i in range(n_lstm_layers):
if bidirectional == True:
if i == n_lstm_layers - 1: # check whether or not this is the last layer of LSTMs
# if this is the last layer, the return sequences should be false
x = layers.Bidirectional(layers.LSTM(rnn_units, return_sequences=False, recurrent_dropout=recurrent_dropout))(x)
else:
x = layers.Bidirectional(layers.LSTM(rnn_units, return_sequences=True, recurrent_dropout=recurrent_dropout))(x)
else:
if i == n_lstm_layers - 1: # check whether or not this is the last layer of LSTMs
x = layers.LSTM(rnn_units, return_sequences=False, stateful=False, recurrent_dropout=recurrent_dropout)(x)
else:
x = layers.LSTM(rnn_units, return_sequences=True, stateful=False, recurrent_dropout=recurrent_dropout)(x)
for i in range(n_dense_layers - 1):
x = layers.Dense(dense_size, activation='relu')(x)
x = layers.Dense(n_gestures, activation='softmax')(x)
outputs = x
model = tf.keras.Model(inputs=inputs, outputs=outputs, name='many2one')
model.summary()
return model
def plt_metric(history, metric='loss'):
"""plots metrics from the history of a model
Arguments:
history -- history of a keras model
metric -- str, metric to be plotted
"""
plt.plot(history.history[metric])
plt.plot(history.history['val_' + metric])
plt.title('model ' + metric)
plt.ylabel(metric)
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
def plt_pred(y, pred):
"""Plots truth labels vs predicted labels for an example"""
labels = np.argmax(np.squeeze(pred), axis=-1)
plt.plot(labels)
plt.plot(y)
plt.title('predicted vs labels')
plt.ylabel('label')
plt.xlabel('time step')
plt.legend(['predicted', 'labels'], loc='upper left')
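# --- Usage sketch (illustrative only) ---
# How the single-gesture model above might be built and trained; the shapes,
# hyper-parameters and random data below are assumptions for demonstration.
if __name__ == "__main__":
    model = many2one_model(n_gestures=2, n_frames=35, n_features=21,
                           rnn_units=8, bidirectional=True,
                           n_lstm_layers=2, n_dense_layers=2, dense_size=8)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    X = np.random.rand(64, 35, 21).astype('float32')   # (examples, frames, features)
    y = np.random.randint(0, 2, size=(64,))              # integer gesture labels
    history = model.fit(X, y, validation_split=0.25, epochs=2, verbose=0)
    plt_metric(history, metric='loss')
    plt.show()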
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"matplotlib.pyplot.legend",
"tensorflow.keras.Model",
"tensorflow.keras.layers.LSTM",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((526, 570), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(n_frames, n_features)'}), '(shape=(n_frames, n_features))\n', (540, 570), True, 'import tensorflow as tf\n'), ((847, 910), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""many2one"""'}), "(inputs=inputs, outputs=outputs, name='many2one')\n", (861, 910), True, 'import tensorflow as tf\n'), ((1520, 1564), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(n_frames, n_features)'}), '(shape=(n_frames, n_features))\n', (1534, 1564), True, 'import tensorflow as tf\n'), ((2672, 2735), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""many2one"""'}), "(inputs=inputs, outputs=outputs, name='many2one')\n", (2686, 2735), True, 'import tensorflow as tf\n'), ((2988, 3021), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[metric]'], {}), '(history.history[metric])\n', (2996, 3021), True, 'import matplotlib.pyplot as plt\n'), ((3026, 3068), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_' + metric]"], {}), "(history.history['val_' + metric])\n", (3034, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3073, 3101), 'matplotlib.pyplot.title', 'plt.title', (["('model ' + metric)"], {}), "('model ' + metric)\n", (3082, 3101), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3124), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['metric'], {}), '(metric)\n', (3116, 3124), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3148), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3139, 3148), True, 'import matplotlib.pyplot as plt\n'), ((3153, 3200), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (3163, 3200), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3360), 'matplotlib.pyplot.plot', 'plt.plot', (['labels'], {}), '(labels)\n', (3352, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3376), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y)\n', (3373, 3376), True, 'import matplotlib.pyplot as plt\n'), ((3381, 3413), 'matplotlib.pyplot.title', 'plt.title', (['"""predicted vs labels"""'], {}), "('predicted vs labels')\n", (3390, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3437), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""label"""'], {}), "('label')\n", (3428, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time step"""'], {}), "('time step')\n", (3452, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3470, 3523), 'matplotlib.pyplot.legend', 'plt.legend', (["['predicted', 'labels']"], {'loc': '"""upper left"""'}), "(['predicted', 'labels'], loc='upper left')\n", (3480, 3523), True, 'import matplotlib.pyplot as plt\n'), ((701, 746), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(True)'}), '(rnn_units, return_sequences=True)\n', (712, 746), False, 'from tensorflow.keras import layers\n'), ((763, 809), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['n_gestures'], {'activation': '"""softmax"""'}), "(n_gestures, activation='softmax')\n", (775, 809), False, 'from tensorflow.keras import layers\n'), ((2588, 2634), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['n_gestures'], {'activation': '"""softmax"""'}), "(n_gestures, activation='softmax')\n", (2600, 2634), False, 'from tensorflow.keras import layers\n'), ((3313, 3329), 
'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (3323, 3329), True, 'import numpy as np\n'), ((2527, 2570), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['dense_size'], {'activation': '"""relu"""'}), "(dense_size, activation='relu')\n", (2539, 2570), False, 'from tensorflow.keras import layers\n'), ((2227, 2330), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(False)', 'stateful': '(False)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=False, stateful=False,\n recurrent_dropout=recurrent_dropout)\n', (2238, 2330), False, 'from tensorflow.keras import layers\n'), ((2368, 2470), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(True)', 'stateful': '(False)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=True, stateful=False,\n recurrent_dropout=recurrent_dropout)\n', (2379, 2470), False, 'from tensorflow.keras import layers\n'), ((1865, 1953), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(False)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=False, recurrent_dropout=\n recurrent_dropout)\n', (1876, 1953), False, 'from tensorflow.keras import layers\n'), ((2012, 2099), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['rnn_units'], {'return_sequences': '(True)', 'recurrent_dropout': 'recurrent_dropout'}), '(rnn_units, return_sequences=True, recurrent_dropout=\n recurrent_dropout)\n', (2023, 2099), False, 'from tensorflow.keras import layers\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 26/04/2016
Version 1.0
@author: Ricieri (ELP)
Python 3.4.4
"""
"""
Reviewed on 15/10/2020
Version 1.0 rev.A - rounded printed values to 3 decimal places and displays '°C' instead of 'ºC'.
@author: Marcelo (ELP)
Python 3.8.6
"""
"""
Reviewed on 06/05/2021
Version 1.0 rev.B - Added FAC_DCDC_EMA variables.
@author: Marcelo (ELT)
Python 3.9.5
"""
import struct
import glob
import serial
import time
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
import os
from datetime import datetime
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
 Lists of BSMP entities
 The position of an entity in the list corresponds to its BSMP ID
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
UDC_FIRMWARE_VERSION = "0.42 2021-05-06"
ListVar = ['iLoad1','iLoad2','iMod1','iMod2','iMod3','iMod4','vLoad',
'vDCMod1','vDCMod2','vDCMod3','vDCMod4','vOutMod1','vOutMod2',
'vOutMod3','vOutMod4','temp1','temp2','temp3','temp4','ps_OnOff',
'ps_OpMode','ps_Remote','ps_OpenLoop','ps_SoftInterlocks',
'ps_HardInterlocks','iRef','wfmRef_Gain','wfmRef_Offset','sigGen_Enable','sigGen_Type',
'sigGen_Ncycles','sigGenPhaseStart','sigGen_PhaseEnd','sigGen_Freq',
'sigGen_Amplitude','sigGen_Offset','sigGen_Aux','dp_ID','dp_Class','dp_Coeffs','ps_Model',
'wfmRef_PtrBufferStart','wfmRef_PtrBufferEnd','wfmRef_PtrBufferK','wfmRef_SyncMode']
ListCurv = ['wfmRef_Curve','sigGen_SweepAmp','samplesBuffer','fullwfmRef_Curve','wfmRef_Blocks','samplesBuffer_blocks']
ListFunc = ['TurnOn','TurnOff','OpenLoop','ClosedLoop','OpMode','RemoteInterface',
'SetISlowRef','ConfigWfmRef','ConfigSigGen', 'EnableSigGen',
'DisableSigGen','ConfigDPModule','WfmRefUpdate','ResetInterlocks','ConfigPSModel',
'ConfigHRADC','ConfigHRADCOpMode','EnableHRADCSampling','DisableHRADCSampling','ResetWfmRef',
'SetRSAddress','EnableSamplesBuffer','DisableSamplesBuffer','SetISlowRefx4','SelectHRADCBoard','SelectTestSource',
'ResetHRADCBoards','Config_nHRADC','ReadHRADC_UFM','WriteHRADC_UFM','EraseHRADC_UFM','ReadHRADC_BoardData']
ListTestFunc = ['UdcIoExpanderTest', 'UdcLedTest', 'UdcBuzzerTest', 'UdcEepromTest', 'UdcFlashTest', 'UdcRamTest',
'UdcRtcTest', 'UdcSensorTempTest', 'UdcIsoPlaneTest', 'UdcAdcTest', 'UdcUartTest', 'UdcLoopBackTest',
'UdcComTest', 'UdcI2cIsoTest']
ListHRADCInputType = ['Vin_bipolar','Vin_unipolar_p','Vin_unipolar_n','Iin_bipolar','Iin_unipolar_p',
'Iin_unipolar_n','Vref_bipolar_p','Vref_bipolar_n','GND','Vref_unipolar_p',
'Vref_unipolar_n','GND_unipolar','Temp','Reserved0','Reserved1','Reserved2']
ListPSModels = ['FBP_100kHz', 'FBP_Parallel_100kHz', 'FAC_ACDC_10kHz', 'FAC_DCDC_20kHz',
'FAC_Full_ACDC_10kHz', 'FAC_Full_DCDC_20kHz', 'FAP_ACDC',
'FAP_DCDC_20kHz', 'TEST_HRPWM', 'TEST_HRADC', 'JIGA_HRADC',
'FAP_DCDC_15kHz_225A', 'FBPx4_100kHz', 'FAP_6U_DCDC_20kHz',
'JIGA_BASTIDOR']
ListPSModels_v2_1 = ['Empty','FBP','FBP_DCLink','FAC_ACDC','FAC_DCDC',
'FAC_2S_ACDC','FAC_2S_DCDC','FAC_2P4S_ACDC','FAC_2P4S_DCDC',
'FAP','FAP_4P','FAC_DCDC_EMA','FAP_2P2S','FAP_IMAS',
'FAC_2P_ACDC_IMAS','FAC_2P_DCDC_IMAS','Invalid','Invalid',
'Invalid','Invalid','Invalid','Invalid','Invalid','Invalid',
'Invalid','Invalid','Invalid','Invalid','Invalid','Invalid',
'Invalid','Uninitialized']
ListVar_v2_1 = ['ps_status','ps_setpoint','ps_reference','firmware_version',
'counter_set_slowref','counter_sync_pulse','siggen_enable',
'siggen_type','siggen_num_cycles','siggen_n','siggen_freq',
'siggen_amplitude','siggen_offset','siggen_aux_param',
'wfmref_selected','wfmref_sync_mode','wfmref_gain',
'wfmref_offset','p_wfmref_start','p_wfmref_end','p_wfmref_idx']
#ListCurv_v2_1 = ['wfmref','buf_samples_ctom','buf_samples_mtoc']
ListCurv_v2_1 = ['wfmref_data_0','wfmref_data_1','buf_samples_ctom']
ListFunc_v2_1 = ['turn_on','turn_off','open_loop','closed_loop','select_op_mode',
'reset_interlocks','set_command_interface',
'set_serial_termination','unlock_udc','lock_udc',
'cfg_source_scope','cfg_freq_scope','cfg_duration_scope',
'enable_scope','disable_scope','sync_pulse','set_slowref',
'set_slowref_fbp','set_slowref_readback_mon',
'set_slowref_fbp_readback_mon','set_slowref_readback_ref',
'set_slowref_fbp_readback_ref','reset_counters','cfg_wfmref',
'select_wfmref','get_wfmref_size','reset_wfmref','cfg_siggen',
'set_siggen','enable_siggen','disable_siggen','set_param','get_param',
'save_param_eeprom','load_param_eeprom', 'save_param_bank',
'load_param_bank','set_dsp_coeffs','get_dsp_coeff',
'save_dsp_coeffs_eeprom', 'load_dsp_coeffs_eeprom',
'save_dsp_modules_eeprom', 'load_dsp_modules_eeprom','reset_udc']
ListOpMode_v2_1 = ['Off','Interlock','Initializing','SlowRef','SlowRefSync',
'Cycle','RmpWfm','MigWfm','FastRef']
ListSigGenTypes_v2_1 = ['Sine','DampedSine','Trapezoidal','DampedSquaredSine',
'Square']
ListParameters = ['PS_Name','PS_Model','Num_PS_Modules','Command_Interface',
'RS485_Baudrate','RS485_Address','RS485_Termination',
'UDCNet_Address','Ethernet_IP','Ethernet_Subnet_Mask',
'Buzzer_Volume','Freq_ISR_Controller','Freq_TimeSlicer',
'Control_Loop_State','Max_Ref','Min_Ref','Max_Ref_OpenLoop',
'Min_Ref_OpenLoop',
'PWM_Freq','PWM_DeadTime','PWM_Max_Duty','PWM_Min_Duty',
'PWM_Max_Duty_OpenLoop','PWM_Min_Duty_OpenLoop',
'PWM_Lim_Duty_Share','HRADC_Num_Boards','HRADC_Freq_SPICLK',
'HRADC_Freq_Sampling','HRADC_Enable_Heater',
'HRADC_Enable_Monitor','HRADC_Type_Transducer',
'HRADC_Gain_Transducer','HRADC_Offset_Transducer','SigGen_Type',
'SigGen_Num_Cycles','SigGen_Freq','SigGen_Amplitude',
'SigGen_Offset','SigGen_Aux_Param','WfmRef_ID_WfmRef',
'WfmRef_SyncMode','WfmRef_Frequency','WfmRef_Gain',
'WfmRef_Offset','Analog_Var_Max','Analog_Var_Min',
'Hard_Interlocks_Debounce_Time','Hard_Interlocks_Reset_Time',
'Soft_Interlocks_Debounce_Time','Soft_Interlocks_Reset_Time',
'Scope_Sampling_Frequency','Scope_Source','','','','','','',
'','','','','Password','Enable_Onboard_EEPROM']
ListBCBFunc = ['ClearPof', 'SetPof', 'ReadPof', 'EnableBuzzer', 'DisableBuzzer',
'SendUartData', 'GetUartData', 'SendCanData', 'GetCanData',
'GetI2cData']
typeFormat = {'uint8_t': 'BBHBB', 'uint16_t': 'BBHHB', 'uint32_t': 'BBHIB',
'float': 'BBHfB'}
bytesFormat = {'Uint16': 'H', 'Uint32': 'L', 'Uint64': 'Q', 'float': 'f'}
typeSize = {'uint8_t': 6, 'uint16_t': 7, 'uint32_t': 9, 'float': 9}
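# Note (illustrative): on typical platforms each typeSize entry equals
# struct.calcsize() of the corresponding typeFormat string, e.g.
# struct.calcsize(typeFormat['float']) == 9 == typeSize['float'].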
num_blocks_curves_fbp = [4, 4, 4]
num_blocks_curves_fax = [16, 16, 16]
size_curve_block = [1024, 1024, 1024]
ufmOffset = {'serial': 0, 'calibdate': 4, 'variant': 9, 'rburden': 10,
'calibtemp': 12, 'vin_gain': 14, 'vin_offset': 16,
'iin_gain': 18, 'iin_offset': 20, 'vref_p': 22, 'vref_n': 24,
'gnd': 26}
hradcVariant = ['HRADC-FBP','HRADC-FAX-A','HRADC-FAX-B','HRADC-FAX-C','HRADC-FAX-D']
hradcInputTypes = ['GND', 'Vref_bipolar_p', 'Vref_bipolar_n', 'Temp',
'Vin_bipolar_p', 'Vin_bipolar_n', 'Iin_bipolar_p','Iin_bipolar_n']
NUM_MAX_COEFFS_DSP = 12
num_dsp_classes = 7
num_dsp_modules = [4, 4, 4, 6, 8, 4, 2, 2]
num_coeffs_dsp_modules = [0, 1, 1, 4, 8, 16, 2]
dsp_classes_names = ["DSP_Error", "DSP_SRLim", "DSP_LPF","DSP_PI",
"DSP_IIR_2P2Z", "DSP_IIR_3P3Z", "DSP_VdcLink_FeedForward",
"DSP_Vect_Product"]
# FBP
list_fbp_soft_interlocks = ['Heat-Sink Overtemperature']
list_fbp_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'DCLink Overvoltage',
'DCLink Undervoltage',
'DCLink Relay Fault',
'DCLink Fuse Fault',
'MOSFETs Driver Fault',
'Welded Relay Fault']
# FBP DC-Link
list_fbp_dclink_hard_interlocks = ['Power_Module_1_Fault',
'Power_Module_2_Fault',
'Power_Module_3_Fault',
'Total_Output_Overvoltage',
'Power_Module_1_Overvoltage',
'Power_Module_2_Overvoltage',
'Power_Module_3_Overvoltage',
'Total_Output_Undervoltage',
'Power_Module_1_Undervoltage',
'Power_Module_2_Undervoltage',
'Power_Module_3_Undervoltage',
'Smoke_Detector','External_Interlock']
# FAC ACDC
list_fac_acdc_soft_interlocks = []
list_fac_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_acdc_iib_is_interlocks = ['Rectifier Overvoltage',
'Input Overcurrent',
'IGBT Overtemperature',
'IGBT Overtemperature HW',
'Driver Overvoltage',
'Driver Overcurrent',
'Top Driver Error',
'Bottom Driver Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_is_alarms = ['Rectifier Overvoltage',
'Input Overcurrent',
'IGBT Overtemperature',
'Driver Overvoltage',
'Driver Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_cmd_interlocks = ['Capbank Overvoltage',
'Output Overvoltage',
'External Boards Overvoltage',
'Auxiliary Board Overcurrent',
'IDB Board Overcurrent',
'Rectifier Inductor Overtemperature',
'Rectifier Heat-Sink Overtemperature',
'AC Mains Overcurrent',
'Emergency Button',
'AC Mains Undervoltage',
'AC Mains Overvoltage',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_cmd_alarms = ['Capbank Overvoltage',
'Output Overvoltage',
'External Boards Overvoltage',
'Auxiliary Board Overcurrent',
'IDB Board Overcurrent',
'Rectifier Inductor Overtemperature',
'Rectifier Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAC DCDC
list_fac_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault']
list_fac_dcdc_hard_interlocks = ['Load Overcurrent',
'CapBank Overvoltage',
'CapBank Undervoltage',
'IIB Interlock',
'External Interlock',
'Rack Interlock']
list_fac_dcdc_iib_interlocks = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 1 Overtemperature HW',
'IGBT 2 Overtemperature',
'IGBT 2 Overtemperature HW',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Top Driver 1 Error',
'Bottom Driver 1 Error',
'Top Driver 2 Error',
'Bottom Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_dcdc_iib_alarms = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAC-2S AC/DC
list_fac_2s_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_2s_acdc_iib_is_interlocks = list_fac_acdc_iib_is_interlocks
list_fac_2s_acdc_iib_cmd_interlocks = list_fac_acdc_iib_cmd_interlocks
list_fac_2s_acdc_iib_is_alarms = list_fac_acdc_iib_is_alarms
list_fac_2s_acdc_iib_cmd_alarms = list_fac_acdc_iib_cmd_alarms
# FAC-2S DC/DC
list_fac_2s_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault']
list_fac_2s_dcdc_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank Overvoltage',
'Module 2 CapBank Overvoltage',
'Module 1 CapBank Undervoltage',
'Module 2 CapBank Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'External Interlock',
'Rack Interlock']
list_fac_2s_dcdc_iib_interlocks = list_fac_dcdc_iib_interlocks
list_fac_2s_dcdc_iib_alarms = list_fac_dcdc_iib_alarms
# FAC-2P4S AC/DC
list_fac_2p4s_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_2p4s_acdc_iib_is_interlocks = list_fac_acdc_iib_is_interlocks
list_fac_2p4s_acdc_iib_cmd_interlocks = list_fac_acdc_iib_cmd_interlocks
list_fac_2p4s_acdc_iib_is_alarms = list_fac_acdc_iib_is_alarms
list_fac_2p4s_acdc_iib_cmd_alarms = list_fac_acdc_iib_cmd_alarms
# FAC-2P4S DC/DC
list_fac_2p4s_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent',
'Arms High Difference',
'Complementary PS Interlock']
list_fac_2p4s_dcdc_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank Overvoltage',
'Module 2 CapBank Overvoltage',
'Module 3 CapBank Overvoltage',
'Module 4 CapBank Overvoltage',
'Module 5 CapBank Overvoltage',
'Module 6 CapBank Overvoltage',
'Module 7 CapBank Overvoltage',
'Module 8 CapBank Overvoltage',
'Module 1 CapBank Undervoltage',
'Module 2 CapBank Undervoltage',
'Module 3 CapBank Undervoltage',
'Module 4 CapBank Undervoltage',
'Module 5 CapBank Undervoltage',
'Module 6 CapBank Undervoltage',
'Module 7 CapBank Undervoltage',
'Module 8 CapBank Undervoltage',
'IIB 1 Itlk',
'IIB 2 Itlk',
'IIB 3 Itlk',
'IIB 4 Itlk',
'IIB 5 Itlk',
'IIB 6 Itlk',
'IIB 7 Itlk',
'IIB 8 Itlk']
list_fac_2p4s_dcdc_iib_interlocks = list_fac_dcdc_iib_interlocks
list_fac_2p4s_dcdc_iib_alarms = list_fac_dcdc_iib_alarms
# FAP
list_fap_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'IGBTs Current High Difference']
list_fap_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'DCLink Overvoltage',
'DCLink Undervoltage',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IIB Itlk']
list_fap_iib_interlocks = ['Input Overvoltage',
'Output Overvoltage',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Driver 1 Error',
'Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'DCLink Contactor Fault',
'Contact Sticking of Contactor',
'External Interlock',
'Rack Interlock',
'High Leakage Current',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fap_iib_alarms = ['Input Overvoltage',
'Output Overvoltage',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'High Leakage Current',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAP-4P
list_fap_4p_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'IGBTs Current High Difference']
list_fap_4p_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'IGBT 1 Mod 1 Overcurrent',
'IGBT 2 Mod 1 Overcurrent',
'IGBT 1 Mod 2 Overcurrent',
'IGBT 2 Mod 2 Overcurrent',
'IGBT 1 Mod 3 Overcurrent',
'IGBT 2 Mod 3 Overcurrent',
'IGBT 1 Mod 4 Overcurrent',
'IGBT 2 Mod 4 Overcurrent',
'Welded Contactor Mod 1 Fault',
'Welded Contactor Mod 2 Fault',
'Welded Contactor Mod 3 Fault',
'Welded Contactor Mod 4 Fault',
'Opened Contactor Mod 1 Fault',
'Opened Contactor Mod 2 Fault',
'Opened Contactor Mod 3 Fault',
'Opened Contactor Mod 4 Fault',
'DCLink Mod 1 Overvoltage',
'DCLink Mod 2 Overvoltage',
'DCLink Mod 3 Overvoltage',
'DCLink Mod 4 Overvoltage',
'DCLink Mod 1 Undervoltage',
'DCLink Mod 2 Undervoltage',
'DCLink Mod 3 Undervoltage',
'DCLink Mod 4 Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'IIB Mod 3 Itlk',
'IIB Mod 4 Itlk']
list_fap_4p_iib_interlocks = list_fap_iib_interlocks
list_fap_4p_iib_alarms = list_fap_iib_alarms
# FAC DCDC EMA
list_fac_dcdc_ema_soft_interlocks = ['DCCT Fault',
'Load Feedback Fault']
list_fac_dcdc_ema_hard_interlocks = ['Load Overcurrent',
'DCLink Overvoltage',
'DCLink Undervoltage',
'Emergency Button',
'Load Waterflow',
'Load Overtemperature',
'IIB Itlk']
list_fac_dcdc_ema_iib_interlocks = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 1 Overtemperature HW',
'IGBT 2 Overtemperature',
'IGBT 2 Overtemperature HW',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Top Driver 1 Error',
'Bottom Driver 1 Error',
'Top Driver 2 Error',
'Bottom Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_dcdc_ema_iib_alarms = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAP-2P2S
list_fap_2p2s_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'Arms High Difference',
'IGBTs Current High Difference',
'Complementary PS Interlock']
list_fap_2p2s_hard_interlocks = ['Load Overcurrent',
'IGBT 1 Mod 1 Overcurrent',
'IGBT 2 Mod 1 Overcurrent',
'IGBT 1 Mod 2 Overcurrent',
'IGBT 2 Mod 2 Overcurrent',
'IGBT 1 Mod 3 Overcurrent',
'IGBT 2 Mod 3 Overcurrent',
'IGBT 1 Mod 4 Overcurrent',
'IGBT 2 Mod 4 Overcurrent',
'Welded Contactor Mod 1 Fault',
'Welded Contactor Mod 2 Fault',
'Welded Contactor Mod 3 Fault',
'Welded Contactor Mod 4 Fault',
'Opened Contactor Mod 1 Fault',
'Opened Contactor Mod 2 Fault',
'Opened Contactor Mod 3 Fault',
'Opened Contactor Mod 4 Fault',
'DCLink Mod 1 Overvoltage',
'DCLink Mod 2 Overvoltage',
'DCLink Mod 3 Overvoltage',
'DCLink Mod 4 Overvoltage',
'DCLink Mod 1 Undervoltage',
'DCLink Mod 2 Undervoltage',
'DCLink Mod 3 Undervoltage',
'DCLink Mod 4 Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'IIB Mod 3 Itlk',
'IIB Mod 4 Itlk',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent']
list_fap_2p2s_iib_interlocks = list_fap_iib_interlocks
list_fap_2p2s_iib_alarms = list_fap_iib_alarms
# FAP 225A
list_fap_225A_soft_interlocks = ['IGBTs Current High Difference']
list_fap_225A_hard_interlocks = ['Load Overcurrent',
'DCLink Contactor Fault',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent']
# FAC-2P ACDC
list_fac_2p_acdc_imas_soft_interlocks = []
list_fac_2p_acdc_imas_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overcurrent',
'AC Mains Contactor Fault',
'Module A Interlock',
'Module B Interlock',
'DCDC Interlock']
# FAC-2P DCDC
list_fac_2p_dcdc_imas_soft_interlocks = []
list_fac_2p_dcdc_imas_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank_Overvoltage',
'Module 2 CapBank_Overvoltage',
'Module 1 CapBank_Undervoltage',
'Module 2 CapBank_Undervoltage',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent',
'Arms High_Difference',
'ACDC Interlock']
class SerialDRS(object):
ser = serial.Serial()
def __init__(self):
#self.ser=serial.Serial()
self.MasterAdd = '\x00'
self.SlaveAdd = '\x01'
self.BCastAdd = '\xFF'
self.ComWriteVar = '\x20'
self.WriteFloatSizePayload = '\x00\x05'
self.WriteDoubleSizePayload = '\x00\x03'
self.ComReadVar = '\x10\x00\x01'
self.ComRequestCurve = '\x40'
self.ComSendWfmRef = '\x41'
self.ComFunction = '\x50'
self.DP_MODULE_MAX_COEFF = 16
self.ListDPClass = ['ELP_Error','ELP_SRLim','ELP_LPF','ELP_PI_dawu','ELP_IIR_2P2Z','ELP_IIR_3P3Z',
'DCL_PID','DCL_PI','DCL_DF13','DCL_DF22','DCL_23']
self.ListHardInterlocks = ['Sobrecorrente', 'Interlock Externo', 'Falha AC',
'Falha ACDC', 'Falha DCDC','Sobretensao','Falha Resistor Precarga','Falha Carga Capacitores Saída',
'Botão de Emergência', 'OUT_OVERVOLTAGE', 'IN_OVERVOLTAGE','ARM1_OVERCURRENT','ARM2_OVERCURRENT',
'IN_OVERCURRENT','DRIVER1_FAULT','DRIVER2_FAULT','OUT1_OVERCURRENT','OUT2_OVERCURRENT','OUT1_OVERVOLTAGE',
'OUT2_OVERVOLTAGE','LEAKAGE_OVERCURRENT','AC_OVERCURRENT']
self.ListSoftInterlocks = ['IGBT1_OVERTEMP','IGBT2_OVERTEMP','L1_OVERTEMP','L2_OVERTEMP','HEATSINK_OVERTEMP','WATER_OVERTEMP',
'RECTFIER1_OVERTEMP','RECTFIER2_OVERTEMP','AC_TRANSF_OVERTEMP','WATER_FLUX_FAULT','OVER_HUMIDITY_FAULT']
print("\n pyDRS - compatible UDC firmware version: " + UDC_FIRMWARE_VERSION + "\n")
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
    Internal class helper functions
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    # Convert a float to its 4-byte IEEE-754 representation (ISO-8859-1 string)
def float_to_hex(self, value):
hex_value = struct.pack('f', value)
return hex_value.decode('ISO-8859-1')
    # Convert a list of floats to their packed hex representation
def float_list_to_hex(self, value_list):
hex_list = b''
for value in value_list:
hex_list = hex_list + struct.pack('f', value)
return hex_list.decode('ISO-8859-1')
def format_list_size(self, in_list, max_size):
out_list = in_list[0:max_size]
if max_size > len(in_list):
for i in range(max_size - len(in_list)):
out_list.append(0)
return out_list
    # Convert a 16-bit unsigned value to hex (legacy "double" naming; packs uint16)
def double_to_hex(self,value):
hex_value = struct.pack('H',value)
return hex_value.decode('ISO-8859-1')
    # Convert a 32-bit unsigned integer to hex
def uint32_to_hex(self,value):
hex_value = struct.pack('I',value)
return hex_value.decode('ISO-8859-1')
    # Convert an index (single byte) to hex
def index_to_hex(self,value):
hex_value = struct.pack('B',value)
return hex_value.decode('ISO-8859-1')
    # Convert payload_size to hex (big-endian uint16)
def size_to_hex(self,value):
hex_value = struct.pack('>H',value)
return hex_value.decode('ISO-8859-1')
    # Checksum: append the byte that makes the whole frame sum to zero (mod 256)
def checksum(self, packet):
b=bytearray(packet.encode('ISO-8859-1'))
csum =(256-sum(b))%256
hcsum = struct.pack('B',csum)
send_msg = packet + hcsum.decode(encoding='ISO-8859-1')
return send_msg
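    # Frame layout sketch: every outgoing packet is SlaveAdd + command + payload,
    # and checksum() appends the byte that makes the byte-sum divisible by 256,
    # e.g. for a read_var request with a hypothetical variable ID 0x19:
    #
    #     frame = drs.checksum('\x01' + '\x10\x00\x01' + '\x19').encode('ISO-8859-1')
    #     assert sum(bytearray(frame)) % 256 == 0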
    # Read a variable by its ID
def read_var(self,var_id):
send_msg = self.checksum(self.SlaveAdd+self.ComReadVar+var_id)
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
def is_open(self):
return self.ser.isOpen()
def _convertToUint16List(self, val, format):
val_16 = []
val_b = struct.pack(bytesFormat[format],val)
print(val_b)
for i in range(0,len(val_b),2):
val_16.append(struct.unpack('H',val_b[i:i+2])[0])
print(val_16)
return val_16
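    # _convertToUint16List splits a packed value into 16-bit words, as consumed
    # by the UFM write functions below (each word goes to one UFM address):
    #
    #     drs._convertToUint16List(20.0, 'float')       # float  -> 2 words
    #     drs._convertToUint16List(12345678, 'Uint64')  # uint64 -> 4 words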
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
    BSMP function-entity call methods
    Each method returns the raw bytes of the reply message
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def TurnOn_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOn'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOn(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOn'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def turn_on(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('turn_on'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOff_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOff'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOff(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOff'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def turn_off(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('turn_off'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def open_loop(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('open_loop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def closed_loop(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('closed_loop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpenLoop(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpenLoop'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClosedLoop(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ClosedLoop'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpenLoop_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpenLoop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClosedLoop_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ClosedLoop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpMode(self,op_mode):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_opmode
hex_opmode = self.double_to_hex(op_mode)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpMode'))+hex_opmode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def RemoteInterface(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('RemoteInterface'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetISlowRef(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetISlowRef'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigWfmRef(self,gain,offset):
payload_size = self.size_to_hex(1+4+4) #Payload: ID + gain + offset
hex_gain = self.float_to_hex(gain)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigWfmRef'))+hex_gain+hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigSigGen(self,sigType,nCycles,phaseStart,phaseEnd):
payload_size = self.size_to_hex(1+2+2+4+4) #Payload: ID + type + nCycles + phaseStart + phaseEnd
hex_sigType = self.double_to_hex(sigType)
hex_nCycles = self.double_to_hex(nCycles)
hex_phaseStart = self.float_to_hex(phaseStart)
hex_phaseEnd = self.float_to_hex(phaseEnd)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigSigGen'))+hex_sigType+hex_nCycles+hex_phaseStart+hex_phaseEnd
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableSigGen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableSigGen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableSigGen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableSigGen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigDPModule(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigDPModule'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigDPModuleFull(self,dp_id,dp_class,dp_coeffs):
self.Write_dp_ID(dp_id)
self.Write_dp_Class(dp_class)
self.Write_dp_Coeffs(dp_coeffs)
self.ConfigDPModule()
def WfmRefUpdate(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('WfmRefUpdate'))
send_msg = self.checksum(self.BCastAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
def ResetInterlocks(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetInterlocks'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_interlocks(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_interlocks'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigPSModel(self,ps_model):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_Model
hex_model = self.double_to_hex(ps_model)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigPSModel'))+hex_model
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigHRADC(self,hradcID,freqSampling,inputType,enableHeater,enableMonitor):
payload_size = self.size_to_hex(1+2+4+2+2+2) #Payload: ID + hradcID + freqSampling + inputType + enableHeater + enableMonitor
hex_hradcID = self.double_to_hex(hradcID)
hex_freq = self.float_to_hex(freqSampling)
hex_type = self.double_to_hex(ListHRADCInputType.index(inputType))
hex_enHeater = self.double_to_hex(enableHeater)
hex_enMonitor = self.double_to_hex(enableMonitor)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigHRADC'))+hex_hradcID+hex_freq+hex_type+hex_enHeater+hex_enMonitor
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigHRADCOpMode(self,hradcID,opMode):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + hradcID + opMode
hex_hradcID = self.double_to_hex(hradcID)
hex_opMode = self.double_to_hex(opMode)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigHRADCOpMode'))+hex_hradcID+hex_opMode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableHRADCSampling(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableHRADCSampling'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableHRADCSampling(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableHRADCSampling'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ResetWfmRef(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetWfmRef'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetRSAddress(self,rs_address):
payload_size = self.size_to_hex(1+2) #Payload: ID + rs_address
hex_add = self.double_to_hex(rs_address)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetRSAddress'))+hex_add
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableSamplesBuffer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableSamplesBuffer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableSamplesBuffer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableSamplesBuffer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SelectHRADCBoard(self,hradcID):
payload_size = self.size_to_hex(1+2) #Payload: ID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SelectHRADCBoard'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SelectTestSource(self,inputType):
payload_size = self.size_to_hex(1+2) #Payload: inputType
hex_type = self.double_to_hex(ListHRADCInputType.index(inputType))
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SelectTestSource'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ResetHRADCBoards(self, enable):
payload_size = self.size_to_hex(1+2) #Payload: ID+enable(2)
hex_enable = self.double_to_hex(enable)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetHRADCBoards'))+hex_enable
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def Config_nHRADC(self,nHRADC):
payload_size = self.size_to_hex(1+2) #Payload: nHRADC
hex_nhradc = self.double_to_hex(nHRADC)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('Config_nHRADC'))+hex_nhradc
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ReadHRADC_UFM(self,hradcID,ufmadd):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + hradcID + ufmadd
hex_hradcID = self.double_to_hex(hradcID)
hex_ufmadd = self.double_to_hex(ufmadd)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ReadHRADC_UFM'))+hex_hradcID+hex_ufmadd
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def WriteHRADC_UFM(self,hradcID,ufmadd,ufmdata):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + hradcID + ufmadd + ufmdata
hex_hradcID = self.double_to_hex(hradcID)
hex_ufmadd = self.double_to_hex(ufmadd)
hex_ufmdata = self.double_to_hex(ufmdata)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('WriteHRADC_UFM'))+hex_hradcID+hex_ufmadd+hex_ufmdata
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EraseHRADC_UFM(self,hradcID):
payload_size = self.size_to_hex(1+2) #Payload: ID + hradcID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EraseHRADC_UFM'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def InitHRADC_BoardData(self, serial = 12345678, day = 1, mon = 1,
year = 2017, hour = 12, minutes = 30,
variant = 'HRADC-FBP', rburden = 20, calibtemp = 40,
vin_gain = 1, vin_offset = 0, iin_gain = 1,
iin_offset = 0, vref_p = 5, vref_n = -5, gnd = 0):
boardData = {'serial': serial, 'variant': variant, 'rburden': rburden,
'tm_mday': day, 'tm_mon': mon, 'tm_year': year,
'tm_hour': hour, 'tm_min': minutes, 'calibtemp': calibtemp,
'vin_gain': vin_gain, 'vin_offset': vin_offset,
'iin_gain': iin_gain, 'iin_offset': iin_offset,
'vref_p': vref_p, 'vref_n': vref_n, 'gnd': gnd}
return boardData
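    # Example (illustrative values only; real gains and offsets come from
    # calibration) of preparing and writing board data for an HRADC-FAX-A:
    #
    #     boardData = drs.InitHRADC_BoardData(serial=20230001, variant='HRADC-FAX-A',
    #                                         rburden=0, vin_gain=1.2, iin_gain=1.2)
    #     drs.WriteHRADC_BoardData(hradcID=0, boardData=boardData)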
def WriteHRADC_BoardData(self,hradcID,boardData):
        print('Configuring board in UFM mode...')
self.ConfigHRADCOpMode(hradcID,1)
time.sleep(0.5)
        print('\nSending serial number...')
ufmdata_16 = self._convertToUint16List(boardData['serial'],'Uint64')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['serial'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending variant...')
ufmdata_16 = self._convertToUint16List(hradcVariant.index(boardData['variant']),'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['variant'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending rburden...')
ufmdata_16 = self._convertToUint16List(boardData['rburden'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['rburden'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending calibdate...')
ufmdata_16 = self._convertToUint16List(boardData['tm_mday'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate'],ufmdata_16[i])
time.sleep(0.1)
# Month
ufmdata_16 = self._convertToUint16List(boardData['tm_mon'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+1,ufmdata_16[i])
time.sleep(0.1)
# Year
ufmdata_16 = self._convertToUint16List(boardData['tm_year'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+2,ufmdata_16[i])
time.sleep(0.1)
# Hour
ufmdata_16 = self._convertToUint16List(boardData['tm_hour'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+3,ufmdata_16[i])
time.sleep(0.1)
# Minutes
ufmdata_16 = self._convertToUint16List(boardData['tm_min'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+4,ufmdata_16[i])
time.sleep(0.1)
        print('\nSending calibtemp...')
ufmdata_16 = self._convertToUint16List(boardData['calibtemp'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibtemp'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vin_gain...')
ufmdata_16 = self._convertToUint16List(boardData['vin_gain'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vin_gain'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vin_offset...')
ufmdata_16 = self._convertToUint16List(boardData['vin_offset'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vin_offset'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending iin_gain...')
ufmdata_16 = self._convertToUint16List(boardData['iin_gain'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['iin_gain'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending iin_offset...')
ufmdata_16 = self._convertToUint16List(boardData['iin_offset'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['iin_offset'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vref_p...')
ufmdata_16 = self._convertToUint16List(boardData['vref_p'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vref_p'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vref_n...')
ufmdata_16 = self._convertToUint16List(boardData['vref_n'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vref_n'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending gnd...')
ufmdata_16 = self._convertToUint16List(boardData['gnd'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['gnd'],ufmdata_16[i])
time.sleep(0.1)
        print('Putting the board back in Sampling mode...')
self.ConfigHRADCOpMode(hradcID,0)
def ReadHRADC_BoardData(self,hradcID):
        print('Configuring board in UFM mode...')
print(self.ConfigHRADCOpMode(hradcID,1))
time.sleep(0.5)
        print('Extracting board data...')
payload_size = self.size_to_hex(1+2) #Payload: ID + hradcID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ReadHRADC_BoardData'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
print(self.ser.read(6))
        print('Reading board data...')
self.read_var(self.index_to_hex(50+hradcID))
reply_msg = self.ser.read(1+1+2+56+1)
print(reply_msg)
print(len(reply_msg))
val = struct.unpack('BBHLLHHHHHHfffffffffB',reply_msg)
try:
boardData = self.InitHRADC_BoardData(val[3]+val[4]*pow(2,32),val[5],
val[6],val[7],val[8],val[9],
hradcVariant[val[10]],val[11],
val[12],val[13],val[14],val[15],
val[16],val[17],val[18],val[19])
except:
            print('\n### Board not initialized ###\n')
            boardData = self.InitHRADC_BoardData(serial = int(input('Enter the S/N: ')))
print('\n')
        print('Putting the board back in Sampling mode...')
print(self.ConfigHRADCOpMode(hradcID,0))
time.sleep(0.5)
return boardData
def UpdateHRADC_BoardData(self,hradcID):
variant = len(hradcVariant)
while variant >= len(hradcVariant) or variant < 0:
variant = int(input("Enter HRADC variant number:\n 0: HRADC-FBP\n 1: HRADC-FAX-A\n 2: HRADC-FAX-B\n 3: HRADC-FAX-C\n 4: HRADC-FAX-D\n\n>>> "))
variant = hradcVariant[variant]
boardData = self.ReadHRADC_BoardData(hradcID)
boardData['variant'] = variant
boardData['vin_offset'] = np.float32(0)
boardData['iin_offset'] = np.float32(0)
if variant == 'HRADC-FBP':
boardData['rburden'] = np.float32(20)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-A':
boardData['rburden'] = np.float32(0)
boardData['vin_gain'] = np.float32(6.0/5.0)
boardData['iin_gain'] = np.float32(6.0/5.0)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-B':
boardData['rburden'] = np.float32(0)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-C':
boardData['rburden'] = np.float32(5)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-D':
boardData['rburden'] = np.float32(1)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
print('\n\nBoard data from HRADC of slot #' + str(hradcID) + ' is about to be overwritten by the following data:')
print(boardData)
i = input('\n Do you want to proceed? [y/n]: ')
        if i == 'Y' or i == 'y':
self.ConfigHRADCOpMode(hradcID,1)
time.sleep(0.1)
self.EraseHRADC_UFM(hradcID)
time.sleep(0.5)
self.ResetHRADCBoards(1)
time.sleep(0.5)
self.ResetHRADCBoards(0)
time.sleep(1.5)
self.WriteHRADC_BoardData(hradcID,boardData)
boardData_new = self.ReadHRADC_BoardData(hradcID)
print(boardData_new)
print(boardData)
if boardData_new == boardData:
print('\n\n ### Operation was successful !!! ### \n\n')
else:
print('\n\n ### Operation failed !!! ### \n\n')
return [boardData, boardData_new]
def GetHRADCs_BoardData(self,numHRADC):
boardData_list = []
for i in range(numHRADC):
boardData_list.append(self.ReadHRADC_BoardData(i))
return boardData_list
def UdcEepromTest(self, rw, data=None):
if data is not None:
payload_size = self.size_to_hex(12)
hex_rw = self.double_to_hex(rw)
hex_byte_0 = self.double_to_hex(data[0])
hex_byte_1 = self.double_to_hex(data[1])
hex_byte_2 = self.double_to_hex(data[2])
hex_byte_3 = self.double_to_hex(data[3])
hex_byte_4 = self.double_to_hex(data[4])
hex_byte_5 = self.double_to_hex(data[5])
hex_byte_6 = self.double_to_hex(data[6])
hex_byte_7 = self.double_to_hex(data[7])
hex_byte_8 = self.double_to_hex(data[8])
hex_byte_9 = self.double_to_hex(data[9])
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcEepromTest'))+hex_rw[0]+ \
hex_byte_0[0] + hex_byte_1[0] + hex_byte_2[0] + hex_byte_3[0] + hex_byte_4[0] + hex_byte_5[0]+ \
hex_byte_6[0] + hex_byte_7[0] + hex_byte_8[0] + hex_byte_9[0]
print(send_packet.encode('ISO-8859-1'))
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(15)
def UdcFlashTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcFlashTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcRamTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcRamTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcAdcTest(self, rw, channel):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_channel = self.double_to_hex(channel)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcAdcTest'))+hex_rw[0]+hex_channel[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcSensorTempTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcSensorTempTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcRtcTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcRtcTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcUartTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcUartTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcIoExpanderTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcIoExpanderTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
# def UdcEthernetTest(self, rw):
# payload_size = self.size_to_hex(2)
# hex_rw = self.double_to_hex(rw)
# send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcEthernetTest'))+hex_rw
# self.ser.write(send_packet.encode('ISO-8859-1'))
# return self.ser.read()
def UdcIsoPlaneTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcIsoPlaneTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcLoopBackTest(self, rw, channel):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_channel = self.double_to_hex(channel)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcLoopBackTest'))+hex_rw[0]+hex_channel[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcLedTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcLedTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcBuzzerTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcBuzzerTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcComTest(self, rw, val):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_value = self.double_to_hex(val)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcComTest'))+hex_rw[0]+hex_value[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
time.sleep(0.2)
return self.ser.read(6)
def UdcI2cIsoTest(self, rw, val):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_value = self.double_to_hex(val)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcI2cIsoTest'))+hex_rw[0]+hex_value[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def SetISlowRefx4(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetISlowRefx4'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SetPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClearPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('ClearPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ReadPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('ReadPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableBuzzer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('EnableBuzzer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableBuzzer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('DisableBuzzer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SendUartData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SendUartData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetUartData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetUartData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SendCanData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SendCanData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetCanData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetCanData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetI2cData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetI2cData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def read_ps_status(self):
self.read_var(self.index_to_hex(ListVar_v2_1.index('ps_status')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
status = {}
status['state'] = ListOpMode_v2_1[(val[3] & 0b0000000000001111)]
status['open_loop'] = (val[3] & 0b0000000000010000) >> 4
status['interface'] = (val[3] & 0b0000000001100000) >> 5
status['active'] = (val[3] & 0b0000000010000000) >> 7
status['model'] = ListPSModels_v2_1[(val[3] & 0b0001111100000000) >> 8]
status['unlocked'] = (val[3] & 0b0010000000000000) >> 13
#print(status)
return status
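    # Bit layout of the 16-bit ps_status word decoded above:
    #   bits [3:0]   state      -> index into ListOpMode_v2_1
    #   bit  [4]     open_loop
    #   bits [6:5]   interface
    #   bit  [7]     active
    #   bits [12:8]  model      -> index into ListPSModels_v2_1
    #   bit  [13]    unlocked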
def set_ps_name(self,ps_name):
if type(ps_name) == str:
for n in range(len(ps_name)):
self.set_param('PS_Name', n, float(ord(ps_name[n])))
for i in range(n+1,64):
self.set_param('PS_Name', i, float(ord(" ")))
def get_ps_name(self):
ps_name = ""
for n in range(64):
ps_name = ps_name + chr(int(self.get_param('PS_Name', n)))
            if ps_name[-3:] == '   ':  # three consecutive padding spaces mark the end of the name
ps_name = ps_name[:n-2]
break
return ps_name
def set_slowref(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_slowref_fbp(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_slowref_readback_mon(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_readback_mon'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def set_slowref_fbp_readback_mon(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp_readback_mon'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(21)
if(len(reply_msg) == 6):
return reply_msg
else:
val = struct.unpack('BBHffffB',reply_msg)
return [val[3],val[4],val[5],val[6]]
def set_slowref_readback_ref(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_readback_ref'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def set_slowref_fbp_readback_ref(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp_readback_ref'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(21)
if(len(reply_msg) == 6):
return reply_msg
else:
val = struct.unpack('BBHffffB',reply_msg)
return [val[3],val[4],val[5],val[6]]
def set_param(self, param_id, n, value):
payload_size = self.size_to_hex(1+2+2+4) #Payload: ID + param id + [n] + value
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
hex_value = self.float_to_hex(value)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_param'))+hex_id+hex_n+hex_value
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if reply_msg[4] == 8:
print('Invalid parameter')
return reply_msg
def get_param(self, param_id, n = 0):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + param id + [n]
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('get_param'))+hex_id+hex_n
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
if len(reply_msg) == 9:
val = struct.unpack('BBHfB',reply_msg)
return val[3]
else:
#print('Invalid parameter')
return float('nan')
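    # Parameter access sketch: param_id may be a name from ListParameters or a
    # raw numeric ID; values are always exchanged as floats:
    #
    #     drs.set_param('RS485_Address', 0, 5.0)
    #     addr = drs.get_param('RS485_Address')   # returns float('nan') on failure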
def save_param_eeprom(self, param_id, n = 0, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + param id + [n] + memory type
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_param_eeprom'))+hex_id+hex_n+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if reply_msg[4] == 8:
print('Invalid parameter')
return reply_msg
def load_param_eeprom(self, param_id, n = 0, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + param id + [n] + memory type
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_param_eeprom'))+hex_id+hex_n+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if reply_msg[4] == 8:
print('Invalid parameter')
return reply_msg
def save_param_bank(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_param_bank'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def load_param_bank(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_param_bank'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_param_bank(self, param_file):
fbp_param_list = []
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'PS_Name':
print(str(param[0]) + "[0]: " + str(param[1]))
print(self.set_ps_name(str(param[1])))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
#self.save_param_bank()
def get_param_bank(self, list_param = ListParameters, timeout = 0.5, print_modules = True):
timeout_old = self.ser.timeout
#self.ser.timeout = 0.05
param_bank = []
for param_name in list_param:
param_row = [param_name]
for n in range(64):
if param_name == 'PS_Name':
p = self.get_ps_name()
param_row.append(p)
#if(print_modules):
#print('PS_Name: ' + p)
self.ser.timeout = timeout
break
else:
p = self.get_param(param_name,n)
if math.isnan(p):
break
param_row.append(p)
#if(print_modules):
#print(param_name + "[" + str(n) + "]: " + str(p))
if(print_modules):
print(param_row)
param_bank.append(param_row)
self.ser.timeout = timeout_old
return param_bank
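    # Sketch: read the full parameter bank and dump it to CSV
    # (store_param_bank_csv prompts interactively for the file name):
    #
    #     bank = drs.get_param_bank(print_modules=False)
    #     drs.store_param_bank_csv(bank)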
def store_param_bank_csv(self, bank):
        filename = input('Enter the output file name: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=',')
for param_row in bank:
writer.writerow(param_row)
def enable_onboard_eeprom(self):
self.set_param('Enable_Onboard_EEPROM',0,0)
self.save_param_eeprom('Enable_Onboard_EEPROM',0,2)
def disable_onboard_eeprom(self):
self.set_param('Enable_Onboard_EEPROM',0,1)
self.save_param_eeprom('Enable_Onboard_EEPROM',0,2)
def set_dsp_coeffs(self, dsp_class, dsp_id, coeffs_list = [0,0,0,0,0,0,0,0,0,0,0,0]):
coeffs_list_full = self.format_list_size(coeffs_list, NUM_MAX_COEFFS_DSP)
payload_size = self.size_to_hex(1+2+2+4*NUM_MAX_COEFFS_DSP)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_coeffs = self.float_list_to_hex(coeffs_list_full)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_dsp_coeffs'))+hex_dsp_class+hex_dsp_id+hex_coeffs
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
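    # Sketch: coeffs_list is zero-padded to NUM_MAX_COEFFS_DSP before being sent.
    # Assuming dsp_class indexes dsp_classes_names (e.g. 3 -> 'DSP_PI', which uses
    # 4 coefficients), a PI module could be configured as below; the coefficient
    # order shown is an assumption, not documented here:
    #
    #     drs.set_dsp_coeffs(3, 0, [kp, ki, u_max, u_min])
    #     drs.save_dsp_coeffs_eeprom(3, 0)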
def get_dsp_coeff(self, dsp_class, dsp_id, coeff):
payload_size = self.size_to_hex(1+2+2+2)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_coeff = self.double_to_hex(coeff)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('get_dsp_coeff'))+hex_dsp_class+hex_dsp_id+hex_coeff
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
#print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def save_dsp_coeffs_eeprom(self, dsp_class, dsp_id, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_dsp_coeffs_eeprom'))+hex_dsp_class+hex_dsp_id+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def load_dsp_coeffs_eeprom(self, dsp_class, dsp_id, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_dsp_coeffs_eeprom'))+hex_dsp_class+hex_dsp_id+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def save_dsp_modules_eeprom(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_dsp_modules_eeprom'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def load_dsp_modules_eeprom(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_dsp_modules_eeprom'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_udc(self):
        reply = input('\nThis command resets the UDC board firmware and therefore only runs when the power supply is turned off. \nIf you only want to reset interlocks, use reset_interlocks(). \n\nAre you sure you want to proceed? [Y/N]: ')
if reply == 'Y' or reply == 'y':
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_udc'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
def run_bsmp_func(self,id_func,print_msg = 0):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(id_func)
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if print_msg:
print(reply_msg)
return reply_msg
def run_bsmp_func_all_ps(self,p_func,add_list,arg = None,delay = 0.5, print_reply = 1):
old_add = self.GetSlaveAdd()
for add in add_list:
self.SetSlaveAdd(add)
            if arg is None:
r = p_func()
else:
r = p_func(arg)
if print_reply:
print('\n Add ' + str(add))
print(r)
time.sleep(delay)
self.SetSlaveAdd(old_add)
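    # Usage sketch: broadcast a parameterless BSMP function to several slave
    # addresses (the address list below is only an example):
    #   drs.run_bsmp_func_all_ps(drs.reset_counters, [1, 2, 3, 4], delay=0.5)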
def cfg_source_scope(self,p_source):
payload_size = self.size_to_hex(1+4) #Payload: ID + p_source
hex_op_mode = self.uint32_to_hex(p_source)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_source_scope'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_freq_scope(self,freq):
payload_size = self.size_to_hex(1+4) #Payload: ID + freq
hex_op_mode = self.float_to_hex(freq)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_freq_scope'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_duration_scope(self,duration):
payload_size = self.size_to_hex(1+4) #Payload: ID + duration
hex_op_mode = self.float_to_hex(duration)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_duration_scope'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def enable_scope(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('enable_scope'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def disable_scope(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('disable_scope'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def get_scope_vars(self):
print('\n### Scope Variables ###\n')
print('Frequency: ' + str((round(self.read_bsmp_variable(25,'float'),3))))
print('Duration: ' + str((round(self.read_bsmp_variable(26,'float'),3))))
print('Source Data: ' + str((round(self.read_bsmp_variable(27,'uint32_t'),3))))
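    # Usage sketch for the scope commands (the meaning of p_source is
    # firmware-specific, so the 0 below is just a placeholder):
    #   drs.cfg_source_scope(0)
    #   drs.cfg_freq_scope(1000.0)    # sampling frequency, in Hz
    #   drs.cfg_duration_scope(0.1)   # acquisition duration, in s
    #   drs.enable_scope()
    #   drs.get_scope_vars()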
def sync_pulse(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('sync_pulse'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def select_op_mode(self,op_mode):
payload_size = self.size_to_hex(1+2) #Payload: ID + enable
hex_op_mode = self.double_to_hex(ListOpMode_v2_1.index(op_mode))
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('select_op_mode'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_serial_termination(self,term_enable):
payload_size = self.size_to_hex(1+2) #Payload: ID + enable
hex_enable = self.double_to_hex(term_enable)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_serial_termination'))+hex_enable
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_command_interface(self,interface):
payload_size = self.size_to_hex(1+2) #Payload: ID + enable
hex_interface = self.double_to_hex(interface)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_command_interface'))+hex_interface
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def unlock_udc(self,password):
payload_size = self.size_to_hex(1+2) #Payload: ID + password
hex_password = self.double_to_hex(password)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('unlock_udc'))+hex_password
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def lock_udc(self,password):
payload_size = self.size_to_hex(1+2) #Payload: ID + password
hex_password = self.double_to_hex(password)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('lock_udc'))+hex_password
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_counters(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_counters'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_siggen(self,sig_type,num_cycles,freq,amplitude,offset,aux0,aux1,aux2,aux3):
payload_size = self.size_to_hex(1+2+2+4+4+4+4*4)
hex_sig_type = self.double_to_hex(ListSigGenTypes_v2_1.index(sig_type))
hex_num_cycles = self.double_to_hex(num_cycles)
hex_freq = self.float_to_hex(freq)
hex_amplitude = self.float_to_hex(amplitude)
hex_offset = self.float_to_hex(offset)
hex_aux0 = self.float_to_hex(aux0)
hex_aux1 = self.float_to_hex(aux1)
hex_aux2 = self.float_to_hex(aux2)
hex_aux3 = self.float_to_hex(aux3)
send_packet = self.ComFunction + payload_size + self.index_to_hex(ListFunc_v2_1.index('cfg_siggen')) + hex_sig_type + hex_num_cycles + hex_freq + hex_amplitude + hex_offset + hex_aux0 + hex_aux1 + hex_aux2 + hex_aux3
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_siggen(self,freq,amplitude,offset):
payload_size = self.size_to_hex(1+4+4+4)
hex_freq = self.float_to_hex(freq)
hex_amplitude = self.float_to_hex(amplitude)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction + payload_size + self.index_to_hex(ListFunc_v2_1.index('set_siggen')) + hex_freq + hex_amplitude + hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def enable_siggen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('enable_siggen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def disable_siggen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('disable_siggen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
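    # Usage sketch for the signal generator ('Sine' is assumed to be an entry of
    # ListSigGenTypes_v2_1; amplitude/offset follow the controlled quantity unit):
    #   drs.cfg_siggen('Sine', 0, 1.0, 5.0, 0.0, 0, 0, 0, 0)
    #   drs.enable_siggen()
    #   ...
    #   drs.disable_siggen()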
def cfg_wfmref(self, idx, sync_mode, frequency, gain = 1, offset = 0):
payload_size = self.size_to_hex(1+2+2+4+4+4) #Payload: ID + idx + sync_mode + frequency + gain + offset
hex_idx = self.double_to_hex(idx)
hex_mode = self.double_to_hex(sync_mode)
hex_freq = self.float_to_hex(frequency)
hex_gain = self.float_to_hex(gain)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_wfmref'))+hex_idx+hex_mode+hex_freq+hex_gain+hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def select_wfmref(self,idx):
payload_size = self.size_to_hex(1+2) #Payload: ID + idx
hex_idx = self.double_to_hex(idx)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('select_wfmref'))+hex_idx
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_wfmref(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('reset_wfmref'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
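    # Usage sketch for the WfmRef commands (sync_mode = 0 is only illustrative):
    #   drs.cfg_wfmref(0, 0, 1000.0)   # idx, sync_mode, frequency; gain = 1, offset = 0
    #   drs.select_wfmref(0)
    #   drs.reset_wfmref()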
def get_wfmref_vars(self,curve_id):
print('\n### WfmRef ' + str(curve_id) + ' Variables ###\n')
print('Length: ' + str((round(self.read_bsmp_variable(20+curve_id*3,'uint32_t'),3)-round(self.read_bsmp_variable(19+curve_id*3,'uint32_t'),3))/2+1))
print('Index: ' + str((round(self.read_bsmp_variable(21+curve_id*3,'uint32_t'),3)-round(self.read_bsmp_variable(19+curve_id*3,'uint32_t'),3))/2+1))
print('WfmRef Selected: ' + str(round(self.read_bsmp_variable(14,'uint16_t'),3)))
print('Sync Mode: ' + str(round(self.read_bsmp_variable(15,'uint16_t'),3)))
print('Frequency: ' + str(round(self.read_bsmp_variable(16,'float'),3)) + " Hz")
print('Gain: ' + str(round(self.read_bsmp_variable(17,'float'),3)))
print('Offset: ' + str(round(self.read_bsmp_variable(18,'float'),3)))
def read_csv_file(self,filename, type = 'float'):
csv_list = []
with open(filename, newline = '') as f:
reader = csv.reader(f)
for row in reader:
if type == 'float':
row_converted = float(row[0])
elif type == 'string' or type == 'str':
row_converted = str(row[0])
csv_list.append(row_converted)
print('Length of list: ' + str(len(csv_list)) + '\n')
return csv_list
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    ======================================================================
    Methods for reading BSMP variable values
    Each method returns the double/float value of the respective variable
    ======================================================================
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
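    # Minimal sketch, assuming `drs` is a connected instance: variable 1 is read
    # as the setpoint (float) and variable 5 as the sync pulse counter (uint32_t),
    # mirroring the IDs used by read_vars_common() below.
    #   setpoint = drs.read_bsmp_variable(1, 'float')
    #   pulses   = drs.read_bsmp_variable(5, 'uint32_t')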
def read_bsmp_variable(self,id_var,type_var,print_msg = 0):
self.read_var(self.index_to_hex(id_var))
reply_msg = self.ser.read(typeSize[type_var])
if print_msg:
print(reply_msg)
val = struct.unpack(typeFormat[type_var],reply_msg)
return val[3]
def read_bsmp_variable_gen(self,id_var,size_bytes,print_msg = 0):
self.read_var(self.index_to_hex(id_var))
reply_msg = self.ser.read(size_bytes+5)
if print_msg:
print(reply_msg)
return reply_msg
def read_udc_arm_version(self):
self.read_var(self.index_to_hex(3))
reply_msg = self.ser.read(133)
val = struct.unpack('16s',reply_msg[4:20])
return val[0].decode('utf-8')
def read_udc_c28_version(self):
self.read_var(self.index_to_hex(3))
reply_msg = self.ser.read(133)
val = struct.unpack('16s',reply_msg[20:36])
return val[0].decode('utf-8')
def read_udc_version(self):
print('\n ARM: ' + self.read_udc_arm_version())
print(' C28: ' + self.read_udc_c28_version())
def Read_iLoad1(self):
self.read_var(self.index_to_hex(ListVar.index('iLoad1')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iLoad2(self):
self.read_var(self.index_to_hex(ListVar.index('iLoad2')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod1(self):
self.read_var(self.index_to_hex(ListVar.index('iMod1')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod2(self):
self.read_var(self.index_to_hex(ListVar.index('iMod2')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod3(self):
self.read_var(self.index_to_hex(ListVar.index('iMod3')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod4(self):
self.read_var(self.index_to_hex(ListVar.index('iMod4')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vLoad(self):
self.read_var(self.index_to_hex(ListVar.index('vLoad')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod1(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod1')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod2(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod2')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod3(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod3')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod4(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod4')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod1(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod1')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod2(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod2')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod3(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod3')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod4(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod4')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp1(self):
self.read_var(self.index_to_hex(ListVar.index('temp1')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp2(self):
self.read_var(self.index_to_hex(ListVar.index('temp2')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp3(self):
self.read_var(self.index_to_hex(ListVar.index('temp3')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp4(self):
self.read_var(self.index_to_hex(ListVar.index('temp4')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_ps_OnOff(self):
self.read_var(self.index_to_hex(ListVar.index('ps_OnOff')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_OpMode(self):
self.read_var(self.index_to_hex(ListVar.index('ps_OpMode')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_Remote(self):
self.read_var(self.index_to_hex(ListVar.index('ps_Remote')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_OpenLoop(self):
self.read_var(self.index_to_hex(ListVar.index('ps_OpenLoop')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_SoftInterlocks(self):
op_bin = 1
ActiveSoftInterlocks = []
        SoftInterlocksList = ['N/A', 'Load 1 overvoltage', 'N/A',\
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Load 2 overvoltage', 'N/A', \
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Load 3 overvoltage', 'N/A', \
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Load 4 overvoltage', 'N/A', \
                              'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A']
self.read_var(self.index_to_hex(ListVar.index('ps_SoftInterlocks')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHIB',reply_msg)
        print('Active soft interlocks:')
for i in range(len('{0:b}'.format(val[3]))):
if (val[3] & (op_bin << i)) == 2**i:
ActiveSoftInterlocks.append(SoftInterlocksList[i])
print(SoftInterlocksList[i])
print('---------------------------------------------------------------')
return val[3]
def Read_ps_HardInterlocks(self):
op_bin = 1
ActiveHardInterlocks = []
        HardInterlocksList = ['Load 1 overcurrent', 'N/A', \
                              'Module 1 DC-Link overvoltage', \
                              'Module 1 DC-Link undervoltage', \
                              'Module 1 DC-Link input relay fault', \
                              'Module 1 DC-Link input fuse fault', \
                              'Module 1 drivers fault', \
                              'Module 1 overtemperature', \
                              'Load 2 overcurrent', 'N/A', \
                              'Module 2 DC-Link overvoltage', \
                              'Module 2 DC-Link undervoltage', \
                              'Module 2 DC-Link input relay fault', \
                              'Module 2 DC-Link input fuse fault', \
                              'Module 2 drivers fault', \
                              'Module 2 overtemperature', \
                              'Load 3 overcurrent', 'N/A', \
                              'Module 3 DC-Link overvoltage', \
                              'Module 3 DC-Link undervoltage', \
                              'Module 3 DC-Link input relay fault', \
                              'Module 3 DC-Link input fuse fault', \
                              'Module 3 drivers fault', \
                              'Module 3 overtemperature', \
                              'Load 4 overcurrent', 'N/A', \
                              'Module 4 DC-Link overvoltage', \
                              'Module 4 DC-Link undervoltage', \
                              'Module 4 DC-Link input relay fault', \
                              'Module 4 DC-Link input fuse fault', \
                              'Module 4 drivers fault', \
                              'Module 4 overtemperature']
self.read_var(self.index_to_hex(ListVar.index('ps_HardInterlocks')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHIB',reply_msg)
        print('Active hard interlocks:')
for i in range(len('{0:b}'.format(val[3]))):
if (val[3] & (op_bin << i)) == 2**i:
ActiveHardInterlocks.append(HardInterlocksList[i])
print(HardInterlocksList[i])
print('---------------------------------------------------------------')
return val[3]
def Read_iRef(self):
self.read_var(self.index_to_hex(ListVar.index('iRef')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_wfmRef_Gain(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_Gain')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_wfmRef_Offset(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_Offset')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Enable(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Enable')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_sigGen_Type(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Type')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_sigGen_Ncycles(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Ncycles')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_sigGen_PhaseStart(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_PhaseStart')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_PhaseEnd(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_PhaseEnd')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Freq(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Freq')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Amplitude(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Amplitude')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Offset(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Offset')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Aux(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Aux')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_dp_ID(self):
self.read_var(self.index_to_hex(ListVar.index('dp_ID')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_dp_Class(self):
self.read_var(self.index_to_hex(ListVar.index('dp_Class')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_dp_Coeffs(self):
self.read_var(self.index_to_hex(ListVar.index('dp_Coeffs')))
reply_msg = self.ser.read(69)
val = struct.unpack('BBHffffffffffffffffB',reply_msg)
return [val[3],val[4],val[5],val[6],val[7],val[8],val[9],val[10],val[11],val[12],val[13],val[14],val[15],val[16],val[17],val[18]]
def Read_ps_Model(self):
self.read_var(self.index_to_hex(ListVar.index('ps_Model')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val
def read_ps_model(self):
reply_msg = self.Read_ps_Model()
return ListPSModels[reply_msg[3]]
def Read_wfmRef_PtrBufferStart(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferStart')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHIB',reply_msg)
return val[3]
def Read_wfmRef_PtrBufferEnd(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferEnd')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHIB',reply_msg)
return val[3]
def Read_wfmRef_PtrBufferK(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferK')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHIB',reply_msg)
return val[3]
def Read_wfmRef_SyncMode(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_SyncMode')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_iRef1(self):
self.read_var(self.index_to_hex(45))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iRef2(self):
self.read_var(self.index_to_hex(46))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iRef3(self):
self.read_var(self.index_to_hex(47))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iRef4(self):
self.read_var(self.index_to_hex(48))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_counterSetISlowRefx4(self):
self.read_var(self.index_to_hex(49))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    ======================================================================
    Methods for writing BSMP variable values
    Each method returns the reply bytes of the message
    ======================================================================
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
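    # Minimal sketch, assuming `drs` is a connected instance:
    #   drs.Write_sigGen_Freq(2.0)        # Hz
    #   drs.Write_sigGen_Amplitude(1.5)   # same unit as the controlled quantity
    #   drs.Write_sigGen_Offset(0.0)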
def Write_sigGen_Freq(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Freq'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Amplitude(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Amplitude'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Offset(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Offset'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Aux(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Aux'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_ID(self,double_value):
hex_double = self.double_to_hex(double_value)
send_packet = self.ComWriteVar+self.WriteDoubleSizePayload+self.index_to_hex(ListVar.index('dp_ID'))+hex_double
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_Class(self,double_value):
hex_double = self.double_to_hex(double_value)
send_packet = self.ComWriteVar+self.WriteDoubleSizePayload+self.index_to_hex(ListVar.index('dp_Class'))+hex_double
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_Coeffs(self,list_float):
hex_float_list = []
#list_full = list_float[:]
#while(len(list_full) < self.DP_MODULE_MAX_COEFF):
# list_full.append(0)
list_full = [0 for i in range(self.DP_MODULE_MAX_COEFF)]
list_full[:len(list_float)] = list_float[:]
for float_value in list_full:
hex_float = self.float_to_hex(float(float_value))
hex_float_list.append(hex_float)
str_float_list = ''.join(hex_float_list)
payload_size = self.size_to_hex(1+4*self.DP_MODULE_MAX_COEFF) #Payload: ID + 16floats
send_packet = self.ComWriteVar+payload_size+self.index_to_hex(ListVar.index('dp_Coeffs'))+str_float_list
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    ======================================================================
    Methods for writing BSMP curves
    Each method returns the reply bytes of the message
    ======================================================================
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
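    # Minimal sketch, assuming `ramp` is a list of floats and 2048 points per
    # block (an assumption derived from the 8192-byte read in Recv_wfmRef_Curve):
    #   drs.Send_wfmRef_Curve(0, ramp[:2048])   # write block 0
    #   data_back = drs.Recv_wfmRef_Curve(0)    # read the same block back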
def Send_wfmRef_Curve(self,block_idx,data):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(ListCurv.index('wfmRef_Curve'))+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Recv_wfmRef_Curve(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('wfmRef_Curve'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+8192+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
for k in range(7,len(recv_msg)-1,4):
val.append(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer(self):
block_hex = struct.pack('>H',0).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('samplesBuffer'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+16384+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
try:
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
except:
pass
return val
def Send_fullwfmRef_Curve(self,block_idx,data):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(ListCurv.index('fullwfmRef_Curve'))+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Recv_fullwfmRef_Curve(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('fullwfmRef_Curve'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+16384+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
for k in range(7,len(recv_msg)-1,4):
val.append(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer_blocks(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('samplesBuffer_blocks'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
#t0 = time.time()
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+1024+1) #Address+Command+Size+ID+Block_idx+data+checksum
#print(time.time()-t0)
#print(recv_msg)
val = []
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer_allblocks(self):
buff = []
#self.DisableSamplesBuffer()
for i in range(0,16):
#t0 = time.time()
buff.extend(self.Recv_samplesBuffer_blocks(i))
#print(time.time()-t0)
#self.EnableSamplesBuffer()
return buff
def read_curve_block(self,curve_id,block_id):
block_hex = struct.pack('>H',block_id).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: curve_id + block_id
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(curve_id)+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
#t0 = time.time()
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+size_curve_block[curve_id]+1) #Address+Command+Size+ID+Block_idx+data+checksum
#print(time.time()-t0)
#print(recv_msg)
val = []
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
return val
def write_curve_block(self,curve_id,block_id,data):
block_hex = struct.pack('>H',block_id).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(curve_id)+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def write_wfmref(self,curve,data):
#curve = ListCurv_v2_1.index('wfmref')
block_size = int(size_curve_block[curve]/4)
print(block_size)
blocks = [data[x:x+block_size] for x in range(0, len(data), block_size)]
ps_status = self.read_ps_status()
wfmref_selected = self.read_bsmp_variable(14,'uint16_t')
if( (wfmref_selected == curve) and (ps_status['state'] == 'RmpWfm' or ps_status['state'] == 'MigWfm') ):
print("\n The specified curve ID is currently selected and PS is on " + ps_status['state'] + " state. Choose a different curve ID to proceed.\n")
else:
for block_id in range(len(blocks)):
self.write_curve_block(curve, block_id, blocks[block_id])
print(blocks[block_id])
def read_buf_samples_ctom(self):
buf = []
curve_id = ListCurv_v2_1.index('buf_samples_ctom')
ps_status = self.read_ps_status()
if ps_status['model'] == 'FBP':
for i in range(num_blocks_curves_fbp[curve_id]):
buf.extend(self.read_curve_block(curve_id,i))
else:
for i in range(num_blocks_curves_fax[curve_id]):
buf.extend(self.read_curve_block(curve_id,i))
return buf
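    # Minimal sketch: retrieve the C28 samples buffer after an acquisition,
    # assuming the scope/buffer has already been configured and triggered.
    #   samples = drs.read_buf_samples_ctom()
    #   print(len(samples))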
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    ======================================================================
    Serial functions
    ======================================================================
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
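    # Minimal session sketch (assumes a UDC answering at slave address 1 on
    # serial port 'COM2' and that SerialDRS() needs no constructor arguments):
    #   drs = SerialDRS()
    #   if drs.Connect('COM2'):
    #       drs.SetSlaveAdd(1)
    #       drs.read_udc_version()
    #       drs.Disconnect()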
def Connect(self,port='COM2',baud=6000000):
try:
SerialDRS.ser = serial.Serial(port,baud,timeout=1) #port format should be 'COM'+number
return True
except:
return False
def Disconnect(self):
if (self.ser.isOpen()):
try:
self.ser.close()
return True
except:
return False
def SetSlaveAdd(self,address):
self.SlaveAdd = struct.pack('B',address).decode('ISO-8859-1')
def GetSlaveAdd(self):
return struct.unpack('B',self.SlaveAdd.encode())[0]
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    ======================================================================
    Auxiliary functions
    ======================================================================
    '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
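    # Minimal sketch: poll an FBP power supply five times, half a second apart,
    # assuming the slave address was already selected with SetSlaveAdd():
    #   drs.read_vars_fbp(n=5, dt=0.5)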
def read_vars_common(self, print_all = False):
loop_state = ["Closed Loop","Open Loop"]
ps_status = self.read_ps_status()
if ps_status['open_loop'] == 0:
if (ps_status['model'] == 'FAC_ACDC') or (ps_status['model'] == 'FAC_2S_ACDC') or (ps_status['model'] == 'FAC_2P4S_ACDC'):
setpoint_unit = " V"
else:
setpoint_unit = " A"
else:
setpoint_unit = " %"
print("\nPS Model: " + ps_status['model'])
print("State: " + ps_status['state'])
print("Loop State: " + loop_state[ps_status['open_loop']])
print("\nSetpoint: " + str(round(self.read_bsmp_variable(1,'float'),3)) + setpoint_unit)
print("Reference: " + str(round(self.read_bsmp_variable(2,'float'),3)) + setpoint_unit)
if print_all:
print(self.read_ps_status())
print("\nCounter set_slowref: " + str(round(self.read_bsmp_variable(4,'uint32_t'),3)))
print("Counter sync pulse: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
self.get_siggen_vars()
self.get_wfmref_vars(0)
self.get_wfmref_vars(1)
self.get_scope_vars()
def decode_interlocks(self,reg_interlocks,list_interlocks):
active_interlocks = []
for i in range(32):
if(reg_interlocks & (1 << i)):
active_interlocks.append(list_interlocks[i])
print('\t' + list_interlocks[i])
return active_interlocks
def read_vars_fbp(self, n = 1, dt = 0.5):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fbp_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fbp_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " V")
print("Load Resistance: " + str(abs(round(self.read_bsmp_variable(34,'float') / self.read_bsmp_variable(33,'float'),3))) + " Ohm")
print("Load Power: " + str(abs(round(self.read_bsmp_variable(34,'float') * self.read_bsmp_variable(33,'float'),3))) + " W")
print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " V")
print("Heat-Sink Temp: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " °C")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
time.sleep(dt)
except:
pass
def read_vars_fbp_dclink(self, n = 1, dt = 0.5):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("\nHard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fbp_dclink_hard_interlocks)
print("\nModules status: " + str(round(self.read_bsmp_variable(33,'uint32_t'),3)))
print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " V")
print("PS1 Voltage: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " V")
print("PS2 Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("PS3 Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("Dig Pot Tap: " + str(round(self.read_bsmp_variable(38,'uint8_t'),3)))
time.sleep(dt)
except:
pass
def read_vars_fac_acdc(self, n = 1, dt = 0.5, iib = 1):
#try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_acdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_acdc_hard_interlocks)
iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
print("\nIIB IS Interlocks: " + str(iib_is_itlks))
if(iib_is_itlks):
self.decode_interlocks(iib_is_itlks, list_fac_acdc_iib_is_interlocks)
iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
print("IIB IS Alarms: " + str(iib_is_alarms))
if(iib_is_alarms):
self.decode_interlocks(iib_is_alarms, list_fac_acdc_iib_is_alarms)
iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
if(iib_cmd_itlks):
self.decode_interlocks(iib_cmd_itlks, list_fac_acdc_iib_cmd_interlocks)
iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
if(iib_cmd_alarms):
self.decode_interlocks(iib_cmd_alarms, list_fac_acdc_iib_cmd_alarms)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
if(iib):
print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
time.sleep(dt)
#except:
# pass
def read_vars_fac_dcdc(self, n = 1, dt = 0.5, iib = 1):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
print("WfmRef Index: " + str( (round(self.read_bsmp_variable(20,'uint32_t'),3) - round(self.read_bsmp_variable(18,'uint32_t'),3))/2 + 1))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_dcdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_dcdc_hard_interlocks)
iib_itlks = self.read_bsmp_variable(51,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(52,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_dcdc_iib_alarms)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("\nDuty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
if(iib):
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " A")
print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " %")
print("IIB Interlocks: " + str(round(self.read_bsmp_variable(51,'uint32_t'),3)))
print("IIB Alarms: " + str(round(self.read_bsmp_variable(52,'uint32_t'),3)))
time.sleep(dt)
except:
pass
def read_vars_fac_dcdc_ema(self, n = 1, dt = 0.5, iib = 0):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_dcdc_ema_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_dcdc_ema_hard_interlocks)
iib_itlks = self.read_bsmp_variable(49,'uint32_t')
print("IIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_dcdc_ema_iib_interlocks)
iib_alarms = self.read_bsmp_variable(50,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_dcdc_ema_iib_alarms)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)))
print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)))
print("\nDuty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)))
if(iib):
print("\nIIB Input Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("IIB IGBT 1 Temp: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " °C")
print("IIB IGBT 2 Temp: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " %")
print("IIB Interlocks: " + str(round(self.read_bsmp_variable(49,'uint32_t'),3)))
print("IIB Alarms: " + str(round(self.read_bsmp_variable(50,'uint32_t'),3)))
time.sleep(dt)
except:
pass
def read_vars_fac_2s_acdc(self, n = 1, add_mod_a = 2, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(add_mod_a)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print('\n *** MODULE A ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2s_acdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2s_acdc_hard_interlocks)
iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
print("\nIIB IS Interlocks: " + str(iib_is_itlks))
if(iib_is_itlks):
self.decode_interlocks(iib_is_itlks, list_fac_2s_acdc_iib_is_interlocks)
iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
print("IIB IS Alarms: " + str(iib_is_alarms))
if(iib_is_alarms):
self.decode_interlocks(iib_is_alarms, list_fac_2s_acdc_iib_is_alarms)
iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
if(iib_cmd_itlks):
self.decode_interlocks(iib_cmd_itlks, list_fac_2s_acdc_iib_cmd_interlocks)
iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
if(iib_cmd_alarms):
self.decode_interlocks(iib_cmd_alarms, list_fac_2s_acdc_iib_cmd_alarms)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
if(iib):
print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
self.SetSlaveAdd(add_mod_a+1)
print('\n *** MODULE B ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2s_acdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2s_acdc_hard_interlocks)
iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
print("\nIIB IS Interlocks: " + str(iib_is_itlks))
if(iib_is_itlks):
self.decode_interlocks(iib_is_itlks, list_fac_2s_acdc_iib_is_interlocks)
iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
print("IIB IS Alarms: " + str(iib_is_alarms))
if(iib_is_alarms):
self.decode_interlocks(iib_is_alarms, list_fac_2s_acdc_iib_is_alarms)
iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
if(iib_cmd_itlks):
self.decode_interlocks(iib_cmd_itlks, list_fac_2s_acdc_iib_cmd_interlocks)
iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
if(iib_cmd_alarms):
self.decode_interlocks(iib_cmd_alarms, list_fac_2s_acdc_iib_cmd_alarms)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
if(iib):
print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fac_2s_dcdc(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
iib_offset = 14*(iib-1)
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2s_dcdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2s_dcdc_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " %")
print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " %")
if(iib):
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(40 + iib_offset,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(41 + iib_offset,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(42 + iib_offset,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(43 + iib_offset,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(44 + iib_offset,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(45 + iib_offset,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(46 + iib_offset,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(47 + iib_offset,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(48 + iib_offset,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(49 + iib_offset,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(50 + iib_offset,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(51 + iib_offset,'float'),3)) + " %")
iib_itlks = self.read_bsmp_variable(52 + iib_offset,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_2s_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(53 + iib_offset,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_2s_dcdc_iib_alarms)
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fac_2p4s_acdc(self, n = 1, add_mod_a = 1, dt = 0.5, iib = 0):
self.read_vars_fac_2s_acdc(n, add_mod_a, dt, iib)
def read_vars_fac_2p4s_dcdc(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p4s_dcdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p4s_dcdc_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)))
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)))
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)))
print("\nArm Current 1: " + str(round(self.read_bsmp_variable(36,'float'),3)))
print("Arm Current 2: " + str(round(self.read_bsmp_variable(37,'float'),3)))
print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(38,'float'),3)))
print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(39,'float'),3)))
print("CapBank Voltage 3: " + str(round(self.read_bsmp_variable(40,'float'),3)))
print("CapBank Voltage 4: " + str(round(self.read_bsmp_variable(41,'float'),3)))
print("CapBank Voltage 5: " + str(round(self.read_bsmp_variable(42,'float'),3)))
print("CapBank Voltage 6: " + str(round(self.read_bsmp_variable(43,'float'),3)))
print("CapBank Voltage 7: " + str(round(self.read_bsmp_variable(44,'float'),3)))
print("CapBank Voltage 8: " + str(round(self.read_bsmp_variable(45,'float'),3)))
print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(46,'float'),3)))
print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(47,'float'),3)))
print("Duty-Cycle 3: " + str(round(self.read_bsmp_variable(48,'float'),3)))
print("Duty-Cycle 4: " + str(round(self.read_bsmp_variable(49,'float'),3)))
print("Duty-Cycle 5: " + str(round(self.read_bsmp_variable(50,'float'),3)))
print("Duty-Cycle 6: " + str(round(self.read_bsmp_variable(51,'float'),3)))
print("Duty-Cycle 7: " + str(round(self.read_bsmp_variable(52,'float'),3)))
print("Duty-Cycle 8: " + str(round(self.read_bsmp_variable(53,'float'),3)))
if(iib):
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(55, 'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(58,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(59,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(60,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(61,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(62,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(63,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(64,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(65,'float'),3)) + " %")
iib_itlks = self.read_bsmp_variable(66,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_2p4s_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(67,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_2p4s_dcdc_iib_alarms)
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(68,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(69,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(70,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(71,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(72,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(73,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(74,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(75,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(76,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(77,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(78,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(79,'float'),3)) + " %")
iib_itlks = self.read_bsmp_variable(80,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_2p4s_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(81,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_2p4s_dcdc_iib_alarms)
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
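# FAP readout: interlocks/alarms, load and IGBT currents, DC-link voltage and duty-cycles,
# plus the IIB measurements when iib is nonzero.
# Illustrative call, assuming an instance named drs: drs.read_vars_fap(n=10, dt=1.0)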
def read_vars_fap(self, n = 1, com_add = 1, dt = 0.5, iib = 1):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_hard_interlocks)
iib_itlks = self.read_bsmp_variable(56,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fap_iib_interlocks)
iib_alarms = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fap_iib_alarms)
iload = self.read_bsmp_variable(33,'float')
print("\nLoad Current: " + str(round(iload,3)) + " A")
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
if not iload == 0:
print("\nLoad Resistance: " + str(abs(round(self.read_bsmp_variable(43,'float') / iload ,3))) + " Ohm")
else:
print("\nLoad Resistance: 0 Ohm")
print("Load Power: " + str(abs(round(self.read_bsmp_variable(43,'float') * self.read_bsmp_variable(33,'float'),3))) + " W")
print("\nDC-Link Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("\nIGBT 1 Current: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("IGBT 2 Current: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("\nIGBT 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " %")
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " %")
if(iib):
print("\nIIB Input Voltage: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " V")
print("IIB Output Voltage: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " V")
print("IIB IGBT 1 Current: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("IIB IGBT 2 Current: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
print("IIB IGBT 1 Temp: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " °C")
print("IIB IGBT 2 Temp: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " A")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " °C")
print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
print("IIB Interlocks: " + str(round(self.read_bsmp_variable(56,'uint32_t'),3)))
print("IIB Alarms: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
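# FAP 4P readout. The iib argument selects which of the four IIB modules (1..4) to print;
# each IIB block spans 16 consecutive BSMP variables, hence iib_offset = 16*(iib-1).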
def read_vars_fap_4p(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
iib_offset = 16*(iib-1)
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_4p_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_4p_hard_interlocks)
for j in range(4):
iib_itlks = self.read_bsmp_variable(72 + j*16,'uint32_t')
print("\nIIB " + str(j+1) + " Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fap_4p_iib_interlocks)
iib_alarms = self.read_bsmp_variable(73 + j*16,'uint32_t')
print("IIB " + str(j+1) + " Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fap_4p_iib_alarms)
print("\n Mean Load Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("Load Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("\nIGBT 1 Current Mod 1: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("IGBT 2 Current Mod 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("IGBT 1 Current Mod 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
print("IGBT 2 Current Mod 2: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IGBT 1 Current Mod 3: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " A")
print("IGBT 2 Current Mod 3: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " A")
print("IGBT 1 Current Mod 4: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " A")
print("IGBT 2 Current Mod 4: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("\nDC-Link Voltage Mod 1: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " V")
print("DC-Link Voltage Mod 2: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " V")
print("DC-Link Voltage Mod 3: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("DC-Link Voltage Mod 4: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("\nMean Duty-Cycle: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " %")
if not iib == 0:
print("\nIIB " + str(iib) + " Input Voltage: " + str(round(self.read_bsmp_variable(58 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Output Voltage: " + str(round(self.read_bsmp_variable(59 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " IGBT 1 Current: " + str(round(self.read_bsmp_variable(60 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 2 Current: " + str(round(self.read_bsmp_variable(61 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 1 Temp: " + str(round(self.read_bsmp_variable(62 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " IGBT 2 Temp: " + str(round(self.read_bsmp_variable(63 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Driver Voltage: " + str(round(self.read_bsmp_variable(64 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Driver Current 1: " + str(round(self.read_bsmp_variable(65 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Driver Current 2: " + str(round(self.read_bsmp_variable(66 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Inductor Temp: " + str(round(self.read_bsmp_variable(67 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Heat-Sink Temp: " + str(round(self.read_bsmp_variable(68 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Ground Leakage Current: " + str(round(self.read_bsmp_variable(69 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Board Temp: " + str(round(self.read_bsmp_variable(70 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Board RH: " + str(round(self.read_bsmp_variable(71 + iib_offset,'float'),3)) + " %")
print("IIB " + str(iib) + " Interlocks: " + str(round(self.read_bsmp_variable(72 + iib_offset,'uint32_t'),3)))
print("IIB " + str(iib) + " Alarms: " + str(round(self.read_bsmp_variable(73 + iib_offset,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except Exception as e:
print(e)
self.SetSlaveAdd(old_add)
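# FAP 2P2S readout: same structure as read_vars_fap_4p, with arm currents added and the
# IIB blocks starting at variable 64 (interlocks/alarms at 78/79 + 16*j).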
def read_vars_fap_2p2s(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
iib_offset = 16*(iib-1)
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_2p2s_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_2p2s_hard_interlocks)
for j in range(4):
iib_itlks = self.read_bsmp_variable(78 + j*16,'uint32_t')
print("\nIIB " + str(j+1) + " Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fap_4p_iib_interlocks)
iib_alarms = self.read_bsmp_variable(79 + j*16,'uint32_t')
print("IIB " + str(j+1) + " Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fap_4p_iib_alarms)
print("\nMean Load Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nArm Current 1: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("Arm Current 2: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("\nIGBT 1 Current Mod 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("IGBT 2 Current Mod 1: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
print("IGBT 1 Current Mod 2: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IGBT 2 Current Mod 2: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " A")
print("IGBT 1 Current Mod 3: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " A")
print("IGBT 2 Current Mod 3: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " A")
print("IGBT 1 Current Mod 4: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("IGBT 2 Current Mod 4: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
print("\nDC-Link Voltage Mod 1: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " V")
print("DC-Link Voltage Mod 2: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("DC-Link Voltage Mod 3: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " V")
print("DC-Link Voltage Mod 4: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " V")
print("\nMean Duty-Cycle: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " %")
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
print("\nIGBT 1 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(58,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(59,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(60,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(61,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(62,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(63,'float'),3)) + " %")
if not iib == 0:
print("\nIIB " + str(iib) + " Input Voltage: " + str(round(self.read_bsmp_variable(64 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Output Voltage: " + str(round(self.read_bsmp_variable(65 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " IGBT 1 Current: " + str(round(self.read_bsmp_variable(66 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 2 Current: " + str(round(self.read_bsmp_variable(67 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 1 Temp: " + str(round(self.read_bsmp_variable(68 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " IGBT 2 Temp: " + str(round(self.read_bsmp_variable(69 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Driver Voltage: " + str(round(self.read_bsmp_variable(70 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Driver Current 1: " + str(round(self.read_bsmp_variable(71 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Driver Current 2: " + str(round(self.read_bsmp_variable(72 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Inductor Temp: " + str(round(self.read_bsmp_variable(73 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Heat-Sink Temp: " + str(round(self.read_bsmp_variable(74 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Ground Leakage Current: " + str(round(self.read_bsmp_variable(75 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Board Temp: " + str(round(self.read_bsmp_variable(76 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Board RH: " + str(round(self.read_bsmp_variable(77 + iib_offset,'float'),3)) + " %")
print("IIB " + str(iib) + " Interlocks: " + str(round(self.read_bsmp_variable(78 + iib_offset,'uint32_t'),3)))
print("IIB " + str(iib) + " Alarms: " + str(round(self.read_bsmp_variable(79 + iib_offset,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except Exception as e:
print(e)
self.SetSlaveAdd(old_add)
def read_vars_fap_225A(self, n = 1, com_add = 1, dt = 0.5):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_225A_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_225A_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("\nIGBT 1 Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("IGBT 2 Current: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nIGBT 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " %")
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fbp_2s_ufjf(self, n = 1, com_add = 1, dt = 0.5):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fbp_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fbp_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Error: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("\nMod 1 Load Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("Mod 3 Load Voltage: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " V")
#print("\nMod 1 DC-Link Voltage: " + str(round(self.read_bsmp_variable(29,'float'),3)) + " V")
#print("Mod 1 Temperature: " + str(round(self.read_bsmp_variable(31,'float'),3)) + " °C")
#print("\nMod 3 DC-Link Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
#print("Mod 3 Temperature: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " °C")
print("\nMod 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(32,'float'),3)) + " %")
print("Mod 3 Duty-Cycle: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " %")
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fac_2p_acdc_imas(self, n = 1, add_mod_a = 2, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(add_mod_a)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print('\n *** MODULE A ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p_acdc_imas_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p_acdc_imas_hard_interlocks)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
self.SetSlaveAdd(add_mod_a+1)
print('\n *** MODULE B ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p_acdc_imas_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p_acdc_imas_hard_interlocks)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
raise
def read_vars_fac_2p_dcdc_imas(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p_dcdc_imas_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p_dcdc_imas_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + ' A')
print("Load Current Error: " + str(round(self.read_bsmp_variable(34,'float'),3)) + ' A')
print("\nArm 1 Current: " + str(round(self.read_bsmp_variable(35,'float'),3)) + ' A')
print("Arm 2 Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + ' A')
print("Arms Current Diff: " + str(round(self.read_bsmp_variable(37,'float'),3)) + ' A')
print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + ' V')
print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + ' V')
print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(40,'float'),3)) + ' %')
print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(41,'float'),3)) + ' %')
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(42,'float'),3)) + ' %')
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
raise
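# Reads a parameter bank from a CSV file and echoes each entry back through set_param().
# Each row is [name, value_0, value_1, ...]; up to 64 indexed values are sent per parameter,
# and a few basic sanity limits are checked first.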
def check_param_bank(self, param_file):
fbp_param_list = []
max_sampling_freq = 600000
c28_sysclk = 150e6
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'Num_PS_Modules' and float(param[1]) > 4:
print("Invalid " + str(param[0]) + ": " + str(param[1]) + ". Maximum is 4")
elif str(param[0]) == 'Freq_ISR_Controller' and float(param[1]) > 6000000:
print("Invalid " + str(param[0]) + ": " + str(param[1]) + ". Maximum is 6000000")
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
def set_param_bank(self, param_file):
fbp_param_list = []
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'PS_Name':
print(str(param[0]) + "[0]: " + str(param[1]))
print(self.set_ps_name(str(param[1])))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
#self.save_param_bank()
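# Thin wrapper around siriuspy.magnet.util.get_default_ramp_waveform().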
def get_default_ramp_waveform(self, interval=500, nrpts=4000, ti=None, fi=None, forms=None):
from siriuspy.magnet.util import get_default_ramp_waveform
return get_default_ramp_waveform(interval, nrpts, ti, fi, forms)
def save_ramp_waveform(self, ramp):
filename = input('Digite o nome do arquivo: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(ramp)
def save_ramp_waveform_col(self, ramp):
filename = input('Digite o nome do arquivo: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f)
for val in ramp:
writer.writerow([val])
def read_vars_fac_n(self, n = 1, dt = 0.5):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.SetSlaveAdd(1)
self.read_vars_fac_dcdc()
print('\n-----------------------\n')
self.SetSlaveAdd(2)
self.read_vars_fac_acdc()
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
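# Captures the scope buffer around a current step (i_0 -> i_f after dly seconds), then
# de-interleaves the 4096-sample buffer into its two channels and plots them.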
def get_step_buffer_fbp_ufjf(self, net1, net2, i_0, i_f, dly):
self.set_param('Analog_Var_Max',4,net1)
self.set_param('Analog_Var_Max',5,net2)
self.set_slowref(i_0)
time.sleep(0.5)
self.enable_buf_samples()
time.sleep(dly)
self.set_slowref(i_f)
self.disable_buf_samples()
buf = self.read_buf_samples_ctom()
buf1 = buf[0:4096:2]
buf2 = buf[1:4096:2]
fig = plt.figure()
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
ax1.plot(buf1)
ax1.grid()
ax2.plot(buf2)
ax2.grid()
fig.show()
return [buf1,buf2]
def set_buf_samples_freq(self, fs):
self.set_param('Freq_TimeSlicer',1,fs)
self.save_param_eeprom('Freq_TimeSlicer',1)
self.reset_udc()
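# PI tuning for an RL load fed from a DC link: with plant Vdc/(L*s + R), kp = 2*pi*f_bw*L/Vdc
# and ki = kp*R/L place the PI zero on the plant pole, giving a first-order closed loop with
# bandwidth f_bw (interpretation inferred from the formulas below).
# Illustrative call only: kp, ki = drs.calc_pi(r_load=0.1, l_load=0.01, f_bw=100, v_dclink=400)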
def calc_pi(self, r_load, l_load, f_bw, v_dclink, send_drs = 0, dsp_id = 0):
kp = 2*3.1415*f_bw*l_load/v_dclink
ki = kp*r_load/l_load
print('\n Kp = ' + str(kp))
print(' Ki = ' + str(ki) + '\n')
if send_drs:
self.set_dsp_coeffs(3,dsp_id,[kp,ki,0.95,-0.95])
return [kp,ki]
def config_dsp_modules_drs_fap_tests(self):
kp_load = 0
ki_load = 20.95
kp_share = 0.000032117
ki_share = 0.0012
self.set_dsp_coeffs(3,0,[kp_load,ki_load,0.6,0])
self.set_dsp_coeffs(3,1,[kp_share,ki_share,0.0015,-0.0015])
self.save_dsp_modules_eeprom()
def set_prbs_sampling_freq(self,freq, type_memory):
self.set_param('Freq_TimeSlicer',0,freq)
self.set_param('Freq_TimeSlicer',1,freq)
self.save_param_bank(type_memory)
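# Dumps every DSP controller coefficient, grouped by class and id. For class 3 controllers
# (the PI modules, cf. calc_pi above) coefficient 1 is rescaled by Freq_ISR_Controller
# before being reported.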
def get_dsp_modules_bank(self, list_dsp_classes = [1,2,3,4,5,6], print_modules = 1):
dsp_modules_bank = []
for dsp_class in list_dsp_classes:
for dsp_id in range(num_dsp_modules[dsp_class]):
dsp_module = [dsp_classes_names[dsp_class], dsp_class, dsp_id]
for dsp_coeff in range(num_coeffs_dsp_modules[dsp_class]):
try:
coeff = self.get_dsp_coeff(dsp_class,dsp_id,dsp_coeff)
if dsp_class == 3 and dsp_coeff == 1:
coeff *= self.get_param('Freq_ISR_Controller',0)
dsp_module.append(coeff)
except:
dsp_module.append('nan')
dsp_modules_bank.append(dsp_module)
if(print_modules):
print(dsp_module)
return dsp_modules_bank
def store_dsp_modules_bank_csv(self, bank):
filename = input('Digite o nome do arquivo: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=',')
for dsp_module in bank:
writer.writerow(dsp_module)
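# Loads DSP coefficients from a CSV file (rows starting with '#' are skipped) and writes them
# to the controller; pass save_eeprom = 1 to also persist them to EEPROM.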
def set_dsp_modules_bank(self, dsp_modules_file, save_eeprom = 0):
dsp_modules_row = []
with open(dsp_modules_file,newline='') as f:
reader = csv.reader(f)
for dsp_module in reader:
if not dsp_module == []:
if not dsp_module[0][0] == '#':
list_coeffs = []
for coeff in dsp_module[3:3+num_coeffs_dsp_modules[int(dsp_module[1])]]:
list_coeffs.append(float(coeff))
print(str(int(dsp_module[1])) + ' ' + str(int(dsp_module[2])) + ' ' + str(list_coeffs))
self.set_dsp_coeffs(int(dsp_module[1]),int(dsp_module[2]),list_coeffs)
if(save_eeprom):
self.save_dsp_modules_eeprom()
else:
print('\n *** Aviso: Os coeficientes configurados não foram salvos na memória EEPROM. Caso deseje salvar, utilize o argumento save_eeprom = 1')
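# Interactive wizard (prompts in Portuguese) that selects a parameter-bank CSV by area,
# sector, rack and power-supply model, uploads it and saves it to the onboard EEPROM.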
def select_param_bank(self, cfg_dsp_modules = 0):
add = int(input('\n Digite o endereco serial atual do controlador a ser configurado: '))
oldadd = self.GetSlaveAdd()
self.SetSlaveAdd(add)
areas = ['IA','LA','PA']
ps_models = ['fbp','fbp_dclink','fap','fap_4p','fap_2p4s','fac','fac_2s']
ps_folders = ['fbp','fbp_dclink','fap','fap',]
la_fap = ['TB-Fam:PS-B','TS-01:PS-QF1A','TS-01:PS-QF1B','TS-02:PS-QD2',
'TS-02:PS-QF2','TS-03:PS-QF3','TS-04:PS-QD4A','TS-04:PS-QD4B',
'TS-04:PS-QF4']
print('\n Selecione area: \n')
print(' 0: Sala de racks')
print(' 1: Linhas de transporte')
print(' 2: Sala de fontes\n')
area = int(input(' Digite o numero correspondente: '))
if area == 0:
sector = input('\n Digite o setor da sala de racks [1 a 20]: ')
if int(sector) < 10:
sector = '0' + sector
rack = input('\n Escolha o rack em que a fonte se encontra [1/2/3]: ')
#if (rack != '1') and (rack != '2'):
if not ((rack == '1') or (rack == '2') or (sector == '09' and rack == '3')):
print(' \n *** RACK INEXISTENTE ***\n')
return
print('\n Escolha o tipo de fonte: \n')
print(' 0: FBP')
print(' 1: FBP-DCLink\n')
ps_model = int(input(' Digite o numero correspondente: '))
if ps_model == 0:
crate = '_crate_' + input('\n Digite a posicao do bastidor, de cima para baixo. Leve em conta os bastidores que ainda nao foram instalados : ')
elif ps_model == 1:
crate = ''
else:
print(' \n *** TIPO DE FONTE INEXISTENTE ***\n')
return
file_dir = '../ps_parameters/IA-' + sector + '/' + ps_models[ps_model] + '/'
file_name = 'parameters_' + ps_models[ps_model] + '_IA-' + sector + 'RaPS0' + rack + crate + '.csv'
file_path = file_dir + file_name
print('\n Banco de parametros a ser utilizado: ' + file_path)
elif area == 1:
print('\n Escolha o tipo de fonte: \n')
print(' 0: FBP')
print(' 1: FBP-DCLink')
print(' 2: FAP\n')
ps_model = int(input(' Digite o numero correspondente: '))
if ps_model == 0 or ps_model == 1:
crate = input('\n Digite a posicao do bastidor, de cima para baixo. Leve em conta os bastidores que ainda nao foram instalados : ')
ps_name = '_LA-RaPS06_crate_' + crate
file_dir = '../ps_parameters/LA/' + ps_models[ps_model] + '/'
file_name = 'parameters_' + ps_models[ps_model] + ps_name + '.csv'
file_path = file_dir + file_name
elif ps_model == 2:
ps_list = []
file_dir = '../ps_parameters/LA/fap/'
for entry in os.listdir(file_dir):
if os.path.isfile(os.path.join(file_dir, entry)):
ps_list.append(entry)
print('\n ### Lista de fontes FAP da linha de transporte ### \n')
for idx, ps in enumerate(ps_list):
print(' ' + str(idx) + ': ' + ps)
ps_idx = int(input('\n Escolha o índice da fonte correspondente: '))
file_path = file_dir + ps_list[ps_idx]
else:
print(' \n *** TIPO DE FONTE INEXISTENTE ***\n')
return
print('\n Banco de parametros a ser utilizado: ' + file_path)
elif area == 2:
print('\n Escolha o tipo de fonte: \n')
print(' 0: FAC')
print(' 1: FAP\n')
ps_model = int(input(' Digite o numero correspondente: '))
if ps_model == 0:
ps_list = []
file_dir = '../ps_parameters/PA/fac/'
for entry in os.listdir(file_dir):
if os.path.isfile(os.path.join(file_dir, entry)):
ps_list.append(entry)
print('\n ### Lista de bastidores de controle FAC da sala de fontes ### \n')
for idx, ps in enumerate(ps_list):
print(' ', idx, ': ', ps)
ps_idx = int(input('\n Escolha o índice da fonte correspondente: '))
file_path = file_dir + ps_list[ps_idx]
elif ps_model == 1:
ps_list = []
file_dir = '../ps_parameters/PA/fap/'
for entry in os.listdir(file_dir):
if os.path.isfile(os.path.join(file_dir, entry)):
ps_list.append(entry)
print('\n ### Lista de bastidores de controle FAP da sala de fontes ### \n')
for idx, ps in enumerate(ps_list):
print(' ', idx, ': ', ps)
ps_idx = int(input('\n Escolha o índice da fonte correspondente: '))
file_path = file_dir + ps_list[ps_idx]
else:
print(' \n *** TIPO DE FONTE INEXISTENTE ***\n')
return
print('\n Banco de parametros a ser utilizado: ' + file_path)
else:
print(' \n *** SALA INEXISTENTE ***\n')
return
r = input('\n Tem certeza que deseja prosseguir? [Y/N]: ')
if (r != 'Y') and (r != 'y'):
print(' \n *** OPERAÇÃO CANCELADA ***\n')
return
self.SetSlaveAdd(add)
if ps_model == 0 and cfg_dsp_modules == 1:
print('\n Enviando parametros de controle para controlador ...')
dsp_file_dir = '../dsp_parameters/IA-' + sector + '/' + ps_models[ps_model] + '/'
dsp_file_name = 'dsp_parameters_' + ps_models[ps_model] + '_IA-' + sector + 'RaPS0' + rack + crate + '.csv'
dsp_file_path = dsp_file_dir + dsp_file_name
self.set_dsp_modules_bank(dsp_file_path)
print('\n Gravando parametros de controle na memoria ...')
time.sleep(1)
self.save_dsp_modules_eeprom()
print('\n Enviando parametros de operacao para controlador ...\n')
time.sleep(1)
self.set_param_bank(file_path)
print('\n Gravando parametros de operacao na memoria EEPROM onboard ...')
self.save_param_bank(2)
time.sleep(5)
print('\n Resetando UDC ...')
self.reset_udc()
time.sleep(2)
print('\n Pronto! Não se esqueça de utilizar o novo endereço serial para se comunicar com esta fonte! :)\n')
self.SetSlaveAdd(oldadd)
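# Prints the signal-generator state (BSMP variables 6..13); the four auxiliary parameters
# are unpacked from variable 13 as a 'BBHffffB' frame.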
def get_siggen_vars(self):
print('\n### SigGen Variables ###\n')
print('Enable: ' + str((round(self.read_bsmp_variable(6,'uint16_t'),3))))
print('Type: ' + ListSigGenTypes_v2_1[int(round(self.read_bsmp_variable(7,'uint16_t'),3))])
print('Num Cycles: ' + str(round(self.read_bsmp_variable(8,'uint16_t'),3)))
print('Index: ' + str(round(self.read_bsmp_variable(9,'float'),3)))
print('Frequency: ' + str(round(self.read_bsmp_variable(10,'float'),3)))
print('Amplitude: ' + str(round(self.read_bsmp_variable(11,'float'),3)))
print('Offset: ' + str(round(self.read_bsmp_variable(12,'float'),3)))
self.read_var(self.index_to_hex(13))
reply_msg = self.ser.read(21)
val = struct.unpack('BBHffffB',reply_msg)
print('Aux Param 0: ' + str(val[3]))
print('Aux Param 1: ' + str(val[4]))
print('Aux Param 2: ' + str(val[5]))
print('Aux Param 3: ' + str(val[6]))
def firmware_initialization(self):
print("\n ### Inicialização de firmware ### \n")
print("\n Lendo status...")
print(self.read_ps_status())
print("\n Lendo versão de firmware...")
self.read_udc_version()
print("\n Desbloqueando UDC...")
print(self.unlock_udc(0xFFFF))
print("\n Habilitando EEPROM onboard...")
self.enable_onboard_eeprom()
print("\n Alterando senha...")
print(self.set_param('Password',0,0xCAFE))
print(self.save_param_eeprom('Password',0,2))
print("\n Configurando banco de parâmetros...")
self.select_param_bank()
print("\n ### Fim da inicialização de firmware ### \n")
def cfg_hensys_ps_model(self):
list_files = ['fbp_dclink/parameters_fbp_dclink_hensys.csv',
'fac/parameters_fac_acdc_hensys.csv',
'fac/parameters_fac_dcdc_hensys.csv',
'fac/parameters_fac_2s_acdc_hensys.csv',
'fac/parameters_fac_2s_dcdc_hensys.csv',
'fac/parameters_fac_2p4s_acdc_hensys.csv',
'fac/parameters_fac_2p4s_dcdc_hensys.csv',
'fap/parameters_fap_hensys.csv',
'fap/parameters_fap_2p2s_hensys.csv',
'fap/parameters_fap_4p_hensys.csv']
print('\n Desbloqueando UDC ...')
print(self.unlock_udc(0xCAFE))
print('\n *** Escolha o modelo de fonte a ser configurado ***\n')
print(' 0: FBP-DClink')
print(' 1: FAC-ACDC')
print(' 2: FAC-DCDC')
print(' 3: FAC-2S-ACDC')
print(' 4: FAC-2S-DCDC')
print(' 5: FAC-2P4S-ACDC')
print(' 6: FAC-2P4S-DCDC')
print(' 7: FAP')
print(' 8: FAP-2P2S')
print(' 9: FAP-4P')
model_idx = int(input('\n Digite o índice correspondente: '))
file_path = '../ps_parameters/development/' + list_files[model_idx]
print('\n Banco de parametros a ser utilizado: ' + file_path)
r = input('\n Tem certeza que deseja prosseguir? [Y/N]: ')
if (r != 'Y') and (r != 'y'):
print(' \n *** OPERAÇÃO CANCELADA ***\n')
return
print('\n Enviando parametros de operacao para controlador ...\n')
time.sleep(1)
self.set_param_bank(file_path)
print('\n Gravando parametros de operacao na memoria EEPROM onboard ...')
self.save_param_bank(2)
time.sleep(5)
print('\n Resetando UDC ...')
self.reset_udc()
time.sleep(2)
print('\n Pronto! Nao se esqueca de utilizar o novo endereco serial para se comunicar com esta fonte! :)\n')
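# BID board test: copies the onboard parameter bank to the BID board under test, resets the
# UDC, reloads the bank from the offboard (BID) memory and compares it with the original.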
def test_bid_board(self, password):
r = input("\n Antes de iniciar, certifique-se que o bastidor foi energizado sem a placa BID.\n Para prosseguir, conecte a placa BID a ser testada e pressione qualquer tecla... ")
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria onboard ...")
print(self.load_param_bank(type_memory = 2))
print("\n Banco de parametros da memoria onboard:\n")
max_param = ListParameters.index('Scope_Source')
param_bank_onboard = []
for param in ListParameters[0:max_param]:
val = self.get_param(param,0)
print(param + ':',val)
param_bank_onboard.append(val)
print("\n Salvando banco de parametros na memoria offboard ...")
print(self.save_param_bank(type_memory = 1))
time.sleep(5)
print("\n Resetando UDC ...")
self.reset_udc()
time.sleep(3)
self.read_ps_status()
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria offboard ...")
print(self.load_param_bank(type_memory = 1))
self.read_ps_status()
print("\n Verificando banco de parametros offboard apos reset ... \n")
try:
param_bank_offboard = []
for param in ListParameters[0:max_param]:
val = self.get_param(param,0)
print(param, val)
param_bank_offboard.append(val)
if param_bank_onboard == param_bank_offboard:
print("\n Placa BID aprovada!\n")
else:
print("\n Placa BID reprovada!\n")
except:
print(" Placa BID reprovada!\n")
def upload_parameters_bid(self, password):
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria offboard ...")
print(self.load_param_bank(type_memory = 1))
time.sleep(1)
print("\n Salvando banco de parametros na memoria onboard ...")
print(self.save_param_bank(type_memory = 2))
time.sleep(5)
print("\n Carregando coeficientes de controle da memoria offboard ...")
print(self.load_dsp_modules_eeprom(type_memory = 1))
time.sleep(1)
print("\n Salvando coeficientes de controle na memoria onboard ...\n")
print(self.save_dsp_modules_eeprom(type_memory = 2))
def download_parameters_bid(self,password):
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria onboard ...")
print(self.load_param_bank(type_memory = 2))
time.sleep(1)
print("\n Salvando banco de parametros na memoria offboard ...")
print(self.save_param_bank(type_memory = 1))
time.sleep(5)
print("\n Carregando coeficientes de controle da memoria onboard ...")
print(self.load_dsp_modules_eeprom(type_memory = 2))
time.sleep(1)
print("\n Salvando coeficientes de controle na memoria offboard ...")
print(self.save_dsp_modules_eeprom(type_memory = 1))
[
"serial.Serial",
"siriuspy.magnet.util.get_default_ramp_waveform",
"math.isnan",
"csv.reader",
"csv.writer",
"numpy.float32",
"struct.unpack",
"struct.pack",
"time.sleep",
"matplotlib.pyplot.figure",
"os.path.join",
"os.listdir"
]
delimiter=';')\n", (196773, 196791), False, 'import csv\n'), ((197020, 197033), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (197030, 197033), False, 'import csv\n'), ((200531, 200559), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (200541, 200559), False, 'import csv\n'), ((200831, 200844), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (200841, 200844), False, 'import csv\n'), ((201842, 201855), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (201852, 201855), False, 'import csv\n'), ((209295, 209308), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (209305, 209308), False, 'import time\n'), ((34276, 34299), 'struct.pack', 'struct.pack', (['"""f"""', 'value'], {}), "('f', value)\n", (34287, 34299), False, 'import struct\n'), ((60565, 60578), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (60575, 60578), True, 'import numpy as np\n'), ((60615, 60636), 'numpy.float32', 'np.float32', (['(6.0 / 5.0)'], {}), '(6.0 / 5.0)\n', (60625, 60636), True, 'import numpy as np\n'), ((60671, 60692), 'numpy.float32', 'np.float32', (['(6.0 / 5.0)'], {}), '(6.0 / 5.0)\n', (60681, 60692), True, 'import numpy as np\n'), ((120611, 120639), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (120622, 120639), False, 'import struct\n'), ((121233, 121261), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (121244, 121261), False, 'import struct\n'), ((121772, 121809), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (121785, 121809), False, 'import struct\n'), ((121885, 121905), 'struct.pack', 'struct.pack', (['""">H"""', '(0)'], {}), "('>H', 0)\n", (121896, 121905), False, 'import struct\n'), ((122600, 122628), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (122611, 122628), False, 'import struct\n'), ((123230, 123258), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (123241, 123258), False, 'import struct\n'), ((123774, 123811), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (123787, 123811), False, 'import struct\n'), ((123904, 123932), 'struct.pack', 'struct.pack', (['""">H"""', 'block_idx'], {}), "('>H', block_idx)\n", (123915, 123932), False, 'import struct\n'), ((124533, 124570), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (124546, 124570), False, 'import struct\n'), ((124972, 124999), 'struct.pack', 'struct.pack', (['""">H"""', 'block_id'], {}), "('>H', block_id)\n", (124983, 124999), False, 'import struct\n'), ((125635, 125672), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (125648, 125672), False, 'import struct\n'), ((125775, 125802), 'struct.pack', 'struct.pack', (['""">H"""', 'block_id'], {}), "('>H', block_id)\n", (125786, 125802), False, 'import struct\n'), ((128473, 128498), 'struct.pack', 'struct.pack', (['"""B"""', 'address'], {}), "('B', address)\n", (128484, 128498), False, 'import struct\n'), ((132213, 132227), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (132223, 132227), False, 'import time\n'), ((133450, 133464), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (133460, 133464), False, 'import time\n'), ((142148, 142162), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (142158, 142162), False, 'import time\n'), ((145538, 145552), 'time.sleep', 
'time.sleep', (['dt'], {}), '(dt)\n', (145548, 145552), False, 'import time\n'), ((155491, 155505), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (155501, 155505), False, 'import time\n'), ((159636, 159650), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (159646, 159650), False, 'import time\n'), ((166918, 166932), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (166928, 166932), False, 'import time\n'), ((171657, 171671), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (171667, 171671), False, 'import time\n'), ((178404, 178418), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (178414, 178418), False, 'import time\n'), ((185441, 185455), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (185451, 185455), False, 'import time\n'), ((187216, 187230), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (187226, 187230), False, 'import time\n'), ((189443, 189457), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (189453, 189457), False, 'import time\n'), ((191974, 191988), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (191984, 191988), False, 'import time\n'), ((194368, 194382), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (194378, 194382), False, 'import time\n'), ((197567, 197581), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (197577, 197581), False, 'import time\n'), ((36010, 36044), 'struct.unpack', 'struct.unpack', (['"""H"""', 'val_b[i:i + 2]'], {}), "('H', val_b[i:i + 2])\n", (36023, 36044), False, 'import struct\n'), ((60859, 60872), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (60869, 60872), True, 'import numpy as np\n'), ((60909, 60922), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (60919, 60922), True, 'import numpy as np\n'), ((60959, 60972), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (60969, 60972), True, 'import numpy as np\n'), ((84085, 84098), 'math.isnan', 'math.isnan', (['p'], {}), '(p)\n', (84095, 84098), False, 'import math\n'), ((122439, 122476), 'struct.unpack', 'struct.unpack', (['"""f"""', 'recv_msg[k:k + 4]'], {}), "('f', recv_msg[k:k + 4])\n", (122452, 122476), False, 'import struct\n'), ((61129, 61142), 'numpy.float32', 'np.float32', (['(5)'], {}), '(5)\n', (61139, 61142), True, 'import numpy as np\n'), ((61179, 61192), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61189, 61192), True, 'import numpy as np\n'), ((61229, 61242), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61239, 61242), True, 'import numpy as np\n'), ((205777, 205797), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (205787, 205797), False, 'import os\n'), ((206922, 206942), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (206932, 206942), False, 'import os\n'), ((61411, 61424), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61421, 61424), True, 'import numpy as np\n'), ((61461, 61474), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61471, 61474), True, 'import numpy as np\n'), ((61511, 61524), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (61521, 61524), True, 'import numpy as np\n'), ((207630, 207650), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (207640, 207650), False, 'import os\n'), ((205837, 205866), 'os.path.join', 'os.path.join', (['file_dir', 'entry'], {}), '(file_dir, entry)\n', (205849, 205866), False, 'import os\n'), ((206982, 207011), 'os.path.join', 'os.path.join', (['file_dir', 'entry'], {}), '(file_dir, entry)\n', (206994, 207011), False, 'import os\n'), ((207690, 207719), 'os.path.join', 'os.path.join', 
(['file_dir', 'entry'], {}), '(file_dir, entry)\n', (207702, 207719), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import irispy.iris_tools as iris_tools
import numpy as np
import numpy.testing as np_test
source_data = np.array([[ 0.563, 1.132, -1.343],
[-0.719, 1.441, 1.566]])
source_data1 = np.array([[1, 2, 3],
[4, 5, 6]])
def test_convert_DN_to_photons_NUV():
"""
"""
expected_output = np.array([[ 10.134, 20.376, -24.174],
[-12.942, 25.938, 28.188]])
photons_count = iris_tools.convert_DN_to_photons(source_data, 'NUV')
np_test.assert_allclose(photons_count, expected_output)
def test_convert_DN_to_photons_FUV():
"""
"""
expected_output = np.array([[ 2.252, 4.528, -5.372],
[-2.876, 5.764, 6.264]])
photons_count = iris_tools.convert_DN_to_photons(source_data, 'FUV')
np_test.assert_allclose(photons_count, expected_output)
def test_convert_DN_to_photons_SJI():
"""
"""
expected_output = np.array( [[18, 36, 54],
[72, 90, 108]])
photons_count = iris_tools.convert_DN_to_photons(source_data1, 'SJI')
np_test.assert_allclose(photons_count, expected_output)
def test_convert_photons_to_DN_NUV():
"""
"""
expected_output = np.array([[ 0.05555556, 0.11111111, 0.16666667],
[ 0.22222222, 0.27777778, 0.33333333]])
DN = iris_tools.convert_photons_to_DN(source_data1, 'NUV')
np_test.assert_allclose(DN, expected_output)
def test_convert_photons_to_DN_FUV():
"""
"""
expected_output = np.array([[ 0.25, 0.5 , 0.75],
[ 1. , 1.25, 1.5 ]])
DN = iris_tools.convert_photons_to_DN(source_data1, 'FUV')
np_test.assert_allclose(DN, expected_output)
def test_convert_photons_to_DN_SJI():
"""
"""
expected_output = np.array( [[ 0.05555556, 0.11111111, 0.16666667],
[ 0.22222222, 0.27777778, 0.33333333]])
photons_count = iris_tools.convert_photons_to_DN(source_data1, 'SJI')
np_test.assert_allclose(photons_count, expected_output)
def test_calculate_intensity_fractional_uncertainty_photons_NUV():
"""
"""
expected_output = np.array([[21.62313576, 10.82312339, 7.223111057],
[5.423098745, 4.34308646, 3.623074202]])
calculated_intensity = iris_tools.calculate_intensity_fractional_uncertainty(source_data1, 'photons', 'NUV')
np_test.assert_allclose(expected_output, calculated_intensity)
def test_calculate_intensity_fractional_uncertainty_photons_FUV():
"""
"""
expected_output = np.array([[ 12.44025723, 6.240192305 , 4.173461127],
[ 3.140063694, 2.52 , 2.106603375]])
calculated_intensity = iris_tools.calculate_intensity_fractional_uncertainty(source_data1, 'photons', 'FUV')
np_test.assert_allclose(expected_output, calculated_intensity)
def test_calculate_intensity_fractional_uncertainty_photons_SJI():
"""
"""
expected_output = np.array([[21.62313576, 10.82312339, 7.223111057],
[5.423098745, 4.34308646, 3.623074202]])
calculated_intensity = iris_tools.calculate_intensity_fractional_uncertainty(source_data1, 'photons', 'SJI')
np_test.assert_allclose(expected_output, calculated_intensity)
def test_calculate_intensity_fractional_uncertainty_data_not_recognised():
"""
"""
assert pytest.raises(ValueError, iris_tools.calculate_intensity_fractional_uncertainty, source_data1, None, 'FUV')
def test_get_iris_response_response_version():
"""
"""
assert pytest.raises(ValueError, iris_tools.get_iris_response, response_version=4)
def test_get_iris_response_not_equal_to_one():
"""
"""
assert pytest.raises(ValueError, iris_tools.get_iris_response, pre_launch=True, response_version=3)
def test_get_iris_response_response_file():
"""
"""
assert pytest.raises(KeyError, iris_tools.get_iris_response, response_file="hello.py")
# def test_get_iris_response():
# """
# """
# def test_gaussian1d_on_linear_bg():
# """
# """
# def test_calculate_orbital_wavelength_variation():
# """
# """
|
[
"irispy.iris_tools.calculate_intensity_fractional_uncertainty",
"pytest.raises",
"irispy.iris_tools.convert_DN_to_photons",
"numpy.array",
"irispy.iris_tools.convert_photons_to_DN",
"numpy.testing.assert_allclose"
] |
[((181, 239), 'numpy.array', 'np.array', (['[[0.563, 1.132, -1.343], [-0.719, 1.441, 1.566]]'], {}), '([[0.563, 1.132, -1.343], [-0.719, 1.441, 1.566]])\n', (189, 239), True, 'import numpy as np\n'), ((265, 297), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (273, 297), True, 'import numpy as np\n'), ((372, 436), 'numpy.array', 'np.array', (['[[10.134, 20.376, -24.174], [-12.942, 25.938, 28.188]]'], {}), '([[10.134, 20.376, -24.174], [-12.942, 25.938, 28.188]])\n', (380, 436), True, 'import numpy as np\n'), ((471, 523), 'irispy.iris_tools.convert_DN_to_photons', 'iris_tools.convert_DN_to_photons', (['source_data', '"""NUV"""'], {}), "(source_data, 'NUV')\n", (503, 523), True, 'import irispy.iris_tools as iris_tools\n'), ((526, 581), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (549, 581), True, 'import numpy.testing as np_test\n'), ((651, 709), 'numpy.array', 'np.array', (['[[2.252, 4.528, -5.372], [-2.876, 5.764, 6.264]]'], {}), '([[2.252, 4.528, -5.372], [-2.876, 5.764, 6.264]])\n', (659, 709), True, 'import numpy as np\n'), ((744, 796), 'irispy.iris_tools.convert_DN_to_photons', 'iris_tools.convert_DN_to_photons', (['source_data', '"""FUV"""'], {}), "(source_data, 'FUV')\n", (776, 796), True, 'import irispy.iris_tools as iris_tools\n'), ((799, 854), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (822, 854), True, 'import numpy.testing as np_test\n'), ((924, 963), 'numpy.array', 'np.array', (['[[18, 36, 54], [72, 90, 108]]'], {}), '([[18, 36, 54], [72, 90, 108]])\n', (932, 963), True, 'import numpy as np\n'), ((990, 1043), 'irispy.iris_tools.convert_DN_to_photons', 'iris_tools.convert_DN_to_photons', (['source_data1', '"""SJI"""'], {}), "(source_data1, 'SJI')\n", (1022, 1043), True, 'import irispy.iris_tools as iris_tools\n'), ((1046, 1101), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (1069, 1101), True, 'import numpy.testing as np_test\n'), ((1170, 1261), 'numpy.array', 'np.array', (['[[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, 0.33333333]]'], {}), '([[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, \n 0.33333333]])\n', (1178, 1261), True, 'import numpy as np\n'), ((1280, 1333), 'irispy.iris_tools.convert_photons_to_DN', 'iris_tools.convert_photons_to_DN', (['source_data1', '"""NUV"""'], {}), "(source_data1, 'NUV')\n", (1312, 1333), True, 'import irispy.iris_tools as iris_tools\n'), ((1336, 1380), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['DN', 'expected_output'], {}), '(DN, expected_output)\n', (1359, 1380), True, 'import numpy.testing as np_test\n'), ((1450, 1497), 'numpy.array', 'np.array', (['[[0.25, 0.5, 0.75], [1.0, 1.25, 1.5]]'], {}), '([[0.25, 0.5, 0.75], [1.0, 1.25, 1.5]])\n', (1458, 1497), True, 'import numpy as np\n'), ((1528, 1581), 'irispy.iris_tools.convert_photons_to_DN', 'iris_tools.convert_photons_to_DN', (['source_data1', '"""FUV"""'], {}), "(source_data1, 'FUV')\n", (1560, 1581), True, 'import irispy.iris_tools as iris_tools\n'), ((1584, 1628), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['DN', 'expected_output'], {}), '(DN, expected_output)\n', (1607, 1628), True, 'import numpy.testing as np_test\n'), ((1698, 1789), 'numpy.array', 'np.array', (['[[0.05555556, 
0.11111111, 0.16666667], [0.22222222, 0.27777778, 0.33333333]]'], {}), '([[0.05555556, 0.11111111, 0.16666667], [0.22222222, 0.27777778, \n 0.33333333]])\n', (1706, 1789), True, 'import numpy as np\n'), ((1819, 1872), 'irispy.iris_tools.convert_photons_to_DN', 'iris_tools.convert_photons_to_DN', (['source_data1', '"""SJI"""'], {}), "(source_data1, 'SJI')\n", (1851, 1872), True, 'import irispy.iris_tools as iris_tools\n'), ((1875, 1930), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['photons_count', 'expected_output'], {}), '(photons_count, expected_output)\n', (1898, 1930), True, 'import numpy.testing as np_test\n'), ((2029, 2124), 'numpy.array', 'np.array', (['[[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646, \n 3.623074202]]'], {}), '([[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646,\n 3.623074202]])\n', (2037, 2124), True, 'import numpy as np\n'), ((2154, 2243), 'irispy.iris_tools.calculate_intensity_fractional_uncertainty', 'iris_tools.calculate_intensity_fractional_uncertainty', (['source_data1', '"""photons"""', '"""NUV"""'], {}), "(source_data1,\n 'photons', 'NUV')\n", (2207, 2243), True, 'import irispy.iris_tools as iris_tools\n'), ((2241, 2303), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['expected_output', 'calculated_intensity'], {}), '(expected_output, calculated_intensity)\n', (2264, 2303), True, 'import numpy.testing as np_test\n'), ((2401, 2491), 'numpy.array', 'np.array', (['[[12.44025723, 6.240192305, 4.173461127], [3.140063694, 2.52, 2.106603375]]'], {}), '([[12.44025723, 6.240192305, 4.173461127], [3.140063694, 2.52, \n 2.106603375]])\n', (2409, 2491), True, 'import numpy as np\n'), ((2544, 2633), 'irispy.iris_tools.calculate_intensity_fractional_uncertainty', 'iris_tools.calculate_intensity_fractional_uncertainty', (['source_data1', '"""photons"""', '"""FUV"""'], {}), "(source_data1,\n 'photons', 'FUV')\n", (2597, 2633), True, 'import irispy.iris_tools as iris_tools\n'), ((2631, 2693), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['expected_output', 'calculated_intensity'], {}), '(expected_output, calculated_intensity)\n', (2654, 2693), True, 'import numpy.testing as np_test\n'), ((2791, 2886), 'numpy.array', 'np.array', (['[[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646, \n 3.623074202]]'], {}), '([[21.62313576, 10.82312339, 7.223111057], [5.423098745, 4.34308646,\n 3.623074202]])\n', (2799, 2886), True, 'import numpy as np\n'), ((2916, 3005), 'irispy.iris_tools.calculate_intensity_fractional_uncertainty', 'iris_tools.calculate_intensity_fractional_uncertainty', (['source_data1', '"""photons"""', '"""SJI"""'], {}), "(source_data1,\n 'photons', 'SJI')\n", (2969, 3005), True, 'import irispy.iris_tools as iris_tools\n'), ((3003, 3065), 'numpy.testing.assert_allclose', 'np_test.assert_allclose', (['expected_output', 'calculated_intensity'], {}), '(expected_output, calculated_intensity)\n', (3026, 3065), True, 'import numpy.testing as np_test\n'), ((3161, 3273), 'pytest.raises', 'pytest.raises', (['ValueError', 'iris_tools.calculate_intensity_fractional_uncertainty', 'source_data1', 'None', '"""FUV"""'], {}), "(ValueError, iris_tools.\n calculate_intensity_fractional_uncertainty, source_data1, None, 'FUV')\n", (3174, 3273), False, 'import pytest\n'), ((3336, 3411), 'pytest.raises', 'pytest.raises', (['ValueError', 'iris_tools.get_iris_response'], {'response_version': '(4)'}), '(ValueError, iris_tools.get_iris_response, response_version=4)\n', (3349, 3411), False, 'import 
pytest\n'), ((3479, 3575), 'pytest.raises', 'pytest.raises', (['ValueError', 'iris_tools.get_iris_response'], {'pre_launch': '(True)', 'response_version': '(3)'}), '(ValueError, iris_tools.get_iris_response, pre_launch=True,\n response_version=3)\n', (3492, 3575), False, 'import pytest\n'), ((3636, 3715), 'pytest.raises', 'pytest.raises', (['KeyError', 'iris_tools.get_iris_response'], {'response_file': '"""hello.py"""'}), "(KeyError, iris_tools.get_iris_response, response_file='hello.py')\n", (3649, 3715), False, 'import pytest\n')]
|
"""
Copyright (C) 2020, <NAME>, https://www.gagolewski.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import scipy.stats
def load_data(fname, preprocess):
"""
Loads the data matrix and preprocesses it, see benchmark() for more details.
"""
X = np.loadtxt(fname, ndmin=2)
X = X[:, X.var(axis=0) > 0] # remove all columns of 0 variance
# add a tiny bit of white noise:
X += np.random.normal(0.0, X.std(ddof=1)*1e-6, size=X.shape)
if preprocess == "scale_standard": # mean/sd
s = X.std(axis=0, ddof=1)
X = (X-X.mean(axis=0))/s
elif preprocess == "scale_robust": # median/(1.4826*MAD)
s = np.median(np.abs(X-np.median(X, axis=0)), axis=0)
s = s/scipy.stats.norm().ppf(0.75) # i.e., s*1.4826
s[s<1e-12] = 1.0 # don't scale columns of zero MAD
X = (X-np.median(X, axis=0))/s
elif preprocess == "original":
s = X.std(axis=None, ddof=1) # scale all columns proportionally
X = (X-X.mean(axis=0))/s
else:
raise Exception("unknown `preprocess`")
X = X.astype(np.float32, order="C", copy=False) # work with float32
return X
|
[
"numpy.median",
"numpy.loadtxt"
] |
[((1269, 1295), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'ndmin': '(2)'}), '(fname, ndmin=2)\n', (1279, 1295), True, 'import numpy as np\n'), ((1840, 1860), 'numpy.median', 'np.median', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1849, 1860), True, 'import numpy as np\n'), ((1675, 1695), 'numpy.median', 'np.median', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1684, 1695), True, 'import numpy as np\n')]
|
import numpy as np
import random
class ExperienceMemory(object):
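    """Fixed-capacity replay memory backed by preallocated NumPy arrays.

    Transitions (state, action, reward, next state, terminal flag) are written
    in ring-buffer fashion: once capacity is reached, the write position wraps
    around and the oldest entries are overwritten.
    """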
def __init__(self, capacity, stateLength):
self.__capacity = capacity
self.__usedCapacity = 0
self.__stateLength = stateLength
self.__writePosition = 0
self.__writePositionReseted = False
self.__ids = np.zeros((self.__capacity), dtype='uint64')
self.__states = np.zeros((self.__capacity, self.__stateLength), dtype='float32')
self.__actions = np.zeros((self.__capacity), dtype='uint8')
self.__rewards = np.zeros((self.__capacity), dtype='float32')
self.__nextStates = np.zeros((self.__capacity, self.__stateLength), dtype='float32')
self.__nextStateIsTerminalStates = np.zeros((self.__capacity), dtype='bool')
self.__sampleCounter = 0
def store(self, state, action, reward, nextState, nextStateIsTerminalState):
experienceId = self.__sampleCounter
self.__ids[self.__writePosition] = experienceId
self.__states[self.__writePosition] = state
self.__actions[self.__writePosition] = action
self.__rewards[self.__writePosition] = reward
self.__nextStates[self.__writePosition] = nextState
self.__nextStateIsTerminalStates[self.__writePosition] = nextStateIsTerminalState
self.__writePosition += 1
self.__sampleCounter += 1
if not self.__writePositionReseted:
self.__usedCapacity += 1
if self.__writePosition == self.__capacity:
self.__writePosition = 0
self.__writePositionReseted = True
return experienceId
def size(self):
return self.__usedCapacity
def sample(self, numberOfSamples):
if self.__usedCapacity < numberOfSamples:
return None, None, None, None, None, None
sampleIndex = random.sample(range(self.__usedCapacity), numberOfSamples)
return self.__ids[sampleIndex], self.__states[sampleIndex], self.__actions[sampleIndex], self.__rewards[sampleIndex], self.__nextStates[sampleIndex], self.__nextStateIsTerminalStates[sampleIndex]
|
[
"numpy.zeros"
] |
[((320, 361), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""uint64"""'}), "(self.__capacity, dtype='uint64')\n", (328, 361), True, 'import numpy as np\n'), ((388, 452), 'numpy.zeros', 'np.zeros', (['(self.__capacity, self.__stateLength)'], {'dtype': '"""float32"""'}), "((self.__capacity, self.__stateLength), dtype='float32')\n", (396, 452), True, 'import numpy as np\n'), ((478, 518), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""uint8"""'}), "(self.__capacity, dtype='uint8')\n", (486, 518), True, 'import numpy as np\n'), ((546, 588), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""float32"""'}), "(self.__capacity, dtype='float32')\n", (554, 588), True, 'import numpy as np\n'), ((619, 683), 'numpy.zeros', 'np.zeros', (['(self.__capacity, self.__stateLength)'], {'dtype': '"""float32"""'}), "((self.__capacity, self.__stateLength), dtype='float32')\n", (627, 683), True, 'import numpy as np\n'), ((727, 766), 'numpy.zeros', 'np.zeros', (['self.__capacity'], {'dtype': '"""bool"""'}), "(self.__capacity, dtype='bool')\n", (735, 766), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import argparse
import sys
from typing import List, Optional
import bdfparser
import numpy as np
DRAWING_CHARS = "8,10,176-223"
def main():
args = parse_args()
font = bdfparser.Font(args.bdf_file)
# Choose glyphs from font and convert them to bitmaps
bitmaps = [get_bitmap_for_character(font, i) for i in range(256)]
# Make sure we're settled on the desired glyph height
height = args.height
if not height:
height = max(len(b) for b in bitmaps if b is not None)
# Make sure we have bitmaps for all the characters
fallback_bitmap = np.zeros((height, 8), dtype=np.uint8)
for i in range(len(bitmaps)):
if bitmaps[i] is None:
print(f"Warning: no glyph for char {i}", file=sys.stderr)
bitmaps[i] = fallback_bitmap
# Make the bitmaps all the same size
for i in range(len(bitmaps)):
bitmaps[i] = resize(bitmaps[i], 8, height, i in args.extend_chars)
# Write them to disk
with open(args.output_file, "wb") as f:
f.write(b"".join(to_bytes(b) for b in bitmaps))
def parse_args():
parser = argparse.ArgumentParser(
description="A tool for converting BDF fonts into DOS font format"
)
parser.add_argument(
"bdf_file", type=str, metavar="BDF-FILE",
help="BDF font file to convert"
)
parser.add_argument(
"output_file", type=str, metavar="OUTPUT-FILE",
help="Filename of resulting DOS font file"
)
parser.add_argument(
"--height", type=int, metavar="ROWS",
help="Target height. Glyphs that are too short will be padded to fit."
)
parser.add_argument(
"--extend-chars", "-x",
type=parse_byte_ranges, metavar="CHARS",
default=DRAWING_CHARS,
help=f"""
For the given character codes, enlarge the glyphs so that they
touch the edges of the bounding box. Only has an effect if the
bounding box is larger than the glyph size. If flag is not present,
defaults to "{DRAWING_CHARS}" (mostly CP437's box/line chars).
"""
)
return parser.parse_args()
def parse_byte_ranges(s):
"""Parses strings like "1,3-5" into set(1,3,4,5)."""
result = set()
for term in s.split(","):
parts = [int(p) for p in term.split("-")]
if len(parts) == 1:
hi = parts[0]
lo = parts[0]
elif len(parts) == 2:
lo, hi = min(parts), max(parts)
else:
raise ValueError(
f"""Couldn't parse "{term}" as byte or as a range of bytes"""
)
if lo < 0:
raise ValueError(f"Value out of range: {lo}")
elif hi > 255:
raise ValueError(f"Value out of range: {hi}")
result.update(range(lo, hi + 1))
return result
def get_bitmap_for_character(
font: bdfparser.Font,
char: int
) -> Optional[np.array]:
"""Returns a bitmap from the font that can represent the given CP437 code.
If no suitable glyph can be found, returns None.
"""
codepoints = get_codepoints_for_cp437(char)
available = font.glyphs.keys()
for codepoint in codepoints:
if codepoint in available:
glyph = font.glyphbycp(codepoint)
bitmap = to_bitmap(glyph)
return bitmap
return None
def get_codepoints_for_cp437(x) -> List[int]:
"""Returns possible Unicode codepoints for the given CP437 character.
This function returns a list because that allows for potential fallback
codepoints if the font does not have complete coverage. Currently, though,
this implementation only returns 1 codepoint for each character.
"""
# Handle printable ASCII chars
if x >= 32 and x <= 126:
return [x]
# Handle control chars, extended ASCII
LOWER = " ☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼"
UPPER = "⌂" \
"ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒ" \
"áíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐" \
"└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀" \
"αßΓπΣσµτΦϴΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ "
if x < 32:
code = ord(LOWER[x])
else:
code = ord(UPPER[x - 127])
return [code]
def to_bitmap(glyph: bdfparser.Glyph) -> np.array:
"""Converts a Glyph into a 2D array of zeros and ones."""
lines = glyph.draw().todata() # Array of strings like "10101"
return np.array(
[[int(bit) for bit in line] for line in lines],
dtype=np.uint8
)
def resize(bitmap: np.array, new_width, new_height, extend=False):
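    """Pad or crop a bitmap to new_width x new_height.

    Added rows/columns are zero unless `extend` is True, in which case the
    trailing repeating pattern of the glyph is tiled outward (useful for the
    box/line-drawing characters).
    """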
height, width = bitmap.shape
def split(diff):
x = diff//2
y = diff - x
return (x, y)
add_top, add_bottom = split(new_height - height)
add_left, add_right = split(new_width - width)
for add_lines in [add_left, add_top, add_right, add_bottom]:
bitmap = np.rot90(bitmap)
if add_lines < 0:
# Delete lines from base of array
bitmap = bitmap[:add_lines]
elif add_lines > 0:
# Add lines to base of array
_, current_width = bitmap.shape
new_lines_shape = (add_lines, current_width)
if extend:
pattern_length = max(get_pattern_length(bitmap), 1)
pattern = bitmap[-pattern_length:]
new_lines = np.resize(pattern, new_lines_shape)
else:
new_lines = np.zeros(new_lines_shape, dtype=np.uint8)
bitmap = np.concatenate([bitmap, new_lines])
return bitmap
def get_pattern_length(bitmap: np.array, max_length=4):
"""Measure the length of any repeating pattern at the bottom of the array.
For example, if the bottom rows were of the form ...ABCDECDE, this
function would return 3, because the three rows CDE repeat.
This function returns the length of the longest pattern it finds, not to
surpass max_length. Returns 0 if no repeating pattern is found.
"""
height, _ = bitmap.shape
max_length = min(max_length, height//2)
for length in range(max_length, 1, -1):
a = bitmap[-length:]
b = bitmap[-2*length:-length]
if np.array_equal(a, b):
return length
return 0
def to_bytes(bitmap: np.array):
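    """Pack each 8-pixel row into a single byte, most significant bit first."""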
height, width = bitmap.shape
assert(1 <= height <= 32)
assert(width == 8)
def to_byte(row):
result = 0
for bit in row:
result = (result << 1) + bit
return result
return bytes(to_byte(row) for row in bitmap)
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser",
"numpy.resize",
"numpy.zeros",
"numpy.rot90",
"bdfparser.Font",
"numpy.array_equal",
"numpy.concatenate"
] |
[((201, 230), 'bdfparser.Font', 'bdfparser.Font', (['args.bdf_file'], {}), '(args.bdf_file)\n', (215, 230), False, 'import bdfparser\n'), ((604, 641), 'numpy.zeros', 'np.zeros', (['(height, 8)'], {'dtype': 'np.uint8'}), '((height, 8), dtype=np.uint8)\n', (612, 641), True, 'import numpy as np\n'), ((1128, 1224), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A tool for converting BDF fonts into DOS font format"""'}), "(description=\n 'A tool for converting BDF fonts into DOS font format')\n", (1151, 1224), False, 'import argparse\n'), ((4864, 4880), 'numpy.rot90', 'np.rot90', (['bitmap'], {}), '(bitmap)\n', (4872, 4880), True, 'import numpy as np\n'), ((6156, 6176), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (6170, 6176), True, 'import numpy as np\n'), ((5478, 5513), 'numpy.concatenate', 'np.concatenate', (['[bitmap, new_lines]'], {}), '([bitmap, new_lines])\n', (5492, 5513), True, 'import numpy as np\n'), ((5333, 5368), 'numpy.resize', 'np.resize', (['pattern', 'new_lines_shape'], {}), '(pattern, new_lines_shape)\n', (5342, 5368), True, 'import numpy as np\n'), ((5415, 5456), 'numpy.zeros', 'np.zeros', (['new_lines_shape'], {'dtype': 'np.uint8'}), '(new_lines_shape, dtype=np.uint8)\n', (5423, 5456), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import math
class Position_Encoder(object):
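    """Fixed sinusoidal positional encoding (sin on even, cos on odd indices).

    __call__ scales the input embeddings by sqrt(emb_size), concatenates the
    positional encoding for each timestep, and masks positions beyond each
    sequence length.
    """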
def __init__(self, emb_size, max_len=5000):
self.emb_size = emb_size
self.max_len = max_len
pe = np.zeros([max_len, emb_size], np.float32)
position = np.expand_dims(np.arange(0, max_len), 1).astype(np.float32)
        div_term = np.exp(np.arange(0, emb_size, 2).astype(np.float32) * -(math.log(10000.0) / emb_size))
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
pe = np.expand_dims(pe, 1)
self.pe = tf.Variable(pe, trainable=False)
def __call__(self, inputs, seq_length):
with tf.variable_scope('position_encoder'):
embs = tf.transpose(inputs, [1, 0, 2])
max_time = tf.shape(embs)[0]
batch_size = tf.shape(embs)[1]
embs = embs * tf.sqrt(float(self.emb_size))
embs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
embs_ta = embs_ta.unstack(embs)
output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
t0 = tf.constant(0, dtype=tf.int32)
f0 = tf.zeros([batch_size], dtype=tf.bool)
mask = tf.expand_dims(tf.cast(tf.sequence_mask(seq_length), tf.float32), -1)
def loop_fn(t, output_ta, f):
cur_emb = embs_ta.read(t)
output = tf.concat([cur_emb, tf.tile(self.pe[t], [batch_size, 1])], -1)
output_ta = output_ta.write(t, output)
f = tf.greater_equal(t + 1, seq_length)
return t + 1, output_ta, f
_, output_ta, _ = tf.while_loop(
cond=lambda _1, _2, f: tf.logical_not(tf.reduce_all(f)),
body=loop_fn,
loop_vars=(t0, output_ta, f0)
)
embs = tf.transpose(output_ta.stack(), [1, 0, 2])
embs *= mask
return embs
class Cnn_extractor(object):
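    """Multi-width 1D convolutional feature extractor.

    Branches with kernel widths 1, 2 and 3 (SELU activations plus batch norm)
    are concatenated, projected back to hidden_dim with a dense layer and
    passed through dropout.
    """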
def __init__(self, hidden_dim):
self.hidden_dim = hidden_dim
self.sw0 = tf.layers.Conv1D(self.hidden_dim, 1, padding='same')
self.bn0 = tf.layers.BatchNormalization()
self.sw1 = tf.layers.Conv1D(self.hidden_dim, 1, padding='same')
self.bn1 = tf.layers.BatchNormalization()
self.sw2 = tf.layers.Conv1D(self.hidden_dim, 2, padding='same')
self.bn2 = tf.layers.BatchNormalization()
self.sw2_2 = tf.layers.Conv1D(self.hidden_dim, 2, padding='same')
self.bn2_2 = tf.layers.BatchNormalization()
self.sw3 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')
self.bn3 = tf.layers.BatchNormalization()
self.sw3_2 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')
self.bn3_2 = tf.layers.BatchNormalization()
self.sw3_3 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')
self.bn3_3 = tf.layers.BatchNormalization()
def __call__(self, input):
with tf.variable_scope('cnn_extractor'):
input = self.sw0(input)
input = tf.nn.selu(input)
input = self.bn0(input)
sw1 = self.sw1(input)
sw1 = tf.nn.selu(sw1)
sw1 = self.bn1(sw1)
sw2 = self.sw2(input)
sw2 = tf.nn.selu(sw2)
sw2 = self.bn2(sw2)
sw2 = self.sw2_2(sw2)
sw2 = tf.nn.selu(sw2)
sw2 = self.bn2_2(sw2)
sw3 = self.sw3(input)
sw3 = tf.nn.selu(sw3)
sw3 = self.bn3(sw3)
sw3 = self.sw3_2(sw3)
sw3 = tf.nn.selu(sw3)
sw3 = self.bn3_2(sw3)
sw3 = self.sw3_3(sw3)
sw3 = tf.nn.selu(sw3)
sw3 = self.bn3_3(sw3)
cnn_output = tf.concat([sw1, sw2, sw3], -1)
cnn_output = tf.layers.dense(cnn_output, self.hidden_dim, activation=tf.nn.selu)
return tf.nn.dropout(cnn_output, keep_prob=0.5)
class Attention(object):
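    """Per-timestep attention over a learned set of tag embeddings.

    For each input step the query attends over the tag embeddings; the call
    returns the unnormalised attention scores and the context-augmented
    outputs.
    """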
def __init__(self, hidden_dim, num_tags):
super(Attention, self).__init__()
self.hidden_dim = hidden_dim
self.num_tags = num_tags
self.attn_dense = tf.layers.Dense(self.hidden_dim, use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
self.attn_linear = tf.layers.Dense(self.hidden_dim, use_bias=True, activation=tf.nn.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.zeros_initializer())
self.__init_embs()
def __init_embs(self):
with tf.variable_scope('tag_embedding'):
self._tag_embeddings = tf.get_variable(name='_tag_embeddings', shape=[self.num_tags, 25], dtype=tf.float32)
def __call__(self, input, sequence_lengths):
with tf.variable_scope('attention'):
tag_embeddings = tf.nn.embedding_lookup(params=self._tag_embeddings,
ids=tf.constant(list(range(self.num_tags)), dtype=tf.int32),
name='tag_embeddings')
query = tf.transpose(input, [1, 0, 2])
max_time = tf.shape(query)[0]
batch_size = tf.shape(query)[1]
context = tf.tile(tf.expand_dims(tag_embeddings, 0),
[batch_size, 1, 1])
query_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
query_ta = query_ta.unstack(query)
attn_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
t0 = tf.constant(0, dtype=tf.int32)
f0 = tf.zeros([batch_size], dtype=tf.bool)
def loop_fn(t, attn_ta, output_ta, f):
cur_q = query_ta.read(t)
gamma_h = self.attn_dense(context)
gamma_h = tf.squeeze(tf.matmul(gamma_h, tf.expand_dims(cur_q, -1)), -1)
weights = tf.nn.softmax(gamma_h, -1)
c_t = tf.squeeze(tf.matmul(tf.expand_dims(weights, 1), context), 1)
output = self.attn_linear(tf.concat([c_t, cur_q], -1))
attn_ta = attn_ta.write(t, gamma_h)
output_ta = output_ta.write(t, output)
f = tf.greater_equal(t + 1, sequence_lengths)
return t + 1, attn_ta, output_ta, f
_, attn_ta, output_ta, _ = tf.while_loop(
cond=lambda _1, _2, _3, f: tf.logical_not(tf.reduce_all(f)),
body=loop_fn,
loop_vars=(t0, attn_ta, output_ta, f0)
)
self.attn_cnn_outputs = tf.transpose(output_ta.stack(), [1, 0, 2])
attn_weights = tf.transpose(attn_ta.stack(), [1, 0, 2])
return attn_weights, self.attn_cnn_outputs
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reduce_all",
"tensorflow.Variable",
"numpy.sin",
"numpy.arange",
"tensorflow.greater_equal",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.nn.selu",
"math.log",
"tensorflow.layers.BatchNormalization",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.tile",
"numpy.cos",
"tensorflow.sequence_mask",
"tensorflow.layers.Conv1D",
"tensorflow.zeros_initializer",
"tensorflow.expand_dims",
"tensorflow.layers.dense",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.TensorArray",
"tensorflow.nn.dropout"
] |
[((213, 254), 'numpy.zeros', 'np.zeros', (['[max_len, emb_size]', 'np.float32'], {}), '([max_len, emb_size], np.float32)\n', (221, 254), True, 'import numpy as np\n'), ((462, 489), 'numpy.sin', 'np.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (468, 489), True, 'import numpy as np\n'), ((512, 539), 'numpy.cos', 'np.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (518, 539), True, 'import numpy as np\n'), ((553, 574), 'numpy.expand_dims', 'np.expand_dims', (['pe', '(1)'], {}), '(pe, 1)\n', (567, 574), True, 'import numpy as np\n'), ((593, 625), 'tensorflow.Variable', 'tf.Variable', (['pe'], {'trainable': '(False)'}), '(pe, trainable=False)\n', (604, 625), True, 'import tensorflow as tf\n'), ((2073, 2125), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(1)'], {'padding': '"""same"""'}), "(self.hidden_dim, 1, padding='same')\n", (2089, 2125), True, 'import tensorflow as tf\n'), ((2145, 2175), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2173, 2175), True, 'import tensorflow as tf\n'), ((2195, 2247), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(1)'], {'padding': '"""same"""'}), "(self.hidden_dim, 1, padding='same')\n", (2211, 2247), True, 'import tensorflow as tf\n'), ((2267, 2297), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2295, 2297), True, 'import tensorflow as tf\n'), ((2317, 2369), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(2)'], {'padding': '"""same"""'}), "(self.hidden_dim, 2, padding='same')\n", (2333, 2369), True, 'import tensorflow as tf\n'), ((2389, 2419), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2417, 2419), True, 'import tensorflow as tf\n'), ((2441, 2493), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(2)'], {'padding': '"""same"""'}), "(self.hidden_dim, 2, padding='same')\n", (2457, 2493), True, 'import tensorflow as tf\n'), ((2515, 2545), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2543, 2545), True, 'import tensorflow as tf\n'), ((2565, 2617), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(3)'], {'padding': '"""same"""'}), "(self.hidden_dim, 3, padding='same')\n", (2581, 2617), True, 'import tensorflow as tf\n'), ((2637, 2667), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2665, 2667), True, 'import tensorflow as tf\n'), ((2689, 2741), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(3)'], {'padding': '"""same"""'}), "(self.hidden_dim, 3, padding='same')\n", (2705, 2741), True, 'import tensorflow as tf\n'), ((2763, 2793), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2791, 2793), True, 'import tensorflow as tf\n'), ((2815, 2867), 'tensorflow.layers.Conv1D', 'tf.layers.Conv1D', (['self.hidden_dim', '(3)'], {'padding': '"""same"""'}), "(self.hidden_dim, 3, padding='same')\n", (2831, 2867), True, 'import tensorflow as tf\n'), ((2889, 2919), 'tensorflow.layers.BatchNormalization', 'tf.layers.BatchNormalization', ([], {}), '()\n', (2917, 2919), True, 'import tensorflow as tf\n'), ((684, 721), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""position_encoder"""'], {}), "('position_encoder')\n", (701, 721), True, 'import tensorflow as tf\n'), ((742, 773), 'tensorflow.transpose', 'tf.transpose', (['inputs', '[1, 0, 
2]'], {}), '(inputs, [1, 0, 2])\n', (754, 773), True, 'import tensorflow as tf\n'), ((936, 983), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time'}), '(dtype=tf.float32, size=max_time)\n', (950, 983), True, 'import tensorflow as tf\n'), ((1052, 1111), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'dynamic_size': '(True)', 'size': '(0)'}), '(dtype=tf.float32, dynamic_size=True, size=0)\n', (1066, 1111), True, 'import tensorflow as tf\n'), ((1129, 1159), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (1140, 1159), True, 'import tensorflow as tf\n'), ((1177, 1214), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]'], {'dtype': 'tf.bool'}), '([batch_size], dtype=tf.bool)\n', (1185, 1214), True, 'import tensorflow as tf\n'), ((2965, 2999), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cnn_extractor"""'], {}), "('cnn_extractor')\n", (2982, 2999), True, 'import tensorflow as tf\n'), ((3057, 3074), 'tensorflow.nn.selu', 'tf.nn.selu', (['input'], {}), '(input)\n', (3067, 3074), True, 'import tensorflow as tf\n'), ((3163, 3178), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw1'], {}), '(sw1)\n', (3173, 3178), True, 'import tensorflow as tf\n'), ((3263, 3278), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw2'], {}), '(sw2)\n', (3273, 3278), True, 'import tensorflow as tf\n'), ((3363, 3378), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw2'], {}), '(sw2)\n', (3373, 3378), True, 'import tensorflow as tf\n'), ((3465, 3480), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw3'], {}), '(sw3)\n', (3475, 3480), True, 'import tensorflow as tf\n'), ((3565, 3580), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw3'], {}), '(sw3)\n', (3575, 3580), True, 'import tensorflow as tf\n'), ((3667, 3682), 'tensorflow.nn.selu', 'tf.nn.selu', (['sw3'], {}), '(sw3)\n', (3677, 3682), True, 'import tensorflow as tf\n'), ((3755, 3785), 'tensorflow.concat', 'tf.concat', (['[sw1, sw2, sw3]', '(-1)'], {}), '([sw1, sw2, sw3], -1)\n', (3764, 3785), True, 'import tensorflow as tf\n'), ((3811, 3878), 'tensorflow.layers.dense', 'tf.layers.dense', (['cnn_output', 'self.hidden_dim'], {'activation': 'tf.nn.selu'}), '(cnn_output, self.hidden_dim, activation=tf.nn.selu)\n', (3826, 3878), True, 'import tensorflow as tf\n'), ((3898, 3938), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['cnn_output'], {'keep_prob': '(0.5)'}), '(cnn_output, keep_prob=0.5)\n', (3911, 3938), True, 'import tensorflow as tf\n'), ((4653, 4687), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""tag_embedding"""'], {}), "('tag_embedding')\n", (4670, 4687), True, 'import tensorflow as tf\n'), ((4724, 4813), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""_tag_embeddings"""', 'shape': '[self.num_tags, 25]', 'dtype': 'tf.float32'}), "(name='_tag_embeddings', shape=[self.num_tags, 25], dtype=tf\n .float32)\n", (4739, 4813), True, 'import tensorflow as tf\n'), ((4873, 4903), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), "('attention')\n", (4890, 4903), True, 'import tensorflow as tf\n'), ((5204, 5234), 'tensorflow.transpose', 'tf.transpose', (['input', '[1, 0, 2]'], {}), '(input, [1, 0, 2])\n', (5216, 5234), True, 'import tensorflow as tf\n'), ((5459, 5506), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time'}), '(dtype=tf.float32, size=max_time)\n', (5473, 5506), True, 'import tensorflow as tf\n'), ((5576, 5635), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 
'dynamic_size': '(True)', 'size': '(0)'}), '(dtype=tf.float32, dynamic_size=True, size=0)\n', (5590, 5635), True, 'import tensorflow as tf\n'), ((5660, 5719), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'dynamic_size': '(True)', 'size': '(0)'}), '(dtype=tf.float32, dynamic_size=True, size=0)\n', (5674, 5719), True, 'import tensorflow as tf\n'), ((5737, 5767), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (5748, 5767), True, 'import tensorflow as tf\n'), ((5785, 5822), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]'], {'dtype': 'tf.bool'}), '([batch_size], dtype=tf.bool)\n', (5793, 5822), True, 'import tensorflow as tf\n'), ((797, 811), 'tensorflow.shape', 'tf.shape', (['embs'], {}), '(embs)\n', (805, 811), True, 'import tensorflow as tf\n'), ((840, 854), 'tensorflow.shape', 'tf.shape', (['embs'], {}), '(embs)\n', (848, 854), True, 'import tensorflow as tf\n'), ((1551, 1586), 'tensorflow.greater_equal', 'tf.greater_equal', (['(t + 1)', 'seq_length'], {}), '(t + 1, seq_length)\n', (1567, 1586), True, 'import tensorflow as tf\n'), ((4261, 4299), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4297, 4299), True, 'import tensorflow as tf\n'), ((4461, 4499), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4497, 4499), True, 'import tensorflow as tf\n'), ((4561, 4583), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4581, 4583), True, 'import tensorflow as tf\n'), ((5258, 5273), 'tensorflow.shape', 'tf.shape', (['query'], {}), '(query)\n', (5266, 5273), True, 'import tensorflow as tf\n'), ((5302, 5317), 'tensorflow.shape', 'tf.shape', (['query'], {}), '(query)\n', (5310, 5317), True, 'import tensorflow as tf\n'), ((5351, 5384), 'tensorflow.expand_dims', 'tf.expand_dims', (['tag_embeddings', '(0)'], {}), '(tag_embeddings, 0)\n', (5365, 5384), True, 'import tensorflow as tf\n'), ((6081, 6107), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['gamma_h', '(-1)'], {}), '(gamma_h, -1)\n', (6094, 6107), True, 'import tensorflow as tf\n'), ((6390, 6431), 'tensorflow.greater_equal', 'tf.greater_equal', (['(t + 1)', 'sequence_lengths'], {}), '(t + 1, sequence_lengths)\n', (6406, 6431), True, 'import tensorflow as tf\n'), ((289, 310), 'numpy.arange', 'np.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (298, 310), True, 'import numpy as np\n'), ((1257, 1285), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['seq_length'], {}), '(seq_length)\n', (1273, 1285), True, 'import tensorflow as tf\n'), ((6234, 6261), 'tensorflow.concat', 'tf.concat', (['[c_t, cur_q]', '(-1)'], {}), '([c_t, cur_q], -1)\n', (6243, 6261), True, 'import tensorflow as tf\n'), ((360, 385), 'numpy.arange', 'np.arange', (['(0)', 'emb_size', '(2)'], {}), '(0, emb_size, 2)\n', (369, 385), True, 'import numpy as np\n'), ((409, 426), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (417, 426), False, 'import math\n'), ((1433, 1469), 'tensorflow.tile', 'tf.tile', (['self.pe[t]', '[batch_size, 1]'], {}), '(self.pe[t], [batch_size, 1])\n', (1440, 1469), True, 'import tensorflow as tf\n'), ((6023, 6048), 'tensorflow.expand_dims', 'tf.expand_dims', (['cur_q', '(-1)'], {}), '(cur_q, -1)\n', (6037, 6048), True, 'import tensorflow as tf\n'), ((6151, 6177), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights', '(1)'], {}), '(weights, 1)\n', (6165, 6177), True, 'import tensorflow as tf\n'), ((1730, 1746), 
'tensorflow.reduce_all', 'tf.reduce_all', (['f'], {}), '(f)\n', (1743, 1746), True, 'import tensorflow as tf\n'), ((6597, 6613), 'tensorflow.reduce_all', 'tf.reduce_all', (['f'], {}), '(f)\n', (6610, 6613), True, 'import tensorflow as tf\n')]
|
import os
import unittest
import numpy as np
import numpy.random as rnd
import tensorflow as tf
from pymanopt.function import TensorFlow
from . import _backend_tests
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class TestUnaryFunction(_backend_tests.TestUnaryFunction):
def setUp(self):
super().setUp()
x = tf.Variable(tf.zeros(self.n, dtype=np.float64), name="x")
@TensorFlow(x)
def cost(x):
return tf.reduce_sum(x ** 2)
self.cost = cost
class TestNaryFunction(_backend_tests.TestNaryFunction):
def setUp(self):
super().setUp()
n = self.n
x = tf.Variable(tf.zeros(n, dtype=np.float64), name="x")
y = tf.Variable(tf.zeros(n, dtype=np.float64), name="y")
@TensorFlow(x, y)
def cost(x, y):
return tf.tensordot(x, y, axes=1)
self.cost = cost
class TestNaryParameterGrouping(_backend_tests.TestNaryParameterGrouping):
def setUp(self):
super().setUp()
n = self.n
x = tf.Variable(tf.zeros(n, dtype=np.float64), name="x")
y = tf.Variable(tf.zeros(n, dtype=np.float64), name="y")
z = tf.Variable(tf.zeros(n, dtype=np.float64), name="z")
@TensorFlow(x, y, z)
def cost(x, y, z):
return tf.reduce_sum(x ** 2 + y + z ** 3)
self.cost = cost
class TestVector(_backend_tests.TestVector):
def setUp(self):
super().setUp()
n = self.n
X = tf.Variable(tf.zeros(n, dtype=np.float64))
@TensorFlow(X)
def cost(X):
return tf.exp(tf.reduce_sum(X ** 2))
self.cost = cost
class TestMatrix(_backend_tests.TestMatrix):
def setUp(self):
super().setUp()
m = self.m
n = self.n
X = tf.Variable(tf.zeros((m, n), dtype=np.float64))
@TensorFlow(X)
def cost(X):
return tf.exp(tf.reduce_sum(X ** 2))
self.cost = cost
class TestTensor3(_backend_tests.TestTensor3):
def setUp(self):
super().setUp()
n1 = self.n1
n2 = self.n2
n3 = self.n3
X = tf.Variable(tf.zeros([n1, n2, n3], dtype=np.float64))
@TensorFlow(X)
def cost(X):
return tf.exp(tf.reduce_sum(X ** 2))
self.cost = cost
class TestMixed(_backend_tests.TestMixed):
def setUp(self):
super().setUp()
n1 = self.n1
n2 = self.n2
n3 = self.n3
n4 = self.n4
n5 = self.n5
n6 = self.n6
x = tf.Variable(tf.zeros(n1, dtype=np.float64))
y = tf.Variable(tf.zeros([n2, n3], dtype=np.float64))
z = tf.Variable(tf.zeros([n4, n5, n6], dtype=np.float64))
@TensorFlow(x, y, z)
def cost(x, y, z):
return (tf.exp(tf.reduce_sum(x ** 2)) +
tf.exp(tf.reduce_sum(y ** 2)) +
tf.exp(tf.reduce_sum(z ** 2)))
self.cost = cost
class TestUserProvidedSession(unittest.TestCase):
def test_user_session(self):
class MockSession:
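            # Stand-in session whose run() always raises, proving that the
            # user-supplied session is actually used by the cost function.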
def run(*args, **kwargs):
raise RuntimeError
n = 10
x = tf.Variable(tf.zeros(n, dtype=tf.float64), name="x")
@TensorFlow(x, session=MockSession())
def cost(x):
return tf.reduce_sum(x)
with self.assertRaises(RuntimeError):
cost(rnd.randn(n))
|
[
"tensorflow.reduce_sum",
"numpy.random.randn",
"pymanopt.function.TensorFlow",
"tensorflow.zeros",
"tensorflow.tensordot"
] |
[((397, 410), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x'], {}), '(x)\n', (407, 410), False, 'from pymanopt.function import TensorFlow\n'), ((764, 780), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x', 'y'], {}), '(x, y)\n', (774, 780), False, 'from pymanopt.function import TensorFlow\n'), ((1225, 1244), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1235, 1244), False, 'from pymanopt.function import TensorFlow\n'), ((1530, 1543), 'pymanopt.function.TensorFlow', 'TensorFlow', (['X'], {}), '(X)\n', (1540, 1543), False, 'from pymanopt.function import TensorFlow\n'), ((1842, 1855), 'pymanopt.function.TensorFlow', 'TensorFlow', (['X'], {}), '(X)\n', (1852, 1855), False, 'from pymanopt.function import TensorFlow\n'), ((2187, 2200), 'pymanopt.function.TensorFlow', 'TensorFlow', (['X'], {}), '(X)\n', (2197, 2200), False, 'from pymanopt.function import TensorFlow\n'), ((2709, 2728), 'pymanopt.function.TensorFlow', 'TensorFlow', (['x', 'y', 'z'], {}), '(x, y, z)\n', (2719, 2728), False, 'from pymanopt.function import TensorFlow\n'), ((341, 375), 'tensorflow.zeros', 'tf.zeros', (['self.n'], {'dtype': 'np.float64'}), '(self.n, dtype=np.float64)\n', (349, 375), True, 'import tensorflow as tf\n'), ((451, 472), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {}), '(x ** 2)\n', (464, 472), True, 'import tensorflow as tf\n'), ((648, 677), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (656, 677), True, 'import tensorflow as tf\n'), ((713, 742), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (721, 742), True, 'import tensorflow as tf\n'), ((824, 850), 'tensorflow.tensordot', 'tf.tensordot', (['x', 'y'], {'axes': '(1)'}), '(x, y, axes=1)\n', (836, 850), True, 'import tensorflow as tf\n'), ((1044, 1073), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1052, 1073), True, 'import tensorflow as tf\n'), ((1109, 1138), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1117, 1138), True, 'import tensorflow as tf\n'), ((1174, 1203), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1182, 1203), True, 'import tensorflow as tf\n'), ((1291, 1325), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2 + y + z ** 3)'], {}), '(x ** 2 + y + z ** 3)\n', (1304, 1325), True, 'import tensorflow as tf\n'), ((1489, 1518), 'tensorflow.zeros', 'tf.zeros', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (1497, 1518), True, 'import tensorflow as tf\n'), ((1796, 1830), 'tensorflow.zeros', 'tf.zeros', (['(m, n)'], {'dtype': 'np.float64'}), '((m, n), dtype=np.float64)\n', (1804, 1830), True, 'import tensorflow as tf\n'), ((2135, 2175), 'tensorflow.zeros', 'tf.zeros', (['[n1, n2, n3]'], {'dtype': 'np.float64'}), '([n1, n2, n3], dtype=np.float64)\n', (2143, 2175), True, 'import tensorflow as tf\n'), ((2539, 2569), 'tensorflow.zeros', 'tf.zeros', (['n1'], {'dtype': 'np.float64'}), '(n1, dtype=np.float64)\n', (2547, 2569), True, 'import tensorflow as tf\n'), ((2595, 2631), 'tensorflow.zeros', 'tf.zeros', (['[n2, n3]'], {'dtype': 'np.float64'}), '([n2, n3], dtype=np.float64)\n', (2603, 2631), True, 'import tensorflow as tf\n'), ((2657, 2697), 'tensorflow.zeros', 'tf.zeros', (['[n4, n5, n6]'], {'dtype': 'np.float64'}), '([n4, n5, n6], dtype=np.float64)\n', (2665, 2697), True, 'import tensorflow as tf\n'), ((3163, 3192), 'tensorflow.zeros', 
'tf.zeros', (['n'], {'dtype': 'tf.float64'}), '(n, dtype=tf.float64)\n', (3171, 3192), True, 'import tensorflow as tf\n'), ((3291, 3307), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), '(x)\n', (3304, 3307), True, 'import tensorflow as tf\n'), ((1591, 1612), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(X ** 2)'], {}), '(X ** 2)\n', (1604, 1612), True, 'import tensorflow as tf\n'), ((1903, 1924), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(X ** 2)'], {}), '(X ** 2)\n', (1916, 1924), True, 'import tensorflow as tf\n'), ((2248, 2269), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(X ** 2)'], {}), '(X ** 2)\n', (2261, 2269), True, 'import tensorflow as tf\n'), ((3372, 3384), 'numpy.random.randn', 'rnd.randn', (['n'], {}), '(n)\n', (3381, 3384), True, 'import numpy.random as rnd\n'), ((2887, 2908), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(z ** 2)'], {}), '(z ** 2)\n', (2900, 2908), True, 'import tensorflow as tf\n'), ((2783, 2804), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {}), '(x ** 2)\n', (2796, 2804), True, 'import tensorflow as tf\n'), ((2835, 2856), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(y ** 2)'], {}), '(y ** 2)\n', (2848, 2856), True, 'import tensorflow as tf\n')]
|
import numpy as np
import robodk
import time
import queue
from scipy.spatial.transform import Rotation, Slerp
from PIL import Image
from robolink import *
from matplotlib import pyplot as plt
import multiprocessing
from constants import BELT_VELOCITY
BOX_RANDOM_ANGLE = np.pi / 8.0
BOX_X_RANDOM = 50.0
GRAVITY = -9.81
class SimulationLoop:
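    """Background loop that advances the picking scene in RoboDK.

    Commands (reset_box, pause, close) arrive through a queue; each tick moves
    the box along the conveyor belt according to the elapsed simulation time
    and respawns it with a randomised pose once it reaches the belt end.
    """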
CONVEYOR_BELT_END = 100.0
def __init__(self, queue, lock):
self.sleep_for = 1.0 / 60.0
self.link = Robolink()
self.box_velocity = np.array([0.0, -BELT_VELOCITY, 0.0])
self.paused = False
self.done = False
self.previous_sim_time = None
self.queue = queue
self.box = self.link.Item('Box')
self.write_lock = lock
def run(self):
self.link.setSimulationSpeed(1.0)
self.previous_sim_time = self.link.SimulationTime()
while not self.done:
self._read_queue()
if self.paused:
time.sleep(0.05)
continue
self._step_simulation()
time.sleep(self.sleep_for)
def _read_queue(self):
try:
msg = self.queue.get(False)
try:
self.write_lock.acquire()
getattr(self, msg[0])(*msg[1:])
finally:
self.write_lock.release()
except queue.Empty:
pass
def _step_simulation(self):
current_time = self.link.SimulationTime()
diff = current_time - self.previous_sim_time
try:
self.write_lock.acquire()
self.previous_sim_time = current_time
if self.box.Parent().Name() != 'picking_setup':
# Box is in the robot's hand. Don't do anything.
return
current_pose = np.array(self.box.Pose().Rows())
if current_pose[1, 3] < self.CONVEYOR_BELT_END:
self.reset_box()
return
if self.box.Parent().Name() == "picking_setup":
# On conveyor belt. Let's move it.
current_pose[:3, 3] += diff * self.box_velocity * 1000.0 # Pose is in millimeters.
if current_pose[2, 3] > 5.0:
z = current_pose[2, 3]
current_pose[2, 3] = max(0.0, z + diff * GRAVITY * 1000.0)
self.box.setPose(robodk.Mat(current_pose.tolist()))
finally:
self.write_lock.release()
def reset_box(self):
gripper = self.link.Item('Gripper')
gripper.DetachAll()
try:
box = self.link.Item('Box')
if box.Name() == "Box":
box.Delete()
except Exception as e:
print(e)
box_template = self.link.Item('BoxTemplate')
box_template.Copy()
self.box = self.link.Paste(self.link.Item('picking_setup'))
self.box.setName("Box")
self.box.setParent(self.link.Item('picking_setup'))
box_pose = np.array(self.box.Pose().Rows())
box_pose[:3, :3] = Rotation.from_rotvec([0.0, 0.0,
-np.pi / 2.0 + np.random.uniform(-BOX_RANDOM_ANGLE, BOX_RANDOM_ANGLE)
]).as_matrix()
box_pose[0, 3] = 200.0 + np.random.uniform(-BOX_X_RANDOM, BOX_X_RANDOM)
box_pose[1, 3] = 1800.0
box_pose[2, 3] = 0.0
self.box.setPose(robodk.Mat(box_pose.tolist()))
self.box.Scale(np.random.uniform(np.array([0.7, 0.7, 0.1]), np.ones(3)).tolist())
def pause(self, value):
self.paused = value
if not self.paused:
self.previous_sim_time = self.link.SimulationTime()
def close(self):
self.done = True
def simulation_loop(queue, lock):
loop = SimulationLoop(queue, lock).run()
class Simulation:
def __init__(self):
self.queue = multiprocessing.Queue()
self.write_lock = multiprocessing.Lock()
self.background_thread = multiprocessing.Process(target=simulation_loop, args=(self.queue, self.write_lock), daemon=True)
self.background_thread.start()
def reset_box(self):
self.queue.put(('reset_box',))
def pause(self, value):
self.queue.put(('pause', value))
def close(self):
self.queue.put(('close',))
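

# --- Illustrative usage sketch (not part of the original module) ---
# Assumes RoboDK is running with the 'picking_setup' station loaded (including
# the 'Box', 'BoxTemplate' and 'Gripper' items), since the background loop
# connects to it via Robolink().
if __name__ == '__main__':
    sim = Simulation()      # spawns the background simulation process
    time.sleep(5.0)        # let the box travel along the belt
    sim.pause(True)        # freeze the simulation
    sim.reset_box()        # respawn the box at the start of the belt
    sim.pause(False)       # resume
    time.sleep(5.0)
    sim.close()            # ask the background loop to stop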
|
[
"numpy.random.uniform",
"multiprocessing.Lock",
"numpy.ones",
"time.sleep",
"numpy.array",
"multiprocessing.Queue",
"multiprocessing.Process"
] |
[((504, 540), 'numpy.array', 'np.array', (['[0.0, -BELT_VELOCITY, 0.0]'], {}), '([0.0, -BELT_VELOCITY, 0.0])\n', (512, 540), True, 'import numpy as np\n'), ((3787, 3810), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (3808, 3810), False, 'import multiprocessing\n'), ((3837, 3859), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (3857, 3859), False, 'import multiprocessing\n'), ((3893, 3994), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'simulation_loop', 'args': '(self.queue, self.write_lock)', 'daemon': '(True)'}), '(target=simulation_loop, args=(self.queue, self.\n write_lock), daemon=True)\n', (3916, 3994), False, 'import multiprocessing\n'), ((1048, 1074), 'time.sleep', 'time.sleep', (['self.sleep_for'], {}), '(self.sleep_for)\n', (1058, 1074), False, 'import time\n'), ((3193, 3239), 'numpy.random.uniform', 'np.random.uniform', (['(-BOX_X_RANDOM)', 'BOX_X_RANDOM'], {}), '(-BOX_X_RANDOM, BOX_X_RANDOM)\n', (3210, 3239), True, 'import numpy as np\n'), ((958, 974), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (968, 974), False, 'import time\n'), ((3398, 3423), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.1]'], {}), '([0.7, 0.7, 0.1])\n', (3406, 3423), True, 'import numpy as np\n'), ((3425, 3435), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3432, 3435), True, 'import numpy as np\n'), ((3082, 3136), 'numpy.random.uniform', 'np.random.uniform', (['(-BOX_RANDOM_ANGLE)', 'BOX_RANDOM_ANGLE'], {}), '(-BOX_RANDOM_ANGLE, BOX_RANDOM_ANGLE)\n', (3099, 3136), True, 'import numpy as np\n')]
|
#
# Functions wrapped for the GUI; not meant to be run directly
# Author: Xiaohei
# Updatetime: 2021-12-01
#
import cv2
import os
import numpy
import pickle
from enhance import image_enhance
def get_descriptors(img):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img = clahe.apply(img)
img = image_enhance.image_enhance(img)
img = numpy.array(img, dtype=numpy.uint8)
# Threshold
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
# Normalize to 0 and 1 range
img[img == 255] = 1
# Harris corners
harris_corners = cv2.cornerHarris(img, 3, 3, 0.04)
    harris_normalized = cv2.normalize(harris_corners, None, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32FC1)  # dst=None; 0/255 are the alpha/beta bounds
threshold_harris = 125
# Extract keypoints
keypoints = []
for x in range(0, harris_normalized.shape[0]):
for y in range(0, harris_normalized.shape[1]):
if harris_normalized[x][y] > threshold_harris:
keypoints.append(cv2.KeyPoint(y, x, 1))
# Define descriptor
orb = cv2.ORB_create()
# Compute descriptors
_, des = orb.compute(img, keypoints)
return keypoints, des
def match(des1, path, name_lst):
avg_lst = []
if name_lst:
for name in name_lst:
with open("{}/{}".format(path, name), "rb+") as f:
des2 = pickle.load(f)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda match: match.distance)
score = 0
for match in matches:
score += match.distance
avg = score / len(matches)
avg_lst.append(avg)
return avg_lst
else:
return None
def run_app(image_path, data_path):
img1 = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
if img1 is not None:
img1 = cv2.resize(img1, dsize=(256, 364))
kp1, des1 = get_descriptors(img1)
else:
raise Exception("Invalid image path!")
address_lst = [name for name in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, name))]
name_lst = list(address_lst)
avgs = match(des1, data_path, name_lst)
score_threshold = 40
if avgs is not None:
if min(avgs) < score_threshold:
flag = True
name = name_lst[avgs.index(min(avgs))]
else:
flag = False
name = name_lst[avgs.index(min(avgs))]
name1 = image_path.replace("\\", "/").split("/")[-1].split(".")[0]
# name1 = input("Input a name to save the fingerprint: ")
if name1:
with open("{}/{}".format(data_path, name1), "wb+") as f:
pickle.dump(des1, f)
else:
flag = False
name = "None"
name1 = image_path.replace("\\", "/").split("/")[-1].split(".")[0]
# name1 = input("Input a name to save the fingerprint: ")
if name1:
with open("{}/{}".format(data_path, name1), "wb+") as f:
pickle.dump(des1, f)
return flag, name
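

# --- Illustrative usage sketch (not part of the original module) ---
# The paths below are hypothetical: run_app expects a fingerprint image and a
# directory of previously pickled ORB descriptors (it also saves the new
# descriptors under the image's base name).
if __name__ == "__main__":
    matched, matched_name = run_app("samples/101_1.png", "descriptor_db")
    if matched:
        print("Fingerprint matched: {}".format(matched_name))
    else:
        print("No match; closest candidate: {}".format(matched_name))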
|
[
"os.listdir",
"pickle.dump",
"enhance.image_enhance.image_enhance",
"cv2.threshold",
"cv2.BFMatcher",
"cv2.normalize",
"cv2.imread",
"pickle.load",
"numpy.array",
"cv2.ORB_create",
"cv2.KeyPoint",
"cv2.createCLAHE",
"os.path.join",
"cv2.cornerHarris",
"cv2.resize"
] |
[((191, 242), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (206, 242), False, 'import cv2\n'), ((280, 312), 'enhance.image_enhance.image_enhance', 'image_enhance.image_enhance', (['img'], {}), '(img)\n', (307, 312), False, 'from enhance import image_enhance\n'), ((323, 358), 'numpy.array', 'numpy.array', (['img'], {'dtype': 'numpy.uint8'}), '(img, dtype=numpy.uint8)\n', (334, 358), False, 'import numpy\n'), ((391, 460), 'cv2.threshold', 'cv2.threshold', (['img', '(127)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (404, 460), False, 'import cv2\n'), ((562, 595), 'cv2.cornerHarris', 'cv2.cornerHarris', (['img', '(3)', '(3)', '(0.04)'], {}), '(img, 3, 3, 0.04)\n', (578, 595), False, 'import cv2\n'), ((620, 709), 'cv2.normalize', 'cv2.normalize', (['harris_corners', '(0)', '(255)'], {'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_32FC1'}), '(harris_corners, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.\n CV_32FC1)\n', (633, 709), False, 'import cv2\n'), ((1032, 1048), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (1046, 1048), False, 'import cv2\n'), ((1769, 1813), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(image_path, cv2.IMREAD_GRAYSCALE)\n', (1779, 1813), False, 'import cv2\n'), ((1855, 1889), 'cv2.resize', 'cv2.resize', (['img1'], {'dsize': '(256, 364)'}), '(img1, dsize=(256, 364))\n', (1865, 1889), False, 'import cv2\n'), ((1362, 1410), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (1375, 1410), False, 'import cv2\n'), ((2026, 2047), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2036, 2047), False, 'import os\n'), ((1330, 1344), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1341, 1344), False, 'import pickle\n'), ((2066, 2095), 'os.path.join', 'os.path.join', (['data_path', 'name'], {}), '(data_path, name)\n', (2078, 2095), False, 'import os\n'), ((3014, 3034), 'pickle.dump', 'pickle.dump', (['des1', 'f'], {}), '(des1, f)\n', (3025, 3034), False, 'import pickle\n'), ((974, 995), 'cv2.KeyPoint', 'cv2.KeyPoint', (['y', 'x', '(1)'], {}), '(y, x, 1)\n', (986, 995), False, 'import cv2\n'), ((2696, 2716), 'pickle.dump', 'pickle.dump', (['des1', 'f'], {}), '(des1, f)\n', (2707, 2716), False, 'import pickle\n')]
|
__author__ = 'aymgal'
# implementations of proximal operators adapted to sparsity
import numpy as np
from slitronomy.Util import util
def prox_sparsity_wavelets(coeffs_input, step, level_const=None, level_pixels=None, l_norm=1):
"""
    Apply a soft or hard threshold to all wavelet scales except the last one (the coarse scale)
"""
if l_norm not in [0, 1]:
raise ValueError("Sparsity proximal operator only defined with l0- and l1-norms")
if step == 0:
return coeffs_input
coeffs = np.copy(coeffs_input)
n_scales = coeffs.shape[0]
# apply threshold operation to all starlet scales except the coarsest
for s in range(n_scales-1):
thresh = step
if level_const is not None:
thresh *= level_const[s]
if level_pixels is not None:
thresh *= level_pixels[s, :, :]
if l_norm == 0:
coeffs[s, :, :] = util.hard_threshold(coeffs[s, :, :], thresh)
else:
coeffs[s, :, :] = util.soft_threshold(coeffs[s, :, :], thresh)
return coeffs
def prox_positivity(image_input):
image = np.copy(image_input)
image[image < 0] = 0.
return image
def full_prox_sparsity_positivity(image, transform, inverse_transform,
weights, noise_levels, thresh, thresh_increm,
n_scales, l_norm, formulation, force_positivity):
"""
returns the proximal operator of the regularisation term
g = lambda * |Phi^T HG|_0
or
g = lambda * |Phi^T HG|_1
"""
level_const = thresh * np.ones(n_scales)
level_const[0] += thresh_increm # possibly a stronger threshold for first decomposition levels (small scales features)
level_pixels = weights * noise_levels
if formulation == 'analysis':
coeffs = transform(image)
elif formulation == 'synthesis':
coeffs = image
# apply proximal operator
step = 1 # because threshold is already expressed in data units
coeffs_proxed = prox_sparsity_wavelets(coeffs, step=step,
level_const=level_const,
level_pixels=level_pixels,
l_norm=l_norm)
if formulation == 'analysis':
image_proxed = inverse_transform(coeffs_proxed)
elif formulation == 'synthesis':
image_proxed = coeffs_proxed
if force_positivity and formulation == 'analysis':
image_proxed = prox_positivity(image_proxed)
# TODO: apply positivity also in 'synthesis' formulation (i.e. to coeffs in starlet space?)
return image_proxed
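

# --- Illustrative usage sketch (not part of the original module) ---
# Soft-thresholds a random 3-scale stack of wavelet coefficients; the coarse
# scale (last slice) is left untouched by prox_sparsity_wavelets.
if __name__ == '__main__':
    coeffs = np.random.randn(3, 8, 8)
    proxed = prox_sparsity_wavelets(coeffs, step=1.0,
                                    level_const=np.array([3.0, 3.0, 0.0]),
                                    l_norm=1)
    assert np.allclose(proxed[-1], coeffs[-1])  # coarse scale unchanged
    print("non-zero coefficients before/after:",
          np.count_nonzero(coeffs), np.count_nonzero(proxed))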
|
[
"slitronomy.Util.util.soft_threshold",
"slitronomy.Util.util.hard_threshold",
"numpy.ones",
"numpy.copy"
] |
[((526, 547), 'numpy.copy', 'np.copy', (['coeffs_input'], {}), '(coeffs_input)\n', (533, 547), True, 'import numpy as np\n'), ((1118, 1138), 'numpy.copy', 'np.copy', (['image_input'], {}), '(image_input)\n', (1125, 1138), True, 'import numpy as np\n'), ((1599, 1616), 'numpy.ones', 'np.ones', (['n_scales'], {}), '(n_scales)\n', (1606, 1616), True, 'import numpy as np\n'), ((917, 961), 'slitronomy.Util.util.hard_threshold', 'util.hard_threshold', (['coeffs[s, :, :]', 'thresh'], {}), '(coeffs[s, :, :], thresh)\n', (936, 961), False, 'from slitronomy.Util import util\n'), ((1006, 1050), 'slitronomy.Util.util.soft_threshold', 'util.soft_threshold', (['coeffs[s, :, :]', 'thresh'], {}), '(coeffs[s, :, :], thresh)\n', (1025, 1050), False, 'from slitronomy.Util import util\n')]
|
#
# Classify the captured camera image
# The captured image is resized to 100x100 before inference
#
#---------------------------------------------------------
#import keras
import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import *
from tensorflow.python.keras.models import load_model
import numpy as np
import os
import serial
import time
from PIL import Image
import cv2
# Load the trained model
model = load_model("./original_img.h5")
model.summary()
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
# Quick sanity check with a sample image
img_arr = []
image = Image.open('./img/g/0.png')
image = image.convert("RGB")
image = image.resize((100, 100))
data = np.asarray(image)
img_arr.append(data)
img_arr = np.array(img_arr)
img_arr = img_arr.astype('float32')/255
img_arr.shape[:]
y_pred = model.predict(img_arr)
print(y_pred)
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
# Initialize the video capture
#img_cap = cv2.VideoCapture(0)
img_cap = cv2.VideoCapture(1)
#with serial.Serial('/dev/cu.usbmodem14301', timeout=0.1) as ser:
while True:
    # Process the current video frame
img_arr = []
ret, img_base = img_cap.read()
xp = int(img_base.shape[1]) #1920
yp = int(img_base.shape[0]) #1080
cx = int(xp/2)
cy = int(yp/2)
#print(xp, " + ", yp)
resize = 100
img_crop = cv2.resize(img_base[cy-500:cy+500, cx-500:cx+500], (resize, resize))
cv2.imshow('Images for CNN', img_crop)
imgCV_RGB = img_crop[:, :, ::-1]
img_pil = Image.fromarray(imgCV_RGB)
data = np.asarray(img_pil)
img_arr.append(data)
img_arr = np.array(img_arr)
img_arr = img_arr.astype('float32')/255
img_arr.shape[:]
    # Prediction
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
y_pred = model.predict(img_arr)
#print(y_pred)
    # Display the result
if y_pred[0].argmax() == 0:
if (y_pred[0][0] > 0.7):
print("")
elif y_pred[0].argmax() == 1:
if (y_pred[0][1] > 0.7):
print("グー!!")
elif y_pred[0].argmax() == 2:
if (y_pred[0][2] > 0.7):
print("チョキ!!")
elif y_pred[0].argmax() == 3:
if (y_pred[0][3] > 0.7):
print("パー!!")
if cv2.waitKey(10) == 27:
break
# Release the capture device and close the windows
img_cap.release()
cv2.destroyAllWindows()
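

# --- Illustrative sketch (not part of the original script) ---
# The if/elif chain above can also be expressed as a lookup table; the index
# order follows the ['c', 'p', 'g', 'b'] -> [0, 1, 2, 3] comment and the label
# strings mirror the print statements above.
LABELS = {1: "Rock!!", 2: "Scissors!!", 3: "Paper!!"}
def label_for(pred, threshold=0.7):
    idx = int(pred.argmax())
    if idx in LABELS and pred[idx] > threshold:
        return LABELS[idx]
    return ""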
|
[
"tensorflow.python.keras.models.load_model",
"cv2.waitKey",
"numpy.asarray",
"cv2.imshow",
"PIL.Image.open",
"cv2.VideoCapture",
"numpy.array",
"PIL.Image.fromarray",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((405, 436), 'tensorflow.python.keras.models.load_model', 'load_model', (['"""./original_img.h5"""'], {}), "('./original_img.h5')\n", (415, 436), False, 'from tensorflow.python.keras.models import load_model\n'), ((520, 547), 'PIL.Image.open', 'Image.open', (['"""./img/g/0.png"""'], {}), "('./img/g/0.png')\n", (530, 547), False, 'from PIL import Image\n'), ((617, 634), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (627, 634), True, 'import numpy as np\n'), ((666, 683), 'numpy.array', 'np.array', (['img_arr'], {}), '(img_arr)\n', (674, 683), True, 'import numpy as np\n'), ((879, 898), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (895, 898), False, 'import cv2\n'), ((2113, 2136), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2134, 2136), False, 'import cv2\n'), ((1223, 1299), 'cv2.resize', 'cv2.resize', (['img_base[cy - 500:cy + 500, cx - 500:cx + 500]', '(resize, resize)'], {}), '(img_base[cy - 500:cy + 500, cx - 500:cx + 500], (resize, resize))\n', (1233, 1299), False, 'import cv2\n'), ((1296, 1334), 'cv2.imshow', 'cv2.imshow', (['"""Images for CNN"""', 'img_crop'], {}), "('Images for CNN', img_crop)\n", (1306, 1334), False, 'import cv2\n'), ((1387, 1413), 'PIL.Image.fromarray', 'Image.fromarray', (['imgCV_RGB'], {}), '(imgCV_RGB)\n', (1402, 1413), False, 'from PIL import Image\n'), ((1426, 1445), 'numpy.asarray', 'np.asarray', (['img_pil'], {}), '(img_pil)\n', (1436, 1445), True, 'import numpy as np\n'), ((1485, 1502), 'numpy.array', 'np.array', (['img_arr'], {}), '(img_arr)\n', (1493, 1502), True, 'import numpy as np\n'), ((2067, 2082), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2078, 2082), False, 'import cv2\n')]
|
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tqdm import tqdm, tqdm_notebook
from augment import CTAugment
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
import math
from error import test_error
import logging
class OurCosineDecay(tf.keras.experimental.CosineDecay):
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = math_ops.cos(
constant_op.constant(7 / 16 * math.pi) * completed_fraction)
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def training(model, full_x_l, full_x_u, full_y_l, hparams, n_classes, file_name, log_interval=200):
def weak_transformation(x):
x = tf.image.random_flip_left_right(x)
max_shift = tf.cast(x.shape[1] * 0.125, dtype=tf.dtypes.int32)
shift = tf.random.uniform([x.shape[0], 2], minval=-max_shift, maxval=max_shift, dtype=tf.dtypes.int32)
return tfa.image.translate(x, tf.cast(shift, tf.dtypes.float32))
def pseudolabel(class_dist):
argmax = tf.math.argmax(class_dist, axis=1)
return tf.one_hot(argmax, class_dist.shape[1])
def threshold_gate(one_hot, logits, threshold):
max_probs = tf.math.multiply(one_hot, tf.nn.softmax(logits))
return tf.cast(max_probs > threshold, max_probs.dtype) # * max_probs
def sample_labeled_data(ds=full_x_l, y=full_y_l, batch_size=hparams['B']):
total_samples = ds.shape[0]
if total_samples >= batch_size:
choices = np.random.choice(np.arange(total_samples), batch_size, replace=False)
else:
choices = np.random.choice(np.arange(total_samples), batch_size, replace=True)
x_l = ds[choices, :, :, :]
y_l = y[choices]
return x_l, y_l
def step(x_l, y_l, x_u):
with tf.GradientTape() as tape:
# labeled data
x_l_weak = weak_transformation(x_l)
output_l_weak = model(x_l_weak, True)
loss_l = loss_fn_l(y_l, output_l_weak)
# update CTAugment weights
x_l_strong, choices, bins = cta.augment_batch(x_l)
output_l_strong = model(x_l_strong, True)
cta.update_weights_batch(y_l, output_l_strong, choices, bins)
# unlabeled data
x_u_weak = weak_transformation(x_u)
output_u_weak = model(x_u_weak, True)
y_u = pseudolabel(output_u_weak)
y_u = threshold_gate(y_u, output_u_weak, hparams['tau'])
x_u_strong, choices, bins = cta.augment_batch(x_u)
output_u_strong = model(x_u_strong, True)
loss_u = loss_fn_u(y_u, output_u_strong)
# add losses together
loss = loss_l + hparams['lamda'] * loss_u
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
schedule = OurCosineDecay(hparams['eta'], hparams['K'])
#optimizer = tf.keras.optimizers.SGD(schedule, momentum=hparams['beta'], nesterov=hparams['nesterov'])
optimizer = tfa.optimizers.SGDW(hparams['weight_decay'],
schedule, momentum=hparams['beta'],
nesterov=hparams['nesterov'])
loss_fn_u = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
loss_fn_l = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
cta = CTAugment(hparams['cta_classes'], hparams['cta_decay'], hparams['cta_threshold'], hparams['cta_depth'])
# ds_l = tf.data.Dataset.from_tensor_slices((full_x_l, full_y_l))
ds_u = tf.data.Dataset.from_tensor_slices(full_x_u)
# split into batches
# ds_l = ds_l.batch(hparams['B']).prefetch(-1)
ds_u = ds_u.batch(int(hparams['mu'] * hparams['B'])).prefetch(-1)
# if type casting needed: x = tf.cast(x, tf.float32)
training_step = 0
epoch = 0
best_training_accuracy = 0
# for epoch in range(hparams['epochs']):
# for (x_l, y_l), x_u in tqdm(zip(ds_l, ds_u), desc='epoch {}/{}'.format(epoch + 1, hparams['epochs']),
# total=val_interval, ncols=100, ascii=True):
# training_step += 1
# step(x_l, y_l, x_u)
#for epoch in range(hparams['epochs']):
while training_step < hparams['K']:
epoch += 1
for x_u in tqdm(ds_u, desc='epoch {}'.format(epoch),
total=hparams['total'], ncols=100, ascii=True):
training_step += 1
x_l, y_l = sample_labeled_data()
step(x_l, y_l, x_u)
if training_step >= hparams['K']:
break
err = test_error(model, full_x_l, full_y_l)
logging.info('epoch: {}, labeled accuracy: {}'.format(epoch, err))
if err > best_training_accuracy:
best_training_accuracy = err
tf.keras.models.save_model(model, filepath=file_name)
return model
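

# --- Illustrative sketch (not part of the original module) ---
# OurCosineDecay implements the cosine learning-rate schedule used by FixMatch,
# eta * cos(7 * pi * k / (16 * K)): the cosine argument sweeps [0, 7*pi/16]
# over the K training steps instead of [0, pi] as in the stock Keras CosineDecay.
if __name__ == '__main__':
    schedule = OurCosineDecay(initial_learning_rate=0.03, decay_steps=1000)
    for k in [0, 250, 500, 750, 1000]:
        print("step {:4d} -> lr {:.5f}".format(k, float(schedule(k))))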
|
[
"tensorflow.python.framework.constant_op.constant",
"tensorflow.keras.losses.CategoricalCrossentropy",
"numpy.arange",
"error.test_error",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.random.uniform",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.cast",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.framework.ops.name_scope_v2",
"tensorflow.image.random_flip_left_right",
"augment.CTAugment",
"tensorflow.math.argmax",
"tensorflow_addons.optimizers.SGDW",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.keras.models.save_model",
"tensorflow.GradientTape",
"logging.getLogger"
] |
[((67, 98), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (84, 98), False, 'import logging\n'), ((3844, 3959), 'tensorflow_addons.optimizers.SGDW', 'tfa.optimizers.SGDW', (["hparams['weight_decay']", 'schedule'], {'momentum': "hparams['beta']", 'nesterov': "hparams['nesterov']"}), "(hparams['weight_decay'], schedule, momentum=hparams[\n 'beta'], nesterov=hparams['nesterov'])\n", (3863, 3959), True, 'import tensorflow_addons as tfa\n'), ((4044, 4101), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4083, 4101), True, 'import tensorflow as tf\n'), ((4118, 4181), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4163, 4181), True, 'import tensorflow as tf\n'), ((4199, 4256), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4238, 4256), True, 'import tensorflow as tf\n'), ((4273, 4336), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4318, 4336), True, 'import tensorflow as tf\n'), ((4348, 4456), 'augment.CTAugment', 'CTAugment', (["hparams['cta_classes']", "hparams['cta_decay']", "hparams['cta_threshold']", "hparams['cta_depth']"], {}), "(hparams['cta_classes'], hparams['cta_decay'], hparams[\n 'cta_threshold'], hparams['cta_depth'])\n", (4357, 4456), False, 'from augment import CTAugment\n'), ((4534, 4578), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['full_x_u'], {}), '(full_x_u)\n', (4568, 4578), True, 'import tensorflow as tf\n'), ((1469, 1503), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (1500, 1503), True, 'import tensorflow as tf\n'), ((1524, 1574), 'tensorflow.cast', 'tf.cast', (['(x.shape[1] * 0.125)'], {'dtype': 'tf.dtypes.int32'}), '(x.shape[1] * 0.125, dtype=tf.dtypes.int32)\n', (1531, 1574), True, 'import tensorflow as tf\n'), ((1591, 1689), 'tensorflow.random.uniform', 'tf.random.uniform', (['[x.shape[0], 2]'], {'minval': '(-max_shift)', 'maxval': 'max_shift', 'dtype': 'tf.dtypes.int32'}), '([x.shape[0], 2], minval=-max_shift, maxval=max_shift,\n dtype=tf.dtypes.int32)\n', (1608, 1689), True, 'import tensorflow as tf\n'), ((1810, 1844), 'tensorflow.math.argmax', 'tf.math.argmax', (['class_dist'], {'axis': '(1)'}), '(class_dist, axis=1)\n', (1824, 1844), True, 'import tensorflow as tf\n'), ((1860, 1899), 'tensorflow.one_hot', 'tf.one_hot', (['argmax', 'class_dist.shape[1]'], {}), '(argmax, class_dist.shape[1])\n', (1870, 1899), True, 'import tensorflow as tf\n'), ((2037, 2084), 'tensorflow.cast', 'tf.cast', (['(max_probs > threshold)', 'max_probs.dtype'], {}), '(max_probs > threshold, max_probs.dtype)\n', (2044, 2084), True, 'import tensorflow as tf\n'), ((5651, 5688), 'error.test_error', 'test_error', (['model', 'full_x_l', 'full_y_l'], {}), '(model, full_x_l, full_y_l)\n', (5661, 5688), False, 'from error import test_error\n'), ((557, 602), 'tensorflow.python.framework.ops.name_scope_v2', 'ops.name_scope_v2', (["(self.name or 'CosineDecay')"], {}), "(self.name or 'CosineDecay')\n", (574, 602), False, 'from tensorflow.python.framework import ops\n'), ((640, 727), 
'tensorflow.python.framework.ops.convert_to_tensor_v2', 'ops.convert_to_tensor_v2', (['self.initial_learning_rate'], {'name': '"""initial_learning_rate"""'}), "(self.initial_learning_rate, name=\n 'initial_learning_rate')\n", (664, 727), False, 'from tensorflow.python.framework import ops\n'), ((814, 852), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self.decay_steps', 'dtype'], {}), '(self.decay_steps, dtype)\n', (827, 852), False, 'from tensorflow.python.ops import math_ops\n'), ((887, 913), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['step', 'dtype'], {}), '(step, dtype)\n', (900, 913), False, 'from tensorflow.python.ops import math_ops\n'), ((947, 996), 'tensorflow.python.ops.math_ops.minimum', 'math_ops.minimum', (['global_step_recomp', 'decay_steps'], {}), '(global_step_recomp, decay_steps)\n', (963, 996), False, 'from tensorflow.python.ops import math_ops\n'), ((1272, 1321), 'tensorflow.python.ops.math_ops.multiply', 'math_ops.multiply', (['initial_learning_rate', 'decayed'], {}), '(initial_learning_rate, decayed)\n', (1289, 1321), False, 'from tensorflow.python.ops import math_ops\n'), ((1724, 1757), 'tensorflow.cast', 'tf.cast', (['shift', 'tf.dtypes.float32'], {}), '(shift, tf.dtypes.float32)\n', (1731, 1757), True, 'import tensorflow as tf\n'), ((1999, 2020), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2012, 2020), True, 'import tensorflow as tf\n'), ((2582, 2599), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2597, 2599), True, 'import tensorflow as tf\n'), ((5875, 5928), 'tensorflow.keras.models.save_model', 'tf.keras.models.save_model', (['model'], {'filepath': 'file_name'}), '(model, filepath=file_name)\n', (5901, 5928), True, 'import tensorflow as tf\n'), ((2295, 2319), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (2304, 2319), True, 'import numpy as np\n'), ((2401, 2425), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (2410, 2425), True, 'import numpy as np\n'), ((1122, 1160), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7 / 16 * math.pi)'], {}), '(7 / 16 * math.pi)\n', (1142, 1160), False, 'from tensorflow.python.framework import constant_op\n')]
|
import cv2
import numpy
def colorDetection(image):
#Converts image HSV type image
hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
testImage = image
#Ranges for color detection
lowerYellow = numpy.array([20, 100, 100])
upperYellow = numpy.array([30,255, 255])
lowerBlue = numpy.array([85,100,100])
upperBlue = numpy.array([124,255,255])
lowerRed = numpy.array([0,100,100])
upperRed = numpy.array([19,255,255])
#Ranges applied to the hsvImage
yMask = cv2.inRange(hsvImage, lowerYellow, upperYellow)
bMask = cv2.inRange(hsvImage, lowerBlue, upperBlue)
rMask = cv2.inRange(hsvImage, lowerRed, upperRed)
    #Finding the contours on the image (note: the cv2.cv.* constants require the legacy OpenCV 2.x bindings)
yContours, yHierarchy = cv2.findContours(yMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
bContours, bHierarchy = cv2.findContours(bMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
rContours, rHierarchy = cv2.findContours(rMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
#Given at least one yellow contour
if len(yContours) > 0:
# Find the index of the largest contour
yAreas = [cv2.contourArea(i) for i in yContours]
yMaxIndex = numpy.argmax(yAreas)
yCnt = yContours[yMaxIndex]
#Find coordinate for boundary rectangle
yx,yy,yw,yh = cv2.boundingRect(yCnt)
#Draw rectangle
cv2.rectangle(testImage,(yx-15,yy-15),(yx+yw+15,yy+yh+15),(0,255,255),0)
#Given at least one blue contour
if len(bContours) > 0:
# Find the index of the largest contour
bAreas = [cv2.contourArea(i) for i in bContours]
bMaxIndex = numpy.argmax(bAreas)
bCnt = bContours[bMaxIndex]
#Find coordinate for boundary rectangle
bx,by,bw,bh = cv2.boundingRect(bCnt)
#Draw rectangle
cv2.rectangle(testImage,(bx-15,by-15),(bx+bw+15,by+bh+15),(255,0,0),0)
#Given at least one red contour
if len(rContours) > 0:
# Find the index of the largest contour
rAreas = [cv2.contourArea(i) for i in rContours]
rMaxIndex = numpy.argmax(rAreas)
rCnt = rContours[rMaxIndex]
#Find coordinate for boundary rectangle
rx,ry,rw,rh = cv2.boundingRect(rCnt)
#Draw rectangle
cv2.rectangle(testImage,(rx-15,ry-15),(rx+rw+15,ry+rh+15),(0,0,255),0)
# #Displaying the masks individually and the final image
# cv2.imshow('Yellow Mask', yMask)
# cv2.imshow('Blue Mask', bMask)
# cv2.imshow('Red Mask', rMask)
# cv2.imshow('Altered image', testImage)
return testImage
def main():
#Default Camera (cv2.videocapture(-1) the parameter indexes your cameras)
camera = cv2.VideoCapture(-1)
while camera.isOpened():
_, image = camera.read()
cv2.imshow('Original', image)
rectImg = colorDetection(image)
cv2.imshow('Color Detector', rectImg)
cv2.waitKey(5)
if __name__ == '__main__':
main()
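

# --- Illustrative sketch (not part of the original script) ---
# One way to derive HSV ranges like the hard-coded ones above: convert a
# reference BGR colour to HSV and take a band of +/- hue_tol around its hue
# (OpenCV hue runs from 0 to 179).
def hsvRangeForBGR(bgr, hue_tol=10):
    pixel = numpy.uint8([[bgr]])  # 1x1 BGR image
    hue = int(cv2.cvtColor(pixel, cv2.COLOR_BGR2HSV)[0][0][0])
    lower = numpy.array([max(hue - hue_tol, 0), 100, 100])
    upper = numpy.array([min(hue + hue_tol, 179), 255, 255])
    return lower, upper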
|
[
"cv2.contourArea",
"numpy.argmax",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.array",
"cv2.rectangle",
"cv2.boundingRect",
"cv2.inRange",
"cv2.findContours"
] |
[((103, 141), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (115, 141), False, 'import cv2\n'), ((214, 241), 'numpy.array', 'numpy.array', (['[20, 100, 100]'], {}), '([20, 100, 100])\n', (225, 241), False, 'import numpy\n'), ((260, 287), 'numpy.array', 'numpy.array', (['[30, 255, 255]'], {}), '([30, 255, 255])\n', (271, 287), False, 'import numpy\n'), ((303, 330), 'numpy.array', 'numpy.array', (['[85, 100, 100]'], {}), '([85, 100, 100])\n', (314, 330), False, 'import numpy\n'), ((345, 373), 'numpy.array', 'numpy.array', (['[124, 255, 255]'], {}), '([124, 255, 255])\n', (356, 373), False, 'import numpy\n'), ((387, 413), 'numpy.array', 'numpy.array', (['[0, 100, 100]'], {}), '([0, 100, 100])\n', (398, 413), False, 'import numpy\n'), ((427, 454), 'numpy.array', 'numpy.array', (['[19, 255, 255]'], {}), '([19, 255, 255])\n', (438, 454), False, 'import numpy\n'), ((502, 549), 'cv2.inRange', 'cv2.inRange', (['hsvImage', 'lowerYellow', 'upperYellow'], {}), '(hsvImage, lowerYellow, upperYellow)\n', (513, 549), False, 'import cv2\n'), ((562, 605), 'cv2.inRange', 'cv2.inRange', (['hsvImage', 'lowerBlue', 'upperBlue'], {}), '(hsvImage, lowerBlue, upperBlue)\n', (573, 605), False, 'import cv2\n'), ((618, 659), 'cv2.inRange', 'cv2.inRange', (['hsvImage', 'lowerRed', 'upperRed'], {}), '(hsvImage, lowerRed, upperRed)\n', (629, 659), False, 'import cv2\n'), ((728, 803), 'cv2.findContours', 'cv2.findContours', (['yMask', 'cv2.cv.CV_RETR_TREE', 'cv2.cv.CV_CHAIN_APPROX_SIMPLE'], {}), '(yMask, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE)\n', (744, 803), False, 'import cv2\n'), ((864, 939), 'cv2.findContours', 'cv2.findContours', (['bMask', 'cv2.cv.CV_RETR_TREE', 'cv2.cv.CV_CHAIN_APPROX_SIMPLE'], {}), '(bMask, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE)\n', (880, 939), False, 'import cv2\n'), ((1000, 1075), 'cv2.findContours', 'cv2.findContours', (['rMask', 'cv2.cv.CV_RETR_TREE', 'cv2.cv.CV_CHAIN_APPROX_SIMPLE'], {}), '(rMask, cv2.cv.CV_RETR_TREE, cv2.cv.CV_CHAIN_APPROX_SIMPLE)\n', (1016, 1075), False, 'import cv2\n'), ((2787, 2807), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(-1)'], {}), '(-1)\n', (2803, 2807), False, 'import cv2\n'), ((1300, 1320), 'numpy.argmax', 'numpy.argmax', (['yAreas'], {}), '(yAreas)\n', (1312, 1320), False, 'import numpy\n'), ((1427, 1449), 'cv2.boundingRect', 'cv2.boundingRect', (['yCnt'], {}), '(yCnt)\n', (1443, 1449), False, 'import cv2\n'), ((1482, 1578), 'cv2.rectangle', 'cv2.rectangle', (['testImage', '(yx - 15, yy - 15)', '(yx + yw + 15, yy + yh + 15)', '(0, 255, 255)', '(0)'], {}), '(testImage, (yx - 15, yy - 15), (yx + yw + 15, yy + yh + 15),\n (0, 255, 255), 0)\n', (1495, 1578), False, 'import cv2\n'), ((1745, 1765), 'numpy.argmax', 'numpy.argmax', (['bAreas'], {}), '(bAreas)\n', (1757, 1765), False, 'import numpy\n'), ((1872, 1894), 'cv2.boundingRect', 'cv2.boundingRect', (['bCnt'], {}), '(bCnt)\n', (1888, 1894), False, 'import cv2\n'), ((1927, 2021), 'cv2.rectangle', 'cv2.rectangle', (['testImage', '(bx - 15, by - 15)', '(bx + bw + 15, by + bh + 15)', '(255, 0, 0)', '(0)'], {}), '(testImage, (bx - 15, by - 15), (bx + bw + 15, by + bh + 15),\n (255, 0, 0), 0)\n', (1940, 2021), False, 'import cv2\n'), ((2187, 2207), 'numpy.argmax', 'numpy.argmax', (['rAreas'], {}), '(rAreas)\n', (2199, 2207), False, 'import numpy\n'), ((2314, 2336), 'cv2.boundingRect', 'cv2.boundingRect', (['rCnt'], {}), '(rCnt)\n', (2330, 2336), False, 'import cv2\n'), ((2369, 2463), 'cv2.rectangle', 'cv2.rectangle', (['testImage', '(rx 
- 15, ry - 15)', '(rx + rw + 15, ry + rh + 15)', '(0, 0, 255)', '(0)'], {}), '(testImage, (rx - 15, ry - 15), (rx + rw + 15, ry + rh + 15),\n (0, 0, 255), 0)\n', (2382, 2463), False, 'import cv2\n'), ((2879, 2908), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (2889, 2908), False, 'import cv2\n'), ((2958, 2995), 'cv2.imshow', 'cv2.imshow', (['"""Color Detector"""', 'rectImg'], {}), "('Color Detector', rectImg)\n", (2968, 2995), False, 'import cv2\n'), ((3004, 3018), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (3015, 3018), False, 'import cv2\n'), ((1241, 1259), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (1256, 1259), False, 'import cv2\n'), ((1686, 1704), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (1701, 1704), False, 'import cv2\n'), ((2128, 2146), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (2143, 2146), False, 'import cv2\n')]
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import contextlib
import itertools
import math
import os
import statistics
import time
import numpy as np
import torch
from . import FairseqDataset
import fairseq.data.batch_C_v0p5
import fairseq.data.batch_C_v0p5_better
import fairseq.data.batch_C_v0p6
import sys
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
print('Infer language pair from filename...')
for filename in os.listdir(path):
print('filename:', filename)
parts = filename.split('.')
if len(parts) >= 3 and len(parts[1].split('-')) == 2:
return parts[1].split('-')
return src, dst
class ShardedIterator(object):
"""A sharded wrapper around an iterable (padded to length)."""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
self._sharded_len = len(iterable) // num_shards
if len(iterable) % num_shards > 0:
self._sharded_len += 1
self.itr = itertools.zip_longest(
range(self._sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
)
def __len__(self):
return self._sharded_len
def __iter__(self):
return self
def __next__(self):
return next(self.itr)[1]
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count."""
def __init__(self, iterable):
self.iterable = iterable
self.count = 0
self.itr = iter(self)
def __len__(self):
return len(self.iterable)
def __iter__(self):
for x in self.iterable:
self.count += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
return self.count < len(self)
def skip(self, num_to_skip):
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False, n_seq_per_batch_multiple=8, seq_len_multiple=1):
""" Convert a list of 1d tensors into a padded 2d tensor.
Args:
        values: Python list where each element is a PyTorch 1d tensor
pad_idx: The index into the translation dictionary for the pad token (typically refer to 'dict.pad()')
eos_idx: The index into the translation dictionary for the eos token (typically refer to 'dict.eos()')
left_pad: Bool, left- or right-padding (true: left, false: right)
move_eos_to_beginning: Reverse order of sequence of tokens (true: reverse, false:leave in original order)
n_seq_per_batch_multiple: The number of sequences per batch to round down to
seq_len_multiple: The number of tokens per sequence to round up to
"""
size_of_seq_dim = max(v.size(0) for v in values) # Unpadded size
n_seq_in_batch = len(values)
if n_seq_per_batch_multiple % seq_len_multiple == 0:
n_seq_multiple = n_seq_per_batch_multiple / seq_len_multiple
else:
n_seq_multiple = n_seq_per_batch_multiple
if n_seq_in_batch < n_seq_multiple or n_seq_in_batch % n_seq_multiple > 0:
seq_len_multiple = n_seq_per_batch_multiple
size_of_seq_dim = (size_of_seq_dim + seq_len_multiple - 1) // seq_len_multiple * seq_len_multiple # Padded seq len, rounded up to next multiple
padded_2d_tensor = values[0].new(len(values), size_of_seq_dim).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
assert src[-1] == eos_idx
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
if left_pad:
for idx, val in enumerate(values):
copy_tensor(val, padded_2d_tensor[idx][size_of_seq_dim - len(val):])
else:
for idx, val in enumerate(values):
copy_tensor(val, padded_2d_tensor[idx][:len(val)])
return padded_2d_tensor
class EpochBatchIterator(object):
"""Iterate over a FairseqDataset and yield batches bucketed by size.
Batches may contain sequences of different lengths. This iterator can be
reused across multiple epochs with the next_epoch_itr() method.
Args:
dataset: a FairseqDataset
max_tokens: max number of tokens in each batch
max_sentences: max number of sentences in each batch
max_positions: max sentence length supported by the model
ignore_invalid_inputs: don't raise Exception for sentences that are too long
required_batch_size_multiple: require batch size to be a multiple of N
seeds: seeds for random number generator for reproducibility (1 seed for
each training epoch)
num_shards: shard the data iterator into N shards
shard_id: which shard of the data iterator to return
"""
def __init__(
self, dataset, dataloader_num_workers=1, dataloader_pin_memory=False, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1, seeds=[1],
num_shards=1, shard_id=0, epoch=0, bucket_growth_factor=1.1, seq_len_multiple=1,
batching_scheme='v0p5', batch_multiple_strategy='multiple_of_sequences',
):
assert isinstance(dataset, FairseqDataset)
self.dataset = dataset
self.max_tokens = max_tokens if max_tokens is not None else float('Inf')
self.max_sentences = max_sentences if max_sentences is not None else float('Inf')
self.dataloader_num_workers = dataloader_num_workers
self.dataloader_pin_memory = dataloader_pin_memory
assert len(max_positions) == 2, "Max positions contains source and target lengths!"
max_src_pos,max_tgt_pos = max_positions
self.max_positions = max_positions
self.max_positions_num = min(max_src_pos, max_tgt_pos)
self.ignore_invalid_inputs = ignore_invalid_inputs
self.bsz_mult = required_batch_size_multiple
self.seeds = seeds
self.num_shards = num_shards
self.shard_id = shard_id
self.seq_len_multiple = seq_len_multiple
self.batching_scheme = batching_scheme
self.batch_multiple_strategy = batch_multiple_strategy
self.epoch = epoch
self._cur_epoch_itr = None
self._next_epoch_itr = None
with numpy_seed(self.seeds[0]):
import time
start = time.time()
indices = self.dataset.ordered_indices(self.seeds[self.epoch])
#need integer, rather than float('Inf') values
max_sentences = max_sentences if max_sentences is not None else sys.maxsize
max_tokens = max_tokens if max_tokens is not None else sys.maxsize
if self.batching_scheme == 'v0p5' :
batches = fairseq.data.batch_C_v0p5.make_batches_v0p5(self.dataset.src_sizes, self.dataset.tgt_sizes, indices, max_tokens, max_sentences, self.bsz_mult, self.max_positions_num)
elif self.batching_scheme == 'v0p5_better' :
print('self.dataset.src_sizes', self.dataset.src_sizes.size)
print('self.dataset.tgt_sizes', self.dataset.tgt_sizes.size)
batches = fairseq.data.batch_C_v0p5_better.make_batches_v0p5_better(self.dataset.src_sizes, self.dataset.tgt_sizes, indices, max_tokens, max_sentences, self.max_positions_num, self.bsz_mult, self.seq_len_multiple)
elif self.batching_scheme == 'v0p6':
batch_strategy = 2
if self.batch_multiple_strategy == 'mult_of_sequences':
batch_strategy = 0
elif self.batch_multiple_strategy == 'pad_sequence_to_mult':
batch_strategy = 1
elif self.batch_multiple_strategy == 'dynamic':
batch_strategy = 2
else:
assert False, "Unknown batch multiple strategy!"
bucket_specify_min_boundary = 8
use_efficient_last_pack = False
#batch_strategy = 2
batches = fairseq.data.batch_C_v0p6.make_batches_v0p6(self.dataset.src_sizes,
self.dataset.tgt_sizes,
indices,
max_tokens,
max_sentences,
self.bsz_mult,
self.max_positions_num,
bucket_specify_min_boundary,
bucket_growth_factor,
batch_strategy,
use_efficient_last_pack)
else : # reference
def roundup(x, multiple):
return (x + multiple - 1) // multiple * multiple
def rounddown(x, multiple):
return x // multiple * multiple
def create_bucket_bounds_lists(max_allowable_seq_length, bucket_specify_min_boundary, bucket_specify_growth_scale):
bucket_boundaries = []
x = bucket_specify_min_boundary
while x < max_allowable_seq_length:
bucket_boundaries.append(x)
x = max(x + 1, int(x * bucket_specify_growth_scale))
if use_efficient_last_pack:
buckets_min_list = [0] + [i+1 for i in bucket_boundaries]
buckets_max_list = bucket_boundaries + [max_allowable_seq_length]
else:
buckets_min_list = [0] + bucket_boundaries
buckets_max_list = bucket_boundaries + [max_allowable_seq_length + 1]
return buckets_min_list, buckets_max_list
def create_seq_to_bucket_id_list_and_n_seq_per_batch(n_tok_per_seq, max_allowable_seq_length, max_sentences, pad_seq_per_batch_to_multiple_of, pad_tok_per_seq_to_multiple_of, bucket_specify_min_boundary, bucket_specify_growth_scale):
bucket_interval_min, bucket_interval_max = create_bucket_bounds_lists(max_allowable_seq_length, bucket_specify_min_boundary, bucket_specify_growth_scale)
if do_seq_len_padding_to_multiple:
n_seq_per_batch = [max_tokens // roundup(x, pad_tok_per_seq_to_multiple_of) for x in bucket_interval_max]
elif do_batch_size_rounding_down_to_multiple:
n_seq_per_batch = [rounddown(max_tokens // x, pad_seq_per_batch_to_multiple_of) for x in bucket_interval_max]
elif do_dynamic_batch_size_choice:
n_seq_per_batch_based_on_seq_len = [max_tokens // roundup(x, pad_tok_per_seq_to_multiple_of) for x in bucket_interval_max]
n_seq_per_batch_based_on_n_seq = [rounddown(max_tokens // x, pad_seq_per_batch_to_multiple_of) for x in bucket_interval_max]
n_seq_per_batch = [max(a,b) for a, b in zip(n_seq_per_batch_based_on_seq_len, n_seq_per_batch_based_on_n_seq)]
else:
n_seq_per_batch = [max_tokens // x for x in bucket_interval_max]
n_seq_per_batch = [min(max_sentences, i) if max_sentences is not None else i for i in n_seq_per_batch]
for a, b, c in zip(bucket_interval_min, bucket_interval_max, n_seq_per_batch):
print('bucket:', a, b, c)
token_length_2_bucket_id = {}
for x in range(max_allowable_seq_length+1):
for bucket_id, payload in enumerate(zip(bucket_interval_min, bucket_interval_max)):
bmin, bmax = payload
if (bmin <= x and x <= bmax and use_efficient_last_pack) or (bmin <= x and x < bmax):
token_length_2_bucket_id[x] = bucket_id
break
return ([token_length_2_bucket_id[x] if x <= max_allowable_seq_length else -1 for x in n_tok_per_seq], n_seq_per_batch, len(bucket_interval_min))
# Make adjustments to tuneable parameters here
pad_seq_per_batch_to_multiple_of = self.bsz_mult
pad_tok_per_seq_to_multiple_of = self.bsz_mult
max_allowable_seq_length = self.max_positions_num
bucket_specify_min_boundary = 8
bucket_specify_growth_scale = bucket_growth_factor ##1.035
do_seq_len_padding_to_multiple = False
do_batch_size_rounding_down_to_multiple = False
do_dynamic_batch_size_choice = True
use_efficient_last_pack = False
batches = []
src_token_counts = []
dst_token_counts = []
seq_counts = []
padded_token_counts = []
batch_max_padded_seq_len = 0
batch_seq_count = 0
batches.append([])
src_batch_token_count = 0
dst_batch_token_count = 0
curr_batch_padded_token_count = 0
batch_n_seq = 0
bucket_id = 0
longest_in_batch = []
print('### max_tokens:', max_tokens)
print('### max_sentences:', max_sentences)
pairwise_max_seq_len = [max(a,b) for a, b in zip(dataset.src_sizes, dataset.tgt_sizes)]
bucket_ids, n_seq_per_batch, n_buckets = create_seq_to_bucket_id_list_and_n_seq_per_batch(pairwise_max_seq_len, max_allowable_seq_length, max_sentences, pad_seq_per_batch_to_multiple_of, pad_tok_per_seq_to_multiple_of, bucket_specify_min_boundary, bucket_specify_growth_scale)
buckets = []
for i in range(n_buckets):
buckets.append([])
n_rejected_sequences = 0
for idx, bidx in enumerate(bucket_ids):
if bidx >= 0:
buckets[bidx].append(idx)
else:
n_rejected_sequences += 1
# Remove empty buckets (causes blow-up in eval code).
buckets = [i for i in buckets if len(i) > 0]
                print(n_rejected_sequences, 'sequences were omitted for exceeding', max_allowable_seq_length, 'tokens.')
batch_seq_count = 0
#count = 0
seq_len_tracker = 0
for bucket, nspb in zip(buckets, n_seq_per_batch):
for item in bucket:
if batch_n_seq < nspb:
batches[-1].append(item)
src_batch_token_count += dataset.src_sizes[item]
dst_batch_token_count += dataset.tgt_sizes[item]
seq_len_tracker = max(seq_len_tracker, dst_batch_token_count)
batch_n_seq += 1
else:
batches.append([item])
src_token_counts.append(src_batch_token_count)
dst_token_counts.append(dst_batch_token_count)
src_batch_token_count = dataset.src_sizes[item]
dst_batch_token_count = dataset.tgt_sizes[item]
seq_counts.append(batch_n_seq)
batch_n_seq = 1
batches.append([])
batch_n_seq = 0
seq_counts.append(batch_n_seq)
src_batch_token_count = 0
dst_batch_token_count = 0
src_token_counts.append(src_batch_token_count)
dst_token_counts.append(dst_batch_token_count)
seq_cnt2 = []
for batch in batches:
seq_len_tracker = 0
nseqbucket = 0
for item in batch:
a = dataset.src_sizes[item]
b = dataset.tgt_sizes[item]
seq_len_tracker = max(seq_len_tracker, max(a, b))
nseqbucket += 1
longest_in_batch.append(seq_len_tracker)
seq_cnt2.append(nseqbucket)
# In the unlucky case, remove a newly created but empty last batch
if not batches[-1]:
del batches[-1]
del seq_counts[-1]
del src_token_counts[-1]
del dst_token_counts[-1]
tmp_batches = batches
batches = []
for b in tmp_batches:
if b:
batches.append(b)
#padded_token_counts = src_token_counts
#padded_token_counts = [x*0 for x in src_token_counts] # Setting to zero until this is actually implemented
#print('split dataset length:', len(dataset.src))
#print('mean src tokens per batch =', statistics.mean(src_token_counts), statistics.mean(padded_token_counts))
#print('median src tokens per batch =', statistics.median(src_token_counts), statistics.median(padded_token_counts))
#print('stdev src tokens per batch =', statistics.stdev(src_token_counts), statistics.stdev(padded_token_counts))
#print('min src tokens per batch =', min(src_token_counts), min(padded_token_counts))
#print('max src tokens per batch =', max(src_token_counts), max(padded_token_counts))
#print('mean tgt tokens per batch =', statistics.mean(dst_token_counts), statistics.mean(padded_token_counts))
#print('median tgt tokens per batch =', statistics.median(dst_token_counts), statistics.mean(padded_token_counts))
#print('stdev tgt tokens per batch =', statistics.stdev(dst_token_counts), statistics.stdev(padded_token_counts))
#print('min tgt tokens per batch =', min(dst_token_counts), min(padded_token_counts))
#print('max tgt tokens per batch =', max(dst_token_counts), max(padded_token_counts))
#print('mean seq per batch =', statistics.mean(seq_counts), statistics.mean(padded_token_counts))
#print('median seq per batch =', statistics.median(seq_counts), statistics.median(padded_token_counts))
#print('stdev seq per batch =', statistics.stdev(seq_counts), statistics.stdev(padded_token_counts))
#print('min seq per batch =', min(seq_counts), min(padded_token_counts))
#print('max seq per batch =', max(seq_counts), max(padded_token_counts))
#print('pad inc: mean tgt tokens per batch =', statistics.mean(np.array(seq_cnt2) * np.array(longest_in_batch)), longest_in_batch[:3], seq_cnt2[:3])
#print('pad inc: median tgt tokens per batch =', statistics.median(np.array(seq_cnt2) * np.array(longest_in_batch)), longest_in_batch[:3], seq_cnt2[:3])
self.frozen_batches = tuple(batches)
# self.frozen_batches = tuple(self._batch_generator())
print("generated %d batches in %fs" % (len(batches), time.time() - start))
def __len__(self):
return len(self.frozen_batches)
def next_epoch_itr(self, shuffle=True):
"""Shuffle batches and return a new iterator over the dataset."""
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self.epoch += 1
self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)
return self._cur_epoch_itr
def end_of_epoch(self):
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.count
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.count
return 0
def state_dict(self):
return {
'epoch': self.epoch,
'iterations_in_epoch': self.iterations_in_epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
if itr_pos > 0:
# fast-forward epoch iterator
itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))
if itr_pos < len(itr):
self._next_epoch_itr = itr.skip(itr_pos)
def _get_iterator_for_epoch(self, epoch, shuffle):
if shuffle:
# set seed based on the seed and epoch number so that we get
# reproducible results when resuming from checkpoints
with numpy_seed(self.seeds[epoch]):
batches = list(self.frozen_batches) # copy
np.random.shuffle(batches)
else:
batches = self.frozen_batches
return CountingIterator(torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.dataset.collater,
num_workers=self.dataloader_num_workers,
pin_memory=self.dataloader_pin_memory,
batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),
))
def _batch_generator(self):
batch = []
def is_batch_full(num_tokens):
if len(batch) == 0:
return False
if len(batch) == self.max_sentences:
return True
if num_tokens > self.max_tokens:
return True
return False
sample_len = 0
sample_lens = []
ignored = []
for idx in self.dataset.ordered_indices(self.seeds[self.epoch]):
if not self.dataset.valid_size(idx, self.max_positions):
if self.ignore_invalid_inputs:
ignored.append(idx)
continue
raise Exception((
'Size of sample #{} is invalid, max_positions={}, skip this example with --skip-invalid-size-inputs-valid-test'
).format(idx, self.max_positions))
sample_lens.append(self.dataset.num_tokens(idx))
sample_len = max(sample_len, sample_lens[-1])
num_tokens = (len(batch) + 1) * sample_len
if is_batch_full(num_tokens):
mod_len = max(self.bsz_mult * (len(batch) // self.bsz_mult), len(batch) % self.bsz_mult,)
yield batch[:mod_len]
batch = batch[mod_len:]
sample_lens = sample_lens[mod_len:]
sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
batch.append(idx)
if len(batch) > 0:
yield batch
if len(ignored) > 0:
print((
'| WARNING: {} samples have invalid sizes and will be skipped, max_positions={}, first few sample ids={}'
).format(len(ignored), self.max_positions, ignored[:10]))
@contextlib.contextmanager
def numpy_seed(seed):
"""Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward"""
if seed is None:
yield
return
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
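

# --- Illustrative sketch (not part of the original module) ---
# Shows what collate_tokens does with two variable-length sequences.  With the
# default n_seq_per_batch_multiple=8 and fewer than 8 sequences in the batch,
# the padded length is rounded up to a multiple of 8.  Pad/eos indices are
# arbitrary example values; the module's relative imports mean this is meant
# to be called with fairseq installed, not run as a script.
def _collate_tokens_example():
    seqs = [torch.LongTensor([5, 6, 7, 2]), torch.LongTensor([8, 9, 2])]
    batch = collate_tokens(seqs, pad_idx=1, eos_idx=2, left_pad=False)
    # batch is a 2x8 LongTensor, right-padded with pad_idx:
    # tensor([[5, 6, 7, 2, 1, 1, 1, 1],
    #         [8, 9, 2, 1, 1, 1, 1, 1]])
    return batch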
|
[
"numpy.random.seed",
"numpy.random.get_state",
"numpy.random.set_state",
"time.time",
"itertools.islice",
"os.listdir",
"numpy.random.shuffle"
] |
[((761, 777), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (771, 777), False, 'import os\n'), ((24225, 24246), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (24244, 24246), True, 'import numpy as np\n'), ((24251, 24271), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (24265, 24271), True, 'import numpy as np\n'), ((24317, 24343), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (24336, 24343), True, 'import numpy as np\n'), ((2325, 2377), 'itertools.islice', 'itertools.islice', (['self.itr', 'num_to_skip', 'num_to_skip'], {}), '(self.itr, num_to_skip, num_to_skip)\n', (2341, 2377), False, 'import itertools\n'), ((6920, 6931), 'time.time', 'time.time', ([], {}), '()\n', (6929, 6931), False, 'import time\n'), ((21844, 21870), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (21861, 21870), True, 'import numpy as np\n'), ((20129, 20140), 'time.time', 'time.time', ([], {}), '()\n', (20138, 20140), False, 'import time\n')]
|
#!/usr/bin/env python
import os
import roslib; roslib.load_manifest('freemovr_engine')
import rosbag
import sensor_msgs.msg
import geometry_msgs.msg
import pymvg
import numpy as np
def test_bag():
for i in range(1000):
bagout = rosbag.Bag('/tmp/testbag.bag', 'w')
topic = '/tf'
extrinsics = geometry_msgs.msg.Transform()
bagout.write(topic, extrinsics)
topic = '/camera_info'
intrinsics = sensor_msgs.msg.CameraInfo()
intrinsics.distortion_model = ','*i
intrinsics.K = list(np.random.rand(3,3).flatten())
bagout.write(topic, intrinsics)
bagout.close()
c = pymvg.CameraModel.load_camera_simple()
c.save_to_bagfile('/tmp/testbag.bag')
fname = '/home/stowers/.ros/camera_info/Basler_21220788.yaml'
assert os.path.exists(fname)
c = pymvg.CameraModel.load_camera_from_file(fname)
c.get_intrinsics_as_msg()
c.save_to_bagfile('/tmp/testbag.bag')
c = pymvg.CameraModel.load_camera_from_file('/tmp/testbag.bag')
c.get_intrinsics_as_msg()
|
[
"rosbag.Bag",
"pymvg.CameraModel.load_camera_from_file",
"pymvg.CameraModel.load_camera_simple",
"os.path.exists",
"numpy.random.rand",
"roslib.load_manifest"
] |
[((48, 87), 'roslib.load_manifest', 'roslib.load_manifest', (['"""freemovr_engine"""'], {}), "('freemovr_engine')\n", (68, 87), False, 'import roslib\n'), ((650, 688), 'pymvg.CameraModel.load_camera_simple', 'pymvg.CameraModel.load_camera_simple', ([], {}), '()\n', (686, 688), False, 'import pymvg\n'), ((809, 830), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (823, 830), False, 'import os\n'), ((839, 885), 'pymvg.CameraModel.load_camera_from_file', 'pymvg.CameraModel.load_camera_from_file', (['fname'], {}), '(fname)\n', (878, 885), False, 'import pymvg\n'), ((967, 1026), 'pymvg.CameraModel.load_camera_from_file', 'pymvg.CameraModel.load_camera_from_file', (['"""/tmp/testbag.bag"""'], {}), "('/tmp/testbag.bag')\n", (1006, 1026), False, 'import pymvg\n'), ((245, 280), 'rosbag.Bag', 'rosbag.Bag', (['"""/tmp/testbag.bag"""', '"""w"""'], {}), "('/tmp/testbag.bag', 'w')\n", (255, 280), False, 'import rosbag\n'), ((547, 567), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (561, 567), True, 'import numpy as np\n')]
|
from firebase import firebase
from datetime import date
from datetime import datetime
import numpy as np
import cv2
import imghdr
import base64
import json
'''
Structure of Our Main Object
Note that a unique id will be created when we save it for the first time in the db; it is not shown currently.
We will use this id for all transactions
object = {
"userName":'',
"ubid":'',
"faceEncoding":[]
"dayAndTime":[{
'day':'',
'startTime':'',
'endTime':''}]
}
'''
today = date.today()
date1= today.strftime("%d/%m/%Y")
now = datetime.now()
start_time = now.strftime("%H:%M:%S")
class Database:
def __init__(self):
self.firebase = firebase.FirebaseApplication('https://cvip-a44cd.firebaseio.com/', None)
'''
To save the first data object
    Pass it a dictionary consisting of the following parameters: name, ubid, feature vector
Structure of the dictionary eg - {"name":'Gerald',"ubid":5033999,"faceEncoding":[1,2,3,4,5]}
It will automatically insert date and start time in the main object
'''
def postData(self,d):
data={ "userName":d['name'],
"dayAndTime":[{ "day": date1,
"startTime":"",
"endTime":" "
}],
"ubid":d['ubid'],
"faceEncoding":d['faceEncoding']}
print("Posting data in DB")
result = self.firebase.post('/timeclock/',data)
uid = dict(result)
return uid['name']
    # This method will retrieve all the data from the database
'''
Sample of a single object returned from database
obj = {'-M5VP8cUF8UehCDc8fV4':
{'dayAndTime': [{
'day': '22/04/2020',
'endTime': ' ',
'startTime': '01:42:21'}],
'faceEncoding': [1, 2, 3, 4, 5],
'ubid': 5033, '
userName': 'Gerald'}}
'''
def getData(self):
result = self.firebase.get('/timeclock/', '')
print(result)
return result
#Pass the Unique Key to Get That Particular Data
def getSingleData(self,key):
result = self.firebase.get('/timeclock/', key)
#print(result)
return result
#To update a single object, pass it the unique key and updated data object
def updateSingleData(self,id,data):
rs=self.firebase.patch('/timeclock/'+id, data)
print('updated')
def clockInUser(self,id):
data = self.getSingleData(id)
x = data['dayAndTime']
for dict1 in x:
if (dict1['day'] == date1):
dict1['startTime'] = start_time
data['dayAndTime'] = x
rs = self.firebase.patch('/timeclock/' + id, data)
return data
def clockOutUser(self,id):
data=self.getSingleData(id)
x=data['dayAndTime']
for dict1 in x:
if(dict1['day'] == date1):
dict1['endTime']=start_time
data['dayAndTime']=x
rs=self.firebase.patch('/timeclock/'+id, data)
return data
#check if user is clocked in or out
def checkClockedStatus(self,id):
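        # Returns False only when today's record exists and its startTime is still
        # empty (the user has not clocked in yet); otherwise returns True.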
data=self.getSingleData(id)
x=data['dayAndTime']
status = True
for dict1 in x:
if(dict1['day'] == date1):
if(dict1['startTime'] == ''):
status=False
return status
def getNameAndFeatureVector(self):
res = self.firebase.get('/timeclock/', '')
if(res != None):
name=[]
featureVector=[]
uid=[]
ubid=[]
for obj1 in res:
uid.append(obj1)
obj2=res[str(obj1)]
name.append(obj2['userName'])
featureVector.append(np.array(obj2['faceEncoding']))
ubid.append(obj2['ubid'])
return name,featureVector,uid,ubid
else:
return [],[],[],[]
def dayAndDateValues(self):
day = today.strftime("%d")
month = today.strftime("%m")
year = today.strftime("%Y")
hours = now.strftime("%H")
seconds = now.strftime("%S")
minutes = now.strftime("%M")
return day,month,year,hours,seconds,minutes
def getUbidStatus(self,ubid):
data = self.firebase.get('/ubidDb/', None)
out = None
ubid = str(ubid)
ubid = ubid.replace(" ","")
for dict1 in data:
x=(data[dict1])
if(str(ubid) == x['ubid']):
out = x['name']
break
return out
'''
a=Database()
d1={"name":'Gautam',"ubid":5033,"faceEncoding":[1,2,3,4,5]}
#a.updateSingleData(a.getSingleData())
c=a.getData()
f=list(c.keys())
print(f)
res=a.getSingleData(f[0])
#print(res)
#res['userName'] = 'gautam'
res['dayAndTime'].append({'day':'24/04/2019'})
#print(res)
#a.updateSingleData(str(f[0]),res)
img=cv2.imread('3.png')
imgType= imghdr.what('3.png')
print(imgType)
img_str = cv2.imencode('.png',img)[1].tostring()
print(type(img_str))
a=Database()
d1={"name":'Gautam111',"ubid":5033,"faceEncoding":[1,2,3,4,5]}
img=cv2.imread('3.png')
with open("3.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
a.postData(d1, encoded_string)
#cv2.imwrite('F:/CV/Final/data/3.png', img)
'''
#
# a = Database()
# a.getUbidStatus("1111")
# firebase = firebase.FirebaseApplication('https://cvip-a44cd.firebaseio.com/', None)
# result = firebase.get('/ubidDb', None)
# for dict1 in result:
# x=(result[dict1])
# print(x['ubid'])
# a.clockInUser('-M5tqBEF89MM-FIi1XBl')
# print(a.checkClockedStatus('-M5tqBEF89MM-FIi1XBl'))
# # known_face_names, known_face_encodings, uniqueID, ubid = a.getNameAndFeatureVector()
# # print(known_face_names)
# known_face_names, known_face_encodings, uniqueID, ubid = a.getNameAndFeatureVector()
# print(ubid)
|
[
"firebase.firebase.FirebaseApplication",
"datetime.datetime.now",
"datetime.date.today",
"numpy.array"
] |
[((541, 553), 'datetime.date.today', 'date.today', ([], {}), '()\n', (551, 553), False, 'from datetime import date\n'), ((606, 620), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (618, 620), False, 'from datetime import datetime\n'), ((739, 811), 'firebase.firebase.FirebaseApplication', 'firebase.FirebaseApplication', (['"""https://cvip-a44cd.firebaseio.com/"""', 'None'], {}), "('https://cvip-a44cd.firebaseio.com/', None)\n", (767, 811), False, 'from firebase import firebase\n'), ((4346, 4376), 'numpy.array', 'np.array', (["obj2['faceEncoding']"], {}), "(obj2['faceEncoding'])\n", (4354, 4376), True, 'import numpy as np\n')]
|
import numpy as np
import scipy
import scipy.sparse
import matplotlib.pyplot as plt
# part 2 - read sparse matrix from csv file
def read_coo(fname):
Y = np.loadtxt(fname, delimiter=',')
rows = np.array(Y[:, 0], int)
cols = np.array(Y[:, 1], int)
V = Y[:, 2]
return scipy.sparse.coo_matrix((np.array(V), (rows, cols)))
A = read_coo("../sbm.csv")
# part 3 - create sparse + Rank-1 class
class sparse_rank1(object):
def __init__(self, S, alpha, u, v):
self.S = S
self.alpha = alpha
self.u = u
self.v = v
self.shape = S.shape
def dot(self, x):
return self.S.dot(x) + self.alpha*self.u*self.v.dot(x)
# part 4 - power method
# compute power method
# tol is a keyword argument for the convergence tolerance
def power_method(A, tol=1e-8):
# rayleigh quotient
# returns v^T*Av
def rq(v, A):
return v.dot(A.dot(v))
n = A.shape[1]
# generate random vector with unit length
v = np.random.normal(0, 1, n)
v /= np.linalg.norm(v)
rqs = [] # keep track of rayleigh quotients as we progress
rqs.append(rq(v, A))
while True:
# v <- A*v
v = A.dot(v)
# normalize v
v /= np.linalg.norm(v)
rqs.append(rq(v, A))
# check if rayleigh quotient has converged
if np.abs(rqs[-1] - rqs[-2]) < tol:
break
# set eigenvalue
lam = rqs[-1]
return v, lam
# part 5 - spectral embedding
v, lam = power_method(A)
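# Deflation: B = A - lam * v v^T keeps the remaining eigenvectors of A but shifts
# the dominant eigenvalue toward zero, so running the power method on B picks up
# the next (second-largest) eigenpair.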
B = sparse_rank1(A, -lam, v, v)
v2, lam2 = power_method(B)
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.scatter(v, v2)
ax.set_xlabel(r'$v_1$')
ax.set_ylabel(r'$v_2$')
ax.set_title("Spectral Embedding")
plt.savefig('sbm.png')
plt.show()
|
[
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.array",
"numpy.loadtxt",
"numpy.random.normal",
"numpy.linalg.norm",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((1567, 1602), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 8)'}), '(1, 1, figsize=(10, 8))\n', (1579, 1602), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1729), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sbm.png"""'], {}), "('sbm.png')\n", (1718, 1729), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1738, 1740), True, 'import matplotlib.pyplot as plt\n'), ((159, 191), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'delimiter': '""","""'}), "(fname, delimiter=',')\n", (169, 191), True, 'import numpy as np\n'), ((203, 225), 'numpy.array', 'np.array', (['Y[:, 0]', 'int'], {}), '(Y[:, 0], int)\n', (211, 225), True, 'import numpy as np\n'), ((237, 259), 'numpy.array', 'np.array', (['Y[:, 1]', 'int'], {}), '(Y[:, 1], int)\n', (245, 259), True, 'import numpy as np\n'), ((983, 1008), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (999, 1008), True, 'import numpy as np\n'), ((1018, 1035), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (1032, 1035), True, 'import numpy as np\n'), ((1219, 1236), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (1233, 1236), True, 'import numpy as np\n'), ((312, 323), 'numpy.array', 'np.array', (['V'], {}), '(V)\n', (320, 323), True, 'import numpy as np\n'), ((1329, 1354), 'numpy.abs', 'np.abs', (['(rqs[-1] - rqs[-2])'], {}), '(rqs[-1] - rqs[-2])\n', (1335, 1354), True, 'import numpy as np\n')]
|
# Thanks to KKiller on Kaggle for designing this model.
from torch.utils.data import Dataset, DataLoader
from abc import ABC
from pathlib import Path
from numcodecs import blosc
import pandas as pd, numpy as np
import bisect
import itertools as it
from tqdm import tqdm
import logzero
import json
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from pytorch_lightning import Trainer
from pytorch_lightning import LightningModule
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
import pickle, copy, re, time, datetime, random, warnings, gc
import zarr
with open('parameters.json') as json_file:
JSON_PARAMETERS = json.load(json_file)
DATA_ROOT = Path("/data/lyft-motion-prediction-autonomous-vehicles")
TRAIN_ZARR = JSON_PARAMETERS["TRAIN_ZARR"]
VALID_ZARR = JSON_PARAMETERS["VALID_ZARR"]
HBACKWARD = JSON_PARAMETERS["HBACKWARD"]
HFORWARD = JSON_PARAMETERS["HFORWARD"]
NFRAMES = JSON_PARAMETERS["NFRAMES"]
FRAME_STRIDE = JSON_PARAMETERS["FRAME_STRIDE"]
AGENT_FEATURE_DIM = JSON_PARAMETERS["AGENT_FEATURE_DIM"]
MAX_AGENTS = JSON_PARAMETERS["MAX_AGENTS"]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
NUM_WORKERS = JSON_PARAMETERS["NUM_WORKERS"]
BATCH_SIZE = JSON_PARAMETERS["BATCH_SIZE"]
EPOCHS=JSON_PARAMETERS["EPOCHS"]
GRADIENT_CLIP_VAL = JSON_PARAMETERS["GRADIENT_CLIP_VAL"]
LIMIT_VAL_BATCHES = JSON_PARAMETERS["LIMIT_VAL_BATCHES"]
ROOT = JSON_PARAMETERS["ROOT"]
Path(ROOT).mkdir(exist_ok=True, parents=True)
def get_utc():
TIME_FORMAT = r"%Y-%m-%dT%H:%M:%S%Z"
return datetime.datetime.now(datetime.timezone.utc).strftime(TIME_FORMAT)
PERCEPTION_LABELS = JSON_PARAMETERS["PERCEPTION_LABELS"]
KEPT_PERCEPTION_LABELS = JSON_PARAMETERS["KEPT_PERCEPTION_LABELS"]
KEPT_PERCEPTION_LABELS_DICT = {label:PERCEPTION_LABELS.index(label) for label in KEPT_PERCEPTION_LABELS}
KEPT_PERCEPTION_KEYS = sorted(KEPT_PERCEPTION_LABELS_DICT.values())
class LabelEncoder:
def __init__(self, max_size=500, default_val=-1):
self.max_size = max_size
self.labels = {}
self.default_val = default_val
@property
def nlabels(self):
return len(self.labels)
def reset(self):
self.labels = {}
def partial_fit(self, keys):
nlabels = self.nlabels
available = self.max_size - nlabels
if available < 1:
return
keys = set(keys)
new_keys = list(keys - set(self.labels))
if not len(new_keys):
return
self.labels.update(dict(zip(new_keys, range(nlabels, nlabels + available) )))
def fit(self, keys):
self.reset()
self.partial_fit(keys)
def get(self, key):
return self.labels.get(key, self.default_val)
def transform(self, keys):
return np.array(list(map(self.get, keys)))
def fit_transform(self, keys, partial=True):
self.partial_fit(keys) if partial else self.fit(keys)
return self.transform(keys)
class CustomLyftDataset(Dataset):
feature_mins = np.array([-17.336, -27.137, 0. , 0., 0. , -3.142, -37.833, -65.583],
dtype="float32")[None,None, None]
feature_maxs = np.array([17.114, 20.787, 42.854, 42.138, 7.079, 3.142, 29.802, 35.722],
dtype="float32")[None,None, None]
def __init__(self, zdataset, scenes=None, nframes=10, frame_stride=15, hbackward=10,
hforward=50, max_agents=150, agent_feature_dim=8):
"""
Custom Lyft dataset reader.
        Parameters:
----------
zdataset: zarr dataset
The root dataset, containing scenes, frames and agents
nframes: int
Number of frames per scene
frame_stride: int
The stride when reading the **nframes** frames from a scene
        hbackward: int
            Number of backward frames from the current frame
        hforward: int
            Number of forward frames from the current frame
        max_agents: int
            Max number of agents to read for each target frame. Note that
            this also includes the backward agents but not the forward ones.
"""
super().__init__()
self.zdataset = zdataset
self.scenes = scenes if scenes is not None else []
self.nframes = nframes
self.frame_stride = frame_stride
self.hbackward = hbackward
self.hforward = hforward
self.max_agents = max_agents
self.nread_frames = (nframes-1)*frame_stride + hbackward + hforward
self.frame_fields = ['timestamp', 'agent_index_interval']
self.agent_feature_dim = agent_feature_dim
self.filter_scenes()
def __len__(self):
return len(self.scenes)
def filter_scenes(self):
self.scenes = [scene for scene in self.scenes if self.get_nframes(scene) > self.nread_frames]
def __getitem__(self, index):
return self.read_frames(scene=self.scenes[index])
def get_nframes(self, scene, start=None):
frame_start = scene["frame_index_interval"][0]
frame_end = scene["frame_index_interval"][1]
nframes = (frame_end - frame_start) if start is None else ( frame_end - max(frame_start, start) )
return nframes
def _read_frames(self, scene, start=None):
nframes = self.get_nframes(scene, start=start)
assert nframes >= self.nread_frames
frame_start = scene["frame_index_interval"][0]
start = start or frame_start + np.random.choice(nframes-self.nread_frames)
frames = self.zdataset.frames.get_basic_selection(
selection=slice(start, start+self.nread_frames),
fields=self.frame_fields,
)
return frames
def parse_frame(self, frame):
return frame
def parse_agent(self, agent):
return agent
def read_frames(self, scene, start=None, white_tracks=None, encoder=False):
white_tracks = white_tracks or []
frames = self._read_frames(scene=scene, start=start)
agent_start = frames[0]["agent_index_interval"][0]
agent_end = frames[-1]["agent_index_interval"][1]
agents = self.zdataset.agents[agent_start:agent_end]
X = np.zeros((self.nframes, self.max_agents, self.hbackward, self.agent_feature_dim), dtype=np.float32)
target = np.zeros((self.nframes, self.max_agents, self.hforward, 2), dtype=np.float32)
target_availability = np.zeros((self.nframes, self.max_agents, self.hforward), dtype=np.uint8)
X_availability = np.zeros((self.nframes, self.max_agents, self.hbackward), dtype=np.uint8)
for f in range(self.nframes):
backward_frame_start = f*self.frame_stride
forward_frame_start = f*self.frame_stride+self.hbackward
backward_frames = frames[backward_frame_start:backward_frame_start+self.hbackward]
forward_frames = frames[forward_frame_start:forward_frame_start+self.hforward]
backward_agent_start = backward_frames[-1]["agent_index_interval"][0] - agent_start
backward_agent_end = backward_frames[-1]["agent_index_interval"][1] - agent_start
backward_agents = agents[backward_agent_start:backward_agent_end]
le = LabelEncoder(max_size=self.max_agents)
le.fit(white_tracks)
le.partial_fit(backward_agents["track_id"])
for iframe, frame in enumerate(backward_frames):
backward_agent_start = frame["agent_index_interval"][0] - agent_start
backward_agent_end = frame["agent_index_interval"][1] - agent_start
backward_agents = agents[backward_agent_start:backward_agent_end]
track_ids = le.transform(backward_agents["track_id"])
mask = (track_ids != le.default_val)
mask_agents = backward_agents[mask]
mask_ids = track_ids[mask]
X[f, mask_ids, iframe, :2] = mask_agents["centroid"]
X[f, mask_ids, iframe, 2:5] = mask_agents["extent"]
X[f, mask_ids, iframe, 5] = mask_agents["yaw"]
X[f, mask_ids, iframe, 6:8] = mask_agents["velocity"]
X_availability[f, mask_ids, iframe] = 1
for iframe, frame in enumerate(forward_frames):
forward_agent_start = frame["agent_index_interval"][0] - agent_start
forward_agent_end = frame["agent_index_interval"][1] - agent_start
forward_agents = agents[forward_agent_start:forward_agent_end]
track_ids = le.transform(forward_agents["track_id"])
mask = track_ids != le.default_val
target[f, track_ids[mask], iframe] = forward_agents[mask]["centroid"]
target_availability[f, track_ids[mask], iframe] = 1
target -= X[:,:,[-1], :2]
target *= target_availability[:,:,:,None]
X[:,:,:, :2] -= X[:,:,[-1], :2]
X *= X_availability[:,:,:,None]
X -= self.feature_mins
X /= (self.feature_maxs - self.feature_mins)
if encoder:
return X, target, target_availability, le
return X, target, target_availability
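# Hedged usage sketch: mirrors how BaseNet.train_dataloader below builds the
# dataset from an opened zarr store (all names are the module-level constants).
# z = zarr.open(DATA_ROOT.joinpath(TRAIN_ZARR).as_posix(), "r")
# scenes = z.scenes.get_basic_selection(slice(None), fields=["frame_index_interval"])
# ds = CustomLyftDataset(z, scenes=scenes, nframes=NFRAMES, frame_stride=FRAME_STRIDE,
#                        hbackward=HBACKWARD, hforward=HFORWARD, max_agents=MAX_AGENTS,
#                        agent_feature_dim=AGENT_FEATURE_DIM)
# X, target, target_availability = ds[0]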
def collate(x):
x = map(np.concatenate, zip(*x))
x = map(torch.from_numpy, x)
return x
def shapefy( xy_pred, xy, xy_av):
NDIM = 3
xy_pred = xy_pred.view(-1, HFORWARD, NDIM, 2)
xy = xy.view(-1, HFORWARD, 2)[:,:,None]
xy_av = xy_av.view(-1, HFORWARD)[:,:,None]
return xy_pred, xy, xy_av
def LyftLoss(c, xy_pred, xy, xy_av):
c = c.view(-1, c.shape[-1])
xy_pred, xy, xy_av = shapefy(xy_pred, xy, xy_av)
c = torch.softmax(c, dim=1)
l = torch.sum(torch.mean(torch.square(xy_pred-xy), dim=3)*xy_av, dim=1)
# The LogSumExp trick for better numerical stability
# https://en.wikipedia.org/wiki/LogSumExp
m = l.min(dim=1).values
l = torch.exp(m[:, None]-l)
l = m - torch.log(torch.sum(l*c, dim=1))
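    # i.e. the two lines above compute -log(sum_k c_k * exp(-l_k)) as
    #   m - log(sum_k c_k * exp(m - l_k)) with m = min_k l_k,
    # so every exponent is <= 0 and the sum cannot overflow.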
denom = xy_av.max(2).values.max(1).values
l = torch.sum(l*denom)/denom.sum()
return 3*l # I found that my loss is usually 3 times smaller than the LB score
def MSE(xy_pred, xy, xy_av):
xy_pred, xy, xy_av = shapefy(xy_pred, xy, xy_av)
return 9*torch.mean(torch.sum(torch.mean(torch.square(xy_pred-xy), 3)*xy_av, dim=1))
def MAE(xy_pred, xy, xy_av):
xy_pred, xy, xy_av = shapefy(xy_pred, xy, xy_av)
return 9*torch.mean(torch.sum(torch.mean(torch.abs(xy_pred-xy), 3)*xy_av, dim=1))
class BaseNet(LightningModule):
def __init__(self, batch_size=32, lr=5e-4, weight_decay=1e-8, num_workers=0,
criterion=LyftLoss, data_root=DATA_ROOT, epochs=1):
super().__init__()
self.save_hyperparameters(
dict(
HBACKWARD = HBACKWARD,
HFORWARD = HFORWARD,
NFRAMES = NFRAMES,
FRAME_STRIDE = FRAME_STRIDE,
AGENT_FEATURE_DIM = AGENT_FEATURE_DIM,
MAX_AGENTS = MAX_AGENTS,
TRAIN_ZARR = TRAIN_ZARR,
VALID_ZARR = VALID_ZARR,
batch_size = batch_size,
lr=lr,
weight_decay=weight_decay,
num_workers=num_workers,
criterion=criterion,
epochs=epochs,
)
)
self._train_data = None
self._collate_fn = None
self._train_loader = None
self.batch_size = batch_size
self.num_workers = num_workers
self.lr = lr
self.epochs=epochs
self.weight_decay = weight_decay
self.criterion = criterion
self.data_root = data_root
def train_dataloader(self):
z = zarr.open(self.data_root.joinpath(TRAIN_ZARR).as_posix(), "r")
scenes = z.scenes.get_basic_selection(slice(None), fields= ["frame_index_interval"])
train_data = CustomLyftDataset(
z,
scenes = scenes,
nframes=NFRAMES,
frame_stride=FRAME_STRIDE,
hbackward=HBACKWARD,
hforward=HFORWARD,
max_agents=MAX_AGENTS,
agent_feature_dim=AGENT_FEATURE_DIM,
)
train_loader = DataLoader(train_data, batch_size = self.batch_size,collate_fn=collate,
pin_memory=True, num_workers = self.num_workers, shuffle=True)
self._train_data = train_data
self._train_loader = train_loader
return train_loader
def val_dataloader(self):
z = zarr.open(self.data_root.joinpath(VALID_ZARR).as_posix(), "r")
scenes = z.scenes.get_basic_selection(slice(None), fields=["frame_index_interval"])
val_data = CustomLyftDataset(
z,
scenes = scenes,
nframes=NFRAMES,
frame_stride=FRAME_STRIDE,
hbackward=HBACKWARD,
hforward=HFORWARD,
max_agents=MAX_AGENTS,
agent_feature_dim=AGENT_FEATURE_DIM,
)
val_loader = DataLoader(val_data, batch_size = self.batch_size, collate_fn=collate,
pin_memory=True, num_workers = self.num_workers, shuffle=True)
self._val_data = val_data
self._val_loader = val_loader
return val_loader
def validation_epoch_end(self, outputs):
avg_loss = torch.mean(torch.tensor([x['val_loss'] for x in outputs]))
avg_mse = torch.mean(torch.tensor([x['val_mse'] for x in outputs]))
avg_mae = torch.mean(torch.tensor([x['val_mae'] for x in outputs]))
tensorboard_logs = {'val_loss': avg_loss, "val_rmse": torch.sqrt(avg_mse), "val_mae": avg_mae}
torch.cuda.empty_cache()
gc.collect()
return {
'val_loss': avg_loss,
'log': tensorboard_logs,
"progress_bar": {"val_ll": tensorboard_logs["val_loss"], "val_rmse": tensorboard_logs["val_rmse"]}
}
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr= self.lr, betas= (0.9,0.999),
weight_decay= self.weight_decay, amsgrad=False)
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=self.epochs,
eta_min=1e-5,
)
return [optimizer], [scheduler]
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv = nn.Sequential(
nn.Conv1d(k, 256, kernel_size=1), nn.ReLU(),
nn.Conv1d(256, 256, kernel_size=1), nn.ReLU(),
nn.Conv1d(256, 512, kernel_size=1), nn.ReLU(),
)
self.fc = nn.Sequential(
nn.Linear(512, k*k),nn.ReLU(),
)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = self.conv(x)
x = torch.max(x, 2)[0]
x = self.fc(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,
self.k*self.k).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetfeat(nn.Module):
def __init__(self, global_feat = False, feature_transform = False, stn1_dim = 120,
stn2_dim = 64):
super(PointNetfeat, self).__init__()
self.global_feat = global_feat
self.feature_transform = feature_transform
self.stn1_dim = stn1_dim
self.stn2_dim = stn2_dim
self.stn = STNkd(k=stn1_dim)
self.conv1 = nn.Sequential(
nn.Conv1d(stn1_dim, 256, kernel_size=1), nn.ReLU(),
)
self.conv2 = nn.Sequential(
nn.Conv1d(256, 256, kernel_size=1), nn.ReLU(),
nn.Conv1d(256, 1024, kernel_size=1), nn.ReLU(),
nn.Conv1d(1024, 2048, kernel_size=1), nn.ReLU(),
)
if self.feature_transform:
self.fstn = STNkd(k=stn2_dim)
def forward(self, x):
n_pts = x.size()[2]
trans = self.stn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans)
x = x.transpose(2, 1)
x = self.conv1(x)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2,1)
else:
trans_feat = None
pointfeat = x
x = self.conv2(x)
x = torch.max(x, 2)[0]
if self.global_feat:
return x, trans, trans_feat
else:
x = x[:,:,None].repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class LyftNet(BaseNet):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pnet = PointNetfeat()
self.fc0 = nn.Sequential(
nn.Linear(2048+256, 1024), nn.ReLU(),
)
self.fc = nn.Sequential(
nn.Linear(1024, 300),
)
self.c_net = nn.Sequential(
nn.Linear(1024, 3),
)
def forward(self, x):
bsize, npoints, hb, nf = x.shape
# Push points to the last dim
x = x.transpose(1, 3)
# Merge time with features
x = x.reshape(bsize, hb*nf, npoints)
x, trans, trans_feat = self.pnet(x)
        # Push the merged features*time axis to the last dim
x = x.transpose(1,2)
x = self.fc0(x)
c = self.c_net(x)
x = self.fc(x)
return c,x
def training_step(self, batch, batch_idx):
x, y, y_av = [b.to(device) for b in batch]
c, preds = self(x)
loss = self.criterion(c,preds,y, y_av)
with torch.no_grad():
logs = {
'loss': loss,
"mse": MSE(preds, y, y_av),
"mae": MAE(preds, y, y_av),
}
return {'loss': loss, 'log': logs, "progress_bar": {"rmse":torch.sqrt(logs["mse"]) }}
@torch.no_grad()
def validation_step(self, batch, batch_idx):
x, y, y_av = [b.to(device) for b in batch]
c,preds = self(x)
loss = self.criterion(c, preds, y, y_av)
val_logs = {
'val_loss': loss,
"val_mse": MSE(preds, y, y_av),
"val_mae": MAE(preds, y, y_av),
}
return val_logs
def get_last_checkpoint(root):
res = None
mtime = -1
for model_name in Path(root).glob("lyfnet*.ckpt"):
e = model_name.stat().st_ctime
if e > mtime:
mtime=e
res = model_name
return res
def get_last_version(root):
last_version = 0
for model_name in Path(root).glob("version_*"):
version = int(model_name.as_posix().split("_")[-1])
if version > last_version:
last_version = version
return last_version
|
[
"torch.bmm",
"torch.sqrt",
"torch.cat",
"gc.collect",
"pathlib.Path",
"torch.no_grad",
"torch.square",
"torch.utils.data.DataLoader",
"torch.nn.Conv1d",
"torch.softmax",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.exp",
"torch.nn.Linear",
"numpy.random.choice",
"datetime.datetime.now",
"torch.cuda.is_available",
"torch.max",
"torch.sum",
"json.load",
"torch.nn.ReLU",
"numpy.zeros",
"numpy.array",
"torch.cuda.empty_cache",
"numpy.eye",
"torch.abs",
"torch.tensor"
] |
[((782, 838), 'pathlib.Path', 'Path', (['"""/data/lyft-motion-prediction-autonomous-vehicles"""'], {}), "('/data/lyft-motion-prediction-autonomous-vehicles')\n", (786, 838), False, 'from pathlib import Path\n'), ((748, 768), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (757, 768), False, 'import json\n'), ((9802, 9825), 'torch.softmax', 'torch.softmax', (['c'], {'dim': '(1)'}), '(c, dim=1)\n', (9815, 9825), False, 'import torch\n'), ((10051, 10076), 'torch.exp', 'torch.exp', (['(m[:, None] - l)'], {}), '(m[:, None] - l)\n', (10060, 10076), False, 'import torch\n'), ((18488, 18503), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18501, 18503), False, 'import torch\n'), ((1223, 1248), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1246, 1248), False, 'import torch\n'), ((1529, 1539), 'pathlib.Path', 'Path', (['ROOT'], {}), '(ROOT)\n', (1533, 1539), False, 'from pathlib import Path\n'), ((3124, 3215), 'numpy.array', 'np.array', (['[-17.336, -27.137, 0.0, 0.0, 0.0, -3.142, -37.833, -65.583]'], {'dtype': '"""float32"""'}), "([-17.336, -27.137, 0.0, 0.0, 0.0, -3.142, -37.833, -65.583], dtype\n ='float32')\n", (3132, 3215), True, 'import pandas as pd, numpy as np\n'), ((3251, 3344), 'numpy.array', 'np.array', (['[17.114, 20.787, 42.854, 42.138, 7.079, 3.142, 29.802, 35.722]'], {'dtype': '"""float32"""'}), "([17.114, 20.787, 42.854, 42.138, 7.079, 3.142, 29.802, 35.722],\n dtype='float32')\n", (3259, 3344), True, 'import pandas as pd, numpy as np\n'), ((6353, 6457), 'numpy.zeros', 'np.zeros', (['(self.nframes, self.max_agents, self.hbackward, self.agent_feature_dim)'], {'dtype': 'np.float32'}), '((self.nframes, self.max_agents, self.hbackward, self.\n agent_feature_dim), dtype=np.float32)\n', (6361, 6457), True, 'import pandas as pd, numpy as np\n'), ((6470, 6547), 'numpy.zeros', 'np.zeros', (['(self.nframes, self.max_agents, self.hforward, 2)'], {'dtype': 'np.float32'}), '((self.nframes, self.max_agents, self.hforward, 2), dtype=np.float32)\n', (6478, 6547), True, 'import pandas as pd, numpy as np\n'), ((6579, 6651), 'numpy.zeros', 'np.zeros', (['(self.nframes, self.max_agents, self.hforward)'], {'dtype': 'np.uint8'}), '((self.nframes, self.max_agents, self.hforward), dtype=np.uint8)\n', (6587, 6651), True, 'import pandas as pd, numpy as np\n'), ((6677, 6750), 'numpy.zeros', 'np.zeros', (['(self.nframes, self.max_agents, self.hbackward)'], {'dtype': 'np.uint8'}), '((self.nframes, self.max_agents, self.hbackward), dtype=np.uint8)\n', (6685, 6750), True, 'import pandas as pd, numpy as np\n'), ((10179, 10199), 'torch.sum', 'torch.sum', (['(l * denom)'], {}), '(l * denom)\n', (10188, 10199), False, 'import torch\n'), ((12485, 12620), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'self.batch_size', 'collate_fn': 'collate', 'pin_memory': '(True)', 'num_workers': 'self.num_workers', 'shuffle': '(True)'}), '(train_data, batch_size=self.batch_size, collate_fn=collate,\n pin_memory=True, num_workers=self.num_workers, shuffle=True)\n', (12495, 12620), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13378, 13511), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': 'self.batch_size', 'collate_fn': 'collate', 'pin_memory': '(True)', 'num_workers': 'self.num_workers', 'shuffle': '(True)'}), '(val_data, batch_size=self.batch_size, collate_fn=collate,\n pin_memory=True, num_workers=self.num_workers, shuffle=True)\n', (13388, 13511), False, 'from torch.utils.data import Dataset, DataLoader\n'), 
((14039, 14063), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (14061, 14063), False, 'import torch\n'), ((14072, 14084), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14082, 14084), False, 'import pickle, copy, re, time, datetime, random, warnings, gc\n'), ((14516, 14602), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': 'self.epochs', 'eta_min': '(1e-05)'}), '(optimizer, T_max=self.epochs, eta_min=\n 1e-05)\n', (14552, 14602), False, 'from torch import nn, optim\n'), ((16569, 16588), 'torch.bmm', 'torch.bmm', (['x', 'trans'], {}), '(x, trans)\n', (16578, 16588), False, 'import torch\n'), ((1643, 1687), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (1664, 1687), False, 'import pickle, copy, re, time, datetime, random, warnings, gc\n'), ((10102, 10125), 'torch.sum', 'torch.sum', (['(l * c)'], {'dim': '(1)'}), '(l * c, dim=1)\n', (10111, 10125), False, 'import torch\n'), ((13718, 13764), 'torch.tensor', 'torch.tensor', (["[x['val_loss'] for x in outputs]"], {}), "([x['val_loss'] for x in outputs])\n", (13730, 13764), False, 'import torch\n'), ((13795, 13840), 'torch.tensor', 'torch.tensor', (["[x['val_mse'] for x in outputs]"], {}), "([x['val_mse'] for x in outputs])\n", (13807, 13840), False, 'import torch\n'), ((13871, 13916), 'torch.tensor', 'torch.tensor', (["[x['val_mae'] for x in outputs]"], {}), "([x['val_mae'] for x in outputs])\n", (13883, 13916), False, 'import torch\n'), ((13989, 14008), 'torch.sqrt', 'torch.sqrt', (['avg_mse'], {}), '(avg_mse)\n', (13999, 14008), False, 'import torch\n'), ((14834, 14866), 'torch.nn.Conv1d', 'nn.Conv1d', (['k', '(256)'], {'kernel_size': '(1)'}), '(k, 256, kernel_size=1)\n', (14843, 14866), False, 'from torch import nn, optim\n'), ((14868, 14877), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14875, 14877), False, 'from torch import nn, optim\n'), ((14891, 14925), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)'], {'kernel_size': '(1)'}), '(256, 256, kernel_size=1)\n', (14900, 14925), False, 'from torch import nn, optim\n'), ((14927, 14936), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14934, 14936), False, 'from torch import nn, optim\n'), ((14950, 14984), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(512)'], {'kernel_size': '(1)'}), '(256, 512, kernel_size=1)\n', (14959, 14984), False, 'from torch import nn, optim\n'), ((14986, 14995), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14993, 14995), False, 'from torch import nn, optim\n'), ((15061, 15082), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(k * k)'], {}), '(512, k * k)\n', (15070, 15082), False, 'from torch import nn, optim\n'), ((15081, 15090), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15088, 15090), False, 'from torch import nn, optim\n'), ((15217, 15232), 'torch.max', 'torch.max', (['x', '(2)'], {}), '(x, 2)\n', (15226, 15232), False, 'import torch\n'), ((16061, 16100), 'torch.nn.Conv1d', 'nn.Conv1d', (['stn1_dim', '(256)'], {'kernel_size': '(1)'}), '(stn1_dim, 256, kernel_size=1)\n', (16070, 16100), False, 'from torch import nn, optim\n'), ((16102, 16111), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16109, 16111), False, 'from torch import nn, optim\n'), ((16180, 16214), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)'], {'kernel_size': '(1)'}), '(256, 256, kernel_size=1)\n', (16189, 16214), False, 'from torch import nn, optim\n'), ((16216, 16225), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16223, 16225), False, 
'from torch import nn, optim\n'), ((16239, 16274), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(1024)'], {'kernel_size': '(1)'}), '(256, 1024, kernel_size=1)\n', (16248, 16274), False, 'from torch import nn, optim\n'), ((16276, 16285), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16283, 16285), False, 'from torch import nn, optim\n'), ((16299, 16335), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1024)', '(2048)'], {'kernel_size': '(1)'}), '(1024, 2048, kernel_size=1)\n', (16308, 16335), False, 'from torch import nn, optim\n'), ((16337, 16346), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16344, 16346), False, 'from torch import nn, optim\n'), ((16777, 16801), 'torch.bmm', 'torch.bmm', (['x', 'trans_feat'], {}), '(x, trans_feat)\n', (16786, 16801), False, 'import torch\n'), ((16949, 16964), 'torch.max', 'torch.max', (['x', '(2)'], {}), '(x, 2)\n', (16958, 16964), False, 'import torch\n'), ((17354, 17381), 'torch.nn.Linear', 'nn.Linear', (['(2048 + 256)', '(1024)'], {}), '(2048 + 256, 1024)\n', (17363, 17381), False, 'from torch import nn, optim\n'), ((17381, 17390), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17388, 17390), False, 'from torch import nn, optim\n'), ((17448, 17468), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(300)'], {}), '(1024, 300)\n', (17457, 17468), False, 'from torch import nn, optim\n'), ((17529, 17547), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(3)'], {}), '(1024, 3)\n', (17538, 17547), False, 'from torch import nn, optim\n'), ((18214, 18229), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18227, 18229), False, 'import torch\n'), ((18955, 18965), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (18959, 18965), False, 'from pathlib import Path\n'), ((19186, 19196), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (19190, 19196), False, 'from pathlib import Path\n'), ((5619, 5664), 'numpy.random.choice', 'np.random.choice', (['(nframes - self.nread_frames)'], {}), '(nframes - self.nread_frames)\n', (5635, 5664), True, 'import pandas as pd, numpy as np\n'), ((9860, 9886), 'torch.square', 'torch.square', (['(xy_pred - xy)'], {}), '(xy_pred - xy)\n', (9872, 9886), False, 'import torch\n'), ((17118, 17146), 'torch.cat', 'torch.cat', (['[x, pointfeat]', '(1)'], {}), '([x, pointfeat], 1)\n', (17127, 17146), False, 'import torch\n'), ((18451, 18474), 'torch.sqrt', 'torch.sqrt', (["logs['mse']"], {}), "(logs['mse'])\n", (18461, 18474), False, 'import torch\n'), ((10421, 10447), 'torch.square', 'torch.square', (['(xy_pred - xy)'], {}), '(xy_pred - xy)\n', (10433, 10447), False, 'import torch\n'), ((10593, 10616), 'torch.abs', 'torch.abs', (['(xy_pred - xy)'], {}), '(xy_pred - xy)\n', (10602, 10616), False, 'import torch\n'), ((15301, 15315), 'numpy.eye', 'np.eye', (['self.k'], {}), '(self.k)\n', (15307, 15315), True, 'import pandas as pd, numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import numpy as np
from ..sine_wave_generator import SineWaveChannelConfig, SineWaveGenerator
def test_generate_sinusoid() -> None:
"""
    Tests that the samples generated from each channel match the parameters
    specified in its configuration.
"""
test_clock_frequency = 100 # hz
test_duration = 300 # sec
# test configurations
shape = (2,)
amplitudes = np.array([5.0, 3.0])
frequencies = np.array([5, 10])
phase_shifts = np.array([1.0, 5.0])
midlines = np.array([3.0, -2.5])
sample_rate = test_clock_frequency
config = SineWaveChannelConfig(
shape, amplitudes, frequencies, phase_shifts, midlines, sample_rate
)
# The generator
generator = SineWaveGenerator(config)
# Generate expected values
t_s = np.arange(0, test_duration, 1 / test_clock_frequency)
angles = np.expand_dims(frequencies, 1) * np.expand_dims(2 * np.pi * t_s, 0)
angles = angles + np.expand_dims(phase_shifts, 1)
expected = np.expand_dims(amplitudes, 1) * np.sin(angles) + np.expand_dims(
midlines, 1
)
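    # i.e. expected[c, t] = amplitudes[c] * sin(2*pi*frequencies[c]*t + phase_shifts[c]) + midlines[c]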
values = [generator.next_sample().data for t in t_s]
values = np.array(values).T
np.testing.assert_almost_equal(values, expected)
if __name__ == "__main__":
test_generate_sinusoid()
|
[
"numpy.testing.assert_almost_equal",
"numpy.expand_dims",
"numpy.sin",
"numpy.array",
"numpy.arange"
] |
[((477, 497), 'numpy.array', 'np.array', (['[5.0, 3.0]'], {}), '([5.0, 3.0])\n', (485, 497), True, 'import numpy as np\n'), ((516, 533), 'numpy.array', 'np.array', (['[5, 10]'], {}), '([5, 10])\n', (524, 533), True, 'import numpy as np\n'), ((553, 573), 'numpy.array', 'np.array', (['[1.0, 5.0]'], {}), '([1.0, 5.0])\n', (561, 573), True, 'import numpy as np\n'), ((589, 610), 'numpy.array', 'np.array', (['[3.0, -2.5]'], {}), '([3.0, -2.5])\n', (597, 610), True, 'import numpy as np\n'), ((874, 927), 'numpy.arange', 'np.arange', (['(0)', 'test_duration', '(1 / test_clock_frequency)'], {}), '(0, test_duration, 1 / test_clock_frequency)\n', (883, 927), True, 'import numpy as np\n'), ((1263, 1311), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['values', 'expected'], {}), '(values, expected)\n', (1293, 1311), True, 'import numpy as np\n'), ((941, 971), 'numpy.expand_dims', 'np.expand_dims', (['frequencies', '(1)'], {}), '(frequencies, 1)\n', (955, 971), True, 'import numpy as np\n'), ((974, 1008), 'numpy.expand_dims', 'np.expand_dims', (['(2 * np.pi * t_s)', '(0)'], {}), '(2 * np.pi * t_s, 0)\n', (988, 1008), True, 'import numpy as np\n'), ((1031, 1062), 'numpy.expand_dims', 'np.expand_dims', (['phase_shifts', '(1)'], {}), '(phase_shifts, 1)\n', (1045, 1062), True, 'import numpy as np\n'), ((1127, 1154), 'numpy.expand_dims', 'np.expand_dims', (['midlines', '(1)'], {}), '(midlines, 1)\n', (1141, 1154), True, 'import numpy as np\n'), ((1240, 1256), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1248, 1256), True, 'import numpy as np\n'), ((1078, 1107), 'numpy.expand_dims', 'np.expand_dims', (['amplitudes', '(1)'], {}), '(amplitudes, 1)\n', (1092, 1107), True, 'import numpy as np\n'), ((1110, 1124), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (1116, 1124), True, 'import numpy as np\n')]
|
# coding: utf-8
# ---
# @File: model.py
# @description: Model class
# @Author: <NAME>
# @E-mail: <EMAIL>
# @Time: March 18, 2019
# ---
import tensorflow as tf
from PIL import Image
import scipy.misc
import os
from linear_3d_layer import Linear3DLayer
class Model_X(tf.keras.Model):
"""
    Inherits from the base class tf.keras.Model
"""
def __init__(self, rnn_units, num_class):
super(Model_X, self).__init__()
self.rnn_units = rnn_units
self.num_class = num_class
self.i = 0
        # Linear layers
self.lcl1 = Linear3DLayer(filters=8, kernel_size=[1, 3, 75, 6],
activate_size=[3, 1, 2], activate_stride=[3, 1, 1])
self.lcl2 = Linear3DLayer(filters=8, kernel_size=[8, 3, 36, 3],
activate_size=[3, 1, 2], activate_stride=[3, 1, 1])
self.lcl3 = Linear3DLayer(filters=8, kernel_size=[8, 3, 17, 2],
activate_size=[3, 1, 1], activate_stride=[3, 1, 1])
        # Pooling layers
self.pooling1 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 2], strides=[2, 2, 2], padding='same',
data_format='channels_first')
self.pooling2 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 2], strides=[2, 2, 2], padding='same',
data_format='channels_first')
self.pooling3 = tf.keras.layers.MaxPool3D(pool_size=[2, 2, 2], strides=[2, 2, 2], padding='same',
data_format='channels_first')
# 3DCNN
# self.conv3d1 = tf.keras.layers.Conv3D(filters=32, kernel_size=[3, 78, 6], strides=[1, 1, 6],
# use_bias=True,
# activation=tf.nn.leaky_relu, padding='same',
# kernel_initializer=tf.keras.initializers.he_normal(),
# bias_initializer=tf.zeros_initializer(),
# data_format='channels_first')
# self.conv3d2 = tf.keras.layers.Conv3D(filters=16, kernel_size=[3, 38, 3], strides=[1, 1, 3],
# use_bias=True,
# activation=tf.nn.leaky_relu, padding='same',
# kernel_initializer=tf.keras.initializers.he_normal(),
# bias_initializer=tf.zeros_initializer(),
# data_format='channels_first')
# self.conv3d3 = tf.keras.layers.Conv3D(filters=8, kernel_size=[3, 19, 2], strides=[1, 1, 2],
# use_bias=True,
# activation=tf.nn.leaky_relu, padding='same',
# kernel_initializer=tf.keras.initializers.he_normal(),
# bias_initializer=tf.zeros_initializer(),
# data_format='channels_first')
        # GRU network
self.cell1 = tf.keras.layers.GRU(units=self.rnn_units, return_sequences=True)
self.cell2 = tf.keras.layers.GRU(units=self.num_class)
# BatchNormal
self.bn1 = tf.keras.layers.BatchNormalization()
self.bn2 = tf.keras.layers.BatchNormalization()
self.bn3 = tf.keras.layers.BatchNormalization()
# self.pooling_a = tf.keras.layers.AveragePooling2D(pool_size=[1, 1, 2], strides=[1, 1, 2], padding='same',
# data_format='channels_first')
# drop = tf.keras.layers.Dropout(rate=drop_rate)
# FC
# self.fla = tf.keras.layers.Flatten(data_format='channels_last')
# self.fc1 = tf.keras.layers.Dense(units=128, use_bias=True, activation=None,
# kernel_initializer=tf.keras.initializers.he_normal(),
# bias_initializer=tf.constant_initializer)
# self.fc2 = tf.keras.layers.Dense(units=num_class, use_bias=True, activation=None,
# kernel_initializer=tf.keras.initializers.he_normal(),
# bias_initializer=tf.constant_initializer)
def call(self, inputs, drop_rate=0.3, **kwargs):
"""
        Stacks three time-frequency-frame linear layers and two GRUs, interleaved with BatchNormalization,
        and outputs the GRU's last time state as the logits.
        :param drop_rate: dropout ratio = 0.3; this hyperparameter is unused
:param inputs: [?, 1, 200, 80, 4]
:return: logits
"""
is_training = tf.equal(drop_rate, 0.3)
# print('inputs ', np.shape(inputs))
lc1 = self.lcl1(inputs)
# print('conv1: ', sc1.get_shape().as_list())
lc1 = self.bn1(lc1, training=is_training)
pool1 = self.pooling1(lc1) # (?, filters, 99, 39, 4)
# print('pool1: ', pool1.get_shape().as_list())
lc2 = self.lcl2(pool1)
lc2 = self.bn2(lc2, training=is_training)
pool2 = self.pooling2(lc2) # (?, filters, 49, 19, 2)
# print('pool2: ', pool2.get_shape().as_list())
lc3 = self.lcl3(pool2)
lc3 = self.bn3(lc3, training=is_training)
pool3 = self.pooling3(lc3) # (?, filters, 24, 9, 1)
# pool3 = self.pooling_a(pool3)
pool3 = tf.squeeze(pool3, axis=-1) # [?, filters, 24, 9]
# print('pool3: ', pool3.get_shape().as_list())
# x_rnn = tf.squeeze(pool3, axis=2) # (?, 8, 2, 10, 5)
        # x_rnns = tf.unstack(pool3, axis=2)  # unstack the frame dimension 2*[?, 8, 10, 5]
        # x_rnn = tf.concat(x_rnns, axis=3)  # merge into the row dimension [?, 8, 10, 10]
if not is_training:
# self.draw_hid_features(inputs, pool3)
pass
##################################################################
        x_rnns = tf.unstack(pool3, axis=1)  # unstack the channel dimension: filters*[?, 17, 10]
        x_rnn = tf.concat(x_rnns, axis=2)  # merge into the column dimension: [?, 17, filters*10=80]
# x_rnn = tf.transpose(x_rnn, [0, 2, 1]) # [?, 10, 80]
# rnn_output = []
# for i in range(self.num_class):
# name = "ltsm_" + str(i)
# cell = tf.keras.layers.CuDNNLSTM(units=self.rnn_units, name=name)
# fc = tf.keras.layers.Dense(units=1, use_bias=True, activation=None,
# kernel_initializer=tf.keras.initializers.he_normal(),
# bias_initializer=tf.constant_initializer())
# drop = tf.keras.layers.Dropout(rate=drop_rate)
#
# item_out = cell(inputs=x_rnn) # [?, 64]
# fc_out = drop(item_out)
# fc_out2 = fc(fc_out) # [?, 1]
# cell = None
# drop = None
# fc = None
#
# rnn_output.append(fc_out2) # [4, ?, 1]
# rnn_output = tf.squeeze(rnn_output) # [4, ?]
# logits = tf.transpose(rnn_output) # [?, 4]
####################################################################
# rnn_output = []
# for _index in range(4):
# name = "gru_" + str(_index)
# cell = tf.keras.layers.CuDNNLSTM(units=32, name=name)
# item_out = cell(inputs=x_rnns[_index]) # [?, 25, rnn_units]
# cell = None
#
# rnn_output.append(item_out)
#
# output = tf.concat(rnn_output, 1) # [?, self.rnn_units*4]
# drop = tf.keras.layers.Dropout(rate=drop_rate)(output)
# logits = self.fc2(drop)
####################################################################
# drop = tf.keras.layers.Dropout(rate=drop_rate)
# fla = self.fla(x_rnn)
# fc1 = self.fc1(fla)
# fc1 = drop(fc1)
# logits = self.fc2(fc1)
####################################################################
cell_out1 = self.cell1(x_rnn)
logits = self.cell2(cell_out1)
return logits
def draw_hid_features(self, inputs, batch):
"""
        Draws the feature maps of the intermediate layers and saves them locally under hid_pic/; called around lines 120-121.
:param inputs: [?, 1, 100, 80, 6]
:param batch: [?, 8, 13, 10]
"""
import numpy
inputs = numpy.squeeze(inputs) # [?, 100, 80, 6]
batch = batch.numpy()
index_sample = 0
for sample in batch:
# [8, 13, 10]
index_channel = 0
yuan_tus = inputs[index_sample]
yuan_tu = numpy.hstack(yuan_tus)
save_dir = 'hid_pic' + '/batch_' + str(self.i) + '/' + str(index_sample)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
Image.fromarray(yuan_tu).convert('RGB').save(save_dir + '/' + 'yuan_tu.jpg')
for feature in sample:
# [13, 10]
save_path = save_dir + '/' + str(index_channel) + '.jpg'
scipy.misc.imsave(save_path, feature.T)
# Image.fromarray(feature).convert('RGB').save(save_path)
index_channel += 1
index_sample += 1
self.i += 1
############
# 8 16 8
|
[
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.unstack",
"os.makedirs",
"tensorflow.keras.layers.GRU",
"os.path.exists",
"tensorflow.concat",
"linear_3d_layer.Linear3DLayer",
"numpy.hstack",
"PIL.Image.fromarray",
"tensorflow.squeeze",
"numpy.squeeze",
"tensorflow.keras.layers.MaxPool3D",
"tensorflow.equal"
] |
[((525, 632), 'linear_3d_layer.Linear3DLayer', 'Linear3DLayer', ([], {'filters': '(8)', 'kernel_size': '[1, 3, 75, 6]', 'activate_size': '[3, 1, 2]', 'activate_stride': '[3, 1, 1]'}), '(filters=8, kernel_size=[1, 3, 75, 6], activate_size=[3, 1, 2],\n activate_stride=[3, 1, 1])\n', (538, 632), False, 'from linear_3d_layer import Linear3DLayer\n'), ((683, 790), 'linear_3d_layer.Linear3DLayer', 'Linear3DLayer', ([], {'filters': '(8)', 'kernel_size': '[8, 3, 36, 3]', 'activate_size': '[3, 1, 2]', 'activate_stride': '[3, 1, 1]'}), '(filters=8, kernel_size=[8, 3, 36, 3], activate_size=[3, 1, 2],\n activate_stride=[3, 1, 1])\n', (696, 790), False, 'from linear_3d_layer import Linear3DLayer\n'), ((841, 948), 'linear_3d_layer.Linear3DLayer', 'Linear3DLayer', ([], {'filters': '(8)', 'kernel_size': '[8, 3, 17, 2]', 'activate_size': '[3, 1, 1]', 'activate_stride': '[3, 1, 1]'}), '(filters=8, kernel_size=[8, 3, 17, 2], activate_size=[3, 1, 1],\n activate_stride=[3, 1, 1])\n', (854, 948), False, 'from linear_3d_layer import Linear3DLayer\n'), ((1018, 1134), 'tensorflow.keras.layers.MaxPool3D', 'tf.keras.layers.MaxPool3D', ([], {'pool_size': '[2, 2, 2]', 'strides': '[2, 2, 2]', 'padding': '"""same"""', 'data_format': '"""channels_first"""'}), "(pool_size=[2, 2, 2], strides=[2, 2, 2], padding=\n 'same', data_format='channels_first')\n", (1043, 1134), True, 'import tensorflow as tf\n'), ((1204, 1320), 'tensorflow.keras.layers.MaxPool3D', 'tf.keras.layers.MaxPool3D', ([], {'pool_size': '[2, 2, 2]', 'strides': '[2, 2, 2]', 'padding': '"""same"""', 'data_format': '"""channels_first"""'}), "(pool_size=[2, 2, 2], strides=[2, 2, 2], padding=\n 'same', data_format='channels_first')\n", (1229, 1320), True, 'import tensorflow as tf\n'), ((1390, 1506), 'tensorflow.keras.layers.MaxPool3D', 'tf.keras.layers.MaxPool3D', ([], {'pool_size': '[2, 2, 2]', 'strides': '[2, 2, 2]', 'padding': '"""same"""', 'data_format': '"""channels_first"""'}), "(pool_size=[2, 2, 2], strides=[2, 2, 2], padding=\n 'same', data_format='channels_first')\n", (1415, 1506), True, 'import tensorflow as tf\n'), ((3193, 3257), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', ([], {'units': 'self.rnn_units', 'return_sequences': '(True)'}), '(units=self.rnn_units, return_sequences=True)\n', (3212, 3257), True, 'import tensorflow as tf\n'), ((3279, 3320), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', ([], {'units': 'self.num_class'}), '(units=self.num_class)\n', (3298, 3320), True, 'import tensorflow as tf\n'), ((3363, 3399), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3397, 3399), True, 'import tensorflow as tf\n'), ((3419, 3455), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3453, 3455), True, 'import tensorflow as tf\n'), ((3475, 3511), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3509, 3511), True, 'import tensorflow as tf\n'), ((4691, 4715), 'tensorflow.equal', 'tf.equal', (['drop_rate', '(0.3)'], {}), '(drop_rate, 0.3)\n', (4699, 4715), True, 'import tensorflow as tf\n'), ((5415, 5441), 'tensorflow.squeeze', 'tf.squeeze', (['pool3'], {'axis': '(-1)'}), '(pool3, axis=-1)\n', (5425, 5441), True, 'import tensorflow as tf\n'), ((5917, 5942), 'tensorflow.unstack', 'tf.unstack', (['pool3'], {'axis': '(1)'}), '(pool3, axis=1)\n', (5927, 5942), True, 'import tensorflow as tf\n'), ((5990, 6015), 'tensorflow.concat', 'tf.concat', (['x_rnns'], {'axis': '(2)'}), '(x_rnns, 
axis=2)\n', (5999, 6015), True, 'import tensorflow as tf\n'), ((8268, 8289), 'numpy.squeeze', 'numpy.squeeze', (['inputs'], {}), '(inputs)\n', (8281, 8289), False, 'import numpy\n'), ((8517, 8539), 'numpy.hstack', 'numpy.hstack', (['yuan_tus'], {}), '(yuan_tus)\n', (8529, 8539), False, 'import numpy\n'), ((8645, 8669), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (8659, 8669), False, 'import os\n'), ((8687, 8708), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (8698, 8708), False, 'import os\n'), ((8721, 8745), 'PIL.Image.fromarray', 'Image.fromarray', (['yuan_tu'], {}), '(yuan_tu)\n', (8736, 8745), False, 'from PIL import Image\n')]
|
import argparse
import numpy as np
import time
from datetime import datetime
import os
from copy import deepcopy
import functools
import typing
import pickle
from .. import Chem
from .. import chemutils
import rdkit
from rdkit.Chem import Draw
import rdkit.RDLogger
import torch
from ..chemutils import get_mol, get_smiles, get_smiles_2D
from .. import vocabulary
from .. import data_utils
from .. import action, molecule_representation as mr, model as mo
from .. import molecule_edit as me
from ..molecule_models import joint_network
from ..molecule_models import action_representation as ar
from ..molecule_models._train_utils import replace_sparse_tensor, load_cuda_async, cast_numpy_to_torch
from ._data import SampleResult
SIZE_CAP = 25
class GibbsSampler:
"""Runs a Gibbs chain that alternates between a provided corruption distribution and reconstruction model.
"""
def __init__(self, model, expected_corruption_steps=5, action_encoder=None, device=None):
"""
Parameters
----------
        model: a PyTorch network that takes as input a molecular graph (x_tilde) and returns logits
            over all possible actions.
expected_corruption_steps: the expected length of the corruption sequence,
used to determine the geometric distribution parameter.
action_encoder: used to specify the vocab size and possible actions
(of default type action_representation.VocabInsertEncoder)
"""
self.model = model
self.expected_corruption_steps = expected_corruption_steps
self.vocab = vocabulary.Vocabulary()
if action_encoder is None:
action_encoder = ar.VocabInsertEncoder(canonical=True)
self.action_encoder = action_encoder
self.device = device
def corrupter(self, mol, rng=np.random, return_seq=False):
"""Corrupts the input (of type rdkit Mol) via the default random insert & delete operations in molecule_edit.py.
"""
seq = [mol]
acts = []
ori_mol = deepcopy(mol)
number_of_steps = rng.geometric(1 / (1 + self.expected_corruption_steps)) - 1
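        # Geometric(p) with p = 1/(1 + E) has mean 1 + E, so after subtracting 1 the
        # number of corruption edits has mean E = expected_corruption_steps.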
for _ in range(number_of_steps):
if rng.uniform() < 0.5 and len(me.get_leaves(mol)) >= 2:
mol, this_act = me.delete_random_leaf(mol, rng=rng, return_action=True)
else:
mol, this_act = me.insert_random_node(mol, self.vocab, rng=rng, return_action=True)
seq.append(mol)
acts.append(this_act)
# Size cap
if mol.GetNumAtoms() > SIZE_CAP:
return self.corrupter(ori_mol, rng=rng, return_seq=return_seq)
# Avoid splits (rare)
if '.' in get_smiles_2D(mol):
return self.corrupter(ori_mol, rng=rng, return_seq=return_seq)
if not return_seq:
return mol
else:
return mol, seq
def _reconstruct_single_step(self, x_tilde):
""" Runs a single step of the reconstruction process.
Parameters
----------
x_tilde: the input molecule to the reconstructor
Returns
-------
A tuple of two elements.
        mol: the molecule obtained by applying the sampled one-step action, if it was valid,
            or None if none of the sampled actions were valid.
act: The action that was sampled for the molecule.
"""
x_tilde_graph = mr.combine_mol_graph(
[mr.mol2graph_single(x_tilde, include_leaves=True, include_rings=True, normalization='sqrt')],
return_namedtuple=True,
cast_tensor=cast_numpy_to_torch)
x_tilde_graph = load_cuda_async(x_tilde_graph, device=self.device)
x_tilde_graph = mr.GraphInfo.from_sequence(x_tilde_graph)
x_tilde_graph = replace_sparse_tensor(x_tilde_graph)
logits_and_scopes = self.model(x_tilde_graph)
predictions, cand_act_idxs = mo.classification.multi_classification_prediction(
logits_and_scopes, predict=True, num_samples=5)
for i, act_idx in enumerate(cand_act_idxs.cpu()[0]):
act_idx = act_idx.item()
# Get corresponding action object and try executing it
lengths = ar.compute_action_lengths(x_tilde, self.action_encoder)
act = ar.integer_to_action(act_idx, lengths, self.action_encoder)
try:
result = me.compute_action(x_tilde, act, vocab=self.vocab)
break
except ValueError:
pass
else:
# No valid action sampled.
result = None
return result, act
def reconstruct(self, actual_x_tilde, return_seq=False):
""" Runs the reconstructor on the given molecule.
Parameters
----------
actual_x_tilde: the corrupted molecule
        return_seq: if True, returns the denoising sequence; otherwise,
            only the last denoised value is returned.
"""
# Reconstruct
x = None
if return_seq:
seq = [actual_x_tilde]
x_tilde = deepcopy(actual_x_tilde)
num_steps_taken = 0
visited_smiles = {get_smiles_2D(actual_x_tilde): 0}
is_revisit = False
while True:
x_tilde, act = self._reconstruct_single_step(x_tilde)
if x_tilde is None:
print('Did not sample valid action. Returning to previous mol.')
break
            if return_seq:
                seq.append(x_tilde)
            num_steps_taken += 1
this_smiles = get_smiles_2D(x_tilde)
is_revisit = False
if this_smiles in visited_smiles:
# print('Revisited on step %i' % visited_smiles[this_smiles])
is_revisit = True
else:
visited_smiles[this_smiles] = num_steps_taken
if is_revisit or isinstance(act, action.Stop):
if x_tilde.GetNumAtoms() > SIZE_CAP:
# print('Mol too large. Returning to previous mol.')
pass
elif '.' in get_smiles_2D(x_tilde):
# Avoid splits (rare). Leaving this as return to previous
# print('Broke mol. Returning to previous mol.')
pass
else:
x = x_tilde
break
if not return_seq:
return x, num_steps_taken, is_revisit
else:
return x, seq, num_steps_taken, is_revisit
def _apply_corrupter(self, x, rng, check_substructure):
while True:
try:
actual_x_tilde, seq = self.corrupter(x, rng=rng, return_seq=True)
if check_substructure(actual_x_tilde):
break
except ValueError:
print('Corruption failed. Retrying corruption.')
pass
return actual_x_tilde, len(seq)
def run_chain(self, init_smiles=None, num_transitions=1000, sample_freq=1, seed=None, substructure=None):
"""
Parameters
----------
init_smiles: the SMILES string with which to initialize the chain.
If not provided, a random string from the ZINC validation set will be used.
num_transitions: total number of chain transitions to run.
sample_freq: frequency to print chain's state.
seed: seed for numpy.random.
"""
if not seed:
seed = np.random.randint(2**31 - 1)
rng = np.random.RandomState(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.autograd.set_grad_enabled(False)
# Initialize chain
if substructure is not None:
init_smiles = substructure
elif not init_smiles:
path = '../data/zinc/train.txt'
with open(path, 'r') as f:
data = [line.strip("\r\n ").split()[0] for line in f]
init_smiles = rng.choice(data)
init_smiles = get_smiles_2D(get_mol(init_smiles))
x = get_mol(init_smiles)
if substructure is not None:
for atom in x.GetAtoms():
atom.SetAtomMapNum(42)
num_marked = x.GetNumAtoms()
def check_sub_intact(mol):
num_here = len([atom for atom in mol.GetAtoms() if atom.GetAtomMapNum() == 42])
if num_here == num_marked:
return True
else:
return False
else:
def check_sub_intact(mol):
return True
# Run chain
collected_x_tilde = []
collected_x = [init_smiles]
print('init_x: %s' % init_smiles)
num_steps_reconstruct_chain = []
num_steps_corrupt_chain = []
revisit_chain = []
transition_attempts_chain = []
for t in range(num_transitions):
transition_attempts = 0
while True:
actual_x_tilde, num_steps_corrupt = self._apply_corrupter(x, rng, check_sub_intact)
# Reconstruct
for _ in range(10):
# Attempt 10 possible reconstruction transitions from the given corruption.
transition_attempts += 1
potential_x, num_steps_reconstruct, revisit = self.reconstruct(actual_x_tilde)
if potential_x is not None and check_sub_intact(potential_x):
# If the proposed x is valid, record it, and move to next transition.
x = potential_x
break
else:
# If none of the proposed reconstructions are valid after 10 steps,
# we retry the entire transition (including sampling the corruption).
continue
# Break out of the loop to validate a single transition.
break
if (t + 1) % sample_freq == 0:
# Print current state
collected_x_tilde.append(get_smiles_2D(actual_x_tilde))
collected_x.append(get_smiles_2D(x))
num_steps_corrupt_chain.append(num_steps_corrupt)
num_steps_reconstruct_chain.append(num_steps_reconstruct)
revisit_chain.append(revisit)
transition_attempts_chain.append(transition_attempts)
print('Iteration: %i' % (t + 1))
print('x_tilde: %s, x: %s' % (get_smiles_2D(actual_x_tilde), get_smiles_2D(x)))
return SampleResult(
seed, self.expected_corruption_steps, collected_x, collected_x_tilde,
num_steps_corrupt_chain, num_steps_reconstruct_chain, transition_attempts_chain,
revisit_chain, {})
def save_result(result: SampleResult, parameters=None):
if parameters is None:
parameters = {}
path = parameters.get('output_path')
if path is None:
savedir = parameters.get('save_dir')
else:
savedir = os.path.dirname(path)
if savedir is None:
savedir = '../output/'
if path is None:
for i in range(1000):
path = os.path.join(savedir, 'result_{0}.pkl'.format(i))
if not os.path.exists(path):
break
else:
raise ValueError("All paths exist.")
os.makedirs(savedir, exist_ok=True)
result = result._replace(meta={
**result.meta,
'model_path': parameters.get('model_path', "Model path unknown.")
})
print('Saving result in path {0}'.format(os.path.abspath(path)))
with open(path, 'wb') as f:
pickle.dump(result, f, protocol=pickle.HIGHEST_PROTOCOL)
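# A saved result can later be reloaded with the standard pickle API (the path shown is hypothetical):
#   with open('../output/result_0.pkl', 'rb') as f:
#       result = pickle.load(f)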
def main():
# Set rdkit logging level
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None)
parser.add_argument('--seed', default=None, type=int)
parser.add_argument('--expected_corruption_steps', default=5, type=int)
parser.add_argument('--num_transitions', default=1000, type=int)
parser.add_argument('--sample_freq', default=1, type=int)
parser.add_argument('--substructure', default=None)
parser.add_argument('--device', default='cpu')
parser.add_argument('--save_dir', default=None)
args = parser.parse_args()
if not args.model_path:
raise ValueError('Please specify a model path.')
# Load model
action_encoder = ar.VocabInsertEncoder(canonical=True)
config = joint_network.JointClassificationNetworkConfiguration(
action_encoder.get_num_atom_insert_locations(),
action_encoder.num_insert_bond_locations,
hidden_size=384)
model = joint_network.JointClassificationNetwork(1, config)
model.load_state_dict(torch.load(args.model_path, map_location='cpu'))
device = torch.device(args.device)
model = model.to(device=device)
model.eval()
# Run chain
sampler = GibbsSampler(model, args.expected_corruption_steps, action_encoder, device)
result = sampler.run_chain(seed=args.seed, num_transitions=args.num_transitions, sample_freq=args.sample_freq, substructure=args.substructure)
save_result(result, vars(args))
if __name__ == '__main__':
main()
|
[
"copy.deepcopy",
"pickle.dump",
"os.path.abspath",
"os.makedirs",
"argparse.ArgumentParser",
"torch.manual_seed",
"os.path.dirname",
"torch.load",
"os.path.exists",
"numpy.random.RandomState",
"rdkit.RDLogger.logger",
"torch.autograd.set_grad_enabled",
"numpy.random.randint",
"torch.device"
] |
[((11356, 11391), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (11367, 11391), False, 'import os\n'), ((11751, 11774), 'rdkit.RDLogger.logger', 'rdkit.RDLogger.logger', ([], {}), '()\n', (11772, 11774), False, 'import rdkit\n'), ((11830, 11855), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11853, 11855), False, 'import argparse\n'), ((12880, 12905), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (12892, 12905), False, 'import torch\n'), ((2055, 2068), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (2063, 2068), False, 'from copy import deepcopy\n'), ((5082, 5106), 'copy.deepcopy', 'deepcopy', (['actual_x_tilde'], {}), '(actual_x_tilde)\n', (5090, 5106), False, 'from copy import deepcopy\n'), ((7452, 7479), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7473, 7479), True, 'import numpy as np\n'), ((7488, 7511), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7505, 7511), False, 'import torch\n'), ((7617, 7655), 'torch.autograd.set_grad_enabled', 'torch.autograd.set_grad_enabled', (['(False)'], {}), '(False)\n', (7648, 7655), False, 'import torch\n'), ((11026, 11047), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (11041, 11047), False, 'import os\n'), ((11641, 11697), 'pickle.dump', 'pickle.dump', (['result', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(result, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (11652, 11697), False, 'import pickle\n'), ((12817, 12864), 'torch.load', 'torch.load', (['args.model_path'], {'map_location': '"""cpu"""'}), "(args.model_path, map_location='cpu')\n", (12827, 12864), False, 'import torch\n'), ((7409, 7439), 'numpy.random.randint', 'np.random.randint', (['(2 ** 31 - 1)'], {}), '(2 ** 31 - 1)\n', (7426, 7439), True, 'import numpy as np\n'), ((11577, 11598), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (11592, 11598), False, 'import os\n'), ((11244, 11264), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11258, 11264), False, 'import os\n')]
|
import copy
import pylab
import numpy as np
from environment import Env
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
from keras import backend as K
EPISODES = 2500
class ReinforceAgent:
def __init__(self):
self.render = False
self.load_model = False
self.action_space = [0, 1, 2, 3, 4]
self.action_size = len(self.action_space)
self.state_size = 15
self.discount_factor = 0.99 # decay rate
self.learning_rate = 0.001
self.model = self.build_model()
self.optimizer = self.optimizer()
self.states, self.actions, self.rewards = [], [], []
if self.load_model:
self.model.load_weights('./save_model/reinforce_trained.h5')
def build_model(self):
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='softmax'))
model.summary()
return model
def optimizer(self):
action = K.placeholder(shape=[None, 5])
discounted_rewards = K.placeholder(shape=[None, ])
good_prob = K.sum(action * self.model.output, axis=1)
eligibility = K.log(good_prob) * K.stop_gradient(discounted_rewards)
loss = -K.sum(eligibility)
optimizer = Adam(lr=self.learning_rate)
updates = optimizer.get_updates(self.model.trainable_weights,[],
loss)
train = K.function([self.model.input, action, discounted_rewards], [],
updates=updates)
return train
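    # The train function built above implements the REINFORCE objective: it minimises
    # loss = -sum(log pi(a_t | s_t) * G_t), where `action` holds one-hot encodings of the
    # taken actions and `discounted_rewards` holds the (later normalised) returns G_t.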
def get_action(self, state):
policy = self.model.predict(state)[0]
return np.random.choice(self.action_size, 1, p=policy)[0]
def discount_rewards(self, rewards):
discounted_rewards = np.zeros_like(rewards)
running_add = 0
for t in reversed(range(0, len(rewards))):
running_add = running_add * self.discount_factor + rewards[t]
discounted_rewards[t] = running_add
return discounted_rewards
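    # The loop above accumulates returns backwards in time: G_t = r_t + discount_factor * G_{t+1}.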
def remember_episode(self, state, action, reward):
self.states.append(state[0])
self.rewards.append(reward)
act = np.zeros(self.action_size)
act[action] = 1
self.actions.append(act)
def train_model(self):
discounted_rewards = np.float32(self.discount_rewards(self.rewards))
discounted_rewards -= np.mean(discounted_rewards)
discounted_rewards /= np.std(discounted_rewards)
self.optimizer([self.states, self.actions, discounted_rewards])
self.states, self.actions, self.rewards = [], [], []
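    # Standardising the returns (zero mean, unit variance) before the update acts as a simple
    # baseline / variance-reduction step for the policy-gradient estimate.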
if __name__ == "__main__":
env = Env()
agent = ReinforceAgent()
global_step = 0
scores, episodes = [], []
for e in range(EPISODES):
done = False
score = 0
state = env.reset()
state = np.reshape(state, [1, 15])
while not done:
if agent.render:
env.render()
global_step += 1
action = agent.get_action(state)
next_state, reward, done = env.step(action)
next_state = np.reshape(next_state, [1, 15])
agent.remember_episode(state, action, reward)
score += reward
state = copy.deepcopy(next_state)
if done:
agent.train_model()
scores.append(score)
episodes.append(e)
score = round(score,2)
print("episode:", e, " score:", score, " time_step:",
global_step)
if e % 100 == 0:
pylab.plot(episodes, scores, 'b')
pylab.savefig("./save_graph/reinforce.png")
agent.model.save_weights("./save_model/reinforce.h5")
if e == 501:
break
print('game over')
env.destroy()
|
[
"keras.backend.placeholder",
"numpy.random.choice",
"numpy.zeros_like",
"copy.deepcopy",
"numpy.std",
"keras.backend.function",
"keras.backend.sum",
"keras.optimizers.Adam",
"numpy.zeros",
"pylab.savefig",
"keras.backend.log",
"numpy.mean",
"environment.Env",
"numpy.reshape",
"keras.layers.Dense",
"keras.backend.stop_gradient",
"keras.models.Sequential",
"pylab.plot"
] |
[((2784, 2789), 'environment.Env', 'Env', ([], {}), '()\n', (2787, 2789), False, 'from environment import Env\n'), ((827, 839), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (837, 839), False, 'from keras.models import Sequential\n'), ((1116, 1146), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '[None, 5]'}), '(shape=[None, 5])\n', (1129, 1146), True, 'from keras import backend as K\n'), ((1176, 1203), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '[None]'}), '(shape=[None])\n', (1189, 1203), True, 'from keras import backend as K\n'), ((1226, 1267), 'keras.backend.sum', 'K.sum', (['(action * self.model.output)'], {'axis': '(1)'}), '(action * self.model.output, axis=1)\n', (1231, 1267), True, 'from keras import backend as K\n'), ((1401, 1428), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learning_rate'}), '(lr=self.learning_rate)\n', (1405, 1428), False, 'from keras.optimizers import Adam\n'), ((1564, 1643), 'keras.backend.function', 'K.function', (['[self.model.input, action, discounted_rewards]', '[]'], {'updates': 'updates'}), '([self.model.input, action, discounted_rewards], [], updates=updates)\n', (1574, 1643), True, 'from keras import backend as K\n'), ((1910, 1932), 'numpy.zeros_like', 'np.zeros_like', (['rewards'], {}), '(rewards)\n', (1923, 1932), True, 'import numpy as np\n'), ((2307, 2333), 'numpy.zeros', 'np.zeros', (['self.action_size'], {}), '(self.action_size)\n', (2315, 2333), True, 'import numpy as np\n'), ((2526, 2553), 'numpy.mean', 'np.mean', (['discounted_rewards'], {}), '(discounted_rewards)\n', (2533, 2553), True, 'import numpy as np\n'), ((2584, 2610), 'numpy.std', 'np.std', (['discounted_rewards'], {}), '(discounted_rewards)\n', (2590, 2610), True, 'import numpy as np\n'), ((2984, 3010), 'numpy.reshape', 'np.reshape', (['state', '[1, 15]'], {}), '(state, [1, 15])\n', (2994, 3010), True, 'import numpy as np\n'), ((858, 913), 'keras.layers.Dense', 'Dense', (['(24)'], {'input_dim': 'self.state_size', 'activation': '"""relu"""'}), "(24, input_dim=self.state_size, activation='relu')\n", (863, 913), False, 'from keras.layers import Dense\n'), ((933, 961), 'keras.layers.Dense', 'Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (938, 961), False, 'from keras.layers import Dense\n'), ((981, 1026), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""softmax"""'}), "(self.action_size, activation='softmax')\n", (986, 1026), False, 'from keras.layers import Dense\n'), ((1290, 1306), 'keras.backend.log', 'K.log', (['good_prob'], {}), '(good_prob)\n', (1295, 1306), True, 'from keras import backend as K\n'), ((1309, 1344), 'keras.backend.stop_gradient', 'K.stop_gradient', (['discounted_rewards'], {}), '(discounted_rewards)\n', (1324, 1344), True, 'from keras import backend as K\n'), ((1361, 1379), 'keras.backend.sum', 'K.sum', (['eligibility'], {}), '(eligibility)\n', (1366, 1379), True, 'from keras import backend as K\n'), ((1788, 1835), 'numpy.random.choice', 'np.random.choice', (['self.action_size', '(1)'], {'p': 'policy'}), '(self.action_size, 1, p=policy)\n', (1804, 1835), True, 'import numpy as np\n'), ((3250, 3281), 'numpy.reshape', 'np.reshape', (['next_state', '[1, 15]'], {}), '(next_state, [1, 15])\n', (3260, 3281), True, 'import numpy as np\n'), ((3389, 3414), 'copy.deepcopy', 'copy.deepcopy', (['next_state'], {}), '(next_state)\n', (3402, 3414), False, 'import copy\n'), ((3729, 3762), 'pylab.plot', 'pylab.plot', (['episodes', 'scores', '"""b"""'], {}), "(episodes, 
scores, 'b')\n", (3739, 3762), False, 'import pylab\n'), ((3775, 3818), 'pylab.savefig', 'pylab.savefig', (['"""./save_graph/reinforce.png"""'], {}), "('./save_graph/reinforce.png')\n", (3788, 3818), False, 'import pylab\n')]
|
from pickle import load
from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means
def load_expt_result(path):
with open(path, 'rb') as f:
result = load(f)
return result
import matplotlib.pyplot as plt
import numpy as np
expt_dataset = np.load('expt_dataset.npz')
expt_means = expt_dataset['expt_means']
def rmse(predictions, inds):
pred_kcal_mol = unreduce(predictions[inds])
expt_kcal_mol = unreduce(expt_means[inds])
rmse = np.sqrt(np.mean((pred_kcal_mol - expt_kcal_mol) ** 2))
return rmse
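# rmse above is the usual root-mean-square error in kcal/mol over the selected molecule indices:
# sqrt(mean((prediction - experiment) ** 2)).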
def train_test_rmse(predictions, split=0):
train_inds, test_inds = train_test_split(split)
train_rmse = rmse(predictions, train_inds)
test_rmse = rmse(predictions, test_inds)
return train_rmse, test_rmse
def compute_train_test_rmse_traj(prediction_traj, cv_fold=0):
train_rmses = np.zeros(len(prediction_traj))
test_rmses = np.zeros(len(prediction_traj))
print(np.array(prediction_traj).shape)
for i in range(len(prediction_traj)):
train_rmses[i], test_rmses[i] = train_test_rmse(prediction_traj[i], cv_fold)
return train_rmses, test_rmses
if __name__ == '__main__':
plt.figure(figsize=(8,4))
train_color = 'blue'
test_color = 'green'
result = load_expt_result('ll=student_t,k=1,hash(theta0)=9203586750394740867.pkl')
train_rmses, test_rmses = compute_train_test_rmse_traj(result['prediction_traj'], result['experiment'].cv_fold)
train_label = 'train'
test_label = 'test'
plt.plot(train_rmses, label=train_label, c=train_color, alpha=0.5)
plt.plot(test_rmses, label=test_label, c=test_color, linestyle='--', alpha=0.5)
plt.xlabel('iteration')
plt.ylabel('RMSE (kcal/mol)')
plt.savefig('train_test_rmses_0.png', dpi=300)
plt.close()
from glob import glob
ll = 'gaussian'
fnames = glob('july_26_results/ll={}*'.format(ll))
def plot_scatter(path):
result = load_expt_result(path)
initial_pred = result['prediction_traj'][0]
pred_kcalmol = unreduce(initial_pred)
expt_kcalmol = unreduce(expt_means)
plt.figure()
plt.scatter(pred_kcalmol, expt_kcalmol)
plt.savefig('scatter.png', dpi=300)
plt.close()
plot_scatter(fnames[0])
def plot_result(ax, ll='gaussian'):
fnames = glob('july_27_28_results/ll={}*'.format(ll))
for i, fname in enumerate(fnames):
result = load_expt_result(fname)
train_rmses, test_rmses = compute_train_test_rmse_traj(result['prediction_traj'], result['experiment'].cv_fold)
if i == 0:
train_label = 'train'
test_label = 'test'
else:
train_label = None
test_label = None
ax.plot(train_rmses, label=train_label, c=train_color, alpha=0.5)
ax.plot(test_rmses, label=test_label, c=test_color, linestyle='--', alpha=0.5)
ax.set_xlabel('iteration')
ax.set_ylabel('RMSE (kcal/mol)')
plt.legend(title='10-fold CV')
ax = plt.subplot(1,2,1)
plot_result(ax, 'gaussian')
plt.title('gaussian likelihood')
ax1 = plt.subplot(1,2,2, sharey=ax)
plot_result(ax1, 'student_t')
plt.title('student-t likelihood')
plt.tight_layout()
plt.savefig('train_test_rmses.png', dpi=300, bbox_inches='tight')
plt.close()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.load",
"bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.train_test_split",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.train_test_rmse",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.unreduce",
"matplotlib.pyplot.savefig"
] |
[((387, 414), 'numpy.load', 'np.load', (['"""expt_dataset.npz"""'], {}), "('expt_dataset.npz')\n", (394, 414), True, 'import numpy as np\n'), ((505, 532), 'bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.unreduce', 'unreduce', (['predictions[inds]'], {}), '(predictions[inds])\n', (513, 532), False, 'from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means\n'), ((553, 579), 'bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.unreduce', 'unreduce', (['expt_means[inds]'], {}), '(expt_means[inds])\n', (561, 579), False, 'from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means\n'), ((736, 759), 'bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.train_test_split', 'train_test_split', (['split'], {}), '(split)\n', (752, 759), False, 'from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means\n'), ((1286, 1312), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1296, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1623, 1689), 'matplotlib.pyplot.plot', 'plt.plot', (['train_rmses'], {'label': 'train_label', 'c': 'train_color', 'alpha': '(0.5)'}), '(train_rmses, label=train_label, c=train_color, alpha=0.5)\n', (1631, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1773), 'matplotlib.pyplot.plot', 'plt.plot', (['test_rmses'], {'label': 'test_label', 'c': 'test_color', 'linestyle': '"""--"""', 'alpha': '(0.5)'}), "(test_rmses, label=test_label, c=test_color, linestyle='--', alpha=0.5)\n", (1702, 1773), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (1790, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1837), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE (kcal/mol)"""'], {}), "('RMSE (kcal/mol)')\n", (1818, 1837), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1889), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""train_test_rmses_0.png"""'], {'dpi': '(300)'}), "('train_test_rmses_0.png', dpi=300)\n", (1854, 1889), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1905), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1903, 1905), True, 'import matplotlib.pyplot as plt\n'), ((3179, 3199), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3190, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3266), 'matplotlib.pyplot.title', 'plt.title', (['"""gaussian likelihood"""'], {}), "('gaussian likelihood')\n", (3243, 3266), True, 'import matplotlib.pyplot as plt\n'), ((3278, 3309), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {'sharey': 'ax'}), '(1, 2, 2, sharey=ax)\n', (3289, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3379), 'matplotlib.pyplot.title', 'plt.title', (['"""student-t likelihood"""'], {}), "('student-t likelihood')\n", (3355, 3379), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3401, 3403), True, 
'import matplotlib.pyplot as plt\n'), ((3408, 3473), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""train_test_rmses.png"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('train_test_rmses.png', dpi=300, bbox_inches='tight')\n", (3419, 3473), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3489), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3487, 3489), True, 'import matplotlib.pyplot as plt\n'), ((292, 299), 'pickle.load', 'load', (['f'], {}), '(f)\n', (296, 299), False, 'from pickle import load\n'), ((600, 645), 'numpy.mean', 'np.mean', (['((pred_kcal_mol - expt_kcal_mol) ** 2)'], {}), '((pred_kcal_mol - expt_kcal_mol) ** 2)\n', (607, 645), True, 'import numpy as np\n'), ((1172, 1216), 'bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.train_test_rmse', 'train_test_rmse', (['prediction_traj[i]', 'cv_fold'], {}), '(prediction_traj[i], cv_fold)\n', (1187, 1216), False, 'from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means\n'), ((2152, 2174), 'bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.unreduce', 'unreduce', (['initial_pred'], {}), '(initial_pred)\n', (2160, 2174), False, 'from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means\n'), ((2198, 2218), 'bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment.unreduce', 'unreduce', (['expt_means'], {}), '(expt_means)\n', (2206, 2218), False, 'from bayes_implicit_solvent.continuous_parameter_experiments.gd_vs_langevin.autograd_based_experiment import Experiment, experiments, train_test_split, train_test_rmse, unreduce, expt_means\n'), ((2227, 2239), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2237, 2239), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2287), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pred_kcalmol', 'expt_kcalmol'], {}), '(pred_kcalmol, expt_kcalmol)\n', (2259, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2331), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""scatter.png"""'], {'dpi': '(300)'}), "('scatter.png', dpi=300)\n", (2307, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2351), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2349, 2351), True, 'import matplotlib.pyplot as plt\n'), ((3138, 3168), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""10-fold CV"""'}), "(title='10-fold CV')\n", (3148, 3168), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1081), 'numpy.array', 'np.array', (['prediction_traj'], {}), '(prediction_traj)\n', (1064, 1081), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from samplics.sae.eblup_unit_model import EblupUnitModel
cornsoybean = pd.read_csv("./tests/sae/cornsoybean.csv")
cornsoybean_mean = pd.read_csv("./tests/sae/cornsoybeanmeans.csv")
cornsoybean = cornsoybean.sample(frac=1)  # shuffle the data to remove any pre-existing ordering of the rows
# print(cornsoybean)
areas = cornsoybean["County"]
areas_list = np.unique(areas)
ys = cornsoybean["CornHec"]
Xs = cornsoybean[["CornPix", "SoyBeansPix"]]
Xmean = cornsoybean_mean[["MeanCornPixPerSeg", "MeanSoyBeansPixPerSeg"]]
# print(Xmean)
samp_size = np.array([1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 5, 6])
pop_size = np.array([545, 566, 394, 424, 564, 570, 402, 567, 687, 569, 965, 556])
"""REML Method"""
eblup_bhf_reml = EblupUnitModel()
eblup_bhf_reml.fit(
ys,
Xs,
areas,
)
eblup_bhf_reml.predict(Xmean, areas_list)
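# Note: this fixture fits the unit-level EBLUP of the classic Battese-Harter-Fuller corn/soybean
# example: CornHec is regressed on the two pixel counts with a county-level random effect, and the
# tests below pin the REML fit against reference values.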
def test_eblup_bhf_reml():
assert eblup_bhf_reml.method == "REML"
def test_fixed_effects_bhf_reml():
assert np.isclose(
eblup_bhf_reml.fixed_effects,
np.array([17.96398, 0.3663352, -0.0303638]),
atol=1e-6,
).all()
def test_fe_std_bhf_reml():
assert np.isclose(
eblup_bhf_reml.fe_std,
np.array([30.986801, 0.065101, 0.067583]),
atol=1e-6,
).all()
def test_gamma_bhf_reml():
assert np.isclose(
np.array(list(eblup_bhf_reml.gamma.values())),
np.array(
[
0.17537405,
0.17537405,
0.17537405,
0.29841402,
0.38950426,
0.38950426,
0.38950426,
0.38950426,
0.45965927,
0.51535245,
0.51535245,
0.56063774,
]
),
atol=1e-6,
).all()
def test_random_effects_bhf_reml():
assert np.isclose(
eblup_bhf_reml.random_effects,
np.array(
[
2.184574,
1.475118,
-4.730863,
-2.764825,
8.370915,
4.274827,
-2.705540,
1.156682,
5.026852,
-2.883398,
-8.652532,
-0.751808,
]
),
atol=1e-6,
).all()
def test_re_std_bhf_reml():
assert np.isclose(eblup_bhf_reml.re_std ** 2, 63.3149, atol=1e-6)
def test_error_var_bhf_reml():
assert np.isclose(eblup_bhf_reml.error_std ** 2, 297.7128, atol=1e-6)
def test_goodness_of_fit_bhf_reml():
assert np.isclose(eblup_bhf_reml.goodness["loglike"], -161.005759)
assert np.isclose(eblup_bhf_reml.goodness["AIC"], 326.011518)
assert np.isclose(eblup_bhf_reml.goodness["BIC"], 329.064239)
def test_convergence_bhf_reml():
assert eblup_bhf_reml.convergence["achieved"] == True
assert eblup_bhf_reml.convergence["iterations"] == 4
def test_area_estimate_bhf_reml():
assert np.isclose(
np.array(list(eblup_bhf_reml.area_est.values())),
np.array(
[
122.56367092,
123.51515946,
113.09071900,
115.02074400,
137.19621212,
108.94543201,
116.51553231,
122.76148230,
111.53048000,
124.18034553,
112.50472697,
131.25788283,
]
),
atol=1e-6,
).all()
# @pytest.mark.skip(reason="to be fixed")
def test_area_mse_bhf_reml():
assert np.isclose(
np.array(list(eblup_bhf_reml.area_mse.values())),
np.array(
[
85.495399459,
85.648949504,
85.004705566,
83.235995880,
72.017014455,
73.356967955,
72.007536645,
73.580035237,
65.299062174,
58.426265442,
57.518251822,
53.876770532,
]
),
atol=1e-6,
).all()
eblup_bhf_reml_fpc = EblupUnitModel()
eblup_bhf_reml_fpc.fit(ys, Xs, areas)
eblup_bhf_reml_fpc.predict(Xmean, areas_list, pop_size)
def test_y_predicted_bhf_reml_fpc():
assert np.isclose(
np.array(list(eblup_bhf_reml_fpc.area_est.values())),
np.array(
[
122.582519,
123.527414,
113.034260,
114.990082,
137.266001,
108.980696,
116.483886,
122.771075,
111.564754,
124.156518,
112.462566,
131.251525,
]
),
atol=1e-6,
).all()
def test_bhf_reml_to_dataframe_default():
df = eblup_bhf_reml.to_dataframe()
assert df.shape[1] == 4
assert (df.columns == ["_parameter", "_area", "_estimate", "_mse"]).all()
def test_bhf_reml_to_dataframe_not_default():
df = eblup_bhf_reml.to_dataframe(
col_names=["parameter", "small_area", "modelled_estimate", "taylor_mse"]
)
assert df.shape[1] == 4
assert (df.columns == ["parameter", "small_area", "modelled_estimate", "taylor_mse"]).all()
# Bootstrap with REML
eblup_bhf_reml_boot = EblupUnitModel()
eblup_bhf_reml_boot.fit(
ys,
Xs,
areas,
)
eblup_bhf_reml_boot.predict(Xmean, areas_list)
eblup_bhf_reml_boot.bootstrap_mse(number_reps=5, show_progress=False)
df1_reml = eblup_bhf_reml_boot.to_dataframe()
def test_bhf_reml_to_dataframe_boot_default():
assert df1_reml.shape[1] == 5
assert (df1_reml.columns == ["_parameter", "_area", "_estimate", "_mse", "_mse_boot"]).all()
df2_reml = eblup_bhf_reml_boot.to_dataframe(
col_names=["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
)
def test_bhf_reml_to_dataframe_boot_not_default():
assert df2_reml.shape[1] == 5
assert (
df2_reml.columns
== ["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
).all()
# Shorter output
np.random.seed(123)
samp_size_short = np.array([3, 3, 3, 4, 5, 5, 6])
pop_size_short = np.array([570, 402, 567, 687, 569, 965, 556])
pop_area_short = np.linspace(6, 12, 7).astype(int)
Xp_mean_short = Xmean.loc[5:12, :]
eblup_bhf_reml_short = EblupUnitModel()
eblup_bhf_reml_short.fit(ys, Xs, areas, intercept=True)
eblup_bhf_reml_short.predict(Xp_mean_short, pop_area_short, pop_size_short)
def test_area_estimate_bhf_reml_short():
assert np.isclose(
np.array(list(eblup_bhf_reml_short.area_est.values())),
np.array(
[
108.98069631,
116.48388625,
122.77107460,
111.56475375,
124.15651773,
112.46256629,
131.25152478,
]
),
atol=1e-6,
).all()
# @pytest.mark.skip(reason="to be fixed")
def test_area_mse_bhf_reml_short():
assert np.isclose(
np.array(list(eblup_bhf_reml_short.area_mse.values())),
np.array(
[
78.70883983,
78.02323786,
78.87309307,
70.04040931,
64.11261351,
61.87654547,
59.81982861,
]
),
atol=1e-6,
).all()
"""ML Method"""
eblup_bhf_ml = EblupUnitModel(method="ml")
eblup_bhf_ml.fit(ys, Xs, areas)
eblup_bhf_ml.predict(Xmean, areas_list)
def test_eblup_bhf_ml():
assert eblup_bhf_ml.method == "ML"
def test_fixed_effects_bhf_ml():
assert np.isclose(
eblup_bhf_ml.fixed_effects,
np.array([18.08888, 0.36566, -0.03017]),
atol=1e-5,
).all()
def test_fe_std_bhf_ml():
assert np.isclose(
eblup_bhf_ml.fe_std,
np.array([29.82724469, 0.06262676, 0.06506189]),
atol=1e-5,
).all()
def test_gamma_bhf_ml():
assert np.isclose(
np.array(list(eblup_bhf_ml.gamma.values())),
np.array(
[
0.14570573,
0.14570573,
0.14570573,
0.25435106,
0.33848019,
0.33848019,
0.33848019,
0.33848019,
0.40555003,
0.46027174,
0.46027174,
0.50576795,
]
),
atol=1e-6,
).all()
def test_random_effects_bhf_ml():
assert np.isclose(
eblup_bhf_ml.random_effects,
np.array(
[
1.8322323,
1.2218437,
-3.9308431,
-2.3261989,
7.2988558,
3.7065346,
-2.3371090,
1.0315879,
4.4367420,
-2.5647926,
-7.7046350,
-0.6642178,
]
),
atol=1e-6,
).all()
def test_re_std_bhf_ml():
assert np.isclose(eblup_bhf_ml.re_std ** 2, 47.79559, atol=1e-4)
def test_error_var_bhf_ml():
assert np.isclose(eblup_bhf_ml.error_std ** 2, 280.2311, atol=1e-4)
def test_goodness_of_fit_bhf_ml():
assert np.isclose(eblup_bhf_ml.goodness["loglike"], -159.1981)
assert np.isclose(eblup_bhf_ml.goodness["AIC"], 328.4, atol=0.1)
assert np.isclose(eblup_bhf_ml.goodness["BIC"], 336.5, atol=0.1)
def test_convergence_bhf_ml():
assert eblup_bhf_ml.convergence["achieved"] == True
assert eblup_bhf_ml.convergence["iterations"] == 3
def test_area_estimate_bhf_ml():
assert np.isclose(
np.array(list(eblup_bhf_ml.area_est.values())),
np.array(
[
122.17284832,
123.22129485,
113.85918468,
115.42994973,
136.06978025,
108.37573030,
116.84704244,
122.60003878,
110.93542654,
124.44934607,
113.41480260,
131.28369873,
]
),
atol=1e-6,
).all()
def test_area_mse_bhf_ml():
assert np.isclose(
np.array(list(eblup_bhf_ml.area_mse.values())),
np.array(
[
70.03789330,
70.14078955,
69.75891524,
71.50874622,
64.73862949,
66.13552266,
64.77099780,
66.09246929,
60.71287515,
55.31330901,
54.52024143,
51.85801645,
]
),
atol=1e-4,
).all()
eblup_bhf_ml_fpc = EblupUnitModel(method="ML")
eblup_bhf_ml_fpc.fit(ys, Xs, areas)
eblup_bhf_ml_fpc.predict(Xmean, areas_list, pop_size)
def test_area_est_bhf_ml_fpc():
assert np.isclose(
np.array(list(eblup_bhf_ml_fpc.area_est.values())),
np.array(
[
122.1926,
123.2340,
113.8007,
115.3978,
136.1457,
108.4139,
116.8129,
122.6107,
110.9733,
124.4229,
113.3680,
131.2767,
]
),
atol=1e-4,
).all()
# Bootstrap with ML
eblup_bhf_ml_boot = EblupUnitModel(method="ML")
eblup_bhf_ml_boot.fit(
ys,
Xs,
areas,
)
eblup_bhf_ml_boot.predict(Xmean, areas_list)
eblup_bhf_ml_boot.bootstrap_mse(number_reps=5, show_progress=False)
df1_ml = eblup_bhf_ml_boot.to_dataframe()
def test_bhf_ml_to_dataframe_boot_default():
assert df1_ml.shape[1] == 5
assert (df1_ml.columns == ["_parameter", "_area", "_estimate", "_mse", "_mse_boot"]).all()
df2_ml = eblup_bhf_ml_boot.to_dataframe(
col_names=["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
)
def test_bhf_ml_to_dataframe_boot_not_default():
assert df2_ml.shape[1] == 5
assert (
df2_ml.columns
== ["parameter", "small_area", "modelled_estimate", "taylor_mse", "boot_mse"]
).all()
# Shorter output
eblup_bhf_ml_short = EblupUnitModel(method="ML")
eblup_bhf_ml_short.fit(ys, Xs, areas, intercept=True)
eblup_bhf_ml_short.predict(Xp_mean_short, pop_area_short, pop_size_short)
def test_area_estimate_bhf_ml_short():
assert np.isclose(
np.array(list(eblup_bhf_ml_short.area_est.values())),
np.array(
[
108.41385641,
116.81295596,
122.61070603,
110.97329145,
124.42291775,
113.36799091,
131.27669442,
]
),
atol=1e-6,
).all()
# @pytest.mark.skip(reason="to be fixed")
def test_area_mse_bhf_ml_short():
assert np.isclose(
np.array(list(eblup_bhf_ml_short.area_mse.values())),
np.array(
[
71.07422316,
70.52276075,
71.03548298,
65.27922762,
60.93670432,
58.91938558,
57.87424555,
]
),
atol=1e-6,
).all()
|
[
"numpy.random.seed",
"pandas.read_csv",
"numpy.isclose",
"numpy.array",
"numpy.linspace",
"samplics.sae.eblup_unit_model.EblupUnitModel",
"numpy.unique"
] |
[((113, 155), 'pandas.read_csv', 'pd.read_csv', (['"""./tests/sae/cornsoybean.csv"""'], {}), "('./tests/sae/cornsoybean.csv')\n", (124, 155), True, 'import pandas as pd\n'), ((175, 222), 'pandas.read_csv', 'pd.read_csv', (['"""./tests/sae/cornsoybeanmeans.csv"""'], {}), "('./tests/sae/cornsoybeanmeans.csv')\n", (186, 222), True, 'import pandas as pd\n'), ((364, 380), 'numpy.unique', 'np.unique', (['areas'], {}), '(areas)\n', (373, 380), True, 'import numpy as np\n'), ((557, 603), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 5, 6]'], {}), '([1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 5, 6])\n', (565, 603), True, 'import numpy as np\n'), ((615, 685), 'numpy.array', 'np.array', (['[545, 566, 394, 424, 564, 570, 402, 567, 687, 569, 965, 556]'], {}), '([545, 566, 394, 424, 564, 570, 402, 567, 687, 569, 965, 556])\n', (623, 685), True, 'import numpy as np\n'), ((722, 738), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {}), '()\n', (736, 738), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((4079, 4095), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {}), '()\n', (4093, 4095), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((5271, 5287), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {}), '()\n', (5285, 5287), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((6066, 6085), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (6080, 6085), True, 'import numpy as np\n'), ((6105, 6136), 'numpy.array', 'np.array', (['[3, 3, 3, 4, 5, 5, 6]'], {}), '([3, 3, 3, 4, 5, 5, 6])\n', (6113, 6136), True, 'import numpy as np\n'), ((6154, 6199), 'numpy.array', 'np.array', (['[570, 402, 567, 687, 569, 965, 556]'], {}), '([570, 402, 567, 687, 569, 965, 556])\n', (6162, 6199), True, 'import numpy as np\n'), ((6310, 6326), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {}), '()\n', (6324, 6326), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((7378, 7405), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {'method': '"""ml"""'}), "(method='ml')\n", (7392, 7405), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((10642, 10669), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {'method': '"""ML"""'}), "(method='ML')\n", (10656, 10669), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((11320, 11347), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {'method': '"""ML"""'}), "(method='ML')\n", (11334, 11347), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((12121, 12148), 'samplics.sae.eblup_unit_model.EblupUnitModel', 'EblupUnitModel', ([], {'method': '"""ML"""'}), "(method='ML')\n", (12135, 12148), False, 'from samplics.sae.eblup_unit_model import EblupUnitModel\n'), ((2329, 2388), 'numpy.isclose', 'np.isclose', (['(eblup_bhf_reml.re_std ** 2)', '(63.3149)'], {'atol': '(1e-06)'}), '(eblup_bhf_reml.re_std ** 2, 63.3149, atol=1e-06)\n', (2339, 2388), True, 'import numpy as np\n'), ((2432, 2495), 'numpy.isclose', 'np.isclose', (['(eblup_bhf_reml.error_std ** 2)', '(297.7128)'], {'atol': '(1e-06)'}), '(eblup_bhf_reml.error_std ** 2, 297.7128, atol=1e-06)\n', (2442, 2495), True, 'import numpy as np\n'), ((2545, 2604), 'numpy.isclose', 'np.isclose', (["eblup_bhf_reml.goodness['loglike']", '(-161.005759)'], {}), "(eblup_bhf_reml.goodness['loglike'], -161.005759)\n", (2555, 2604), True, 'import 
numpy as np\n'), ((2616, 2670), 'numpy.isclose', 'np.isclose', (["eblup_bhf_reml.goodness['AIC']", '(326.011518)'], {}), "(eblup_bhf_reml.goodness['AIC'], 326.011518)\n", (2626, 2670), True, 'import numpy as np\n'), ((2682, 2736), 'numpy.isclose', 'np.isclose', (["eblup_bhf_reml.goodness['BIC']", '(329.064239)'], {}), "(eblup_bhf_reml.goodness['BIC'], 329.064239)\n", (2692, 2736), True, 'import numpy as np\n'), ((8967, 9026), 'numpy.isclose', 'np.isclose', (['(eblup_bhf_ml.re_std ** 2)', '(47.79559)'], {'atol': '(0.0001)'}), '(eblup_bhf_ml.re_std ** 2, 47.79559, atol=0.0001)\n', (8977, 9026), True, 'import numpy as np\n'), ((9067, 9129), 'numpy.isclose', 'np.isclose', (['(eblup_bhf_ml.error_std ** 2)', '(280.2311)'], {'atol': '(0.0001)'}), '(eblup_bhf_ml.error_std ** 2, 280.2311, atol=0.0001)\n', (9077, 9129), True, 'import numpy as np\n'), ((9176, 9231), 'numpy.isclose', 'np.isclose', (["eblup_bhf_ml.goodness['loglike']", '(-159.1981)'], {}), "(eblup_bhf_ml.goodness['loglike'], -159.1981)\n", (9186, 9231), True, 'import numpy as np\n'), ((9243, 9300), 'numpy.isclose', 'np.isclose', (["eblup_bhf_ml.goodness['AIC']", '(328.4)'], {'atol': '(0.1)'}), "(eblup_bhf_ml.goodness['AIC'], 328.4, atol=0.1)\n", (9253, 9300), True, 'import numpy as np\n'), ((9312, 9369), 'numpy.isclose', 'np.isclose', (["eblup_bhf_ml.goodness['BIC']", '(336.5)'], {'atol': '(0.1)'}), "(eblup_bhf_ml.goodness['BIC'], 336.5, atol=0.1)\n", (9322, 9369), True, 'import numpy as np\n'), ((6217, 6238), 'numpy.linspace', 'np.linspace', (['(6)', '(12)', '(7)'], {}), '(6, 12, 7)\n', (6228, 6238), True, 'import numpy as np\n'), ((1009, 1052), 'numpy.array', 'np.array', (['[17.96398, 0.3663352, -0.0303638]'], {}), '([17.96398, 0.3663352, -0.0303638])\n', (1017, 1052), True, 'import numpy as np\n'), ((1177, 1218), 'numpy.array', 'np.array', (['[30.986801, 0.065101, 0.067583]'], {}), '([30.986801, 0.065101, 0.067583])\n', (1185, 1218), True, 'import numpy as np\n'), ((1366, 1529), 'numpy.array', 'np.array', (['[0.17537405, 0.17537405, 0.17537405, 0.29841402, 0.38950426, 0.38950426, \n 0.38950426, 0.38950426, 0.45965927, 0.51535245, 0.51535245, 0.56063774]'], {}), '([0.17537405, 0.17537405, 0.17537405, 0.29841402, 0.38950426, \n 0.38950426, 0.38950426, 0.38950426, 0.45965927, 0.51535245, 0.51535245,\n 0.56063774])\n', (1374, 1529), True, 'import numpy as np\n'), ((1890, 2030), 'numpy.array', 'np.array', (['[2.184574, 1.475118, -4.730863, -2.764825, 8.370915, 4.274827, -2.70554, \n 1.156682, 5.026852, -2.883398, -8.652532, -0.751808]'], {}), '([2.184574, 1.475118, -4.730863, -2.764825, 8.370915, 4.274827, -\n 2.70554, 1.156682, 5.026852, -2.883398, -8.652532, -0.751808])\n', (1898, 2030), True, 'import numpy as np\n'), ((3013, 3192), 'numpy.array', 'np.array', (['[122.56367092, 123.51515946, 113.090719, 115.020744, 137.19621212, \n 108.94543201, 116.51553231, 122.7614823, 111.53048, 124.18034553, \n 112.50472697, 131.25788283]'], {}), '([122.56367092, 123.51515946, 113.090719, 115.020744, 137.19621212,\n 108.94543201, 116.51553231, 122.7614823, 111.53048, 124.18034553, \n 112.50472697, 131.25788283])\n', (3021, 3192), True, 'import numpy as np\n'), ((3616, 3803), 'numpy.array', 'np.array', (['[85.495399459, 85.648949504, 85.004705566, 83.23599588, 72.017014455, \n 73.356967955, 72.007536645, 73.580035237, 65.299062174, 58.426265442, \n 57.518251822, 53.876770532]'], {}), '([85.495399459, 85.648949504, 85.004705566, 83.23599588, \n 72.017014455, 73.356967955, 72.007536645, 73.580035237, 65.299062174, \n 58.426265442, 57.518251822, 
53.876770532])\n', (3624, 3803), True, 'import numpy as np\n'), ((4323, 4485), 'numpy.array', 'np.array', (['[122.582519, 123.527414, 113.03426, 114.990082, 137.266001, 108.980696, \n 116.483886, 122.771075, 111.564754, 124.156518, 112.462566, 131.251525]'], {}), '([122.582519, 123.527414, 113.03426, 114.990082, 137.266001, \n 108.980696, 116.483886, 122.771075, 111.564754, 124.156518, 112.462566,\n 131.251525])\n', (4331, 4485), True, 'import numpy as np\n'), ((6597, 6709), 'numpy.array', 'np.array', (['[108.98069631, 116.48388625, 122.7710746, 111.56475375, 124.15651773, \n 112.46256629, 131.25152478]'], {}), '([108.98069631, 116.48388625, 122.7710746, 111.56475375, \n 124.15651773, 112.46256629, 131.25152478])\n', (6605, 6709), True, 'import numpy as np\n'), ((7062, 7168), 'numpy.array', 'np.array', (['[78.70883983, 78.02323786, 78.87309307, 70.04040931, 64.11261351, \n 61.87654547, 59.81982861]'], {}), '([78.70883983, 78.02323786, 78.87309307, 70.04040931, 64.11261351, \n 61.87654547, 59.81982861])\n', (7070, 7168), True, 'import numpy as np\n'), ((7647, 7686), 'numpy.array', 'np.array', (['[18.08888, 0.36566, -0.03017]'], {}), '([18.08888, 0.36566, -0.03017])\n', (7655, 7686), True, 'import numpy as np\n'), ((7807, 7854), 'numpy.array', 'np.array', (['[29.82724469, 0.06262676, 0.06506189]'], {}), '([29.82724469, 0.06262676, 0.06506189])\n', (7815, 7854), True, 'import numpy as np\n'), ((7998, 8161), 'numpy.array', 'np.array', (['[0.14570573, 0.14570573, 0.14570573, 0.25435106, 0.33848019, 0.33848019, \n 0.33848019, 0.33848019, 0.40555003, 0.46027174, 0.46027174, 0.50576795]'], {}), '([0.14570573, 0.14570573, 0.14570573, 0.25435106, 0.33848019, \n 0.33848019, 0.33848019, 0.33848019, 0.40555003, 0.46027174, 0.46027174,\n 0.50576795])\n', (8006, 8161), True, 'import numpy as np\n'), ((8518, 8673), 'numpy.array', 'np.array', (['[1.8322323, 1.2218437, -3.9308431, -2.3261989, 7.2988558, 3.7065346, -\n 2.337109, 1.0315879, 4.436742, -2.5647926, -7.704635, -0.6642178]'], {}), '([1.8322323, 1.2218437, -3.9308431, -2.3261989, 7.2988558, \n 3.7065346, -2.337109, 1.0315879, 4.436742, -2.5647926, -7.704635, -\n 0.6642178])\n', (8526, 8673), True, 'import numpy as np\n'), ((9636, 9822), 'numpy.array', 'np.array', (['[122.17284832, 123.22129485, 113.85918468, 115.42994973, 136.06978025, \n 108.3757303, 116.84704244, 122.60003878, 110.93542654, 124.44934607, \n 113.4148026, 131.28369873]'], {}), '([122.17284832, 123.22129485, 113.85918468, 115.42994973, \n 136.06978025, 108.3757303, 116.84704244, 122.60003878, 110.93542654, \n 124.44934607, 113.4148026, 131.28369873])\n', (9644, 9822), True, 'import numpy as np\n'), ((10193, 10367), 'numpy.array', 'np.array', (['[70.0378933, 70.14078955, 69.75891524, 71.50874622, 64.73862949, \n 66.13552266, 64.7709978, 66.09246929, 60.71287515, 55.31330901, \n 54.52024143, 51.85801645]'], {}), '([70.0378933, 70.14078955, 69.75891524, 71.50874622, 64.73862949, \n 66.13552266, 64.7709978, 66.09246929, 60.71287515, 55.31330901, \n 54.52024143, 51.85801645])\n', (10201, 10367), True, 'import numpy as np\n'), ((10886, 11019), 'numpy.array', 'np.array', (['[122.1926, 123.234, 113.8007, 115.3978, 136.1457, 108.4139, 116.8129, \n 122.6107, 110.9733, 124.4229, 113.368, 131.2767]'], {}), '([122.1926, 123.234, 113.8007, 115.3978, 136.1457, 108.4139, \n 116.8129, 122.6107, 110.9733, 124.4229, 113.368, 131.2767])\n', (10894, 11019), True, 'import numpy as np\n'), ((12411, 12524), 'numpy.array', 'np.array', (['[108.41385641, 116.81295596, 122.61070603, 110.97329145, 124.42291775, 
\n 113.36799091, 131.27669442]'], {}), '([108.41385641, 116.81295596, 122.61070603, 110.97329145, \n 124.42291775, 113.36799091, 131.27669442])\n', (12419, 12524), True, 'import numpy as np\n'), ((12872, 12978), 'numpy.array', 'np.array', (['[71.07422316, 70.52276075, 71.03548298, 65.27922762, 60.93670432, \n 58.91938558, 57.87424555]'], {}), '([71.07422316, 70.52276075, 71.03548298, 65.27922762, 60.93670432, \n 58.91938558, 57.87424555])\n', (12880, 12978), True, 'import numpy as np\n')]
|
'''
This file implements the detection algorithms (message passing) on Markov random fields whose
graphs are generated by the Erdos-Renyi (ER) model.
The algorithms defined in this file are imported by bin/varying_loopy.py.
For the specifics of the algorithms, please see the description in manuscript/amp.pdf.
'''
import numpy as np
import itertools
import factorgraph as fg
import maxsum
import alphaBP
from scipy.stats import multivariate_normal
######################################################################
class ML(object):
def __init__(self, hparam):
self.hparam = hparam
self.constellation = hparam.constellation
pass
def detect(self, S, b):
proposals = list( itertools.product(self.constellation, repeat=self.hparam.num_tx) )
threshold = np.inf
solution = None
for x in proposals:
tmp = np.matmul(np.array(x), S).dot(np.array(x)) + b.dot(x)
if tmp < threshold:
threshold = tmp
solution = x
return solution
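    # detect() above enumerates every candidate symbol vector (|constellation|**num_tx of them)
    # and returns the minimiser of x^T S x + b^T x, so it is only practical for small num_tx.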
class Marginal(object):
"""Compute all the marginals for a given distributions"""
def __init__(self, hparam):
self.hparam = hparam
self.constellation = hparam.constellation
pass
def detect(self, S, b):
proposals = list( itertools.product(self.constellation, repeat=S.shape[0]) )
array_proposals = np.array(proposals)
prob = []
for x in proposals:
tmp = np.matmul(np.array(x), S).dot(np.array(x)) + b.dot(x)
prob.append(np.exp(-tmp))
prob = np.array(prob)
marginals = []
for i in range(b.shape[0]):
this_marginal = []
for code in self.constellation:
subset_idx = array_proposals[:, i]==code
this_marginal.append(np.sum( prob[subset_idx]))
# normalize the marginal
this_marginal = np.array(this_marginal)
this_marginal = this_marginal/this_marginal.sum()
marginals.append( this_marginal)
return np.array(marginals)
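    # The marginals are computed exactly: each configuration x gets weight exp(-(x^T S x + b^T x)),
    # the weights are summed over all configurations with x_i fixed to each constellation point,
    # and each variable's marginal is then normalised.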
class LoopyBP(object):
def __init__(self, noise_var, hparam):
# get the constellation
self.constellation = hparam.constellation
self.hparam = hparam
# set the graph
self.graph = fg.Graph()
# add the discrete random variables to graph
self.n_symbol = hparam.num_tx
for idx in range(hparam.num_tx):
self.graph.rv("x{}".format(idx), len(self.constellation))
def set_potential(self, S, b):
s = S
for var_idx in range(self.hparam.num_tx):
# set the first type of potentials, the standalone potentials
f_x_i = np.exp( - s[var_idx, var_idx] * np.power(self.constellation, 2)
- b[var_idx] * np.array(self.constellation))
self.graph.factor(["x{}".format(var_idx)],
potential=f_x_i)
for var_idx in range(self.hparam.num_tx):
for var_jdx in range(var_idx + 1, self.hparam.num_tx):
# set the cross potentials
if s[var_idx, var_jdx] > 0:
t_ij = np.exp(-2* np.array(self.constellation)[None,:].T
* s[var_idx, var_jdx] * np.array(self.constellation))
self.graph.factor(["x{}".format(var_jdx), "x{}".format(var_idx)],
potential=t_ij)
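        # Together, the node factors f_i and the pairwise factors t_ij encode the same unnormalised
        # distribution exp(-(x^T S x + b^T x)) used by the exact Marginal class above; note that
        # pairwise factors are only added for strictly positive couplings s[i, j] > 0.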
def fit(self, S, b, stop_iter=10):
""" set potentials and run message passing"""
self.set_potential(S, b)
# run BP
iters, converged = self.graph.lbp(normalize=True,max_iters=stop_iter)
def detect_signal_by_mean(self):
estimated_signal = []
rv_marginals = dict(self.graph.rv_marginals())
for idx in range(self.n_symbol):
x_marginal = rv_marginals["x{}".format(idx)]
estimated_signal.append(self.constellation[x_marginal.argmax()])
return estimated_signal
def marginals(self):
marginal_prob = []
rv_marginals = dict(self.graph.rv_marginals())
for idx in range(self.n_symbol):
x_marginal = rv_marginals["x{}".format(idx)]
x_marginal = np.array(x_marginal)
x_marginal = x_marginal/x_marginal.sum()
marginal_prob.append(x_marginal)
return np.array(marginal_prob)
class AlphaBP(LoopyBP):
def __init__(self, noise_var, hparam):
self.hparam = hparam
# get the constellation
self.constellation = hparam.constellation
self.n_symbol = hparam.num_tx
# set the graph
self.graph = alphaBP.alphaGraph(alpha=hparam.alpha)
# add the discrete random variables to graph
for idx in range(hparam.num_tx ):
self.graph.rv("x{}".format(idx), len(self.constellation))
class MMSEalphaBP(AlphaBP):
def set_potential(self, S, b):
s = S
inv = np.linalg.inv(np.eye(s.shape[0]) + 2 * s )
prior_u = inv.dot(b)
for var_idx in range(s.shape[1]):
# set the first type of potentials, the standalone potentials
f_x_i = np.exp( - s[var_idx, var_idx] * np.power(self.constellation, 2)
- b[var_idx] * np.array(self.constellation))
prior_i = np.exp(-0.5 * np.power(self.constellation - prior_u[var_idx], 2) \
/ (inv[var_idx, var_idx]) )
self.graph.factor(["x{}".format(var_idx)],
potential=f_x_i * prior_i)
for var_idx in range(s.shape[1]):
for var_jdx in range(var_idx + 1, s.shape[1]):
# set the cross potentials
if s[var_idx, var_jdx] > 0:
t_ij = np.exp(- 2 * np.array(self.constellation)[None,:].T
* s[var_idx, var_jdx] * np.array(self.constellation))
self.graph.factor(["x{}".format(var_jdx), "x{}".format(var_idx)],
potential=t_ij)
|
[
"factorgraph.Graph",
"numpy.sum",
"numpy.power",
"numpy.array",
"numpy.exp",
"itertools.product",
"alphaBP.alphaGraph",
"numpy.eye"
] |
[((1426, 1445), 'numpy.array', 'np.array', (['proposals'], {}), '(proposals)\n', (1434, 1445), True, 'import numpy as np\n'), ((1630, 1644), 'numpy.array', 'np.array', (['prob'], {}), '(prob)\n', (1638, 1644), True, 'import numpy as np\n'), ((2155, 2174), 'numpy.array', 'np.array', (['marginals'], {}), '(marginals)\n', (2163, 2174), True, 'import numpy as np\n'), ((2400, 2410), 'factorgraph.Graph', 'fg.Graph', ([], {}), '()\n', (2408, 2410), True, 'import factorgraph as fg\n'), ((4539, 4562), 'numpy.array', 'np.array', (['marginal_prob'], {}), '(marginal_prob)\n', (4547, 4562), True, 'import numpy as np\n'), ((4838, 4876), 'alphaBP.alphaGraph', 'alphaBP.alphaGraph', ([], {'alpha': 'hparam.alpha'}), '(alpha=hparam.alpha)\n', (4856, 4876), False, 'import alphaBP\n'), ((725, 789), 'itertools.product', 'itertools.product', (['self.constellation'], {'repeat': 'self.hparam.num_tx'}), '(self.constellation, repeat=self.hparam.num_tx)\n', (742, 789), False, 'import itertools\n'), ((1341, 1397), 'itertools.product', 'itertools.product', (['self.constellation'], {'repeat': 'S.shape[0]'}), '(self.constellation, repeat=S.shape[0])\n', (1358, 1397), False, 'import itertools\n'), ((1987, 2010), 'numpy.array', 'np.array', (['this_marginal'], {}), '(this_marginal)\n', (1995, 2010), True, 'import numpy as np\n'), ((4405, 4425), 'numpy.array', 'np.array', (['x_marginal'], {}), '(x_marginal)\n', (4413, 4425), True, 'import numpy as np\n'), ((1588, 1600), 'numpy.exp', 'np.exp', (['(-tmp)'], {}), '(-tmp)\n', (1594, 1600), True, 'import numpy as np\n'), ((5158, 5176), 'numpy.eye', 'np.eye', (['s.shape[0]'], {}), '(s.shape[0])\n', (5164, 5176), True, 'import numpy as np\n'), ((920, 931), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (928, 931), True, 'import numpy as np\n'), ((1540, 1551), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1548, 1551), True, 'import numpy as np\n'), ((1882, 1906), 'numpy.sum', 'np.sum', (['prob[subset_idx]'], {}), '(prob[subset_idx])\n', (1888, 1906), True, 'import numpy as np\n'), ((2840, 2871), 'numpy.power', 'np.power', (['self.constellation', '(2)'], {}), '(self.constellation, 2)\n', (2848, 2871), True, 'import numpy as np\n'), ((2916, 2944), 'numpy.array', 'np.array', (['self.constellation'], {}), '(self.constellation)\n', (2924, 2944), True, 'import numpy as np\n'), ((5409, 5440), 'numpy.power', 'np.power', (['self.constellation', '(2)'], {}), '(self.constellation, 2)\n', (5417, 5440), True, 'import numpy as np\n'), ((5485, 5513), 'numpy.array', 'np.array', (['self.constellation'], {}), '(self.constellation)\n', (5493, 5513), True, 'import numpy as np\n'), ((5564, 5614), 'numpy.power', 'np.power', (['(self.constellation - prior_u[var_idx])', '(2)'], {}), '(self.constellation - prior_u[var_idx], 2)\n', (5572, 5614), True, 'import numpy as np\n'), ((900, 911), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (908, 911), True, 'import numpy as np\n'), ((1520, 1531), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1528, 1531), True, 'import numpy as np\n'), ((3411, 3439), 'numpy.array', 'np.array', (['self.constellation'], {}), '(self.constellation)\n', (3419, 3439), True, 'import numpy as np\n'), ((6135, 6163), 'numpy.array', 'np.array', (['self.constellation'], {}), '(self.constellation)\n', (6143, 6163), True, 'import numpy as np\n'), ((3314, 3342), 'numpy.array', 'np.array', (['self.constellation'], {}), '(self.constellation)\n', (3322, 3342), True, 'import numpy as np\n'), ((6038, 6066), 'numpy.array', 'np.array', (['self.constellation'], {}), '(self.constellation)\n', 
(6046, 6066), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
import json
import shutil
import sys
import torch.nn.functional as F
from torch.autograd import Variable
from os import path as op
from matplotlib import pyplot, patches
MAX_LEN = 140  # Length of a tweet, in characters
BATCH_SIZE = 512
EPOCH = 250 # With epoch 0, we will run until interrupted
LR = 1e-4 # LR 1e-4 seems to give stable learning without big oscillation
CONTINUE = True # Attempts to continue from previous checkpoint
DEBUG = False
CUDA = True
TEST_WITH_VALIDATION = False # Only test with validation data
DATA_SLICE = 40000
CHECKPOINT_PATH = op.join(op.dirname(__file__), "..", "..", "checkpoint.pt")
MODEL_PATH = op.join(op.dirname(__file__), "..", "..", "model.pt")
def parseFromSemEval(file):
# TODO Move to utils
# TODO Remove dependency on Pandas
import pandas
f = pandas.read_csv(file, sep=",", encoding="utf-8", index_col=0)
return f[["text", "semantic"]].as_matrix()
def _convert_with_vocab(data, vocab_table):
# Convert according to VOCAB
# TODO Might not work if shape is only 1-d.
CONVERTED = np.zeros((data.shape[0], 140))
for i in range(data.shape[0]):
txt = data[i,0]
for j in range(min(len(txt), 140)):
try:
CONVERTED[i,j] = vocab_table[txt[j]]
except KeyError:
# Keep as 0
pass
return CONVERTED
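# Each tweet is thus mapped to a fixed-length vector of 140 character indices; characters missing
# from the vocabulary (and positions beyond the tweet's length) stay 0, the padding index.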
def _loadSemEvalData(fname):
"""
Load data from predefined SemEval sources.
Returns: (Training-data, Training-labels, Validation-data, Validation-labels)
"""
DATADIR = op.join(op.dirname(__file__), "..", "..", "data")
# Test if files exist
if not op.exists(fname):
# Check alternative path
if not op.exists(op.join(DATADIR, fname)):
print("Could not find {} file. Please run download_data.py from data directory".format(op.join(DATADIR, fname)))
return 0
else:
fname = op.join(DATADIR, fname)
data = parseFromSemEval(fname)
return data
def _loadCharacterEmbedding():
"""
Load character-embedding indexes.
Returns: dict(character, index)
"""
# Path to unpacked file
# TODO For packaging use path to site
VOCAB = op.join(op.dirname(__file__), "..", "..", "assets", "embeddings", "reactionrnn_vocab.json")
if not op.exists(VOCAB):
print("Fatal error")
print("Could not find {} file. Has it been deleted?\nCan be downloaded from https://github.com/Manezki/TwitMine/blob/master/assets/embeddings/reactionrnn_vocab.json".format(VOCAB))
sys.exit(-1)
CONVERT_TABLE = json.load(open(VOCAB))
return CONVERT_TABLE
def batch(tensor, batch_size):
# TODO Move to utils
# TODO Change to be more concervative with memory
tensor_list = []
length = tensor.shape[0]
i = 0
while True:
if (i+1) * batch_size >= length:
tensor_list.append(tensor[i * batch_size: length])
return tensor_list
tensor_list.append(tensor[i * batch_size: (i+1) * batch_size])
i += 1
def save_checkpoint(state, is_best, filename=CHECKPOINT_PATH):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, MODEL_PATH)
def plot_progress(training, validation, loss=False):
# TODO move to utils
# BUG argmin line is skewed, x-points wrong?
xaxis = np.linspace(1, 1+len(training), num=len(training))
pl1 = pyplot.plot(xaxis, training, color='orange')
pl2 = pyplot.plot(xaxis, validation, color='blue')
if not loss:
pyplot.title("Training vs Validation accuracy")
pyplot.xlabel("Epoch")
pyplot.ylabel("Accuracy (%)")
orange = patches.Patch(color='orange', label="Training accuracy")
blue = patches.Patch(color='blue', label="Validation accuracy")
else:
minIdx = np.argmin(validation)
miny = np.min(training)
pyplot.plot([minIdx, minIdx+1], [miny, validation[minIdx]], color="red")
pyplot.title("Training vs Validation loss")
pyplot.xlabel("Epoch")
pyplot.ylabel("Loss")
orange = patches.Patch(color='orange', label="Training loss")
blue = patches.Patch(color='blue', label="Validation loss")
pyplot.legend(handles=[orange, blue])
pyplot.show()
class Estimator(object):
## Based on woderfull Gist https://gist.github.com/kenzotakahashi/ed9631f151710c6bd898499fcf938425
def __init__(self, model):
self.model = model
def compile(self, optimizer, loss):
self.optimizer = optimizer
self.loss_f = loss
def _fit(self, X_list, y_list):
"""
train one epoch
"""
loss_list = []
acc_list = []
for X, y in zip(X_list, y_list):
if CUDA:
X_v = Variable(torch.from_numpy(X).long(), requires_grad=False).cuda()
y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False).cuda()
init_hidden = self.model.initHidden(X.shape[0], 100).cuda()
else:
X_v = Variable(torch.from_numpy(X).long(), requires_grad=False)
y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False)
init_hidden = self.model.initHidden(X.shape[0], 100)
self.optimizer.zero_grad()
# Original y_pred = self.model(X, self.model.initHidden(X.size()[1]))
# Init hidden 100, as we perform embedding in the GRU
y_pred, hidden = self.model(X_v, init_hidden)
loss = self.loss_f(y_pred, y_v)
loss.backward()
self.optimizer.step()
## for log
loss_list.append(loss.data[0])
classes = torch.topk(y_pred, 1)[1].cpu().data.numpy().flatten()
#comp = np.hstack((classes.reshape(-1,1), (y+1).reshape(-1,1)))
#print(comp)
acc = self._accuracy(classes, y+1)
acc_list.append(acc)
return sum(loss_list) / len(loss_list), sum(acc_list) / len(acc_list)
def fit(self, X, y, batch_size=32, nb_epoch=10, validation_data=()):
# TODO keep track of the best model state and return it when finished
X_list = batch(X, batch_size)
y_list = batch(y, batch_size)
self.training_cost = []
self.training_acc = []
self.validation_cost = []
self.validation_acc = []
for t in range(1, nb_epoch + 1):
loss, acc = self._fit(X_list, y_list)
self.training_cost.append(loss)
self.training_acc.append(acc)
val_log = ''
if validation_data:
val_loss, val_acc = self.evaluate(validation_data[0], validation_data[1], batch_size)
val_log = "- val_loss: %06.4f - val_acc: %06.4f" % (val_loss, val_acc)
self.validation_cost.append(val_loss)
self.validation_acc.append(val_acc)
print("Epoch %s/%s loss: %06.4f - acc: %06.4f %s" % (t, nb_epoch, loss, acc, val_log))
def evaluate(self, X, y, batch_size=32):
y_pred, hidden = self.predict(X)
if CUDA:
y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False).cuda()
else:
y_v = Variable(torch.from_numpy(y + 1).long(), requires_grad=False)
loss = self.loss_f(y_pred, y_v)
classes = torch.topk(y_pred, 1)[1].cpu().data.numpy().flatten()
acc = self._accuracy(classes, y+1)
_, gt = np.unique(y + 1, return_counts=True)
gt = gt.astype(float) / len(y)
_, pr = np.unique(classes, return_counts=True)
pr = pr.astype(float) / len(y)
if len(gt) == 3 and len(pr) == 3:
            print("Distribution Ground truth: NEG {}, NEU {}, POS {}".format(gt[0], gt[1], gt[2]))
print("Distribution predictions: NEG {}, NEU {}, POS {}".format(pr[0], pr[1], pr[2]))
return loss.data[0], acc
def _accuracy(self, y_pred, y):
return sum(y_pred == y) / y.shape[0]
def predict(self, X):
if CUDA:
X = Variable(torch.from_numpy(X).long()).cuda()
init_hidden = self.model.initHidden(X.shape[0], 100).cuda()
else:
X = Variable(torch.from_numpy(X).long())
init_hidden = self.model.initHidden(X.shape[0], 100)
y_pred = self.model(X, init_hidden)
return y_pred
def predict_classes(self, X):
        # predict() returns (output, hidden), so take the output tensor before topk
        return torch.topk(self.predict(X)[0], 1)[1].cpu().data.numpy().flatten()
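# Minimal usage sketch (comments only; mirrors what main() below does, with hypothetical
# numpy arrays X_train / y_train of encoded tweets and labels in {-1, 0, 1}):
#   clf = Estimator(model)
#   clf.compile(torch.optim.Adam(model.parameters(), lr=LR), loss=nn.CrossEntropyLoss())
#   clf.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=10, validation_data=(X_test, y_test))
#   predicted = clf.predict_classes(X_test)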
#############
class RNN(nn.Module):
def __init__(self, input_size, embed_size, hidden_size, output_size, state_dict=None, dict_path=None):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.embed = nn.Embedding(401,embed_size, padding_idx=0)
self.rnn = nn.GRU(embed_size, hidden_size, bias=True, dropout=0.5)
self.output = nn.Linear(hidden_size, output_size)
self._create_weight_tensors(input_size, hidden_size, output_size)
if state_dict is not None:
self._load_weights(state_dict)
else:
self._init_weights(nn.init.kaiming_normal)
self.softmax = nn.LogSoftmax(dim=1)
def _load_weights(self, state_dict):
pretrained = torch.load(state_dict)
self.load_state_dict(pretrained['state_dict'])
def _create_weight_tensors(self, input_size, hidden_size, output_size):
self.embed.weight = nn.Parameter(torch.zeros(401, 100))
self.rnn.weight_ih = nn.Parameter(torch.zeros(3*hidden_size, 100))
self.rnn.weight_hh = nn.Parameter(torch.zeros(3*hidden_size, hidden_size))
self.rnn.bias_ih = nn.Parameter(torch.zeros(3*hidden_size))
self.rnn.bias_hh = nn.Parameter(torch.zeros(3*hidden_size))
self.output.weight = nn.Parameter(torch.zeros(3, 256))
self.output.bias_ih = nn.Parameter(torch.zeros(3, 256))
def _init_weights(self, method):
method(self.embed.weight)
method(self.rnn.weight_ih)
method(self.rnn.weight_hh)
method(self.output.weight)
# Bias already 0s
def forward(self, input, hidden):
embedded = self.embed(input)
embedded.transpose_(0,1)
out, hidden = self.rnn(embedded, hidden)
lin = F.relu(self.output(out[MAX_LEN-1,:,:]))
return lin, hidden
def initHidden(self, batch_size, input_size):
return Variable(torch.zeros(1, batch_size, self.hidden_size))
def main():
training = _loadSemEvalData("dataset_training.csv")
validation = _loadSemEvalData("dataset_validation.csv")
    # This check prevents running if the data was not loaded; rephrase it for more specific use.
    # Training and validation are plain ints only when loading failed.
if isinstance(training, int) and isinstance(validation, int):
sys.exit(-1)
# If DATASLICE is smaller than data amount, take a subset.
training = training[:DATA_SLICE, :]
validation = validation[:DATA_SLICE, :]
# Convert text column to embedding indexes
CONVERT_TABLE = _loadCharacterEmbedding()
training_data = _convert_with_vocab(training, CONVERT_TABLE)
validation_data = _convert_with_vocab(validation, CONVERT_TABLE)
training_labels = training[:, 1].astype(int)
validation_labels = validation[:, 1].astype(int)
# Split the training data to test and training set.
# Holdout-method is used, and no further cross validation is performed.
# TODO Change naming convention from Training, test, validation(unseen data) to Training, validation, test
X_train = training_data[:int(training_data.shape[0]*0.8), :]
X_test = training_data[int(training_data.shape[0]*0.8):, :]
y_train = training_labels[:int(training_labels.shape[0]*0.8)]
y_test = training_labels[int(training_labels.shape[0]*0.8):]
epoch = 0
best_prec = 0.0
training_cost = []
training_acc = []
validation_cost = []
validation_acc = []
model = RNN(140, 100, 256, 3, state_dict=op.join(op.dirname(__file__), "..", "..", "assets", "weights", "RNN.pt"))
if torch.cuda.is_available() and CUDA:
model.cuda()
criterion = nn.CrossEntropyLoss().cuda()
else:
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LR)
if op.exists(MODEL_PATH) and CONTINUE:
# TODO, cannot continue if was trained on CPU and continues on GPU and vice versa
        print("Continuing with the previous model")
checkpoint = torch.load(MODEL_PATH)
epoch = checkpoint["epoch"]
best_prec = checkpoint["best_prec"]
optimizer.load_state_dict(checkpoint["optimizer"])
for paramGroup in optimizer.param_groups:
paramGroup['lr'] = LR
training_cost = checkpoint["train_cost"]
training_acc = checkpoint["train_hist"]
validation_cost = checkpoint['valid_cost']
validation_acc = checkpoint["valid_hist"]
print("=> loaded checkpoint (epoch {})"
.format(checkpoint['epoch']))
print(model)
def fit_and_log(epoch):
clf.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=epoch,
validation_data=(X_test, y_test))
[training_cost.append(i) for i in clf.training_cost]
[training_acc.append(i) for i in clf.training_acc]
[validation_acc.append(i) for i in clf.validation_acc]
[validation_cost.append(i) for i in clf.validation_cost]
clf = Estimator(model)
clf.compile(optimizer,
loss=nn.CrossEntropyLoss())
#loss=nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor([2,1,1.5])))
if TEST_WITH_VALIDATION:
_, VAL_ACC = clf.evaluate(validation_data, validation_labels, BATCH_SIZE)
print("Validation accuracy on the unseen validation data {}".format(VAL_ACC))
plot_progress(training_acc, validation_acc)
plot_progress(training_cost, validation_cost, loss=True)
return -1
try:
if EPOCH == 0:
c = 0
while True:
                # TODO only saves after finishing; should keep track of the best weights.
print("Training epoch: {} from current run".format(c))
fit_and_log(1)
c+=1
epoch += 1
else:
fit_and_log(EPOCH)
epoch += EPOCH
except (KeyboardInterrupt, SystemExit):
# Save the model
if len(validation_acc) != 0:
is_best = validation_acc[-1] > best_prec
best_prec = max(validation_acc[-1], best_prec)
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'best_prec': best_prec,
'optimizer': optimizer.state_dict(),
'train_cost': training_cost,
'train_hist': training_acc,
                'valid_cost': validation_cost,
'valid_hist': validation_acc
}, is_best)
print("Saved model after interrupt")
raise
score, acc = clf.evaluate(X_test, y_test)
print('Test score:', score)
print('Test accuracy:', acc)
# Save the model
is_best = acc > best_prec
best_prec = max(acc, best_prec)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec': best_prec,
'optimizer': optimizer.state_dict(),
'train_cost': training_cost,
'train_hist': training_acc,
'valid_cost': validation_cost,
'valid_hist': validation_acc
}, is_best)
_, VAL_ACC = clf.evaluate(validation_data, validation_labels, BATCH_SIZE)
print("Validation accuracy on the unseen validation data {}".format(VAL_ACC))
plot_progress(training_acc, validation_acc)
plot_progress(training_cost, validation_cost, loss=True)
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"torch.nn.Embedding",
"numpy.argmin",
"matplotlib.patches.Patch",
"os.path.join",
"numpy.unique",
"os.path.dirname",
"torch.load",
"os.path.exists",
"torch.nn.Linear",
"shutil.copyfile",
"torch.zeros",
"torch.nn.GRU",
"matplotlib.pyplot.show",
"torch.topk",
"torch.nn.LogSoftmax",
"matplotlib.pyplot.legend",
"numpy.min",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"sys.exit",
"torch.from_numpy",
"matplotlib.pyplot.plot",
"numpy.zeros",
"torch.nn.CrossEntropyLoss",
"torch.save",
"matplotlib.pyplot.xlabel"
] |
[((636, 656), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (646, 656), True, 'from os import path as op\n'), ((708, 728), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (718, 728), True, 'from os import path as op\n'), ((879, 940), 'pandas.read_csv', 'pandas.read_csv', (['file'], {'sep': '""","""', 'encoding': '"""utf-8"""', 'index_col': '(0)'}), "(file, sep=',', encoding='utf-8', index_col=0)\n", (894, 940), False, 'import pandas\n'), ((1130, 1160), 'numpy.zeros', 'np.zeros', (['(data.shape[0], 140)'], {}), '((data.shape[0], 140))\n', (1138, 1160), True, 'import numpy as np\n'), ((3205, 3232), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (3215, 3232), False, 'import torch\n'), ((3497, 3541), 'matplotlib.pyplot.plot', 'pyplot.plot', (['xaxis', 'training'], {'color': '"""orange"""'}), "(xaxis, training, color='orange')\n", (3508, 3541), False, 'from matplotlib import pyplot, patches\n'), ((3552, 3596), 'matplotlib.pyplot.plot', 'pyplot.plot', (['xaxis', 'validation'], {'color': '"""blue"""'}), "(xaxis, validation, color='blue')\n", (3563, 3596), False, 'from matplotlib import pyplot, patches\n'), ((4302, 4339), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'handles': '[orange, blue]'}), '(handles=[orange, blue])\n', (4315, 4339), False, 'from matplotlib import pyplot, patches\n'), ((4344, 4357), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (4355, 4357), False, 'from matplotlib import pyplot, patches\n'), ((1644, 1664), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (1654, 1664), True, 'from os import path as op\n'), ((1724, 1740), 'os.path.exists', 'op.exists', (['fname'], {}), '(fname)\n', (1733, 1740), True, 'from os import path as op\n'), ((2307, 2327), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (2317, 2327), True, 'from os import path as op\n'), ((2403, 2419), 'os.path.exists', 'op.exists', (['VOCAB'], {}), '(VOCAB)\n', (2412, 2419), True, 'from os import path as op\n'), ((2647, 2659), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2655, 2659), False, 'import sys\n'), ((3257, 3294), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'MODEL_PATH'], {}), '(filename, MODEL_PATH)\n', (3272, 3294), False, 'import shutil\n'), ((3622, 3669), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Training vs Validation accuracy"""'], {}), "('Training vs Validation accuracy')\n", (3634, 3669), False, 'from matplotlib import pyplot, patches\n'), ((3678, 3700), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3691, 3700), False, 'from matplotlib import pyplot, patches\n'), ((3709, 3738), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Accuracy (%)"""'], {}), "('Accuracy (%)')\n", (3722, 3738), False, 'from matplotlib import pyplot, patches\n'), ((3756, 3812), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""orange"""', 'label': '"""Training accuracy"""'}), "(color='orange', label='Training accuracy')\n", (3769, 3812), False, 'from matplotlib import pyplot, patches\n'), ((3828, 3884), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""blue"""', 'label': '"""Validation accuracy"""'}), "(color='blue', label='Validation accuracy')\n", (3841, 3884), False, 'from matplotlib import pyplot, patches\n'), ((3912, 3933), 'numpy.argmin', 'np.argmin', (['validation'], {}), '(validation)\n', (3921, 3933), True, 'import numpy as np\n'), ((3949, 3965), 'numpy.min', 'np.min', (['training'], {}), 
'(training)\n', (3955, 3965), True, 'import numpy as np\n'), ((3974, 4048), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[minIdx, minIdx + 1]', '[miny, validation[minIdx]]'], {'color': '"""red"""'}), "([minIdx, minIdx + 1], [miny, validation[minIdx]], color='red')\n", (3985, 4048), False, 'from matplotlib import pyplot, patches\n'), ((4055, 4098), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Training vs Validation loss"""'], {}), "('Training vs Validation loss')\n", (4067, 4098), False, 'from matplotlib import pyplot, patches\n'), ((4107, 4129), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4120, 4129), False, 'from matplotlib import pyplot, patches\n'), ((4138, 4159), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (4151, 4159), False, 'from matplotlib import pyplot, patches\n'), ((4177, 4229), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""orange"""', 'label': '"""Training loss"""'}), "(color='orange', label='Training loss')\n", (4190, 4229), False, 'from matplotlib import pyplot, patches\n'), ((4245, 4297), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""blue"""', 'label': '"""Validation loss"""'}), "(color='blue', label='Validation loss')\n", (4258, 4297), False, 'from matplotlib import pyplot, patches\n'), ((7596, 7632), 'numpy.unique', 'np.unique', (['(y + 1)'], {'return_counts': '(True)'}), '(y + 1, return_counts=True)\n', (7605, 7632), True, 'import numpy as np\n'), ((7688, 7726), 'numpy.unique', 'np.unique', (['classes'], {'return_counts': '(True)'}), '(classes, return_counts=True)\n', (7697, 7726), True, 'import numpy as np\n'), ((8852, 8896), 'torch.nn.Embedding', 'nn.Embedding', (['(401)', 'embed_size'], {'padding_idx': '(0)'}), '(401, embed_size, padding_idx=0)\n', (8864, 8896), True, 'import torch.nn as nn\n'), ((8916, 8971), 'torch.nn.GRU', 'nn.GRU', (['embed_size', 'hidden_size'], {'bias': '(True)', 'dropout': '(0.5)'}), '(embed_size, hidden_size, bias=True, dropout=0.5)\n', (8922, 8971), True, 'import torch.nn as nn\n'), ((8994, 9029), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (9003, 9029), True, 'import torch.nn as nn\n'), ((9277, 9297), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9290, 9297), True, 'import torch.nn as nn\n'), ((9361, 9383), 'torch.load', 'torch.load', (['state_dict'], {}), '(state_dict)\n', (9371, 9383), False, 'import torch\n'), ((10941, 10953), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (10949, 10953), False, 'import sys\n'), ((12201, 12226), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12224, 12226), False, 'import torch\n'), ((12337, 12358), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (12356, 12358), True, 'import torch.nn as nn\n'), ((12470, 12491), 'os.path.exists', 'op.exists', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (12479, 12491), True, 'from os import path as op\n'), ((12670, 12692), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (12680, 12692), False, 'import torch\n'), ((2006, 2029), 'os.path.join', 'op.join', (['DATADIR', 'fname'], {}), '(DATADIR, fname)\n', (2013, 2029), True, 'from os import path as op\n'), ((9557, 9578), 'torch.zeros', 'torch.zeros', (['(401)', '(100)'], {}), '(401, 100)\n', (9568, 9578), False, 'import torch\n'), ((9622, 9655), 'torch.zeros', 'torch.zeros', (['(3 * hidden_size)', '(100)'], {}), '(3 * hidden_size, 100)\n', (9633, 9655), False, 
'import torch\n'), ((9697, 9738), 'torch.zeros', 'torch.zeros', (['(3 * hidden_size)', 'hidden_size'], {}), '(3 * hidden_size, hidden_size)\n', (9708, 9738), False, 'import torch\n'), ((9778, 9806), 'torch.zeros', 'torch.zeros', (['(3 * hidden_size)'], {}), '(3 * hidden_size)\n', (9789, 9806), False, 'import torch\n'), ((9846, 9874), 'torch.zeros', 'torch.zeros', (['(3 * hidden_size)'], {}), '(3 * hidden_size)\n', (9857, 9874), False, 'import torch\n'), ((9916, 9935), 'torch.zeros', 'torch.zeros', (['(3)', '(256)'], {}), '(3, 256)\n', (9927, 9935), False, 'import torch\n'), ((9980, 9999), 'torch.zeros', 'torch.zeros', (['(3)', '(256)'], {}), '(3, 256)\n', (9991, 9999), False, 'import torch\n'), ((10520, 10564), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.hidden_size'], {}), '(1, batch_size, self.hidden_size)\n', (10531, 10564), False, 'import torch\n'), ((13702, 13723), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (13721, 13723), True, 'import torch.nn as nn\n'), ((1800, 1823), 'os.path.join', 'op.join', (['DATADIR', 'fname'], {}), '(DATADIR, fname)\n', (1807, 1823), True, 'from os import path as op\n'), ((12128, 12148), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (12138, 12148), True, 'from os import path as op\n'), ((12278, 12299), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (12297, 12299), True, 'import torch.nn as nn\n'), ((1925, 1948), 'os.path.join', 'op.join', (['DATADIR', 'fname'], {}), '(DATADIR, fname)\n', (1932, 1948), True, 'from os import path as op\n'), ((7362, 7385), 'torch.from_numpy', 'torch.from_numpy', (['(y + 1)'], {}), '(y + 1)\n', (7378, 7385), False, 'import torch\n'), ((8335, 8354), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (8351, 8354), False, 'import torch\n'), ((5146, 5165), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (5162, 5165), False, 'import torch\n'), ((5226, 5249), 'torch.from_numpy', 'torch.from_numpy', (['(y + 1)'], {}), '(y + 1)\n', (5242, 5249), False, 'import torch\n'), ((7261, 7284), 'torch.from_numpy', 'torch.from_numpy', (['(y + 1)'], {}), '(y + 1)\n', (7277, 7284), False, 'import torch\n'), ((8189, 8208), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (8205, 8208), False, 'import torch\n'), ((4874, 4893), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (4890, 4893), False, 'import torch\n'), ((4961, 4984), 'torch.from_numpy', 'torch.from_numpy', (['(y + 1)'], {}), '(y + 1)\n', (4977, 4984), False, 'import torch\n'), ((7474, 7495), 'torch.topk', 'torch.topk', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (7484, 7495), False, 'import torch\n'), ((5802, 5823), 'torch.topk', 'torch.topk', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (5812, 5823), False, 'import torch\n')]
|
# Python related libraries
import numpy as np
import random
# ROS2 msgs
from autonomous_exploration_msgs.msg import PointGroup
class PointsGroup:
"""
Class similar to the one used in unity, created
to handle each area in the interactive map
"""
def __init__(self, pG : PointGroup) -> None:
# Store the map related data
self.mapOrigin = pG.map_origin
self.mapDims = pG.map_dims
self.mapResolution = pG.map_resolution
self.associated_file = pG.associated_file
# Store the group related data
self.groupID = pG.group_id
self.numOfPoints = int(0.5 * len(pG.map_pos))
self.mapPos = np.zeros([self.numOfPoints, 2])
for i in range(self.numOfPoints):
self.mapPos[i, 0] = pG.map_pos[2 * i]
self.mapPos[i, 1] = pG.map_pos[2 * i + 1]
# Generate the convexHull
# Check if there are enough points in the group before generating the convex hull
self.convexHullPoints = []
if self.numOfPoints > 3:
self.GenerateConvexHull()
def GenerateConvexHull(self) -> None:
"""
Generate the convex hull using the points of the interactive area
Same code as the one used in the unity package
"""
vertices = list(self.mapPos.copy())
vertices = [list(tmp) for tmp in vertices]
# Step 1: Find the vertex with the smallest x coordinate
startPos = vertices[0]
for vert in vertices:
if ( vert[0] < startPos[0]):
startPos = vert
#print(startPos)
#print(vertices)
self.convexHullPoints.append(startPos)
vertices.remove(startPos)
        # Step 2: Loop to generate the convex hull
currPos = self.convexHullPoints[0]
cnt = 0
while True:
# After 2 iterations we have to add the start position again so we can terminate the algorithm
if (cnt == 2):
vertices.append(self.convexHullPoints[0])
# Check if there are no more points
if (len(vertices) == 0):
break
# Pick the next point randomly
nextPos = vertices[random.randint(0, len(vertices) - 1)]
a = currPos
b = nextPos
# Check if there's a point to the left of ab, if so then it's the new b
for vert in vertices:
# Skip the point picked randomly
if vert == nextPos:
continue
# Compare the point and the line
# To the left = better point, so pick it as next point on the convex hull
if self.CompareLinePoint(a, b, vert) > 0:
nextPos = vert
b = vert
# Update the convexHull
self.convexHullPoints.append(nextPos)
currPos = nextPos
            # Check if we have reached the first point of the convex hull again
if currPos == self.convexHullPoints[0]:
del self.convexHullPoints[-1]
break
cnt += 1
@staticmethod
def CompareLinePoint(a : np.array, b : np.array, c : np.array) -> int:
""" Return the position of a point relative to a line """
# Where is c in relation to a-b ?
# < 0 -> to the right
# = 0 -> on the line
# > 0 -> to the left
relation = (a[0] - c[0]) * (b[1] - c[1]) - (b[0] - c[0]) * (a[1] - c[1])
return relation
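    # Worked example (illustrative, not from the original file): with a = (0, 0), b = (1, 0), c = (0, 1)
    # the relation is (0-0)*(0-1) - (1-0)*(0-1) = 1 > 0, i.e. c lies to the left of the line a->b;
    # GenerateConvexHull and InConvexHull rely on exactly this sign test.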
def InConvexHull(self, c : list) -> bool:
""" Check if the point is inside the convex hull """
        # The convex hull is only generated for more than 3 points (see __init__), so bail out otherwise
        if (self.numOfPoints <= 3):
            return False
inConvexHull = True
# if the point is on the left of all the line segments
# of the convexHull then it's on the outside
for i in range(len(self.convexHullPoints) - 1):
a = self.convexHullPoints[i]
b = self.convexHullPoints[i + 1]
#print(a, b, c, self.groupID, self.CompareLinePoint(a, b, c))
# Check if it's left or right of the line ab
if (self.CompareLinePoint(a, b, c) > 0):
inConvexHull = False
break
# Check for the last line segment
a = self.convexHullPoints[-1]
b = self.convexHullPoints[0]
if (self.CompareLinePoint(a, b, c) > 0):
inConvexHull = False
return inConvexHull
|
[
"numpy.zeros"
] |
[((698, 729), 'numpy.zeros', 'np.zeros', (['[self.numOfPoints, 2]'], {}), '([self.numOfPoints, 2])\n', (706, 729), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from numpy import exp, arcsin, tan, cos, sqrt, sin
def comp_surface(self):
"""Compute the surface of the Hole
Parameters
----------
self : HoleMLSRPM
A HoleMLSRPM object
Returns
-------
S: float
Surface of the Magnet. [m**2]
"""
Rbo = self.get_Rbo()
# Z1
delta1 = arcsin((self.R1 + self.W2) / (self.R1 + self.R3))
alpha1 = self.W1 - delta1
Z1 = self.R3 * exp(-1j * alpha1)
x1 = Z1.real
y1 = Z1.imag
# Zc1
Zc1 = (self.R3 + self.R1) * exp(-1j * alpha1)
xc1 = (self.R3 + self.R1) * cos(alpha1)
yc1 = -(self.R3 + self.R1) * sin(alpha1)
# Z2
x2 = (-1 / tan(self.W1) * xc1 + yc1 - self.W2 / cos(self.W1)) / -(
tan(self.W1) + 1 / tan(self.W1)
)
y2 = -tan(self.W1) * x2 + self.W2 / cos(self.W1)
Z2 = x2 + 1j * y2
# Z3
a3 = 1 + tan(self.W1) ** 2
b3 = -2 * tan(self.W1) * self.W2 / cos(self.W1)
c3 = (self.W2 / cos(self.W1)) ** 2 - self.R2 ** 2
x3 = (-b3 + sqrt(b3 ** 2 - 4 * a3 * c3)) / (2 * a3)
y3 = -tan(self.W1) * x3 + self.W2 / cos(self.W1)
Z3 = x3 + 1j * y3
# Z5
x5 = Rbo - self.H1
y5 = -self.W0 / 2
Z5 = x5 + 1j * y5
# Zc2
xc2 = Rbo - self.H1 - self.R1
yc2 = -self.W0 / 2
Zc2 = xc2 + 1j * yc2
# Z4
a4 = (xc2 - x3) ** 2 - self.R1 ** 2
b4 = 2 * (xc2 - x3) * (y3 - yc2)
c4 = (y3 - yc2) ** 2 - self.R1 ** 2
alpha2 = (-b4 - sqrt(b4 ** 2 - 4 * a4 * c4)) / (2 * a4)
x4 = (xc2 / alpha2 + yc2 + alpha2 * x3 - y3) / (alpha2 + 1 / alpha2)
y4 = alpha2 * (x4 - x3) + y3
Z4 = x4 + 1j * y4
# symmetry
Z6 = Z5.conjugate()
x6 = Z6.real
y6 = Z6.imag
Z7 = Z4.conjugate()
x7 = Z7.real
y7 = Z7.imag
Z8 = Z3.conjugate()
x8 = Z8.real
y8 = Z8.imag
Z9 = Z2.conjugate()
x9 = Z9.real
y9 = Z9.imag
Z10 = Z1.conjugate()
x10 = Z10.real
y10 = Z10.imag
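    # Surface of the closed polygon Z1..Z10 via the shoelace formula:
    # S = 0.5 * |sum_i (x_i * y_{i+1} - x_{i+1} * y_i)|, expanded below into the two cross sums.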
S_magnet_1 = (
x1 * y2
+ x2 * y3
+ x3 * y4
+ x4 * y5
+ x5 * y6
+ x6 * y7
+ x7 * y8
+ x8 * y9
+ x9 * y10
+ x10 * y1
)
S_magnet_2 = (
x1 * y10
+ x2 * y1
+ x3 * y2
+ x4 * y3
+ x5 * y4
+ x6 * y5
+ x7 * y6
+ x8 * y7
+ x9 * y8
+ x10 * y9
)
S_magnet = 0.5 * abs(S_magnet_1 - S_magnet_2)
return S_magnet
|
[
"numpy.arcsin",
"numpy.tan",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] |
[((355, 404), 'numpy.arcsin', 'arcsin', (['((self.R1 + self.W2) / (self.R1 + self.R3))'], {}), '((self.R1 + self.W2) / (self.R1 + self.R3))\n', (361, 404), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((454, 473), 'numpy.exp', 'exp', (['(-1.0j * alpha1)'], {}), '(-1.0j * alpha1)\n', (457, 473), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((549, 568), 'numpy.exp', 'exp', (['(-1.0j * alpha1)'], {}), '(-1.0j * alpha1)\n', (552, 568), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((599, 610), 'numpy.cos', 'cos', (['alpha1'], {}), '(alpha1)\n', (602, 610), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((644, 655), 'numpy.sin', 'sin', (['alpha1'], {}), '(alpha1)\n', (647, 655), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((938, 950), 'numpy.cos', 'cos', (['self.W1'], {}), '(self.W1)\n', (941, 950), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((823, 835), 'numpy.cos', 'cos', (['self.W1'], {}), '(self.W1)\n', (826, 835), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((881, 893), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (884, 893), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((1022, 1049), 'numpy.sqrt', 'sqrt', (['(b3 ** 2 - 4 * a3 * c3)'], {}), '(b3 ** 2 - 4 * a3 * c3)\n', (1026, 1049), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((1102, 1114), 'numpy.cos', 'cos', (['self.W1'], {}), '(self.W1)\n', (1105, 1114), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((1451, 1478), 'numpy.sqrt', 'sqrt', (['(b4 ** 2 - 4 * a4 * c4)'], {}), '(b4 ** 2 - 4 * a4 * c4)\n', (1455, 1478), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((718, 730), 'numpy.cos', 'cos', (['self.W1'], {}), '(self.W1)\n', (721, 730), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((745, 757), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (748, 757), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((793, 805), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (796, 805), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((913, 925), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (916, 925), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((971, 983), 'numpy.cos', 'cos', (['self.W1'], {}), '(self.W1)\n', (974, 983), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((1072, 1084), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (1075, 1084), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((764, 776), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (767, 776), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n'), ((681, 693), 'numpy.tan', 'tan', (['self.W1'], {}), '(self.W1)\n', (684, 693), False, 'from numpy import exp, arcsin, tan, cos, sqrt, sin\n')]
|
import numpy as np
import matplotlib.patches
class Roi():
"""A class to represent a ROI
"""
def __init__(self, axis:int, first_slice:int, last_slice:int, roi_number:int, type_number:int, list_point:list, volume_dimension:tuple):
"""constructor
Args:
            axis (int): [1 for axial, 2 for coronal, 3 for sagittal]
            first_slice (int): [slice number where ROI begins]
            last_slice (int): [slice number where ROI ends]
            roi_number (int): [roi number]
            type_number (int): [0 for nifti, 1 for axial polygon, 11 for axial ellipse, 2 for coronal polygon, 12 for coronal ellipse, 3 for sagittal polygon, 13 for sagittal ellipse]
            list_point (list): [list of [x,y] coordinates of the polygon or ellipse / list of [x,y,z] coordinates of the nifti]
volume_dimension (tuple): [(shape x, shape y, shape z)]
"""
self.axis = axis
self.first_slice = first_slice
self.last_slice = last_slice
self.roi_number = roi_number
self.type_number = type_number
self.list_point = list_point
self.list_point_np = np.asarray(self.list_point)
self.x = volume_dimension[0]
self.y = volume_dimension[1]
self.z = volume_dimension[2]
    def __get_min_max_of_roi(self) -> tuple:
        """Compute the ROI extrema over which we will loop to find the included voxels
        Returns:
            [tuple] -- X/Y extrema (xmin, xmax, ymin, ymax)
        """
points_array = self.list_point_np
all_x = points_array[:][:,0]
all_y = points_array[:][:,1]
        if (self.type_number == 1 or self.type_number == 2 or self.type_number == 3): # POLYGON
xmin = min(all_x)
xmax = max(all_x)
ymin = min(all_y)
ymax = max(all_y)
return xmin , xmax , ymin , ymax
else : #ELLIPSE
height = abs(all_x[0] - all_x[1])
width = abs(all_y[0] - all_y[2])
xmin = all_x[0] - height
xmax = all_x[0] + height
ymin = all_y[0] - width
ymax = all_y[0] + width
return xmin , xmax , ymin, ymax
    def mask_roi_in_slice(self, patch:matplotlib.patches) -> list:
        """Get the ROI x and y limits over which we loop, gathering the [x,y] pixels that lie inside the patch
        Args:
            patch (matplotlib.patches): [polygon or ellipse]
        Returns:
            [list]: [list of [x,y] coordinates]
"""
points = []
xmin, xmax, ymin, ymax = self.__get_min_max_of_roi()
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1) :
if patch.contains_point([x,y], radius = -1e-9) :
points.append([x,y])
return points
def get_empty_np_array(self) -> np.ndarray:
"""Return numpy array to fill given the current dimension and axis
Returns:
[np.ndarray] -- zero filled numpy array
"""
return (np.zeros((self.x, self.y, self.z)))
def coronal_to_axial(self, np_array_3D:np.ndarray) -> np.ndarray:
"""transform coronal 3d ndarray to 3d axial ndarray
Args:
            np_array_3D (np.ndarray): [ROI ndarray]
Returns:
[np.ndarray]: [return axial ndarray]
"""
return np.transpose(np_array_3D, (2,1,0))
    def sagittal_to_axial(self, np_array_3D:np.ndarray) -> np.ndarray:
        """transform sagittal 3d ndarray to 3d axial ndarray
        Args:
            np_array_3D (np.ndarray): [ROI ndarray]
Returns:
[np.ndarray]: [return axial ndarray]
"""
return np.transpose(np_array_3D, (0,2,1))
    def get_mask(self, list_points:list) -> np.ndarray : #list_points = [[x,y,z], [x,y,z], ...]
        """Generate an empty ndarray and fill it with the ROI coordinates
Args:
list_points (list): [ [[x,y,z], [x,y,z], [x,y,z], ...] ]
Returns:
[np.ndarray]: [return binary ndarray of the ROI]
"""
np_array_3D = self.get_empty_np_array()
for point in list_points:
np_array_3D[point[1], point[0] , point[2]] = 1
return np_array_3D.astype(np.uint8)
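# Hypothetical usage sketch (names and values are illustrative only, not part of the original file):
#   roi = Roi(axis=1, first_slice=10, last_slice=12, roi_number=1, type_number=1,
#             list_point=[[5, 5], [20, 5], [20, 20], [5, 20]], volume_dimension=(64, 64, 40))
#   patch = matplotlib.patches.Polygon(roi.list_point_np, closed=True)
#   pixels = roi.mask_roi_in_slice(patch)  # [x, y] pairs inside the polygon for one slice
#   mask = roi.get_mask([[x, y, z] for x, y in pixels for z in range(10, 13)])  # binary 3D volume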
|
[
"numpy.asarray",
"numpy.zeros",
"numpy.transpose"
] |
[((1146, 1173), 'numpy.asarray', 'np.asarray', (['self.list_point'], {}), '(self.list_point)\n', (1156, 1173), True, 'import numpy as np\n'), ((3103, 3137), 'numpy.zeros', 'np.zeros', (['(self.x, self.y, self.z)'], {}), '((self.x, self.y, self.z))\n', (3111, 3137), True, 'import numpy as np\n'), ((3432, 3468), 'numpy.transpose', 'np.transpose', (['np_array_3D', '(2, 1, 0)'], {}), '(np_array_3D, (2, 1, 0))\n', (3444, 3468), True, 'import numpy as np\n'), ((3762, 3798), 'numpy.transpose', 'np.transpose', (['np_array_3D', '(0, 2, 1)'], {}), '(np_array_3D, (0, 2, 1))\n', (3774, 3798), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[ ]:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from autoencoder import model
import pickle
import os
# In[ ]:
DEBUG = False
PLOTTING_SUPPORT = True
RUN_AS_PY_SCRIPT = False
SET_EULER_PARAMS = False
SET_MARMOT_PARAMS = False
# Handle arguments (When executed as .py script)
import sys
argv = sys.argv[:]
if len(argv) > 1:
script_path = argv.pop(0)
if "--euler" in argv:
import sys
sys.stdout = open('stdout.txt', 'w')
RUN_AS_PY_SCRIPT = True
PLOTTING_SUPPORT = False
SET_EULER_PARAMS = True
print("Parameters set for execution on euler cluster")
argv.remove("--euler")
if "--marmot" in argv:
RUN_AS_PY_SCRIPT = True
PLOTTING_SUPPORT = False
SET_MARMOT_PARAMS = True
print("Parameters set for execution on marmot cluster")
argv.remove("--marmot")
if "--script" in argv:
RUN_AS_PY_SCRIPT = True
PLOTTING_SUPPORT = False
print("Running as script")
argv.remove("--script")
# In[ ]:
if not RUN_AS_PY_SCRIPT:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
from IPython.display import clear_output
if PLOTTING_SUPPORT:
get_ipython().magic('matplotlib notebook')
from matplotlib import pyplot as plt
# ## Parameters
# In[ ]:
BATCH_SIZE = 10
VOXEL_SIDE = 24
MAX_STEPS = 10000
VAL_EXAMPLES = 200
N_ROTATION_ANGLES = 12
ROTATION_OFFSET = 0
VAL_EVERY_N_STEPS = 1
VAL_STEP_TOLERANCE = 3
TRAIN_TWINS = False
MP = model.ModelParams()
MP.INPUT_SHAPE = [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE, 1]
HOME_DIR = os.path.expanduser('~')
DATA_DIR = "./database/"
RUN_NAME = "kitti18"
RESTORE_MODEL = True
SAVE_DIR = HOME_DIR + "/Desktop/autoencoder/"
SAVE_FILE = "model.checkpoint"
MP_FILENAME = "model_params.pckl"
TENSORBOARD_DIR = HOME_DIR + "/tensorboard"
SAVE_UNVALIDATED = False
CREATE_VISUALS = False
DETAILED_STEP_TIMES = False
EXPORT_FEATURES = False
# In[ ]:
if SET_EULER_PARAMS:
DATA_DIR = "/cluster/home/dugasd/database/"
SAVE_DIR = "/cluster/home/dugasd/autoencoder-euler/"
TENSORBOARD_DIR = None
CREATE_VISUALS = False
MAX_STEPS = 1000000
VAL_STEP_TOLERANCE = 5
if SET_MARMOT_PARAMS:
DATA_DIR = "/home/daniel/database/"
RUN_NAME = "kitti18-20-27"
SAVE_DIR = "/home/daniel/autoencoder-marmot/"
TENSORBOARD_DIR = None
CREATE_VISUALS = False
MAX_STEPS = 1000000
VAL_STEP_TOLERANCE = 10
N_ROTATION_ANGLES = 36
if not RUN_AS_PY_SCRIPT:
#MP.CONVOLUTION_LAYERS = [{'type': 'conv3d', 'filter': [5, 5, 5, 1, 10], 'downsampling': {'type': 'max_pool3d', 'k': 2}}]
MP.CONVOLUTION_LAYERS = []
#MP.LATENT_SHAPE = [2]
N_ROTATION_ANGLES = 6
CREATE_VISUALS = True
TRAIN_TWINS = True
# In[ ]:
if RUN_AS_PY_SCRIPT:
while argv:
arg = argv.pop(0)
if arg == "-RUN_NAME":
RUN_NAME = argv.pop(0)
print("RUN_NAME set to " + RUN_NAME)
elif arg == "-SAVE_DIR":
SAVE_DIR = argv.pop(0)
print("SAVE_DIR set to " + SAVE_DIR)
elif arg == "--noconv":
MP.CONVOLUTION_LAYERS = []
print("CONVOLUTION LAYERS REMOVED")
elif arg == "--twins":
TRAIN_TWINS = True
print("Training twins.")
elif arg == "-LEARNING_RATE":
MP.LEARNING_RATE = float(argv.pop(0))
print("LEARNING_RATE set to " + str(MP.LEARNING_RATE))
elif arg == "-LATENT_SHAPE":
MP.LATENT_SHAPE = [int(argv.pop(0))]
print("LATENT_SHAPE set to " + str(MP.LATENT_SHAPE))
elif arg == "-VAL_STEP_TOLERANCE":
VAL_STEP_TOLERANCE = int(argv.pop(0))
print("VAL_STEP_TOLERANCE set to " + str(VAL_STEP_TOLERANCE))
elif arg == "-N_ROTATION_ANGLES":
N_ROTATION_ANGLES = int(argv.pop(0))
print("N_ROTATION_ANGLES set to " + str(N_ROTATION_ANGLES))
elif arg == "-ROTATION_OFFSET":
frac = list(map(float, argv.pop(0).split('/'))) + [1.0]
ROTATION_OFFSET = frac[0]/frac[1]
print("ROTATION_OFFSET set to " + str(ROTATION_OFFSET))
elif arg == "--float64":
MP.FLOAT_TYPE = tf.float64
print("MP.FLOAT_TYPE set to " + str(MP.FLOAT_TYPE))
else:
print("Unknown argument: " + arg)
raise NotImplementedError
# In[ ]:
SAVE_PATH = SAVE_DIR+SAVE_FILE
if SAVE_UNVALIDATED:
SAVE_DIR_NOVAL = SAVE_DIR+"unvalidated/"
SAVE_PATH_NOVAL = SAVE_DIR_NOVAL+SAVE_FILE
# ## Load Segments and Features
# In[ ]:
import utilities
run_names, runs = utilities.list_runs(DATA_DIR)
try:
run_names.remove(RUN_NAME)
run_names = [RUN_NAME] + run_names
except:
print(RUN_NAME + " not found in runs.")
print(run_names)
# In[ ]:
if not RUN_AS_PY_SCRIPT:
from ipywidgets import widgets
run_dropdown = widgets.Dropdown(description="Run to import : ", options=run_names)
button = widgets.Button(description="import")
# Interaction functions
def import_run_data(btn):
display.clear_output()
print("Loading segments, features, matches, classes for run")
global segments, features, fnames, matches, classes, ids, classes_set # 'output' variables
segments, features, fnames, matches, classes, ids = utilities.import_run(run_dropdown.value, folder=DATA_DIR)
classes_set = sorted(list(set(classes)))
button.on_click(import_run_data)
# Display widgets
from IPython import display
display.display(run_dropdown)
display.display(button)
import_run_data(button)
else:
segments, features, fnames, matches, classes, ids = utilities.import_run(RUN_NAME, folder=DATA_DIR)
classes_set = sorted(list(set(classes)))
# ## Create Autoencoder
# In[ ]:
if not RUN_AS_PY_SCRIPT:
try:
stored_MP = pickle.load(open(SAVE_DIR+MP_FILENAME, 'rb'))
if MP != stored_MP:
print("WARNING: Setting params for compatibility with stored model.")
print("Stored model: "); print(stored_MP); print("New model: "); print(MP)
MP = stored_MP
except FileNotFoundError:
print("No stored model found. Creating a new model.")
# In[ ]:
vae = model.Autoencoder(MP)
if TRAIN_TWINS: vae.build_twin_graph()
# In[ ]:
summary_writer = None
if TENSORBOARD_DIR != None:
summary_writer = tf.train.SummaryWriter(TENSORBOARD_DIR, vae.sess.graph)
# In[ ]:
if RESTORE_MODEL:
try:
vae.saver.restore(vae.sess, SAVE_PATH)
print("Model restored.")
print(MP.CONVOLUTION_LAYERS)
except Exception as err:
print("Could not load model: ", end="")
try:
stored_MP = pickle.load(open(SAVE_DIR+MP_FILENAME, 'rb'))
print("ERROR: mismatch between model params.")
print("Stored model: "); print(stored_MP); print("New model: "); print(MP)
raise err
except:
print("no model folder.")
# ## Create Voxelized Segment Dataset - With Rotated Copies
# In[ ]:
## Split into training and val data
split_at = min(VAL_EXAMPLES, int(0.2 * len(ids)))
val = segments[:split_at]
train = segments[split_at:]
# In[ ]:
if not TRAIN_TWINS:
print("Rotating segments")
from voxelize import create_rotations
train = create_rotations(train, N_ROTATION_ANGLES, ROTATION_OFFSET)
val = create_rotations(val, 12, ROTATION_OFFSET)
print("Voxelizing training data")
from voxelize import voxelize
train_vox, _ = voxelize(train,VOXEL_SIDE)
val_vox, _ = voxelize(val ,VOXEL_SIDE)
train_twins_vox = None
val_twins_vox = None
if train_vox[0].shape != MP.INPUT_SHAPE:
print("Reshaping")
train_vox=[np.reshape(vox, MP.INPUT_SHAPE) for vox in train_vox]
val_vox=[np.reshape(vox, MP.INPUT_SHAPE) for vox in val_vox]
del train # Save some memory
else:
from voxelize import create_twins
val, val_twins = create_twins(val)
train, train_twins = create_twins(train)
print("Voxelizing training data")
from voxelize import voxelize
train_vox, _ = voxelize(train,VOXEL_SIDE)
val_vox, _ = voxelize(val ,VOXEL_SIDE)
train_twins_vox, _ = voxelize(train_twins,VOXEL_SIDE)
val_twins_vox, _ = voxelize(val_twins ,VOXEL_SIDE)
del train_twins
# In[ ]:
import os
import psutil
process = psutil.Process(os.getpid())
print("Using " + str(process.memory_info().rss/(1024.0*1024.0)) + "mB of memory")
# ## Train Autoencoder ( Computationally Intensive )
# In[ ]:
from timeit import default_timer as timer
from autoencoder.batchmaker import Batchmaker, progress_bar
total_step_cost = None
step_cost_log = []
total_val_cost = 0
val_steps_since_last_improvement = 0
step_start = timer()
try:
val_cost_log = list(np.loadtxt(SAVE_DIR+"val_cost_log.txt"))
print("Previous cost log found.")
except:
val_cost_log = []
# single step
for step in range(MAX_STEPS):
if TRAIN_TWINS:
val, val_twins = create_twins(val)
train, train_twins = create_twins(train)
print("Voxelizing training data")
from voxelize import voxelize
train_vox, _ = voxelize(train,VOXEL_SIDE)
val_vox, _ = voxelize(val ,VOXEL_SIDE)
train_twins_vox, _ = voxelize(train_twins,VOXEL_SIDE)
val_twins_vox, _ = voxelize(val_twins ,VOXEL_SIDE)
del train_twins
# Validation
val_batchmaker = Batchmaker(val_vox, val_twins_vox, BATCH_SIZE, MP)
if np.mod(step, VAL_EVERY_N_STEPS) == 0:
total_val_cost = 0
while True:
if val_batchmaker.is_depleted():
break
else:
batch_input_values, batch_twin_values = val_batchmaker.next_batch()
cost_value = vae.cost_on_single_batch(batch_input_values, batch_twin_values)
total_val_cost += cost_value
if PLOTTING_SUPPORT:
progress_bar(val_batchmaker)
print("Validation cost: "+str(total_val_cost)+" (Training cost: "+str(total_step_cost)+")", end="")
try:
print(" Step Time: " + str(step_end-step_start))
if DETAILED_STEP_TIMES:
print(step_times)
except:
print(" ")
val_cost_log.append(total_val_cost)
# Training Monitor
if len(val_cost_log) > 1:
# Save cost log.
import os
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
if SAVE_UNVALIDATED: os.makedirs(SAVE_DIR_NOVAL)
print("Created directory: %s" % SAVE_DIR)
with open(SAVE_DIR+MP_FILENAME, 'wb') as file:
pickle.dump(MP, file, protocol=2)
np.savetxt(SAVE_DIR+"val_cost_log.txt", val_cost_log)
# Save if cost has improved. Otherwise increment counter.
if val_cost_log[-1] < min(val_cost_log[:-1]):
val_steps_since_last_improvement = 0
# save model to disk
print("Saving ... ", end='')
save_path = vae.saver.save(vae.sess, SAVE_PATH)
print("Model saved in file: %s" % save_path)
else:
val_steps_since_last_improvement += 1
# Stop training if val_cost hasn't improved in VAL_STEP_TOLERANCE steps
if val_steps_since_last_improvement > VAL_STEP_TOLERANCE:
if SAVE_UNVALIDATED:
print("Saving ... ", end='')
save_path = vae.saver.save(vae.sess, SAVE_PATH_NOVAL)
print("Unvalidated model saved in file: %s" % save_path)
print("Training stopped by validation monitor.")
break
# Train on batches
step_start = timer()
zero = timer() - timer()
step_times = {'batchmaking': zero, 'training': zero, 'plotting': zero}
total_step_cost = 0
training_batchmaker = Batchmaker(train_vox, train_twins_vox, BATCH_SIZE, MP)
while True:
if training_batchmaker.is_depleted():
break
else:
t_a = timer()
batch_input_values, batch_twin_values = training_batchmaker.next_batch()
t_b = timer()
# Train over 1 batch.
cost_value = vae.train_on_single_batch(batch_input_values, batch_twin_values, summary_writer=summary_writer)
total_step_cost += cost_value
t_c = timer()
if PLOTTING_SUPPORT:
progress_bar(training_batchmaker)
t_d = timer()
step_times['batchmaking'] += t_b - t_a
step_times['training'] += t_c - t_b
step_times['plotting'] += t_d - t_c
step_cost_log.append(total_step_cost)
step_end = timer()
print("Training ended.")
# ## Visualize Autoencoder Performance
# In[ ]:
if PLOTTING_SUPPORT:
# Plot a few random samples
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib notebook')
plt.ion()
n_samples = 5
import random
x_samples = random.sample(val_vox, 5)
x_samples = [np.reshape(sample, MP.INPUT_SHAPE) for sample in x_samples]
x_reconstruct = vae.encode_decode(x_samples)
plt.figure(figsize=(8, 12))
for i in range(n_samples):
plt.subplot(n_samples*2, 1, 2*i + 1)
plt.imshow(x_samples[i].reshape(VOXEL_SIDE, VOXEL_SIDE*VOXEL_SIDE), vmin=0, vmax=1, cmap='spectral')
plt.title("Top: val input - Bottom: Reconstruction")
plt.subplot(n_samples*2, 1, 2*i + 2)
plt.imshow(x_reconstruct[i].reshape(VOXEL_SIDE, VOXEL_SIDE*VOXEL_SIDE), vmin=0, vmax=1, cmap='spectral')
plt.tight_layout()
# In[ ]:
if PLOTTING_SUPPORT:
nx = ny = 4
nz = 1
dim1 = 0
dim2 = 1
dim3 = 0
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
z_values = np.linspace(-3, 3, nz)
canvas = np.empty((VOXEL_SIDE*ny, VOXEL_SIDE*nx, VOXEL_SIDE*nz))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
for k, zi in enumerate(z_values):
# we can only visualize 3 dimensions, in this case the first 3
latent_sample = np.zeros([1]+MP.LATENT_SHAPE)
latent_sample.flat[dim1] = xi
latent_sample.flat[dim2] = yi
latent_sample.flat[dim3] = zi
x_mean = vae.decode(latent_sample)
canvas[(nx-i-1)*VOXEL_SIDE:(nx-i)*VOXEL_SIDE,
j*VOXEL_SIDE:(j+1)*VOXEL_SIDE,
k*VOXEL_SIDE:(k+1)*VOXEL_SIDE] \
= x_mean[0].reshape(VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE)
from mpl_toolkits.mplot3d import Axes3D
threshold = 0.7
X,Y,Z = np.where(canvas > (threshold*np.max(canvas)))
fig = plt.figure()
plt.cla()
ax = Axes3D(fig)
ax.scatter(X, Y, Z)
# ## Compute Autoencoder Features for Segments
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Voxelizing segments")
from voxelize import voxelize
segments_vox, features_voxel_scale = voxelize(segments, VOXEL_SIDE)
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Computing Eigenvalue Features")
from eigenvalues import eigenvalue_features
features_eig = eigenvalue_features(segments)
features_eig[np.where(np.isnan(features_eig))] = 0
F = features_eig
C = classes
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Computing Features for Segments")
features_nn, confusion_nn = vae.batch_encode([np.reshape(sample, MP.INPUT_SHAPE) for sample in segments_vox])
fnames_nn = ['autoencoder_feature'+str(i+1) for i, _ in enumerate(features_nn[0])]
F = features_nn
C = classes
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Rotating segments")
from voxelize import create_rotations
rotated_segments, rotated_classes = create_rotations(segments, N_ROTATION_ANGLES, classes=classes)
if False: # walls_vs_cars
print("Removing unknowns")
rotated_segments = [segment for segment, class_ in zip(rotated_segments, rotated_classes) if class_ != "unknown"]
rotated_classes = [class_ for class_ in rotated_classes if class_ != "unknown"]
print("Voxelizing rotations")
from voxelize import voxelize
rotated_segments_vox, rotated_segments_scale = voxelize(rotated_segments, VOXEL_SIDE)
print("Computing Features for rotations")
rotated_features, _ = vae.batch_encode([np.reshape(sample, MP.INPUT_SHAPE) for sample in rotated_segments_vox])
F = rotated_features
C = rotated_classes
# ## T-SNE
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("T-SNE")
dir_ = "/tmp/online_matcher/visuals/"
import os
if not os.path.exists(dir_):
os.makedirs(dir_)
if MP.LATENT_SHAPE[0] == 2:
F2 = F
else:
from tools.tsne import tsne
F2 = tsne(F, err_threshold=1.0)
from itertools import cycle
cnames = ['dodgerblue', 'gold', 'silver', 'tomato',
'plum', 'lemonchiffon', 'grey', 'orchid', 'lime', 'palegreen']
from matplotlib import pyplot as plt
plt.figure(figsize=(12,7))
for c_, name in zip(cycle(cnames), classes_set):
x = [values[0] for values, class_ in zip(F2, C) if class_ == name]
y = [values[1] for values, class_ in zip(F2, C) if class_ == name]
plt.scatter(x, y, c=c_, alpha=0.8, lw = 0)
box = plt.gca().get_position()
plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height])
ncol = 2 if len(classes_set) > 10 else 1
plt.legend(classes_set, loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol)
plt.title('T-SNE')
plt.xlabel('x_dim')
plt.ylabel('y_dim')
plt.show()
try:
plt.gcf().savefig(dir_+"t-sne.png")
except:
print("not saved.")
if len(matches) > 0:
print("Adding matches")
# Dim all points
plt.cla()
for c_, name in zip(cycle(cnames), classes_set):
x = [values[0] for values, class_ in zip(F2, C) if class_ == name]
y = [values[1] for values, class_ in zip(F2, C) if class_ == name]
plt.scatter(x, y, c=c_, alpha=0.2, lw = 0)
plt.legend(classes_set, loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol)
plt.title('T-SNE')
plt.xlabel('x_dim')
plt.ylabel('y_dim')
# Bring out matched points
matched_ids = [id_ for match in matches for id_ in match]
for c_, name in zip(cycle(cnames), classes_set):
x = [values[0] for values, class_, id_ in zip(F2, C, ids) if class_ == name and id_ in matched_ids]
y = [values[1] for values, class_, id_ in zip(F2, C, ids) if class_ == name and id_ in matched_ids]
plt.scatter(x, y, c=c_, s=30, lw = 1)
# Show matches as lines
for match in matches:
line_x = [ F2[ids.index(match[0])][0], F2[ids.index(match[1])][0] ]
line_y = [ F2[ids.index(match[0])][1], F2[ids.index(match[1])][1] ]
plt.plot(line_x, line_y, 'black', linewidth=1)
try:
plt.gcf().savefig(dir_+"t-sne_matches.png")
except:
print("not saved.")
# ## Reconstructions
# In[ ]:
RC_CONFIDENCE = 0.2
ONEVIEW = True
# In[ ]:
# Reconstructions
if not RUN_AS_PY_SCRIPT:
N = 400
SV_ = segments_vox[:N]
S_ = segments[:N]
I_ = ids[:N]
reconstruction_vox = vae.batch_encode_decode([np.reshape(sample, MP.INPUT_SHAPE) for sample in SV_])
reconstruction_vox = [np.reshape(vox, [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE]) for vox in reconstruction_vox]
from voxelize import unvoxelize
reconstruction = [unvoxelize(vox > RC_CONFIDENCE) for vox in reconstruction_vox]
reconstruction = [segment*scale for (segment, scale) in zip(reconstruction, features_voxel_scale)]
if CREATE_VISUALS:
dir_ = "/tmp/online_matcher/visuals/reconstructions/"
from visuals import visuals_of_matches
reconstruction_ids = [id_+max(I_)+1 for id_ in I_]
one_to_one_matches = [[id1, id2] for id1, id2 in zip(I_, reconstruction_ids)]
visuals_of_matches(one_to_one_matches, S_+reconstruction, I_+reconstruction_ids, directory=dir_, oneview=ONEVIEW)
clear_output()
# In[ ]:
# Reconstructions of rotations for one object
if CREATE_VISUALS:
dir_ = "/tmp/online_matcher/visuals/rotations/"
class_name = "car"
class_ids = [np.random.choice([id_ for id_, class_ in zip(ids, classes) if class_ == class_name])]
class_indices = [ids.index(id_) for id_ in class_ids]
class_segments = np.array(segments)[class_indices]
from voxelize import create_rotations
class_rotated_segments = np.array(list(class_segments) + list(create_rotations(class_segments, N_ROTATION_ANGLES)))
from voxelize import voxelize
class_segments_vox, class_voxel_scale = voxelize(class_rotated_segments, VOXEL_SIDE)
if CREATE_VISUALS:
class_reconstruction_vox = vae.batch_encode_decode([np.reshape(vox, MP.INPUT_SHAPE) for vox in class_segments_vox])
class_reconstruction_vox = [np.reshape(vox, [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE]) for vox in class_reconstruction_vox]
from voxelize import unvoxelize
class_reconstruction = [unvoxelize(vox > RC_CONFIDENCE) for vox in class_reconstruction_vox]
class_reconstruction = [segment*scale for (segment, scale) in zip(class_reconstruction, class_voxel_scale)]
from visuals import visuals_of_matches
fake_ids = list(range(len(class_reconstruction)))
fake_reconstruction_ids = [id_+max(fake_ids)+1 for id_ in fake_ids]
one_to_one_matches = [[id1, id2] for id1, id2 in zip(fake_ids, fake_reconstruction_ids)]
visuals_of_matches(one_to_one_matches,
list(class_rotated_segments)+class_reconstruction,
fake_ids+fake_reconstruction_ids,
directory=dir_, oneview=ONEVIEW)
clear_output()
class_features, confusion = vae.batch_encode([np.reshape(vox, MP.INPUT_SHAPE) for vox in class_segments_vox])
class_features = np.array(class_features)
print(class_name)
print("Id: "+str(class_ids[0]))
from matplotlib import pyplot as plt
plt.figure()
plt.step(range(len(class_features.T)), class_features.T, color='k', alpha=0.2, where='mid')
plt.plot(np.sqrt(np.exp(confusion)).T, 'r')
plt.show()
plt.gcf().savefig(dir_+"signature.png")
# In[ ]:
#Gifs
id_ = np.random.choice(ids)
print(id_)
segment = segments[ids.index(id_)]
import visuals
visuals.single_segment_as_gif(segment)
visuals.single_segment_reconstruction_as_gif(segment, vae, confidence=0.3)
visuals.single_segment_rotations_reconstruction_as_gif(segment, vae, confidence=0.3)
visuals.single_segment_degeneration_as_gif(segment, vae, confidence=0.3)
visuals.single_segment_confidence_as_gif(segment, vae)
# ## Class Signatures
# In[ ]:
if PLOTTING_SUPPORT:
dir_ = "/tmp/online_matcher/visuals/reconstructions/"
for class_name in classes_set:
print(class_name)
class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
class_indices = [ids.index(id_) for id_ in class_ids]
class_segments = np.array(segments)[class_indices]
class_features = np.array(features_nn)[class_indices]
class_confusion = np.array(confusion_nn)[class_indices]
from matplotlib import pyplot as plt
plt.figure()
plt.step(range(len(class_features.T)), class_features.T, color='k', alpha=0.2, where='mid')
plt.plot(np.sqrt(np.exp(class_confusion)).T, 'r')
plt.show()
plt.gcf().savefig(dir_+class_name+"_signature.png")
# In[ ]:
if PLOTTING_SUPPORT:
# Include Rotated segments
for class_name in classes_set:
print(class_name)
class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
class_indices = [ids.index(id_) for id_ in class_ids]
class_segments = np.array(segments)[class_indices]
from voxelize import create_rotations
class_rotated_segments = np.array(list(class_segments) + list(create_rotations(class_segments, N_ROTATION_ANGLES)))
from voxelize import voxelize
class_segments_vox, _ = voxelize(class_rotated_segments, VOXEL_SIDE)
class_features, confusion = vae.batch_encode([np.reshape(vox, MP.INPUT_SHAPE) for vox in class_segments_vox])
class_features = np.array(class_features)
from matplotlib import pyplot as plt
plt.figure()
plt.step(range(len(class_features.T)), class_features.T, color='k', alpha=0.2, where='mid')
plt.plot(np.sqrt(np.exp(confusion)).T, 'r')
plt.show()
plt.gcf().savefig(dir_+class_name+"_rotations_signature.png")
# In[ ]:
if PLOTTING_SUPPORT:
from itertools import cycle
colors = cycle(['dodgerblue', 'gold', 'silver', 'tomato'])
plt.figure()
plt.title("Average absolute value of features, per class")
plt.xlabel('feature #')
plt.ylabel('avg(abs(feature))')
for class_name, color_ in zip(classes_set, colors):
class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
class_indices = [ids.index(id_) for id_ in class_ids]
class_features = np.array(features_nn)[class_indices]
plt.plot(np.mean(np.abs(class_features), axis=0), marker='_', color=color_, label=class_name)
plt.hlines(np.mean(np.abs(class_features)),0,len(class_features[0])-1, linestyle='--', color=color_)
plt.show()
plt.legend()
plt.figure()
plt.title("Average confusion, per class")
plt.xlabel('feature #')
plt.ylabel('sigma^2')
for class_name, color_ in zip(classes_set, colors):
class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
class_indices = [ids.index(id_) for id_ in class_ids]
class_confusion = np.array(confusion_nn)[class_indices]
plt.plot(np.mean(np.exp(class_confusion), axis=0), marker='_', color=color_, label=class_name)
plt.hlines(np.mean(np.exp(class_confusion)),0,len(class_features[0])-1, linestyle='--', color=color_)
plt.show()
plt.legend()
print("")
# ## Export Features
# In[ ]:
def remove_features(fnames_to_remove, fnames, features):
  # Remove the autoencoder features from the imported features if they already exist
for fname_to_remove in fnames_to_remove:
if fname_to_remove in fnames:
print(" Removing pre-existing feature " + fname_to_remove)
for j, values in enumerate(features):
features[j] = np.delete(values, fnames.index(fname_to_remove))
fnames.remove(fname_to_remove)
assert len(fnames) == len(features[0])
def update_features(fnames_to_update, features_to_update, fnames, features):
assert len(fnames_to_update) == len(features_to_update[0])
# Remove the selected features if they already exist
remove_features(fnames_to_update, fnames, features)
# Add in the selected features
for fname in fnames_to_update: print(" Adding feature " + fname)
for i, [f, ftu] in enumerate(zip(features, features_to_update)):
features[i] = np.concatenate([f, ftu])
fnames += fnames_to_update
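# Hypothetical example (illustrative name only): append one extra scalar feature per segment with
#   update_features(['my_new_feature'], [[0.1]] * len(features), fnames, features)
# afterwards every feature vector has one more entry and fnames ends with 'my_new_feature'.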
# Create copies of the original features
# In[ ]:
if EXPORT_FEATURES:
updated_fnames = fnames[:]
updated_features = features[:]
print(fnames)
print(features[0])
# Add/overwrite autoencoder features
# In[ ]:
if EXPORT_FEATURES:
# AE features
fnames_nn = ['autoencoder_feature'+str(i+1) for i in range(features_nn[0].shape[0])]
update_features(fnames_nn, features_nn, updated_fnames, updated_features)
# Scale features
sc_fnames = ['x_scale', 'y_scale', 'z_scale']
update_features(sc_fnames, features_voxel_scale, updated_fnames, updated_features)
# In[ ]:
if EXPORT_FEATURES:
from load_segments import write_features
write_features(ids, updated_features, updated_fnames, filename=runs[run_index][features_file_index])
# ## Evaluate Features
# In[ ]:
# Features
if CREATE_VISUALS:
from visuals import visuals_of_segments
visuals_of_segments(segments, ids, features=features_nn)
clear_output()
# In[ ]:
# Matches
if CREATE_VISUALS:
from visuals import visuals_of_matches
visuals_of_matches(matches, segments, ids, features=features_nn)
clear_output()
# ## Save or Convert Model
# In[ ]:
CONVERT_VARIABLE_NAMES = False
name_to_var_dict = {}
if CONVERT_VARIABLE_NAMES:
for var in vae.variables:
# Modify a few names
if 'LatentLayerWeights/' in var.name:
name = var.name.replace('LatentLayerWeights/', '')
elif 'ReconstructionLayerWeights/' in var.name:
name = var.name.replace('ReconstructionLayerWeights/', '')
# Leave other names unchanged
else:
name = var.name
name_to_var_dict[name] = var
temp_saver = tf.train.Saver(name_to_var_dict)
temp_saver.restore(vae.sess, SAVE_PATH)
name_to_var_dict
# In[ ]:
# Save model and params
if False:
vae.saver.save(vae.sess, SAVE_PATH)
with open(SAVE_DIR+MP_FILENAME, 'wb') as file:
pickle.dump(MP, file, protocol=2)
# In[ ]:
|
[
"matplotlib.pyplot.title",
"visuals.visuals_of_matches",
"pickle.dump",
"numpy.abs",
"random.sample",
"numpy.empty",
"voxelize.create_twins",
"ipywidgets.widgets.Dropdown",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.gca",
"voxelize.create_rotations",
"itertools.cycle",
"utilities.import_run",
"visuals.single_segment_reconstruction_as_gif",
"matplotlib.pyplot.tight_layout",
"numpy.savetxt",
"os.path.exists",
"IPython.display.display",
"autoencoder.model.ModelParams",
"numpy.max",
"matplotlib.pyplot.cla",
"numpy.loadtxt",
"numpy.linspace",
"numpy.random.choice",
"numpy.reshape",
"load_segments.write_features",
"voxelize.unvoxelize",
"voxelize.voxelize",
"autoencoder.batchmaker.progress_bar",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"tensorflow.train.Saver",
"matplotlib.pyplot.legend",
"numpy.mod",
"ipywidgets.widgets.Button",
"visuals.single_segment_rotations_reconstruction_as_gif",
"matplotlib.pyplot.ion",
"visuals.visuals_of_segments",
"matplotlib.pyplot.ylabel",
"tools.tsne.tsne",
"IPython.display.clear_output",
"visuals.single_segment_as_gif",
"matplotlib.pyplot.gcf",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"os.getpid",
"autoencoder.model.Autoencoder",
"visuals.single_segment_confidence_as_gif",
"eigenvalues.eigenvalue_features",
"os.makedirs",
"timeit.default_timer",
"visuals.single_segment_degeneration_as_gif",
"tensorflow.train.SummaryWriter",
"matplotlib.pyplot.scatter",
"utilities.list_runs",
"matplotlib.pyplot.plot",
"numpy.zeros",
"autoencoder.batchmaker.Batchmaker",
"numpy.array",
"matplotlib.pyplot.xlabel",
"os.path.expanduser"
] |
[((1498, 1517), 'autoencoder.model.ModelParams', 'model.ModelParams', ([], {}), '()\n', (1515, 1517), False, 'from autoencoder import model\n'), ((1587, 1610), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1605, 1610), False, 'import os\n'), ((4489, 4518), 'utilities.list_runs', 'utilities.list_runs', (['DATA_DIR'], {}), '(DATA_DIR)\n', (4508, 4518), False, 'import utilities\n'), ((6026, 6047), 'autoencoder.model.Autoencoder', 'model.Autoencoder', (['MP'], {}), '(MP)\n', (6043, 6047), False, 'from autoencoder import model\n'), ((8425, 8432), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8430, 8432), True, 'from timeit import default_timer as timer\n'), ((21278, 21299), 'numpy.random.choice', 'np.random.choice', (['ids'], {}), '(ids)\n', (21294, 21299), True, 'import numpy as np\n'), ((21361, 21399), 'visuals.single_segment_as_gif', 'visuals.single_segment_as_gif', (['segment'], {}), '(segment)\n', (21390, 21399), False, 'import visuals\n'), ((21400, 21474), 'visuals.single_segment_reconstruction_as_gif', 'visuals.single_segment_reconstruction_as_gif', (['segment', 'vae'], {'confidence': '(0.3)'}), '(segment, vae, confidence=0.3)\n', (21444, 21474), False, 'import visuals\n'), ((21475, 21563), 'visuals.single_segment_rotations_reconstruction_as_gif', 'visuals.single_segment_rotations_reconstruction_as_gif', (['segment', 'vae'], {'confidence': '(0.3)'}), '(segment, vae,\n confidence=0.3)\n', (21529, 21563), False, 'import visuals\n'), ((21560, 21632), 'visuals.single_segment_degeneration_as_gif', 'visuals.single_segment_degeneration_as_gif', (['segment', 'vae'], {'confidence': '(0.3)'}), '(segment, vae, confidence=0.3)\n', (21602, 21632), False, 'import visuals\n'), ((21633, 21687), 'visuals.single_segment_confidence_as_gif', 'visuals.single_segment_confidence_as_gif', (['segment', 'vae'], {}), '(segment, vae)\n', (21673, 21687), False, 'import visuals\n'), ((24792, 24802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24800, 24802), True, 'from matplotlib import pyplot as plt\n'), ((24803, 24815), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24813, 24815), True, 'from matplotlib import pyplot as plt\n'), ((4744, 4811), 'ipywidgets.widgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""Run to import : """', 'options': 'run_names'}), "(description='Run to import : ', options=run_names)\n", (4760, 4811), False, 'from ipywidgets import widgets\n'), ((4823, 4859), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""import"""'}), "(description='import')\n", (4837, 4859), False, 'from ipywidgets import widgets\n'), ((5352, 5381), 'IPython.display.display', 'display.display', (['run_dropdown'], {}), '(run_dropdown)\n', (5367, 5381), False, 'from IPython import display\n'), ((5384, 5407), 'IPython.display.display', 'display.display', (['button'], {}), '(button)\n', (5399, 5407), False, 'from IPython import display\n'), ((5495, 5542), 'utilities.import_run', 'utilities.import_run', (['RUN_NAME'], {'folder': 'DATA_DIR'}), '(RUN_NAME, folder=DATA_DIR)\n', (5515, 5542), False, 'import utilities\n'), ((6168, 6223), 'tensorflow.train.SummaryWriter', 'tf.train.SummaryWriter', (['TENSORBOARD_DIR', 'vae.sess.graph'], {}), '(TENSORBOARD_DIR, vae.sess.graph)\n', (6190, 6223), True, 'import tensorflow as tf\n'), ((7028, 7087), 'voxelize.create_rotations', 'create_rotations', (['train', 'N_ROTATION_ANGLES', 'ROTATION_OFFSET'], {}), '(train, N_ROTATION_ANGLES, ROTATION_OFFSET)\n', (7044, 7087), False, 'from voxelize import 
create_rotations\n'), ((7096, 7138), 'voxelize.create_rotations', 'create_rotations', (['val', '(12)', 'ROTATION_OFFSET'], {}), '(val, 12, ROTATION_OFFSET)\n', (7112, 7138), False, 'from voxelize import create_rotations\n'), ((7225, 7252), 'voxelize.voxelize', 'voxelize', (['train', 'VOXEL_SIDE'], {}), '(train, VOXEL_SIDE)\n', (7233, 7252), False, 'from voxelize import voxelize\n'), ((7269, 7294), 'voxelize.voxelize', 'voxelize', (['val', 'VOXEL_SIDE'], {}), '(val, VOXEL_SIDE)\n', (7277, 7294), False, 'from voxelize import voxelize\n'), ((7640, 7657), 'voxelize.create_twins', 'create_twins', (['val'], {}), '(val)\n', (7652, 7657), False, 'from voxelize import create_twins\n'), ((7681, 7700), 'voxelize.create_twins', 'create_twins', (['train'], {}), '(train)\n', (7693, 7700), False, 'from voxelize import create_twins\n'), ((7787, 7814), 'voxelize.voxelize', 'voxelize', (['train', 'VOXEL_SIDE'], {}), '(train, VOXEL_SIDE)\n', (7795, 7814), False, 'from voxelize import voxelize\n'), ((7831, 7856), 'voxelize.voxelize', 'voxelize', (['val', 'VOXEL_SIDE'], {}), '(val, VOXEL_SIDE)\n', (7839, 7856), False, 'from voxelize import voxelize\n'), ((7880, 7913), 'voxelize.voxelize', 'voxelize', (['train_twins', 'VOXEL_SIDE'], {}), '(train_twins, VOXEL_SIDE)\n', (7888, 7913), False, 'from voxelize import voxelize\n'), ((7936, 7967), 'voxelize.voxelize', 'voxelize', (['val_twins', 'VOXEL_SIDE'], {}), '(val_twins, VOXEL_SIDE)\n', (7944, 7967), False, 'from voxelize import voxelize\n'), ((8050, 8061), 'os.getpid', 'os.getpid', ([], {}), '()\n', (8059, 8061), False, 'import os\n'), ((9074, 9124), 'autoencoder.batchmaker.Batchmaker', 'Batchmaker', (['val_vox', 'val_twins_vox', 'BATCH_SIZE', 'MP'], {}), '(val_vox, val_twins_vox, BATCH_SIZE, MP)\n', (9084, 9124), False, 'from autoencoder.batchmaker import Batchmaker, progress_bar\n'), ((11196, 11203), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11201, 11203), True, 'from timeit import default_timer as timer\n'), ((11350, 11404), 'autoencoder.batchmaker.Batchmaker', 'Batchmaker', (['train_vox', 'train_twins_vox', 'BATCH_SIZE', 'MP'], {}), '(train_vox, train_twins_vox, BATCH_SIZE, MP)\n', (11360, 11404), False, 'from autoencoder.batchmaker import Batchmaker, progress_bar\n'), ((12080, 12087), 'timeit.default_timer', 'timer', ([], {}), '()\n', (12085, 12087), True, 'from timeit import default_timer as timer\n'), ((12299, 12308), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (12306, 12308), True, 'from matplotlib import pyplot as plt\n'), ((12355, 12380), 'random.sample', 'random.sample', (['val_vox', '(5)'], {}), '(val_vox, 5)\n', (12368, 12380), False, 'import random\n'), ((12505, 12532), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 12)'}), '(figsize=(8, 12))\n', (12515, 12532), True, 'from matplotlib import pyplot as plt\n'), ((12917, 12935), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12933, 12935), True, 'from matplotlib import pyplot as plt\n'), ((13038, 13060), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'nx'], {}), '(-3, 3, nx)\n', (13049, 13060), True, 'import numpy as np\n'), ((13074, 13096), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'ny'], {}), '(-3, 3, ny)\n', (13085, 13096), True, 'import numpy as np\n'), ((13110, 13132), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'nz'], {}), '(-3, 3, nz)\n', (13121, 13132), True, 'import numpy as np\n'), ((13144, 13205), 'numpy.empty', 'np.empty', (['(VOXEL_SIDE * ny, VOXEL_SIDE * nx, VOXEL_SIDE * nz)'], {}), '((VOXEL_SIDE * ny, 
VOXEL_SIDE * nx, VOXEL_SIDE * nz))\n', (13152, 13205), True, 'import numpy as np\n'), ((14003, 14015), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14013, 14015), True, 'from matplotlib import pyplot as plt\n'), ((14018, 14027), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (14025, 14027), True, 'from matplotlib import pyplot as plt\n'), ((14035, 14046), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (14041, 14046), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((14256, 14286), 'voxelize.voxelize', 'voxelize', (['segments', 'VOXEL_SIDE'], {}), '(segments, VOXEL_SIDE)\n', (14264, 14286), False, 'from voxelize import voxelize\n'), ((14434, 14463), 'eigenvalues.eigenvalue_features', 'eigenvalue_features', (['segments'], {}), '(segments)\n', (14453, 14463), False, 'from eigenvalues import eigenvalue_features\n'), ((15009, 15071), 'voxelize.create_rotations', 'create_rotations', (['segments', 'N_ROTATION_ANGLES'], {'classes': 'classes'}), '(segments, N_ROTATION_ANGLES, classes=classes)\n', (15025, 15071), False, 'from voxelize import create_rotations\n'), ((15446, 15484), 'voxelize.voxelize', 'voxelize', (['rotated_segments', 'VOXEL_SIDE'], {}), '(rotated_segments, VOXEL_SIDE)\n', (15454, 15484), False, 'from voxelize import voxelize\n'), ((16177, 16204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (16187, 16204), True, 'from matplotlib import pyplot as plt\n'), ((16595, 16673), 'matplotlib.pyplot.legend', 'plt.legend', (['classes_set'], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)', 'ncol': 'ncol'}), "(classes_set, loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol)\n", (16605, 16673), True, 'from matplotlib import pyplot as plt\n'), ((16676, 16694), 'matplotlib.pyplot.title', 'plt.title', (['"""T-SNE"""'], {}), "('T-SNE')\n", (16685, 16694), True, 'from matplotlib import pyplot as plt\n'), ((16697, 16716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x_dim"""'], {}), "('x_dim')\n", (16707, 16716), True, 'from matplotlib import pyplot as plt\n'), ((16719, 16738), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y_dim"""'], {}), "('y_dim')\n", (16729, 16738), True, 'from matplotlib import pyplot as plt\n'), ((16741, 16751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16749, 16751), True, 'from matplotlib import pyplot as plt\n'), ((19729, 19773), 'voxelize.voxelize', 'voxelize', (['class_rotated_segments', 'VOXEL_SIDE'], {}), '(class_rotated_segments, VOXEL_SIDE)\n', (19737, 19773), False, 'from voxelize import voxelize\n'), ((20926, 20950), 'numpy.array', 'np.array', (['class_features'], {}), '(class_features)\n', (20934, 20950), True, 'import numpy as np\n'), ((21046, 21058), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21056, 21058), True, 'from matplotlib import pyplot as plt\n'), ((21201, 21211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21209, 21211), True, 'from matplotlib import pyplot as plt\n'), ((23550, 23599), 'itertools.cycle', 'cycle', (["['dodgerblue', 'gold', 'silver', 'tomato']"], {}), "(['dodgerblue', 'gold', 'silver', 'tomato'])\n", (23555, 23599), False, 'from itertools import cycle\n'), ((23602, 23614), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23612, 23614), True, 'from matplotlib import pyplot as plt\n'), ((23617, 23675), 'matplotlib.pyplot.title', 'plt.title', (['"""Average absolute value of features, per class"""'], {}), "('Average absolute value of features, per class')\n", (23626, 23675), 
True, 'from matplotlib import pyplot as plt\n'), ((23678, 23701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""feature #"""'], {}), "('feature #')\n", (23688, 23701), True, 'from matplotlib import pyplot as plt\n'), ((23704, 23735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""avg(abs(feature))"""'], {}), "('avg(abs(feature))')\n", (23714, 23735), True, 'from matplotlib import pyplot as plt\n'), ((24194, 24204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24202, 24204), True, 'from matplotlib import pyplot as plt\n'), ((24207, 24219), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24217, 24219), True, 'from matplotlib import pyplot as plt\n'), ((24225, 24237), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24235, 24237), True, 'from matplotlib import pyplot as plt\n'), ((24240, 24281), 'matplotlib.pyplot.title', 'plt.title', (['"""Average confusion, per class"""'], {}), "('Average confusion, per class')\n", (24249, 24281), True, 'from matplotlib import pyplot as plt\n'), ((24284, 24307), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""feature #"""'], {}), "('feature #')\n", (24294, 24307), True, 'from matplotlib import pyplot as plt\n'), ((24310, 24331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sigma^2"""'], {}), "('sigma^2')\n", (24320, 24331), True, 'from matplotlib import pyplot as plt\n'), ((26537, 26642), 'load_segments.write_features', 'write_features', (['ids', 'updated_features', 'updated_fnames'], {'filename': 'runs[run_index][features_file_index]'}), '(ids, updated_features, updated_fnames, filename=runs[\n run_index][features_file_index])\n', (26551, 26642), False, 'from load_segments import write_features\n'), ((26748, 26804), 'visuals.visuals_of_segments', 'visuals_of_segments', (['segments', 'ids'], {'features': 'features_nn'}), '(segments, ids, features=features_nn)\n', (26767, 26804), False, 'from visuals import visuals_of_segments\n'), ((26807, 26821), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (26819, 26821), False, 'from IPython.display import clear_output\n'), ((26906, 26970), 'visuals.visuals_of_matches', 'visuals_of_matches', (['matches', 'segments', 'ids'], {'features': 'features_nn'}), '(matches, segments, ids, features=features_nn)\n', (26924, 26970), False, 'from visuals import visuals_of_matches\n'), ((26973, 26987), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (26985, 26987), False, 'from IPython.display import clear_output\n'), ((27491, 27523), 'tensorflow.train.Saver', 'tf.train.Saver', (['name_to_var_dict'], {}), '(name_to_var_dict)\n', (27505, 27523), True, 'import tensorflow as tf\n'), ((4919, 4941), 'IPython.display.clear_output', 'display.clear_output', ([], {}), '()\n', (4939, 4941), False, 'from IPython import display\n'), ((5159, 5216), 'utilities.import_run', 'utilities.import_run', (['run_dropdown.value'], {'folder': 'DATA_DIR'}), '(run_dropdown.value, folder=DATA_DIR)\n', (5179, 5216), False, 'import utilities\n'), ((8463, 8504), 'numpy.loadtxt', 'np.loadtxt', (["(SAVE_DIR + 'val_cost_log.txt')"], {}), "(SAVE_DIR + 'val_cost_log.txt')\n", (8473, 8504), True, 'import numpy as np\n'), ((8662, 8679), 'voxelize.create_twins', 'create_twins', (['val'], {}), '(val)\n', (8674, 8679), False, 'from voxelize import create_twins\n'), ((8707, 8726), 'voxelize.create_twins', 'create_twins', (['train'], {}), '(train)\n', (8719, 8726), False, 'from voxelize import create_twins\n'), ((8824, 8851), 'voxelize.voxelize', 'voxelize', (['train', 'VOXEL_SIDE'], {}), '(train, 
VOXEL_SIDE)\n', (8832, 8851), False, 'from voxelize import voxelize\n'), ((8872, 8897), 'voxelize.voxelize', 'voxelize', (['val', 'VOXEL_SIDE'], {}), '(val, VOXEL_SIDE)\n', (8880, 8897), False, 'from voxelize import voxelize\n'), ((8925, 8958), 'voxelize.voxelize', 'voxelize', (['train_twins', 'VOXEL_SIDE'], {}), '(train_twins, VOXEL_SIDE)\n', (8933, 8958), False, 'from voxelize import voxelize\n'), ((8985, 9016), 'voxelize.voxelize', 'voxelize', (['val_twins', 'VOXEL_SIDE'], {}), '(val_twins, VOXEL_SIDE)\n', (8993, 9016), False, 'from voxelize import voxelize\n'), ((9130, 9161), 'numpy.mod', 'np.mod', (['step', 'VAL_EVERY_N_STEPS'], {}), '(step, VAL_EVERY_N_STEPS)\n', (9136, 9161), True, 'import numpy as np\n'), ((11213, 11220), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11218, 11220), True, 'from timeit import default_timer as timer\n'), ((11223, 11230), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11228, 11230), True, 'from timeit import default_timer as timer\n'), ((12396, 12430), 'numpy.reshape', 'np.reshape', (['sample', 'MP.INPUT_SHAPE'], {}), '(sample, MP.INPUT_SHAPE)\n', (12406, 12430), True, 'import numpy as np\n'), ((12566, 12606), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(n_samples * 2)', '(1)', '(2 * i + 1)'], {}), '(n_samples * 2, 1, 2 * i + 1)\n', (12577, 12606), True, 'from matplotlib import pyplot as plt\n'), ((12712, 12764), 'matplotlib.pyplot.title', 'plt.title', (['"""Top: val input - Bottom: Reconstruction"""'], {}), "('Top: val input - Bottom: Reconstruction')\n", (12721, 12764), True, 'from matplotlib import pyplot as plt\n'), ((12769, 12809), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(n_samples * 2)', '(1)', '(2 * i + 2)'], {}), '(n_samples * 2, 1, 2 * i + 2)\n', (12780, 12809), True, 'from matplotlib import pyplot as plt\n'), ((15815, 15835), 'os.path.exists', 'os.path.exists', (['dir_'], {}), '(dir_)\n', (15829, 15835), False, 'import os\n'), ((15841, 15858), 'os.makedirs', 'os.makedirs', (['dir_'], {}), '(dir_)\n', (15852, 15858), False, 'import os\n'), ((15949, 15975), 'tools.tsne.tsne', 'tsne', (['F'], {'err_threshold': '(1.0)'}), '(F, err_threshold=1.0)\n', (15953, 15975), False, 'from tools.tsne import tsne\n'), ((16226, 16239), 'itertools.cycle', 'cycle', (['cnames'], {}), '(cnames)\n', (16231, 16239), False, 'from itertools import cycle\n'), ((16401, 16441), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'c_', 'alpha': '(0.8)', 'lw': '(0)'}), '(x, y, c=c_, alpha=0.8, lw=0)\n', (16412, 16441), True, 'from matplotlib import pyplot as plt\n'), ((16909, 16918), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (16916, 16918), True, 'from matplotlib import pyplot as plt\n'), ((17172, 17250), 'matplotlib.pyplot.legend', 'plt.legend', (['classes_set'], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)', 'ncol': 'ncol'}), "(classes_set, loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol)\n", (17182, 17250), True, 'from matplotlib import pyplot as plt\n'), ((17255, 17273), 'matplotlib.pyplot.title', 'plt.title', (['"""T-SNE"""'], {}), "('T-SNE')\n", (17264, 17273), True, 'from matplotlib import pyplot as plt\n'), ((17278, 17297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x_dim"""'], {}), "('x_dim')\n", (17288, 17297), True, 'from matplotlib import pyplot as plt\n'), ((17302, 17321), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y_dim"""'], {}), "('y_dim')\n", (17312, 17321), True, 'from matplotlib import pyplot as plt\n'), ((18424, 18477), 'numpy.reshape', 'np.reshape', (['vox', '[VOXEL_SIDE, 
VOXEL_SIDE, VOXEL_SIDE]'], {}), '(vox, [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE])\n', (18434, 18477), True, 'import numpy as np\n'), ((18563, 18594), 'voxelize.unvoxelize', 'unvoxelize', (['(vox > RC_CONFIDENCE)'], {}), '(vox > RC_CONFIDENCE)\n', (18573, 18594), False, 'from voxelize import unvoxelize\n'), ((19003, 19124), 'visuals.visuals_of_matches', 'visuals_of_matches', (['one_to_one_matches', '(S_ + reconstruction)', '(I_ + reconstruction_ids)'], {'directory': 'dir_', 'oneview': 'ONEVIEW'}), '(one_to_one_matches, S_ + reconstruction, I_ +\n reconstruction_ids, directory=dir_, oneview=ONEVIEW)\n', (19021, 19124), False, 'from visuals import visuals_of_matches\n'), ((19121, 19135), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (19133, 19135), False, 'from IPython.display import clear_output\n'), ((19460, 19478), 'numpy.array', 'np.array', (['segments'], {}), '(segments)\n', (19468, 19478), True, 'import numpy as np\n'), ((20780, 20794), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (20792, 20794), False, 'from IPython.display import clear_output\n'), ((22214, 22226), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22224, 22226), True, 'from matplotlib import pyplot as plt\n'), ((22381, 22391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22389, 22391), True, 'from matplotlib import pyplot as plt\n'), ((22988, 23032), 'voxelize.voxelize', 'voxelize', (['class_rotated_segments', 'VOXEL_SIDE'], {}), '(class_rotated_segments, VOXEL_SIDE)\n', (22996, 23032), False, 'from voxelize import voxelize\n'), ((23168, 23192), 'numpy.array', 'np.array', (['class_features'], {}), '(class_features)\n', (23176, 23192), True, 'import numpy as np\n'), ((23238, 23250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23248, 23250), True, 'from matplotlib import pyplot as plt\n'), ((23399, 23409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23407, 23409), True, 'from matplotlib import pyplot as plt\n'), ((25828, 25852), 'numpy.concatenate', 'np.concatenate', (['[f, ftu]'], {}), '([f, ftu])\n', (25842, 25852), True, 'import numpy as np\n'), ((27720, 27753), 'pickle.dump', 'pickle.dump', (['MP', 'file'], {'protocol': '(2)'}), '(MP, file, protocol=2)\n', (27731, 27753), False, 'import pickle\n'), ((7428, 7459), 'numpy.reshape', 'np.reshape', (['vox', 'MP.INPUT_SHAPE'], {}), '(vox, MP.INPUT_SHAPE)\n', (7438, 7459), True, 'import numpy as np\n'), ((7495, 7526), 'numpy.reshape', 'np.reshape', (['vox', 'MP.INPUT_SHAPE'], {}), '(vox, MP.INPUT_SHAPE)\n', (7505, 7526), True, 'import numpy as np\n'), ((10246, 10301), 'numpy.savetxt', 'np.savetxt', (["(SAVE_DIR + 'val_cost_log.txt')", 'val_cost_log'], {}), "(SAVE_DIR + 'val_cost_log.txt', val_cost_log)\n", (10256, 10301), True, 'import numpy as np\n'), ((11495, 11502), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11500, 11502), True, 'from timeit import default_timer as timer\n'), ((11596, 11603), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11601, 11603), True, 'from timeit import default_timer as timer\n'), ((11795, 11802), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11800, 11802), True, 'from timeit import default_timer as timer\n'), ((11884, 11891), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11889, 11891), True, 'from timeit import default_timer as timer\n'), ((14490, 14512), 'numpy.isnan', 'np.isnan', (['features_eig'], {}), '(features_eig)\n', (14498, 14512), True, 'import numpy as np\n'), ((14684, 14718), 'numpy.reshape', 'np.reshape', (['sample', 
'MP.INPUT_SHAPE'], {}), '(sample, MP.INPUT_SHAPE)\n', (14694, 14718), True, 'import numpy as np\n'), ((15571, 15605), 'numpy.reshape', 'np.reshape', (['sample', 'MP.INPUT_SHAPE'], {}), '(sample, MP.INPUT_SHAPE)\n', (15581, 15605), True, 'import numpy as np\n'), ((16453, 16462), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16460, 16462), True, 'from matplotlib import pyplot as plt\n'), ((16480, 16489), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16487, 16489), True, 'from matplotlib import pyplot as plt\n'), ((16943, 16956), 'itertools.cycle', 'cycle', (['cnames'], {}), '(cnames)\n', (16948, 16956), False, 'from itertools import cycle\n'), ((17124, 17164), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'c_', 'alpha': '(0.2)', 'lw': '(0)'}), '(x, y, c=c_, alpha=0.2, lw=0)\n', (17135, 17164), True, 'from matplotlib import pyplot as plt\n'), ((17439, 17452), 'itertools.cycle', 'cycle', (['cnames'], {}), '(cnames)\n', (17444, 17452), False, 'from itertools import cycle\n'), ((17686, 17721), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'c_', 's': '(30)', 'lw': '(1)'}), '(x, y, c=c_, s=30, lw=1)\n', (17697, 17721), True, 'from matplotlib import pyplot as plt\n'), ((17938, 17984), 'matplotlib.pyplot.plot', 'plt.plot', (['line_x', 'line_y', '"""black"""'], {'linewidth': '(1)'}), "(line_x, line_y, 'black', linewidth=1)\n", (17946, 17984), True, 'from matplotlib import pyplot as plt\n'), ((18345, 18379), 'numpy.reshape', 'np.reshape', (['sample', 'MP.INPUT_SHAPE'], {}), '(sample, MP.INPUT_SHAPE)\n', (18355, 18379), True, 'import numpy as np\n'), ((19947, 20000), 'numpy.reshape', 'np.reshape', (['vox', '[VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE]'], {}), '(vox, [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE])\n', (19957, 20000), True, 'import numpy as np\n'), ((20102, 20133), 'voxelize.unvoxelize', 'unvoxelize', (['(vox > RC_CONFIDENCE)'], {}), '(vox > RC_CONFIDENCE)\n', (20112, 20133), False, 'from voxelize import unvoxelize\n'), ((20843, 20874), 'numpy.reshape', 'np.reshape', (['vox', 'MP.INPUT_SHAPE'], {}), '(vox, MP.INPUT_SHAPE)\n', (20853, 20874), True, 'import numpy as np\n'), ((21214, 21223), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21221, 21223), True, 'from matplotlib import pyplot as plt\n'), ((22017, 22035), 'numpy.array', 'np.array', (['segments'], {}), '(segments)\n', (22025, 22035), True, 'import numpy as np\n'), ((22072, 22093), 'numpy.array', 'np.array', (['features_nn'], {}), '(features_nn)\n', (22080, 22093), True, 'import numpy as np\n'), ((22131, 22153), 'numpy.array', 'np.array', (['confusion_nn'], {}), '(confusion_nn)\n', (22139, 22153), True, 'import numpy as np\n'), ((22727, 22745), 'numpy.array', 'np.array', (['segments'], {}), '(segments)\n', (22735, 22745), True, 'import numpy as np\n'), ((23952, 23973), 'numpy.array', 'np.array', (['features_nn'], {}), '(features_nn)\n', (23960, 23973), True, 'import numpy as np\n'), ((24549, 24571), 'numpy.array', 'np.array', (['confusion_nn'], {}), '(confusion_nn)\n', (24557, 24571), True, 'import numpy as np\n'), ((9956, 9980), 'os.path.exists', 'os.path.exists', (['SAVE_DIR'], {}), '(SAVE_DIR)\n', (9970, 9980), False, 'import os\n'), ((9994, 10015), 'os.makedirs', 'os.makedirs', (['SAVE_DIR'], {}), '(SAVE_DIR)\n', (10005, 10015), False, 'import os\n'), ((11838, 11871), 'autoencoder.batchmaker.progress_bar', 'progress_bar', (['training_batchmaker'], {}), '(training_batchmaker)\n', (11850, 11871), False, 'from autoencoder.batchmaker import Batchmaker, progress_bar\n'), ((13427, 13458), 
'numpy.zeros', 'np.zeros', (['([1] + MP.LATENT_SHAPE)'], {}), '([1] + MP.LATENT_SHAPE)\n', (13435, 13458), True, 'import numpy as np\n'), ((13978, 13992), 'numpy.max', 'np.max', (['canvas'], {}), '(canvas)\n', (13984, 13992), True, 'import numpy as np\n'), ((16763, 16772), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16770, 16772), True, 'from matplotlib import pyplot as plt\n'), ((19601, 19652), 'voxelize.create_rotations', 'create_rotations', (['class_segments', 'N_ROTATION_ANGLES'], {}), '(class_segments, N_ROTATION_ANGLES)\n', (19617, 19652), False, 'from voxelize import create_rotations\n'), ((19851, 19882), 'numpy.reshape', 'np.reshape', (['vox', 'MP.INPUT_SHAPE'], {}), '(vox, MP.INPUT_SHAPE)\n', (19861, 19882), True, 'import numpy as np\n'), ((21172, 21189), 'numpy.exp', 'np.exp', (['confusion'], {}), '(confusion)\n', (21178, 21189), True, 'import numpy as np\n'), ((22396, 22405), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22403, 22405), True, 'from matplotlib import pyplot as plt\n'), ((23083, 23114), 'numpy.reshape', 'np.reshape', (['vox', 'MP.INPUT_SHAPE'], {}), '(vox, MP.INPUT_SHAPE)\n', (23093, 23114), True, 'import numpy as np\n'), ((23414, 23423), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (23421, 23423), True, 'from matplotlib import pyplot as plt\n'), ((24010, 24032), 'numpy.abs', 'np.abs', (['class_features'], {}), '(class_features)\n', (24016, 24032), True, 'import numpy as np\n'), ((24110, 24132), 'numpy.abs', 'np.abs', (['class_features'], {}), '(class_features)\n', (24116, 24132), True, 'import numpy as np\n'), ((24608, 24631), 'numpy.exp', 'np.exp', (['class_confusion'], {}), '(class_confusion)\n', (24614, 24631), True, 'import numpy as np\n'), ((24709, 24732), 'numpy.exp', 'np.exp', (['class_confusion'], {}), '(class_confusion)\n', (24715, 24732), True, 'import numpy as np\n'), ((9509, 9537), 'autoencoder.batchmaker.progress_bar', 'progress_bar', (['val_batchmaker'], {}), '(val_batchmaker)\n', (9521, 9537), False, 'from autoencoder.batchmaker import Batchmaker, progress_bar\n'), ((10049, 10076), 'os.makedirs', 'os.makedirs', (['SAVE_DIR_NOVAL'], {}), '(SAVE_DIR_NOVAL)\n', (10060, 10076), False, 'import os\n'), ((10204, 10237), 'pickle.dump', 'pickle.dump', (['MP', 'file'], {'protocol': '(2)'}), '(MP, file, protocol=2)\n', (10215, 10237), False, 'import pickle\n'), ((22344, 22367), 'numpy.exp', 'np.exp', (['class_confusion'], {}), '(class_confusion)\n', (22350, 22367), True, 'import numpy as np\n'), ((22872, 22923), 'voxelize.create_rotations', 'create_rotations', (['class_segments', 'N_ROTATION_ANGLES'], {}), '(class_segments, N_ROTATION_ANGLES)\n', (22888, 22923), False, 'from voxelize import create_rotations\n'), ((23368, 23385), 'numpy.exp', 'np.exp', (['confusion'], {}), '(confusion)\n', (23374, 23385), True, 'import numpy as np\n'), ((18010, 18019), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (18017, 18019), True, 'from matplotlib import pyplot as plt\n')]
|
import numpy as np
from rllab.core.serializable import Serializable
from .replay_buffer import ReplayBuffer
class SimpleReplayBuffer(ReplayBuffer, Serializable):
def __init__(self, env_spec, max_replay_buffer_size):
super(SimpleReplayBuffer, self).__init__()
Serializable.quick_init(self, locals())
max_replay_buffer_size = int(max_replay_buffer_size)
self._env_spec = env_spec
self._observation_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._max_buffer_size = max_replay_buffer_size
self._observations = np.zeros((max_replay_buffer_size,
self._observation_dim))
# It's a bit memory inefficient to save the observations twice,
# but it makes the code *much* easier since you no longer have to
# worry about termination conditions.
self._next_obs = np.zeros((max_replay_buffer_size,
self._observation_dim))
self._actions = np.zeros((max_replay_buffer_size, self._action_dim))
self._rewards = np.zeros(max_replay_buffer_size)
# self._terminals[i] = a terminal was received at time i
self._terminals = np.zeros(max_replay_buffer_size, dtype='uint8')
self._top = 0
self._size = 0
def add_sample(self, observation, action, reward, terminal,
next_observation, **kwargs):
self._observations[self._top] = observation
self._actions[self._top] = action
self._rewards[self._top] = reward
self._terminals[self._top] = terminal
self._next_obs[self._top] = next_observation
self._advance()
def terminate_episode(self):
pass
def _advance(self):
self._top = (self._top + 1) % self._max_buffer_size
if self._size < self._max_buffer_size:
self._size += 1
def random_batch(self, batch_size):
indices = np.random.randint(0, self._size, batch_size)
return {
'observations': self._observations[indices],
'actions': self._actions[indices],
'rewards': self._rewards[indices],
'terminals': self._terminals[indices],
'next_observations': self._next_obs[indices]
}
@property
def size(self):
return self._size
def __getstate__(self):
buffer_state = super(SimpleReplayBuffer, self).__getstate__()
buffer_state.update({
'observations': self._observations.tobytes(),
'actions': self._actions.tobytes(),
'rewards': self._rewards.tobytes(),
'terminals': self._terminals.tobytes(),
'next_observations': self._next_obs.tobytes(),
'top': self._top,
'size': self._size,
})
return buffer_state
def __setstate__(self, buffer_state):
super(SimpleReplayBuffer, self).__setstate__(buffer_state)
flat_obs = np.fromstring(buffer_state['observations'])
flat_next_obs = np.fromstring(buffer_state['next_observations'])
flat_actions = np.fromstring(buffer_state['actions'])
flat_reward = np.fromstring(buffer_state['rewards'])
flat_terminals = np.fromstring(
buffer_state['terminals'], dtype=np.uint8)
self._observations = flat_obs.reshape(self._max_buffer_size, -1)
self._next_obs = flat_next_obs.reshape(self._max_buffer_size, -1)
self._actions = flat_actions.reshape(self._max_buffer_size, -1)
self._rewards = flat_reward.reshape(self._max_buffer_size)
self._terminals = flat_terminals.reshape(self._max_buffer_size)
self._top = buffer_state['top']
self._size = buffer_state['size']
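# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). `_dummy_spec` is a
# hypothetical stand-in exposing only the two attributes the buffer actually
# reads from `env_spec`; in practice a real rllab env_spec would be passed in.
if __name__ == '__main__':
    from types import SimpleNamespace
    _dummy_spec = SimpleNamespace(
        observation_space=SimpleNamespace(flat_dim=4),
        action_space=SimpleNamespace(flat_dim=2))
    buf = SimpleReplayBuffer(_dummy_spec, max_replay_buffer_size=1000)
    for _ in range(10):
        buf.add_sample(observation=np.random.randn(4),
                       action=np.random.randn(2),
                       reward=0.0,
                       terminal=False,
                       next_observation=np.random.randn(4))
    batch = buf.random_batch(batch_size=5)
    print(batch['observations'].shape)  # -> (5, 4)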
|
[
"numpy.random.randint",
"numpy.zeros",
"numpy.fromstring"
] |
[((630, 687), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, self._observation_dim)'], {}), '((max_replay_buffer_size, self._observation_dim))\n', (638, 687), True, 'import numpy as np\n'), ((944, 1001), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, self._observation_dim)'], {}), '((max_replay_buffer_size, self._observation_dim))\n', (952, 1001), True, 'import numpy as np\n'), ((1061, 1113), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, self._action_dim)'], {}), '((max_replay_buffer_size, self._action_dim))\n', (1069, 1113), True, 'import numpy as np\n'), ((1138, 1170), 'numpy.zeros', 'np.zeros', (['max_replay_buffer_size'], {}), '(max_replay_buffer_size)\n', (1146, 1170), True, 'import numpy as np\n'), ((1262, 1309), 'numpy.zeros', 'np.zeros', (['max_replay_buffer_size'], {'dtype': '"""uint8"""'}), "(max_replay_buffer_size, dtype='uint8')\n", (1270, 1309), True, 'import numpy as np\n'), ((1994, 2038), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self._size', 'batch_size'], {}), '(0, self._size, batch_size)\n', (2011, 2038), True, 'import numpy as np\n'), ((3011, 3054), 'numpy.fromstring', 'np.fromstring', (["buffer_state['observations']"], {}), "(buffer_state['observations'])\n", (3024, 3054), True, 'import numpy as np\n'), ((3079, 3127), 'numpy.fromstring', 'np.fromstring', (["buffer_state['next_observations']"], {}), "(buffer_state['next_observations'])\n", (3092, 3127), True, 'import numpy as np\n'), ((3151, 3189), 'numpy.fromstring', 'np.fromstring', (["buffer_state['actions']"], {}), "(buffer_state['actions'])\n", (3164, 3189), True, 'import numpy as np\n'), ((3212, 3250), 'numpy.fromstring', 'np.fromstring', (["buffer_state['rewards']"], {}), "(buffer_state['rewards'])\n", (3225, 3250), True, 'import numpy as np\n'), ((3276, 3332), 'numpy.fromstring', 'np.fromstring', (["buffer_state['terminals']"], {'dtype': 'np.uint8'}), "(buffer_state['terminals'], dtype=np.uint8)\n", (3289, 3332), True, 'import numpy as np\n')]
|
import tensorflow as tf
import os
import sklearn.metrics
import numpy as np
import sys
import time
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
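def _average_gradients_demo():
    """Illustrative sketch only, not part of the original framework: two
    hypothetical "towers" compute gradients of their own losses w.r.t. a shared
    variable; `average_gradients` averages them and a single update is applied.
    This helper is never called by the framework."""
    w = tf.get_variable('demo_w', shape=[3], initializer=tf.zeros_initializer())
    opt = tf.train.GradientDescentOptimizer(0.1)
    tower_grads = []
    for tower_id in range(2):
        # each tower computes gradients for its own loss on the shared variable
        loss = tf.reduce_sum(tf.square(w - float(tower_id)))
        tower_grads.append(opt.compute_gradients(loss, var_list=[w]))
    # element-wise average across towers, then one update on the shared variable
    return opt.apply_gradients(average_gradients(tower_grads))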
class re_model:
"""Basic model class, which contains data input and tensorflow graphs, should be inherited"""
def __init__(self, train_data_loader, batch_size, max_length=120):
"""
        class construction function; model initialization
Args:
train_data_loader: a `file_data_loader object`, which could be `npy_data_loader`
or `json_file_data_loader`
batch_size: how many scopes/instances are included in one batch
            max_length: max sentence length; all sentences are padded/truncated to
                        the same length (this is handled in `data_loader`)
Returns:
None
"""
self.word = tf.placeholder(dtype=tf.int32, shape=[None, max_length], name='word')
self.pos1 = tf.placeholder(dtype=tf.int32, shape=[None, max_length], name='pos1')
self.pos2 = tf.placeholder(dtype=tf.int32, shape=[None, max_length], name='pos2')
self.label = tf.placeholder(dtype=tf.int32, shape=[batch_size], name='label')
self.ins_label = tf.placeholder(dtype=tf.int32, shape=[None], name='ins_label')
self.length = tf.placeholder(dtype=tf.int32, shape=[None], name='length')
self.scope = tf.placeholder(dtype=tf.int32, shape=[batch_size, 2], name='scope')
self.train_data_loader = train_data_loader
self.rel_tot = train_data_loader.rel_tot
self.word_vec_mat = train_data_loader.word_vec_mat
def loss(self):
"""training loss, should be overrided in the subclasses"""
raise NotImplementedError
def train_logit(self):
"""training logit, should be overrided in the subclasses"""
raise NotImplementedError
def test_logit(self):
"""test logit, should be overrided in the subclasses"""
raise NotImplementedError
class re_framework:
"""the basic training framework, does all the training and test staffs"""
MODE_BAG = 0 # Train and test the model at bag level.
MODE_INS = 1 # Train and test the model at instance level
def __init__(self, train_data_loader, test_data_loader, max_length=120, batch_size=160):
"""
        class construction function; framework initialization
Args:
train_data_loader: a `file_data_loader object`, which could be `npy_data_loader`
or `json_file_data_loader`
test_data_loader: similar as the `train_data_loader`
            max_length: max sentence length; all sentences are padded/truncated to
                        the same length (this is handled in `data_loader`)
batch_size: how many scopes/instances are included in one batch
Returns:
None
"""
self.train_data_loader = train_data_loader
self.test_data_loader = test_data_loader
self.sess = None # default graph session
def one_step_multi_models(self, sess, models, batch_data_gen, run_array, return_label=True):
"""
        run multiple models and multiple ops/tensors in a single session call
        Args:
            sess: tf.Session() that is going to run
            models: a list; this function supports multi-model training
            batch_data_gen: `data_loader` to generate batch data
            run_array: a list containing all the ops/tensors to run
            return_label: boolean argument. if it is `True`, the training labels
                          will be returned as well
        Returns:
            result: a tuple/list containing the results
"""
feed_dict = {}
batch_label = []
for model in models:
batch_data = batch_data_gen.next_batch(batch_data_gen.batch_size // len(models))
feed_dict.update({
model.word: batch_data['word'],
model.pos1: batch_data['pos1'],
model.pos2: batch_data['pos2'],
model.label: batch_data['rel'],
model.ins_label: batch_data['ins_rel'],
model.scope: batch_data['scope'],
model.length: batch_data['length'],
})
if 'mask' in batch_data and hasattr(model, "mask"): # mask data is used in PCNN models
feed_dict.update({model.mask: batch_data['mask']})
batch_label.append(batch_data['rel'])
result = sess.run(run_array, feed_dict)
batch_label = np.concatenate(batch_label)
if return_label:
result += [batch_label]
return result
def one_step(self, sess, model, batch_data, run_array):
"""
        run one model and multiple ops/tensors in a single session call; usually used for testing
        Args:
            sess: tf.Session() that is going to run
            model: one model, inherited from `re_model`
            batch_data: a dict containing the batch data
            run_array: a list containing all the ops/tensors to run
        Returns:
            result: a tuple/list containing the results
"""
feed_dict = {
model.word: batch_data['word'],
model.pos1: batch_data['pos1'],
model.pos2: batch_data['pos2'],
model.label: batch_data['rel'],
model.ins_label: batch_data['ins_rel'],
model.scope: batch_data['scope'],
model.length: batch_data['length'],
}
if 'mask' in batch_data and hasattr(model, "mask"):
feed_dict.update({model.mask: batch_data['mask']})
result = sess.run(run_array, feed_dict)
return result
def train(self, model, model_name, ckpt_dir='./checkpoint', summary_dir='./summary',
test_result_dir='./test_result', learning_rate=0.5, max_epoch=60,
pretrain_model=None, test_epoch=1, optimizer=tf.train.GradientDescentOptimizer,
gpu_nums=1, not_best_stop=20):
"""
training function
Args:
            model: `re_model` that is going to be trained
model_name: a string, to identify models, affecting checkpoint saving
ckpt_dir: checkpoint saving directory
summary_dir: for tensorboard use, to save summary files
test_result_dir: directory to store the final results
learning_rate: learning rate of optimizer
max_epoch: how many epochs you want to train
pretrain_model: a string, containing the checkpoint model path and model name
e.g. ./checkpoint/nyt_pcnn_one
            test_epoch: how often to test the model. default is `1`, which means
                        the model is tested after every training epoch
optimizer: training optimizer, default is `tf.train.GradientDescentOptimizer`
gpu_nums: how many gpus you want to use when training
            not_best_stop: if the best result has not improved for `not_best_stop`
                           consecutive test epochs, training is stopped
Returns:
None
"""
assert(self.train_data_loader.batch_size % gpu_nums == 0)
print("Start training...")
# Init
config = tf.ConfigProto(allow_soft_placement=True) # allow cpu computing if there is no gpu available
self.sess = tf.Session(config=config)
optimizer = optimizer(learning_rate)
# Multi GPUs
tower_grads = []
tower_models = []
for gpu_id in range(gpu_nums):
with tf.device("/gpu:%d" % gpu_id):
with tf.name_scope("gpu_%d" % gpu_id):
cur_model = model(self.train_data_loader, self.train_data_loader.batch_size // gpu_nums, self.train_data_loader.max_length)
tower_grads.append(optimizer.compute_gradients(cur_model.loss()))
tower_models.append(cur_model)
tf.add_to_collection("loss", cur_model.loss())
tf.add_to_collection("train_logit", cur_model.train_logit())
loss_collection = tf.get_collection("loss")
loss = tf.add_n(loss_collection) / len(loss_collection)
train_logit_collection = tf.get_collection("train_logit")
train_logit = tf.concat(train_logit_collection, 0)
grads = average_gradients(tower_grads)
train_op = optimizer.apply_gradients(grads)
summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
"""supporting check the scalars on tensorboard"""
_output = tf.cast(tf.argmax(train_logit, -1), tf.int32) # predicted output
_tot_acc = tf.reduce_mean(tf.cast(tf.equal(_output, tower_models[0].label), tf.float32)) # accuracy including N/A relations
_not_na_acc = tf.reduce_mean(tf.cast(tf.logical_and(tf.equal(_output, tower_models[0].label), tf.not_equal(tower_models[0].label, 0)), tf.float32)) # accuracy not including N/A relations
tf.summary.scalar('tot_acc', _tot_acc)
tf.summary.scalar('not_na_acc', _not_na_acc)
# Saver
saver = tf.train.Saver(max_to_keep=None)
if pretrain_model is None:
self.sess.run(tf.global_variables_initializer())
else:
saver.restore(self.sess, pretrain_model)
# Training
merged_summary = tf.summary.merge_all() # merge all scalars and histograms
best_metric = 0
best_prec = None
best_recall = None
not_best_count = 0 # Stop training after several epochs without improvement.
global_cnt = 0 # for record summary steps
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
while True:
time_start = time.time()
try:
summa, iter_loss, iter_logit, _train_op, iter_label = self.one_step_multi_models(self.sess, tower_models, self.train_data_loader, [merged_summary, loss, train_logit, train_op])
except StopIteration:
break
summary_writer.add_summary(summa, global_cnt)
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
if (epoch + 1) % test_epoch == 0:
metric = self.test(model)
if metric > best_metric:
best_metric = metric
best_prec = self.cur_prec
best_recall = self.cur_recall
print("Best model, storing...")
if not os.path.isdir(ckpt_dir):
os.mkdir(ckpt_dir)
path = saver.save(self.sess, os.path.join(ckpt_dir, model_name))
print("Finish storing")
not_best_count = 0
else:
not_best_count += 1
if not_best_count >= not_best_stop:
break
global_cnt += 1
print("######")
print("Finish training " + model_name)
print("Best epoch auc = %f" % (best_metric))
        if best_prec is not None and best_recall is not None:
if not os.path.isdir(test_result_dir):
os.mkdir(test_result_dir)
np.save(os.path.join(test_result_dir, model_name + "_x.npy"), best_recall)
np.save(os.path.join(test_result_dir, model_name + "_y.npy"), best_prec)
def test(self, model, ckpt=None, return_result=False, mode=MODE_BAG):
"""
test function, to evaluate model
Args:
            model: a `re_model` class which has not been instantiated
            ckpt: path to a pretrained checkpoint model, if there is one
            return_result: if True, the predicted result will be returned as well
            mode: evaluation mode; currently only bag level is supported
Returns:
auc: if return_result is True, return AUC and predicted labels,
else return AUC only
"""
if mode == re_framework.MODE_BAG:
return self.__test_bag__(model, ckpt=ckpt, return_result=return_result)
elif mode == re_framework.MODE_INS:
raise NotImplementedError
else:
raise NotImplementedError
def __test_bag__(self, model, ckpt=None, return_result=False):
"""
test function at bag level
Args:
            model: a `re_model` class which has not been instantiated
            ckpt: path to a pretrained checkpoint model, if there is one
            return_result: if True, the predicted result will be returned as well
Returns:
auc: if return_result is True, return AUC and predicted labels,
else return AUC only
"""
print("Testing...")
        if self.sess is None:
self.sess = tf.Session()
model = model(self.test_data_loader, self.test_data_loader.batch_size, self.test_data_loader.max_length)
        if ckpt is not None:
saver = tf.train.Saver()
saver.restore(self.sess, ckpt)
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
entpair_tot = 0
test_result = []
pred_result = []
for i, batch_data in enumerate(self.test_data_loader):
iter_logit = self.one_step(self.sess, model, batch_data, [model.test_logit()])[0]
iter_output = iter_logit.argmax(-1)
iter_correct = (iter_output == batch_data['rel']).sum()
iter_not_na_correct = np.logical_and(iter_output == batch_data['rel'], batch_data['rel'] != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += batch_data['rel'].shape[0]
tot_not_na += (batch_data['rel'] != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[TEST] step %d | not NA accuracy: %f, accuracy: %f\r" % (i, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
for idx in range(len(iter_logit)):
for rel in range(1, self.test_data_loader.rel_tot):
test_result.append({'score': iter_logit[idx][rel], 'flag': batch_data['multi_rel'][idx][rel]})
if batch_data['entpair'][idx] != "None#None":
pred_result.append({'score': float(iter_logit[idx][rel]), 'entpair': batch_data['entpair'][idx].encode('utf-8'), 'relation': rel})
entpair_tot += 1
sorted_test_result = sorted(test_result, key=lambda x: x['score'])
prec = []
recall = []
correct = 0
for i, item in enumerate(sorted_test_result[::-1]):
correct += item['flag']
prec.append(float(correct) / (i + 1))
recall.append(float(correct) / self.test_data_loader.relfact_tot)
auc = sklearn.metrics.auc(x=recall, y=prec)
print("\n[TEST] auc: {}".format(auc))
print("Finish testing")
self.cur_prec = prec
self.cur_recall = recall
if not return_result:
return auc
else:
return (auc, pred_result)
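# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The data loader
# arguments and the model class `my_pcnn_att_model` are hypothetical; only the
# `re_framework` constructor and the `train`/`test` signatures above are real.
#
#   train_loader = json_file_data_loader('nyt/train.json', ...)  # hypothetical args
#   test_loader = json_file_data_loader('nyt/test.json', ...)    # hypothetical args
#   fw = re_framework(train_loader, test_loader, max_length=120, batch_size=160)
#   fw.train(my_pcnn_att_model, model_name='nyt_pcnn_att', max_epoch=60, gpu_nums=1)
#   auc = fw.test(my_pcnn_att_model, ckpt='./checkpoint/nyt_pcnn_att')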
|
[
"os.mkdir",
"tensorflow.get_collection",
"tensorflow.ConfigProto",
"sys.stdout.flush",
"os.path.join",
"tensorflow.add_n",
"tensorflow.not_equal",
"tensorflow.concat",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.name_scope",
"tensorflow.summary.merge_all",
"tensorflow.equal",
"tensorflow.summary.scalar",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"tensorflow.expand_dims",
"numpy.concatenate",
"numpy.logical_and",
"tensorflow.argmax",
"os.path.isdir",
"tensorflow.device",
"time.time"
] |
[((1196, 1227), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (1205, 1227), True, 'import tensorflow as tf\n'), ((1243, 1266), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (1257, 1266), True, 'import tensorflow as tf\n'), ((2308, 2377), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, max_length]', 'name': '"""word"""'}), "(dtype=tf.int32, shape=[None, max_length], name='word')\n", (2322, 2377), True, 'import tensorflow as tf\n'), ((2398, 2467), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, max_length]', 'name': '"""pos1"""'}), "(dtype=tf.int32, shape=[None, max_length], name='pos1')\n", (2412, 2467), True, 'import tensorflow as tf\n'), ((2488, 2557), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, max_length]', 'name': '"""pos2"""'}), "(dtype=tf.int32, shape=[None, max_length], name='pos2')\n", (2502, 2557), True, 'import tensorflow as tf\n'), ((2579, 2643), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[batch_size]', 'name': '"""label"""'}), "(dtype=tf.int32, shape=[batch_size], name='label')\n", (2593, 2643), True, 'import tensorflow as tf\n'), ((2669, 2731), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""ins_label"""'}), "(dtype=tf.int32, shape=[None], name='ins_label')\n", (2683, 2731), True, 'import tensorflow as tf\n'), ((2754, 2813), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""length"""'}), "(dtype=tf.int32, shape=[None], name='length')\n", (2768, 2813), True, 'import tensorflow as tf\n'), ((2835, 2902), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[batch_size, 2]', 'name': '"""scope"""'}), "(dtype=tf.int32, shape=[batch_size, 2], name='scope')\n", (2849, 2902), True, 'import tensorflow as tf\n'), ((6006, 6033), 'numpy.concatenate', 'np.concatenate', (['batch_label'], {}), '(batch_label)\n', (6020, 6033), True, 'import numpy as np\n'), ((8766, 8807), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (8780, 8807), True, 'import tensorflow as tf\n'), ((8880, 8905), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (8890, 8905), True, 'import tensorflow as tf\n'), ((9630, 9655), 'tensorflow.get_collection', 'tf.get_collection', (['"""loss"""'], {}), "('loss')\n", (9647, 9655), True, 'import tensorflow as tf\n'), ((9753, 9785), 'tensorflow.get_collection', 'tf.get_collection', (['"""train_logit"""'], {}), "('train_logit')\n", (9770, 9785), True, 'import tensorflow as tf\n'), ((9808, 9844), 'tensorflow.concat', 'tf.concat', (['train_logit_collection', '(0)'], {}), '(train_logit_collection, 0)\n', (9817, 9844), True, 'import tensorflow as tf\n'), ((9970, 10021), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_dir', 'self.sess.graph'], {}), '(summary_dir, self.sess.graph)\n', (9991, 10021), True, 'import tensorflow as tf\n'), ((10500, 10538), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""tot_acc"""', '_tot_acc'], {}), "('tot_acc', _tot_acc)\n", (10517, 10538), True, 'import tensorflow as tf\n'), ((10547, 10591), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""not_na_acc"""', '_not_na_acc'], {}), "('not_na_acc', _not_na_acc)\n", (10564, 10591), True, 
'import tensorflow as tf\n'), ((10633, 10665), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (10647, 10665), True, 'import tensorflow as tf\n'), ((10874, 10896), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (10894, 10896), True, 'import tensorflow as tf\n'), ((997, 1017), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (1011, 1017), True, 'import tensorflow as tf\n'), ((9671, 9696), 'tensorflow.add_n', 'tf.add_n', (['loss_collection'], {}), '(loss_collection)\n', (9679, 9696), True, 'import tensorflow as tf\n'), ((10107, 10133), 'tensorflow.argmax', 'tf.argmax', (['train_logit', '(-1)'], {}), '(train_logit, -1)\n', (10116, 10133), True, 'import tensorflow as tf\n'), ((15307, 15319), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (15317, 15319), True, 'import tensorflow as tf\n'), ((15482, 15498), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (15496, 15498), True, 'import tensorflow as tf\n'), ((9088, 9117), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % gpu_id)"], {}), "('/gpu:%d' % gpu_id)\n", (9097, 9117), True, 'import tensorflow as tf\n'), ((10206, 10246), 'tensorflow.equal', 'tf.equal', (['_output', 'tower_models[0].label'], {}), '(_output, tower_models[0].label)\n', (10214, 10246), True, 'import tensorflow as tf\n'), ((10727, 10760), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10758, 10760), True, 'import tensorflow as tf\n'), ((11449, 11460), 'time.time', 'time.time', ([], {}), '()\n', (11458, 11460), False, 'import time\n'), ((11832, 11843), 'time.time', 'time.time', ([], {}), '()\n', (11841, 11843), False, 'import time\n'), ((13661, 13691), 'os.path.isdir', 'os.path.isdir', (['test_result_dir'], {}), '(test_result_dir)\n', (13674, 13691), False, 'import os\n'), ((13709, 13734), 'os.mkdir', 'os.mkdir', (['test_result_dir'], {}), '(test_result_dir)\n', (13717, 13734), False, 'import os\n'), ((13755, 13807), 'os.path.join', 'os.path.join', (['test_result_dir', "(model_name + '_x.npy')"], {}), "(test_result_dir, model_name + '_x.npy')\n", (13767, 13807), False, 'import os\n'), ((13842, 13894), 'os.path.join', 'os.path.join', (['test_result_dir', "(model_name + '_y.npy')"], {}), "(test_result_dir, model_name + '_y.npy')\n", (13854, 13894), False, 'import os\n'), ((16511, 16529), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16527, 16529), False, 'import sys\n'), ((9140, 9172), 'tensorflow.name_scope', 'tf.name_scope', (["('gpu_%d' % gpu_id)"], {}), "('gpu_%d' % gpu_id)\n", (9153, 9172), True, 'import tensorflow as tf\n'), ((10356, 10396), 'tensorflow.equal', 'tf.equal', (['_output', 'tower_models[0].label'], {}), '(_output, tower_models[0].label)\n', (10364, 10396), True, 'import tensorflow as tf\n'), ((10398, 10436), 'tensorflow.not_equal', 'tf.not_equal', (['tower_models[0].label', '(0)'], {}), '(tower_models[0].label, 0)\n', (10410, 10436), True, 'import tensorflow as tf\n'), ((12598, 12616), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12614, 12616), False, 'import sys\n'), ((16027, 16099), 'numpy.logical_and', 'np.logical_and', (["(iter_output == batch_data['rel'])", "(batch_data['rel'] != 0)"], {}), "(iter_output == batch_data['rel'], batch_data['rel'] != 0)\n", (16041, 16099), True, 'import numpy as np\n'), ((12071, 12129), 'numpy.logical_and', 'np.logical_and', (['(iter_output == iter_label)', '(iter_label != 0)'], {}), '(iter_output == iter_label, iter_label != 
0)\n', (12085, 12129), True, 'import numpy as np\n'), ((13053, 13076), 'os.path.isdir', 'os.path.isdir', (['ckpt_dir'], {}), '(ckpt_dir)\n', (13066, 13076), False, 'import os\n'), ((13102, 13120), 'os.mkdir', 'os.mkdir', (['ckpt_dir'], {}), '(ckpt_dir)\n', (13110, 13120), False, 'import os\n'), ((13170, 13204), 'os.path.join', 'os.path.join', (['ckpt_dir', 'model_name'], {}), '(ckpt_dir, model_name)\n', (13182, 13204), False, 'import os\n')]
|
import argparse
import cv2
import glog
import json
import numpy as np
import os
from tqdm import tqdm
from pycocotools import coco as coco_loader
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
description='Merge annotations in COCO representation into one'
)
parser.add_argument(
'--input-dir', required=True,
help='directory with input annotations in *.json format'
)
parser.add_argument(
'--output', required=True,
help='output annotation file'
)
parser.add_argument(
'--images-map', required=True,
help='file with map of datasets and its images path (json format)'
)
parser.add_argument(
'--draw', default=None,
help='directory to save images with its segments. By default is disabled'
)
return parser.parse_args()
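# Example invocation (illustrative only; the script file name is hypothetical):
#   python merge_coco_annotations.py \
#       --input-dir ./annotations \
#       --output ./merged_instances.json \
#       --images-map ./images_map.json \
#       --draw ./debug_draw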
def draw_bboxes_and_masks(img, annotations, input_dir):
""" Draw bounding boxes and contours of masks on image and save it.
    :param img: image entry from the annotation (its 'file_name' field is used)
    :param annotations: list of bounding boxes and segments on the image
    :param input_dir: base directory of the input images; drawn copies are saved
                      in a 'draw' subdirectory next to each image
"""
input_file = os.path.join(input_dir, img['file_name'])
save_path = os.path.join(os.path.dirname(input_file), 'draw')
if not os.path.exists(save_path):
os.makedirs(save_path)
output_file = os.path.join(save_path, os.path.basename(input_file))
img = cv2.imread(input_file)
yellow = (0, 255, 255)
red = (0, 0, 255)
for ann in annotations:
cat_id = str(ann['category_id'])
bbox = [int(ann['bbox'][0]), int(ann['bbox'][1]),
int(ann['bbox'][0] + ann['bbox'][2]), int(ann['bbox'][1] + ann['bbox'][3])]
masks = ann['segmentation']
for mask in masks:
i = 0
points = []
while i < len(mask):
x = int(mask[i])
y = int(mask[i + 1])
points.append([x, y])
i += 2
img = cv2.polylines(img, np.int32([points]), True, yellow, 1)
img = cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), red, 1)
x = bbox[0] + (bbox[2] - bbox[0]) // 4
y = bbox[1] + (bbox[3] - bbox[1]) // 2
cv2.putText(img, cat_id, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, red, 1)
cv2.imwrite(output_file, img)
def is_json_file(filename):
""" Check if file has a *.json type (just check an extension)
:param filename: name of file
:return: True if file has a *.json type
"""
    return filename.lower().endswith('.json')
def get_anno_list(directory):
""" Get list of files in directory
:param directory: directory to parse
:return: list of files in the directory in format [name1.ext, name2.ext, ...]
"""
files = []
for file in os.listdir(directory):
if is_json_file(file):
files.append(file)
return files
def pretty_string(name_list):
""" Make a string from list of some names
:param name_list: list of names [name#0, name#1, ...]
:return: string in format:
-name#0
-name#1
"""
output_string = ''
for s in name_list:
output_string += '\n -' + s
return output_string
def common_path_images(images_map):
""" Define which part of paths to images is common for all of them
:param images_map: dictionary of matched datasets and its images paths. Format:
{
'dataset1.json': '/path/to/images/for/dataset1',
'dataset2.json': '/path/to/images/for/dataset2',
...
}
:return: string with a common part of the images paths
"""
paths = [path for _, path in images_map.items()]
return os.path.commonpath(paths)
def merge_annotations(directory, anno_list, images_map):
""" Merge several annotations in COCO representation into one
:param directory: base directory where is saved all datasets which is needed to merge
:param anno_list: list of annotations to merge. [dataset1.json, dataset2.json, ...]
:param images_map: dictionary of matched datasets and its images paths
:return: merged annotation, list of used annotations and list of skipped annotations
"""
merged_anno = None
first_step = True
reference_classes = None
common_path = common_path_images(images_map)
valid_annos = []
skipped_annos = []
for anno_file in tqdm(anno_list, 'Parsing annotations...'):
if anno_file not in images_map:
glog.warning('Dataset <{}> is absent in \'images-map\' file and will be ignored!'.format(anno_file))
skipped_annos.append(anno_file)
continue
img_prefix = images_map[anno_file].replace(common_path, '')
        # startswith avoids an IndexError when the prefix is empty
        # (i.e. when a dataset's image path equals the common path)
        if img_prefix.startswith('/'):
            img_prefix = img_prefix.replace('/', '', 1)
with open(os.path.join(directory, anno_file)) as f:
data = json.load(f)
for img in data['images']:
img['file_name'] = os.path.join(img_prefix, img['file_name'])
if first_step:
merged_anno = data
reference_classes = data['categories']
first_step = False
else:
classes = data['categories']
if classes != reference_classes:
glog.warning('Categories field in dataset <{}> has another classes and will be ignored!'
.format(anno_file))
skipped_annos.append(anno_file)
continue
add_img_id = len(merged_anno['images'])
add_obj_id = len(merged_anno['annotations'])
for img in data['images']:
img['id'] += add_img_id
for ann in data['annotations']:
ann['id'] += add_obj_id
ann['image_id'] += add_img_id
merged_anno['images'].extend(data['images'])
merged_anno['annotations'].extend(data['annotations'])
valid_annos.append(anno_file)
return merged_anno, valid_annos, skipped_annos
def main():
args = parse_args()
anno_list = get_anno_list(args.input_dir)
with open(args.images_map) as f:
images_map = json.load(f)
result_annotation, valid_annos, skipped_annos = merge_annotations(args.input_dir, anno_list, images_map)
assert len(valid_annos) > 0, 'The result annotation is empty! Please check parameters and your \'images_map\' file.'
# Save created annotation
glog.info('Saving annotation...')
with open(args.output, 'w') as outfile:
json.dump(result_annotation, outfile)
glog.info('Annotation was saved in <{}> successfully'.format(args.output))
# Try to load created annotation via cocoapi
try:
glog.info('Trying to load annotation <{}> via cocoapi...'.format(args.output))
coco_loader.COCO(args.output)
except:
raise
else:
glog.info('Annotation in COCO representation <{}> successfully created from: {}'
.format(args.output, pretty_string(valid_annos)))
if len(skipped_annos) > 0:
glog.info('The next annotations were skipped: {}'.format(pretty_string(skipped_annos)))
if args.draw:
for img in tqdm(result_annotation['images'], 'Drawing and saving images...'):
ann_for_img = []
for ann in result_annotation['annotations']:
if ann['image_id'] == img['id']:
ann_for_img.append(ann)
draw_bboxes_and_masks(img, ann_for_img, args.draw)
if __name__ == "__main__":
main()
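# Example invocation (hypothetical script name and paths, for illustration only):
#   python merge_coco_annotations.py --input-dir ./annotations --output merged.json \
#       --images-map images_map.json --draw ./draw_output
# images_map.json maps each input *.json file to the directory holding its images.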
|
[
"json.dump",
"tqdm.tqdm",
"json.load",
"cv2.putText",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.basename",
"os.path.commonpath",
"cv2.imwrite",
"glog.info",
"os.path.dirname",
"os.path.exists",
"cv2.imread",
"pycocotools.coco.COCO",
"numpy.int32",
"cv2.rectangle",
"os.path.join",
"os.listdir"
] |
[((223, 316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Merge annotations in COCO representation into one"""'}), "(description=\n 'Merge annotations in COCO representation into one')\n", (246, 316), False, 'import argparse\n'), ((1246, 1287), 'os.path.join', 'os.path.join', (['input_dir', "img['file_name']"], {}), "(input_dir, img['file_name'])\n", (1258, 1287), False, 'import os\n'), ((1506, 1528), 'cv2.imread', 'cv2.imread', (['input_file'], {}), '(input_file)\n', (1516, 1528), False, 'import cv2\n'), ((2407, 2436), 'cv2.imwrite', 'cv2.imwrite', (['output_file', 'img'], {}), '(output_file, img)\n', (2418, 2436), False, 'import cv2\n'), ((2917, 2938), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (2927, 2938), False, 'import os\n'), ((3905, 3930), 'os.path.commonpath', 'os.path.commonpath', (['paths'], {}), '(paths)\n', (3923, 3930), False, 'import os\n'), ((4594, 4635), 'tqdm.tqdm', 'tqdm', (['anno_list', '"""Parsing annotations..."""'], {}), "(anno_list, 'Parsing annotations...')\n", (4598, 4635), False, 'from tqdm import tqdm\n'), ((6717, 6750), 'glog.info', 'glog.info', (['"""Saving annotation..."""'], {}), "('Saving annotation...')\n", (6726, 6750), False, 'import glog\n'), ((1317, 1344), 'os.path.dirname', 'os.path.dirname', (['input_file'], {}), '(input_file)\n', (1332, 1344), False, 'import os\n'), ((1365, 1390), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (1379, 1390), False, 'import os\n'), ((1400, 1422), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (1411, 1422), False, 'import os\n'), ((1465, 1493), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (1481, 1493), False, 'import os\n'), ((2158, 2224), 'cv2.rectangle', 'cv2.rectangle', (['img', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', 'red', '(1)'], {}), '(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), red, 1)\n', (2171, 2224), False, 'import cv2\n'), ((2327, 2402), 'cv2.putText', 'cv2.putText', (['img', 'cat_id', '(x, y)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(1)', 'red', '(1)'], {}), '(img, cat_id, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, red, 1)\n', (2338, 2402), False, 'import cv2\n'), ((6437, 6449), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6446, 6449), False, 'import json\n'), ((6803, 6840), 'json.dump', 'json.dump', (['result_annotation', 'outfile'], {}), '(result_annotation, outfile)\n', (6812, 6840), False, 'import json\n'), ((7074, 7103), 'pycocotools.coco.COCO', 'coco_loader.COCO', (['args.output'], {}), '(args.output)\n', (7090, 7103), True, 'from pycocotools import coco as coco_loader\n'), ((7470, 7535), 'tqdm.tqdm', 'tqdm', (["result_annotation['images']", '"""Drawing and saving images..."""'], {}), "(result_annotation['images'], 'Drawing and saving images...')\n", (7474, 7535), False, 'from tqdm import tqdm\n'), ((5091, 5103), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5100, 5103), False, 'import json\n'), ((2106, 2124), 'numpy.int32', 'np.int32', (['[points]'], {}), '([points])\n', (2114, 2124), True, 'import numpy as np\n'), ((5030, 5064), 'os.path.join', 'os.path.join', (['directory', 'anno_file'], {}), '(directory, anno_file)\n', (5042, 5064), False, 'import os\n'), ((5178, 5220), 'os.path.join', 'os.path.join', (['img_prefix', "img['file_name']"], {}), "(img_prefix, img['file_name'])\n", (5190, 5220), False, 'import os\n')]
|
from TexGen.Core import *
import numpy as np
from os import path
def export_weave_vtu(filename, weave, domain, max_dim_nvox, round_vox_up=True, export_orientation=True):
""" Exporting weave to vtu, to be read by pumapy
:param filename: filepath and name
:type filename: string
:param weave: weave object, as defined in TexGen
:type weave: CTextile or child class of CTextile
:param domain: domain size object, as defined in TexGen
:type domain: CDomainPlanes
:param max_dim_nvox: number of voxels to add in the largest domain dimension
:type max_dim_nvox: int
:param round_vox_up: for the shorter dimensions, round number of voxels up (for +/-1 vox)
:type round_vox_up: bool
:param export_orientation: specify whether to export orientation
:type export_orientation: bool
:return: filename of weave exported (input filename + dimensions)
:rtype: string
"""
if not isinstance(domain, CDomainPlanes):
raise Exception("Domain needs to be of CDomainPlanes type.")
if not isinstance(filename, str):
raise Exception("Filename has to be a string.")
if not path.exists(path.split(filename)[0]):
raise Exception("Directory " + path.split(filename)[0] + " not found.")
min_bounds = XYZ()
max_bounds = XYZ()
domain.GetBoxLimits(min_bounds, max_bounds)
weave.AssignDomain(CDomainPlanes(min_bounds, max_bounds))
lengths = np.array([max_bounds.x - min_bounds.x, max_bounds.y - min_bounds.y, max_bounds.z - min_bounds.z])
max_len = np.max(lengths)
mask = np.zeros(3, dtype=bool)
mask[lengths == max_len] = True
voxel_length = max_len / float(max_dim_nvox)
nvox = np.zeros(3, dtype=int)
nvox[mask] = max_dim_nvox
nvox[~mask] = (lengths[~mask] / voxel_length).astype(int) # truncates
rem = np.zeros(3, dtype=float)
rem[~mask] = lengths[~mask] - voxel_length * nvox[~mask]
if round_vox_up:
rem[~mask] = voxel_length - rem[~mask]
max_bounds = XYZ(max_bounds.x + rem[0],
max_bounds.y + rem[1],
max_bounds.z + rem[2])
nvox[~mask] += 1
else:
max_bounds = XYZ(max_bounds.x - rem[0], max_bounds.y - rem[1], max_bounds.z - rem[2])
weave.AssignDomain(CDomainPlanes(min_bounds, max_bounds))
mesh = CRectangularVoxelMesh()
print("Exporting " + filename + ".vtu ... ", end='')
filename += "_" + str(nvox[0]) + "_" + str(nvox[1]) + "_" + str(nvox[2])
mesh.SaveVoxelMesh(weave, filename, int(nvox[0]), int(nvox[1]), int(nvox[2]), False, export_orientation,
MATERIAL_CONTINUUM, 0, VTU_EXPORT)
print("Done")
return filename
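# Minimal usage sketch (added example; assumes a weave object already built with
# TexGen, e.g. via CTextileWeave2D -- the domain bounds below are illustrative):
#   domain = CDomainPlanes(XYZ(0.0, 0.0, 0.0), XYZ(10.0, 10.0, 2.0))
#   out_name = export_weave_vtu("output/my_weave", weave, domain, max_dim_nvox=200)
#   # out_name is the base name with the voxel counts appended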
|
[
"numpy.zeros",
"numpy.max",
"os.path.split",
"numpy.array"
] |
[((1491, 1593), 'numpy.array', 'np.array', (['[max_bounds.x - min_bounds.x, max_bounds.y - min_bounds.y, max_bounds.z -\n min_bounds.z]'], {}), '([max_bounds.x - min_bounds.x, max_bounds.y - min_bounds.y, \n max_bounds.z - min_bounds.z])\n', (1499, 1593), True, 'import numpy as np\n'), ((1603, 1618), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (1609, 1618), True, 'import numpy as np\n'), ((1631, 1654), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'bool'}), '(3, dtype=bool)\n', (1639, 1654), True, 'import numpy as np\n'), ((1753, 1775), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (1761, 1775), True, 'import numpy as np\n'), ((1892, 1916), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (1900, 1916), True, 'import numpy as np\n'), ((1212, 1232), 'os.path.split', 'path.split', (['filename'], {}), '(filename)\n', (1222, 1232), False, 'from os import path\n'), ((1277, 1297), 'os.path.split', 'path.split', (['filename'], {}), '(filename)\n', (1287, 1297), False, 'from os import path\n')]
|
import cv2
import numpy as np
# Script that simulates the game, to test a single frame.
# Pressing Esc closes it; pressing Space toggles between a black screen and a frame.
print("Press space to toggle between frame and black screen")
print("Press a key to select the resolution:")
print("A->1280x720 zoom = 75%")
print("B->1280x720 zoom = 80%")
print("C->1280x720 zoom = 85%")
print("D->1280x720 zoom = 90%")
print("E->1280x720 zoom = 95%")
print("F->1280x720 zoom = 100%")
print("G->1280x720 zoom = 125%")
print("H->1752x712 zoom = 75%")
print("I->1752x712 zoom = 80%")
print("J->1752x712 zoom = 85%")
print("K->1752x712 zoom = 90%")
print("L->1752x712 zoom = 95%")
print("M->1752x712 zoom = 100%")
print("N->1752x712 zoom = 105%")
print("O->1752x712 zoom = 110%")
print("P->1752x712 zoom = 115%")
print("Q->1752x712 zoom = 120%")
print("R->1752x712 zoom = 125%")
print("S->1920x1080 zoom = 90%")
cv2.imshow('Stardew Valley', np.zeros([20,20]))
k = cv2.waitKey()
if k == 65 or k == 97: # letra a
img = cv2.imread('1280x720_zoom75.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 66 or k == 98: # letra b
img = cv2.imread('1280x720_zoom80.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 67 or k == 99: # letra c
img = cv2.imread('1280x720_zoom85.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 68 or k == 100: # letra d
img = cv2.imread('1280x720_zoom90.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 69 or k == 101: # letra e
img = cv2.imread('1280x720_zoom95.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 70 or k == 102: # letra f
img = cv2.imread('1280x720_zoom100.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 71 or k == 103: # letra g
img = cv2.imread('1280x720_zoom125.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 72 or k == 104: # letra h
img = cv2.imread('1752x712_zoom75.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 73 or k == 105: # letra i
img = cv2.imread('1752x712_zoom80.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 74 or k == 106: # letra j
img = cv2.imread('1752x712_zoom85.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 75 or k == 107: # letra k
img = cv2.imread('1752x712_zoom90.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 76 or k == 108: # letra l
img = cv2.imread('1752x712_zoom95.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 77 or k == 109: # letra m
img = cv2.imread('1752x712_zoom100.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 78 or k == 110: # letra n
img = cv2.imread('1752x712_zoom105.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 79 or k == 111: # letra o
img = cv2.imread('1752x712_zoom110.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 80 or k == 112: # letra p
img = cv2.imread('1752x712_zoom115.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 81 or k == 113: # letra q
img = cv2.imread('1752x712_zoom120.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 82 or k == 114: # letra r
img = cv2.imread('1752x712_zoom125.png')
img2 = np.zeros(img.shape)
dsp = img
if k == 83 or k == 115: # letra s
img = cv2.imread('1920x1080_zoom90.png')
img2 = np.zeros(img.shape)
dsp = img
cv2.imshow('Stardew Valley', img)
while True:
k = cv2.waitKey(0)
if k == 32:
if dsp is img:
dsp = img2
elif dsp is img2:
dsp = img
cv2.imshow('Stardew Valley', dsp)
elif k == 27:
cv2.destroyAllWindows()
break
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.imread",
"cv2.imshow"
] |
[((939, 952), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (950, 952), False, 'import cv2\n'), ((3298, 3331), 'cv2.imshow', 'cv2.imshow', (['"""Stardew Valley"""', 'img'], {}), "('Stardew Valley', img)\n", (3308, 3331), False, 'import cv2\n'), ((916, 934), 'numpy.zeros', 'np.zeros', (['[20, 20]'], {}), '([20, 20])\n', (924, 934), True, 'import numpy as np\n'), ((996, 1029), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom75.png"""'], {}), "('1280x720_zoom75.png')\n", (1006, 1029), False, 'import cv2\n'), ((1041, 1060), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1049, 1060), True, 'import numpy as np\n'), ((1118, 1151), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom80.png"""'], {}), "('1280x720_zoom80.png')\n", (1128, 1151), False, 'import cv2\n'), ((1163, 1182), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1171, 1182), True, 'import numpy as np\n'), ((1240, 1273), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom85.png"""'], {}), "('1280x720_zoom85.png')\n", (1250, 1273), False, 'import cv2\n'), ((1285, 1304), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1293, 1304), True, 'import numpy as np\n'), ((1363, 1396), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom90.png"""'], {}), "('1280x720_zoom90.png')\n", (1373, 1396), False, 'import cv2\n'), ((1408, 1427), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1416, 1427), True, 'import numpy as np\n'), ((1486, 1519), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom95.png"""'], {}), "('1280x720_zoom95.png')\n", (1496, 1519), False, 'import cv2\n'), ((1531, 1550), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1539, 1550), True, 'import numpy as np\n'), ((1609, 1643), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom100.png"""'], {}), "('1280x720_zoom100.png')\n", (1619, 1643), False, 'import cv2\n'), ((1655, 1674), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1663, 1674), True, 'import numpy as np\n'), ((1733, 1767), 'cv2.imread', 'cv2.imread', (['"""1280x720_zoom125.png"""'], {}), "('1280x720_zoom125.png')\n", (1743, 1767), False, 'import cv2\n'), ((1779, 1798), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1787, 1798), True, 'import numpy as np\n'), ((1857, 1890), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom75.png"""'], {}), "('1752x712_zoom75.png')\n", (1867, 1890), False, 'import cv2\n'), ((1902, 1921), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1910, 1921), True, 'import numpy as np\n'), ((1980, 2013), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom80.png"""'], {}), "('1752x712_zoom80.png')\n", (1990, 2013), False, 'import cv2\n'), ((2025, 2044), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2033, 2044), True, 'import numpy as np\n'), ((2103, 2136), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom85.png"""'], {}), "('1752x712_zoom85.png')\n", (2113, 2136), False, 'import cv2\n'), ((2148, 2167), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2156, 2167), True, 'import numpy as np\n'), ((2226, 2259), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom90.png"""'], {}), "('1752x712_zoom90.png')\n", (2236, 2259), False, 'import cv2\n'), ((2271, 2290), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2279, 2290), True, 'import numpy as np\n'), ((2349, 2382), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom95.png"""'], {}), "('1752x712_zoom95.png')\n", (2359, 2382), False, 'import cv2\n'), ((2394, 2413), 'numpy.zeros', 'np.zeros', 
(['img.shape'], {}), '(img.shape)\n', (2402, 2413), True, 'import numpy as np\n'), ((2472, 2506), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom100.png"""'], {}), "('1752x712_zoom100.png')\n", (2482, 2506), False, 'import cv2\n'), ((2518, 2537), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2526, 2537), True, 'import numpy as np\n'), ((2596, 2630), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom105.png"""'], {}), "('1752x712_zoom105.png')\n", (2606, 2630), False, 'import cv2\n'), ((2642, 2661), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2650, 2661), True, 'import numpy as np\n'), ((2720, 2754), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom110.png"""'], {}), "('1752x712_zoom110.png')\n", (2730, 2754), False, 'import cv2\n'), ((2766, 2785), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2774, 2785), True, 'import numpy as np\n'), ((2844, 2878), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom115.png"""'], {}), "('1752x712_zoom115.png')\n", (2854, 2878), False, 'import cv2\n'), ((2890, 2909), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (2898, 2909), True, 'import numpy as np\n'), ((2968, 3002), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom120.png"""'], {}), "('1752x712_zoom120.png')\n", (2978, 3002), False, 'import cv2\n'), ((3014, 3033), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (3022, 3033), True, 'import numpy as np\n'), ((3092, 3126), 'cv2.imread', 'cv2.imread', (['"""1752x712_zoom125.png"""'], {}), "('1752x712_zoom125.png')\n", (3102, 3126), False, 'import cv2\n'), ((3138, 3157), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (3146, 3157), True, 'import numpy as np\n'), ((3216, 3250), 'cv2.imread', 'cv2.imread', (['"""1920x1080_zoom90.png"""'], {}), "('1920x1080_zoom90.png')\n", (3226, 3250), False, 'import cv2\n'), ((3262, 3281), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (3270, 3281), True, 'import numpy as np\n'), ((3352, 3366), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3363, 3366), False, 'import cv2\n'), ((3485, 3518), 'cv2.imshow', 'cv2.imshow', (['"""Stardew Valley"""', 'dsp'], {}), "('Stardew Valley', dsp)\n", (3495, 3518), False, 'import cv2\n'), ((3545, 3568), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3566, 3568), False, 'import cv2\n')]
|
import numpy as np
def conv(f, g):
def h(x):
"""Input x has to be equidistant!
"""
# If the support of f or g extends outside x,
# we have to evaluate the functions also outside x
# to get the values of the convolution for all x.
n = len(x)
d = x[1] - x[0]
x_ext = np.concatenate([x[-n:] - n * d, x, x[:n] + n * d])
m = len(x_ext)
x_ext_tiled = np.tile(x_ext, (m, 1))
distance_matrix = x_ext_tiled - x_ext_tiled.T
res = np.sum(g(-distance_matrix) * np.tile(f(x_ext), (m, 1)), axis=1) * d
return res[n:-n]
return h
from scipy.signal import fftconvolve
def fconv(f, g):
def h(x):
"""Input x has to be equidistant!
"""
# Do some trickery to evaluate the convolution at the desired x-values.
n = len(x)
d = x[1] - x[0]
x_ext = np.concatenate([x[-n // 2 :] - n * d, x, x[: n // 2] + n * d])
res = fftconvolve(f(x_ext), g(x_ext), mode="full") * (x_ext[1] - x_ext[0])
return np.interp(x, x_ext * 2, res[::2])
return h
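if __name__ == "__main__":
    # Added self-check (not part of the original module): convolve two standard
    # Gaussians with both implementations; the results should agree closely and
    # approximate a Gaussian with variance 2.
    def gauss(t):
        return np.exp(-t ** 2 / 2) / np.sqrt(2 * np.pi)

    x = np.linspace(-10, 10, 401)
    h_direct = conv(gauss, gauss)(x)
    h_fft = fconv(gauss, gauss)(x)
    print(np.max(np.abs(h_direct - h_fft)))  # expected to be small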
|
[
"numpy.interp",
"numpy.tile",
"numpy.concatenate"
] |
[((336, 386), 'numpy.concatenate', 'np.concatenate', (['[x[-n:] - n * d, x, x[:n] + n * d]'], {}), '([x[-n:] - n * d, x, x[:n] + n * d])\n', (350, 386), True, 'import numpy as np\n'), ((433, 455), 'numpy.tile', 'np.tile', (['x_ext', '(m, 1)'], {}), '(x_ext, (m, 1))\n', (440, 455), True, 'import numpy as np\n'), ((898, 958), 'numpy.concatenate', 'np.concatenate', (['[x[-n // 2:] - n * d, x, x[:n // 2] + n * d]'], {}), '([x[-n // 2:] - n * d, x, x[:n // 2] + n * d])\n', (912, 958), True, 'import numpy as np\n'), ((1059, 1092), 'numpy.interp', 'np.interp', (['x', '(x_ext * 2)', 'res[::2]'], {}), '(x, x_ext * 2, res[::2])\n', (1068, 1092), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np
import rospy
import time
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped, PointStamped
from tutoriales_basicos.msg import Histogram
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Int16
from std_msgs.msg import Float32
BURGER_MAX_LIN_VEL = 0.22*.7
BURGER_MAX_ANG_VEL = 2.84
#import matplotlib.pyplot as plt
def min(c1,c2):
s = 4
return(np.amin([(c1 - c2),(c1 - c2 - s),(c1 - c2 + s)]))
class LaserSub:
def __init__(self):
self.sub_l_0 = rospy.Subscriber("/tb3_0/scan", LaserScan, self.scan_callback, queue_size=1)
self.pub_H = rospy.Publisher("/tb3_0/Histogram", Histogram, queue_size=10)
self.r = 0.3
self.s = 0.3
        self.alfa = 4  # sector size: 4 degrees
self.a = 5
self.b = 1
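        # a and b weight the per-reading obstacle term (a - b*d^2) accumulated in each
        # sector of the polar histogram, similar in spirit to the VFH method.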
self.H = np.zeros(90)
self.Hp = list()
#def steeringCallback(self,data):
# if self.Hp[int(data.steering/4)] < 1:
# twist = Twist()
# twist.linear.x = BURGER_MAX_LIN_VEL; twist.linear.y = 0.0; twist.linear.z = 0.0
# #print(twist.linear.x)
# twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = data.steering*Kp
# self.pub.publish(twist)
# else:
# for k in range(90):
# if self.Hp[k] < 1:
# if k == 0:
# gmin = 5*min(k,int(data.steering/4)) + 2*min(k,int(data.yaw/4))
# gpast = gmin
# orientation = k
# else:
# gmin = 5*min(k,int(data.steering/4)) + 2*min(k,int(data.yaw/4))
# if gmin < gpast:
# gpast = gmin
# orientation = k
def scan_callback(self,data):
        # Store the scan data in the histogram
        #print(data.ranges)
        self.H = np.zeros(90)  # create a vector of 90 sectors
        size = np.size(data.ranges)  # number of range readings (360)
        for beta in range(size):  # iterate over all 360 beams
            #print(data.ranges[beta])
            if data.ranges[beta] > 2:  # if the distance is greater than 2
                d = 0
                #print(beta, d)
            else:
                d = data.ranges[beta]  # otherwise keep the measured distance
                #print(beta, d)
            k = int((beta)/self.alfa)  # k is the sector currently being updated
if beta<120 or (beta>240 and beta<360):
#if beta>(beta - np.arcsin((self.r + self.s)/d)) and beta<(beta + np.arcsin((self.r + self.s)/d)):
previus = self.H[k]
self.H[k]=(previus + (15*(self.a-self.b*d*d)))
msg_to_send = Histogram()
msg_to_send.Histogram = self.H
self.pub_H.publish(msg_to_send)
def main():
try:
rospy.init_node('LaseSub')
LaserSub() # constructor creates publishers / subscribers
rospy.spin()
except rospy.ROSInterruptException:
pass
if __name__=="__main__":
main()
|
[
"numpy.size",
"rospy.Subscriber",
"numpy.amin",
"tutoriales_basicos.msg.Histogram",
"numpy.zeros",
"rospy.Publisher",
"rospy.init_node",
"rospy.spin"
] |
[((558, 602), 'numpy.amin', 'np.amin', (['[c1 - c2, c1 - c2 - s, c1 - c2 + s]'], {}), '([c1 - c2, c1 - c2 - s, c1 - c2 + s])\n', (565, 602), True, 'import numpy as np\n'), ((673, 749), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/tb3_0/scan"""', 'LaserScan', 'self.scan_callback'], {'queue_size': '(1)'}), "('/tb3_0/scan', LaserScan, self.scan_callback, queue_size=1)\n", (689, 749), False, 'import rospy\n'), ((771, 832), 'rospy.Publisher', 'rospy.Publisher', (['"""/tb3_0/Histogram"""', 'Histogram'], {'queue_size': '(10)'}), "('/tb3_0/Histogram', Histogram, queue_size=10)\n", (786, 832), False, 'import rospy\n'), ((981, 993), 'numpy.zeros', 'np.zeros', (['(90)'], {}), '(90)\n', (989, 993), True, 'import numpy as np\n'), ((2040, 2052), 'numpy.zeros', 'np.zeros', (['(90)'], {}), '(90)\n', (2048, 2052), True, 'import numpy as np\n'), ((2097, 2117), 'numpy.size', 'np.size', (['data.ranges'], {}), '(data.ranges)\n', (2104, 2117), True, 'import numpy as np\n'), ((2875, 2886), 'tutoriales_basicos.msg.Histogram', 'Histogram', ([], {}), '()\n', (2884, 2886), False, 'from tutoriales_basicos.msg import Histogram\n'), ((3000, 3026), 'rospy.init_node', 'rospy.init_node', (['"""LaseSub"""'], {}), "('LaseSub')\n", (3015, 3026), False, 'import rospy\n'), ((3102, 3114), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3112, 3114), False, 'import rospy\n')]
|
import numpy as np
from collections import deque
import gym
import os
import gym.spaces as spaces
import cv2
import datalib.trajectories as trajectories
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class SavedClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self._flat_reward = 0
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
self._flat_reward = reward
return np.sign(reward)
class SavedPositiveClippedRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self._flat_reward = 0
def reward(self, reward):
"""Bin reward to {+1, 0} by its sign."""
self._flat_reward = reward
return max(np.sign(reward), 0)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, is_monte, is_pong):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.is_monte = is_monte
self.is_pong = is_pong
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
if self.is_monte:
frame[0:23, ...] = 0
if self.is_pong:
frame[0:23, :] = [144, 72, 17]
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class PenalizeDying(gym.Wrapper):
"""
{'ale.lives': 6}
"""
def __init__(self, env, penalty):
gym.Wrapper.__init__(self, env)
self.lives = 6
self.penalty = penalty
def reset(self):
ob = self.env.reset()
self.lives = 6
return ob
def step(self, action):
ob, reward, done, info = self.env.step(action)
new_lives = info['ale.lives']
if new_lives < self.lives:
self.lives = new_lives
reward -= self.penalty
# done = True
return ob, reward, done, info
class StepPenalty(gym.Wrapper):
def __init__(self, env, step_penalty):
gym.Wrapper.__init__(self, env)
self.env = env
self.step_penalty = step_penalty
def reset(self, **kwargs):
        return self.env.reset(**kwargs)
def step(self, action):
ob, reward, done, info = self.env.step(action)
reward = reward - self.step_penalty
return ob, reward, done, info
class LimitLength(gym.Wrapper):
def __init__(self, env, k, timeout_penalty):
gym.Wrapper.__init__(self, env)
self.k = k
self.timeout_penalty = timeout_penalty
def reset(self):
# This assumes that reset() will really reset the env.
# If the underlying env tries to be smart about reset
# (e.g. end-of-life), the assumption doesn't hold.
ob = self.env.reset()
self.cnt = 0
return ob
def step(self, action):
ob, r, done, info = self.env.step(action)
self.cnt += 1
if self.cnt == self.k:
done = True
r -= self.timeout_penalty
return ob, r, done, info
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
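    # Added note: materialize only when feeding the model, e.g. np.asarray(obs),
    # so the stacked frames stay shared inside the replay buffer until then.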
class RandomStartingWrapper(gym.Wrapper):
def __init__(self, env, config):
super(RandomStartingWrapper, self).__init__(env)
self.config = config
self.df = trajectories.load_trajectories_by_score(
trajectory_dir=config['traj_dir'],
max_score_cutoff=config['max_score_cutoff'],
min_score_cutoff=config['min_score_cutoff'],
project_level_gamma=config['gamma'],
clip_rewards=config['clip_rewards'],
frameskip=config['frameskip'],
process_lost_lifes=config['process_lost_lifes'],
)
self.random_state = None
def seed(self, seed=None):
self.env.seed(seed)
if seed is None:
raise ValueError("Seed cannot be None in case of random starting env wrapper")
self.random_state = np.random.RandomState(seed)
def reset(self, **kwargs):
super(RandomStartingWrapper, self).reset(**kwargs)
wrapped_env = self.env.env
if self.random_state is None:
raise ValueError("Uninitialized random state")
idx = self.random_state.randint(1, len(self.df))
# We have to kick out the first frame, because we don't have observation before it
while self.df.iloc[idx].frame == 0:
idx = self.random_state.randint(1, len(self.df))
# print("Will restore state no = {}".format(idx))
traj = self.df.iloc[idx].trajectory
state_idx = self.df.iloc[idx].frame
state_fname = os.path.join(self.config['stat_dir'], "{}/{:07d}.npy".format(traj, state_idx))
state = np.load(state_fname)
img_fname = os.path.join(self.config['img_dir'], "{}/{:07d}.png".format(traj, state_idx - 1))
img = cv2.imread(img_fname, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
wrapped_env.restore_full_state(state)
# wrapped_env._get_obs() returns observation before state change, so we have to fix it ourselves
# https://github.com/openai/gym/issues/715
return img
class DoomMyWayHomeActionWrapper(gym.ActionWrapper):
"""
Doom my way home env (see doom.env.doom_my_way_home). has action space:
actions = [0] * 43
actions[13] = 0 # MOVE_FORWARD
actions[14] = 1 # TURN_RIGHT
actions[15] = 0 # TURN_LEFT
We need to change that to match the scheme I have implemnted while gathering data
(and to much the stoachastic policy reinforecement learning formulation).
We want to map e.g.:
7 -> [1, 1, 1]
5 -> [1, 0, 1]
(but ofc the relevant array starts from place 13)
"""
def __init__(self, env):
super(DoomMyWayHomeActionWrapper, self).__init__(env)
self.action_space = gym.spaces.Discrete(8)
def action(self, action):
a = action
move_fwd = a % 2
a = a // 2
turn_right = a % 2
a = a // 2
turn_left = a % 2
a = a // 2
assert a == 0
out = [0] * 43
out[0] = move_fwd
out[1] = turn_right
out[2] = turn_left
return out
def make_state_restoring_atari(env_id, config):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = RandomStartingWrapper(env, config)
env = MaxAndSkipEnv(env, skip=4)
return env
def make_atari(env_id):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False, is_monte=False, is_pong=False, save_original_reward=False, only_positive_rewards=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, is_monte, is_pong)
if scale:
env = ScaledFloatFrame(env)
if only_positive_rewards:
env = SavedPositiveClippedRewardEnv(env)
elif clip_rewards:
if save_original_reward:
env = SavedClipRewardEnv(env)
else:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def wrap_doom_deepmind_like(env, clip_rewards=True, frame_stack=False, scale=False, save_original_reward=False):
env = WarpFrame(env, is_monte=False, is_pong=False)
env = DoomMyWayHomeActionWrapper(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
if save_original_reward:
env = SavedClipRewardEnv(env)
else:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
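# Example composition (added sketch; the game id below is illustrative):
#   env = make_atari("BreakoutNoFrameskip-v4")
#   env = wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=True)
#   obs = env.reset()  # a LazyFrames object of shape (84, 84, 4)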
|
[
"gym.Wrapper.__init__",
"datalib.trajectories.load_trajectories_by_score",
"numpy.load",
"gym.make",
"gym.RewardWrapper.__init__",
"numpy.concatenate",
"cv2.cvtColor",
"collections.deque",
"numpy.zeros",
"cv2.ocl.setUseOpenCL",
"numpy.random.RandomState",
"gym.ObservationWrapper.__init__",
"gym.spaces.Discrete",
"cv2.imread",
"numpy.array",
"gym.spaces.Box",
"numpy.sign",
"cv2.resize"
] |
[((155, 182), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (175, 182), False, 'import cv2\n'), ((13187, 13203), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (13195, 13203), False, 'import gym\n'), ((13377, 13393), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (13385, 13393), False, 'import gym\n'), ((397, 428), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (417, 428), False, 'import gym\n'), ((1368, 1399), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (1388, 1399), False, 'import gym\n'), ((2106, 2137), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (2126, 2137), False, 'import gym\n'), ((3456, 3487), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (3476, 3487), False, 'import gym\n'), ((3590, 3650), 'numpy.zeros', 'np.zeros', (['((2,) + env.observation_space.shape)'], {'dtype': 'np.uint8'}), '((2,) + env.observation_space.shape, dtype=np.uint8)\n', (3598, 3650), True, 'import numpy as np\n'), ((4517, 4554), 'gym.RewardWrapper.__init__', 'gym.RewardWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (4543, 4554), False, 'import gym\n'), ((4654, 4669), 'numpy.sign', 'np.sign', (['reward'], {}), '(reward)\n', (4661, 4669), True, 'import numpy as np\n'), ((4753, 4790), 'gym.RewardWrapper.__init__', 'gym.RewardWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (4779, 4790), False, 'import gym\n'), ((4955, 4970), 'numpy.sign', 'np.sign', (['reward'], {}), '(reward)\n', (4962, 4970), True, 'import numpy as np\n'), ((5065, 5102), 'gym.RewardWrapper.__init__', 'gym.RewardWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (5091, 5102), False, 'import gym\n'), ((5464, 5506), 'gym.ObservationWrapper.__init__', 'gym.ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (5495, 5506), False, 'import gym\n'), ((5653, 5732), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(self.height, self.width, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8)\n', (5663, 5732), True, 'import gym.spaces as spaces\n'), ((5923, 5962), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (5935, 5962), False, 'import cv2\n'), ((5979, 6053), 'cv2.resize', 'cv2.resize', (['frame', '(self.width, self.height)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n', (5989, 6053), False, 'import cv2\n'), ((6206, 6237), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (6226, 6237), False, 'import gym\n'), ((6764, 6795), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (6784, 6795), False, 'import gym\n'), ((7183, 7214), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (7203, 7214), False, 'import gym\n'), ((8052, 8083), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (8072, 8083), False, 'import gym\n'), ((8125, 8144), 'collections.deque', 'deque', (['[]'], {'maxlen': 'k'}), '([], maxlen=k)\n', (8130, 8144), False, 'from collections import deque\n'), ((8220, 8299), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(shp[0], shp[1], shp[2] * k)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(shp[0], shp[1], 
shp[2] * k), dtype=np.uint8)\n', (8230, 8299), True, 'import gym.spaces as spaces\n'), ((8811, 8853), 'gym.ObservationWrapper.__init__', 'gym.ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (8842, 8853), False, 'import gym\n'), ((10180, 10516), 'datalib.trajectories.load_trajectories_by_score', 'trajectories.load_trajectories_by_score', ([], {'trajectory_dir': "config['traj_dir']", 'max_score_cutoff': "config['max_score_cutoff']", 'min_score_cutoff': "config['min_score_cutoff']", 'project_level_gamma': "config['gamma']", 'clip_rewards': "config['clip_rewards']", 'frameskip': "config['frameskip']", 'process_lost_lifes': "config['process_lost_lifes']"}), "(trajectory_dir=config['traj_dir'],\n max_score_cutoff=config['max_score_cutoff'], min_score_cutoff=config[\n 'min_score_cutoff'], project_level_gamma=config['gamma'], clip_rewards=\n config['clip_rewards'], frameskip=config['frameskip'],\n process_lost_lifes=config['process_lost_lifes'])\n", (10219, 10516), True, 'import datalib.trajectories as trajectories\n'), ((10834, 10861), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (10855, 10861), True, 'import numpy as np\n'), ((11607, 11627), 'numpy.load', 'np.load', (['state_fname'], {}), '(state_fname)\n', (11614, 11627), True, 'import numpy as np\n'), ((11745, 11784), 'cv2.imread', 'cv2.imread', (['img_fname', 'cv2.IMREAD_COLOR'], {}), '(img_fname, cv2.IMREAD_COLOR)\n', (11755, 11784), False, 'import cv2\n'), ((11799, 11835), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (11811, 11835), False, 'import cv2\n'), ((12772, 12794), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(8)'], {}), '(8)\n', (12791, 12794), False, 'import gym\n'), ((5267, 5282), 'numpy.sign', 'np.sign', (['reward'], {}), '(reward)\n', (5274, 5282), True, 'import numpy as np\n'), ((9627, 9663), 'numpy.concatenate', 'np.concatenate', (['self._frames'], {'axis': '(2)'}), '(self._frames, axis=2)\n', (9641, 9663), True, 'import numpy as np\n'), ((9014, 9035), 'numpy.array', 'np.array', (['observation'], {}), '(observation)\n', (9022, 9035), True, 'import numpy as np\n')]
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
from SpeakerNet import *
from utils import *
from DatasetLoader import loadWAV
import sys, time, os, argparse, socket
import yaml
import numpy
import pdb
import torch
import glob
import zipfile
import datetime
import os
import random
import subprocess
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import torch.nn.functional as F
from werkzeug.utils import secure_filename
from flask import Flask, request, jsonify
# ## ===== ===== ===== ===== ===== ===== ===== =====
# ## Parse arguments
# ## ===== ===== ===== ===== ===== ===== ===== =====
parser = argparse.ArgumentParser(description = 'Prepare Data');
## Data loader
parser.add_argument('--max_frames', type=int, default=200, help='Input length to the network for training');
parser.add_argument('--eval_frames', type=int, default=400, help='Input length to the network for testing; 0 uses the whole files');
## Training details
parser.add_argument('--trainfunc', type=str, default='softmaxproto', help='Loss function');
## Optimizer
parser.add_argument('--optimizer', type=str, default='adam', help='sgd or adam');
## Loss functions
parser.add_argument('--hard_prob', type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions');
parser.add_argument('--hard_rank', type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions');
parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions');
parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions');
parser.add_argument('--nPerSpeaker', type=int, default=2, help='Number of utterances per speaker per batch, only for metric learning based losses');
parser.add_argument('--nClasses', type=int, default=400, help='Number of speakers in the softmax layer, only for softmax-based losses');
## Load
parser.add_argument('--model_path', type=str, default='model000000500.model', help='Path for model and logs');
## Model definition
parser.add_argument('--n_mels', type=int, default=64, help='Number of mel filterbanks');
parser.add_argument('--log_input', type=bool, default=True, help='Log input features')
parser.add_argument('--model', type=str, default='ResNetSE34V2', help='Name of model definition');
parser.add_argument('--encoder_type', type=str, default='ASP', help='Type of encoder');
parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer');
##Server 's params
parser.add_argument('--gpu', dest='gpu', action='store_true', help='Use GPU');
parser.add_argument('--threshold', type=float, default=-1.0831763744354248, help='Threshold');
parser.add_argument('--feats_path', type=str, default='feats.npy', help='Path for feats file');
args = parser.parse_args();
## Load models
if args.gpu == True:
s = SpeakerNet(**vars(args));
s = WrappedModel(s).cuda(0)
else:
s = SpeakerNetCPU(**vars(args));
s = WrappedModel(s).cpu()
## Load model weights
try:
loadParameters(args.model_path, s, args.gpu);
except:
raise Exception('Model path is wrong!')
print('Model %s loaded from previous state!'%args.model_path);
feats = np.load(args.feats_path, allow_pickle=True)[()]
def main_worker(file_path):
data = create_data(file_path, args.eval_frames)
feature_vector = s(data).detach().cpu()
normalized_vector = F.normalize(feature_vector, p=2, dim=1)
max_score = args.threshold
speaker = ''
for key, value in feats.items():
dist = F.pairwise_distance(normalized_vector.unsqueeze(-1), value.unsqueeze(-1).transpose(0,2)).detach().cpu().numpy();
score = -1 * np.mean(dist);
if score >= max_score:
max_score = score
speaker = key.split('/')[-2]
return speaker
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def predict():
audio_file = request.files['file']
file_name_1 = str(random.randint(0, 100000)) + '.' + secure_filename(audio_file.filename).split('.')[-1]
audio_file.save(file_name_1)
file_name_2 = str(random.randint(0, 100000)) + '.wav'
out = subprocess.call('ffmpeg -y -i %s -ac 1 -vn -acodec pcm_s16le -ar 16000 %s >/dev/null 2>/dev/null' %(file_name_1, file_name_2), shell=True)
if out != 0:
return 'Invalid format!'
speaker = main_worker(file_name_2)
os.remove(file_name_1)
os.remove(file_name_2)
result = {'speaker': speaker}
return jsonify(result)
if __name__ == '__main__':
app.run(host='0.0.0.0', port='8080', debug=False)
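# Example request (hypothetical file name, for illustration only):
#   curl -X POST -F "file=@sample.wav" http://localhost:8080/predict
# The response is {"speaker": "<matched speaker>"} (empty string when no enrolled
# speaker scores above the threshold), or 'Invalid format!' if ffmpeg cannot decode the upload.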
|
[
"numpy.load",
"os.remove",
"argparse.ArgumentParser",
"random.randint",
"flask.Flask",
"werkzeug.utils.secure_filename",
"flask.jsonify",
"numpy.mean",
"subprocess.call",
"torch.nn.functional.normalize"
] |
[((638, 689), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Prepare Data"""'}), "(description='Prepare Data')\n", (661, 689), False, 'import sys, time, os, argparse, socket\n'), ((4073, 4088), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (4078, 4088), False, 'from flask import Flask, request, jsonify\n'), ((3455, 3498), 'numpy.load', 'np.load', (['args.feats_path'], {'allow_pickle': '(True)'}), '(args.feats_path, allow_pickle=True)\n', (3462, 3498), True, 'import numpy as np\n'), ((3653, 3692), 'torch.nn.functional.normalize', 'F.normalize', (['feature_vector'], {'p': '(2)', 'dim': '(1)'}), '(feature_vector, p=2, dim=1)\n', (3664, 3692), True, 'import torch.nn.functional as F\n'), ((4396, 4545), 'subprocess.call', 'subprocess.call', (["('ffmpeg -y -i %s -ac 1 -vn -acodec pcm_s16le -ar 16000 %s >/dev/null 2>/dev/null'\n % (file_name_1, file_name_2))"], {'shell': '(True)'}), "(\n 'ffmpeg -y -i %s -ac 1 -vn -acodec pcm_s16le -ar 16000 %s >/dev/null 2>/dev/null'\n % (file_name_1, file_name_2), shell=True)\n", (4411, 4545), False, 'import subprocess\n'), ((4629, 4651), 'os.remove', 'os.remove', (['file_name_1'], {}), '(file_name_1)\n', (4638, 4651), False, 'import os\n'), ((4656, 4678), 'os.remove', 'os.remove', (['file_name_2'], {}), '(file_name_2)\n', (4665, 4678), False, 'import os\n'), ((4725, 4740), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (4732, 4740), False, 'from flask import Flask, request, jsonify\n'), ((3928, 3941), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (3935, 3941), True, 'import numpy as np\n'), ((4350, 4375), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (4364, 4375), False, 'import random\n'), ((4207, 4232), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (4221, 4232), False, 'import random\n'), ((4242, 4278), 'werkzeug.utils.secure_filename', 'secure_filename', (['audio_file.filename'], {}), '(audio_file.filename)\n', (4257, 4278), False, 'from werkzeug.utils import secure_filename\n')]
|
from functools import singledispatch
import json
import fsspec
from toolz import groupby
from loguru import logger
from typing import Any, List, Union
from pydantic.datetime_parse import datetime_re
from pydantic.validators import parse_datetime
import numpy as np
from ..types import Interval
from ..indexes import Index, InterpolatingIndex, IntervalIndex, MultiIndex
from ..utils import jsonable, singledispatchmethod, hashable_doc, unhashable_doc
from .base import BaseDataQuery, DatasourceInterface
class JsonBaseQuery(BaseDataQuery):
def __init__(self, index, data, field: str, label: Any) -> None:
self.index = index
self.data = data
self.field = field
self.label = label
@property
def labels(self):
return {self.field: self.label}
def filter(self, record: dict):
raise NotImplementedError
def apply_selection(self, records):
return list(filter(self.filter, records))
def execute(self, limit: int = None, skip: int = None, sort=None):
logger.debug("Applying pandas dataframe selection")
if not self.data:
return []
if sort is None:
data = self.data
else:
if isinstance(sort, str):
sort = [sort]
data = [hashable_doc(d) for d in self.data]
data = sorted(data, key=lambda d: tuple(d[s] for s in sort))
data = [unhashable_doc(d) for d in data]
docs = self.apply_selection(data)
if limit is not None:
start = skip * self.index.DOCS_PER_LABEL if skip is not None else 0
limit = start + limit * self.index.DOCS_PER_LABEL
docs = docs[start:limit]
docs = self.index.reduce(docs, self.labels)
docs = from_json(docs)
logger.debug(f"Done. Found {len(docs)} documents.")
return docs
def min(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
docs = self.apply_selection(self.data)
results = {}
for field in fields:
values = [d[field] for d in docs]
results[field] = min(values)
results = from_json(results)
if len(fields) == 1:
return results[fields[0]]
return results
def max(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
docs = self.apply_selection(self.data)
results = {}
for field in fields:
values = [d[field] for d in docs]
results[field] = max(values)
results = from_json(results)
if len(fields) == 1:
return results[fields[0]]
return results
def unique(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
docs = self.apply_selection(self.data)
results = {}
for field in fields:
values = [doc[field] for doc in docs]
values = set([hashable_doc(v) for v in values])
values = [unhashable_doc(v) for v in values]
results[field] = values
results = from_json(results)
if len(fields) == 1:
return results[fields[0]]
return results
def count(self):
docs = self.apply_selection(self.data)
return len(docs)
class JsonSimpleQuery(JsonBaseQuery):
def filter(self, record: dict):
if self.label is None:
return True
if self.field not in record:
raise KeyError(self.field)
label = self.label
if isinstance(label, slice):
if label.step is None:
ge = record[self.field] >= label.start
lt = record[self.field] < label.stop
return ge and lt
else:
label = list(range(label.start, label.stop, label.step))
if isinstance(label, list):
return record[self.field] in label
else:
return record[self.field] == label
class JsonIntervalQuery(JsonBaseQuery):
def filter(self, record: dict):
if self.label is None:
return record
if self.field not in record:
raise KeyError(self.field)
interval = self.label
if isinstance(interval, tuple):
left, right = interval
elif isinstance(interval, dict):
left, right = interval["left"], interval["right"]
elif isinstance(interval, slice):
left, right = interval.start, interval.stop
elif hasattr(interval, "left") and hasattr(interval, "right"):
left, right = interval.left, interval.right
else:
left = right = interval
left, right = to_json(left), to_json(right)
return (record[self.field]["left"] < right) and (
record[self.field]["right"] > left
)
class JsonInterpolationQuery(JsonBaseQuery):
def apply_selection(self, records, limit=1):
if self.label is None:
return records
if not all(self.field in record for record in records):
raise KeyError(self.field)
field_values = np.array([record[self.field] for record in records])
before_mask = field_values <= self.label
before_values = field_values[before_mask]
after_mask = field_values > self.label
after_values = field_values[after_mask]
        before_idxs = np.argsort(np.abs(before_values - self.label))[:limit]  # nearest values below the label
before_records = [records[i] for i in np.flatnonzero(before_mask)]
before_values = [before_records[i] for i in before_idxs]
        after_idxs = np.argsort(np.abs(after_values - self.label))[:limit]  # nearest values above the label
after_records = [records[i] for i in np.flatnonzero(after_mask)]
after_values = [after_records[i] for i in after_idxs]
return before_values + after_values
class JsonMultiQuery(JsonBaseQuery):
def __init__(self, index, data, queries: List[JsonBaseQuery]) -> None:
self.index = index
self.data = data
self.queries = queries
@property
def labels(self):
return {query.field: query.label for query in self.queries}
def apply_selection(self, records):
if len(self.queries) == 1:
return self.queries[0].apply_selection(records)
for query in self.queries:
if isinstance(query, JsonInterpolationQuery):
selections = []
others = [q.field for q in self.queries if q is not query]
if not others:
records = query.apply_selection(records)
continue
for _, docs in groupby(others, records):
selection = query.apply_selection(docs).reset_index()
selections.extend(selection)
if selections:
records = selections
else:
records = []
else:
records = query.apply_selection(records)
return records
@DatasourceInterface.register_interface(list)
class JsonInterface(DatasourceInterface):
@classmethod
def from_url(cls, url: str, jsonpath="", **kwargs):
if url.endswith(".json"):
with fsspec.open(url, **kwargs) as f:
data = json.load(f)
for p in jsonpath.split("."):
data = data[p] if p else data
if not isinstance(data, list):
raise ValueError("JSON file must contain a list of documents")
return cls(data)
raise NotImplementedError
@singledispatchmethod
def compile_query(self, index, label):
raise NotImplementedError(
f"{self.__class__.__name__} does not support {type(index)} indexes."
)
@compile_query.register(Index)
@compile_query.register(str)
def simple_query(self, index, label):
if isinstance(index, str):
index, name = Index(), index
index.name = name
label = to_json(label)
return JsonSimpleQuery(index, self.source, index.name, label)
@compile_query.register(IntervalIndex)
def interval_query(self, index, label):
label = to_json(label)
return JsonIntervalQuery(index, self.source, index.name, label)
@compile_query.register(InterpolatingIndex)
def interpolating_query(self, index, label):
label = to_json(label)
return JsonInterpolationQuery(index, self.source, index.name, label)
@compile_query.register(list)
@compile_query.register(tuple)
@compile_query.register(MultiIndex)
def multi_query(self, index, labels):
if not isinstance(index, MultiIndex):
index = MultiIndex(*index)
queries = [self.compile_query(idx, labels[idx.name]) for idx in index.indexes]
return JsonMultiQuery(index, self.source, queries)
def _find(self, doc):
for i, d in enumerate(self.source):
if doc.same_index(doc.__class__(**d)):
return i
else:
raise KeyError(doc.index_labels)
def insert(self, doc):
doc = to_json(doc.dict())
self.source.append(doc)
def update(self, doc):
for i, d in enumerate(self.source):
if doc.same_index(doc.__class__(**d)):
self.source[i] = to_json(doc.dict())
break
else:
from rframe.schema import UpdateError
raise UpdateError(f"No document with index {doc.index} found.")
def delete(self, doc):
del self.source[self._find(doc)]
def to_json(obj):
return jsonable(obj)
@singledispatch
def from_json(obj):
return obj
@from_json.register(str)
def from_json_str(obj):
match = datetime_re.match(obj) # type: ignore
if match is None:
return obj
return parse_datetime(obj)
@from_json.register(list)
def from_json_list(obj):
return [from_json(v) for v in obj]
@from_json.register(tuple)
def from_json_tuple(obj):
return tuple(from_json(v) for v in obj)
@from_json.register(dict)
def from_json_dict(obj):
if len(obj) == 2 and "left" in obj and "right" in obj:
left, right = from_json((obj["left"], obj["right"]))
return Interval[left, right]
return {k: from_json(v) for k, v in obj.items()}
|
[
"pydantic.datetime_parse.datetime_re.match",
"loguru.logger.debug",
"json.load",
"numpy.abs",
"numpy.flatnonzero",
"toolz.groupby",
"numpy.array",
"rframe.schema.UpdateError",
"pydantic.validators.parse_datetime",
"fsspec.open"
] |
[((9844, 9866), 'pydantic.datetime_parse.datetime_re.match', 'datetime_re.match', (['obj'], {}), '(obj)\n', (9861, 9866), False, 'from pydantic.datetime_parse import datetime_re\n'), ((9935, 9954), 'pydantic.validators.parse_datetime', 'parse_datetime', (['obj'], {}), '(obj)\n', (9949, 9954), False, 'from pydantic.validators import parse_datetime\n'), ((1039, 1090), 'loguru.logger.debug', 'logger.debug', (['"""Applying pandas dataframe selection"""'], {}), "('Applying pandas dataframe selection')\n", (1051, 1090), False, 'from loguru import logger\n'), ((5214, 5266), 'numpy.array', 'np.array', (['[record[self.field] for record in records]'], {}), '([record[self.field] for record in records])\n', (5222, 5266), True, 'import numpy as np\n'), ((9556, 9613), 'rframe.schema.UpdateError', 'UpdateError', (['f"""No document with index {doc.index} found."""'], {}), "(f'No document with index {doc.index} found.')\n", (9567, 9613), False, 'from rframe.schema import UpdateError\n'), ((5586, 5613), 'numpy.flatnonzero', 'np.flatnonzero', (['before_mask'], {}), '(before_mask)\n', (5600, 5613), True, 'import numpy as np\n'), ((5801, 5827), 'numpy.flatnonzero', 'np.flatnonzero', (['after_mask'], {}), '(after_mask)\n', (5815, 5827), True, 'import numpy as np\n'), ((6727, 6751), 'toolz.groupby', 'groupby', (['others', 'records'], {}), '(others, records)\n', (6734, 6751), False, 'from toolz import groupby\n'), ((7317, 7343), 'fsspec.open', 'fsspec.open', (['url'], {}), '(url, **kwargs)\n', (7328, 7343), False, 'import fsspec\n'), ((7373, 7385), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7382, 7385), False, 'import json\n'), ((5496, 5517), 'numpy.abs', 'np.abs', (['before_values'], {}), '(before_values)\n', (5502, 5517), True, 'import numpy as np\n'), ((5713, 5733), 'numpy.abs', 'np.abs', (['after_values'], {}), '(after_values)\n', (5719, 5733), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division
import cv2
import numpy as np
import time
from ..utils.viz import show_frame
class Tracker(object):
def __init__(self, name):
self.name = name
def init(self, image, init_rect):
raise NotImplementedError()
def update(self, image):
raise NotImplementedError()
def track(self, img_files, init_rect, visualize=False):
frame_num = len(img_files)
bndboxes = np.zeros((frame_num, 4))
bndboxes[0, :] = init_rect
speed_fps = np.zeros(frame_num)
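        # Run the tracker frame by frame: initialize on the first frame, update on the rest, and record per-frame FPS.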
for f, img_file in enumerate(img_files):
image = cv2.imread(img_file)
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
elif image.ndim == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
start_time = time.time()
if f == 0:
self.init(image, init_rect)
else:
bndboxes[f, :] = self.update(image)
elapsed_time = time.time() - start_time
speed_fps[f] = 1. / elapsed_time
if visualize:
show_frame(image, bndboxes[f, :], fig_n=1)
return bndboxes, speed_fps
from .siamfc import TrackerSiamFC
from .goturn import TrackerGOTURN
from .csk import TrackerCSK
from .kcf import TrackerKCF
from .dcf import TrackerDCF
from .dcfnet import TrackerDCFNet
from .mosse import TrackerMOSSE
from .dsst import TrackerDSST
|
[
"cv2.cvtColor",
"cv2.imread",
"numpy.zeros",
"time.time"
] |
[((465, 489), 'numpy.zeros', 'np.zeros', (['(frame_num, 4)'], {}), '((frame_num, 4))\n', (473, 489), True, 'import numpy as np\n'), ((545, 564), 'numpy.zeros', 'np.zeros', (['frame_num'], {}), '(frame_num)\n', (553, 564), True, 'import numpy as np\n'), ((635, 655), 'cv2.imread', 'cv2.imread', (['img_file'], {}), '(img_file)\n', (645, 655), False, 'import cv2\n'), ((875, 886), 'time.time', 'time.time', ([], {}), '()\n', (884, 886), False, 'import time\n'), ((712, 751), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (724, 751), False, 'import cv2\n'), ((1051, 1062), 'time.time', 'time.time', ([], {}), '()\n', (1060, 1062), False, 'import time\n'), ((810, 848), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (822, 848), False, 'import cv2\n')]
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.batch_norm."""
import os
from absl.testing import absltest
from haiku._src import batch_norm
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class BatchNormTest(absltest.TestCase):
@test_utils.transform_and_run
def test_basic(self):
data = jnp.arange(2 * 3 * 4, dtype=jnp.float32).reshape([2, 3, 4])
norm = batch_norm.BatchNorm(True, True, 0.9)
result = norm(data, is_training=True)
result_0_replicated = jnp.broadcast_to(result[:, :, :1], result.shape)
    # Input data has identical per-channel variance, so every channel normalizes to the same values.
np.testing.assert_allclose(result, result_0_replicated)
# Running through again in test mode produces same output.
np.testing.assert_allclose(norm(data, is_training=False), result, rtol=2e-2)
@test_utils.transform_and_run
def test_simple_training(self):
layer = batch_norm.BatchNorm(
create_scale=False, create_offset=False, decay_rate=0.9)
inputs = np.ones([2, 3, 3, 5])
scale = np.full((5,), 0.5)
offset = np.full((5,), 2.0)
result = layer(inputs, True, scale=scale, offset=offset)
np.testing.assert_equal(result, np.full(inputs.shape, 2.0))
@test_utils.transform_and_run
def test_simple_training_nchw(self):
layer = batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
data_format="NCHW")
inputs = np.ones([2, 5, 3, 3])
scale = np.full((5, 1, 1), 0.5)
offset = np.full((5, 1, 1), 2.0)
result = layer(inputs, True, scale=scale, offset=offset)
np.testing.assert_equal(result, np.full(inputs.shape, 2.0))
@test_utils.transform_and_run
def test_simple_training_normalized_axes(self):
layer = batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
axis=[0, 2, 3]) # Not the second axis.
# This differs only in the second axis.
inputs = np.stack([2.0 * np.ones([5, 3, 3]), np.ones([5, 3, 3])], 1)
result = layer(inputs, True)
# Despite not all values being identical, treating slices from the first
# axis separately leads to a fully normalized = equal array.
np.testing.assert_equal(result, np.zeros(inputs.shape))
def test_simple_training_cross_replica_axis(self):
ldc = jax.local_device_count()
def f(x, is_training=True):
return batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
cross_replica_axis="i",
)(x, is_training=is_training)
f = transform.transform_with_state(f)
inputs = np.arange(ldc * 4).reshape(ldc, 4)
key = np.broadcast_to(jax.random.PRNGKey(42), (ldc, 2))
params, state = jax.pmap(f.init, axis_name="i")(key, inputs)
result, _ = jax.pmap(f.apply, axis_name="i")(params, state, key, inputs)
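    # With a cross-replica axis the normalization statistics are computed across devices, i.e. over axis 0 of inputs.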
mean = np.mean(inputs, axis=0)
std = np.std(inputs, axis=0) + 1e-10
expected = (inputs - mean) / std
np.testing.assert_array_almost_equal(result, expected)
def test_simple_training_cross_replica_axis_index_groups(self):
ldc = jax.local_device_count()
if ldc < 2:
self.skipTest("Cross-replica test requires at least 2 devices.")
num_groups = ldc // 2
num_group_devices = ldc // num_groups
# for 8 devices this produces [[0, 1], [2, 3], [4, 5], [6, 7]] groups.
groups = np.arange(ldc).reshape(num_groups, num_group_devices).tolist()
def f(x, is_training=True):
return batch_norm.BatchNorm(
create_scale=False,
create_offset=False,
decay_rate=0.9,
cross_replica_axis="i",
cross_replica_axis_index_groups=groups,
)(x, is_training=is_training)
f = transform.transform_with_state(f)
inputs = np.arange(ldc * 4).reshape(ldc, 4).astype(np.float32)
key = np.broadcast_to(jax.random.PRNGKey(42), (ldc, 2))
params, state = jax.pmap(f.init, axis_name="i")(key, inputs)
result, _ = jax.pmap(f.apply, axis_name="i")(params, state, key, inputs)
expected = np.empty_like(inputs)
for g in range(num_groups):
group_inputs = inputs[num_group_devices*g:num_group_devices*(g + 1)]
group_mean = np.mean(group_inputs, axis=0)
group_std = np.std(group_inputs, axis=0) + 1e-10
group_inputs = (group_inputs - group_mean) / group_std
expected[num_group_devices*g:num_group_devices*(g + 1)] = group_inputs
np.testing.assert_array_almost_equal(result, expected)
@test_utils.transform_and_run
def test_no_scale_and_offset(self):
layer = batch_norm.BatchNorm(
create_scale=False, create_offset=False, decay_rate=0.9)
inputs = jnp.ones([2, 5, 3, 3, 3])
result = layer(inputs, True)
np.testing.assert_equal(result, np.zeros_like(inputs))
@test_utils.transform_and_run
def test_no_scale_and_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `scale_init` if `create_scale=False`"):
batch_norm.BatchNorm(
create_scale=False,
create_offset=True,
decay_rate=0.9,
scale_init=jnp.ones)
@test_utils.transform_and_run
def test_no_offset_beta_init_provided(self):
with self.assertRaisesRegex(
ValueError, "Cannot set `offset_init` if `create_offset=False`"):
batch_norm.BatchNorm(
create_scale=True,
create_offset=False,
decay_rate=0.9,
offset_init=jnp.zeros)
def test_eps_cast_to_var_dtype(self):
# See https://github.com/google/jax/issues/4718 for more info. In the
# context of this test we need to assert NumPy bf16 params/state and a
# Python float for eps preserve bf16 output.
def f(x, is_training):
return batch_norm.BatchNorm(True, True, 0.9, eps=0.1)(x, is_training)
f = transform.transform_with_state(f)
x = np.ones([], jnp.bfloat16)
key = jax.random.PRNGKey(42)
params, state = jax.device_get(f.init(key, x, True))
y, _ = f.apply(params, state, None, x, False)
self.assertEqual(y.dtype, jnp.bfloat16)
if __name__ == "__main__":
_xla_flags = os.environ.get("XLA_FLAGS", "")
os.environ["XLA_FLAGS"] = (_xla_flags +
" --xla_force_host_platform_device_count=8")
absltest.main()
os.environ["XLA_FLAGS"] = _xla_flags
|
[
"absl.testing.absltest.main",
"numpy.ones",
"jax.random.PRNGKey",
"numpy.mean",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"numpy.full",
"numpy.zeros_like",
"numpy.std",
"jax.local_device_count",
"numpy.empty_like",
"haiku._src.batch_norm.BatchNorm",
"numpy.testing.assert_allclose",
"haiku._src.transform.transform_with_state",
"jax.numpy.broadcast_to",
"jax.pmap",
"jax.numpy.ones",
"jax.numpy.arange",
"numpy.zeros",
"os.environ.get"
] |
[((6809, 6840), 'os.environ.get', 'os.environ.get', (['"""XLA_FLAGS"""', '""""""'], {}), "('XLA_FLAGS', '')\n", (6823, 6840), False, 'import os\n'), ((6960, 6975), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (6973, 6975), False, 'from absl.testing import absltest\n'), ((1117, 1154), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', (['(True)', '(True)', '(0.9)'], {}), '(True, True, 0.9)\n', (1137, 1154), False, 'from haiku._src import batch_norm\n'), ((1223, 1271), 'jax.numpy.broadcast_to', 'jnp.broadcast_to', (['result[:, :, :1]', 'result.shape'], {}), '(result[:, :, :1], result.shape)\n', (1239, 1271), True, 'import jax.numpy as jnp\n'), ((1330, 1385), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'result_0_replicated'], {}), '(result, result_0_replicated)\n', (1356, 1385), True, 'import numpy as np\n'), ((1609, 1686), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(False)', 'decay_rate': '(0.9)'}), '(create_scale=False, create_offset=False, decay_rate=0.9)\n', (1629, 1686), False, 'from haiku._src import batch_norm\n'), ((1710, 1731), 'numpy.ones', 'np.ones', (['[2, 3, 3, 5]'], {}), '([2, 3, 3, 5])\n', (1717, 1731), True, 'import numpy as np\n'), ((1744, 1762), 'numpy.full', 'np.full', (['(5,)', '(0.5)'], {}), '((5,), 0.5)\n', (1751, 1762), True, 'import numpy as np\n'), ((1776, 1794), 'numpy.full', 'np.full', (['(5,)', '(2.0)'], {}), '((5,), 2.0)\n', (1783, 1794), True, 'import numpy as np\n'), ((2005, 2107), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(False)', 'decay_rate': '(0.9)', 'data_format': '"""NCHW"""'}), "(create_scale=False, create_offset=False, decay_rate=\n 0.9, data_format='NCHW')\n", (2025, 2107), False, 'from haiku._src import batch_norm\n'), ((2150, 2171), 'numpy.ones', 'np.ones', (['[2, 5, 3, 3]'], {}), '([2, 5, 3, 3])\n', (2157, 2171), True, 'import numpy as np\n'), ((2184, 2207), 'numpy.full', 'np.full', (['(5, 1, 1)', '(0.5)'], {}), '((5, 1, 1), 0.5)\n', (2191, 2207), True, 'import numpy as np\n'), ((2221, 2244), 'numpy.full', 'np.full', (['(5, 1, 1)', '(2.0)'], {}), '((5, 1, 1), 2.0)\n', (2228, 2244), True, 'import numpy as np\n'), ((2466, 2564), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(False)', 'decay_rate': '(0.9)', 'axis': '[0, 2, 3]'}), '(create_scale=False, create_offset=False, decay_rate=\n 0.9, axis=[0, 2, 3])\n', (2486, 2564), False, 'from haiku._src import batch_norm\n'), ((3036, 3060), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (3058, 3060), False, 'import jax\n'), ((3295, 3328), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (3325, 3328), False, 'from haiku._src import transform\n'), ((3592, 3615), 'numpy.mean', 'np.mean', (['inputs'], {'axis': '(0)'}), '(inputs, axis=0)\n', (3599, 3615), True, 'import numpy as np\n'), ((3699, 3753), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (3735, 3753), True, 'import numpy as np\n'), ((3831, 3855), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (3853, 3855), False, 'import jax\n'), ((4446, 4479), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (4476, 4479), False, 'from haiku._src import transform\n'), ((4766, 4787), 
'numpy.empty_like', 'np.empty_like', (['inputs'], {}), '(inputs)\n', (4779, 4787), True, 'import numpy as np\n'), ((5142, 5196), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (5178, 5196), True, 'import numpy as np\n'), ((5280, 5357), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(False)', 'decay_rate': '(0.9)'}), '(create_scale=False, create_offset=False, decay_rate=0.9)\n', (5300, 5357), False, 'from haiku._src import batch_norm\n'), ((5381, 5406), 'jax.numpy.ones', 'jnp.ones', (['[2, 5, 3, 3, 3]'], {}), '([2, 5, 3, 3, 3])\n', (5389, 5406), True, 'import jax.numpy as jnp\n'), ((6513, 6546), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (6543, 6546), False, 'from haiku._src import transform\n'), ((6556, 6581), 'numpy.ones', 'np.ones', (['[]', 'jnp.bfloat16'], {}), '([], jnp.bfloat16)\n', (6563, 6581), True, 'import numpy as np\n'), ((6592, 6614), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(42)'], {}), '(42)\n', (6610, 6614), False, 'import jax\n'), ((1893, 1919), 'numpy.full', 'np.full', (['inputs.shape', '(2.0)'], {}), '(inputs.shape, 2.0)\n', (1900, 1919), True, 'import numpy as np\n'), ((2343, 2369), 'numpy.full', 'np.full', (['inputs.shape', '(2.0)'], {}), '(inputs.shape, 2.0)\n', (2350, 2369), True, 'import numpy as np\n'), ((2948, 2970), 'numpy.zeros', 'np.zeros', (['inputs.shape'], {}), '(inputs.shape)\n', (2956, 2970), True, 'import numpy as np\n'), ((3404, 3426), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(42)'], {}), '(42)\n', (3422, 3426), False, 'import jax\n'), ((3458, 3489), 'jax.pmap', 'jax.pmap', (['f.init'], {'axis_name': '"""i"""'}), "(f.init, axis_name='i')\n", (3466, 3489), False, 'import jax\n'), ((3519, 3551), 'jax.pmap', 'jax.pmap', (['f.apply'], {'axis_name': '"""i"""'}), "(f.apply, axis_name='i')\n", (3527, 3551), False, 'import jax\n'), ((3626, 3648), 'numpy.std', 'np.std', (['inputs'], {'axis': '(0)'}), '(inputs, axis=0)\n', (3632, 3648), True, 'import numpy as np\n'), ((4574, 4596), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(42)'], {}), '(42)\n', (4592, 4596), False, 'import jax\n'), ((4628, 4659), 'jax.pmap', 'jax.pmap', (['f.init'], {'axis_name': '"""i"""'}), "(f.init, axis_name='i')\n", (4636, 4659), False, 'import jax\n'), ((4689, 4721), 'jax.pmap', 'jax.pmap', (['f.apply'], {'axis_name': '"""i"""'}), "(f.apply, axis_name='i')\n", (4697, 4721), False, 'import jax\n'), ((4914, 4943), 'numpy.mean', 'np.mean', (['group_inputs'], {'axis': '(0)'}), '(group_inputs, axis=0)\n', (4921, 4943), True, 'import numpy as np\n'), ((5476, 5497), 'numpy.zeros_like', 'np.zeros_like', (['inputs'], {}), '(inputs)\n', (5489, 5497), True, 'import numpy as np\n'), ((5688, 5789), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(True)', 'decay_rate': '(0.9)', 'scale_init': 'jnp.ones'}), '(create_scale=False, create_offset=True, decay_rate=0.9,\n scale_init=jnp.ones)\n', (5708, 5789), False, 'from haiku._src import batch_norm\n'), ((6020, 6123), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(True)', 'create_offset': '(False)', 'decay_rate': '(0.9)', 'offset_init': 'jnp.zeros'}), '(create_scale=True, create_offset=False, decay_rate=0.9,\n offset_init=jnp.zeros)\n', (6040, 6123), False, 'from haiku._src import batch_norm\n'), ((1045, 1085), 'jax.numpy.arange', 
'jnp.arange', (['(2 * 3 * 4)'], {'dtype': 'jnp.float32'}), '(2 * 3 * 4, dtype=jnp.float32)\n', (1055, 1085), True, 'import jax.numpy as jnp\n'), ((2711, 2729), 'numpy.ones', 'np.ones', (['[5, 3, 3]'], {}), '([5, 3, 3])\n', (2718, 2729), True, 'import numpy as np\n'), ((3107, 3213), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(False)', 'decay_rate': '(0.9)', 'cross_replica_axis': '"""i"""'}), "(create_scale=False, create_offset=False, decay_rate=\n 0.9, cross_replica_axis='i')\n", (3127, 3213), False, 'from haiku._src import batch_norm\n'), ((3343, 3361), 'numpy.arange', 'np.arange', (['(ldc * 4)'], {}), '(ldc * 4)\n', (3352, 3361), True, 'import numpy as np\n'), ((4208, 4354), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', ([], {'create_scale': '(False)', 'create_offset': '(False)', 'decay_rate': '(0.9)', 'cross_replica_axis': '"""i"""', 'cross_replica_axis_index_groups': 'groups'}), "(create_scale=False, create_offset=False, decay_rate=\n 0.9, cross_replica_axis='i', cross_replica_axis_index_groups=groups)\n", (4228, 4354), False, 'from haiku._src import batch_norm\n'), ((4962, 4990), 'numpy.std', 'np.std', (['group_inputs'], {'axis': '(0)'}), '(group_inputs, axis=0)\n', (4968, 4990), True, 'import numpy as np\n'), ((6441, 6487), 'haiku._src.batch_norm.BatchNorm', 'batch_norm.BatchNorm', (['(True)', '(True)', '(0.9)'], {'eps': '(0.1)'}), '(True, True, 0.9, eps=0.1)\n', (6461, 6487), False, 'from haiku._src import batch_norm\n'), ((2691, 2709), 'numpy.ones', 'np.ones', (['[5, 3, 3]'], {}), '([5, 3, 3])\n', (2698, 2709), True, 'import numpy as np\n'), ((4099, 4113), 'numpy.arange', 'np.arange', (['ldc'], {}), '(ldc)\n', (4108, 4113), True, 'import numpy as np\n'), ((4494, 4512), 'numpy.arange', 'np.arange', (['(ldc * 4)'], {}), '(ldc * 4)\n', (4503, 4512), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
def canny(image):
gray = cv2.cvtColor(lane_image, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blur, 50, 150)
return canny
def display_lines(image, lines):
line_image = np.zeros_like(image)
if lines is not None:
for line in lines:
#print(line)
x1, y1, x2, y2 = line.reshape(4)
cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
return line_image
def region_of_interest(image):
height = image.shape[0]
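    # Keep only a triangular region of interest; everything outside it is masked to black.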
triangle = np.array([[(200, height), (1100, height), (550, 250)]])
mask = np.zeros_like(image)
cv2.fillPoly(mask, triangle, 255)
masked_image = cv2.bitwise_and(image, mask)
return masked_image
image = cv2.imread('test_image.jpg')
lane_image = np.copy(image)
canny = canny(lane_image)
cropped_image = region_of_interest(canny)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5)
line_image = display_lines(lane_image, lines)
combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)
cv2.imshow('result', combo_image)
cv2.waitKey(0)
|
[
"cv2.line",
"cv2.GaussianBlur",
"cv2.Canny",
"numpy.zeros_like",
"numpy.copy",
"cv2.bitwise_and",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.addWeighted",
"cv2.fillPoly",
"cv2.imread",
"numpy.array",
"cv2.imshow"
] |
[((776, 804), 'cv2.imread', 'cv2.imread', (['"""test_image.jpg"""'], {}), "('test_image.jpg')\n", (786, 804), False, 'import cv2\n'), ((818, 832), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (825, 832), True, 'import numpy as np\n'), ((1065, 1115), 'cv2.addWeighted', 'cv2.addWeighted', (['lane_image', '(0.8)', 'line_image', '(1)', '(1)'], {}), '(lane_image, 0.8, line_image, 1, 1)\n', (1080, 1115), False, 'import cv2\n'), ((1116, 1149), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'combo_image'], {}), "('result', combo_image)\n", (1126, 1149), False, 'import cv2\n'), ((1150, 1164), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1161, 1164), False, 'import cv2\n'), ((61, 105), 'cv2.cvtColor', 'cv2.cvtColor', (['lane_image', 'cv2.COLOR_RGB2GRAY'], {}), '(lane_image, cv2.COLOR_RGB2GRAY)\n', (73, 105), False, 'import cv2\n'), ((117, 150), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (133, 150), False, 'import cv2\n'), ((163, 187), 'cv2.Canny', 'cv2.Canny', (['blur', '(50)', '(150)'], {}), '(blur, 50, 150)\n', (172, 187), False, 'import cv2\n'), ((257, 277), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (270, 277), True, 'import numpy as np\n'), ((569, 624), 'numpy.array', 'np.array', (['[[(200, height), (1100, height), (550, 250)]]'], {}), '([[(200, height), (1100, height), (550, 250)]])\n', (577, 624), True, 'import numpy as np\n'), ((636, 656), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (649, 656), True, 'import numpy as np\n'), ((661, 694), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'triangle', '(255)'], {}), '(mask, triangle, 255)\n', (673, 694), False, 'import cv2\n'), ((714, 742), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'mask'], {}), '(image, mask)\n', (729, 742), False, 'import cv2\n'), ((959, 971), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (967, 971), True, 'import numpy as np\n'), ((413, 470), 'cv2.line', 'cv2.line', (['line_image', '(x1, y1)', '(x2, y2)', '(255, 0, 0)', '(10)'], {}), '(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)\n', (421, 470), False, 'import cv2\n')]
|
# -*- coding: UTF-8 -*-
"""
This script demonstrates how to use neural networks to solve classification problems
"""
import os
from mlp import ANN
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_circles, make_moons
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, OneHotEncoder
def generateData(n):
"""
"""
np.random.seed(12046)
blobs = make_blobs(n_samples=n, centers = [[-2, -2], [2, 2]])
circles = make_circles(n_samples=n, factor=.4, noise=.05)
moons = make_moons(n_samples=n, noise=.05)
blocks = np.random.rand(n, 2) - 0.5
y = (blocks[:, 0] * blocks[:, 1] < 0) + 0
blocks = (blocks, y)
    # Neural networks are not robust to linear rescaling of the data, so standardize it first
scaler = StandardScaler()
blobs = (scaler.fit_transform(blobs[0]), blobs[1])
circles = (scaler.fit_transform(circles[0]), circles[1])
moons = (scaler.fit_transform(moons[0]), moons[1])
blocks = (scaler.fit_transform(blocks[0]), blocks[1])
return blobs, circles, moons, blocks
def drawData(ax, data):
"""
    Visualize the data
"""
X, y = data
label1 = X[y>0]
ax.scatter(label1[:, 0], label1[:, 1], marker="o")
label0 = X[y==0]
ax.scatter(label0[:, 0], label0[:, 1], marker="^", color="k")
return ax
def drawModel(ax, model):
"""
    Visualize the model's decision boundary (separating hyperplane)
"""
x1 = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 100)
x2 = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 100)
X1, X2 = np.meshgrid(x1, x2)
Y = model.predict_proba(np.c_[X1.ravel(), X2.ravel()])[:, 1]
Y = Y.reshape(X1.shape)
ax.contourf(X1, X2, Y, levels=[0, 0.5], colors=["gray"], alpha=0.4)
return ax
def trainLogit(data):
"""
"""
X, y = data
model = LogisticRegression()
model.fit(X, y)
return model
def trainANN(data, logPath):
"""
"""
X, y = data
enc = OneHotEncoder()
y = enc.fit_transform(y.reshape(-1, 1)).toarray()
model = ANN([4, 4, 2], logPath)
model.fit(X, y)
return model
def visualize(data):
"""
"""
# 创建一个图形框
fig = plt.figure(figsize=(10, 10), dpi=80)
fig1 = plt.figure(figsize=(10, 10), dpi=80)
    # Draw four subplots in each figure
for i in range(len(data)):
ax = fig.add_subplot(2, 2, i+1)
ax1 = fig1.add_subplot(2, 2, i+1)
drawData(ax, data[i])
        # File paths on Windows differ from those on Linux
if os.name == "nt":
drawModel(ax, trainANN(data[i], "logs\\data_%s" % (i+1)))
else:
drawModel(ax, trainANN(data[i], "logs/data_%s" % (i+1)))
drawData(ax1, data[i])
drawModel(ax1, trainLogit(data[i]))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
plt.show()
if __name__ == "__main__":
data = generateData(200)
visualize(data)
|
[
"sklearn.datasets.make_circles",
"numpy.meshgrid",
"sklearn.preprocessing.StandardScaler",
"numpy.random.seed",
"matplotlib.pyplot.show",
"mlp.ANN",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.datasets.make_blobs",
"sklearn.datasets.make_moons",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.figure",
"numpy.random.rand"
] |
[((366, 387), 'numpy.random.seed', 'np.random.seed', (['(12046)'], {}), '(12046)\n', (380, 387), True, 'import numpy as np\n'), ((400, 451), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n', 'centers': '[[-2, -2], [2, 2]]'}), '(n_samples=n, centers=[[-2, -2], [2, 2]])\n', (410, 451), False, 'from sklearn.datasets import make_blobs, make_circles, make_moons\n'), ((468, 517), 'sklearn.datasets.make_circles', 'make_circles', ([], {'n_samples': 'n', 'factor': '(0.4)', 'noise': '(0.05)'}), '(n_samples=n, factor=0.4, noise=0.05)\n', (480, 517), False, 'from sklearn.datasets import make_blobs, make_circles, make_moons\n'), ((528, 563), 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': 'n', 'noise': '(0.05)'}), '(n_samples=n, noise=0.05)\n', (538, 563), False, 'from sklearn.datasets import make_blobs, make_circles, make_moons\n'), ((723, 739), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (737, 739), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((1453, 1472), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (1464, 1472), True, 'import numpy as np\n'), ((1720, 1740), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1738, 1740), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1851, 1866), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1864, 1866), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((1933, 1956), 'mlp.ANN', 'ANN', (['[4, 4, 2]', 'logPath'], {}), '([4, 4, 2], logPath)\n', (1936, 1956), False, 'from mlp import ANN\n'), ((2057, 2093), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(80)'}), '(figsize=(10, 10), dpi=80)\n', (2067, 2093), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2141), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(80)'}), '(figsize=(10, 10), dpi=80)\n', (2115, 2141), True, 'import matplotlib.pyplot as plt\n'), ((2765, 2775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2773, 2775), True, 'import matplotlib.pyplot as plt\n'), ((576, 596), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (590, 596), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import tensorflow as tf
import argparse
import time
from PIL import Image
import tf1st
if __name__ == "__main__":
    # Parse command-line arguments
parser = argparse.ArgumentParser(description='Neural Style Transfer with OpenCV and Tensorflow')
parser.add_argument('--input-image', default="./images/federer.jpg", type=str, help='image to style')
parser.add_argument('--style-image', default="./images/vangogh.jpg", type=str, help='styling image')
parser.add_argument('--content-weight', default=1000, type=float, help='weight of the content image')
parser.add_argument('--style-weight', default=0.01, type=float, help='weight of the styling image')
parser.add_argument('--iterations', default=1000, type=int, help='number of iterations')
parser.add_argument('--result-image', default="./images/result.jpg", type=str, help='resulting image')
args = parser.parse_args()
# Enable eager execution for tensorflow
tf.enable_eager_execution()
print("Eager execution: {}".format(tf.executing_eagerly()))
model = tf1st.get_model()
for layer in model.layers:
layer.trainable = False
# Get the style and content feature representations (from our specified intermediate layers)
style_features, content_features = tf1st.get_feature_representations(model, args.input_image, args.style_image)
gram_style_features = [tf1st.gram_matrix(style_feature) for style_feature in style_features]
# Set initial image
init_image = tf1st.load_and_process_img(args.input_image)
init_image = tf.Variable(init_image, dtype=tf.float32)
# Create our optimizer
opt = tf.train.AdamOptimizer(learning_rate=5, beta1=0.99, epsilon=1e-1)
# Store our best result
best_loss, best_img = float('inf'), None
# Create a nice config
loss_weights = (args.style_weight, args.content_weight)
cfg = {
'model': model,
'loss_weights': loss_weights,
'init_image': init_image,
'gram_style_features': gram_style_features,
'content_features': content_features
}
# For displaying
num_rows = 2
num_cols = 5
display_interval = args.iterations/(num_rows*num_cols)
start_time = time.time()
global_start = time.time()
norm_means = np.array([103.939, 116.779, 123.68])
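    # Per-channel ImageNet/VGG means; they define the valid pixel range used for clipping below.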
min_vals = -norm_means
max_vals = 255 - norm_means
imgs = []
for i in range(args.iterations):
grads, all_loss = tf1st.compute_grads(cfg)
loss, style_score, content_score = all_loss
opt.apply_gradients([(grads, init_image)])
clipped = tf.clip_by_value(init_image, min_vals, max_vals)
init_image.assign(clipped)
end_time = time.time()
if loss < best_loss:
# Update best loss and best image from total loss.
best_loss = loss
best_img = tf1st.deprocess_img(init_image.numpy())
start_time = time.time()
# Use the .numpy() method to get the concrete numpy array
plot_img = init_image.numpy()
plot_img = tf1st.deprocess_img(plot_img)
imgs.append(plot_img)
final_img = cv2.cvtColor(np.array(Image.fromarray(plot_img)), cv2.COLOR_BGR2RGB)
cv2.imshow('Actual Styled Image', final_img)
cv2.imwrite(args.result_image, final_img)
cv2.waitKey(1)
print('Iteration: {}'.format(i))
print('Total loss: {:.4e}, '
'style loss: {:.4e}, '
'content loss: {:.4e}, '
'time: {:.4f}s'.format(loss, style_score, content_score, time.time() - start_time))
print('Total time: {:.4f}s'.format(time.time() - global_start))
time.sleep(5)
print('Done')
|
[
"argparse.ArgumentParser",
"tensorflow.clip_by_value",
"tf1st.gram_matrix",
"tensorflow.executing_eagerly",
"tensorflow.Variable",
"tf1st.get_model",
"cv2.imshow",
"cv2.imwrite",
"cv2.waitKey",
"time.sleep",
"tensorflow.enable_eager_execution",
"tf1st.load_and_process_img",
"tf1st.deprocess_img",
"time.time",
"numpy.array",
"tf1st.get_feature_representations",
"PIL.Image.fromarray",
"tensorflow.train.AdamOptimizer",
"tf1st.compute_grads"
] |
[((176, 268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Neural Style Transfer with OpenCV and Tensorflow"""'}), "(description=\n 'Neural Style Transfer with OpenCV and Tensorflow')\n", (199, 268), False, 'import argparse\n'), ((947, 974), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (972, 974), True, 'import tensorflow as tf\n'), ((1048, 1065), 'tf1st.get_model', 'tf1st.get_model', ([], {}), '()\n', (1063, 1065), False, 'import tf1st\n'), ((1260, 1336), 'tf1st.get_feature_representations', 'tf1st.get_feature_representations', (['model', 'args.input_image', 'args.style_image'], {}), '(model, args.input_image, args.style_image)\n', (1293, 1336), False, 'import tf1st\n'), ((1472, 1516), 'tf1st.load_and_process_img', 'tf1st.load_and_process_img', (['args.input_image'], {}), '(args.input_image)\n', (1498, 1516), False, 'import tf1st\n'), ((1532, 1573), 'tensorflow.Variable', 'tf.Variable', (['init_image'], {'dtype': 'tf.float32'}), '(init_image, dtype=tf.float32)\n', (1543, 1573), True, 'import tensorflow as tf\n'), ((1607, 1671), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(5)', 'beta1': '(0.99)', 'epsilon': '(0.1)'}), '(learning_rate=5, beta1=0.99, epsilon=0.1)\n', (1629, 1671), True, 'import tensorflow as tf\n'), ((2149, 2160), 'time.time', 'time.time', ([], {}), '()\n', (2158, 2160), False, 'import time\n'), ((2178, 2189), 'time.time', 'time.time', ([], {}), '()\n', (2187, 2189), False, 'import time\n'), ((2208, 2244), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (2216, 2244), True, 'import numpy as np\n'), ((3503, 3516), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3513, 3516), False, 'import time\n'), ((1362, 1394), 'tf1st.gram_matrix', 'tf1st.gram_matrix', (['style_feature'], {}), '(style_feature)\n', (1379, 1394), False, 'import tf1st\n'), ((2375, 2399), 'tf1st.compute_grads', 'tf1st.compute_grads', (['cfg'], {}), '(cfg)\n', (2394, 2399), False, 'import tf1st\n'), ((2509, 2557), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['init_image', 'min_vals', 'max_vals'], {}), '(init_image, min_vals, max_vals)\n', (2525, 2557), True, 'import tensorflow as tf\n'), ((2604, 2615), 'time.time', 'time.time', ([], {}), '()\n', (2613, 2615), False, 'import time\n'), ((2807, 2818), 'time.time', 'time.time', ([], {}), '()\n', (2816, 2818), False, 'import time\n'), ((2932, 2961), 'tf1st.deprocess_img', 'tf1st.deprocess_img', (['plot_img'], {}), '(plot_img)\n', (2951, 2961), False, 'import tf1st\n'), ((3077, 3121), 'cv2.imshow', 'cv2.imshow', (['"""Actual Styled Image"""', 'final_img'], {}), "('Actual Styled Image', final_img)\n", (3087, 3121), False, 'import cv2\n'), ((3126, 3167), 'cv2.imwrite', 'cv2.imwrite', (['args.result_image', 'final_img'], {}), '(args.result_image, final_img)\n', (3137, 3167), False, 'import cv2\n'), ((3172, 3186), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3183, 3186), False, 'import cv2\n'), ((1012, 1034), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (1032, 1034), True, 'import tensorflow as tf\n'), ((3026, 3051), 'PIL.Image.fromarray', 'Image.fromarray', (['plot_img'], {}), '(plot_img)\n', (3041, 3051), False, 'from PIL import Image\n'), ((3401, 3412), 'time.time', 'time.time', ([], {}), '()\n', (3410, 3412), False, 'import time\n'), ((3467, 3478), 'time.time', 'time.time', ([], {}), '()\n', (3476, 3478), False, 'import time\n')]
|
from functools import partial
import importlib
import pytest
from pathlib import Path
import zarr
import dask.array as dsa
import dask
import dask.core
import xarray
import numpy
from rechunker import api
_DIMENSION_KEY = "_ARRAY_DIMENSIONS"
def requires_import(module, *args):
try:
importlib.import_module(module)
except ImportError:
skip = True
else:
skip = False
mark = pytest.mark.skipif(skip, reason=f"requires {module}")
return pytest.param(*args, marks=mark)
requires_beam = partial(requires_import, "apache_beam")
requires_prefect = partial(requires_import, "prefect")
requires_pywren = partial(requires_import, "pywren_ibm_cloud")
@pytest.fixture(params=[(8000, 200), {"y": 8000, "x": 200}])
def target_chunks(request):
return request.param
def test_invalid_executor():
with pytest.raises(ValueError, match="unrecognized executor"):
api._get_executor("unknown")
@pytest.mark.parametrize("shape", [(100, 50)])
@pytest.mark.parametrize("source_chunks", [(10, 50)])
@pytest.mark.parametrize("target_chunks", [(20, 10)])
@pytest.mark.parametrize("max_mem", ["10MB"])
@pytest.mark.parametrize("executor", ["dask"])
def test_rechunk_dataset(
tmp_path, shape, source_chunks, target_chunks, max_mem, executor
):
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
a = numpy.arange(numpy.prod(shape)).reshape(shape).astype("f4")
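    # The NaN row exercises the _FillValue encoding that is checked below.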
a[-1] = numpy.nan
ds = xarray.Dataset(
dict(
a=xarray.DataArray(
a, dims=["x", "y"], attrs={"a1": 1, "a2": [1, 2, 3], "a3": "x"}
),
b=xarray.DataArray(numpy.ones(shape[0]), dims=["x"]),
c=xarray.DataArray(numpy.ones(shape[1]), dims=["y"]),
),
coords=dict(
cx=xarray.DataArray(numpy.ones(shape[0]), dims=["x"]),
cy=xarray.DataArray(numpy.ones(shape[1]), dims=["y"]),
),
attrs={"a1": 1, "a2": [1, 2, 3], "a3": "x"},
)
ds = ds.chunk(chunks=dict(zip(["x", "y"], source_chunks)))
options = dict(
a=dict(
compressor=zarr.Blosc(cname="zstd"),
dtype="int32",
scale_factor=0.1,
_FillValue=-9999,
)
)
rechunked = api.rechunk(
ds,
target_chunks=dict(a=target_chunks, b=target_chunks[:1]),
max_mem=max_mem,
target_store=target_store,
target_options=options,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
rechunked.execute()
# Validate encoded variables
dst = xarray.open_zarr(target_store, decode_cf=False)
assert dst.a.dtype == options["a"]["dtype"]
assert all(dst.a.values[-1] == options["a"]["_FillValue"])
assert dst.a.encoding["compressor"] is not None
# Validate decoded variables
dst = xarray.open_zarr(target_store, decode_cf=True)
assert dst.a.data.chunksize == target_chunks
assert dst.b.data.chunksize == target_chunks[:1]
assert dst.c.data.chunksize == source_chunks[1:]
xarray.testing.assert_equal(ds.compute(), dst.compute())
assert ds.attrs == dst.attrs
@pytest.mark.parametrize("shape", [(8000, 8000)])
@pytest.mark.parametrize("source_chunks", [(200, 8000)])
@pytest.mark.parametrize("dtype", ["f4"])
@pytest.mark.parametrize("max_mem", [25600000, "25.6MB"])
@pytest.mark.parametrize(
"executor",
[
"dask",
"python",
requires_beam("beam"),
requires_prefect("prefect"),
requires_pywren("pywren"),
],
)
@pytest.mark.parametrize(
"dims,target_chunks",
[
(None, (8000, 200)),
# would be nice to support this syntax eventually
pytest.param(None, (-1, 200), marks=pytest.mark.xfail),
(["y", "x"], (8000, 200)),
(["y", "x"], {"y": 8000, "x": 200}),
# can't infer missing dimension chunk specification
pytest.param(["y", "x"], {"x": 200}, marks=pytest.mark.xfail),
# can't use dict syntax without array dims
pytest.param(None, {"y": 8000, "x": 200}, marks=pytest.mark.xfail),
],
)
def test_rechunk_array(
tmp_path, shape, source_chunks, dtype, dims, target_chunks, max_mem, executor
):
### Create source array ###
store_source = str(tmp_path / "source.zarr")
source_array = zarr.ones(
shape, chunks=source_chunks, dtype=dtype, store=store_source
)
# add some attributes
source_array.attrs["foo"] = "bar"
if dims:
source_array.attrs[_DIMENSION_KEY] = dims
### Create targets ###
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
rechunked = api.rechunk(
source_array,
target_chunks,
max_mem,
target_store,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
target_array = zarr.open(target_store)
if isinstance(target_chunks, dict):
target_chunks_list = [target_chunks[d] for d in dims]
else:
target_chunks_list = target_chunks
assert target_array.chunks == tuple(target_chunks_list)
assert dict(source_array.attrs) == dict(target_array.attrs)
result = rechunked.execute()
assert isinstance(result, zarr.Array)
a_tar = dsa.from_zarr(target_array)
assert dsa.equal(a_tar, 1).all().compute()
@pytest.mark.parametrize("shape", [(8000, 8000)])
@pytest.mark.parametrize("source_chunks", [(200, 8000), (800, 8000)])
@pytest.mark.parametrize("dtype", ["f4"])
@pytest.mark.parametrize("max_mem", [25600000])
@pytest.mark.parametrize(
"target_chunks", [(200, 8000), (800, 8000), (8000, 200), (400, 8000),],
)
def test_rechunk_dask_array(
tmp_path, shape, source_chunks, dtype, target_chunks, max_mem
):
### Create source array ###
source_array = dsa.ones(shape, chunks=source_chunks, dtype=dtype)
### Create targets ###
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
rechunked = api.rechunk(
source_array, target_chunks, max_mem, target_store, temp_store=temp_store
)
assert isinstance(rechunked, api.Rechunked)
target_array = zarr.open(target_store)
assert target_array.chunks == tuple(target_chunks)
result = rechunked.execute()
assert isinstance(result, zarr.Array)
a_tar = dsa.from_zarr(target_array)
assert dsa.equal(a_tar, 1).all().compute()
@pytest.mark.parametrize(
"executor",
[
"dask",
"python",
requires_beam("beam"),
requires_prefect("prefect"),
requires_pywren("pywren"),
],
)
def test_rechunk_group(tmp_path, executor):
store_source = str(tmp_path / "source.zarr")
group = zarr.group(store_source)
group.attrs["foo"] = "bar"
# 800 byte chunks
a = group.ones("a", shape=(5, 10, 20), chunks=(1, 10, 20), dtype="f4")
a.attrs["foo"] = "bar"
b = group.ones("b", shape=(20,), chunks=(10,), dtype="f4")
b.attrs["foo"] = "bar"
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
max_mem = 1600 # should force a two-step plan for a
target_chunks = {"a": (5, 10, 4), "b": (20,)}
rechunked = api.rechunk(
group,
target_chunks,
max_mem,
target_store,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
target_group = zarr.open(target_store)
assert "a" in target_group
assert "b" in target_group
assert dict(group.attrs) == dict(target_group.attrs)
rechunked.execute()
for aname in target_chunks:
assert target_group[aname].chunks == target_chunks[aname]
a_tar = dsa.from_zarr(target_group[aname])
assert dsa.equal(a_tar, 1).all().compute()
def sample_xarray_dataset():
return xarray.Dataset(
dict(
a=xarray.DataArray(
dsa.ones(shape=(10, 20, 40), chunks=(5, 10, 4), dtype="f4"),
dims=("x", "y", "z"),
attrs={"foo": "bar"},
),
b=xarray.DataArray(
dsa.ones(shape=(8000,), chunks=(200,), dtype="f4"),
dims="w",
attrs={"foo": "bar"},
),
),
attrs={"foo": "bar"},
)
def sample_zarr_group(tmp_path):
path = str(tmp_path / "source.zarr")
group = zarr.group(path)
group.attrs["foo"] = "bar"
# 800 byte chunks
a = group.ones("a", shape=(10, 20, 40), chunks=(5, 10, 4), dtype="f4")
a.attrs["foo"] = "bar"
b = group.ones("b", shape=(8000,), chunks=(200,), dtype="f4")
b.attrs["foo"] = "bar"
return group
def sample_zarr_array(tmp_path):
shape = (8000, 8000)
source_chunks = (200, 8000)
dtype = "f4"
dims = None
path = str(tmp_path / "source.zarr")
array = zarr.ones(shape, chunks=source_chunks, dtype=dtype, store=path)
# add some attributes
array.attrs["foo"] = "bar"
if dims:
array.attrs[_DIMENSION_KEY] = dims
return array
@pytest.fixture(params=["Array", "Group", "Dataset"])
def rechunk_args(tmp_path, request):
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
max_mem = 1600 # should force a two-step plan for a and b
target_chunks = {"a": (10, 5, 4), "b": (100,)}
args = dict(
target_chunks=target_chunks,
max_mem=max_mem,
target_store=target_store,
temp_store=temp_store,
)
if request.param == "Dataset":
ds = sample_xarray_dataset()
args.update({"source": ds})
elif request.param == "Group":
group = sample_zarr_group(tmp_path)
args.update({"source": group})
else:
array = sample_zarr_array(tmp_path)
max_mem = 25600000
target_chunks = (8000, 200)
args.update(
{"source": array, "target_chunks": target_chunks, "max_mem": max_mem,}
)
return args
@pytest.fixture()
def rechunked(rechunk_args):
return api.rechunk(**rechunk_args)
def test_repr(rechunked):
assert isinstance(rechunked, api.Rechunked)
repr_str = repr(rechunked)
assert repr_str.startswith("<Rechunked>")
assert all(thing in repr_str for thing in ["Source", "Intermediate", "Target"])
def test_repr_html(rechunked):
rechunked._repr_html_() # no exceptions
def _is_collection(source):
assert isinstance(
source,
(dask.array.Array, zarr.core.Array, zarr.hierarchy.Group, xarray.Dataset),
)
return isinstance(source, (zarr.hierarchy.Group, xarray.Dataset))
def _wrap_options(source, options):
if _is_collection(source):
options = {v: options for v in source}
return options
def test_rechunk_option_overwrite(rechunk_args):
api.rechunk(**rechunk_args).execute()
# TODO: make this match more reliable based on outcome of
# https://github.com/zarr-developers/zarr-python/issues/605
with pytest.raises(ValueError, match=r"path .* contains an array"):
api.rechunk(**rechunk_args).execute()
options = _wrap_options(rechunk_args["source"], dict(overwrite=True))
api.rechunk(**rechunk_args, target_options=options).execute()
def test_rechunk_passthrough(rechunk_args):
# Verify that no errors are raised when the target chunks == source chunks
if _is_collection(rechunk_args["source"]):
rechunk_args["target_chunks"] = {v: None for v in rechunk_args["source"]}
else:
rechunk_args["target_chunks"] = None
api.rechunk(**rechunk_args).execute()
def test_rechunk_no_temp_dir_provided_error(rechunk_args):
# Verify that the correct error is raised when no temp_store is given
# and the chunks to write differ from the chunks to read
args = {k: v for k, v in rechunk_args.items() if k != "temp_store"}
with pytest.raises(ValueError, match="A temporary store location must be provided"):
api.rechunk(**args).execute()
def test_rechunk_option_compression(rechunk_args):
def rechunk(compressor):
options = _wrap_options(
rechunk_args["source"], dict(overwrite=True, compressor=compressor)
)
rechunked = api.rechunk(**rechunk_args, target_options=options)
rechunked.execute()
return sum(
file.stat().st_size
for file in Path(rechunked._target.store.path).rglob("*")
)
size_uncompressed = rechunk(None)
size_compressed = rechunk(
zarr.Blosc(cname="zstd", clevel=9, shuffle=zarr.Blosc.SHUFFLE)
)
assert size_compressed < size_uncompressed
def test_rechunk_invalid_option(rechunk_args):
if isinstance(rechunk_args["source"], xarray.Dataset):
# Options are essentially unbounded for Xarray (for CF encoding params),
# so check only options with special error cases
options = _wrap_options(rechunk_args["source"], {"chunks": 10})
with pytest.raises(
ValueError,
match="Chunks must be provided in ``target_chunks`` rather than options",
):
api.rechunk(**rechunk_args, target_options=options)
else:
for o in ["shape", "chunks", "dtype", "store", "name", "unknown"]:
options = _wrap_options(rechunk_args["source"], {o: True})
with pytest.raises(ValueError, match=f"Zarr options must not include {o}"):
api.rechunk(**rechunk_args, temp_options=options)
with pytest.raises(ValueError, match=f"Zarr options must not include {o}"):
api.rechunk(**rechunk_args, target_options=options)
def test_rechunk_bad_target_chunks(rechunk_args):
if not _is_collection(rechunk_args["source"]):
return
rechunk_args = dict(rechunk_args)
rechunk_args["target_chunks"] = (10, 10)
with pytest.raises(
ValueError, match="You must specify ``target-chunks`` as a dict"
):
api.rechunk(**rechunk_args)
def test_rechunk_invalid_source(tmp_path):
with pytest.raises(
ValueError,
match="Source must be a Zarr Array, Zarr Group, Dask Array or Xarray Dataset",
):
api.rechunk(
[[1, 2], [3, 4]], target_chunks=(10, 10), max_mem=100, target_store=tmp_path
)
@pytest.mark.parametrize(
"source,target_chunks",
[
(sample_xarray_dataset(), {"a": (10, 5, 4), "b": (100,)}),
(dsa.ones((20, 10), chunks=(5, 5)), (10, 10)),
],
)
@pytest.mark.parametrize(
"executor",
[
"python",
requires_beam("beam"),
requires_prefect("prefect"),
requires_pywren("pywren"),
],
)
def test_unsupported_executor(tmp_path, source, target_chunks, executor):
with pytest.raises(
NotImplementedError, match="Executor type .* not supported for source",
):
api.rechunk(
source,
target_chunks=target_chunks,
max_mem=1600,
target_store=str(tmp_path / "target.zarr"),
temp_store=str(tmp_path / "temp.zarr"),
executor=executor,
)
def test_rechunk_no_target_chunks(rechunk_args):
rechunk_args = dict(rechunk_args)
if _is_collection(rechunk_args["source"]):
rechunk_args["target_chunks"] = {v: None for v in rechunk_args["source"]}
else:
rechunk_args["target_chunks"] = None
api.rechunk(**rechunk_args)
def test_no_intermediate():
a = zarr.ones((4, 4), chunks=(2, 2))
b = zarr.ones((4, 4), chunks=(4, 1))
rechunked = api.Rechunked(None, None, source=a, intermediate=None, target=b)
assert "Intermediate" not in repr(rechunked)
rechunked._repr_html_()
def test_no_intermediate_fused(tmp_path):
shape = (8000, 8000)
source_chunks = (200, 8000)
dtype = "f4"
max_mem = 25600000
target_chunks = (400, 8000)
store_source = str(tmp_path / "source.zarr")
source_array = zarr.ones(
shape, chunks=source_chunks, dtype=dtype, store=store_source
)
target_store = str(tmp_path / "target.zarr")
rechunked = api.rechunk(source_array, target_chunks, max_mem, target_store)
num_tasks = len([v for v in rechunked.plan.dask.values() if dask.core.istask(v)])
assert num_tasks < 20 # less than if no fuse
def test_pywren_function_executor(tmp_path):
pytest.importorskip("pywren_ibm_cloud")
from rechunker.executors.pywren import (
pywren_local_function_executor,
PywrenExecutor,
)
# Create a Pywren function exectutor that we manage ourselves
# and pass in to rechunker's PywrenExecutor
with pywren_local_function_executor() as function_executor:
executor = PywrenExecutor(function_executor)
shape = (8000, 8000)
source_chunks = (200, 8000)
dtype = "f4"
max_mem = 25600000
target_chunks = (400, 8000)
### Create source array ###
store_source = str(tmp_path / "source.zarr")
source_array = zarr.ones(
shape, chunks=source_chunks, dtype=dtype, store=store_source
)
### Create targets ###
target_store = str(tmp_path / "target.zarr")
temp_store = str(tmp_path / "temp.zarr")
rechunked = api.rechunk(
source_array,
target_chunks,
max_mem,
target_store,
temp_store=temp_store,
executor=executor,
)
assert isinstance(rechunked, api.Rechunked)
target_array = zarr.open(target_store)
assert target_array.chunks == tuple(target_chunks)
result = rechunked.execute()
assert isinstance(result, zarr.Array)
a_tar = dsa.from_zarr(target_array)
assert dsa.equal(a_tar, 1).all().compute()
|
[
"dask.array.equal",
"rechunker.api.rechunk",
"numpy.ones",
"pathlib.Path",
"pytest.mark.skipif",
"rechunker.executors.pywren.pywren_local_function_executor",
"dask.core.istask",
"pytest.mark.parametrize",
"zarr.Blosc",
"numpy.prod",
"rechunker.api._get_executor",
"zarr.open",
"dask.array.ones",
"zarr.ones",
"pytest.raises",
"rechunker.api.Rechunked",
"functools.partial",
"importlib.import_module",
"pytest.fixture",
"zarr.group",
"dask.array.from_zarr",
"pytest.importorskip",
"pytest.param",
"xarray.DataArray",
"xarray.open_zarr",
"rechunker.executors.pywren.PywrenExecutor"
] |
[((534, 573), 'functools.partial', 'partial', (['requires_import', '"""apache_beam"""'], {}), "(requires_import, 'apache_beam')\n", (541, 573), False, 'from functools import partial\n'), ((593, 628), 'functools.partial', 'partial', (['requires_import', '"""prefect"""'], {}), "(requires_import, 'prefect')\n", (600, 628), False, 'from functools import partial\n'), ((647, 691), 'functools.partial', 'partial', (['requires_import', '"""pywren_ibm_cloud"""'], {}), "(requires_import, 'pywren_ibm_cloud')\n", (654, 691), False, 'from functools import partial\n'), ((695, 754), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[(8000, 200), {'y': 8000, 'x': 200}]"}), "(params=[(8000, 200), {'y': 8000, 'x': 200}])\n", (709, 754), False, 'import pytest\n'), ((946, 991), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(100, 50)]'], {}), "('shape', [(100, 50)])\n", (969, 991), False, 'import pytest\n'), ((993, 1045), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""source_chunks"""', '[(10, 50)]'], {}), "('source_chunks', [(10, 50)])\n", (1016, 1045), False, 'import pytest\n'), ((1047, 1099), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunks"""', '[(20, 10)]'], {}), "('target_chunks', [(20, 10)])\n", (1070, 1099), False, 'import pytest\n'), ((1101, 1145), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_mem"""', "['10MB']"], {}), "('max_mem', ['10MB'])\n", (1124, 1145), False, 'import pytest\n'), ((1147, 1192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""executor"""', "['dask']"], {}), "('executor', ['dask'])\n", (1170, 1192), False, 'import pytest\n'), ((3194, 3242), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(8000, 8000)]'], {}), "('shape', [(8000, 8000)])\n", (3217, 3242), False, 'import pytest\n'), ((3244, 3299), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""source_chunks"""', '[(200, 8000)]'], {}), "('source_chunks', [(200, 8000)])\n", (3267, 3299), False, 'import pytest\n'), ((3301, 3341), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['f4']"], {}), "('dtype', ['f4'])\n", (3324, 3341), False, 'import pytest\n'), ((3343, 3399), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_mem"""', "[25600000, '25.6MB']"], {}), "('max_mem', [25600000, '25.6MB'])\n", (3366, 3399), False, 'import pytest\n'), ((5411, 5459), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(8000, 8000)]'], {}), "('shape', [(8000, 8000)])\n", (5434, 5459), False, 'import pytest\n'), ((5461, 5529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""source_chunks"""', '[(200, 8000), (800, 8000)]'], {}), "('source_chunks', [(200, 8000), (800, 8000)])\n", (5484, 5529), False, 'import pytest\n'), ((5531, 5571), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['f4']"], {}), "('dtype', ['f4'])\n", (5554, 5571), False, 'import pytest\n'), ((5573, 5619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_mem"""', '[25600000]'], {}), "('max_mem', [25600000])\n", (5596, 5619), False, 'import pytest\n'), ((5621, 5720), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunks"""', '[(200, 8000), (800, 8000), (8000, 200), (400, 8000)]'], {}), "('target_chunks', [(200, 8000), (800, 8000), (8000, \n 200), (400, 8000)])\n", (5644, 5720), False, 'import pytest\n'), ((9101, 9153), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['Array', 'Group', 'Dataset']"}), "(params=['Array', 
'Group', 'Dataset'])\n", (9115, 9153), False, 'import pytest\n'), ((10028, 10044), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (10042, 10044), False, 'import pytest\n'), ((419, 472), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': 'f"""requires {module}"""'}), "(skip, reason=f'requires {module}')\n", (437, 472), False, 'import pytest\n'), ((484, 515), 'pytest.param', 'pytest.param', (['*args'], {'marks': 'mark'}), '(*args, marks=mark)\n', (496, 515), False, 'import pytest\n'), ((2640, 2687), 'xarray.open_zarr', 'xarray.open_zarr', (['target_store'], {'decode_cf': '(False)'}), '(target_store, decode_cf=False)\n', (2656, 2687), False, 'import xarray\n'), ((2895, 2941), 'xarray.open_zarr', 'xarray.open_zarr', (['target_store'], {'decode_cf': '(True)'}), '(target_store, decode_cf=True)\n', (2911, 2941), False, 'import xarray\n'), ((4360, 4431), 'zarr.ones', 'zarr.ones', (['shape'], {'chunks': 'source_chunks', 'dtype': 'dtype', 'store': 'store_source'}), '(shape, chunks=source_chunks, dtype=dtype, store=store_source)\n', (4369, 4431), False, 'import zarr\n'), ((4712, 4822), 'rechunker.api.rechunk', 'api.rechunk', (['source_array', 'target_chunks', 'max_mem', 'target_store'], {'temp_store': 'temp_store', 'executor': 'executor'}), '(source_array, target_chunks, max_mem, target_store, temp_store=\n temp_store, executor=executor)\n', (4723, 4822), False, 'from rechunker import api\n'), ((4941, 4964), 'zarr.open', 'zarr.open', (['target_store'], {}), '(target_store)\n', (4950, 4964), False, 'import zarr\n'), ((5333, 5360), 'dask.array.from_zarr', 'dsa.from_zarr', (['target_array'], {}), '(target_array)\n', (5346, 5360), True, 'import dask.array as dsa\n'), ((5874, 5924), 'dask.array.ones', 'dsa.ones', (['shape'], {'chunks': 'source_chunks', 'dtype': 'dtype'}), '(shape, chunks=source_chunks, dtype=dtype)\n', (5882, 5924), True, 'import dask.array as dsa\n'), ((6064, 6155), 'rechunker.api.rechunk', 'api.rechunk', (['source_array', 'target_chunks', 'max_mem', 'target_store'], {'temp_store': 'temp_store'}), '(source_array, target_chunks, max_mem, target_store, temp_store=\n temp_store)\n', (6075, 6155), False, 'from rechunker import api\n'), ((6233, 6256), 'zarr.open', 'zarr.open', (['target_store'], {}), '(target_store)\n', (6242, 6256), False, 'import zarr\n'), ((6401, 6428), 'dask.array.from_zarr', 'dsa.from_zarr', (['target_array'], {}), '(target_array)\n', (6414, 6428), True, 'import dask.array as dsa\n'), ((6777, 6801), 'zarr.group', 'zarr.group', (['store_source'], {}), '(store_source)\n', (6787, 6801), False, 'import zarr\n'), ((7267, 7370), 'rechunker.api.rechunk', 'api.rechunk', (['group', 'target_chunks', 'max_mem', 'target_store'], {'temp_store': 'temp_store', 'executor': 'executor'}), '(group, target_chunks, max_mem, target_store, temp_store=\n temp_store, executor=executor)\n', (7278, 7370), False, 'from rechunker import api\n'), ((7489, 7512), 'zarr.open', 'zarr.open', (['target_store'], {}), '(target_store)\n', (7498, 7512), False, 'import zarr\n'), ((8443, 8459), 'zarr.group', 'zarr.group', (['path'], {}), '(path)\n', (8453, 8459), False, 'import zarr\n'), ((8904, 8967), 'zarr.ones', 'zarr.ones', (['shape'], {'chunks': 'source_chunks', 'dtype': 'dtype', 'store': 'path'}), '(shape, chunks=source_chunks, dtype=dtype, store=path)\n', (8913, 8967), False, 'import zarr\n'), ((10085, 10112), 'rechunker.api.rechunk', 'api.rechunk', ([], {}), '(**rechunk_args)\n', (10096, 10112), False, 'from rechunker import api\n'), ((15377, 15404), 'rechunker.api.rechunk', 
'api.rechunk', ([], {}), '(**rechunk_args)\n', (15388, 15404), False, 'from rechunker import api\n'), ((15443, 15475), 'zarr.ones', 'zarr.ones', (['(4, 4)'], {'chunks': '(2, 2)'}), '((4, 4), chunks=(2, 2))\n', (15452, 15475), False, 'import zarr\n'), ((15484, 15516), 'zarr.ones', 'zarr.ones', (['(4, 4)'], {'chunks': '(4, 1)'}), '((4, 4), chunks=(4, 1))\n', (15493, 15516), False, 'import zarr\n'), ((15533, 15597), 'rechunker.api.Rechunked', 'api.Rechunked', (['None', 'None'], {'source': 'a', 'intermediate': 'None', 'target': 'b'}), '(None, None, source=a, intermediate=None, target=b)\n', (15546, 15597), False, 'from rechunker import api\n'), ((15917, 15988), 'zarr.ones', 'zarr.ones', (['shape'], {'chunks': 'source_chunks', 'dtype': 'dtype', 'store': 'store_source'}), '(shape, chunks=source_chunks, dtype=dtype, store=store_source)\n', (15926, 15988), False, 'import zarr\n'), ((16070, 16133), 'rechunker.api.rechunk', 'api.rechunk', (['source_array', 'target_chunks', 'max_mem', 'target_store'], {}), '(source_array, target_chunks, max_mem, target_store)\n', (16081, 16133), False, 'from rechunker import api\n'), ((16322, 16361), 'pytest.importorskip', 'pytest.importorskip', (['"""pywren_ibm_cloud"""'], {}), "('pywren_ibm_cloud')\n", (16341, 16361), False, 'import pytest\n'), ((301, 332), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (324, 332), False, 'import importlib\n'), ((848, 904), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognized executor"""'}), "(ValueError, match='unrecognized executor')\n", (861, 904), False, 'import pytest\n'), ((914, 942), 'rechunker.api._get_executor', 'api._get_executor', (['"""unknown"""'], {}), "('unknown')\n", (931, 942), False, 'from rechunker import api\n'), ((3747, 3801), 'pytest.param', 'pytest.param', (['None', '(-1, 200)'], {'marks': 'pytest.mark.xfail'}), '(None, (-1, 200), marks=pytest.mark.xfail)\n', (3759, 3801), False, 'import pytest\n'), ((3951, 4012), 'pytest.param', 'pytest.param', (["['y', 'x']", "{'x': 200}"], {'marks': 'pytest.mark.xfail'}), "(['y', 'x'], {'x': 200}, marks=pytest.mark.xfail)\n", (3963, 4012), False, 'import pytest\n'), ((4073, 4139), 'pytest.param', 'pytest.param', (['None', "{'y': 8000, 'x': 200}"], {'marks': 'pytest.mark.xfail'}), "(None, {'y': 8000, 'x': 200}, marks=pytest.mark.xfail)\n", (4085, 4139), False, 'import pytest\n'), ((7771, 7805), 'dask.array.from_zarr', 'dsa.from_zarr', (['target_group[aname]'], {}), '(target_group[aname])\n', (7784, 7805), True, 'import dask.array as dsa\n'), ((11020, 11080), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""path .* contains an array"""'}), "(ValueError, match='path .* contains an array')\n", (11033, 11080), False, 'import pytest\n'), ((11897, 11975), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""A temporary store location must be provided"""'}), "(ValueError, match='A temporary store location must be provided')\n", (11910, 11975), False, 'import pytest\n'), ((12240, 12291), 'rechunker.api.rechunk', 'api.rechunk', ([], {'target_options': 'options'}), '(**rechunk_args, target_options=options)\n', (12251, 12291), False, 'from rechunker import api\n'), ((12530, 12592), 'zarr.Blosc', 'zarr.Blosc', ([], {'cname': '"""zstd"""', 'clevel': '(9)', 'shuffle': 'zarr.Blosc.SHUFFLE'}), "(cname='zstd', clevel=9, shuffle=zarr.Blosc.SHUFFLE)\n", (12540, 12592), False, 'import zarr\n'), ((13853, 13932), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""You must specify 
``target-chunks`` as a dict"""'}), "(ValueError, match='You must specify ``target-chunks`` as a dict')\n", (13866, 13932), False, 'import pytest\n'), ((13956, 13983), 'rechunker.api.rechunk', 'api.rechunk', ([], {}), '(**rechunk_args)\n', (13967, 13983), False, 'from rechunker import api\n'), ((14038, 14147), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Source must be a Zarr Array, Zarr Group, Dask Array or Xarray Dataset"""'}), "(ValueError, match=\n 'Source must be a Zarr Array, Zarr Group, Dask Array or Xarray Dataset')\n", (14051, 14147), False, 'import pytest\n'), ((14175, 14268), 'rechunker.api.rechunk', 'api.rechunk', (['[[1, 2], [3, 4]]'], {'target_chunks': '(10, 10)', 'max_mem': '(100)', 'target_store': 'tmp_path'}), '([[1, 2], [3, 4]], target_chunks=(10, 10), max_mem=100,\n target_store=tmp_path)\n', (14186, 14268), False, 'from rechunker import api\n'), ((14741, 14831), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""Executor type .* not supported for source"""'}), "(NotImplementedError, match=\n 'Executor type .* not supported for source')\n", (14754, 14831), False, 'import pytest\n'), ((16601, 16633), 'rechunker.executors.pywren.pywren_local_function_executor', 'pywren_local_function_executor', ([], {}), '()\n', (16631, 16633), False, 'from rechunker.executors.pywren import pywren_local_function_executor, PywrenExecutor\n'), ((16676, 16709), 'rechunker.executors.pywren.PywrenExecutor', 'PywrenExecutor', (['function_executor'], {}), '(function_executor)\n', (16690, 16709), False, 'from rechunker.executors.pywren import pywren_local_function_executor, PywrenExecutor\n'), ((16973, 17044), 'zarr.ones', 'zarr.ones', (['shape'], {'chunks': 'source_chunks', 'dtype': 'dtype', 'store': 'store_source'}), '(shape, chunks=source_chunks, dtype=dtype, store=store_source)\n', (16982, 17044), False, 'import zarr\n'), ((17222, 17332), 'rechunker.api.rechunk', 'api.rechunk', (['source_array', 'target_chunks', 'max_mem', 'target_store'], {'temp_store': 'temp_store', 'executor': 'executor'}), '(source_array, target_chunks, max_mem, target_store, temp_store=\n temp_store, executor=executor)\n', (17233, 17332), False, 'from rechunker import api\n'), ((17487, 17510), 'zarr.open', 'zarr.open', (['target_store'], {}), '(target_store)\n', (17496, 17510), False, 'import zarr\n'), ((17671, 17698), 'dask.array.from_zarr', 'dsa.from_zarr', (['target_array'], {}), '(target_array)\n', (17684, 17698), True, 'import dask.array as dsa\n'), ((10847, 10874), 'rechunker.api.rechunk', 'api.rechunk', ([], {}), '(**rechunk_args)\n', (10858, 10874), False, 'from rechunker import api\n'), ((11207, 11258), 'rechunker.api.rechunk', 'api.rechunk', ([], {'target_options': 'options'}), '(**rechunk_args, target_options=options)\n', (11218, 11258), False, 'from rechunker import api\n'), ((11582, 11609), 'rechunker.api.rechunk', 'api.rechunk', ([], {}), '(**rechunk_args)\n', (11593, 11609), False, 'from rechunker import api\n'), ((12977, 13081), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Chunks must be provided in ``target_chunks`` rather than options"""'}), "(ValueError, match=\n 'Chunks must be provided in ``target_chunks`` rather than options')\n", (12990, 13081), False, 'import pytest\n'), ((13125, 13176), 'rechunker.api.rechunk', 'api.rechunk', ([], {'target_options': 'options'}), '(**rechunk_args, target_options=options)\n', (13136, 13176), False, 'from rechunker import api\n'), ((14425, 14458), 'dask.array.ones', 'dsa.ones', (['(20, 10)'], {'chunks': 
'(5, 5)'}), '((20, 10), chunks=(5, 5))\n', (14433, 14458), True, 'import dask.array as dsa\n'), ((1529, 1614), 'xarray.DataArray', 'xarray.DataArray', (['a'], {'dims': "['x', 'y']", 'attrs': "{'a1': 1, 'a2': [1, 2, 3], 'a3': 'x'}"}), "(a, dims=['x', 'y'], attrs={'a1': 1, 'a2': [1, 2, 3], 'a3':\n 'x'})\n", (1545, 1614), False, 'import xarray\n'), ((11091, 11118), 'rechunker.api.rechunk', 'api.rechunk', ([], {}), '(**rechunk_args)\n', (11102, 11118), False, 'from rechunker import api\n'), ((11985, 12004), 'rechunker.api.rechunk', 'api.rechunk', ([], {}), '(**args)\n', (11996, 12004), False, 'from rechunker import api\n'), ((13350, 13419), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""Zarr options must not include {o}"""'}), "(ValueError, match=f'Zarr options must not include {o}')\n", (13363, 13419), False, 'import pytest\n'), ((13437, 13486), 'rechunker.api.rechunk', 'api.rechunk', ([], {'temp_options': 'options'}), '(**rechunk_args, temp_options=options)\n', (13448, 13486), False, 'from rechunker import api\n'), ((13504, 13573), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""Zarr options must not include {o}"""'}), "(ValueError, match=f'Zarr options must not include {o}')\n", (13517, 13573), False, 'import pytest\n'), ((13591, 13642), 'rechunker.api.rechunk', 'api.rechunk', ([], {'target_options': 'options'}), '(**rechunk_args, target_options=options)\n', (13602, 13642), False, 'from rechunker import api\n'), ((16199, 16218), 'dask.core.istask', 'dask.core.istask', (['v'], {}), '(v)\n', (16215, 16218), False, 'import dask\n'), ((1673, 1693), 'numpy.ones', 'numpy.ones', (['shape[0]'], {}), '(shape[0])\n', (1683, 1693), False, 'import numpy\n'), ((1739, 1759), 'numpy.ones', 'numpy.ones', (['shape[1]'], {}), '(shape[1])\n', (1749, 1759), False, 'import numpy\n'), ((2132, 2156), 'zarr.Blosc', 'zarr.Blosc', ([], {'cname': '"""zstd"""'}), "(cname='zstd')\n", (2142, 2156), False, 'import zarr\n'), ((5372, 5391), 'dask.array.equal', 'dsa.equal', (['a_tar', '(1)'], {}), '(a_tar, 1)\n', (5381, 5391), True, 'import dask.array as dsa\n'), ((6440, 6459), 'dask.array.equal', 'dsa.equal', (['a_tar', '(1)'], {}), '(a_tar, 1)\n', (6449, 6459), True, 'import dask.array as dsa\n'), ((7977, 8036), 'dask.array.ones', 'dsa.ones', ([], {'shape': '(10, 20, 40)', 'chunks': '(5, 10, 4)', 'dtype': '"""f4"""'}), "(shape=(10, 20, 40), chunks=(5, 10, 4), dtype='f4')\n", (7985, 8036), True, 'import dask.array as dsa\n'), ((8177, 8227), 'dask.array.ones', 'dsa.ones', ([], {'shape': '(8000,)', 'chunks': '(200,)', 'dtype': '"""f4"""'}), "(shape=(8000,), chunks=(200,), dtype='f4')\n", (8185, 8227), True, 'import dask.array as dsa\n'), ((1407, 1424), 'numpy.prod', 'numpy.prod', (['shape'], {}), '(shape)\n', (1417, 1424), False, 'import numpy\n'), ((1838, 1858), 'numpy.ones', 'numpy.ones', (['shape[0]'], {}), '(shape[0])\n', (1848, 1858), False, 'import numpy\n'), ((1905, 1925), 'numpy.ones', 'numpy.ones', (['shape[1]'], {}), '(shape[1])\n', (1915, 1925), False, 'import numpy\n'), ((7821, 7840), 'dask.array.equal', 'dsa.equal', (['a_tar', '(1)'], {}), '(a_tar, 1)\n', (7830, 7840), True, 'import dask.array as dsa\n'), ((17714, 17733), 'dask.array.equal', 'dsa.equal', (['a_tar', '(1)'], {}), '(a_tar, 1)\n', (17723, 17733), True, 'import dask.array as dsa\n'), ((12396, 12430), 'pathlib.Path', 'Path', (['rechunked._target.store.path'], {}), '(rechunked._target.store.path)\n', (12400, 12430), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python3
"""The main entry point to the PDP trainer/tester/predictor."""
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file
# in the project root for full license information.
import numpy as np
import torch
import torch.optim as optim
import logging
import argparse, os, yaml, csv
from pdp.generator import *
from pdp.trainer import SatFactorGraphTrainer
##########################################################################################################################
def write_to_csv(result_list, file_path):
with open(file_path, mode = 'w', newline = '') as f:
writer = csv.writer(f, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
for row in result_list:
writer.writerow([row[0], row[1][1, 0]])
def write_to_csv_time(result_list, file_path):
with open(file_path, mode = 'w', newline = '') as f:
writer = csv.writer(f, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
for row in result_list:
writer.writerow([row[0], row[2]])
def run(random_seed, config_file, is_training, load_model, cpu, reset_step, use_generator, batch_replication):
"""Runs the train/test/predict procedures."""
if not use_generator:
np.random.seed(random_seed)
torch.manual_seed(random_seed)
# Set the configurations (from either JSON or YAML file)
with open(config_file, 'r') as f:
config = yaml.load(f)
config['train_path'] = [os.path.abspath(p) for p in config['train_path']]
    config['validation_path'] = [os.path.abspath(p) for p in config['validation_path']]
config['model_path'] = os.path.abspath(config['model_path'])
# Set the logger
format = '[%(levelname)s] %(asctime)s - %(name)s: %(message)s'
logging.basicConfig(level = logging.DEBUG, format = format)
logger = logging.getLogger(config['model_name'] + ' (' + config['version'] + ')')
    # Check whether the input path is a list of files or a single directory to scan
if not isinstance(config['train_path'], list):
config['train_path'] = [os.path.join(config['train_path'], f) for f in os.listdir(config['train_path']) if
os.path.isfile(os.path.join(config['train_path'], f)) and f.endswith('.json')]
if not isinstance(config['validation_path'], list):
config['validation_path'] = [os.path.join(config['validation_path'], f) for f in os.listdir(config['validation_path']) if
os.path.isfile(os.path.join(config['validation_path'], f)) and f.endswith('.json')]
if config['verbose']:
if use_generator:
logger.info("Generating training examples via %s generator." % config['generator'])
else:
logger.info("Training file(s): %s" % config['train_path'])
logger.info("Validation file(s): %s" % config['validation_path'])
best_model_path_base = os.path.join(os.path.relpath(config['model_path']), config['model_name'], config['version'], "best")
last_model_path_base = os.path.join(os.path.relpath(config['model_path']), config['model_name'], config['version'], "last")
if not os.path.exists(best_model_path_base):
os.makedirs(best_model_path_base)
if not os.path.exists(last_model_path_base):
os.makedirs(last_model_path_base)
trainer = SatFactorGraphTrainer(config = config, use_cuda = not cpu, logger = logger)
# Training
if is_training:
if config['verbose']:
logger.info("Starting the training phase...")
generator = None
if use_generator:
if config['generator'] == 'modular':
generator = ModularCNFGenerator(config['min_k'], config['min_n'], config['max_n'], config['min_q'], config['max_q'], config['min_c'],
config['max_c'], config['min_alpha'], config['max_alpha'])
elif config['generator'] == 'v-modular':
generator = VariableModularCNFGenerator(config['min_k'], config['max_k'], config['min_n'], config['max_n'], config['min_q'],
config['max_q'], config['min_c'], config['max_c'], config['min_alpha'], config['max_alpha'])
else:
generator = UniformCNFGenerator(config['min_n'], config['max_n'], config['min_k'], config['max_k'], config['min_alpha'],
config['max_alpha'])
model_list, errors, losses = trainer.train(train_list = config['train_path'], validation_list = config['validation_path'],
optimizer = optim.Adam(trainer.get_parameter_list(), lr = config['learning_rate'], weight_decay = config['weight_decay']),
last_export_path_base = last_model_path_base, best_export_path_base = best_model_path_base, metric_index = config['metric_index'],
load_model = load_model, reset_step = reset_step, generator = generator, train_epoch_size = config['train_epoch_size'])
if config['verbose']:
logger.info("Starting the test phase...")
for test_files in config['test_path']:
if config['verbose']:
logger.info("Testing " + test_files)
if load_model == "last":
import_path_base = last_model_path_base
elif load_model == "best":
import_path_base = best_model_path_base
else:
import_path_base = None
result = trainer.test(test_list = test_files, import_path_base = import_path_base, batch_replication = batch_replication)
if config['verbose']:
for row in result:
filename, errors, _ = row
print('Dataset: ' + filename)
print("Accuracy: \t%s" % (1 - errors[0]))
print("Recall: \t%s" % (1 - errors[1]))
if os.path.isdir(test_files):
write_to_csv(result,
os.path.join(test_files, config['model_type'] + '_' + config['model_name'] + '_' + config['version'] + '-results.csv'))
write_to_csv_time(result, os.path.join(test_files,
config['model_type'] + '_' + config['model_name'] + '_' + config['version'] + '-results-time.csv'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config', help = 'The configuration JSON file')
parser.add_argument('-t', '--test', help = 'The test mode', action = 'store_true')
parser.add_argument('-l', '--load_model', help = 'Load the previous model')
parser.add_argument('-c', '--cpu_mode', help = 'Run on CPU', action = 'store_true')
parser.add_argument('-r', '--reset', help = 'Reset the global step', action = 'store_true')
    parser.add_argument('-g', '--use_generator', help = 'Generate training examples on the fly instead of reading training files', action = 'store_true')
parser.add_argument('-b', '--batch_replication', help = 'Batch replication factor', type = int, default = 1)
args = parser.parse_args()
run(0, args.config, not args.test, args.load_model, args.cpu_mode, args.reset, args.use_generator, args.batch_replication)
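    # Illustrative invocations (the script file name is an assumption):
    #   python main.py config.yaml              # train, then test
    #   python main.py config.yaml -t -l best   # test only, loading the best checkpoint
    #   python main.py config.yaml -g           # train on generated examples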
|
[
"yaml.load",
"os.path.abspath",
"numpy.random.seed",
"csv.writer",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"torch.manual_seed",
"os.path.isdir",
"os.path.exists",
"os.path.relpath",
"pdp.trainer.SatFactorGraphTrainer",
"os.path.join",
"os.listdir",
"logging.getLogger"
] |
[((1884, 1939), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'format'}), '(level=logging.DEBUG, format=format)\n', (1903, 1939), False, 'import logging\n'), ((1958, 2030), 'logging.getLogger', 'logging.getLogger', (["(config['model_name'] + ' (' + config['version'] + ')')"], {}), "(config['model_name'] + ' (' + config['version'] + ')')\n", (1975, 2030), False, 'import logging\n'), ((3456, 3525), 'pdp.trainer.SatFactorGraphTrainer', 'SatFactorGraphTrainer', ([], {'config': 'config', 'use_cuda': '(not cpu)', 'logger': 'logger'}), '(config=config, use_cuda=not cpu, logger=logger)\n', (3477, 3525), False, 'from pdp.trainer import SatFactorGraphTrainer\n'), ((6471, 6496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6494, 6496), False, 'import argparse, os, yaml, csv\n'), ((687, 757), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(f, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (697, 757), False, 'import argparse, os, yaml, csv\n'), ((980, 1050), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(f, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (990, 1050), False, 'import argparse, os, yaml, csv\n'), ((1344, 1371), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1358, 1371), True, 'import numpy as np\n'), ((1381, 1411), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (1398, 1411), False, 'import torch\n'), ((1533, 1545), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1542, 1545), False, 'import argparse, os, yaml, csv\n'), ((1749, 1786), 'os.path.abspath', 'os.path.abspath', (["config['model_path']"], {}), "(config['model_path'])\n", (1764, 1786), False, 'import argparse, os, yaml, csv\n'), ((3030, 3067), 'os.path.relpath', 'os.path.relpath', (["config['model_path']"], {}), "(config['model_path'])\n", (3045, 3067), False, 'import argparse, os, yaml, csv\n'), ((3161, 3198), 'os.path.relpath', 'os.path.relpath', (["config['model_path']"], {}), "(config['model_path'])\n", (3176, 3198), False, 'import argparse, os, yaml, csv\n'), ((3263, 3299), 'os.path.exists', 'os.path.exists', (['best_model_path_base'], {}), '(best_model_path_base)\n', (3277, 3299), False, 'import argparse, os, yaml, csv\n'), ((3310, 3343), 'os.makedirs', 'os.makedirs', (['best_model_path_base'], {}), '(best_model_path_base)\n', (3321, 3343), False, 'import argparse, os, yaml, csv\n'), ((3358, 3394), 'os.path.exists', 'os.path.exists', (['last_model_path_base'], {}), '(last_model_path_base)\n', (3372, 3394), False, 'import argparse, os, yaml, csv\n'), ((3405, 3438), 'os.makedirs', 'os.makedirs', (['last_model_path_base'], {}), '(last_model_path_base)\n', (3416, 3438), False, 'import argparse, os, yaml, csv\n'), ((6002, 6027), 'os.path.isdir', 'os.path.isdir', (['test_files'], {}), '(test_files)\n', (6015, 6027), False, 'import argparse, os, yaml, csv\n'), ((1579, 1597), 'os.path.abspath', 'os.path.abspath', (['p'], {}), '(p)\n', (1594, 1597), False, 'import argparse, os, yaml, csv\n'), ((1667, 1685), 'os.path.abspath', 'os.path.abspath', (['p'], {}), '(p)\n', (1682, 1685), False, 'import argparse, os, yaml, csv\n'), ((2165, 2202), 'os.path.join', 'os.path.join', (["config['train_path']", 'f'], {}), "(config['train_path'], f)\n", (2177, 2202), False, 'import argparse, os, yaml, csv\n'), ((2457, 2499), 
'os.path.join', 'os.path.join', (["config['validation_path']", 'f'], {}), "(config['validation_path'], f)\n", (2469, 2499), False, 'import argparse, os, yaml, csv\n'), ((2212, 2244), 'os.listdir', 'os.listdir', (["config['train_path']"], {}), "(config['train_path'])\n", (2222, 2244), False, 'import argparse, os, yaml, csv\n'), ((2509, 2546), 'os.listdir', 'os.listdir', (["config['validation_path']"], {}), "(config['validation_path'])\n", (2519, 2546), False, 'import argparse, os, yaml, csv\n'), ((6089, 6211), 'os.path.join', 'os.path.join', (['test_files', "(config['model_type'] + '_' + config['model_name'] + '_' + config['version'\n ] + '-results.csv')"], {}), "(test_files, config['model_type'] + '_' + config['model_name'] +\n '_' + config['version'] + '-results.csv')\n", (6101, 6211), False, 'import argparse, os, yaml, csv\n'), ((6248, 6375), 'os.path.join', 'os.path.join', (['test_files', "(config['model_type'] + '_' + config['model_name'] + '_' + config['version'\n ] + '-results-time.csv')"], {}), "(test_files, config['model_type'] + '_' + config['model_name'] +\n '_' + config['version'] + '-results-time.csv')\n", (6260, 6375), False, 'import argparse, os, yaml, csv\n'), ((2296, 2333), 'os.path.join', 'os.path.join', (["config['train_path']", 'f'], {}), "(config['train_path'], f)\n", (2308, 2333), False, 'import argparse, os, yaml, csv\n'), ((2603, 2645), 'os.path.join', 'os.path.join', (["config['validation_path']", 'f'], {}), "(config['validation_path'], f)\n", (2615, 2645), False, 'import argparse, os, yaml, csv\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 29 10:58:31 2016
@author: <EMAIL>
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Ellipse
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
    Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
    # Width and height are "full" widths, not radii
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
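# Illustrative usage sketch (values are made up, not part of the original module):
#
#   cov = np.array([[2.0, 0.3],
#                   [0.3, 0.5]])
#   pts = np.random.multivariate_normal([0.0, 0.0], cov, size=500)
#   ax = plt.gca()
#   ax.scatter(pts[:, 0], pts[:, 1], s=4)
#   plot_cov_ellipse(cov, pos=(0.0, 0.0), nstd=2, ax=ax, alpha=0.3, color='g')
#   plt.show()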
|
[
"numpy.arctan2",
"numpy.linalg.eigh",
"matplotlib.pyplot.gca",
"matplotlib.patches.Ellipse",
"numpy.sqrt"
] |
[((1407, 1473), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos', 'width': 'width', 'height': 'height', 'angle': 'theta'}), '(xy=pos, width=width, height=height, angle=theta, **kwargs)\n', (1414, 1473), False, 'from matplotlib.patches import Ellipse\n'), ((1068, 1087), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (1082, 1087), True, 'import numpy as np\n'), ((1200, 1209), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1207, 1209), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1295), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (1276, 1295), True, 'import numpy as np\n'), ((1381, 1394), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (1388, 1394), True, 'import numpy as np\n')]
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BC agent."""
from absl.testing import absltest
from absl.testing import parameterized
from acme import specs
from acme import types
from acme.agents.jax import bc
from acme.jax import networks as networks_lib
from acme.jax import utils
from acme.testing import fakes
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax.scipy import special
import numpy as np
import optax
def make_networks(
spec: specs.EnvironmentSpec,
discrete_actions: bool = False) -> networks_lib.FeedForwardNetwork:
"""Creates networks used by the agent."""
if discrete_actions:
final_layer_size = spec.actions.num_values
else:
final_layer_size = np.prod(spec.actions.shape, dtype=int)
def _actor_fn(obs, is_training=False, key=None):
    # is_training and key allow defining train/test-dependent modules
    # like dropout.
del is_training
del key
if discrete_actions:
network = hk.nets.MLP([64, 64, final_layer_size])
else:
network = hk.Sequential([
networks_lib.LayerNormMLP([64, 64], activate_final=True),
networks_lib.NormalTanhDistribution(final_layer_size),
])
return network(obs)
policy = hk.without_apply_rng(hk.transform(_actor_fn))
# Create dummy observations and actions to create network parameters.
dummy_obs = utils.zeros_like(spec.observations)
dummy_obs = utils.add_batch_dim(dummy_obs)
network = networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_obs), policy.apply)
return network
class BCTest(parameterized.TestCase):
@parameterized.parameters(
('logp',),
('mse',),
('peerbc',)
)
def test_continuous_actions(self, loss_name):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
# Create a fake environment to test with.
environment = fakes.ContinuousEnvironment(
episode_length=10, bounded=True, action_dim=6)
spec = specs.make_environment_spec(environment)
dataset_demonstration = fakes.transition_dataset(environment)
dataset_demonstration = dataset_demonstration.map(
lambda sample: types.Transition(*sample.data))
dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()
# Construct the agent.
network = make_networks(spec)
if loss_name == 'logp':
loss_fn = bc.logp(
logp_fn=lambda dist_params, actions: dist_params.log_prob(actions))
elif loss_name == 'mse':
loss_fn = bc.mse(
sample_fn=lambda dist_params, key: dist_params.sample(seed=key))
elif loss_name == 'peerbc':
base_loss_fn = bc.logp(
logp_fn=lambda dist_params, actions: dist_params.log_prob(actions))
loss_fn = bc.peerbc(base_loss_fn, zeta=0.1)
else:
raise ValueError
learner = bc.BCLearner(
network=network,
random_key=jax.random.PRNGKey(0),
loss_fn=loss_fn,
optimizer=optax.adam(0.01),
demonstrations=dataset_demonstration,
num_sgd_steps_per_step=num_sgd_steps_per_step)
# Train the agent
for _ in range(num_steps):
learner.step()
@parameterized.parameters(
('logp',),
('rcal',))
def test_discrete_actions(self, loss_name):
with chex.fake_pmap_and_jit():
num_sgd_steps_per_step = 1
num_steps = 5
# Create a fake environment to test with.
environment = fakes.DiscreteEnvironment(
num_actions=10, num_observations=100, obs_shape=(10,),
obs_dtype=np.float32)
spec = specs.make_environment_spec(environment)
dataset_demonstration = fakes.transition_dataset(environment)
dataset_demonstration = dataset_demonstration.map(
lambda sample: types.Transition(*sample.data))
dataset_demonstration = dataset_demonstration.batch(8).as_numpy_iterator()
# Construct the agent.
network = make_networks(spec, discrete_actions=True)
def logp_fn(logits, actions):
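        # Hand-rolled log-softmax: subtracting the per-row maximum keeps the
        # logsumexp numerically stable, and for a categorical policy
        # log p(a) = logits[a] - logsumexp(logits).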
max_logits = jnp.max(logits, axis=-1, keepdims=True)
logits = logits - max_logits
logits_actions = jnp.sum(
jax.nn.one_hot(actions, spec.actions.num_values) * logits, axis=-1)
log_prob = logits_actions - special.logsumexp(logits, axis=-1)
return log_prob
if loss_name == 'logp':
loss_fn = bc.logp(logp_fn=logp_fn)
elif loss_name == 'rcal':
base_loss_fn = bc.logp(logp_fn=logp_fn)
loss_fn = bc.rcal(base_loss_fn, discount=0.99, alpha=0.1)
else:
raise ValueError
learner = bc.BCLearner(
network=network,
random_key=jax.random.PRNGKey(0),
loss_fn=loss_fn,
optimizer=optax.adam(0.01),
demonstrations=dataset_demonstration,
num_sgd_steps_per_step=num_sgd_steps_per_step)
# Train the agent
for _ in range(num_steps):
learner.step()
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"chex.fake_pmap_and_jit",
"optax.adam",
"acme.types.Transition",
"acme.jax.networks.NormalTanhDistribution",
"jax.random.PRNGKey",
"jax.nn.one_hot",
"acme.testing.fakes.transition_dataset",
"numpy.prod",
"acme.testing.fakes.DiscreteEnvironment",
"acme.agents.jax.bc.peerbc",
"haiku.nets.MLP",
"acme.jax.utils.zeros_like",
"acme.jax.utils.add_batch_dim",
"haiku.transform",
"jax.scipy.special.logsumexp",
"acme.agents.jax.bc.logp",
"acme.specs.make_environment_spec",
"acme.testing.fakes.ContinuousEnvironment",
"acme.agents.jax.bc.rcal",
"jax.numpy.max",
"absl.testing.parameterized.parameters",
"acme.jax.networks.LayerNormMLP"
] |
[((1951, 1986), 'acme.jax.utils.zeros_like', 'utils.zeros_like', (['spec.observations'], {}), '(spec.observations)\n', (1967, 1986), False, 'from acme.jax import utils\n'), ((2001, 2031), 'acme.jax.utils.add_batch_dim', 'utils.add_batch_dim', (['dummy_obs'], {}), '(dummy_obs)\n', (2020, 2031), False, 'from acme.jax import utils\n'), ((2199, 2257), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["('logp',)", "('mse',)", "('peerbc',)"], {}), "(('logp',), ('mse',), ('peerbc',))\n", (2223, 2257), False, 'from absl.testing import parameterized\n'), ((3823, 3869), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["('logp',)", "('rcal',)"], {}), "(('logp',), ('rcal',))\n", (3847, 3869), False, 'from absl.testing import parameterized\n'), ((5606, 5621), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (5619, 5621), False, 'from absl.testing import absltest\n'), ((1301, 1339), 'numpy.prod', 'np.prod', (['spec.actions.shape'], {'dtype': 'int'}), '(spec.actions.shape, dtype=int)\n', (1308, 1339), True, 'import numpy as np\n'), ((1839, 1862), 'haiku.transform', 'hk.transform', (['_actor_fn'], {}), '(_actor_fn)\n', (1851, 1862), True, 'import haiku as hk\n'), ((1558, 1597), 'haiku.nets.MLP', 'hk.nets.MLP', (['[64, 64, final_layer_size]'], {}), '([64, 64, final_layer_size])\n', (1569, 1597), True, 'import haiku as hk\n'), ((2341, 2365), 'chex.fake_pmap_and_jit', 'chex.fake_pmap_and_jit', ([], {}), '()\n', (2363, 2365), False, 'import chex\n'), ((2489, 2563), 'acme.testing.fakes.ContinuousEnvironment', 'fakes.ContinuousEnvironment', ([], {'episode_length': '(10)', 'bounded': '(True)', 'action_dim': '(6)'}), '(episode_length=10, bounded=True, action_dim=6)\n', (2516, 2563), False, 'from acme.testing import fakes\n'), ((2589, 2629), 'acme.specs.make_environment_spec', 'specs.make_environment_spec', (['environment'], {}), '(environment)\n', (2616, 2629), False, 'from acme import specs\n'), ((2660, 2697), 'acme.testing.fakes.transition_dataset', 'fakes.transition_dataset', (['environment'], {}), '(environment)\n', (2684, 2697), False, 'from acme.testing import fakes\n'), ((3938, 3962), 'chex.fake_pmap_and_jit', 'chex.fake_pmap_and_jit', ([], {}), '()\n', (3960, 3962), False, 'import chex\n'), ((4087, 4194), 'acme.testing.fakes.DiscreteEnvironment', 'fakes.DiscreteEnvironment', ([], {'num_actions': '(10)', 'num_observations': '(100)', 'obs_shape': '(10,)', 'obs_dtype': 'np.float32'}), '(num_actions=10, num_observations=100, obs_shape=(\n 10,), obs_dtype=np.float32)\n', (4112, 4194), False, 'from acme.testing import fakes\n'), ((4225, 4265), 'acme.specs.make_environment_spec', 'specs.make_environment_spec', (['environment'], {}), '(environment)\n', (4252, 4265), False, 'from acme import specs\n'), ((4296, 4333), 'acme.testing.fakes.transition_dataset', 'fakes.transition_dataset', (['environment'], {}), '(environment)\n', (4320, 4333), False, 'from acme.testing import fakes\n'), ((4676, 4715), 'jax.numpy.max', 'jnp.max', (['logits'], {'axis': '(-1)', 'keepdims': '(True)'}), '(logits, axis=-1, keepdims=True)\n', (4683, 4715), True, 'import jax.numpy as jnp\n'), ((5012, 5036), 'acme.agents.jax.bc.logp', 'bc.logp', ([], {'logp_fn': 'logp_fn'}), '(logp_fn=logp_fn)\n', (5019, 5036), False, 'from acme.agents.jax import bc\n'), ((1650, 1706), 'acme.jax.networks.LayerNormMLP', 'networks_lib.LayerNormMLP', (['[64, 64]'], {'activate_final': '(True)'}), '([64, 64], activate_final=True)\n', (1675, 1706), True, 'from acme.jax import networks as networks_lib\n'), 
((1718, 1771), 'acme.jax.networks.NormalTanhDistribution', 'networks_lib.NormalTanhDistribution', (['final_layer_size'], {}), '(final_layer_size)\n', (1753, 1771), True, 'from acme.jax import networks as networks_lib\n'), ((2780, 2810), 'acme.types.Transition', 'types.Transition', (['*sample.data'], {}), '(*sample.data)\n', (2796, 2810), False, 'from acme import types\n'), ((3545, 3566), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (3563, 3566), False, 'import jax\n'), ((3615, 3631), 'optax.adam', 'optax.adam', (['(0.01)'], {}), '(0.01)\n', (3625, 3631), False, 'import optax\n'), ((4416, 4446), 'acme.types.Transition', 'types.Transition', (['*sample.data'], {}), '(*sample.data)\n', (4432, 4446), False, 'from acme import types\n'), ((4904, 4938), 'jax.scipy.special.logsumexp', 'special.logsumexp', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (4921, 4938), False, 'from jax.scipy import special\n'), ((5093, 5117), 'acme.agents.jax.bc.logp', 'bc.logp', ([], {'logp_fn': 'logp_fn'}), '(logp_fn=logp_fn)\n', (5100, 5117), False, 'from acme.agents.jax import bc\n'), ((5136, 5183), 'acme.agents.jax.bc.rcal', 'bc.rcal', (['base_loss_fn'], {'discount': '(0.99)', 'alpha': '(0.1)'}), '(base_loss_fn, discount=0.99, alpha=0.1)\n', (5143, 5183), False, 'from acme.agents.jax import bc\n'), ((5301, 5322), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (5319, 5322), False, 'import jax\n'), ((5371, 5387), 'optax.adam', 'optax.adam', (['(0.01)'], {}), '(0.01)\n', (5381, 5387), False, 'import optax\n'), ((3395, 3428), 'acme.agents.jax.bc.peerbc', 'bc.peerbc', (['base_loss_fn'], {'zeta': '(0.1)'}), '(base_loss_fn, zeta=0.1)\n', (3404, 3428), False, 'from acme.agents.jax import bc\n'), ((4799, 4847), 'jax.nn.one_hot', 'jax.nn.one_hot', (['actions', 'spec.actions.num_values'], {}), '(actions, spec.actions.num_values)\n', (4813, 4847), False, 'import jax\n')]
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cv2
import numpy as np
import StringIO
import datetime
import pytz
import angus
import angus_display as ad
import stats as st
def f(stream_index, width, height):
camera = cv2.VideoCapture(stream_index)
camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, int(width))
camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))
camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)
if not camera.isOpened():
print("Cannot open stream of index {}".format(stream_index))
exit(1)
print("Video stream is of resolution {} x {}".format(camera.get(3), camera.get(4)))
stats = st.Stats("stats.json")
animation = []
engaged = []
conn = angus.connect()
service = conn.services.get_service("scene_analysis", version=1)
service.enable_session()
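    # Main loop: grab a frame, encode it as a grayscale JPEG, send it to the
    # Angus "scene_analysis" service with the per-event sensitivity thresholds
    # defined below, then overlay the returned entities (age, hair, gender,
    # emotion, gaze) and the conversion statistics onto the frame.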
while camera.isOpened():
ret, frame = camera.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
buff = StringIO.StringIO(np.array(buff).tostring())
t = datetime.datetime.now(pytz.utc)
job = service.process({"image": buff,
"timestamp" : t.isoformat(),
"camera_position": "facing",
"sensitivity": {
"appearance": 0.7,
"disappearance": 0.7,
"age_estimated": 0.4,
"gender_estimated": 0.5,
"focus_locked": 0.9,
"emotion_detected": 0.4,
"direction_estimated": 0.8
}
})
res = job.result
events = res["events"]
entities = res["entities"]
for idx, h in entities.iteritems():
pt = ad.displayAge(frame, idx, h, 0.50, 0.35)
ch = ad.displayHair(frame, idx, h)
ad.displayAvatar(frame, h, pt, ch)
ad.displayEmotion(frame, h, pt)
ad.displayGender(frame, h, pt)
ad.displayGaze(frame, idx, h, pt, 0.50)
panel = ((width - 180, 40), (width-20, height - 40))
ad.blur(frame, panel[0], panel[1], (255, 255, 255), 2)
ad.computeConversion(res, events, entities, engaged, stats, animation, 0.5, 500)
ad.displayConversion(frame, stats, (width - 100, int(0.3*height)))
ad.displayAnimation(frame, animation)
ad.display_logo(frame, 20, height - 60)
cv2.imshow('window', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
stats.save()
break
service.disable_session()
camera.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
### Web cam index might be different from 0 on your setup.
### To grab a given video file instead of the host computer cam, try:
### main("/path/to/myvideo.avi")
f(0, 640, 480)
|
[
"angus_display.blur",
"angus_display.displayHair",
"cv2.imencode",
"cv2.imshow",
"angus_display.displayAnimation",
"angus_display.displayAge",
"angus_display.displayGaze",
"cv2.cvtColor",
"angus_display.displayAvatar",
"angus_display.display_logo",
"cv2.destroyAllWindows",
"datetime.datetime.now",
"angus_display.displayEmotion",
"cv2.waitKey",
"angus_display.displayGender",
"cv2.VideoCapture",
"stats.Stats",
"numpy.array",
"angus.connect",
"angus_display.computeConversion"
] |
[((995, 1025), 'cv2.VideoCapture', 'cv2.VideoCapture', (['stream_index'], {}), '(stream_index)\n', (1011, 1025), False, 'import cv2\n'), ((1407, 1429), 'stats.Stats', 'st.Stats', (['"""stats.json"""'], {}), "('stats.json')\n", (1415, 1429), True, 'import stats as st\n'), ((1478, 1493), 'angus.connect', 'angus.connect', ([], {}), '()\n', (1491, 1493), False, 'import angus\n'), ((3597, 3620), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3618, 3620), False, 'import cv2\n'), ((1712, 1751), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1724, 1751), False, 'import cv2\n'), ((1772, 1830), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'gray', '[cv2.IMWRITE_JPEG_QUALITY, 80]'], {}), "('.jpg', gray, [cv2.IMWRITE_JPEG_QUALITY, 80])\n", (1784, 1830), False, 'import cv2\n'), ((1905, 1936), 'datetime.datetime.now', 'datetime.datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (1926, 1936), False, 'import datetime\n'), ((3100, 3154), 'angus_display.blur', 'ad.blur', (['frame', 'panel[0]', 'panel[1]', '(255, 255, 255)', '(2)'], {}), '(frame, panel[0], panel[1], (255, 255, 255), 2)\n', (3107, 3154), True, 'import angus_display as ad\n'), ((3163, 3248), 'angus_display.computeConversion', 'ad.computeConversion', (['res', 'events', 'entities', 'engaged', 'stats', 'animation', '(0.5)', '(500)'], {}), '(res, events, entities, engaged, stats, animation, 0.5, 500\n )\n', (3183, 3248), True, 'import angus_display as ad\n'), ((3327, 3364), 'angus_display.displayAnimation', 'ad.displayAnimation', (['frame', 'animation'], {}), '(frame, animation)\n', (3346, 3364), True, 'import angus_display as ad\n'), ((3373, 3412), 'angus_display.display_logo', 'ad.display_logo', (['frame', '(20)', '(height - 60)'], {}), '(frame, 20, height - 60)\n', (3388, 3412), True, 'import angus_display as ad\n'), ((3422, 3449), 'cv2.imshow', 'cv2.imshow', (['"""window"""', 'frame'], {}), "('window', frame)\n", (3432, 3449), False, 'import cv2\n'), ((2756, 2795), 'angus_display.displayAge', 'ad.displayAge', (['frame', 'idx', 'h', '(0.5)', '(0.35)'], {}), '(frame, idx, h, 0.5, 0.35)\n', (2769, 2795), True, 'import angus_display as ad\n'), ((2814, 2843), 'angus_display.displayHair', 'ad.displayHair', (['frame', 'idx', 'h'], {}), '(frame, idx, h)\n', (2828, 2843), True, 'import angus_display as ad\n'), ((2856, 2890), 'angus_display.displayAvatar', 'ad.displayAvatar', (['frame', 'h', 'pt', 'ch'], {}), '(frame, h, pt, ch)\n', (2872, 2890), True, 'import angus_display as ad\n'), ((2903, 2934), 'angus_display.displayEmotion', 'ad.displayEmotion', (['frame', 'h', 'pt'], {}), '(frame, h, pt)\n', (2920, 2934), True, 'import angus_display as ad\n'), ((2947, 2977), 'angus_display.displayGender', 'ad.displayGender', (['frame', 'h', 'pt'], {}), '(frame, h, pt)\n', (2963, 2977), True, 'import angus_display as ad\n'), ((2990, 3028), 'angus_display.displayGaze', 'ad.displayGaze', (['frame', 'idx', 'h', 'pt', '(0.5)'], {}), '(frame, idx, h, pt, 0.5)\n', (3004, 3028), True, 'import angus_display as ad\n'), ((3462, 3476), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3473, 3476), False, 'import cv2\n'), ((1865, 1879), 'numpy.array', 'np.array', (['buff'], {}), '(buff)\n', (1873, 1879), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import logging
import numpy as np
import gensim
from .common import MultiprocModelsRunner, MultiprocModelsWorkerABC, MultiprocEvaluationRunner, \
MultiprocEvaluationWorkerABC, dtm_to_gensim_corpus
from .eval_metrics import metric_cao_juan_2009
AVAILABLE_METRICS = (
'perplexity',
# 'cross_validation',
'cao_juan_2009',
# 'arun_2010',
)
logger = logging.getLogger('tmtoolkit')
def get_model_perplexity(model, eval_corpus):
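    # Normalise gensim's variational bound by the total token count of the
    # evaluation corpus and return 2 ** (-per-word bound); lower perplexity
    # indicates a better fit.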
n_words = sum(cnt for document in eval_corpus for _, cnt in document)
bound = model.bound(eval_corpus)
perwordbound = bound / n_words
return np.exp2(-perwordbound)
class MultiprocModelsWorkerGensim(MultiprocModelsWorkerABC):
package_name = 'gensim'
def fit_model(self, data, params, return_data=False):
data = dtm_to_gensim_corpus(data.tocsr())
model = gensim.models.ldamodel.LdaModel(data, **params)
if return_data:
return model, data
else:
return model
class MultiprocEvaluationWorkerGensim(MultiprocEvaluationWorkerABC, MultiprocModelsWorkerGensim):
def fit_model(self, data, params, return_data=False):
model, data = super(MultiprocEvaluationWorkerGensim, self).fit_model(data, params, return_data=True)
results = {}
if self.return_models:
results['model'] = model
for metric in self.eval_metric:
# if metric == 'cross_validation': continue
if metric == 'cao_juan_2009':
res = metric_cao_juan_2009(model.state.get_lambda())
# elif metric == 'arun_2010': # TODO: fix this (get document topic distr. from gensim model)
# results = metric_arun_2010(train_model.state.get_lambda(), train_model[corpus_train], data.sum(axis=1))
else: # default: perplexity
res = get_model_perplexity(model, data)
logger.info('> evaluation result with metric "%s": %f' % (metric, res))
results[metric] = res
return results
def compute_models_parallel(data, varying_parameters=None, constant_parameters=None, n_max_processes=None):
"""
Compute several Topic Models in parallel using the "gensim" package. Use a single or multiple document term matrices
`data` and optionally a list of varying parameters `varying_parameters`. Pass parameters in `constant_parameters`
    dict to each model calculation. Use at most `n_max_processes` processors, or all available processors if None
is passed.
`data` can be either a Document-Term-Matrix (NumPy array/matrix, SciPy sparse matrix) or a dict with document ID ->
    Document-Term-Matrix mapping when calculating models for multiple corpora (i.e. multiple named documents).
If `data` is a dict of named documents, this function will return a dict with document ID -> result list. Otherwise
it will only return a result list. A result list always is a list containing tuples `(parameter_set, model)` where
`parameter_set` is a dict of the used parameters.
"""
mp_models = MultiprocModelsRunner(MultiprocModelsWorkerGensim, data, varying_parameters, constant_parameters,
n_max_processes=n_max_processes)
return mp_models.run()
def evaluate_topic_models(data, varying_parameters, constant_parameters=None, n_max_processes=None, return_models=False,
metric=None, **metric_kwargs):
"""
Compute several Topic Models in parallel using the "gensim" package. Calculate the models using a list of varying
parameters `varying_parameters` on a single Document-Term-Matrix `data`. Pass parameters in `constant_parameters`
    dict to each model calculation. Use at most `n_max_processes` processors, or all available processors if None
is passed.
`data` must be a Document-Term-Matrix (NumPy array/matrix, SciPy sparse matrix).
Will return a list of size `len(varying_parameters)` containing tuples `(parameter_set, eval_results)` where
`parameter_set` is a dict of the used parameters and `eval_results` is a dict of metric names -> metric results.
"""
mp_eval = MultiprocEvaluationRunner(MultiprocEvaluationWorkerGensim, AVAILABLE_METRICS, data,
varying_parameters, constant_parameters,
metric=metric, metric_options=metric_kwargs,
n_max_processes=n_max_processes, return_models=return_models)
return mp_eval.run()
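# Illustrative usage sketch (the keyword names below are standard gensim LdaModel
# arguments, the metric name comes from AVAILABLE_METRICS above, and the `dtm`
# document-term matrix is assumed to exist):
#
#   varying = [{'num_topics': k} for k in (20, 40, 60)]
#   constant = {'passes': 1, 'iterations': 50}
#   eval_results = evaluate_topic_models(dtm, varying, constant_parameters=constant,
#                                        metric='cao_juan_2009')
#   models = compute_models_parallel(dtm, varying, constant_parameters=constant)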
|
[
"numpy.exp2",
"gensim.models.ldamodel.LdaModel",
"logging.getLogger"
] |
[((393, 423), 'logging.getLogger', 'logging.getLogger', (['"""tmtoolkit"""'], {}), "('tmtoolkit')\n", (410, 423), False, 'import logging\n'), ((630, 652), 'numpy.exp2', 'np.exp2', (['(-perwordbound)'], {}), '(-perwordbound)\n', (637, 652), True, 'import numpy as np\n'), ((869, 916), 'gensim.models.ldamodel.LdaModel', 'gensim.models.ldamodel.LdaModel', (['data'], {}), '(data, **params)\n', (900, 916), False, 'import gensim\n')]
|
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
import pytest
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import xarray as xr
from snl_d3d_cec_verify.cases import CaseStudy
from snl_d3d_cec_verify.result.faces import (_check_case_study,
_faces_frame_to_slice,
_faces_frame_to_depth,
_map_to_faces_frame_with_tke,
_map_to_faces_frame,
_get_quadrilateral_centre,
_FMFaces,
_trim_to_faces_frame,
_StructuredFaces)
def test_check_case_study_error():
case = CaseStudy(dx=[1, 2, 3])
with pytest.raises(ValueError) as excinfo:
_check_case_study(case)
assert "case study must have length one" in str(excinfo)
@pytest.fixture
def faces_frame_fm(data_dir):
csv_path = data_dir / "output" / "faces_frame_fm.csv"
frame = pd.read_csv(csv_path, parse_dates=["time"])
times = frame.time.unique()
return frame[frame.time == times[-1]]
@pytest.fixture
def faces_frame_structured(data_dir):
csv_path = data_dir / "output" / "faces_frame_structured.csv"
frame = pd.read_csv(csv_path, parse_dates=["time"])
times = frame.time.unique()
return frame[frame.time == times[-1]]
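# Note on the slice tests below: "sigma" appears to be the normalised vertical
# coordinate (0 at the free surface, -1 at the bed; values outside [-1, 0] are
# extrapolated), while "z" is the absolute elevation. With a water depth of
# roughly 2 m this gives z ~= 2 * sigma, which matches the asserted bounds.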
def test_faces_frame_to_slice_sigma(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = -0.5
ds = _faces_frame_to_slice(faces_frame_fm, ts, "sigma", sigma)
assert isinstance(ds, xr.Dataset)
assert len(ds["$x$"]) == 18
assert len(ds["$y$"]) == 4
assert np.isclose(ds["$x$"].min(), 0.5)
assert np.isclose(ds["$x$"].max(), 17.5)
assert np.isclose(ds["$y$"].min(), 1.5)
assert np.isclose(ds["$y$"].max(), 4.5)
assert ds[r"$\sigma$"].values.take(0) == sigma
assert ds.time.values.take(0) == ts
assert ds["$z$"].min() > -1.0012
assert ds["$z$"].max() < -1
# Same bounds as the frame
assert ds["$u$"].min() >= faces_frame_fm["u"].min()
assert ds["$u$"].max() <= faces_frame_fm["u"].max()
assert ds["$v$"].min() >= faces_frame_fm["v"].min()
assert ds["$v$"].max() <= faces_frame_fm["v"].max()
assert ds["$w$"].min() >= faces_frame_fm["w"].min()
assert ds["$w$"].max() <= faces_frame_fm["w"].max()
def test_faces_frame_structured_to_slice_sigma(faces_frame_structured):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = -0.75
ds = _faces_frame_to_slice(faces_frame_structured, ts, "sigma", sigma)
assert isinstance(ds, xr.Dataset)
assert len(ds["$x$"]) == 18
assert len(ds["$y$"]) == 4
assert np.isclose(ds["$x$"].min(), 0.5)
assert np.isclose(ds["$x$"].max(), 17.5)
assert np.isclose(ds["$y$"].min(), 1.5)
assert np.isclose(ds["$y$"].max(), 4.5)
assert ds[r"$\sigma$"].values.take(0) == sigma
assert ds.time.values.take(0) == ts
assert ds["$z$"].min() > -1.504
assert ds["$z$"].max() < -1.5
# Same bounds as the frame
assert ds["$u$"].min() >= faces_frame_structured["u"].min()
assert ds["$u$"].max() <= faces_frame_structured["u"].max()
assert ds["$v$"].min() >= faces_frame_structured["v"].min()
assert ds["$v$"].max() <= faces_frame_structured["v"].max()
assert ds["$w$"].min() >= faces_frame_structured["w"].min()
assert ds["$w$"].max() <= faces_frame_structured["w"].max()
assert ds["$k$"].min() >= 0
assert ds["$k$"].min() >= faces_frame_structured["tke"].min()
assert ds["$k$"].max() <= faces_frame_structured["tke"].max()
def test_faces_frame_to_slice_sigma_extrapolate_forward(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = 0.1
ds = _faces_frame_to_slice(faces_frame_fm, ts, "sigma", sigma)
assert ds["$z$"].min() > 0.2
assert ds["$z$"].max() < 0.2003
def test_faces_frame_to_slice_sigma_extrapolate_backward(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = -1.1
ds = _faces_frame_to_slice(faces_frame_fm, ts, "sigma", sigma)
assert ds["$z$"].min() > -2.203
assert ds["$z$"].max() < -2.2
def test_faces_frame_to_slice_z(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
z = -1
ds = _faces_frame_to_slice(faces_frame_fm, ts, "z", z)
assert isinstance(ds, xr.Dataset)
assert len(ds["$x$"]) == 18
assert len(ds["$y$"]) == 4
assert np.isclose(ds["$x$"].min(), 0.5)
assert np.isclose(ds["$x$"].max(), 17.5)
assert np.isclose(ds["$y$"].min(), 1.5)
assert np.isclose(ds["$y$"].max(), 4.5)
assert ds["$z$"].values.take(0) == z
assert ds.time.values.take(0) == ts
assert ds[r"$\sigma$"].values.min() >= -1
assert ds["$z$"].max() < 1.002
# Same bounds as the frame
assert ds["$u$"].min() >= faces_frame_fm["u"].min()
assert ds["$u$"].max() <= faces_frame_fm["u"].max()
assert ds["$v$"].min() >= faces_frame_fm["v"].min()
assert ds["$v$"].max() <= faces_frame_fm["v"].max()
assert ds["$w$"].min() >= faces_frame_fm["w"].min()
assert ds["$w$"].max() <= faces_frame_fm["w"].max()
def test_faces_frame_to_slice_error():
with pytest.raises(RuntimeError) as excinfo:
_faces_frame_to_slice("mock", "mock", "mock", "mock")
assert "Given key is not valid" in str(excinfo)
def test_faces_frame_to_depth(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
da = _faces_frame_to_depth(faces_frame_fm, ts)
assert isinstance(da, xr.DataArray)
assert len(da["$x$"]) == 18
assert len(da["$y$"]) == 4
assert da.time.values.take(0) == ts
# Same bounds as the frame
assert da.min() >= faces_frame_fm["depth"].min()
assert da.max() <= faces_frame_fm["depth"].max()
def test_faces_frame_structured_to_depth(faces_frame_structured):
ts = pd.Timestamp("2001-01-01 01:00:00")
da = _faces_frame_to_depth(faces_frame_structured, ts)
assert isinstance(da, xr.DataArray)
assert len(da["$x$"]) == 18
assert len(da["$y$"]) == 4
assert da.time.values.take(0) == ts
# Same bounds as the frame
assert da.min() >= faces_frame_structured["depth"].min()
assert da.max() <= faces_frame_structured["depth"].max()
def test_faces_load_t_step_first(faces):
t_step = -1
expected_t_step = faces._resolve_t_step(t_step)
faces._load_t_step(t_step)
assert len(faces._frame) == 18 * 4 * 7
assert expected_t_step in faces._t_steps
assert faces._t_steps[expected_t_step] == \
pd.Timestamp('2001-01-01 01:00:00')
def test_faces_load_t_step_second(faces):
faces._load_t_step(-1)
faces._load_t_step(0)
assert len(faces._frame) == 18 * 4 * 7 * 2
assert len(faces._t_steps) == 2
assert set(faces._frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00'),
pd.Timestamp('2001-01-01')])
def test_faces_load_t_step_no_repeat(faces):
faces._load_t_step(-1)
faces._load_t_step(1)
assert len(faces._frame) == 18 * 4 * 7
assert len(faces._t_steps) == 1
def test_faces_extract_depth(mocker, faces):
mock = mocker.patch('snl_d3d_cec_verify.result.faces.'
'_faces_frame_to_depth')
faces.extract_depth(-1)
mock.assert_called()
def test_faces_extract_sigma(mocker, faces):
mock = mocker.patch('snl_d3d_cec_verify.result.faces.'
'_faces_frame_to_slice')
faces.extract_sigma(-1, 0)
mock.assert_called()
assert 'sigma' in mock.call_args.args[2]
def test_faces_extract_sigma_interp(faces):
t_step = -1
sigma = -0.5
x = 1
y = 3
ds = faces.extract_sigma(t_step, sigma, x, y)
t_step = faces._resolve_t_step(t_step)
ts = faces._t_steps[t_step]
assert isinstance(ds, xr.Dataset)
assert ds[r"$\sigma$"].values.take(0) == sigma
assert ds.time.values.take(0) == ts
assert ds["$x$"].values.take(0) == x
assert ds["$y$"].values.take(0) == y
assert np.isclose(ds["$z$"].values, -1.00114767)
# Same bounds as the frame
assert (faces._frame["u"].min() <= ds["$u$"].values.take(0) <=
faces._frame["u"].max())
assert (faces._frame["v"].min() <= ds["$v$"].values.take(0) <=
faces._frame["v"].max())
assert (faces._frame["w"].min() <= ds["$w$"].values.take(0) <=
faces._frame["w"].max())
def test_faces_extract_z(mocker, faces):
mock = mocker.patch('snl_d3d_cec_verify.result.faces.'
'_faces_frame_to_slice')
faces.extract_z(-1, -1)
mock.assert_called()
assert 'z' in mock.call_args.args[2]
def test_faces_extract_z_interp(faces):
t_step = -1
z = -1
x = 1
y = 3
ds = faces.extract_z(t_step, z, x, y)
t_step = faces._resolve_t_step(t_step)
ts = faces._t_steps[t_step]
assert isinstance(ds, xr.Dataset)
assert ds["$z$"].values.take(0) == z
assert ds.time.values.take(0) == ts
assert ds["$x$"].values.take(0) == x
assert ds["$y$"].values.take(0) == y
assert np.isclose(ds[r"$\sigma$"].values, -0.49942682)
# Same bounds as the frame
assert (faces._frame["u"].min() <= ds["$u$"].values.take(0) <=
faces._frame["u"].max())
assert (faces._frame["v"].min() <= ds["$v$"].values.take(0) <=
faces._frame["v"].max())
assert (faces._frame["w"].min() <= ds["$w$"].values.take(0) <=
faces._frame["w"].max())
@pytest.mark.parametrize("x, y", [
("mock", None),
(None, "mock")])
def test_faces_extract_interp_error(faces, x, y):
with pytest.raises(RuntimeError) as excinfo:
faces.extract_z("mock", "mock", x, y)
assert "x and y must both be set" in str(excinfo)
def test_faces_extract_turbine_z(mocker, faces):
case = CaseStudy()
offset_z = 0.5
t_step = -1
mock = mocker.patch.object(faces, 'extract_z')
faces.extract_turbine_z(t_step, case, offset_z)
mock.assert_called_with(t_step, case.turb_pos_z + offset_z)
def test_faces_extract_turbine_centreline(mocker, faces):
case = CaseStudy()
t_step = -1
x_step = 0.5
offset_x = 0.5
offset_y = 0.5
offset_z = 0.5
mock = mocker.patch.object(faces, 'extract_z')
faces.extract_turbine_centreline(t_step,
case,
x_step,
offset_x,
offset_y,
offset_z)
mock.assert_called()
assert mock.call_args.args[0] == t_step
assert mock.call_args.args[1] == case.turb_pos_z + offset_z
x = mock.call_args.args[2]
y = mock.call_args.args[3]
assert min(x) == case.turb_pos_x + offset_x
assert max(x) <= faces.xmax
assert np.unique(np.diff(x)).take(0) == x_step
assert set(y) == set([case.turb_pos_y + offset_y])
def test_faces_extract_turbine_centre(mocker, faces):
case = CaseStudy()
t_step = -1
offset_x = 0.5
offset_y = 0.5
offset_z = 0.5
mock = mocker.patch.object(faces, 'extract_z')
faces.extract_turbine_centre(t_step,
case,
offset_x,
offset_y,
offset_z)
mock.assert_called()
assert mock.call_args.args[0] == t_step
assert mock.call_args.args[1] == case.turb_pos_z + offset_z
x = mock.call_args.args[2]
y = mock.call_args.args[3]
assert len(x) == 1
assert len(y) == 1
assert x[0] == case.turb_pos_x + offset_x
assert y[0] == case.turb_pos_y + offset_y
def test_map_to_faces_frame_with_tke(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame_with_tke(map_path, -1)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 18 * 4 * 7
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2.003 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() <= 0
assert np.isclose(faces_frame["sigma"].unique(),
[-1.,
-0.83333333,
-0.66666667,
-0.5,
-0.33333333,
-0.16666667,
0.]).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 2
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() > 0.57
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.0089
sigma_slice = _faces_frame_to_slice(faces_frame,
pd.Timestamp('2001-01-01 01:00:00'),
"sigma",
-0.75)
assert np.isclose(sigma_slice["$z$"].values.mean(), -1.5009617997833038)
assert round(sigma_slice["$k$"].values.mean(), 5) == 0.00627
def test_map_to_faces_frame_with_tke_none(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame_with_tke(map_path)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 18 * 4 * 7 * 2
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2.003 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() <= 0
assert np.isclose(faces_frame["sigma"].unique(),
[-1.,
-0.83333333,
-0.66666667,
-0.5,
-0.33333333,
-0.16666667,
0.]).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 00:00:00'),
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 1.998
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() >= 0
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.0089
def test_map_to_faces_frame(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame(map_path, -1)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 216
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert (faces_frame["sigma"].unique() == (-0.8333333333333334,
-0.5,
-0.16666666666666669)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 2
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() > 0.6
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
sigma_slice = _faces_frame_to_slice(faces_frame,
pd.Timestamp('2001-01-01 01:00:00'),
"sigma",
-0.75)
assert np.isclose(sigma_slice["$z$"].values.mean(), -1.5009617997833038)
def test_map_to_faces_frame_none(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame(map_path)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 432
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert (faces_frame["sigma"].unique() == (-0.8333333333333334,
-0.5,
-0.16666666666666669)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 00:00:00'),
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() >= 2
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() >= 0.
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
def test_get_quadrilateral_centre():
densities = np.array([0, 0, 1, 1])
result = _get_quadrilateral_centre(densities)
assert result == 0.5
def test_FMFaces(mocker):
mock = mocker.patch(
'snl_d3d_cec_verify.result.faces._map_to_faces_frame_with_tke',
autospec=True)
path = "mock"
tstep = 0
test = _FMFaces(path, 2, 18)
test._get_faces_frame(tstep)
mock.assert_called_with(path, tstep)
def test_trim_to_faces_frame(data_dir):
trim_path = data_dir / "output" / "trim-D3D.nc"
faces_frame = _trim_to_faces_frame(trim_path, -1)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 216
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert np.isclose(faces_frame["sigma"].unique(),
(-0.16666667, -0.5, -0.83333331)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 2
assert faces_frame["depth"].max() < 2.005
assert faces_frame["u"].min() > 0.6
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-2
assert faces_frame["v"].max() < 1e-2
assert faces_frame["w"].min() > -0.03
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.004
def test_trim_to_faces_frame_none(data_dir):
trim_path = data_dir / "output" / "trim-D3D.nc"
faces_frame = _trim_to_faces_frame(trim_path)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 432
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert np.isclose(faces_frame["sigma"].unique(),
(-0.16666667, -0.5, -0.83333331)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 00:00:00'),
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() >= 2
assert faces_frame["depth"].max() < 2.005
assert faces_frame["u"].min() >= 0.
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-2
assert faces_frame["v"].max() < 1e-2
assert faces_frame["w"].min() > -0.03
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.004
def test_StructuredFaces(mocker):
mock = mocker.patch('snl_d3d_cec_verify.result.faces._trim_to_faces_frame',
autospec=True)
path = "mock"
tstep = 0
test = _StructuredFaces(path, 2, 18)
test._get_faces_frame(tstep)
mock.assert_called_with(path, tstep)
|
[
"snl_d3d_cec_verify.result.faces._get_quadrilateral_centre",
"pandas.read_csv",
"numpy.isclose",
"snl_d3d_cec_verify.result.faces._faces_frame_to_slice",
"snl_d3d_cec_verify.result.faces._trim_to_faces_frame",
"pytest.mark.parametrize",
"pytest.raises",
"warnings.catch_warnings",
"snl_d3d_cec_verify.result.faces._FMFaces",
"snl_d3d_cec_verify.result.faces._check_case_study",
"snl_d3d_cec_verify.result.faces._map_to_faces_frame_with_tke",
"snl_d3d_cec_verify.result.faces._faces_frame_to_depth",
"snl_d3d_cec_verify.cases.CaseStudy",
"pandas.Timestamp",
"warnings.filterwarnings",
"numpy.diff",
"numpy.array",
"snl_d3d_cec_verify.result.faces._StructuredFaces",
"snl_d3d_cec_verify.result.faces._map_to_faces_frame"
] |
[((10168, 10233), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x, y"""', "[('mock', None), (None, 'mock')]"], {}), "('x, y', [('mock', None), (None, 'mock')])\n", (10191, 10233), False, 'import pytest\n'), ((101, 126), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (124, 126), False, 'import warnings\n'), ((132, 194), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (155, 194), False, 'import warnings\n'), ((918, 941), 'snl_d3d_cec_verify.cases.CaseStudy', 'CaseStudy', ([], {'dx': '[1, 2, 3]'}), '(dx=[1, 2, 3])\n', (927, 941), False, 'from snl_d3d_cec_verify.cases import CaseStudy\n'), ((1210, 1253), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'parse_dates': "['time']"}), "(csv_path, parse_dates=['time'])\n", (1221, 1253), True, 'import pandas as pd\n'), ((1462, 1505), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'parse_dates': "['time']"}), "(csv_path, parse_dates=['time'])\n", (1473, 1505), True, 'import pandas as pd\n'), ((1649, 1684), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (1661, 1684), True, 'import pandas as pd\n'), ((1711, 1768), 'snl_d3d_cec_verify.result.faces._faces_frame_to_slice', '_faces_frame_to_slice', (['faces_frame_fm', 'ts', '"""sigma"""', 'sigma'], {}), "(faces_frame_fm, ts, 'sigma', sigma)\n", (1732, 1768), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((2692, 2727), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (2704, 2727), True, 'import pandas as pd\n'), ((2755, 2820), 'snl_d3d_cec_verify.result.faces._faces_frame_to_slice', '_faces_frame_to_slice', (['faces_frame_structured', 'ts', '"""sigma"""', 'sigma'], {}), "(faces_frame_structured, ts, 'sigma', sigma)\n", (2776, 2820), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((3958, 3993), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (3970, 3993), True, 'import pandas as pd\n'), ((4019, 4076), 'snl_d3d_cec_verify.result.faces._faces_frame_to_slice', '_faces_frame_to_slice', (['faces_frame_fm', 'ts', '"""sigma"""', 'sigma'], {}), "(faces_frame_fm, ts, 'sigma', sigma)\n", (4040, 4076), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((4241, 4276), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (4253, 4276), True, 'import pandas as pd\n'), ((4303, 4360), 'snl_d3d_cec_verify.result.faces._faces_frame_to_slice', '_faces_frame_to_slice', (['faces_frame_fm', 'ts', '"""sigma"""', 'sigma'], {}), "(faces_frame_fm, ts, 'sigma', sigma)\n", (4324, 4360), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, 
_trim_to_faces_frame, _StructuredFaces\n'), ((4501, 4536), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (4513, 4536), True, 'import pandas as pd\n'), ((4557, 4606), 'snl_d3d_cec_verify.result.faces._faces_frame_to_slice', '_faces_frame_to_slice', (['faces_frame_fm', 'ts', '"""z"""', 'z'], {}), "(faces_frame_fm, ts, 'z', z)\n", (4578, 4606), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((5721, 5756), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (5733, 5756), True, 'import pandas as pd\n'), ((5766, 5807), 'snl_d3d_cec_verify.result.faces._faces_frame_to_depth', '_faces_frame_to_depth', (['faces_frame_fm', 'ts'], {}), '(faces_frame_fm, ts)\n', (5787, 5807), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((6185, 6220), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (6197, 6220), True, 'import pandas as pd\n'), ((6230, 6279), 'snl_d3d_cec_verify.result.faces._faces_frame_to_depth', '_faces_frame_to_depth', (['faces_frame_structured', 'ts'], {}), '(faces_frame_structured, ts)\n', (6251, 6279), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((8456, 8497), 'numpy.isclose', 'np.isclose', (["ds['$z$'].values", '(-1.00114767)'], {}), "(ds['$z$'].values, -1.00114767)\n", (8466, 8497), True, 'import numpy as np\n'), ((9649, 9696), 'numpy.isclose', 'np.isclose', (["ds['$\\\\sigma$'].values", '(-0.49942682)'], {}), "(ds['$\\\\sigma$'].values, -0.49942682)\n", (9659, 9696), True, 'import numpy as np\n'), ((10567, 10578), 'snl_d3d_cec_verify.cases.CaseStudy', 'CaseStudy', ([], {}), '()\n', (10576, 10578), False, 'from snl_d3d_cec_verify.cases import CaseStudy\n'), ((10862, 10873), 'snl_d3d_cec_verify.cases.CaseStudy', 'CaseStudy', ([], {}), '()\n', (10871, 10873), False, 'from snl_d3d_cec_verify.cases import CaseStudy\n'), ((11762, 11773), 'snl_d3d_cec_verify.cases.CaseStudy', 'CaseStudy', ([], {}), '()\n', (11771, 11773), False, 'from snl_d3d_cec_verify.cases import CaseStudy\n'), ((12586, 12628), 'snl_d3d_cec_verify.result.faces._map_to_faces_frame_with_tke', '_map_to_faces_frame_with_tke', (['map_path', '(-1)'], {}), '(map_path, -1)\n', (12614, 12628), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((14902, 14940), 'snl_d3d_cec_verify.result.faces._map_to_faces_frame_with_tke', '_map_to_faces_frame_with_tke', (['map_path'], {}), '(map_path)\n', (14930, 14940), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((16904, 16937), 
'snl_d3d_cec_verify.result.faces._map_to_faces_frame', '_map_to_faces_frame', (['map_path', '(-1)'], {}), '(map_path, -1)\n', (16923, 16937), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((18892, 18921), 'snl_d3d_cec_verify.result.faces._map_to_faces_frame', '_map_to_faces_frame', (['map_path'], {}), '(map_path)\n', (18911, 18921), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((20573, 20595), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (20581, 20595), True, 'import numpy as np\n'), ((20609, 20645), 'snl_d3d_cec_verify.result.faces._get_quadrilateral_centre', '_get_quadrilateral_centre', (['densities'], {}), '(densities)\n', (20634, 20645), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((20893, 20914), 'snl_d3d_cec_verify.result.faces._FMFaces', '_FMFaces', (['path', '(2)', '(18)'], {}), '(path, 2, 18)\n', (20901, 20914), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((21111, 21146), 'snl_d3d_cec_verify.result.faces._trim_to_faces_frame', '_trim_to_faces_frame', (['trim_path', '(-1)'], {}), '(trim_path, -1)\n', (21131, 21146), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((22842, 22873), 'snl_d3d_cec_verify.result.faces._trim_to_faces_frame', '_trim_to_faces_frame', (['trim_path'], {}), '(trim_path)\n', (22862, 22873), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((24738, 24767), 'snl_d3d_cec_verify.result.faces._StructuredFaces', '_StructuredFaces', (['path', '(2)', '(18)'], {}), '(path, 2, 18)\n', (24754, 24767), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((956, 981), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (969, 981), False, 'import pytest\n'), ((1002, 1025), 'snl_d3d_cec_verify.result.faces._check_case_study', '_check_case_study', (['case'], {}), '(case)\n', (1019, 1025), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((5499, 5526), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5512, 5526), 
False, 'import pytest\n'), ((5547, 5600), 'snl_d3d_cec_verify.result.faces._faces_frame_to_slice', '_faces_frame_to_slice', (['"""mock"""', '"""mock"""', '"""mock"""', '"""mock"""'], {}), "('mock', 'mock', 'mock', 'mock')\n", (5568, 5600), False, 'from snl_d3d_cec_verify.result.faces import _check_case_study, _faces_frame_to_slice, _faces_frame_to_depth, _map_to_faces_frame_with_tke, _map_to_faces_frame, _get_quadrilateral_centre, _FMFaces, _trim_to_faces_frame, _StructuredFaces\n'), ((6919, 6954), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (6931, 6954), True, 'import pandas as pd\n'), ((10355, 10382), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10368, 10382), False, 'import pytest\n'), ((14490, 14525), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (14502, 14525), True, 'import pandas as pd\n'), ((18554, 18589), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (18566, 18589), True, 'import pandas as pd\n'), ((7231, 7266), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (7243, 7266), True, 'import pandas as pd\n'), ((7308, 7334), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01"""'], {}), "('2001-01-01')\n", (7320, 7334), True, 'import pandas as pd\n'), ((13927, 13962), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (13939, 13962), True, 'import pandas as pd\n'), ((16243, 16278), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 00:00:00"""'], {}), "('2001-01-01 00:00:00')\n", (16255, 16278), True, 'import pandas as pd\n'), ((16320, 16355), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (16332, 16355), True, 'import pandas as pd\n'), ((18077, 18112), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (18089, 18112), True, 'import pandas as pd\n'), ((20061, 20096), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 00:00:00"""'], {}), "('2001-01-01 00:00:00')\n", (20073, 20096), True, 'import pandas as pd\n'), ((20138, 20173), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (20150, 20173), True, 'import pandas as pd\n'), ((22259, 22294), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (22271, 22294), True, 'import pandas as pd\n'), ((23986, 24021), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 00:00:00"""'], {}), "('2001-01-01 00:00:00')\n", (23998, 24021), True, 'import pandas as pd\n'), ((24063, 24098), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-01-01 01:00:00"""'], {}), "('2001-01-01 01:00:00')\n", (24075, 24098), True, 'import pandas as pd\n'), ((11605, 11615), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (11612, 11615), True, 'import numpy as np\n')]
|
import colorsys
import math
import os
import random
from decimal import Decimal
import hpbandster.core.result as hpres
import matplotlib.pyplot as plt
import numpy as np
# smallest value is best -> reverse_loss = True
# largest value is best -> reverse_loss = False
REVERSE_LOSS = True
EXP_LOSS = 1
OUTLIER_PERC_WORST = 0.5
OUTLIER_PERC_BEST = 0.0
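# Note (comment added for clarity): remove_outliers() below first replaces any
# non-finite (or exactly zero) losses with a value slightly worse than the worst
# finite loss observed, then drops OUTLIER_PERC_WORST (here 50%) of the worst
# runs and OUTLIER_PERC_BEST (here 0%) of the best runs before anything is plotted.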
def analyze_bohb(log_dir, title):
# load the example run from the log files
result = hpres.logged_results_to_HBS_result(log_dir)
# get all executed runs
all_runs = result.get_all_runs()
# get the 'dict' that translates config ids to the actual configurations
id2conf = result.get_id2config_mapping()
    # Here is how you get the incumbent (best configuration)
inc_id = result.get_incumbent_id()
# let's grab the run on the highest budget
inc_runs = result.get_runs_by_id(inc_id)
inc_run = inc_runs[-1]
# We have access to all information: the config, the loss observed during
# optimization, and all the additional information
inc_valid_score = inc_run.loss
inc_config = id2conf[inc_id]['config']
inc_info = inc_run['info']
print('Best found configuration :' + str(inc_config))
print('Score: ' + str(inc_valid_score))
print('Info: ' + str(inc_info))
# print('It achieved accuracies of %f (validation) and %f (test).' % (-inc_valid_score, inc_test_score))
# # Let's plot the observed losses grouped by budget,
# hpvis.losses_over_time(all_runs)
#
    # # the number of concurrent runs,
# hpvis.concurrent_runs_over_time(all_runs)
#
# # and the number of finished runs.
# hpvis.finished_runs_over_time(all_runs)
#
# # This one visualizes the spearman rank correlation coefficients of the losses
# # between different budgets.
# hpvis.correlation_across_budgets(result)
#
# # For model based optimizers, one might wonder how much the model actually helped.
# # The next plot compares the performance of configs picked by the model vs. random ones
# hpvis.performance_histogram_model_vs_random(all_runs, id2conf)
result = remove_outliers(result)
# result = filter_values(result)
# print_configs_sorted_by_loss(result)
# print_stats_per_value(result)
# plot_accuracy_over_budget(result)
plot_parallel_scatter(result)
plt.title(title)
plt.show()
file_name = str(title).strip().replace(' ', '_').lower()
plt.savefig(os.path.join("../experiments/automl_plots/", file_name + ".png"))
def print_configs_sorted_by_loss(result):
lst = []
for k1, v1 in result.data.items():
for k2, v2 in v1.results.items():
loss = v2['loss']
config = v1.config
lst.append((loss, config))
lst.sort(key=lambda x: x[0])
for elem in lst:
print(elem)
def print_stats_per_value(result):
# get all possible keys
min_epoch = float('Inf')
config_params = {}
for value in result.data.values():
for config_param, config_param_val in value.config.items():
for epoch, epoch_result in value.results.items():
try:
loss = epoch_result["loss"]
min_epoch = min(min_epoch, epoch)
if config_param in config_params.keys():
config_params[config_param].append((config_param_val, epoch, loss))
else:
config_params[config_param] = [(config_param_val, epoch, loss)]
except:
                    print('Error in print_stats_per_value, continuing')
for config_param, data in (dict(sorted(config_params.items()))).items():
print(config_param)
# get all unique possible values for each config parameter
values = set(elem[0] for elem in data)
values = sorted(list(values))
if len(values) > 20:
continue
for value in values:
losses = []
for elem in data:
val, epoch, loss = elem
if val == value and epoch == min_epoch:
losses.append(loss)
print('{} {} {} {}'.format(value, np.mean(losses), np.std(losses), len(losses)))
def remove_outliers(result):
lut = []
for key, value1 in result.data.items():
for value2 in value1.results.values():
            if value2 is None:
loss = float('nan')
else:
loss = value2['loss']
lut.append([loss, key])
filtered_lut = [x for x in lut if math.isfinite(x[0])]
worst_loss = sorted(filtered_lut, reverse=REVERSE_LOSS)[0][0]
if REVERSE_LOSS:
worst_loss += 0.01 * abs(worst_loss)
else:
worst_loss -= 0.01 * abs(worst_loss)
# remove NaN's
for i in range(len(lut)):
if not math.isfinite(lut[i][0]) or lut[i][0] == 0:
lut[i][0] = worst_loss
for key in result.data[lut[i][1]].results.keys():
# hacky but sometimes some budgets are missing (presumably when terminating ongoing runs)
if result.data[lut[i][1]].results[key] is None:
continue
else:
result.data[lut[i][1]].results[key]['loss'] = worst_loss
# result.data.pop(elem[1], None)
lut.sort(key=lambda x: x[0], reverse=REVERSE_LOSS)
n_remove_worst = math.ceil(len(lut) * OUTLIER_PERC_WORST)
n_remove_best = math.ceil(len(lut) * OUTLIER_PERC_BEST)
# remove percentage of worst values
for i in range(n_remove_worst):
elem = lut.pop(0)
result.data.pop(elem[1], None)
# remove percentage of best values
for i in range(n_remove_best):
elem = lut.pop()
result.data.pop(elem[1], None)
return result
def filter_values(result):
del_list = []
for key, value1 in result.data.items():
id = key
config = value1.config
rep_env_num = config['rep_env_num']
ddqn_dropout = config['ddqn_dropout']
# if not ddqn_dropout == 0:
# del_list.append(id)
# if not rep_env_num == 5:
# del_list.append(id)
for id in del_list:
result.data.pop(id, None)
return result
def plot_accuracy_over_budget(result):
fig, ax = plt.subplots()
# plot hyperband plot
index = None
color = None
for key, value1 in result.data.items():
        if key[0] != index:
index = key[0]
color = get_bright_random_color()
try:
x = []
y = []
for key2, value2 in value1.results.items():
x.append(key2)
y.append(value2["loss"])
plt.semilogx(x, y, color=color)
except:
print('Error in plot_accuracy_over_budget, continuing')
ax.set_title('Score for different configurations')
ax.set_xlabel('epochs')
ax.set_ylabel('score')
def plot_parallel_scatter(result):
plt.subplots(dpi=300, figsize=(8, 4))
ep_m = 1e9
ep_M = -1e9
loss_m = 1e9
loss_M = -1e9
# get all possible keys
config_params = {}
for value in result.data.values():
for config_param, config_param_val in value.config.items():
for epoch, epoch_result in value.results.items():
try:
loss = epoch_result["loss"]
ep_m = min(ep_m, epoch)
ep_M = max(ep_M, epoch)
loss_m = min(loss_m, loss)
loss_M = max(loss_M, loss)
if config_param in config_params.keys():
config_params[config_param].append((config_param_val, epoch, loss))
else:
config_params[config_param] = [(config_param_val, epoch, loss)]
except:
print('Error in plot_parallel_scatter, continuing')
x_dev = 0.2
r_min = 3
r_max = 4
alpha = 0.4
text_x_offset = -0.1
text_y_offset = -0.1
size_text = 6
index = 0
for config_param, data in (dict(sorted(config_params.items()))).items():
# get all unique possible values for each config parameter
values = set(elem[0] for elem in data)
values = sorted(list(values))
n = len(data)
xs = np.zeros(n)
ys = np.zeros(n)
rads = np.zeros(n)
colors = np.zeros([n, 3])
# extract common features
for i in range(len(values)):
for k in range(len(data)):
if data[k][0] == values[i]:
ep = data[k][1]
acc = map_to_zero_one_range(data[k][2], loss_m, loss_M)
# test:
# loss_b = -1233125.5410615604
# loss_a = -5233125.5410615604 #(we minimize the negative reward)
# print(loss_b, "->", map_to_zero_one_range(loss_b, loss_m, loss_M))
# print(loss_a, "->", map_to_zero_one_range(loss_a, loss_m, loss_M))
rads[k] = linear_interpolation(np.log(ep), np.log(ep_m), np.log(ep_M), r_min, r_max) ** 2
colors[k, :] = get_color(acc)
# check for type (categorical,int,float,log)
if type(values[0]) is bool:
y_dev = x_dev / 2
for i in range(len(values)):
plt.text(index + text_x_offset, values[i] + text_y_offset, str(values[i]), rotation=90,
size=size_text)
for k in range(len(data)):
if data[k][0] == values[i]:
xs[k] = index + np.random.uniform(-x_dev, x_dev)
ys[k] = values[i] + np.random.uniform(-y_dev, y_dev)
elif type(values[0]) is str:
y_dev = min(1 / len(values) / 2.5, x_dev / 2)
for i in range(len(values)):
plt.text(index + text_x_offset, i / (max(len(values) - 1, 1)) + text_y_offset, values[i],
rotation=90, size=size_text)
for k in range(len(data)):
if data[k][0] == values[i]:
xs[k] = index + np.random.uniform(-x_dev, x_dev)
ys[k] = i / (max(len(values) - 1, 1)) + np.random.uniform(-y_dev, y_dev)
elif type(values[0]) is int:
y_dev = min(1 / len(values) / 2.5, x_dev / 2)
plotAllStr = len(values) < 20
if not plotAllStr:
min_val = min(values)
max_val = max(values)
plt.text(index + text_x_offset, 0 + text_y_offset, str(f"{Decimal(min_val):.1E}"), rotation=90, size=size_text)
plt.text(index + text_x_offset, 1 + text_y_offset, str(f"{Decimal(max_val):.1E}"), rotation=90, size=size_text)
for i in range(len(values)):
if plotAllStr:
plt.text(index + text_x_offset, i / (max(len(values) - 1, 1)), str(values[i]), rotation=90,
size=size_text)
for k in range(len(data)):
if data[k][0] == values[i]:
xs[k] = index + np.random.uniform(-x_dev, x_dev)
ys[k] = i / (max(len(values) - 1, 1)) + np.random.uniform(-y_dev, y_dev)
else: # float
min_val = min(values)
max_val = max(values)
            # log scale if min/max value differs too much
if max_val / min_val > 100:
val050 = np.exp((np.log(min_val) + np.log(max_val)) / 2)
for i in range(len(values)):
for k in range(len(data)):
if data[k][0] == values[i]:
xs[k] = index + np.random.uniform(-x_dev, x_dev)
ys[k] = linear_interpolation(np.log(data[k][0]), np.log(min_val), np.log(max_val), 0, 1)
# linear scale
else:
val050 = linear_interpolation(0.50, 0, 1, min_val, max_val)
for i in range(len(values)):
for k in range(len(data)):
if data[k][0] == values[i]:
xs[k] = index + np.random.uniform(-x_dev, x_dev)
ys[k] = linear_interpolation(data[k][0], min_val, max_val, 0, 1)
plt.text(index + text_x_offset, 0 + text_y_offset, str(f"{Decimal(min_val):.1E}"), rotation=90, size=size_text)
plt.text(index + text_x_offset, 0.5 + text_y_offset, str(f"{Decimal(val050):.1E}"), rotation=90, size=size_text)
plt.text(index + text_x_offset, 1 + text_y_offset, str(f"{Decimal(max_val):.1E}"), rotation=90, size=size_text)
plt.scatter(xs, ys, s=rads, c=colors, alpha=alpha, edgecolors='none')
index += 1
plt.yticks([], [])
plt.xticks(np.arange(index), (tuple(sorted(config_params.keys()))), rotation=90, fontsize=size_text)
plt.subplots_adjust(bottom=0.25)
def linear_interpolation(x, x0, x1, y0, y1):
# linearly interpolate between two x/y values for a given x value
return y0 + (y1 - y0) * (x - x0) / (x1 - x0 + 1e-9)
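# Illustrative usage (sketch added for clarity, not part of the original script):
# the helper maps x linearly from [x0, x1] onto [y0, y1]; the 1e-9 term only
# guards against division by zero when x0 == x1.
# E.g. linear_interpolation(5, 0, 10, 0, 1) -> ~0.5, and
# linear_interpolation(np.log(10), np.log(1), np.log(100), 3, 4) -> ~3.5.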
def map_to_zero_one_range(loss, loss_m, loss_M):
if loss_M < 1 and loss_m > 0 and REVERSE_LOSS == False:
        # if we already have a loss in the [0,1] range, there is no need to normalize anything
acc = loss
elif loss_M < 0 and loss_m > -1 and REVERSE_LOSS == True:
# if we have a loss in the [-1,0] range, simply revert its sign
acc = -loss
else:
        # normalize loss to the 0 (bad) - 1 (good) range
acc = (loss - loss_m) / (loss_M - loss_m + 1e-9)
if REVERSE_LOSS:
acc = 1 - acc
acc = acc ** EXP_LOSS
return acc
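# Sketch of the resulting scale (comment added for clarity): with REVERSE_LOSS
# = True the smallest loss is best, so in the generic branch
# acc = 1 - (loss - loss_m) / (loss_M - loss_m); a run at loss_m maps to
# acc ~= 1 and a run at loss_M maps to acc ~= 0, and EXP_LOSS = 1 keeps the
# ramp linear before the value is passed to get_color() below.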
def get_color(acc):
# print("acc: ", acc)
if acc <= 0:
# print("color: ", np.array([[1, 0, 0]]))
return np.array([[1, 0, 0]])
elif acc <= 0.5:
# print("color: ", np.array([[1, 0, 0]]) + 2 * acc * np.array([[0, 1, 0]]))
return np.array([[1, 0, 0]]) + 2 * acc * np.array([[0, 1, 0]])
elif acc <= 1:
# print("color: ", np.array([[1, 1, 0]]) + 2 * (acc - 0.5) * np.array([[-1, 0, 0]]))
return np.array([[1, 1, 0]]) + 2 * (acc - 0.5) * np.array([[-1, 0, 0]])
else:
# print("color: ", np.array([[0, 1, 0]]))
return np.array([[0, 1, 0]])
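# Colour ramp (comment added for clarity): acc = 0 -> red [1, 0, 0],
# acc = 0.5 -> yellow [1, 1, 0], acc = 1 -> green [0, 1, 0], interpolated
# linearly in between, so better configurations appear greener in the
# parallel scatter plot.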
def get_bright_random_color():
h, s, l = random.random(), 1, 0.5
return colorsys.hls_to_rgb(h, l, s)
if __name__ == '__main__':
# log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-04-09'
# log_dir = '../results/bohb_params_ql_cb_cliff_2021-03-04-16'
# log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-04-17'
# log_dir = '../results/bohb_params_ql_cb_cliff_2021-03-04-20'
# log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-04-22'
# log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-04-22'
# log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-05-13'
# log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-05-13'
# log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-06-00'
# log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-06-00'
# log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-06-10'
# log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-06-10'
# title = "DDQN ICM on CartPole"
# log_dir = '../results/bohb_params_td3_icm_hc_2021-03-08-20'
# title = "TD3 ICM on HC"
# log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-08-22'
# title = "TD3 ICM on CMC"
# log_dir = '../results/bohb_params_TD3_discrete_gumbel_temp_annealing_2021-03-11-14'
# title = "Discrete TD3 with annealed temp on CartPole"
# log_dir = '../results/bohb_params_TD3_discrete_gumbel_temp_annealing_on_syn_env_2_2021-03-11-23'
# title = "Discrete TD3 with annealed temp on CartPole Syn Env Model 2"
# log_dir = '../results/bohb_params_ppo_hc_2021-03-13-23'
# title = "PPO on HC"
# log_dir = '../results/bohb_params_td3_icm_cmc_max_reward_2021-03-16-00'
# title = "TD3 ICM on CMC max. reward"
# log_dir = '../results/bohb_params_td3_icm_hc_max_reward_2021-03-16-00'
# title = "TD3 ICM on HC max. reward"
# log_dir = '../results/bohb_params_ppo_hc_icm_1e-3_ent_coef_1e-1_action_std_2021-03-19-20'
# title = "PPO ICM on HC max. reward"
# log_dir = '../results/halfcheetah_td3_bohb_params_se_prep_2021-06-11-11'
# title = "TD3 HC max. reward"
# log_dir = '../results/halfcheetah_td3_bohb_params_se_prep_2021-06-13-17'
# title = "TD3 HC max. reward"
#
# log_dir = '../results/SE_evaluate_cmc_se_params_2021-07-27-11'
# title = "SE CMC HPO"
log_dir = "/home/ferreira/Projects/learning_environments/results/SE_evaluate_cmc_se_params_2021-07-30-10"
title = "SE CMC HPO"
analyze_bohb(log_dir, title=title)
|
[
"matplotlib.pyplot.title",
"numpy.mean",
"numpy.arange",
"colorsys.hls_to_rgb",
"os.path.join",
"numpy.std",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplots",
"hpbandster.core.result.logged_results_to_HBS_result",
"matplotlib.pyplot.show",
"random.random",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.semilogx",
"numpy.random.uniform",
"numpy.log",
"decimal.Decimal",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.array",
"math.isfinite"
] |
[((445, 488), 'hpbandster.core.result.logged_results_to_HBS_result', 'hpres.logged_results_to_HBS_result', (['log_dir'], {}), '(log_dir)\n', (479, 488), True, 'import hpbandster.core.result as hpres\n'), ((2340, 2356), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2349, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2369, 2371), True, 'import matplotlib.pyplot as plt\n'), ((6298, 6312), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6310, 6312), True, 'import matplotlib.pyplot as plt\n'), ((6984, 7021), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)', 'figsize': '(8, 4)'}), '(dpi=300, figsize=(8, 4))\n', (6996, 7021), True, 'import matplotlib.pyplot as plt\n'), ((12817, 12835), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]', '[]'], {}), '([], [])\n', (12827, 12835), True, 'import matplotlib.pyplot as plt\n'), ((12945, 12977), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.25)'}), '(bottom=0.25)\n', (12964, 12977), True, 'import matplotlib.pyplot as plt\n'), ((14447, 14475), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['h', 'l', 's'], {}), '(h, l, s)\n', (14466, 14475), False, 'import colorsys\n'), ((2449, 2513), 'os.path.join', 'os.path.join', (['"""../experiments/automl_plots/"""', "(file_name + '.png')"], {}), "('../experiments/automl_plots/', file_name + '.png')\n", (2461, 2513), False, 'import os\n'), ((8334, 8345), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8342, 8345), True, 'import numpy as np\n'), ((8359, 8370), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8367, 8370), True, 'import numpy as np\n'), ((8386, 8397), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8394, 8397), True, 'import numpy as np\n'), ((8415, 8431), 'numpy.zeros', 'np.zeros', (['[n, 3]'], {}), '([n, 3])\n', (8423, 8431), True, 'import numpy as np\n'), ((12723, 12792), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'s': 'rads', 'c': 'colors', 'alpha': 'alpha', 'edgecolors': '"""none"""'}), "(xs, ys, s=rads, c=colors, alpha=alpha, edgecolors='none')\n", (12734, 12792), True, 'import matplotlib.pyplot as plt\n'), ((12851, 12867), 'numpy.arange', 'np.arange', (['index'], {}), '(index)\n', (12860, 12867), True, 'import numpy as np\n'), ((13878, 13899), 'numpy.array', 'np.array', (['[[1, 0, 0]]'], {}), '([[1, 0, 0]])\n', (13886, 13899), True, 'import numpy as np\n'), ((14412, 14427), 'random.random', 'random.random', ([], {}), '()\n', (14425, 14427), False, 'import random\n'), ((4560, 4579), 'math.isfinite', 'math.isfinite', (['x[0]'], {}), '(x[0])\n', (4573, 4579), False, 'import math\n'), ((6716, 6747), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['x', 'y'], {'color': 'color'}), '(x, y, color=color)\n', (6728, 6747), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4858), 'math.isfinite', 'math.isfinite', (['lut[i][0]'], {}), '(lut[i][0])\n', (4847, 4858), False, 'import math\n'), ((14020, 14041), 'numpy.array', 'np.array', (['[[1, 0, 0]]'], {}), '([[1, 0, 0]])\n', (14028, 14041), True, 'import numpy as np\n'), ((14343, 14364), 'numpy.array', 'np.array', (['[[0, 1, 0]]'], {}), '([[0, 1, 0]])\n', (14351, 14364), True, 'import numpy as np\n'), ((4180, 4195), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (4187, 4195), True, 'import numpy as np\n'), ((4197, 4211), 'numpy.std', 'np.std', (['losses'], {}), '(losses)\n', (4203, 4211), True, 'import numpy as np\n'), ((14054, 14075), 'numpy.array', 
'np.array', (['[[0, 1, 0]]'], {}), '([[0, 1, 0]])\n', (14062, 14075), True, 'import numpy as np\n'), ((14203, 14224), 'numpy.array', 'np.array', (['[[1, 1, 0]]'], {}), '([[1, 1, 0]])\n', (14211, 14224), True, 'import numpy as np\n'), ((14245, 14267), 'numpy.array', 'np.array', (['[[-1, 0, 0]]'], {}), '([[-1, 0, 0]])\n', (14253, 14267), True, 'import numpy as np\n'), ((9095, 9105), 'numpy.log', 'np.log', (['ep'], {}), '(ep)\n', (9101, 9105), True, 'import numpy as np\n'), ((9107, 9119), 'numpy.log', 'np.log', (['ep_m'], {}), '(ep_m)\n', (9113, 9119), True, 'import numpy as np\n'), ((9121, 9133), 'numpy.log', 'np.log', (['ep_M'], {}), '(ep_M)\n', (9127, 9133), True, 'import numpy as np\n'), ((9641, 9673), 'numpy.random.uniform', 'np.random.uniform', (['(-x_dev)', 'x_dev'], {}), '(-x_dev, x_dev)\n', (9658, 9673), True, 'import numpy as np\n'), ((9718, 9750), 'numpy.random.uniform', 'np.random.uniform', (['(-y_dev)', 'y_dev'], {}), '(-y_dev, y_dev)\n', (9735, 9750), True, 'import numpy as np\n'), ((10179, 10211), 'numpy.random.uniform', 'np.random.uniform', (['(-x_dev)', 'x_dev'], {}), '(-x_dev, x_dev)\n', (10196, 10211), True, 'import numpy as np\n'), ((10276, 10308), 'numpy.random.uniform', 'np.random.uniform', (['(-y_dev)', 'y_dev'], {}), '(-y_dev, y_dev)\n', (10293, 10308), True, 'import numpy as np\n'), ((11173, 11205), 'numpy.random.uniform', 'np.random.uniform', (['(-x_dev)', 'x_dev'], {}), '(-x_dev, x_dev)\n', (11190, 11205), True, 'import numpy as np\n'), ((11270, 11302), 'numpy.random.uniform', 'np.random.uniform', (['(-y_dev)', 'y_dev'], {}), '(-y_dev, y_dev)\n', (11287, 11302), True, 'import numpy as np\n'), ((11526, 11541), 'numpy.log', 'np.log', (['min_val'], {}), '(min_val)\n', (11532, 11541), True, 'import numpy as np\n'), ((11544, 11559), 'numpy.log', 'np.log', (['max_val'], {}), '(max_val)\n', (11550, 11559), True, 'import numpy as np\n'), ((12411, 12427), 'decimal.Decimal', 'Decimal', (['min_val'], {}), '(min_val)\n', (12418, 12427), False, 'from decimal import Decimal\n'), ((12537, 12552), 'decimal.Decimal', 'Decimal', (['val050'], {}), '(val050)\n', (12544, 12552), False, 'from decimal import Decimal\n'), ((12660, 12676), 'decimal.Decimal', 'Decimal', (['max_val'], {}), '(max_val)\n', (12667, 12676), False, 'from decimal import Decimal\n'), ((10630, 10646), 'decimal.Decimal', 'Decimal', (['min_val'], {}), '(min_val)\n', (10637, 10646), False, 'from decimal import Decimal\n'), ((10758, 10774), 'decimal.Decimal', 'Decimal', (['max_val'], {}), '(max_val)\n', (10765, 10774), False, 'from decimal import Decimal\n'), ((11754, 11786), 'numpy.random.uniform', 'np.random.uniform', (['(-x_dev)', 'x_dev'], {}), '(-x_dev, x_dev)\n', (11771, 11786), True, 'import numpy as np\n'), ((11844, 11862), 'numpy.log', 'np.log', (['data[k][0]'], {}), '(data[k][0])\n', (11850, 11862), True, 'import numpy as np\n'), ((11864, 11879), 'numpy.log', 'np.log', (['min_val'], {}), '(min_val)\n', (11870, 11879), True, 'import numpy as np\n'), ((11881, 11896), 'numpy.log', 'np.log', (['max_val'], {}), '(max_val)\n', (11887, 11896), True, 'import numpy as np\n'), ((12214, 12246), 'numpy.random.uniform', 'np.random.uniform', (['(-x_dev)', 'x_dev'], {}), '(-x_dev, x_dev)\n', (12231, 12246), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from pytest import approx
from lenskit.topn import precision
from lenskit.util.test import demo_recs
from lenskit import topn
def _test_prec(items, rel, **k):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return precision(recs, truth, **k)
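# Note (comment added): as the cases below illustrate, precision is the share of
# recommended items that appear in the truth set (e.g. recs [1, 2, 3, 4] against
# relevant {1, 3} gives 0.5), only the first k recommendations are scored when
# k is given, and an empty recommendation list yields None rather than 0.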
def test_precision_empty_none():
prec = _test_prec([], [1, 3])
assert prec is None
def test_precision_simple_cases():
prec = _test_prec([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1, 2, 3, 4], [1, 3])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_prec([1, 2, 3, 4], range(4, 10))
assert prec == approx(0.25)
def test_precision_series():
prec = _test_prec(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_set():
prec = _test_prec(pd.Series([1, 2, 3, 4]), [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), range(4, 10))
assert prec == approx(0.25)
def test_precision_series_index():
prec = _test_prec(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_array():
prec = _test_prec(pd.Series([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), np.array([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
assert prec == approx(0.25)
def test_precision_array():
prec = _test_prec(np.array([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(np.array([1, 2, 3, 4]), np.array([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(np.array([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
assert prec == approx(0.25)
def test_prec_long_rel():
rel = np.arange(100)
items = [1, 0, 150, 3, 10]
r = _test_prec(items, rel, k=5)
assert r == approx(0.8)
def test_prec_long_items():
rel = np.arange(100)
items = [1, 0, 150, 3, 10, 30, 120, 4, 17]
r = _test_prec(items, rel, k=5)
assert r == approx(0.8)
def test_prec_short_items():
rel = np.arange(100)
items = [1, 0, 150]
r = _test_prec(items, rel, k=5)
assert r == approx(2 / 3)
def test_recall_bulk_k(demo_recs):
"bulk and normal match"
train, test, recs = demo_recs
assert test['user'].value_counts().max() > 5
rla = topn.RecListAnalysis()
rla.add_metric(precision, name='pk', k=5)
rla.add_metric(precision)
# metric without the bulk capabilities
rla.add_metric(lambda *a, **k: precision(*a, **k), name='ind_pk', k=5)
rla.add_metric(lambda *a: precision(*a), name='ind_p')
res = rla.compute(recs, test)
assert res.precision.values == approx(res.ind_p.values)
assert res.pk.values == approx(res.ind_pk.values)
|
[
"pandas.DataFrame",
"pandas.Index",
"numpy.arange",
"pandas.Series",
"numpy.array",
"lenskit.topn.precision",
"lenskit.topn.RecListAnalysis",
"pytest.approx"
] |
[((213, 242), 'pandas.DataFrame', 'pd.DataFrame', (["{'item': items}"], {}), "({'item': items})\n", (225, 242), True, 'import pandas as pd\n'), ((312, 339), 'lenskit.topn.precision', 'precision', (['recs', 'truth'], {}), '(recs, truth, **k)\n', (321, 339), False, 'from lenskit.topn import precision\n'), ((2499, 2513), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2508, 2513), True, 'import numpy as np\n'), ((2650, 2664), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2659, 2664), True, 'import numpy as np\n'), ((2818, 2832), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2827, 2832), True, 'import numpy as np\n'), ((3083, 3105), 'lenskit.topn.RecListAnalysis', 'topn.RecListAnalysis', ([], {}), '()\n', (3103, 3105), False, 'from lenskit import topn\n'), ((527, 538), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (533, 538), False, 'from pytest import approx\n'), ((594, 605), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (600, 605), False, 'from pytest import approx\n'), ((670, 681), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (676, 681), False, 'from pytest import approx\n'), ((749, 760), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (755, 760), False, 'from pytest import approx\n'), ((831, 842), 'pytest.approx', 'approx', (['(0.0)'], {}), '(0.0)\n', (837, 842), False, 'from pytest import approx\n'), ((913, 925), 'pytest.approx', 'approx', (['(0.25)'], {}), '(0.25)\n', (919, 925), False, 'from pytest import approx\n'), ((979, 996), 'pandas.Series', 'pd.Series', (['[1, 3]'], {}), '([1, 3])\n', (988, 996), True, 'import pandas as pd\n'), ((998, 1015), 'pandas.Series', 'pd.Series', (['[1, 3]'], {}), '([1, 3])\n', (1007, 1015), True, 'import pandas as pd\n'), ((1036, 1047), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (1042, 1047), False, 'from pytest import approx\n'), ((1071, 1094), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1080, 1094), True, 'import pandas as pd\n'), ((1096, 1116), 'pandas.Series', 'pd.Series', (['[1, 3, 5]'], {}), '([1, 3, 5])\n', (1105, 1116), True, 'import pandas as pd\n'), ((1137, 1148), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (1143, 1148), False, 'from pytest import approx\n'), ((1172, 1195), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1181, 1195), True, 'import pandas as pd\n'), ((1241, 1253), 'pytest.approx', 'approx', (['(0.25)'], {}), '(0.25)\n', (1247, 1253), False, 'from pytest import approx\n'), ((1311, 1334), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1320, 1334), True, 'import pandas as pd\n'), ((1366, 1377), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (1372, 1377), False, 'from pytest import approx\n'), ((1401, 1424), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1410, 1424), True, 'import pandas as pd\n'), ((1459, 1471), 'pytest.approx', 'approx', (['(0.25)'], {}), '(0.25)\n', (1465, 1471), False, 'from pytest import approx\n'), ((1531, 1548), 'pandas.Series', 'pd.Series', (['[1, 3]'], {}), '([1, 3])\n', (1540, 1548), True, 'import pandas as pd\n'), ((1550, 1566), 'pandas.Index', 'pd.Index', (['[1, 3]'], {}), '([1, 3])\n', (1558, 1566), True, 'import pandas as pd\n'), ((1587, 1598), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (1593, 1598), False, 'from pytest import approx\n'), ((1622, 1645), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1631, 1645), True, 'import pandas as pd\n'), ((1647, 
1666), 'pandas.Index', 'pd.Index', (['[1, 3, 5]'], {}), '([1, 3, 5])\n', (1655, 1666), True, 'import pandas as pd\n'), ((1687, 1698), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (1693, 1698), False, 'from pytest import approx\n'), ((1722, 1745), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1731, 1745), True, 'import pandas as pd\n'), ((1790, 1802), 'pytest.approx', 'approx', (['(0.25)'], {}), '(0.25)\n', (1796, 1802), False, 'from pytest import approx\n'), ((1862, 1879), 'pandas.Series', 'pd.Series', (['[1, 3]'], {}), '([1, 3])\n', (1871, 1879), True, 'import pandas as pd\n'), ((1881, 1897), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1889, 1897), True, 'import numpy as np\n'), ((1918, 1929), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (1924, 1929), False, 'from pytest import approx\n'), ((1953, 1976), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1962, 1976), True, 'import pandas as pd\n'), ((1978, 1997), 'numpy.array', 'np.array', (['[1, 3, 5]'], {}), '([1, 3, 5])\n', (1986, 1997), True, 'import numpy as np\n'), ((2018, 2029), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (2024, 2029), False, 'from pytest import approx\n'), ((2053, 2076), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2062, 2076), True, 'import pandas as pd\n'), ((2078, 2103), 'numpy.arange', 'np.arange', (['(4)', '(10)', '(1)', '"""u4"""'], {}), "(4, 10, 1, 'u4')\n", (2087, 2103), True, 'import numpy as np\n'), ((2124, 2136), 'pytest.approx', 'approx', (['(0.25)'], {}), '(0.25)\n', (2130, 2136), False, 'from pytest import approx\n'), ((2189, 2205), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (2197, 2205), True, 'import numpy as np\n'), ((2207, 2223), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (2215, 2223), True, 'import numpy as np\n'), ((2244, 2255), 'pytest.approx', 'approx', (['(1.0)'], {}), '(1.0)\n', (2250, 2255), False, 'from pytest import approx\n'), ((2279, 2301), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2287, 2301), True, 'import numpy as np\n'), ((2303, 2322), 'numpy.array', 'np.array', (['[1, 3, 5]'], {}), '([1, 3, 5])\n', (2311, 2322), True, 'import numpy as np\n'), ((2343, 2354), 'pytest.approx', 'approx', (['(0.5)'], {}), '(0.5)\n', (2349, 2354), False, 'from pytest import approx\n'), ((2378, 2400), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2386, 2400), True, 'import numpy as np\n'), ((2402, 2427), 'numpy.arange', 'np.arange', (['(4)', '(10)', '(1)', '"""u4"""'], {}), "(4, 10, 1, 'u4')\n", (2411, 2427), True, 'import numpy as np\n'), ((2448, 2460), 'pytest.approx', 'approx', (['(0.25)'], {}), '(0.25)\n', (2454, 2460), False, 'from pytest import approx\n'), ((2598, 2609), 'pytest.approx', 'approx', (['(0.8)'], {}), '(0.8)\n', (2604, 2609), False, 'from pytest import approx\n'), ((2765, 2776), 'pytest.approx', 'approx', (['(0.8)'], {}), '(0.8)\n', (2771, 2776), False, 'from pytest import approx\n'), ((2910, 2923), 'pytest.approx', 'approx', (['(2 / 3)'], {}), '(2 / 3)\n', (2916, 2923), False, 'from pytest import approx\n'), ((3429, 3453), 'pytest.approx', 'approx', (['res.ind_p.values'], {}), '(res.ind_p.values)\n', (3435, 3453), False, 'from pytest import approx\n'), ((3482, 3507), 'pytest.approx', 'approx', (['res.ind_pk.values'], {}), '(res.ind_pk.values)\n', (3488, 3507), False, 'from pytest import approx\n'), ((255, 282), 'pandas.DataFrame', 'pd.DataFrame', (["{'item': rel}"], {}), 
"({'item': rel})\n", (267, 282), True, 'import pandas as pd\n'), ((3260, 3278), 'lenskit.topn.precision', 'precision', (['*a'], {}), '(*a, **k)\n', (3269, 3278), False, 'from lenskit.topn import precision\n'), ((3330, 3343), 'lenskit.topn.precision', 'precision', (['*a'], {}), '(*a)\n', (3339, 3343), False, 'from lenskit.topn import precision\n')]
|
import numpy as np
from skmultiflow.trees.nodes import ActiveLearningNodePerceptron
from skmultiflow.trees.attribute_observer import NominalAttributeRegressionObserver
from skmultiflow.trees.attribute_observer import NumericAttributeRegressionObserver
from skmultiflow.utils import get_dimensions
class RandomLearningNodePerceptron(ActiveLearningNodePerceptron):
""" Learning Node for regression tasks that always use a linear perceptron
model to provide responses.
Parameters
----------
initial_class_observations: dict
In regression tasks this dictionary carries the sufficient statistics
to perform online variance calculation. They refer to the number of
observations (key '0'), the sum of the target values (key '1'), and
the sum of the squared target values (key '2').
max_features: int
Number of attributes per subset for each node split.
parent_node: RandomLearningNodePerceptron (default=None)
A node containing statistics about observed data.
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(self, initial_class_observations, max_features, parent_node=None,
random_state=None):
super().__init__(initial_class_observations, parent_node, random_state)
self.max_features = max_features
self.list_attributes = np.array([])
def learn_from_instance(self, X, y, weight, rht):
"""Update the node with the provided instance.
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes for updating the node.
y: float
Instance target value.
weight: float
Instance weight.
rht: HoeffdingTreeRegressor
Regression Hoeffding Tree to update.
"""
# In regression, the self._observed_class_distribution dictionary keeps three statistics:
# [0] sum of sample seen by the node
# [1] sum of target values
# [2] sum of squared target values
# These statistics are useful to calculate the mean and to calculate the variance reduction
if self.perceptron_weight is None:
self.perceptron_weight = self.random_state.uniform(-1, 1, len(X)+1)
try:
self._observed_class_distribution[0] += weight
self._observed_class_distribution[1] += y * weight
self._observed_class_distribution[2] += y * y * weight
except KeyError:
self._observed_class_distribution[0] = weight
self._observed_class_distribution[1] = y * weight
self._observed_class_distribution[2] = y * y * weight
# Update perceptron
self.samples_seen = self._observed_class_distribution[0]
if rht.learning_ratio_const:
learning_ratio = rht.learning_ratio_perceptron
else:
learning_ratio = rht.learning_ratio_perceptron / \
(1 + self.samples_seen * rht.learning_ratio_decay)
# Loop for compatibility with bagging methods
for i in range(int(weight)):
self.update_weights(X, y, learning_ratio, rht)
if self.list_attributes.size == 0:
self.list_attributes = self._sample_features(get_dimensions(X)[1])
for i in self.list_attributes:
try:
obs = self._attribute_observers[i]
except KeyError:
if rht.nominal_attributes is not None and i in rht.nominal_attributes:
obs = NominalAttributeRegressionObserver()
else:
obs = NumericAttributeRegressionObserver()
self._attribute_observers[i] = obs
obs.observe_attribute_class(X[i], y, weight)
def _sample_features(self, n_features):
return self.random_state.choice(
n_features, size=self.max_features, replace=False
)
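# --- Illustrative sketch, not part of the original module ---
# The node's sufficient statistics live in self._observed_class_distribution:
#   [0] weighted sample count, [1] weighted sum of targets, [2] weighted sum of
#   squared targets, from which the mean (s1 / s0) and the population variance
#   (s2 / s0 - (s1 / s0) ** 2) follow directly.
# A hypothetical construction (arguments as defined by __init__ above):
#   node = RandomLearningNodePerceptron({}, max_features=3, random_state=42)
#   node._sample_features(10)   # -> 3 distinct feature indices drawn at random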
|
[
"skmultiflow.trees.attribute_observer.NominalAttributeRegressionObserver",
"skmultiflow.trees.attribute_observer.NumericAttributeRegressionObserver",
"skmultiflow.utils.get_dimensions",
"numpy.array"
] |
[((1649, 1661), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1657, 1661), True, 'import numpy as np\n'), ((3595, 3612), 'skmultiflow.utils.get_dimensions', 'get_dimensions', (['X'], {}), '(X)\n', (3609, 3612), False, 'from skmultiflow.utils import get_dimensions\n'), ((3867, 3903), 'skmultiflow.trees.attribute_observer.NominalAttributeRegressionObserver', 'NominalAttributeRegressionObserver', ([], {}), '()\n', (3901, 3903), False, 'from skmultiflow.trees.attribute_observer import NominalAttributeRegressionObserver\n'), ((3952, 3988), 'skmultiflow.trees.attribute_observer.NumericAttributeRegressionObserver', 'NumericAttributeRegressionObserver', ([], {}), '()\n', (3986, 3988), False, 'from skmultiflow.trees.attribute_observer import NumericAttributeRegressionObserver\n')]
|
import numpy as np
import theano
import lasagne
## ALIASES ##
L = lasagne.layers
T = theano.tensor
get_output = L.get_output
get_all_params = L.get_all_params
cross_entropy = lasagne.objectives.categorical_crossentropy
get_layers = L.get_all_layers
class Network(object):
"""
Wrapper for neural networks for MNK that automates network compilation and
provides some conveninece functions for freezing, saving, and loading params
Things to consider doing:
mod save/load to use named layers
add self.reinitialize(layers)
"""
def __init__(self, architecture):
self.architecture = architecture
self.input_var = T.tensor4('inputs')
self.target_var = T.ivector('targets')
self.update_algo = lasagne.updates.adam # just a default
self.build()
self.objectives()
self.compile_functions()
self.val_trace = np.zeros(500)
self.train_trace = np.zeros(500)
self.trace_loc = 0
def build(self):
"""
Generates network graph, grabs params and output symbols
"""
self.net = self.architecture(self.input_var)
self.prediction = get_output(self.net)
self.test_prediction = get_output(self.net, deterministic=True)
self.params = get_all_params(self.net, trainable=True)
self.value_layer = get_layers(self.net)[-4]
self.value_prediction = get_output(self.value_layer)
return None
def objectives(self):
"""
Adds loss and accuracy nodes
"""
self.loss = cross_entropy(self.prediction, self.target_var)
self.loss = self.loss.mean()
self.itemized_loss = cross_entropy(self.test_prediction, self.target_var)
self.test_loss = self.itemized_loss.mean()
self.test_acc = T.mean(
T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
dtype=theano.config.floatX
)
self.updates = self.update_algo(self.loss, self.params)
return None
def compile_functions(self):
"""
Compiles theano functions for computing output, losses, etc
"""
self.output_fn = theano.function([self.input_var], self.test_prediction)
self.value_fn = theano.function([self.input_var], self.value_prediction)
self.train_fn = theano.function(
[self.input_var, self.target_var], self.loss,
updates=self.updates
)
self.test_fn = theano.function(
[self.input_var, self.target_var],
[self.test_loss, self.test_acc]
)
self.itemized_test_fn = theano.function(
[self.input_var, self.target_var],
self.itemized_loss
)
return None
def update_traces(self):
"""
Saves traces for plotting
"""
self.val_trace[self.trace_loc] = self.val_err
self.train_trace[self.trace_loc] = self.train_err
self.trace_loc += 1 # so hacky
return None
def freeze_params(self, net=None, exclude=None):
"""
Sets params to be untrainable
Excludes layers in optional arg exclude (tuple or list)
"""
if net is None:
net = self.net
layers = get_layers(net)
num_layers = len(layers)
        if exclude is not None:
            # normalise negative indices (e.g. -1 for the last layer) before filtering
            exclude = [i if i >= 0 else num_layers + i for i in exclude]
            layers = [layer for l, layer in enumerate(layers) if l not in exclude]
for layer in layers:
for param in layer.params:
layer.params[param].remove('trainable')
        self.params = get_all_params(net, trainable=True)  # CAUTION: needs to be rewritten so it does not throw errors as the autoencoders develop
return None
def unfreeze_params(self):
"""
Sets all parameters back to trainable
"""
for layer in L.get_all_layers(self.net):
for param in layer.params:
layer.params[param].add('trainable')
self.params = L.get_all_params(self.net, trainable=True)
return None
def save_params(self, param_file):
"""
Save parameters for reuse later
"""
all_params = L.get_all_param_values(self.net)
np.savez(param_file, *all_params)
return None
def load_params(self, paramsfile):
"""
Loads parameters from npz files
"""
with np.load(paramsfile) as loaded:
params_list = [(i[0], i[1]) for i in loaded.items()]
params_order = np.array([i[0][4:6] for i in params_list]).astype(int)
params_list = [params_list[i] for i in params_order.argsort()]
L.set_all_param_values(self.net, [i[1] for i in params_list])
return None
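# --- Illustrative usage sketch (hypothetical architecture function, not from this module) ---
# def my_arch(input_var):
#     net = L.InputLayer((None, 1, 9, 9), input_var=input_var)
#     net = L.DenseLayer(net, num_units=64)
#     net = L.DenseLayer(net, num_units=64)
#     return L.DenseLayer(net, num_units=81,
#                          nonlinearity=lasagne.nonlinearities.softmax)
# model = Network(my_arch)
# loss = model.train_fn(batch_X, batch_y)   # one gradient step on a mini-batch
# model.freeze_params(exclude=[-1])         # keep only the output layer trainable
# model.save_params('params.npz')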
class Autoencoder(Network):
"""
Wrapper for training and testing transfer learning with an autoencoder.
Almost as cool as it sounds.
Later, use super() to cut down bloat inside functions
"""
def __init__(self, architecture):
self.architecture = architecture
self.input_var = T.tensor4('inputs')
self.target_var = T.ivector('targets')
self.ae_target_var = T.tensor4('ae inputs')
self.update_algo = lasagne.updates.adam
self.val_trace = []
self.train_trace = []
self.build()
self.objectives()
self.compile_functions()
def build(self):
"""Generates graph, caches params, output symbols"""
self.autoencoder, self.value_layer, self.net = self.architecture(self.input_var)
self.prediction = get_output(self.net)
self.test_prediction = get_output(self.net, deterministic=True)
self.value_prediction = get_output(self.value_layer)
self.image = get_output(self.autoencoder)
self.test_image = get_output(self.autoencoder, deterministic=True)
self.params = get_all_params(self.net)
self.ae_params = get_all_params(self.autoencoder)
return None
def objectives(self):
"""Loss functions, etc"""
self.loss = cross_entropy(self.prediction, self.target_var).mean()
self.itemized_test_loss = cross_entropy(self.test_prediction, self.target_var)
self.test_loss = self.itemized_test_loss.mean()
self.test_acc = T.mean(
T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
dtype=theano.config.floatX
)
self.updates = self.update_algo(self.loss, self.params)
self.ae_loss = T.mean((self.ae_target_var - self.image)**2, dtype=theano.config.floatX)
self.ae_test_loss = T.mean((self.ae_target_var - self.test_image)**2, dtype=theano.config.floatX)
self.ae_updates = self.update_algo(self.ae_loss, self.ae_params)
return None
def compile_functions(self):
"""Compile theano functions"""
self.output_fn = theano.function([self.input_var], self.test_prediction)
self.value_fn = theano.function([self.input_var], self.value_prediction)
self.train_fn = theano.function(
[self.input_var, self.target_var],
self.loss,
updates = self.updates
)
self.test_fn = theano.function(
[self.input_var, self.target_var],
[self.test_loss, self.test_acc]
)
self.itemized_test_fn = theano.function(
[self.input_var, self.target_var],
self.itemized_test_loss
)
self.ae_output_fn = theano.function([self.input_var], self.test_image)
self.ae_train_fn = theano.function(
[self.input_var, self.ae_target_var],
self.ae_loss,
updates=self.ae_updates
)
self.ae_test_fn = theano.function(
[self.input_var, self.ae_target_var],
self.ae_test_loss
)
return None
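# --- Illustrative transfer-learning sketch (hypothetical data and architecture) ---
# The architecture function must return the (autoencoder, value_layer, net) triple
# expected by Autoencoder.build().
# ae = Autoencoder(my_ae_arch)
# for X_batch in unlabelled_batches:          # 1) pre-train the reconstruction loss
#     ae.ae_train_fn(X_batch, X_batch)
# ae.freeze_params(exclude=[-1])              # 2) freeze all but the classifier head
# for X_batch, y_batch in labelled_batches:   # 3) fine-tune on labels
#     ae.train_fn(X_batch, y_batch)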
|
[
"numpy.load",
"theano.function",
"numpy.zeros",
"numpy.array",
"numpy.savez"
] |
[((904, 917), 'numpy.zeros', 'np.zeros', (['(500)'], {}), '(500)\n', (912, 917), True, 'import numpy as np\n'), ((945, 958), 'numpy.zeros', 'np.zeros', (['(500)'], {}), '(500)\n', (953, 958), True, 'import numpy as np\n'), ((2183, 2238), 'theano.function', 'theano.function', (['[self.input_var]', 'self.test_prediction'], {}), '([self.input_var], self.test_prediction)\n', (2198, 2238), False, 'import theano\n'), ((2263, 2319), 'theano.function', 'theano.function', (['[self.input_var]', 'self.value_prediction'], {}), '([self.input_var], self.value_prediction)\n', (2278, 2319), False, 'import theano\n'), ((2344, 2432), 'theano.function', 'theano.function', (['[self.input_var, self.target_var]', 'self.loss'], {'updates': 'self.updates'}), '([self.input_var, self.target_var], self.loss, updates=self.\n updates)\n', (2359, 2432), False, 'import theano\n'), ((2485, 2573), 'theano.function', 'theano.function', (['[self.input_var, self.target_var]', '[self.test_loss, self.test_acc]'], {}), '([self.input_var, self.target_var], [self.test_loss, self.\n test_acc])\n', (2500, 2573), False, 'import theano\n'), ((2635, 2705), 'theano.function', 'theano.function', (['[self.input_var, self.target_var]', 'self.itemized_loss'], {}), '([self.input_var, self.target_var], self.itemized_loss)\n', (2650, 2705), False, 'import theano\n'), ((4281, 4314), 'numpy.savez', 'np.savez', (['param_file', '*all_params'], {}), '(param_file, *all_params)\n', (4289, 4314), True, 'import numpy as np\n'), ((6921, 6976), 'theano.function', 'theano.function', (['[self.input_var]', 'self.test_prediction'], {}), '([self.input_var], self.test_prediction)\n', (6936, 6976), False, 'import theano\n'), ((7001, 7057), 'theano.function', 'theano.function', (['[self.input_var]', 'self.value_prediction'], {}), '([self.input_var], self.value_prediction)\n', (7016, 7057), False, 'import theano\n'), ((7082, 7170), 'theano.function', 'theano.function', (['[self.input_var, self.target_var]', 'self.loss'], {'updates': 'self.updates'}), '([self.input_var, self.target_var], self.loss, updates=self.\n updates)\n', (7097, 7170), False, 'import theano\n'), ((7238, 7326), 'theano.function', 'theano.function', (['[self.input_var, self.target_var]', '[self.test_loss, self.test_acc]'], {}), '([self.input_var, self.target_var], [self.test_loss, self.\n test_acc])\n', (7253, 7326), False, 'import theano\n'), ((7389, 7464), 'theano.function', 'theano.function', (['[self.input_var, self.target_var]', 'self.itemized_test_loss'], {}), '([self.input_var, self.target_var], self.itemized_test_loss)\n', (7404, 7464), False, 'import theano\n'), ((7528, 7578), 'theano.function', 'theano.function', (['[self.input_var]', 'self.test_image'], {}), '([self.input_var], self.test_image)\n', (7543, 7578), False, 'import theano\n'), ((7606, 7703), 'theano.function', 'theano.function', (['[self.input_var, self.ae_target_var]', 'self.ae_loss'], {'updates': 'self.ae_updates'}), '([self.input_var, self.ae_target_var], self.ae_loss, updates\n =self.ae_updates)\n', (7621, 7703), False, 'import theano\n'), ((7772, 7844), 'theano.function', 'theano.function', (['[self.input_var, self.ae_target_var]', 'self.ae_test_loss'], {}), '([self.input_var, self.ae_target_var], self.ae_test_loss)\n', (7787, 7844), False, 'import theano\n'), ((4452, 4471), 'numpy.load', 'np.load', (['paramsfile'], {}), '(paramsfile)\n', (4459, 4471), True, 'import numpy as np\n'), ((4575, 4617), 'numpy.array', 'np.array', (['[i[0][4:6] for i in params_list]'], {}), '([i[0][4:6] for i in params_list])\n', (4583, 
4617), True, 'import numpy as np\n')]
|
# Copyright 2019 Graphcore Ltd.
"""
Dataset reader from Datalogue keras-attention tutorial.
References:
https://github.com/datalogue/keras-attention
https://medium.com/datalogue
"""
import json
import csv
import random
import numpy as np
# from keras.utils.np_utils import to_categorical
INPUT_PADDING = 50
OUTPUT_PADDING = 100
class Vocabulary(object):
def __init__(self, vocabulary_file, padding=None):
"""
Creates a vocabulary from a file
:param vocabulary_file: the path to the vocabulary
"""
self.vocabulary_file = vocabulary_file
with open(vocabulary_file, 'r') as f:
self.vocabulary = json.load(f)
self.padding = padding
self.reverse_vocabulary = {v: k for k, v in self.vocabulary.items()}
def start_id(self):
return self.vocabulary['<sot>']
def end_id(self):
return self.vocabulary['<eot>']
def size(self):
"""
Gets the size of the vocabulary
"""
return len(self.vocabulary.keys())
def string_to_int(self, text):
"""
        Converts a string into its character integer
representation
:param text: text to convert
"""
characters = list(text)
integers = []
if self.padding and len(characters) >= self.padding:
# truncate if too long
characters = characters[:self.padding-1]
characters.append('<eot>')
for c in characters:
if c in self.vocabulary:
integers.append(self.vocabulary[c])
else:
integers.append(self.vocabulary['<unk>'])
# pad:
if self.padding and len(integers) < self.padding:
integers.extend(
[self.vocabulary['<unk>']] * (self.padding - len(integers))
)
if len(integers) != self.padding:
print(text)
raise AttributeError('Length of text was not padding.')
return integers
def int_to_string(self, integers):
"""
Decodes a list of integers
        into its string representation
"""
characters = []
for i in integers:
characters.append(self.reverse_vocabulary[i])
return characters
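# --- Illustrative sketch (hypothetical vocabulary file contents) ---
# Assuming human_vocab.json maps {'<unk>': 0, '<eot>': 1, 'a': 2, 'b': 3, ...}:
# vocab = Vocabulary('human_vocab.json', padding=6)
# vocab.string_to_int('ab')        # -> [2, 3, 0, 0, 0, 0]  (padded with '<unk>')
# vocab.string_to_int('abababab')  # -> [2, 3, 2, 3, 2, 1]  (truncated, '<eot>' appended)
# vocab.int_to_string([2, 3])      # -> ['a', 'b']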
class Data(object):
def __init__(self, file_name, input_vocabulary, output_vocabulary):
"""
Creates an object that gets data from a file
:param file_name: name of the file to read from
        :param input_vocabulary: the Vocabulary object used to encode the inputs
        :param output_vocabulary: the Vocabulary object used to encode the targets
"""
self.input_vocabulary = input_vocabulary
self.output_vocabulary = output_vocabulary
self.file_name = file_name
def load(self):
"""
Loads data from a file
"""
self.inputs = []
self.targets = []
with open(self.file_name, 'r') as f:
reader = csv.reader(f)
for row in reader:
self.inputs.append(row[0])
self.targets.append(row[1])
def transform(self):
"""
Transforms the data as necessary
"""
# @TODO: use `pool.map_async` here?
self.inputs = np.array(list(
map(self.input_vocabulary.string_to_int, self.inputs)))
self.targets = np.array(list(
map(self.output_vocabulary.string_to_int, self.targets)))
assert len(self.inputs.shape) == 2, 'Inputs could not properly be encoded'
assert len(self.targets.shape) == 2, 'Targets could not properly be encoded'
def generator(self, batch_size):
"""
Creates a generator that can be used in `model.fit_generator()`
Batches are generated randomly.
:param batch_size: the number of instances to include per batch
"""
instance_id = range(len(self.inputs))
while True:
try:
batch_ids = random.sample(instance_id, batch_size)
yield (np.array(self.inputs[batch_ids], dtype=int),
np.array(self.targets[batch_ids]))
except Exception as e:
print('EXCEPTION OMG')
print(e)
yield None, None
if __name__ == '__main__':
input_vocab = Vocabulary('../data/human_vocab.json', padding=50)
output_vocab = Vocabulary('../data/machine_vocab.json', padding=12)
ds = Data('../data/fake.csv', input_vocab, output_vocab)
ds.load()
ds.transform()
print(ds.inputs.shape)
print(ds.targets.shape)
g = ds.generator(32)
print(ds.inputs[[5, 10, 12]].shape)
print(ds.targets[[5, 10, 12]].shape)
|
[
"random.sample",
"json.load",
"csv.reader",
"numpy.array"
] |
[((683, 695), 'json.load', 'json.load', (['f'], {}), '(f)\n', (692, 695), False, 'import json\n'), ((3124, 3137), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3134, 3137), False, 'import csv\n'), ((4146, 4184), 'random.sample', 'random.sample', (['instance_id', 'batch_size'], {}), '(instance_id, batch_size)\n', (4159, 4184), False, 'import random\n'), ((4208, 4251), 'numpy.array', 'np.array', (['self.inputs[batch_ids]'], {'dtype': 'int'}), '(self.inputs[batch_ids], dtype=int)\n', (4216, 4251), True, 'import numpy as np\n'), ((4276, 4309), 'numpy.array', 'np.array', (['self.targets[batch_ids]'], {}), '(self.targets[batch_ids])\n', (4284, 4309), True, 'import numpy as np\n')]
|
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples
from mxfusion.components.distributions import Laplace
from mxfusion.util.testutils import numpy_array_reshape, plot_univariate
from mxfusion.util.testutils import MockMXNetRandomGenerator
from scipy.stats import laplace
@pytest.mark.usefixtures("set_seed")
class TestLaplaceDistribution(object):
@pytest.mark.parametrize(
"dtype, location, location_is_samples, scale, scale_is_samples, rv, rv_is_samples, num_samples", [
(np.float64, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, np.random.rand(5,3,2), True, 5),
(np.float64, np.random.rand(3,2), False, np.random.rand(5,3,2)+0.1, True, np.random.rand(5,3,2), True, 5),
(np.float64, np.random.rand(3,2), False, np.random.rand(3,2)+0.1, False, np.random.rand(5,3,2), True, 5),
(np.float64, np.random.rand(3,2), False, np.random.rand(3,2)+0.1, False, np.random.rand(3,2), False, 1),
(np.float32, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, np.random.rand(5,3,2), True, 5),
])
def test_log_pdf(self, dtype, location, location_is_samples, scale, scale_is_samples, rv, rv_is_samples,
num_samples):
is_samples_any = any([location_is_samples, scale_is_samples, rv_is_samples])
rv_shape = rv.shape[1:] if rv_is_samples else rv.shape
n_dim = 1 + len(rv.shape) if is_samples_any and not rv_is_samples else len(rv.shape)
location_np = numpy_array_reshape(location, location_is_samples, n_dim)
scale_np = numpy_array_reshape(scale, scale_is_samples, n_dim)
rv_np = numpy_array_reshape(rv, rv_is_samples, n_dim)
log_pdf_np = laplace.logpdf(rv_np, location_np, scale_np)
var = Laplace.define_variable(shape=rv_shape, dtype=dtype).factor
location_mx = mx.nd.array(location, dtype=dtype)
if not location_is_samples:
location_mx = add_sample_dimension(mx.nd, location_mx)
var_mx = mx.nd.array(scale, dtype=dtype)
if not scale_is_samples:
var_mx = add_sample_dimension(mx.nd, var_mx)
rv_mx = mx.nd.array(rv, dtype=dtype)
if not rv_is_samples:
rv_mx = add_sample_dimension(mx.nd, rv_mx)
variables = {var.location.uuid: location_mx, var.scale.uuid: var_mx, var.random_variable.uuid: rv_mx}
log_pdf_rt = var.log_pdf(F=mx.nd, variables=variables)
assert np.issubdtype(log_pdf_rt.dtype, dtype)
assert array_has_samples(mx.nd, log_pdf_rt) == is_samples_any
if is_samples_any:
assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize(
"dtype, location, location_is_samples, scale, scale_is_samples, rv_shape, num_samples", [
(np.float64, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, (3,2), 5),
(np.float64, np.random.rand(3,2), False, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
(np.float64, np.random.rand(3,2), False, np.random.rand(3,2)+0.1, False, (3,2), 5),
(np.float64, np.random.rand(5,3,2), True, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
(np.float32, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, (3,2), 5),
])
def test_draw_samples(self, dtype, location, location_is_samples, scale,
scale_is_samples, rv_shape, num_samples):
n_dim = 1 + len(rv_shape)
location_np = numpy_array_reshape(location, location_is_samples, n_dim)
scale_np = numpy_array_reshape(scale, scale_is_samples, n_dim)
rand = np.random.laplace(size=(num_samples,) + rv_shape)
rv_samples_np = location_np + rand * scale_np
rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand.flatten(), dtype=dtype))
var = Laplace.define_variable(shape=rv_shape, dtype=dtype, rand_gen=rand_gen).factor
location_mx = mx.nd.array(location, dtype=dtype)
if not location_is_samples:
location_mx = add_sample_dimension(mx.nd, location_mx)
scale_mx = mx.nd.array(scale, dtype=dtype)
if not scale_is_samples:
scale_mx = add_sample_dimension(mx.nd, scale_mx)
variables = {var.location.uuid: location_mx, var.scale.uuid: scale_mx}
rv_samples_rt = var.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)
assert np.issubdtype(rv_samples_rt.dtype, dtype)
assert array_has_samples(mx.nd, rv_samples_rt)
assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(rv_samples_np, rv_samples_rt.asnumpy(), rtol=rtol, atol=atol)
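    # Note: with the mock generator the draw is deterministic, so the check above
    # reduces to the Laplace reparameterisation X = location + scale * E, where
    # E ~ Laplace(0, 1) is supplied via `rand`.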
def test_draw_samples_non_mock(self, plot=False):
# Also make sure the non-mock sampler works
dtype = np.float32
num_samples = 100000
location = np.array([0.5])
scale = np.array([2])
rv_shape = (1,)
location_mx = add_sample_dimension(mx.nd, mx.nd.array(location, dtype=dtype))
scale_mx = add_sample_dimension(mx.nd, mx.nd.array(scale, dtype=dtype))
rand_gen = None
var = Laplace.define_variable(shape=rv_shape, rand_gen=rand_gen, dtype=dtype).factor
variables = {var.location.uuid: location_mx, var.scale.uuid: scale_mx}
rv_samples_rt = var.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)
assert array_has_samples(mx.nd, rv_samples_rt)
assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
assert rv_samples_rt.dtype == dtype
if plot:
plot_univariate(samples=rv_samples_rt, dist=laplace, loc=location[0], scale=scale[0])
location_est, scale_est = laplace.fit(rv_samples_rt.asnumpy().ravel())
location_tol = 1e-2
scale_tol = 1e-2
assert np.abs(location[0] - location_est) < location_tol
assert np.abs(scale[0] - scale_est) < scale_tol
|
[
"numpy.abs",
"mxfusion.util.testutils.plot_univariate",
"numpy.random.laplace",
"mxfusion.components.variables.runtime_variable.array_has_samples",
"numpy.random.rand",
"mxfusion.components.distributions.Laplace.define_variable",
"scipy.stats.laplace.logpdf",
"numpy.array",
"mxnet.nd.array",
"mxfusion.components.variables.runtime_variable.add_sample_dimension",
"mxfusion.util.testutils.numpy_array_reshape",
"pytest.mark.usefixtures",
"mxfusion.components.variables.runtime_variable.get_num_samples",
"numpy.issubdtype"
] |
[((391, 426), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""set_seed"""'], {}), "('set_seed')\n", (414, 426), False, 'import pytest\n'), ((1594, 1651), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['location', 'location_is_samples', 'n_dim'], {}), '(location, location_is_samples, n_dim)\n', (1613, 1651), False, 'from mxfusion.util.testutils import numpy_array_reshape, plot_univariate\n'), ((1671, 1722), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['scale', 'scale_is_samples', 'n_dim'], {}), '(scale, scale_is_samples, n_dim)\n', (1690, 1722), False, 'from mxfusion.util.testutils import numpy_array_reshape, plot_univariate\n'), ((1739, 1784), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['rv', 'rv_is_samples', 'n_dim'], {}), '(rv, rv_is_samples, n_dim)\n', (1758, 1784), False, 'from mxfusion.util.testutils import numpy_array_reshape, plot_univariate\n'), ((1807, 1851), 'scipy.stats.laplace.logpdf', 'laplace.logpdf', (['rv_np', 'location_np', 'scale_np'], {}), '(rv_np, location_np, scale_np)\n', (1821, 1851), False, 'from scipy.stats import laplace\n'), ((1949, 1983), 'mxnet.nd.array', 'mx.nd.array', (['location'], {'dtype': 'dtype'}), '(location, dtype=dtype)\n', (1960, 1983), True, 'import mxnet as mx\n'), ((2104, 2135), 'mxnet.nd.array', 'mx.nd.array', (['scale'], {'dtype': 'dtype'}), '(scale, dtype=dtype)\n', (2115, 2135), True, 'import mxnet as mx\n'), ((2242, 2270), 'mxnet.nd.array', 'mx.nd.array', (['rv'], {'dtype': 'dtype'}), '(rv, dtype=dtype)\n', (2253, 2270), True, 'import mxnet as mx\n'), ((2545, 2583), 'numpy.issubdtype', 'np.issubdtype', (['log_pdf_rt.dtype', 'dtype'], {}), '(log_pdf_rt.dtype, dtype)\n', (2558, 2583), True, 'import numpy as np\n'), ((2761, 2793), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.float64'], {}), '(dtype, np.float64)\n', (2774, 2793), True, 'import numpy as np\n'), ((3771, 3828), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['location', 'location_is_samples', 'n_dim'], {}), '(location, location_is_samples, n_dim)\n', (3790, 3828), False, 'from mxfusion.util.testutils import numpy_array_reshape, plot_univariate\n'), ((3848, 3899), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['scale', 'scale_is_samples', 'n_dim'], {}), '(scale, scale_is_samples, n_dim)\n', (3867, 3899), False, 'from mxfusion.util.testutils import numpy_array_reshape, plot_univariate\n'), ((3916, 3965), 'numpy.random.laplace', 'np.random.laplace', ([], {'size': '((num_samples,) + rv_shape)'}), '(size=(num_samples,) + rv_shape)\n', (3933, 3965), True, 'import numpy as np\n'), ((4223, 4257), 'mxnet.nd.array', 'mx.nd.array', (['location'], {'dtype': 'dtype'}), '(location, dtype=dtype)\n', (4234, 4257), True, 'import mxnet as mx\n'), ((4380, 4411), 'mxnet.nd.array', 'mx.nd.array', (['scale'], {'dtype': 'dtype'}), '(scale, dtype=dtype)\n', (4391, 4411), True, 'import mxnet as mx\n'), ((4698, 4739), 'numpy.issubdtype', 'np.issubdtype', (['rv_samples_rt.dtype', 'dtype'], {}), '(rv_samples_rt.dtype, dtype)\n', (4711, 4739), True, 'import numpy as np\n'), ((4755, 4794), 'mxfusion.components.variables.runtime_variable.array_has_samples', 'array_has_samples', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (4772, 4794), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((4875, 4907), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.float64'], {}), 
'(dtype, np.float64)\n', (4888, 4907), True, 'import numpy as np\n'), ((5268, 5283), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (5276, 5283), True, 'import numpy as np\n'), ((5300, 5313), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (5308, 5313), True, 'import numpy as np\n'), ((5815, 5854), 'mxfusion.components.variables.runtime_variable.array_has_samples', 'array_has_samples', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (5832, 5854), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((1866, 1918), 'mxfusion.components.distributions.Laplace.define_variable', 'Laplace.define_variable', ([], {'shape': 'rv_shape', 'dtype': 'dtype'}), '(shape=rv_shape, dtype=dtype)\n', (1889, 1918), False, 'from mxfusion.components.distributions import Laplace\n'), ((2046, 2086), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'location_mx'], {}), '(mx.nd, location_mx)\n', (2066, 2086), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((2190, 2225), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'var_mx'], {}), '(mx.nd, var_mx)\n', (2210, 2225), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((2321, 2355), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'rv_mx'], {}), '(mx.nd, rv_mx)\n', (2341, 2355), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((2599, 2635), 'mxfusion.components.variables.runtime_variable.array_has_samples', 'array_has_samples', (['mx.nd', 'log_pdf_rt'], {}), '(mx.nd, log_pdf_rt)\n', (2616, 2635), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((4122, 4193), 'mxfusion.components.distributions.Laplace.define_variable', 'Laplace.define_variable', ([], {'shape': 'rv_shape', 'dtype': 'dtype', 'rand_gen': 'rand_gen'}), '(shape=rv_shape, dtype=dtype, rand_gen=rand_gen)\n', (4145, 4193), False, 'from mxfusion.components.distributions import Laplace\n'), ((4320, 4360), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'location_mx'], {}), '(mx.nd, location_mx)\n', (4340, 4360), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((4468, 4505), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'scale_mx'], {}), '(mx.nd, scale_mx)\n', (4488, 4505), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((4810, 4847), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (4825, 4847), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((5390, 5424), 'mxnet.nd.array', 'mx.nd.array', (['location'], {'dtype': 'dtype'}), '(location, dtype=dtype)\n', (5401, 5424), True, 'import mxnet as mx\n'), ((5473, 5504), 'mxnet.nd.array', 'mx.nd.array', (['scale'], {'dtype': 
'dtype'}), '(scale, dtype=dtype)\n', (5484, 5504), True, 'import mxnet as mx\n'), ((5545, 5616), 'mxfusion.components.distributions.Laplace.define_variable', 'Laplace.define_variable', ([], {'shape': 'rv_shape', 'rand_gen': 'rand_gen', 'dtype': 'dtype'}), '(shape=rv_shape, rand_gen=rand_gen, dtype=dtype)\n', (5568, 5616), False, 'from mxfusion.components.distributions import Laplace\n'), ((5870, 5907), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (5885, 5907), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((5997, 6087), 'mxfusion.util.testutils.plot_univariate', 'plot_univariate', ([], {'samples': 'rv_samples_rt', 'dist': 'laplace', 'loc': 'location[0]', 'scale': 'scale[0]'}), '(samples=rv_samples_rt, dist=laplace, loc=location[0], scale\n =scale[0])\n', (6012, 6087), False, 'from mxfusion.util.testutils import numpy_array_reshape, plot_univariate\n'), ((6231, 6265), 'numpy.abs', 'np.abs', (['(location[0] - location_est)'], {}), '(location[0] - location_est)\n', (6237, 6265), True, 'import numpy as np\n'), ((6296, 6324), 'numpy.abs', 'np.abs', (['(scale[0] - scale_est)'], {}), '(scale[0] - scale_est)\n', (6302, 6324), True, 'import numpy as np\n'), ((2700, 2734), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'log_pdf_rt'], {}), '(mx.nd, log_pdf_rt)\n', (2715, 2734), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples\n'), ((625, 648), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (639, 648), True, 'import numpy as np\n'), ((686, 709), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (700, 709), True, 'import numpy as np\n'), ((740, 760), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (754, 760), True, 'import numpy as np\n'), ((801, 824), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (815, 824), True, 'import numpy as np\n'), ((855, 875), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (869, 875), True, 'import numpy as np\n'), ((915, 938), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (929, 938), True, 'import numpy as np\n'), ((969, 989), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (983, 989), True, 'import numpy as np\n'), ((1029, 1049), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (1043, 1049), True, 'import numpy as np\n'), ((1082, 1105), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (1096, 1105), True, 'import numpy as np\n'), ((1143, 1166), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (1157, 1166), True, 'import numpy as np\n'), ((3115, 3138), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (3129, 3138), True, 'import numpy as np\n'), ((3208, 3228), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (3222, 3228), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (3315, 3321), True, 'import numpy as np\n'), ((3393, 3416), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (3407, 3416), True, 'import numpy 
as np\n'), ((3487, 3510), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (3501, 3510), True, 'import numpy as np\n'), ((654, 674), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (668, 674), True, 'import numpy as np\n'), ((768, 791), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (782, 791), True, 'import numpy as np\n'), ((883, 903), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (897, 903), True, 'import numpy as np\n'), ((997, 1017), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (1011, 1017), True, 'import numpy as np\n'), ((1111, 1131), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (1125, 1131), True, 'import numpy as np\n'), ((3144, 3164), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (3158, 3164), True, 'import numpy as np\n'), ((3236, 3259), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (3250, 3259), True, 'import numpy as np\n'), ((3329, 3349), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (3343, 3349), True, 'import numpy as np\n'), ((3422, 3445), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (3436, 3445), True, 'import numpy as np\n'), ((3516, 3536), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (3530, 3536), True, 'import numpy as np\n')]
|
from __future__ import print_function
import argparse
import logging
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from .config import EXT
from .fileio import read_binned_sfh
from .utils import convertz, parse_pipeline, float2sci
logger = logging.getLogger()
def mh2z(num):
return 0.02 * 10 ** num
def quadriture(x):
return np.sqrt(np.sum(x * x))
class SFH(object):
'''
load the match sfh solution as a class with attributes set by the
best fits from the sfh file.
'''
def __init__(self, filename, hmc_file=None, meta_file=None):
"""
Parameters
----------
filename : str
data file
hmc_file : str
data file from which to overwite uncertainties
meta_file : str
data file to only read bestfit line.
"""
self.base, self.name = os.path.split(filename)
self.data = read_binned_sfh(filename, hmc_file)
if meta_file is None:
meta_file = filename
self.load_match_header(meta_file)
def load_match_header(self, filename):
'''
assumes header is from line 0 to 6 and sets footer to be the final
line of the file
header formatting is important:
Line # format requirement
first Ends with "= %f (%s)"
N is the string "Best fit:\n"
N+1 has ',' separated strings of "%s=%f+%f-%f"
last is formatted "%s %f %f %f"
'''
def set_value_err_attr(key, attr, pattr, mattr):
'''
set attributes [key], [key]_perr, [key]_merr
to attr, pattr, mattr (must be floats)
'''
self.__setattr__(key, float(attr))
self.__setattr__(key + '_perr', float(pattr))
self.__setattr__(key + '_merr', float(mattr))
with open(filename, 'r') as infile:
lines = infile.readlines()
if len(lines) == 0:
print('empty file: %s' % filename)
self.header = []
self.footer = []
self.bestfit = np.nan
self.match_out = ''
self.data = np.array([])
return
self.header = lines[0:6]
self.footer = lines[-1]
try:
bestfit, fout = \
self.header[0].replace(' ', '').split('=')[1].split('(')
self.bestfit = float(bestfit)
self.match_out = fout.split(')')[0]
try:
iline = self.header.index('Best fit:\n') + 1
except ValueError:
print('Need Best fit line to assign attributes')
raise ValueError
line = self.header[iline].strip().replace(' ', '').split(',')
for i in line:
key, attrs = i.split('=')
attr, pmattr = attrs.split('+')
pattr, mattr = pmattr.split('-')
set_value_err_attr(key, attr, pattr, mattr)
# the final line has totalSF
key, attr, pattr, mattr = self.header[-1].strip().split()
set_value_err_attr(key, attr, pattr, mattr)
except:
# zcmerge files: the first line has totalSF
self.header = lines[0]
self.footer = ['']
try:
key, attr, pattr, mattr = self.header.strip().split()
set_value_err_attr(key, attr, pattr, mattr)
except:
# no header
pass
self.flag = None
if np.sum(np.diff(self.data.mh)) == 0:
self.flag = 'setz'
if len(np.nonzero(np.diff(self.data.mh) >= 0)[0]) == len(self.data.mh):
self.flag = 'zinc'
return
def mh2z(self, num):
"""nore really [M/H] """
return 0.02 * 10 ** num
def plot_bins(self, val='sfr', err=False, convertz=False, offset=1.):
'''make SFH bins for plotting'''
if isinstance(val, str):
if err:
valm = self.data['%s_errm' % val] * offset
valp = self.data['%s_errp' % val] * offset
val = self.data[val] * offset
if convertz:
val = mh2z(val)
if err:
valm = mh2z(valm)
valp = mh2z(valp)
lagei = self.data.lagei
lagef = self.data.lagef
# double up value
# lagei_i, lagef_i, lagei_i+1, lagef_i+1 ...
lages = np.ravel([(lagei[i], lagef[i]) for i in range(len(lagei))])
vals = np.ravel([(val[i], val[i]) for i in range(len(val))])
if err:
valm = np.ravel([(valm[i], valm[i]) for i in range(len(val))])
valp = np.ravel([(valp[i], valp[i]) for i in range(len(val))])
data = (vals, valm, valp)
else:
data = vals
return lages, data
def age_plot(self, val='sfr', ax=None, plt_kw={}, errors=True,
convertz=False, xlabel=None, ylabel=None,
sfr_offset=1e3):
plt_kw = dict({'lw': 3, 'color': 'black'}, **plt_kw)
eplt_kw = plt_kw.copy()
eplt_kw.update({'linestyle': 'None'})
lages, sfrs = self.plot_bins(offset=sfr_offset)
rlages, (rsfrs, sfr_merrs, sfr_perrs) = \
self.plot_bins(err=True, offset=sfr_offset)
rlages = np.append(self.data['lagei'], self.data['lagef'][-1])
rlages = rlages[:-1] + np.diff(rlages) / 2.
rsfrs = self.data['sfr'] * sfr_offset
rsfr_merrs = self.data['sfr_errm'] * sfr_offset
rsfr_perrs = self.data['sfr_errp'] * sfr_offset
lages = 10 ** (lages - 9.)
rlages = 10 ** (rlages - 9.)
if val != 'sfr':
lages, vals = self.plot_bins(val=val, convertz=convertz)
# mask values with no SF
isfr, = np.nonzero(sfrs == 0)
vals[isfr] = np.nan
if self.flag != 'setz':
rlages, (rvals, val_merrs, val_perrs) = \
self.plot_bins(val=val, err=True)
# mask values with no SF
irsfr, = np.nonzero(rsfrs == 0)
val_merrs[irsfr] = 0.
val_perrs[irsfr] = 0.
if np.sum(val_merrs) == 0 or np.sum(val_perrs) == 0:
errors = False
else:
errors = False
if 'mh' in val:
if ylabel is not None:
ylabel = r'$\rm{[M/H]}$'
if convertz:
ylabel = r'$Z$'
else:
ylabel = r'$SFR\ %s\ (\rm{M_\odot/yr})$' % \
float2sci(1. / sfr_offset).replace('$', '')
vals = sfrs
rvals = rsfrs
val_merrs = rsfr_merrs
val_perrs = rsfr_perrs
if ax is None:
_, ax = plt.subplots()
xlabel = r'$\log Age\ \rm{(yr)}$'
ax.plot(lages, vals, **plt_kw)
if errors:
ax.errorbar(rlages, rvals, yerr=[val_merrs, val_perrs], **eplt_kw)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=20)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=20)
return ax
def plot_csfr(self, ax=None, errors=True, plt_kw={}, fill_between_kw={},
xlim=None, ylim=(-0.01, 1.01), data=True):
'''cumulative sfr plot from match'''
one_off = False
if ax is None:
fig, ax = plt.subplots(figsize=(8, 8))
plt.subplots_adjust(right=0.95, left=0.1, bottom=0.1, top=0.95)
ax.tick_params(direction='in')
one_off = True
fill_between_kw = dict({'alpha': 1, 'color': 'gray'},
**fill_between_kw)
plt_kw = dict({'lw': 3}, **plt_kw)
# lages, (csfh, csfh_errm, csfh_errp) = self.plot_bins(val='csfr',
# err=True)
lages = self.data['lagei']
csfh = self.data['csfr']
csfh_errm = self.data['csfr_errm']
csfh_errp = self.data['csfr_errp']
age = 10 ** (lages - 9.)
# age = lages
age = np.append(age, 10 ** (self.data['lagef'][-1] - 9))
csfh = np.append(csfh, 0)
csfh_errm = np.append(csfh_errm, 0)
csfh_errp = np.append(csfh_errp, 0)
if errors:
ax.fill_between(age, csfh - csfh_errm, csfh + csfh_errp,
**fill_between_kw)
if data:
ax.plot(age, csfh, **plt_kw)
if xlim is not None:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# ax.set_xscale('log')
# ax.xaxis.set_major_locator(LogNLocator)
if one_off:
ax.set_xlabel('$\\rm{Star\ Formation\ Time\ (Gyr)}$', fontsize=20)
            ax.set_ylabel('$\\rm{Cumulative\ Star\ Formation}$', fontsize=20)
plt.legend(loc=0, frameon=False)
if 'label' in plt_kw.keys():
outfile = \
                    '{}_csfr{}'.format(plt_kw['label'].replace('$', '').lower(),
                                       EXT)
else:
outfile = \
'{}_csfr{}'.format(os.path.join(self.base, self.name), EXT)
plt.savefig(outfile)
print('wrote {}'.format(outfile))
return ax
def sf_weighted_metallicity(self):
agebins = (10 ** self.data.lagef - 10 ** self.data.lagei)
totalsf = np.sum(self.data.sfr * agebins)
fracsf = (self.data.sfr * agebins) / totalsf
feh = np.array([convertz(z=0.02 * 10 ** m)[-2] for m in self.data.mh])
return np.sum(fracsf * feh)
def param_table(self, angst=True, agesplit=[1e9, 3e9], target='',
filters=['', '']):
try:
dic = {'bestfit': self.bestfit, 'Av': self.Av, 'dmod': self.dmod}
except:
print('No bestfit info')
dic = {'bestfit': np.nan, 'Av': np.nan, 'dmod': np.nan}
dic['header'] = \
(r'Galaxy & Optical Filters & A$_V$ & $(m\!-\!M)_0$ &'
r'$\% \frac{{\rm{{SF}}}}{{\rm{{SF_{{TOT}}}}}}$ &'
r'$\langle \mbox{{[Fe/H]}} \rangle$ &'
r'$\% \frac{{\rm{{SF}}}}{{\rm{{SF_{{TOT}}}}}}$ &'
r'$\langle \mbox{{[Fe/H]}} \rangle$ & $bestfit$ \\ & & & & '
r'\multicolumn{{2}}{{c}}{{$<{0}\rm{{Gyr}}$}} & '
r'\multicolumn{{2}}{{c}}{{${0}-{1}\rm{{Gyr}}$}} & \\ \hline'
'\n'.format(*agesplit))
dic['target'] = target
if angst:
try:
dic['target'], filters = parse_pipeline(self.name)
except:
pass
dic['filters'] = ','.join(filters)
fyng, fyng_errp, fyng_errm = self.mass_fraction(0, agesplit[0])
fint, fint_errp, fint_errm = self.mass_fraction(agesplit[0],
agesplit[1])
# logZ = 0 if there is no SF, that will add error to mean Fe/H
iyng = self.nearest_age(agesplit[0], i=False)
iint = self.nearest_age(agesplit[1], i=False)
iyngs, = np.nonzero(self.data.mh[:iyng + 1] != 0)
iints, = np.nonzero(self.data.mh[:iint + 1] != 0)
iints = list(set(iints) - set(iyngs))
feh_yng = convertz(z=mh2z(np.mean(self.data.mh[iyngs])))[-2]
feh_int = convertz(z=mh2z(np.mean(self.data.mh[iints])))[-2]
feh_yng_errp = \
convertz(z=mh2z(quadriture(self.data.mh_errp[iyngs])))[-2]
feh_yng_errm = \
convertz(z=mh2z(quadriture(self.data.mh_errm[iyngs])))[-2]
feh_int_errp = \
convertz(z=mh2z(quadriture(self.data.mh_errp[iints])))[-2]
feh_int_errm = \
convertz(z=mh2z(quadriture(self.data.mh_errm[iints])))[-2]
maf = '${0: .2f}^{{+{1: .2f}}}_{{-{2: .2f}}}$'
dic['fyng'], dic['fint'] = \
[maf.format(v, p, m) for v, p, m in zip([fyng, fint],
[fyng_errp, fint_errp],
[fyng_errm, fint_errm])]
dic['feh_yng'], dic['feh_int'] = \
[maf.format(v, p, m) for v, p, m in
zip([feh_yng, feh_int],
[feh_yng_errp, feh_int_errp],
[feh_yng_errm, feh_int_errm])]
line = ['{target}', '{filters}', '{Av: .2f}', '{dmod: .2f}',
'{fyng}', '{feh_yng}', '{fint}', '{feh_int}']
dic['fmt'] = '%s \\\\ \n' % (' & '.join(line))
return dic
def nearest_age(self, lage, i=True):
if lage > 10.15:
lage = np.log10(lage)
logger.warning('converting input age to log age')
age_arr = self.data.lagef
msg = 'lagef'
if i:
age_arr = self.data.lagei
msg = 'lagei'
        # minimum age bin size; a warning is triggered if the requested age is
        # farther from the nearest bin edge than this tolerance.
tol = np.min(np.diff(age_arr))
# find closest age bin to lage
idx = np.argmin(np.abs(age_arr - lage))
difi = np.abs(age_arr[idx] - lage)
if difi > tol:
logger.warning(('input {}={} not found. ',
'Using {}').format(msg, lage, age_arr[idx]))
return idx
def mass_fraction(self, lagei, lagef):
"""
Return the fraction of total mass formed between lagei and lagef.
lage[] units can be log yr or yr.
Multiply by self.totalSF to obtain the mass formed.
"""
agebins = (10 ** self.data.lagef - 10 ** self.data.lagei)
if lagef-lagei < np.min(np.diff(self.data.lagei)):
logger.error('Age difference smaller than bin sizes (or negative)')
return 0, 0, 0
# higher precision than self.totalSF
totalsf = np.sum(self.data.sfr * agebins)
idxi = self.nearest_age(lagei)
# +1 is to include final bin
idxf = self.nearest_age(lagef, i=False) + 1
fracsfr = np.sum(self.data.sfr[idxi:idxf] *
agebins[idxi:idxf]) / totalsf
fracsfr_errp = quadriture(self.data.sfr_errp[idxi:idxf] *
agebins[idxi:idxf]) / totalsf
fracsfr_errm = quadriture(self.data.sfr_errm[idxi:idxf] *
agebins[idxi:idxf]) / totalsf
return fracsfr, fracsfr_errp, fracsfr_errm
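    # For example, mass_fraction(8, 9) -- or equivalently mass_fraction(1e8, 1e9) --
    # gives the fraction of the total star formation between 100 Myr and 1 Gyr,
    # together with its +/- fractional uncertainties (summed in quadrature);
    # multiply all three by self.totalSF to obtain masses.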
def sfh_plot(self):
from matplotlib.ticker import NullFormatter
_, (ax1, ax2) = plt.subplots(nrows=2)
self.age_plot(ax=ax1)
self.age_plot(val='mh', convertz=False, ax=ax2)
ax1.xaxis.set_major_formatter(NullFormatter())
plt.subplots_adjust(hspace=0.1)
figname = os.path.join(self.base, self.name + EXT)
print('wrote {}'.format(figname))
plt.savefig(figname)
plt.close()
def main(argv):
"""
    Main function for sfh.py: plot the SFH output from calcsfh, zcombine, or zcmerge.
"""
parser = argparse.ArgumentParser(description="Plot match sfh")
parser.add_argument('sfh_files', nargs='*', type=str,
help='ssp output(s) or formated output(s)')
args = parser.parse_args(argv)
for sfh_file in args.sfh_files:
msfh = SFH(sfh_file)
if len(msfh.data) != 0:
msfh.sfh_plot()
msfh.plot_csfr()
# dic = msfh.param_table()
# print(dic['fmt'].format(**dic))
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"matplotlib.pyplot.savefig",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.abs",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.nonzero",
"numpy.append",
"numpy.diff",
"numpy.array",
"matplotlib.ticker.NullFormatter",
"numpy.mean",
"matplotlib.pyplot.subplots_adjust",
"numpy.log10",
"os.path.split",
"matplotlib.pyplot.subplots",
"logging.getLogger"
] |
[((268, 287), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (285, 287), False, 'import logging\n'), ((14995, 15048), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot match sfh"""'}), "(description='Plot match sfh')\n", (15018, 15048), False, 'import argparse\n'), ((373, 386), 'numpy.sum', 'np.sum', (['(x * x)'], {}), '(x * x)\n', (379, 386), True, 'import numpy as np\n'), ((887, 910), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (900, 910), False, 'import os\n'), ((5345, 5398), 'numpy.append', 'np.append', (["self.data['lagei']", "self.data['lagef'][-1]"], {}), "(self.data['lagei'], self.data['lagef'][-1])\n", (5354, 5398), True, 'import numpy as np\n'), ((8162, 8212), 'numpy.append', 'np.append', (['age', "(10 ** (self.data['lagef'][-1] - 9))"], {}), "(age, 10 ** (self.data['lagef'][-1] - 9))\n", (8171, 8212), True, 'import numpy as np\n'), ((8228, 8246), 'numpy.append', 'np.append', (['csfh', '(0)'], {}), '(csfh, 0)\n', (8237, 8246), True, 'import numpy as np\n'), ((8267, 8290), 'numpy.append', 'np.append', (['csfh_errm', '(0)'], {}), '(csfh_errm, 0)\n', (8276, 8290), True, 'import numpy as np\n'), ((8311, 8334), 'numpy.append', 'np.append', (['csfh_errp', '(0)'], {}), '(csfh_errp, 0)\n', (8320, 8334), True, 'import numpy as np\n'), ((9456, 9487), 'numpy.sum', 'np.sum', (['(self.data.sfr * agebins)'], {}), '(self.data.sfr * agebins)\n', (9462, 9487), True, 'import numpy as np\n'), ((9635, 9655), 'numpy.sum', 'np.sum', (['(fracsf * feh)'], {}), '(fracsf * feh)\n', (9641, 9655), True, 'import numpy as np\n'), ((11125, 11165), 'numpy.nonzero', 'np.nonzero', (['(self.data.mh[:iyng + 1] != 0)'], {}), '(self.data.mh[:iyng + 1] != 0)\n', (11135, 11165), True, 'import numpy as np\n'), ((11183, 11223), 'numpy.nonzero', 'np.nonzero', (['(self.data.mh[:iint + 1] != 0)'], {}), '(self.data.mh[:iint + 1] != 0)\n', (11193, 11223), True, 'import numpy as np\n'), ((13096, 13123), 'numpy.abs', 'np.abs', (['(age_arr[idx] - lage)'], {}), '(age_arr[idx] - lage)\n', (13102, 13123), True, 'import numpy as np\n'), ((13834, 13865), 'numpy.sum', 'np.sum', (['(self.data.sfr * agebins)'], {}), '(self.data.sfr * agebins)\n', (13840, 13865), True, 'import numpy as np\n'), ((14515, 14536), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)'}), '(nrows=2)\n', (14527, 14536), True, 'import matplotlib.pyplot as plt\n'), ((14686, 14717), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.1)'}), '(hspace=0.1)\n', (14705, 14717), True, 'import matplotlib.pyplot as plt\n'), ((14736, 14776), 'os.path.join', 'os.path.join', (['self.base', '(self.name + EXT)'], {}), '(self.base, self.name + EXT)\n', (14748, 14776), False, 'import os\n'), ((14827, 14847), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (14838, 14847), True, 'import matplotlib.pyplot as plt\n'), ((14856, 14867), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14865, 14867), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2176), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2172, 2176), True, 'import numpy as np\n'), ((5834, 5855), 'numpy.nonzero', 'np.nonzero', (['(sfrs == 0)'], {}), '(sfrs == 0)\n', (5844, 5855), True, 'import numpy as np\n'), ((6830, 6844), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6842, 6844), True, 'import matplotlib.pyplot as plt\n'), ((7457, 7485), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (7469, 
7485), True, 'import matplotlib.pyplot as plt\n'), ((7498, 7561), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.95)', 'left': '(0.1)', 'bottom': '(0.1)', 'top': '(0.95)'}), '(right=0.95, left=0.1, bottom=0.1, top=0.95)\n', (7517, 7561), True, 'import matplotlib.pyplot as plt\n'), ((8886, 8918), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'frameon': '(False)'}), '(loc=0, frameon=False)\n', (8896, 8918), True, 'import matplotlib.pyplot as plt\n'), ((9247, 9267), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (9258, 9267), True, 'import matplotlib.pyplot as plt\n'), ((12631, 12645), 'numpy.log10', 'np.log10', (['lage'], {}), '(lage)\n', (12639, 12645), True, 'import numpy as np\n'), ((12974, 12990), 'numpy.diff', 'np.diff', (['age_arr'], {}), '(age_arr)\n', (12981, 12990), True, 'import numpy as np\n'), ((13057, 13079), 'numpy.abs', 'np.abs', (['(age_arr - lage)'], {}), '(age_arr - lage)\n', (13063, 13079), True, 'import numpy as np\n'), ((14013, 14066), 'numpy.sum', 'np.sum', (['(self.data.sfr[idxi:idxf] * agebins[idxi:idxf])'], {}), '(self.data.sfr[idxi:idxf] * agebins[idxi:idxf])\n', (14019, 14066), True, 'import numpy as np\n'), ((14661, 14676), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (14674, 14676), False, 'from matplotlib.ticker import NullFormatter\n'), ((3542, 3563), 'numpy.diff', 'np.diff', (['self.data.mh'], {}), '(self.data.mh)\n', (3549, 3563), True, 'import numpy as np\n'), ((5430, 5445), 'numpy.diff', 'np.diff', (['rlages'], {}), '(rlages)\n', (5437, 5445), True, 'import numpy as np\n'), ((6102, 6124), 'numpy.nonzero', 'np.nonzero', (['(rsfrs == 0)'], {}), '(rsfrs == 0)\n', (6112, 6124), True, 'import numpy as np\n'), ((13636, 13660), 'numpy.diff', 'np.diff', (['self.data.lagei'], {}), '(self.data.lagei)\n', (13643, 13660), True, 'import numpy as np\n'), ((9194, 9228), 'os.path.join', 'os.path.join', (['self.base', 'self.name'], {}), '(self.base, self.name)\n', (9206, 9228), False, 'import os\n'), ((6220, 6237), 'numpy.sum', 'np.sum', (['val_merrs'], {}), '(val_merrs)\n', (6226, 6237), True, 'import numpy as np\n'), ((6246, 6263), 'numpy.sum', 'np.sum', (['val_perrs'], {}), '(val_perrs)\n', (6252, 6263), True, 'import numpy as np\n'), ((11305, 11333), 'numpy.mean', 'np.mean', (['self.data.mh[iyngs]'], {}), '(self.data.mh[iyngs])\n', (11312, 11333), True, 'import numpy as np\n'), ((11374, 11402), 'numpy.mean', 'np.mean', (['self.data.mh[iints]'], {}), '(self.data.mh[iints])\n', (11381, 11402), True, 'import numpy as np\n'), ((3628, 3649), 'numpy.diff', 'np.diff', (['self.data.mh'], {}), '(self.data.mh)\n', (3635, 3649), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 10:12:42 2020
@author: shlomi
"""
from PW_paths import work_yuval
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
awd_path = work_yuval/'AW3D30'
def interpolate_var_ds_at_multiple_dts(var_ds, geo_var_df, predict_df,
time_dim='time', dem_path=awd_path,
H_constant=None):
import pandas as pd
times_df = var_ds[time_dim].to_pandas()
df = pd.DataFrame()
for dt in times_df:
print('interpolating on datetime: {}.'.format(dt))
hdf = slice_var_ds_at_dt_and_convert_to_dataframe(var_ds, geo_var_df,
dt=dt.strftime('%Y-%m-%dT%H:%M:%S'))
# if H is None:
# # estimate scale height H by using all stations' data:
if H_constant is not None:
H = H_constant
else:
H = get_var_lapse_rate(hdf, model='LR', plot=False)
print('scale height is: {} meters.'.format(H))
new_hdf = apply_lapse_rate_change(hdf, H)
df_inter = interpolate_at_one_dt(new_hdf, H, predict_df=predict_df,
dem_path=dem_path, ppd=50)
df_inter['datetime'] = dt
df_inter['H'] = H
df = df.append(df_inter)
df['name'] = df.index
df['datetime'] = pd.to_datetime(df['datetime'])
df.set_index('datetime', inplace=True)
df.index.name = 'time'
return df
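# A minimal usage sketch (illustrative names only; these datasets are assumptions, not
# variables defined in this module):
#   pw_ds   = xarray Dataset of PWV time series, one data variable per GNSS station
#   geo_df  = pandas DataFrame indexed by station name with 'lat', 'lon' and 'alt' columns
#   grid_df = pandas DataFrame of target points, also with 'lat', 'lon' and 'alt' columns
#   result  = interpolate_var_ds_at_multiple_dts(pw_ds, geo_df, grid_df)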
def slice_var_ds_at_dt_and_convert_to_dataframe(var_ds, df, dt='2018-04-15T22:00:00'):
"""
    slice the var dataset (e.g. PWV) at a specific datetime and add lat, lon and alt from df
Parameters
----------
var_ds : Xarray Dataset
containing variable such as PWV vs. time.
df : Pandas DataFrame
containing lat, lon and alt cols, indexed by var_ds data_vars.
dt : datetime string, optional
        the datetime at which to slice var_ds. The default is '2018-04-15T22:00:00'.
Returns
-------
hdf : pandas dataframe
sliced var indexed by alt.
"""
time_dim = list(set(var_ds.dims))[0]
var_dt = var_ds.sel({time_dim: dt}).expand_dims(time_dim)
hdf = var_dt.to_dataframe().T
hdf = hdf.join(df[['lat', 'lon', 'alt']])
hdf = hdf.set_index('alt')
hdf = hdf.sort_index().dropna()
return hdf
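# The returned hdf has one row per station, indexed and sorted by altitude, with a single
# variable column (named by the requested timestamp) plus 'lat' and 'lon' columns.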
def get_pressure_lapse_rate(path=ims_path, model='LR', plot=False):
from aux_gps import linear_fit_using_scipy_da_ts
import matplotlib.pyplot as plt
import xarray as xr
from aux_gps import keep_iqr
bp = xr.load_dataset(ims_path / 'IMS_BP_israeli_10mins.nc')
bps = [keep_iqr(bp[x]) for x in bp]
bp = xr.merge(bps)
mean_p = bp.mean('time').to_array('alt')
mean_p.name = 'mean_pressure'
alts = [bp[x].attrs['station_alt'] for x in bp.data_vars]
mean_p['alt'] = alts
_, results = linear_fit_using_scipy_da_ts(mean_p, model=model, slope_factor=1, not_time=True)
slope = results['slope']
inter = results['intercept']
modeled_var = slope * mean_p['alt'] + inter
if plot:
fig, ax = plt.subplots()
modeled_var.plot(ax=ax, color='r')
mean_p.plot.line(linewidth=0., marker='o', ax=ax, color='b')
# lr = 1000 * abs(slope)
textstr = 'Pressure lapse rate: {:.1f} hPa/km'.format(1000 * slope)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=12,
verticalalignment='top', bbox=props)
ax.set_xlabel('Height a.s.l [m]')
ax.set_ylabel('Mean Pressure [hPa]')
return results
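# Sanity check on the fit above (approximate figures, not taken from this module): near sea
# level pressure falls by roughly 1 hPa per ~8 m, i.e. about 0.12 hPa/m, so the fitted slope
# printed as hPa/km should be on the order of -100 hPa/km.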
def get_var_lapse_rate(hdf, model='LR', plot=False):
from aux_gps import linear_fit_using_scipy_da_ts
import matplotlib.pyplot as plt
import numpy as np
hda = hdf.iloc[:, 0].to_xarray()
dt = hda.name.strftime('%Y-%m-%d %H:%M')
hda.name = ''
log_hda = np.log(hda)
# assume pwv = pwv0*exp(-h/H)
# H is the water vapor scale height
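    # taking the log linearises the model: log(pwv) = log(pwv0) - h/H, so the fitted
    # slope below equals -1/H and exp(intercept) recovers pwv0 (a0).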
_, results = linear_fit_using_scipy_da_ts(log_hda, model=model, slope_factor=1, not_time=True)
H = -1.0 / results['slope']
a0 = np.exp(results['intercept'])
modeled_var = a0 * np.exp(-hda['alt'] / H)
if plot:
fig, ax = plt.subplots()
modeled_var.plot(ax=ax, color='r')
hda.plot.line(linewidth=0., marker='o', ax=ax, color='b')
# lr = 1000 * abs(slope)
ax.set_title(dt)
textstr = 'WV scale height: {:.1f} m'.format(H)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=12,
verticalalignment='top', bbox=props)
ax.set_xlabel('Height a.s.l [m]')
ax.set_ylabel('PWV [mm]')
return H
def apply_lapse_rate_change(hdf, H):
import numpy as np
    # make sure the scale height is positive (i.e. PWV decays with altitude):
assert H > 0
new_hdf = hdf.copy()
new_hdf.iloc[:, 0] = hdf.iloc[:, 0] * np.exp(hdf.index / H)
return new_hdf
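# Multiplying each station value by exp(h/H) removes the exponential altitude dependence,
# i.e. projects the observations to an equivalent sea-level value before the horizontal
# interpolation; the inverse factor exp(-h/H) is re-applied at the target altitudes in
# interpolate_at_one_dt below.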
def interpolate_at_one_dt(new_hdf, H, predict_df=None, dem_path=awd_path,
ppd=50):
from aux_gps import coarse_dem
import numpy as np
from pykrige.rk import Krige
""" interpolate to Israel grid the values in new_hdf (already removed the lapse rate)
with ppd being the map resolution. if predict_df is not None,
interpolate only to df's locations and altitudes. predict_df should have lat, lon and alt columns"""
# create mesh and load DEM:
da = create_lat_lon_mesh(points_per_degree=ppd) # 500?
# populate the empty mesh grid with stations data:
for i, row in new_hdf.iterrows():
lat = da.sel(lat=row['lat'], method='nearest').lat.values
lon = da.sel(lon=row['lon'], method='nearest').lon.values
da.loc[{'lat': lat, 'lon': lon}] = row.iloc[0]
c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])
r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])
rr, cc = np.meshgrid(r, c)
vals = ~np.isnan(da.values)
X = np.column_stack([rr[vals], cc[vals]])
rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
# y = da_scaled.values[vals]
y = da.values[vals]
model = Krige(method='ordinary', variogram_model='spherical',
verbose=True)
model.fit(X, y)
if predict_df is None:
# i.e., interpolate to all map coords:
interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)
da_inter = da.copy(data=interpolated)
awd = coarse_dem(da, dem_path=dem_path)
assert H > 0
da_inter *= np.exp(-1.0 * awd / H)
return da_inter
else:
predict_lats = np.linspace(predict_df.lat.min(
), predict_df.lat.max(), predict_df.lat.values.shape[0])
predict_lons = np.linspace(predict_df.lon.min(
), predict_df.lon.max(), predict_df.lon.values.shape[0])
predict_lons_lats_as_cols = np.column_stack(
[predict_lons, predict_lats])
interpolated = model.predict(
predict_lons_lats_as_cols).reshape((predict_lats.shape))
df_inter = predict_df.copy()
df_inter['interpolated'] = interpolated
# fix for lapse rate:
assert H > 0
df_inter['interpolated_lr_fixed'] = df_inter['interpolated'] * np.exp(-1.0 * df_inter['alt'] / H)
return df_inter
def create_lat_lon_mesh(lats=[29.5, 33.5], lons=[34, 36],
points_per_degree=1000):
import xarray as xr
import numpy as np
lat = np.arange(lats[0], lats[1], 1.0 / points_per_degree)
lon = np.arange(lons[0], lons[1], 1.0 / points_per_degree)
nans = np.nan * np.ones((len(lat), len(lon)))
da = xr.DataArray(nans, dims=['lat', 'lon'])
da['lat'] = lat
da['lon'] = lon
return da
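# With the defaults (1000 points per degree over lats [29.5, 33.5] and lons [34, 36]) the
# mesh is 4000 x 2000 cells; the callers in this module use 50-250 points per degree, i.e.
# grids of roughly 200 x 100 up to 1000 x 500 cells.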
def Interpolating_models_ims(time='2013-10-19T22:00:00', var='TD', plot=True,
gis_path=gis_path, method='okrig',
dem_path=work_yuval / 'AW3D30', lapse_rate=5.,
cv=None, rms=None, gridsearch=False):
"""main 2d_interpolation from stations to map"""
# cv usage is {'kfold': 5} or {'rkfold': [2, 3]}
# TODO: try 1d modeling first, like T=f(lat)
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.neighbors import KNeighborsRegressor
from pykrige.rk import Krige
import numpy as np
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from scipy.spatial import Delaunay
from scipy.interpolate import griddata
from sklearn.metrics import mean_squared_error
from aux_gps import coarse_dem
import seaborn as sns
import matplotlib.pyplot as plt
import pyproj
from sklearn.utils.estimator_checks import check_estimator
from pykrige.compat import GridSearchCV
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
def parse_cv(cv):
from sklearn.model_selection import KFold
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import LeaveOneOut
"""input:cv number or string"""
# check for integer:
if 'kfold' in cv.keys():
n_splits = cv['kfold']
print('CV is KFold with n_splits={}'.format(n_splits))
return KFold(n_splits=n_splits)
if 'rkfold' in cv.keys():
n_splits = cv['rkfold'][0]
n_repeats = cv['rkfold'][1]
            print('CV is RepeatedKFold with n_splits={},'.format(n_splits) +
' n_repeates={}'.format(n_repeats))
return RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=42)
if 'loo' in cv.keys():
return LeaveOneOut()
# from aux_gps import scale_xr
da = create_lat_lon_mesh(points_per_degree=250) # 500?
awd = coarse_dem(da)
awd = awd.values
geo_snap = geo_pandas_time_snapshot(var=var, datetime=time, plot=False)
if var == 'TD':
[a, b] = np.polyfit(geo_snap['alt'].values, geo_snap['TD'].values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(data=geo_snap, x='alt', y='TD', color='r',
scatter_kws={'color': 'b'}, ax=ax_lapse)
suptitle = time.replace('T', ' ')
ax_lapse.set_xlabel('Altitude [m]')
ax_lapse.set_ylabel('Temperature [degC]')
ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
transform=ax_lapse.transAxes, fontsize=12, color='k',
fontweight='bold')
ax_lapse.grid()
ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
# fig.suptitle(suptitle, fontsize=14, fontweight='bold')
alts = []
for i, row in geo_snap.iterrows():
lat = da.sel(lat=row['lat'], method='nearest').lat.values
lon = da.sel(lon=row['lon'], method='nearest').lon.values
alt = row['alt']
if lapse_rate is not None and var == 'TD':
da.loc[{'lat': lat, 'lon': lon}] = row[var] + \
lapse_rate * alt / 1000.0
alts.append(alt)
elif lapse_rate is None or var != 'TD':
da.loc[{'lat': lat, 'lon': lon}] = row[var]
alts.append(alt)
# da_scaled = scale_xr(da)
c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])
r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])
rr, cc = np.meshgrid(r, c)
vals = ~np.isnan(da.values)
if lapse_rate is None:
Xrr, Ycc, Z = pyproj.transform(
lla, ecef, rr[vals], cc[vals], np.array(alts), radians=False)
X = np.column_stack([Xrr, Ycc, Z])
        # awd already holds the DEM values array (awd = awd.values above)
        XX, YY, ZZ = pyproj.transform(lla, ecef, rr, cc, awd,
radians=False)
rr_cc_as_cols = np.column_stack([XX.flatten(), YY.flatten(), ZZ.flatten()])
else:
X = np.column_stack([rr[vals], cc[vals]])
rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
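    # Feature-space note: when lapse_rate is None the station altitudes are kept as a third
    # coordinate, so lon/lat/alt are converted to Earth-centred (ECEF) X, Y, Z and the
    # kriging/regression runs in 3-D; otherwise only the horizontal lon/lat grid is used and
    # the altitude dependence is handled explicitly via the lapse-rate correction.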
# y = da_scaled.values[vals]
y = da.values[vals]
if method == 'gp-rbf':
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import WhiteKernel
kernel = 1.0 * RBF(length_scale=0.25, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
# kernel = None
model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,
n_restarts_optimizer=5,
random_state=42, normalize_y=True)
elif method == 'gp-qr':
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.gaussian_process.kernels import WhiteKernel
kernel = RationalQuadratic(length_scale=100.0) \
+ WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,
n_restarts_optimizer=5,
random_state=42, normalize_y=True)
elif method == 'knn':
model = KNeighborsRegressor(n_neighbors=5, weights='distance')
elif method == 'svr':
model = SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,
gamma='auto_deprecated', kernel='rbf', max_iter=-1,
shrinking=True, tol=0.001, verbose=False)
elif method == 'okrig':
model = Krige(method='ordinary', variogram_model='spherical',
verbose=True)
elif method == 'ukrig':
model = Krige(method='universal', variogram_model='linear',
verbose=True)
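    # Note: pykrige.rk.Krige is a scikit-learn-compatible estimator wrapper, which is what
    # allows the generic cv.split()/fit()/predict() loop and the GridSearchCV usage below.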
# elif method == 'okrig3d':
# # don't bother - MemoryError...
# model = OrdinaryKriging3D(rr[vals], cc[vals], np.array(alts),
# da.values[vals], variogram_model='linear',
# verbose=True)
# awd = coarse_dem(da)
# interpolated, ss = model.execute('grid', r, c, awd['data'].values)
# elif method == 'rkrig':
# # est = LinearRegression()
# est = RandomForestRegressor()
# model = RegressionKriging(regression_model=est, n_closest_points=5,
# verbose=True)
# p = np.array(alts).reshape(-1, 1)
# model.fit(p, X, y)
# P = awd.flatten().reshape(-1, 1)
# interpolated = model.predict(P, rr_cc_as_cols).reshape(da.values.shape)
# try:
# u = check_estimator(model)
# except TypeError:
# u = False
# pass
if cv is not None and not gridsearch: # and u is None):
# from sklearn.model_selection import cross_validate
from sklearn import metrics
cv = parse_cv(cv)
ytests = []
ypreds = []
for train_idx, test_idx in cv.split(X):
X_train, X_test = X[train_idx], X[test_idx] # requires arrays
y_train, y_test = y[train_idx], y[test_idx]
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# there is only one y-test and y-pred per iteration over the loo.split,
# so to get a proper graph, we append them to respective lists.
ytests += list(y_test)
ypreds += list(y_pred)
true_vals = np.array(ytests)
predicted = np.array(ypreds)
r2 = metrics.r2_score(ytests, ypreds)
ms_error = metrics.mean_squared_error(ytests, ypreds)
print("R^2: {:.5f}%, MSE: {:.5f}".format(r2*100, ms_error))
if gridsearch:
cv = parse_cv(cv)
param_dict = {"method": ["ordinary", "universal"],
"variogram_model": ["linear", "power", "gaussian",
"spherical"],
# "nlags": [4, 6, 8],
# "weight": [True, False]
}
estimator = GridSearchCV(Krige(), param_dict, verbose=True, cv=cv,
scoring='neg_mean_absolute_error',
return_train_score=True, n_jobs=1)
estimator.fit(X, y)
if hasattr(estimator, 'best_score_'):
print('best_score = {:.3f}'.format(estimator.best_score_))
print('best_params = ', estimator.best_params_)
return estimator
# if (cv is not None and not u):
# from sklearn import metrics
# cv = parse_cv(cv)
# ytests = []
# ypreds = []
# for train_idx, test_idx in cv.split(X):
# X_train, X_test = X[train_idx], X[test_idx] # requires arrays
# y_train, y_test = y[train_idx], y[test_idx]
## model = UniversalKriging(X_train[:, 0], X_train[:, 1], y_train,
## variogram_model='linear', verbose=False,
## enable_plotting=False)
# model.X_ORIG = X_train[:, 0]
# model.X_ADJUSTED = model.X_ORIG
# model.Y_ORIG = X_train[:, 1]
# model.Y_ADJUSTED = model.Y_ORIG
# model.Z = y_train
# y_pred, ss = model.execute('points', X_test[0, 0],
# X_test[0, 1])
# # there is only one y-test and y-pred per iteration over the loo.split,
# # so to get a proper graph, we append them to respective lists.
    #        ytests += list(y_test)
# ypreds += list(y_pred)
# true_vals = np.array(ytests)
# predicted = np.array(ypreds)
# r2 = metrics.r2_score(ytests, ypreds)
# ms_error = metrics.mean_squared_error(ytests, ypreds)
# print("R^2: {:.5f}%, MSE: {:.5f}".format(r2*100, ms_error))
# cv_results = cross_validate(gp, X, y, cv=cv, scoring='mean_squared_error',
# return_train_score=True, n_jobs=-1)
# test = xr.DataArray(cv_results['test_score'], dims=['kfold'])
# train = xr.DataArray(cv_results['train_score'], dims=['kfold'])
# train.name = 'train'
# cds = test.to_dataset(name='test')
# cds['train'] = train
# cds['kfold'] = np.arange(len(cv_results['test_score'])) + 1
# cds['mean_train'] = cds.train.mean('kfold')
# cds['mean_test'] = cds.test.mean('kfold')
# interpolated=griddata(X, y, (rr, cc), method='nearest')
model.fit(X, y)
interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)
da_inter = da.copy(data=interpolated)
if lapse_rate is not None and var == 'TD':
da_inter -= lapse_rate * awd / 1000.0
if (rms is not None and cv is None): # or (rms is not None and not u):
predicted = []
true_vals = []
for i, row in geo_snap.iterrows():
lat = da.sel(lat=row['lat'], method='nearest').lat.values
lon = da.sel(lon=row['lon'], method='nearest').lon.values
pred = da_inter.loc[{'lat': lat, 'lon': lon}].values.item()
true = row[var]
predicted.append(pred)
true_vals.append(true)
predicted = np.array(predicted)
true_vals = np.array(true_vals)
ms_error = mean_squared_error(true_vals, predicted)
print("MSE: {:.5f}".format(ms_error))
if plot:
import salem
from salem import DataLevels, Map
import cartopy.crs as ccrs
# import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
# fname = gis_path / 'ne_10m_admin_0_sovereignty.shp'
# fname = gis_path / 'gadm36_ISR_0.shp'
# ax = plt.axes(projection=ccrs.PlateCarree())
f, ax = plt.subplots(figsize=(6, 10))
# shdf = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))
shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')
# shdf = shdf.loc[shdf['CNTRY_NAME'] == 'Israel'] # remove other countries
shdf.crs = {'init': 'epsg:4326'}
dsr = da_inter.salem.roi(shape=shdf)
grid = dsr.salem.grid
grid = da_inter.salem.grid
sm = Map(grid)
# sm.set_shapefile(gis_path / 'Israel_and_Yosh.shp')
# sm = dsr.salem.quick_map(ax=ax)
# sm2 = salem.Map(grid, factor=1)
# sm2.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',
# edgecolor='k')
sm.set_data(dsr)
# sm.set_nlevels(7)
# sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),
# cbar_title='degC')
sm.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',
edgecolor='k') # , facecolor='aqua')
# sm.set_topography(awd.values, crs=awd.crs)
# sm.set_rgb(crs=shdf.crs, natural_earth='hr') # ad
# lakes = salem.read_shapefile(gis_path/'gis_osm_water_a_free_1.shp')
sm.set_cmap(cm='rainbow')
sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),
cbar_title='degC')
dl = DataLevels(geo_snap[var], levels=sm.levels)
dl.set_cmap(sm.cmap)
x, y = sm.grid.transform(geo_snap.lon.values, geo_snap.lat.values)
ax.scatter(x, y, color=dl.to_rgb(), s=20, edgecolors='k', linewidths=0.5)
suptitle = time.replace('T', ' ')
f.suptitle(suptitle, fontsize=14, fontweight='bold')
if (rms is not None or cv is not None) and (not gridsearch):
import seaborn as sns
f, ax = plt.subplots(1, 2, figsize=(12, 6))
sns.scatterplot(x=true_vals, y=predicted, ax=ax[0], marker='.',
s=100)
resid = predicted - true_vals
sns.distplot(resid, bins=5, color='c', label='residuals',
ax=ax[1])
rmean = np.mean(resid)
rstd = np.std(resid)
rmedian = np.median(resid)
rmse = np.sqrt(mean_squared_error(true_vals, predicted))
plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
_, max_ = plt.ylim()
plt.text(rmean + rmean / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
f.tight_layout()
# lakes.plot(ax=ax, color='b', edgecolor='k')
# lake_borders = gpd.overlay(countries, capitals, how='difference')
# adm1_shapes = list(shpreader.Reader(fname).geometries())
# ax = plt.axes(projection=ccrs.PlateCarree())
# ax.coastlines(resolution='10m')
# ax.add_geometries(adm1_shapes, ccrs.PlateCarree(),
# edgecolor='black', facecolor='gray', alpha=0.5)
# da_inter.plot.pcolormesh('lon', 'lat', ax=ax)
#geo_snap.plot(ax=ax, column=var, cmap='viridis', edgecolor='black',
# legend=False)
return da_inter
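# A minimal usage sketch (the datetime, variable and CV settings are illustrative values):
#   da_t = Interpolating_models_ims(time='2013-10-19T22:00:00', var='TD', method='okrig',
#                                   lapse_rate=5., cv={'kfold': 5}, plot=True)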
|
[
"numpy.abs",
"numpy.polyfit",
"sklearn.metrics.r2_score",
"numpy.isnan",
"seaborn.regplot",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"aux_gps.coarse_dem",
"salem.read_shapefile",
"sklearn.model_selection.RepeatedKFold",
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"numpy.meshgrid",
"numpy.std",
"xarray.merge",
"matplotlib.pyplot.subplots",
"sklearn.gaussian_process.GaussianProcessRegressor",
"sklearn.metrics.mean_squared_error",
"salem.Map",
"aux_gps.keep_iqr",
"seaborn.scatterplot",
"numpy.median",
"matplotlib.pyplot.ylim",
"sklearn.gaussian_process.kernels.RBF",
"sklearn.gaussian_process.kernels.RationalQuadratic",
"pandas.to_datetime",
"aux_gps.linear_fit_using_scipy_da_ts",
"sklearn.gaussian_process.kernels.WhiteKernel",
"sklearn.svm.SVR",
"sklearn.neighbors.KNeighborsRegressor",
"numpy.log",
"sklearn.model_selection.KFold",
"sklearn.model_selection.LeaveOneOut",
"pykrige.rk.Krige",
"pyproj.Proj",
"xarray.DataArray",
"numpy.array",
"numpy.column_stack",
"seaborn.distplot",
"salem.DataLevels",
"pyproj.transform",
"xarray.load_dataset"
] |
[((515, 529), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (527, 529), True, 'import pandas as pd\n'), ((1408, 1438), 'pandas.to_datetime', 'pd.to_datetime', (["df['datetime']"], {}), "(df['datetime'])\n", (1422, 1438), True, 'import pandas as pd\n'), ((2602, 2656), 'xarray.load_dataset', 'xr.load_dataset', (["(ims_path / 'IMS_BP_israeli_10mins.nc')"], {}), "(ims_path / 'IMS_BP_israeli_10mins.nc')\n", (2617, 2656), True, 'import xarray as xr\n'), ((2706, 2719), 'xarray.merge', 'xr.merge', (['bps'], {}), '(bps)\n', (2714, 2719), True, 'import xarray as xr\n'), ((2903, 2988), 'aux_gps.linear_fit_using_scipy_da_ts', 'linear_fit_using_scipy_da_ts', (['mean_p'], {'model': 'model', 'slope_factor': '(1)', 'not_time': '(True)'}), '(mean_p, model=model, slope_factor=1, not_time=True\n )\n', (2931, 2988), False, 'from aux_gps import linear_fit_using_scipy_da_ts\n'), ((3999, 4010), 'numpy.log', 'np.log', (['hda'], {}), '(hda)\n', (4005, 4010), True, 'import numpy as np\n'), ((4102, 4188), 'aux_gps.linear_fit_using_scipy_da_ts', 'linear_fit_using_scipy_da_ts', (['log_hda'], {'model': 'model', 'slope_factor': '(1)', 'not_time': '(True)'}), '(log_hda, model=model, slope_factor=1, not_time\n =True)\n', (4130, 4188), False, 'from aux_gps import linear_fit_using_scipy_da_ts\n'), ((4225, 4253), 'numpy.exp', 'np.exp', (["results['intercept']"], {}), "(results['intercept'])\n", (4231, 4253), True, 'import numpy as np\n'), ((6132, 6149), 'numpy.meshgrid', 'np.meshgrid', (['r', 'c'], {}), '(r, c)\n', (6143, 6149), True, 'import numpy as np\n'), ((6190, 6227), 'numpy.column_stack', 'np.column_stack', (['[rr[vals], cc[vals]]'], {}), '([rr[vals], cc[vals]])\n', (6205, 6227), True, 'import numpy as np\n'), ((6363, 6430), 'pykrige.rk.Krige', 'Krige', ([], {'method': '"""ordinary"""', 'variogram_model': '"""spherical"""', 'verbose': '(True)'}), "(method='ordinary', variogram_model='spherical', verbose=True)\n", (6368, 6430), False, 'from pykrige.rk import Krige\n'), ((7682, 7734), 'numpy.arange', 'np.arange', (['lats[0]', 'lats[1]', '(1.0 / points_per_degree)'], {}), '(lats[0], lats[1], 1.0 / points_per_degree)\n', (7691, 7734), True, 'import numpy as np\n'), ((7745, 7797), 'numpy.arange', 'np.arange', (['lons[0]', 'lons[1]', '(1.0 / points_per_degree)'], {}), '(lons[0], lons[1], 1.0 / points_per_degree)\n', (7754, 7797), True, 'import numpy as np\n'), ((7857, 7896), 'xarray.DataArray', 'xr.DataArray', (['nans'], {'dims': "['lat', 'lon']"}), "(nans, dims=['lat', 'lon'])\n", (7869, 7896), True, 'import xarray as xr\n'), ((9074, 9131), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""latlong"""', 'ellps': '"""WGS84"""', 'datum': '"""WGS84"""'}), "(proj='latlong', ellps='WGS84', datum='WGS84')\n", (9085, 9131), False, 'import pyproj\n'), ((9143, 9200), 'pyproj.Proj', 'pyproj.Proj', ([], {'proj': '"""geocent"""', 'ellps': '"""WGS84"""', 'datum': '"""WGS84"""'}), "(proj='geocent', ellps='WGS84', datum='WGS84')\n", (9154, 9200), False, 'import pyproj\n'), ((10172, 10186), 'aux_gps.coarse_dem', 'coarse_dem', (['da'], {}), '(da)\n', (10182, 10186), False, 'from aux_gps import coarse_dem\n'), ((11919, 11936), 'numpy.meshgrid', 'np.meshgrid', (['r', 'c'], {}), '(r, c)\n', (11930, 11936), True, 'import numpy as np\n'), ((2668, 2683), 'aux_gps.keep_iqr', 'keep_iqr', (['bp[x]'], {}), '(bp[x])\n', (2676, 2683), False, 'from aux_gps import keep_iqr\n'), ((3125, 3139), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3137, 3139), True, 'import matplotlib.pyplot as plt\n'), ((4277, 4300), 'numpy.exp', 
'np.exp', (["(-hda['alt'] / H)"], {}), "(-hda['alt'] / H)\n", (4283, 4300), True, 'import numpy as np\n'), ((4332, 4346), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4344, 4346), True, 'import matplotlib.pyplot as plt\n'), ((5096, 5117), 'numpy.exp', 'np.exp', (['(hdf.index / H)'], {}), '(hdf.index / H)\n', (5102, 5117), True, 'import numpy as np\n'), ((6162, 6181), 'numpy.isnan', 'np.isnan', (['da.values'], {}), '(da.values)\n', (6170, 6181), True, 'import numpy as np\n'), ((6680, 6713), 'aux_gps.coarse_dem', 'coarse_dem', (['da'], {'dem_path': 'dem_path'}), '(da, dem_path=dem_path)\n', (6690, 6713), False, 'from aux_gps import coarse_dem\n'), ((6755, 6777), 'numpy.exp', 'np.exp', (['(-1.0 * awd / H)'], {}), '(-1.0 * awd / H)\n', (6761, 6777), True, 'import numpy as np\n'), ((7088, 7133), 'numpy.column_stack', 'np.column_stack', (['[predict_lons, predict_lats]'], {}), '([predict_lons, predict_lats])\n', (7103, 7133), True, 'import numpy as np\n'), ((10321, 10381), 'numpy.polyfit', 'np.polyfit', (["geo_snap['alt'].values", "geo_snap['TD'].values", '(1)'], {}), "(geo_snap['alt'].values, geo_snap['TD'].values, 1)\n", (10331, 10381), True, 'import numpy as np\n'), ((10481, 10510), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (10493, 10510), True, 'import matplotlib.pyplot as plt\n'), ((10519, 10618), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'geo_snap', 'x': '"""alt"""', 'y': '"""TD"""', 'color': '"""r"""', 'scatter_kws': "{'color': 'b'}", 'ax': 'ax_lapse'}), "(data=geo_snap, x='alt', y='TD', color='r', scatter_kws={'color':\n 'b'}, ax=ax_lapse)\n", (10530, 10618), True, 'import seaborn as sns\n'), ((11949, 11968), 'numpy.isnan', 'np.isnan', (['da.values'], {}), '(da.values)\n', (11957, 11968), True, 'import numpy as np\n'), ((12126, 12156), 'numpy.column_stack', 'np.column_stack', (['[Xrr, Ycc, Z]'], {}), '([Xrr, Ycc, Z])\n', (12141, 12156), True, 'import numpy as np\n'), ((12178, 12240), 'pyproj.transform', 'pyproj.transform', (['lla', 'ecef', 'rr', 'cc', 'awd.values'], {'radians': '(False)'}), '(lla, ecef, rr, cc, awd.values, radians=False)\n', (12194, 12240), False, 'import pyproj\n'), ((12385, 12422), 'numpy.column_stack', 'np.column_stack', (['[rr[vals], cc[vals]]'], {}), '([rr[vals], cc[vals]])\n', (12400, 12422), True, 'import numpy as np\n'), ((12897, 13010), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'alpha': '(0.0)', 'kernel': 'kernel', 'n_restarts_optimizer': '(5)', 'random_state': '(42)', 'normalize_y': '(True)'}), '(alpha=0.0, kernel=kernel, n_restarts_optimizer=5,\n random_state=42, normalize_y=True)\n', (12921, 13010), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((15835, 15851), 'numpy.array', 'np.array', (['ytests'], {}), '(ytests)\n', (15843, 15851), True, 'import numpy as np\n'), ((15872, 15888), 'numpy.array', 'np.array', (['ypreds'], {}), '(ypreds)\n', (15880, 15888), True, 'import numpy as np\n'), ((15902, 15934), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['ytests', 'ypreds'], {}), '(ytests, ypreds)\n', (15918, 15934), False, 'from sklearn import metrics\n'), ((15954, 15996), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['ytests', 'ypreds'], {}), '(ytests, ypreds)\n', (15980, 15996), False, 'from sklearn import metrics\n'), ((19777, 19796), 'numpy.array', 'np.array', (['predicted'], {}), '(predicted)\n', (19785, 19796), True, 'import numpy as np\n'), ((19817, 19836), 'numpy.array', 
'np.array', (['true_vals'], {}), '(true_vals)\n', (19825, 19836), True, 'import numpy as np\n'), ((19856, 19896), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['true_vals', 'predicted'], {}), '(true_vals, predicted)\n', (19874, 19896), False, 'from sklearn.metrics import mean_squared_error\n'), ((20328, 20357), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 10)'}), '(figsize=(6, 10))\n', (20340, 20357), True, 'import matplotlib.pyplot as plt\n'), ((20453, 20507), 'salem.read_shapefile', 'salem.read_shapefile', (["(gis_path / 'Israel_and_Yosh.shp')"], {}), "(gis_path / 'Israel_and_Yosh.shp')\n", (20473, 20507), False, 'import salem\n'), ((20756, 20765), 'salem.Map', 'Map', (['grid'], {}), '(grid)\n', (20759, 20765), False, 'from salem import DataLevels, Map\n'), ((21712, 21755), 'salem.DataLevels', 'DataLevels', (['geo_snap[var]'], {'levels': 'sm.levels'}), '(geo_snap[var], levels=sm.levels)\n', (21722, 21755), False, 'from salem import DataLevels, Map\n'), ((7461, 7495), 'numpy.exp', 'np.exp', (["(-1.0 * df_inter['alt'] / H)"], {}), "(-1.0 * df_inter['alt'] / H)\n", (7467, 7495), True, 'import numpy as np\n'), ((9611, 9635), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (9616, 9635), False, 'from sklearn.model_selection import KFold\n'), ((9899, 9969), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats', 'random_state': '(42)'}), '(n_splits=n_splits, n_repeats=n_repeats, random_state=42)\n', (9912, 9969), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((10053, 10066), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (10064, 10066), False, 'from sklearn.model_selection import LeaveOneOut\n'), ((12083, 12097), 'numpy.array', 'np.array', (['alts'], {}), '(alts)\n', (12091, 12097), True, 'import numpy as np\n'), ((12794, 12857), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': '(0.01)', 'noise_level_bounds': '(1e-10, 10.0)'}), '(noise_level=0.01, noise_level_bounds=(1e-10, 10.0))\n', (12805, 12857), False, 'from sklearn.gaussian_process.kernels import WhiteKernel\n'), ((13405, 13518), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'alpha': '(0.0)', 'kernel': 'kernel', 'n_restarts_optimizer': '(5)', 'random_state': '(42)', 'normalize_y': '(True)'}), '(alpha=0.0, kernel=kernel, n_restarts_optimizer=5,\n random_state=42, normalize_y=True)\n', (13429, 13518), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((16447, 16454), 'pykrige.rk.Krige', 'Krige', ([], {}), '()\n', (16452, 16454), False, 'from pykrige.rk import Krige\n'), ((22168, 22203), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 6)'}), '(1, 2, figsize=(12, 6))\n', (22180, 22203), True, 'import matplotlib.pyplot as plt\n'), ((22216, 22286), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'true_vals', 'y': 'predicted', 'ax': 'ax[0]', 'marker': '"""."""', 's': '(100)'}), "(x=true_vals, y=predicted, ax=ax[0], marker='.', s=100)\n", (22231, 22286), True, 'import seaborn as sns\n'), ((22369, 22436), 'seaborn.distplot', 'sns.distplot', (['resid'], {'bins': '(5)', 'color': '"""c"""', 'label': '"""residuals"""', 'ax': 'ax[1]'}), "(resid, bins=5, color='c', label='residuals', ax=ax[1])\n", (22381, 22436), True, 'import seaborn as sns\n'), ((22482, 22496), 'numpy.mean', 'np.mean', (['resid'], {}), '(resid)\n', (22489, 
22496), True, 'import numpy as np\n'), ((22516, 22529), 'numpy.std', 'np.std', (['resid'], {}), '(resid)\n', (22522, 22529), True, 'import numpy as np\n'), ((22552, 22568), 'numpy.median', 'np.median', (['resid'], {}), '(resid)\n', (22561, 22568), True, 'import numpy as np\n'), ((22650, 22712), 'matplotlib.pyplot.axvline', 'plt.axvline', (['rmean'], {'color': '"""r"""', 'linestyle': '"""dashed"""', 'linewidth': '(1)'}), "(rmean, color='r', linestyle='dashed', linewidth=1)\n", (22661, 22712), True, 'import matplotlib.pyplot as plt\n'), ((22735, 22745), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (22743, 22745), True, 'import matplotlib.pyplot as plt\n'), ((10440, 10449), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (10446, 10449), True, 'import numpy as np\n'), ((12722, 12780), 'sklearn.gaussian_process.kernels.RBF', 'RBF', ([], {'length_scale': '(0.25)', 'length_scale_bounds': '(0.01, 1000.0)'}), '(length_scale=0.25, length_scale_bounds=(0.01, 1000.0))\n', (12725, 12780), False, 'from sklearn.gaussian_process.kernels import RBF\n'), ((13271, 13308), 'sklearn.gaussian_process.kernels.RationalQuadratic', 'RationalQuadratic', ([], {'length_scale': '(100.0)'}), '(length_scale=100.0)\n', (13288, 13308), False, 'from sklearn.gaussian_process.kernels import RationalQuadratic\n'), ((13325, 13388), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': '(0.01)', 'noise_level_bounds': '(1e-10, 10.0)'}), '(noise_level=0.01, noise_level_bounds=(1e-10, 10.0))\n', (13336, 13388), False, 'from sklearn.gaussian_process.kernels import WhiteKernel\n'), ((13639, 13693), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {'n_neighbors': '(5)', 'weights': '"""distance"""'}), "(n_neighbors=5, weights='distance')\n", (13658, 13693), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((22596, 22636), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['true_vals', 'predicted'], {}), '(true_vals, predicted)\n', (22614, 22636), False, 'from sklearn.metrics import mean_squared_error\n'), ((13736, 13899), 'sklearn.svm.SVR', 'SVR', ([], {'C': '(1.0)', 'cache_size': '(200)', 'coef0': '(0.0)', 'degree': '(3)', 'epsilon': '(0.1)', 'gamma': '"""auto_deprecated"""', 'kernel': '"""rbf"""', 'max_iter': '(-1)', 'shrinking': '(True)', 'tol': '(0.001)', 'verbose': '(False)'}), "(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma=\n 'auto_deprecated', kernel='rbf', max_iter=-1, shrinking=True, tol=0.001,\n verbose=False)\n", (13739, 13899), False, 'from sklearn.svm import SVR\n'), ((13975, 14042), 'pykrige.rk.Krige', 'Krige', ([], {'method': '"""ordinary"""', 'variogram_model': '"""spherical"""', 'verbose': '(True)'}), "(method='ordinary', variogram_model='spherical', verbose=True)\n", (13980, 14042), False, 'from pykrige.rk import Krige\n'), ((14109, 14174), 'pykrige.rk.Krige', 'Krige', ([], {'method': '"""universal"""', 'variogram_model': '"""linear"""', 'verbose': '(True)'}), "(method='universal', variogram_model='linear', verbose=True)\n", (14114, 14174), False, 'from pykrige.rk import Krige\n')]
|