Dataset schema (one sample per row, three fields):
- code: string, 31 to 1.05M characters; the raw Python source of the sample
- apis: list; the fully qualified names of the library APIs called in the sample
- extract_api: string, 97 to 1.91M characters; per-call extraction records (character offsets, qualified API name, call arguments, and the originating import)
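A row can be inspected with nothing beyond the standard library. The sketch below is illustrative only: how a row is materialized (here, a plain dict with the three fields above) and the helper name summarize_sample are assumptions; the field names and the Python-literal layout of extract_api are taken from the rows themselves.

import ast

# Hypothetical helper: `row` is assumed to be one sample carrying the three fields above.
def summarize_sample(row: dict) -> None:
    code = row["code"]                            # raw Python source
    apis = row["apis"]                            # fully qualified API names
    calls = ast.literal_eval(row["extract_api"])   # list of per-call tuples
    print(f"{len(code)} chars of source, {len(apis)} APIs, {len(calls)} call sites")
    for (span, qualified_name, *_rest) in calls[:3]:
        print(f"  {qualified_name} at offsets {span}")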
""" Abstract class for defining scenarios """ import random from typing import Tuple import numpy as np from copy import deepcopy import torch import stillleben as sl import nimblephysics as nimble from sl_cutscenes.room_models import RoomAssembler from sl_cutscenes.objects.mesh_loader import MeshLoader from sl_cutscenes.objects.object_loader import ObjectLoader from sl_cutscenes.objects.decorator_loader import DecoratorLoader from sl_cutscenes.lighting import get_lightmap from sl_cutscenes.camera import Camera import sl_cutscenes.utils.utils as utils import sl_cutscenes.constants as CONSTANTS from sl_cutscenes import object_info class Scenario(object): """ Abstract class for defining scenarios """ config = dict() name = 'scenario' def __init__(self, cfg, scene: sl.Scene, randomize=True): self.device = cfg.device self.viewer_mode = cfg.viewer self.scene = scene if randomize: utils.randomize() self.mesh_loader = MeshLoader() self.object_loader = ObjectLoader(scenario_reset=True) self.room_assembler = RoomAssembler(scene=self.scene) self.decorator_loader = DecoratorLoader(scene=self.scene) self.meshes_loaded, self.objects_loaded = False, False self.z_offset = 0. self.lights = cfg.lights self.lightmap = cfg.lightmap if self.lights == 0 else None if getattr(self, "allow_multiple_cameras", True): self.n_cameras = cfg.cameras else: print(f"scenario '{self.name}' supports only 1 camera -> ignoring n_cameras...") self.n_cameras = 1 self.coplanar_stereo = cfg.coplanar_stereo self.coplanar_stereo_dist = cfg.coplanar_stereo_dist self.cam_movement_complexity = cfg.cam_movement_complexity self.sim_dt = cfg.sim_dt self.cam_dt = cfg.cam_dt self.physics_engine = cfg.physics_engine self.nimble_debug = cfg.nimble_debug self.reset_sim() return def reset_sim(self): self.meshes_loaded, self.objects_loaded, self.cameras_loaded = False, False, False if self.physics_engine == "nimble": self.nimble_loaded = False self.sim_t = 0 self.setup_scene() self.setup_lighting() self.setup_objects() self.setup_cameras() self.decorate_scene() self.finalize_scene() @property def all_objects(self): return self.object_loader.all_objects @property def static_objects(self): return self.object_loader.static_objects @property def dynamic_objects(self): return self.object_loader.dynamic_objects def set_camera_look_at(self, pos, lookat): self.scene.set_camera_look_at(position=pos, look_at=lookat) def can_render(self): raise NotImplementedError def decorate_scene(self): self.room_assembler.add_wall_furniture() self.decorator_loader.decorate_scene(object_loader=self.object_loader) return def finalize_scene(self): """ Scene setup stuff that has to be done after everything else """ for obj in self.static_objects: obj.casts_shadows = False def setup_scene(self): """ Default setup_scene. Can be overriden from specific scenes """ _ = self.room_assembler.make_room() def setup_lighting(self): """ Default setup lighting. """ self.scene.ambient_light = torch.tensor([0.2, 0.2, 0.2]) if self.lightmap is not None: self.scene.light_map = get_lightmap(self.lightmap) self.scene.light_directions *= 0. # disable point lights self.scene.manual_exposure = 5.0 else: for i in range(self.lights): # self.scene.choose_random_light_direction() ori_angle = np.random.uniform(0, 360) elev_angle = np.random.uniform(30, 90) light_x = np.cos(ori_angle * np.pi / 180.) * np.cos(elev_angle * np.pi / 180.) light_y = np.sin(ori_angle * np.pi / 180.) * np.cos(elev_angle * np.pi / 180.) light_z = np.sin(elev_angle * np.pi / 180.) 
light_direction = torch.tensor([-light_x, -light_y, -light_z]) self.scene.light_directions[i] = light_direction light_color = torch.tensor([4.0, 4.0, 4.0]) + torch.rand(3) light_color_normalized = 5. * light_color / torch.linalg.norm(light_color) self.scene.light_colors[i] = light_color_normalized self.scene.manual_exposure = 3.0 def get_separations(self): # assert len(self.dynamic_objects) > 0, "Objects must be added to dynamic_objects before computing collisions" self.scene.check_collisions() separations = [obj.separation for obj in self.dynamic_objects if hasattr(obj, "separation")] return separations def is_there_collision(self): separations = self.get_separations() collision = True if np.sum(separations) < 0 else False return collision def load_meshes(self): """ """ if self.meshes_loaded: return print("mesh setup...") self.load_meshes_() self.meshes_loaded = True def load_meshes_(self): """ Scenario-specific logic """ raise NotImplementedError def setup_objects(self): """ """ if self.objects_loaded: return print("object setup...") if not self.meshes_loaded: self.load_meshes() # if objects have not been loaded yet, load them self.setup_objects_() self.objects_loaded = True return def setup_objects_(self): """ Scenario-specific logic """ raise NotImplementedError def setup_cameras(self): if self.cameras_loaded: return print("camera setup...") self.cameras = [] self.camera_objs = [] cam_config = self.config["camera"] base_lookat = cam_config["base_lookat"] # pick default ori. angle and (n_cameras-1) other angles from a linspace of angles that are 5 degrees apart default_ori_angle = cam_config["orientation_angle_default"] cam_ori_angles = [0] + random.sample(np.linspace(0, 360, 72+1).tolist()[1:-1], k=self.n_cameras-1) cam_ori_angles = [(angle + default_ori_angle) % 360 for angle in cam_ori_angles] # TODO parameters 'orientation_angle_min/max' are not yet used! for i, cam_ori_angle in enumerate(cam_ori_angles): cam_elev_angle = random.uniform(cam_config["elevation_angle_min"], cam_config["elevation_angle_max"]) cam_dist = random.uniform(cam_config["distance_min"], cam_config["distance_max"]) cam_lookat = deepcopy(base_lookat) cam_name = f"cam_{str(i).zfill(2)}" cam_stereo_positions = ["left", "right"] if self.coplanar_stereo else ["mono"] self.cameras.append(Camera(cam_name, self.cam_dt, cam_elev_angle, cam_ori_angle, cam_dist, cam_lookat, self.coplanar_stereo_dist, cam_stereo_positions, self.cam_movement_complexity)) self.setup_cameras_() # e.g. scenario-specific height adjustment self.setup_camera_objs() self.cameras_loaded = True def setup_camera_objs(self): """ Setting an object for each of the cameras. - Viewer mode: A full mesh is displayed at the position and with the pose of the camera - Normal mode: A tiny-dummy obj is place on the location of the camera to fill the occ-matrix cell """ camera_mesh = CONSTANTS.CAMERA_OBJ if self.viewer_mode else CONSTANTS.DUMMY_CAMERA_OBJ for camera_id, camera in enumerate(self.cameras): self.mesh_loader.load_meshes(camera_mesh) camera_pos = camera.get_pos() camera_info_mesh = self.mesh_loader.get_meshes()[-1] self.camera_objs.append(self.add_object_to_scene(camera_info_mesh, is_static=True)) pose = torch.eye(4) pose[:2, -1] = camera_pos[:2] pose[2, -1] = camera_pos[-1] + self.camera_objs[-1].mesh.bbox.min[-1] pose[:3, :3] = utils.get_rot_matrix( yaw=torch.tensor(camera.ori_angle * np.pi / 180), pitch=torch.tensor(-1 * camera.elev_angle * np.pi / 180), roll=torch.tensor(0.) 
) self.camera_objs[-1].set_pose(pose) self.scene.add_object(self.camera_objs[-1]) return def setup_cameras_(self): """ Scenario-specific logic, e.g. height adjustment """ raise NotImplementedError def simulate(self): ''' Can be overwritten by scenario-specific logic ''' self.sim_t += self.sim_dt self.sim_step_() def sim_step_(self): ''' Just calls the appropriate simulator; assumes that all other things have been taken care of. ''' if self.physics_engine == "physx": self.scene.simulate(self.sim_dt) elif self.physics_engine == "physx_manipulation_sim": raise NotImplementedError # TODO implement for gripper sim elif self.physics_engine == "nimble": if not self.nimble_loaded: self.setup_nimble_() self.simulate_nimble_() else: raise ValueError(f"invalid physics_engine parameter: {self.physics_engine}") def setup_nimble_(self): ''' Creates a clone of the current stillleben scene for nimblephysics, enabling physics simulation there. ''' print("initializing nimble scene from sl...") # utils.dump_sl_scene_to_urdf(self.scene, "scene.urdf") self.nimble_world = nimble.simulation.World() self.nimble_world.setTimeStep(self.sim_dt) positions, velocities = [], [] for obj in self.scene.objects: obj_info = object_info.get_object_by_class_id(obj.mesh.class_index) skel, pos, vel = utils.sl_object_to_nimble(obj, obj_info, debug_mode=self.nimble_debug) self.nimble_world.addSkeleton(skel) positions.extend(pos) velocities.extend(vel) self.nimble_states = [torch.cat(positions + velocities)] self.nimble_loaded = True def simulate_nimble_(self, action=None): ''' Simulates a timestep in nimblephysics. ''' # simulate timestep in nimble if action is None: action = torch.zeros(self.nimble_world.getNumDofs()) new_state = nimble.timestep(self.nimble_world, self.nimble_states[-1], action) self.nimble_states.append(new_state) self.nimble_world.setState(new_state) # transfer object state back into the stillleben context obj_pos, obj_vel = torch.chunk(new_state.clone(), 2) obj_pos = torch.chunk(obj_pos, obj_pos.shape[0] // 6) obj_vel = torch.chunk(obj_vel, obj_vel.shape[0] // 6) for obj, pos, vel in zip(self.scene.objects, obj_pos, obj_vel): obj_pose = obj.pose() obj_rpy, obj_t = pos.split([3, 3]) obj_pose[:3, :3] = utils.get_mat_from_rpy(obj_rpy) obj_pose[:3, 3] = obj_t obj.set_pose(obj_pose) angular_velocity, obj.linear_velocity = vel.split([3, 3]) obj.angular_velocity = angular_velocity.flip(0) # flip back from ZYX convention def add_object_to_scene(self, obj_info_mesh: Tuple[object_info.ObjectInfo, sl.Mesh], is_static: bool, **obj_mod): obj_info, obj_mesh = obj_info_mesh obj = self.object_loader.create_object(obj_info, obj_mesh, is_static, **obj_mod) self.scene.add_object(obj) return obj def remove_obj_from_scene(self, obj: sl.Object, decrement_ins_idx: bool=True): self.scene.remove_object(obj) self.object_loader.remove_object(obj.instance_index, decrement_ins_idx=decrement_ins_idx) def update_object_height(self, cur_obj, objs=None, scales=None): """ Updating an object z-position given a list of supporting objects""" if objs is None: objs = [] scales = [1.0] * len(objs) if scales is None else scales assert len(objs) == len(scales), "provided non-matching scales for update_camera_height" cur_obj_pose = cur_obj.pose() z_pose = self.get_obj_z_offset(cur_obj) for obj, scale in zip(objs, scales): z_pose += self.get_obj_z_offset(obj) * scale cur_obj_pose[2, -1] = z_pose cur_obj.set_pose(cur_obj_pose) return cur_obj def update_camera_height(self, camera, objs=None, scales=None): """ Updating the camera position, camera-object 
position and the look-at parameter""" if objs is None: objs = [] scales = [1.0] * len(objs) if scales is None else scales assert len(objs) == len(scales), "provided non-matching scales for update_camera_height" z_lookat = deepcopy(camera.start_base_lookat[-1]) for obj, scale in zip(objs, scales): z_lookat += self.get_obj_z_offset(obj) * scale camera.start_base_lookat[-1] = z_lookat return camera def get_obj_z_offset(self, obj): """ Obtaining the z_offset (z-pos + height) for a given object""" obj_pose = obj.pose() z_offset = obj_pose[2, -1] + (obj.mesh.bbox.max[-1] - obj.mesh.bbox.min[-1]) / 2 return z_offset def get_obj_offset(self, obj): """ Obtaining the bbox boundaries (pos + size for x,y,z) for a given object""" obj_pose = obj.pose() offset_x, offset_y, offset_z = obj_pose[:3, -1] + obj.mesh.bbox.max offset = torch.Tensor([-offset_x, -offset_y, offset_z]) return offset
[ "torch.eye", "sl_cutscenes.object_info.get_object_by_class_id", "numpy.sum", "sl_cutscenes.camera.Camera", "torch.cat", "numpy.sin", "sl_cutscenes.objects.mesh_loader.MeshLoader", "sl_cutscenes.utils.utils.sl_object_to_nimble", "sl_cutscenes.objects.decorator_loader.DecoratorLoader", "sl_cutscenes.objects.object_loader.ObjectLoader", "torch.Tensor", "sl_cutscenes.utils.utils.randomize", "torch.linalg.norm", "numpy.linspace", "copy.deepcopy", "sl_cutscenes.room_models.RoomAssembler", "numpy.cos", "torch.rand", "numpy.random.uniform", "random.uniform", "sl_cutscenes.utils.utils.get_mat_from_rpy", "nimblephysics.timestep", "sl_cutscenes.lighting.get_lightmap", "nimblephysics.simulation.World", "torch.chunk", "torch.tensor" ]
[((999, 1011), 'sl_cutscenes.objects.mesh_loader.MeshLoader', 'MeshLoader', ([], {}), '()\n', (1009, 1011), False, 'from sl_cutscenes.objects.mesh_loader import MeshLoader\n'), ((1041, 1074), 'sl_cutscenes.objects.object_loader.ObjectLoader', 'ObjectLoader', ([], {'scenario_reset': '(True)'}), '(scenario_reset=True)\n', (1053, 1074), False, 'from sl_cutscenes.objects.object_loader import ObjectLoader\n'), ((1105, 1136), 'sl_cutscenes.room_models.RoomAssembler', 'RoomAssembler', ([], {'scene': 'self.scene'}), '(scene=self.scene)\n', (1118, 1136), False, 'from sl_cutscenes.room_models import RoomAssembler\n'), ((1169, 1202), 'sl_cutscenes.objects.decorator_loader.DecoratorLoader', 'DecoratorLoader', ([], {'scene': 'self.scene'}), '(scene=self.scene)\n', (1184, 1202), False, 'from sl_cutscenes.objects.decorator_loader import DecoratorLoader\n'), ((3476, 3505), 'torch.tensor', 'torch.tensor', (['[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2])\n', (3488, 3505), False, 'import torch\n'), ((9912, 9937), 'nimblephysics.simulation.World', 'nimble.simulation.World', ([], {}), '()\n', (9935, 9937), True, 'import nimblephysics as nimble\n'), ((10730, 10796), 'nimblephysics.timestep', 'nimble.timestep', (['self.nimble_world', 'self.nimble_states[-1]', 'action'], {}), '(self.nimble_world, self.nimble_states[-1], action)\n', (10745, 10796), True, 'import nimblephysics as nimble\n'), ((11033, 11076), 'torch.chunk', 'torch.chunk', (['obj_pos', '(obj_pos.shape[0] // 6)'], {}), '(obj_pos, obj_pos.shape[0] // 6)\n', (11044, 11076), False, 'import torch\n'), ((11095, 11138), 'torch.chunk', 'torch.chunk', (['obj_vel', '(obj_vel.shape[0] // 6)'], {}), '(obj_vel, obj_vel.shape[0] // 6)\n', (11106, 11138), False, 'import torch\n'), ((13152, 13190), 'copy.deepcopy', 'deepcopy', (['camera.start_base_lookat[-1]'], {}), '(camera.start_base_lookat[-1])\n', (13160, 13190), False, 'from copy import deepcopy\n'), ((13866, 13912), 'torch.Tensor', 'torch.Tensor', (['[-offset_x, -offset_y, offset_z]'], {}), '([-offset_x, -offset_y, offset_z])\n', (13878, 13912), False, 'import torch\n'), ((953, 970), 'sl_cutscenes.utils.utils.randomize', 'utils.randomize', ([], {}), '()\n', (968, 970), True, 'import sl_cutscenes.utils.utils as utils\n'), ((3579, 3606), 'sl_cutscenes.lighting.get_lightmap', 'get_lightmap', (['self.lightmap'], {}), '(self.lightmap)\n', (3591, 3606), False, 'from sl_cutscenes.lighting import get_lightmap\n'), ((6664, 6753), 'random.uniform', 'random.uniform', (["cam_config['elevation_angle_min']", "cam_config['elevation_angle_max']"], {}), "(cam_config['elevation_angle_min'], cam_config[\n 'elevation_angle_max'])\n", (6678, 6753), False, 'import random\n'), ((6772, 6842), 'random.uniform', 'random.uniform', (["cam_config['distance_min']", "cam_config['distance_max']"], {}), "(cam_config['distance_min'], cam_config['distance_max'])\n", (6786, 6842), False, 'import random\n'), ((6868, 6889), 'copy.deepcopy', 'deepcopy', (['base_lookat'], {}), '(base_lookat)\n', (6876, 6889), False, 'from copy import deepcopy\n'), ((8148, 8160), 'torch.eye', 'torch.eye', (['(4)'], {}), '(4)\n', (8157, 8160), False, 'import torch\n'), ((10090, 10146), 'sl_cutscenes.object_info.get_object_by_class_id', 'object_info.get_object_by_class_id', (['obj.mesh.class_index'], {}), '(obj.mesh.class_index)\n', (10124, 10146), False, 'from sl_cutscenes import object_info\n'), ((10176, 10246), 'sl_cutscenes.utils.utils.sl_object_to_nimble', 'utils.sl_object_to_nimble', (['obj', 'obj_info'], {'debug_mode': 'self.nimble_debug'}), '(obj, obj_info, 
debug_mode=self.nimble_debug)\n', (10201, 10246), True, 'import sl_cutscenes.utils.utils as utils\n'), ((10394, 10427), 'torch.cat', 'torch.cat', (['(positions + velocities)'], {}), '(positions + velocities)\n', (10403, 10427), False, 'import torch\n'), ((11323, 11354), 'sl_cutscenes.utils.utils.get_mat_from_rpy', 'utils.get_mat_from_rpy', (['obj_rpy'], {}), '(obj_rpy)\n', (11345, 11354), True, 'import sl_cutscenes.utils.utils as utils\n'), ((3866, 3891), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(360)'], {}), '(0, 360)\n', (3883, 3891), True, 'import numpy as np\n'), ((3921, 3946), 'numpy.random.uniform', 'np.random.uniform', (['(30)', '(90)'], {}), '(30, 90)\n', (3938, 3946), True, 'import numpy as np\n'), ((4163, 4197), 'numpy.sin', 'np.sin', (['(elev_angle * np.pi / 180.0)'], {}), '(elev_angle * np.pi / 180.0)\n', (4169, 4197), True, 'import numpy as np\n'), ((4231, 4275), 'torch.tensor', 'torch.tensor', (['[-light_x, -light_y, -light_z]'], {}), '([-light_x, -light_y, -light_z])\n', (4243, 4275), False, 'import torch\n'), ((5048, 5067), 'numpy.sum', 'np.sum', (['separations'], {}), '(separations)\n', (5054, 5067), True, 'import numpy as np\n'), ((7061, 7231), 'sl_cutscenes.camera.Camera', 'Camera', (['cam_name', 'self.cam_dt', 'cam_elev_angle', 'cam_ori_angle', 'cam_dist', 'cam_lookat', 'self.coplanar_stereo_dist', 'cam_stereo_positions', 'self.cam_movement_complexity'], {}), '(cam_name, self.cam_dt, cam_elev_angle, cam_ori_angle, cam_dist,\n cam_lookat, self.coplanar_stereo_dist, cam_stereo_positions, self.\n cam_movement_complexity)\n', (7067, 7231), False, 'from sl_cutscenes.camera import Camera\n'), ((3973, 4006), 'numpy.cos', 'np.cos', (['(ori_angle * np.pi / 180.0)'], {}), '(ori_angle * np.pi / 180.0)\n', (3979, 4006), True, 'import numpy as np\n'), ((4008, 4042), 'numpy.cos', 'np.cos', (['(elev_angle * np.pi / 180.0)'], {}), '(elev_angle * np.pi / 180.0)\n', (4014, 4042), True, 'import numpy as np\n'), ((4068, 4101), 'numpy.sin', 'np.sin', (['(ori_angle * np.pi / 180.0)'], {}), '(ori_angle * np.pi / 180.0)\n', (4074, 4101), True, 'import numpy as np\n'), ((4103, 4137), 'numpy.cos', 'np.cos', (['(elev_angle * np.pi / 180.0)'], {}), '(elev_angle * np.pi / 180.0)\n', (4109, 4137), True, 'import numpy as np\n'), ((4372, 4401), 'torch.tensor', 'torch.tensor', (['[4.0, 4.0, 4.0]'], {}), '([4.0, 4.0, 4.0])\n', (4384, 4401), False, 'import torch\n'), ((4404, 4417), 'torch.rand', 'torch.rand', (['(3)'], {}), '(3)\n', (4414, 4417), False, 'import torch\n'), ((4478, 4508), 'torch.linalg.norm', 'torch.linalg.norm', (['light_color'], {}), '(light_color)\n', (4495, 4508), False, 'import torch\n'), ((8358, 8402), 'torch.tensor', 'torch.tensor', (['(camera.ori_angle * np.pi / 180)'], {}), '(camera.ori_angle * np.pi / 180)\n', (8370, 8402), False, 'import torch\n'), ((8430, 8480), 'torch.tensor', 'torch.tensor', (['(-1 * camera.elev_angle * np.pi / 180)'], {}), '(-1 * camera.elev_angle * np.pi / 180)\n', (8442, 8480), False, 'import torch\n'), ((8507, 8524), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (8519, 8524), False, 'import torch\n'), ((6352, 6379), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', '(72 + 1)'], {}), '(0, 360, 72 + 1)\n', (6363, 6379), True, 'import numpy as np\n')]
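Read off the entries above, each extract_api record appears to be an 8-tuple: ((call_start, call_end), fully qualified API, the name as written at the call site, (positional args, keyword args), argument text, (args_start, args_end), a flag that seems to mark aliased "import x as y" imports, and the import statement). That reading, and the check below, are inferences from the data rather than documented behaviour; the offsets index into the original, newline-preserving code string, not the flattened rendering shown here.

# Sketch under the assumed record layout described above; check_record is a
# hypothetical helper, not part of the dataset tooling.
def check_record(code: str, record: tuple) -> bool:
    (start, end), _qualified, written_name, _args, _arg_text, _arg_span, _aliased, _import = record
    # The call span should begin with the name the call was written under,
    # e.g. the first record above suggests code[999:1011] == "MeshLoader()".
    return code[start:end].startswith(written_name)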
from datetime import datetime from collections import Counter, defaultdict, OrderedDict from itertools import chain from random import random import numpy as np from cma import CMAEvolutionStrategy, CMAOptions from loguru import logger from math import sqrt from sklearn.preprocessing import MinMaxScaler from sortedcontainers import SortedDict from trueskill import BETA, global_env, rate_1vs1, Rating from xgboost import XGBRegressor from .data import DATA from .data_2016 import DATA_2016 from .data_2017 import DATA_2017 from .data_2018 import DATA_2018 def win_probability(team1, team2): delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2) sum_sigma = sum(r.sigma ** 2 for r in chain(team1, team2)) size = len(team1) + len(team2) denom = sqrt(size * (BETA * BETA) + sum_sigma) ts = global_env() return ts.cdf(delta_mu / denom) def to_decimal_odds(us_odds): if us_odds > 0: return us_odds / 100 + 1 else: return 100 / us_odds + 1 def to_implied_odds(us_odds: float) -> float: decimal_odds = to_decimal_odds(us_odds) try: return 1 / decimal_odds except ZeroDivisionError: return 1 def get_regressor(X_train, y_train, X_test=None, y_test=None, **reg_params): """get regressor""" logger.info('') logger.info('Training model...') eval_set = [(np.array(X_train), y_train)] if X_test and y_test: eval_set.append((np.array(X_test), y_test)) reg = XGBRegressor(objective='reg:squarederror', n_jobs=4, **reg_params) reg = reg.fit(X_train, y_train, eval_set=eval_set, eval_metric='auc', verbose=0) return reg def main(hyper_params, train=0): logger.info('Starting main training') all_data = DATA_2016 + DATA_2017 + DATA_2018 + DATA # estimators, learning_rate = hyper_params # gamma, max_depth, min_child_weight = hyper_params # max_delta_step, subsample, scale_pos_weight = hyper_params reg_params = { 'n_estimators': 100 if train else 1000, # 'learning_rate': 0.09426181829690375, # 0.24678854038938264 # 'gamma': 0.1860088097748791, # 0.0012826703538762253, # 'max_depth': int(round(2.1956102758009424)), # 2.5506573766936533)), # 'min_child_weight': 3.5802932556001426, # 'max_delta_step': 0.10779250505931337, # 'subsample': 0.9859889452465481, # 'scale_pos_weight': 1.2283288967549404, } # bet_pred_a, bet_pred_b, bet_odds_a, bet_odds_b, bet_wnl_a, bet_wnl_b = hyper_params bet_pred_a = 1.713980438805089 # -3.55 bet_pred_b = -4.065137791049565 # -17.93 bet_odds_a = 3.122323263774503 # -12.44 bet_odds_b = 0.0837110561236318 # -16.17 bet_wnl_a = 15.100288654913749 # -3.52 # -8.01 bet_wnl_b = -10.111913271763338 # -4.96 # 2.50 # bet_ts_a, bet_ts_b, bet_tmi_a, bet_tmi_b, bet_tma_a, bet_tma_b = hyper_params bet_ts_a = -50.59979897765422 # -26.88 # -3.52 # -8.01 bet_ts_b = -69.5794588139756 # -72.60 # -3.52 # -8.01 bet_tmi_a = -45.94904856923797 bet_tmi_b = -1.128236337281963 bet_tma_a = -28.62283185173976 bet_tma_b = -26.933801584409544 # init reg = None scaler = MinMaxScaler() cutoff = int(len(all_data) * 0.6) start_date = None ratings = defaultdict(lambda: Rating()) wins_losses = defaultdict(lambda: []) early_fights = defaultdict(lambda: 0.5) last_fights = defaultdict(lambda: 0.5) X_train = [] y_train = [] X_test = [] y_test = [] payouts = [] bet_amts = [] accuracy = (0, 0) tab = [] tab_amts = [] actual = (0, 0) actual_debug = [] bet_multis = [] bet_multis_cat = [] preds_flipped = [] odds_outcomes = [] # loop through scenes for i, scene in enumerate(all_data): is_training = i < cutoff if not is_training: if not reg: start_date = datetime.strptime(scene['date'], '%Y-%m-%d') # scale scaler.partial_fit(X_train) X_train = 
scaler.transform(X_train) reg = get_regressor(X_train, y_train, **reg_params) logger.info('') logger.info(f'{scene["date"]} {scene["name"]}') for fight in scene['fights']: bet_size = 1 # skip if no odds: if 'odds' not in fight: continue f1 = fight['fighters'][0]['name'] f2 = fight['fighters'][1]['name'] # trueskill data f1_ts = ratings[f1].mu f1_sigma = ratings[f1].sigma f2_ts = ratings[f2].mu f2_sigma = ratings[f2].sigma f1_ts_min = f1_ts - f1_sigma * 2 f2_ts_min = f2_ts - f2_sigma * 2 f1_ts_max = f1_ts + f1_sigma * 2 f2_ts_max = f2_ts + f2_sigma * 2 # odds data f1_odds = fight['odds'][f1] f2_odds = fight['odds'][f2] if not -50 < f1_odds < 50 or not -50 < f2_odds < 50: raise ValueError(f'surely these odds are wrong? {f1_odds} {f2_odds}') win1_prob = win_probability([ratings[f1]], [ratings[f2]]) win2_prob = win_probability([ratings[f2]], [ratings[f1]]) # wins losses data f1_wins_losses = Counter(wins_losses[f1]) f1_wnl_winrate = f1_wins_losses[1] / max(1, len(wins_losses[f1])) f2_wins_losses = Counter(wins_losses[f2]) f2_wnl_winrate = f2_wins_losses[1] / max(1, len(wins_losses[f2])) fight_data = [ [ win1_prob, f1_odds, f2_odds, f1_ts, f2_ts, f1_sigma, f2_sigma, f1_ts_min - f2_ts_min, f1_ts - f2_ts, f1_ts_max - f2_ts_max, last_fights[f1], last_fights[f2], early_fights[f1], early_fights[f2], f1_wins_losses[1], f1_wins_losses[-1], f1_wnl_winrate, f2_wins_losses[1], f2_wins_losses[-1], f2_wnl_winrate, ], [ win2_prob, f2_odds, f1_odds, f2_ts, f1_ts, f2_sigma, f1_sigma, f2_ts_min - f1_ts_min, f2_ts - f1_ts, f2_ts_max - f1_ts_max, last_fights[f2], last_fights[f1], early_fights[f2], early_fights[f1], f2_wins_losses[1], f2_wins_losses[-1], f2_wnl_winrate, f1_wins_losses[1], f1_wins_losses[-1], f1_wnl_winrate, ] ] ########################################## # update data if 'winner' in fight: # get winner fw = fight['winner']['fighter'] is_win_1 = fw == f1 fl = f2 if is_win_1 else f1 if not is_win_1 and fw != f2 and fw is not None: raise ValueError(f'unknown winner {fw}') drawn = fw is None # update wins losses wins_losses[f1] += [1] wins_losses[f2] += [-1] # update fights early_fights[fw] = last_fights[fw] early_fights[fl] = last_fights[fl] last_fights[fw] = 1 last_fights[fl] = 0 # update ratings ratings[fw], ratings[fl] = rate_1vs1(ratings[fw], ratings[fl], drawn=drawn) ################################### # train if is_training: if 'winner' in fight: X_train.extend(fight_data) y_train.extend([is_win_1, not is_win_1]) ################################### # test else: scaled_fight_data = scaler.transform(fight_data) f1_pred, f2_pred = reg.predict(scaled_fight_data) ############################# # bet scaling bet_multi = 1 # pred max if f1_pred > f2_pred: f_pred = f1_pred - f2_pred else: f_pred = f2_pred - f1_pred bet_pred_multi = np.polyval([bet_pred_a, bet_pred_b], [f_pred])[0] bet_pred_multi = round(min(1, max(0, bet_pred_multi))) bet_multi += bet_pred_multi bet_multis_cat.append(f'pred:{bet_pred_multi:.0f}') # odds diff if f1_pred > f2_pred: f_odds = 1 / f1_odds - 1 / f2_odds else: f_odds = 1 / f2_odds - 1 / f1_odds bet_odds_multi = np.polyval([bet_odds_a, bet_odds_b], [f_odds])[0] bet_odds_multi = round(min(1, max(0, bet_odds_multi))) bet_multi += bet_odds_multi bet_multis_cat.append(f'odds:{bet_odds_multi:.0f}') # wins and losses if f1_pred > f2_pred: f_wnl = f1_wnl_winrate - f2_wnl_winrate else: f_wnl = f2_wnl_winrate - f1_wnl_winrate bet_wnl_multi = np.polyval([bet_wnl_a, bet_wnl_b], [f_wnl])[0] bet_wnl_multi = round(min(1, max(0, bet_wnl_multi))) bet_multi += bet_wnl_multi 
bet_multis_cat.append(f'wnl:{bet_wnl_multi:.0f}') # trueskill mu if f1_pred > f2_pred: f_ts = f1_ts - f2_ts else: f_ts = f2_ts - f1_ts bet_ts_multi = np.polyval([bet_ts_a, bet_ts_b], [f_ts])[0] bet_ts_multi = round(min(1, max(0, bet_ts_multi))) bet_multi += bet_ts_multi bet_multis_cat.append(f'ts:{bet_ts_multi:.0f}') # trueskill min if f1_pred > f2_pred: f_ts_min = f1_ts_min - f2_ts_min else: f_ts_min = f2_ts_min - f1_ts_min bet_tmi_multi = np.polyval([bet_tmi_a, bet_tmi_b], [f_ts_min])[0] bet_tmi_multi = round(min(1, max(0, bet_tmi_multi))) bet_multi += bet_tmi_multi bet_multis_cat.append(f'tmi:{bet_tmi_multi:.0f}') # trueskill max if f1_pred > f2_pred: f_ts_max = f1_ts_max - f2_ts_max else: f_ts_max = f2_ts_max - f1_ts_max bet_tma_multi = np.polyval([bet_tma_a, bet_tma_b], [f_ts_max])[0] bet_tma_multi = round(min(1, max(0, bet_tma_multi))) bet_multi += bet_tma_multi bet_multis_cat.append(f'tma:{bet_tma_multi:.0f}') bet_size *= round(bet_multi) bet_amt = round(bet_size * bet_multi) assert bet_amt >= 1, f'bet multi is fucked: {bet_multi}' bet_amts.append(bet_size) bet_multis.append(int(round(bet_multi))) ############################# # prediction made if 'prediction' in fight and fight['prediction'] is None: if f1_pred > f2_pred: exp_winner = f1 pred_exp_winner = f1_pred exp_loser = f2 pred_exp_loser = f2_pred else: exp_winner = f2 pred_exp_winner = f2_pred exp_loser = f1 pred_exp_loser = f1_pred logger.warning(f'[{pred_exp_winner * 100:.0f}% vs {pred_exp_loser * 100:.0f}%] Bet x{bet_multi} on {exp_winner} to beat {exp_loser} [{ratings[exp_winner].mu:.0f} vs {ratings[exp_loser].mu:.0f}]') continue # good luck with your bets elif 'winner' not in fight: logger.warning(f'Pending {f1} vs {f2}') continue if is_win_1: fw_pred = f1_pred fl_pred = f2_pred else: fw_pred = f2_pred fl_pred = f1_pred # add test data X_test.extend(scaled_fight_data) y_test.extend([is_win_1, not is_win_1]) # testing outcome correct = 0 payout = -bet_size if is_win_1 and f1_pred > f2_pred: correct = 1 payout += f1_odds * bet_size elif not is_win_1 and f2_pred > f1_pred: correct = 1 payout += f2_odds * bet_size odds_outcomes.append(int((f1_odds < f2_odds and is_win_1) or (f2_odds > f1_odds and not is_win_1))) payouts.append(round(payout, 2)) accuracy = (accuracy[0] + correct, accuracy[1] + 1) # actual outcome pred_flipped = False if 'bet' in fight: is_actual_correct = fight['prediction'] == fw actual = (actual[0] + is_actual_correct, actual[1] + 1) cash = -fight['bet'] if is_actual_correct: fw_odds = f1_odds if is_win_1 else f2_odds cash += fw_odds * fight['bet'] else: fw_odds = f2_odds if is_win_1 else f1_odds tab.append(round(cash, 2)) tab_amts.append(fight['bet']) # pred flipped? pred_flipped = (f1_pred > f2_pred and fight['prediction'] != f1) or ( f2_pred > f1_pred and fight['prediction'] != f2) actual_debug.append(f'${fight["bet"]} {fw_odds:.2f}: {cash:.2f} {fight["prediction"]} {fight["date"]}') preds_flipped.append(int(pred_flipped)) log_balance = f'{"!!" 
if pred_flipped else " "}[{sum(payouts):.0f}|{payout:.0f}]' log_pred = f'[{fw_pred * 100:.0f}% vs {fl_pred * 100:.0f}%]' log_fight = f'x{bet_multi} {fw} {fight["winner"]["by"]} {fl}' log_ratings = f'[{ratings[fw].mu:.0f} vs {ratings[fl].mu:.0f}]' logger.info(f'{log_balance} {log_pred} {log_fight} {log_ratings}') if train: total_payouts = sum(payouts) roi = total_payouts / sum(bet_amts) res = -roi - (total_payouts / 5000) print(f'Score: {-res*100:.2f} ROI {roi * 100:.1f}% Profit ${total_payouts:.0f}') return res else: summary(reg, accuracy, payouts, start_date, bet_amts, bet_multis, bet_multis_cat, actual, tab, tab_amts, odds_outcomes) def summary(reg, accuracy, payouts, start_date, bet_amts, bet_multis, bet_multis_cat, actual, tab, tab_amts, odds_outcomes): logger.info('') logger.info('Tree info:') # reg = get_regressor(X_train, y_train, X_test, y_test, estimators=estimators, max_depth=max_depth) reg_score = reg.evals_result() params = reg.get_params() logger.info(f'Num estimators: {params["n_estimators"]}') logger.info(f'Learning rate: {params["learning_rate"]:.2f}') logger.info(f'Max depth: {params["max_depth"]}') logger.info(f'Accuracy: training={reg_score["validation_0"]["auc"][-1]*100:.0f}%') feature_names = [ 'win%', 'odds', '~odds', 'ts', '~ts', 'sigma', '~sigma', 'ts_min_diff', 'ts_diff', 'ts_max_diff', 'last', '~last', 'early', '~early', 'wins', '~wins', 'losses', '~losses', 'winrate', '~winrate', ] assert len(feature_names) == len(reg.feature_importances_), f'{len(feature_names)} features vs {len(reg.feature_importances_)} reg values' logger.info('') logger.info(f'Features:') features = SortedDict({v: k for k, v in zip(feature_names, reg.feature_importances_)}) for k in features.keys(): logger.info(f'{features[k]}: {k*1000:.0f}') continue if accuracy[1]: payouts = np.array(payouts) logger.info('') logger.info('Testing:') odds_acc = sum([t for t in odds_outcomes if t > 0]) / len(odds_outcomes) logger.info(f'Accuracy {accuracy[0]}/{accuracy[1]} = {accuracy[0]/accuracy[1]*100:.1f}% Odds: {odds_acc*100:.1f}%') logger.info(f'ROI {sum(payouts) / sum(bet_amts) * 100:.1f}% Profit ${sum(payouts):.0f}') days = (datetime.now() - start_date).days logger.info(f'Profit: per day: ${sum(payouts) / days:.2f} per bet ${payouts.mean():.2f}') logger.info(f'Common multis: {Counter(bet_multis).most_common(4)}') logger.info(f'cat multis: {Counter(bet_multis_cat).most_common()}') if actual[1]: tab = np.array(tab) logger.info('') logger.info('Actual:') logger.info(f'Accuracy {actual[0]}/{actual[1]} = {actual[0]/actual[1] * 100:.1f}%') logger.info(f'ROI {sum(tab) / sum(tab_amts) * 100:.2f}% Profit ${sum(tab):.0f}') days = (datetime.now() - datetime(2019, 7, 13)).days logger.info(f'Profit: per day: ${sum(tab) / days:.2f} per bet ${tab.mean():.2f}') sheet = -62.62 if abs(sum(tab) - sheet) > 0.01: for l in actual_debug: logger.warning(l) logger.error(f'debug! 
{sheet:.2f} != {sum(tab):.2f} diff {sum(tab) - sheet:.2f}') def run(): train = 0 names = [ # 'bet_pred_a', 'bet_pred_b', 'bet_odds_a', 'bet_odds_b', 'bet_wnl_a', 'bet_wnl_b', 'bet_ts_a', 'bet_ts_b', 'bet_tmi_a', 'bet_tmi_b', 'bet_tma_a', 'bet_tma_b', ] params = [ 0, 0, 0, 0, 0, 0 ] bounds = [[-np.inf], [np.inf]] assert len(params) == len(names) # assert len(params) == len(bounds[0]) if train: sigma = 1 opts = CMAOptions() # opts['tolx'] = 1E-2 opts['bounds'] = bounds es = CMAEvolutionStrategy(params, sigma, inopts=opts) while not es.stop(): solutions = es.ask() fitness = [main(x, train=1) for x in solutions] es.tell(solutions, fitness) es.disp() print(list(es.result[0])) print(list(es.result[5])) es.result_pretty() print('') print('best') print(list(es.result[0])) print('') print('xfavorite: distribution mean in "phenotype" space, to be considered as current best estimate of the optimum') print(list(es.result[5])) else: main(params) if __name__ == '__main__': run()
[ "trueskill.global_env", "math.sqrt", "numpy.polyval", "sklearn.preprocessing.MinMaxScaler", "loguru.logger.warning", "cma.CMAEvolutionStrategy", "datetime.datetime.now", "datetime.datetime", "collections.defaultdict", "loguru.logger.info", "datetime.datetime.strptime", "numpy.array", "xgboost.XGBRegressor", "cma.CMAOptions", "collections.Counter", "trueskill.rate_1vs1", "itertools.chain", "trueskill.Rating" ]
[((774, 812), 'math.sqrt', 'sqrt', (['(size * (BETA * BETA) + sum_sigma)'], {}), '(size * (BETA * BETA) + sum_sigma)\n', (778, 812), False, 'from math import sqrt\n'), ((822, 834), 'trueskill.global_env', 'global_env', ([], {}), '()\n', (832, 834), False, 'from trueskill import BETA, global_env, rate_1vs1, Rating\n'), ((1286, 1301), 'loguru.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (1297, 1301), False, 'from loguru import logger\n'), ((1306, 1338), 'loguru.logger.info', 'logger.info', (['"""Training model..."""'], {}), "('Training model...')\n", (1317, 1338), False, 'from loguru import logger\n'), ((1475, 1541), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'objective': '"""reg:squarederror"""', 'n_jobs': '(4)'}), "(objective='reg:squarederror', n_jobs=4, **reg_params)\n", (1487, 1541), False, 'from xgboost import XGBRegressor\n'), ((1682, 1719), 'loguru.logger.info', 'logger.info', (['"""Starting main training"""'], {}), "('Starting main training')\n", (1693, 1719), False, 'from loguru import logger\n'), ((3209, 3223), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3221, 3223), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3346, 3370), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (3357, 3370), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((3389, 3414), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.5)'], {}), '(lambda : 0.5)\n', (3400, 3414), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((3432, 3457), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.5)'], {}), '(lambda : 0.5)\n', (3443, 3457), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((15420, 15435), 'loguru.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (15431, 15435), False, 'from loguru import logger\n'), ((15440, 15465), 'loguru.logger.info', 'logger.info', (['"""Tree info:"""'], {}), "('Tree info:')\n", (15451, 15465), False, 'from loguru import logger\n'), ((15639, 15695), 'loguru.logger.info', 'logger.info', (['f"""Num estimators: {params[\'n_estimators\']}"""'], {}), '(f"Num estimators: {params[\'n_estimators\']}")\n', (15650, 15695), False, 'from loguru import logger\n'), ((15700, 15760), 'loguru.logger.info', 'logger.info', (['f"""Learning rate: {params[\'learning_rate\']:.2f}"""'], {}), '(f"Learning rate: {params[\'learning_rate\']:.2f}")\n', (15711, 15760), False, 'from loguru import logger\n'), ((15765, 15813), 'loguru.logger.info', 'logger.info', (['f"""Max depth: {params[\'max_depth\']}"""'], {}), '(f"Max depth: {params[\'max_depth\']}")\n', (15776, 15813), False, 'from loguru import logger\n'), ((15818, 15907), 'loguru.logger.info', 'logger.info', (['f"""Accuracy: training={reg_score[\'validation_0\'][\'auc\'][-1] * 100:.0f}%"""'], {}), '(\n f"Accuracy: training={reg_score[\'validation_0\'][\'auc\'][-1] * 100:.0f}%")\n', (15829, 15907), False, 'from loguru import logger\n'), ((16327, 16342), 'loguru.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (16338, 16342), False, 'from loguru import logger\n'), ((16347, 16372), 'loguru.logger.info', 'logger.info', (['f"""Features:"""'], {}), "(f'Features:')\n", (16358, 16372), False, 'from loguru import logger\n'), ((4194, 4241), 'loguru.logger.info', 'logger.info', (['f"""{scene[\'date\']} {scene[\'name\']}"""'], {}), '(f"{scene[\'date\']} {scene[\'name\']}")\n', (4205, 4241), False, 'from loguru import logger\n'), ((16502, 16547), 'loguru.logger.info', 'logger.info', 
(['f"""{features[k]}: {k * 1000:.0f}"""'], {}), "(f'{features[k]}: {k * 1000:.0f}')\n", (16513, 16547), False, 'from loguru import logger\n'), ((16602, 16619), 'numpy.array', 'np.array', (['payouts'], {}), '(payouts)\n', (16610, 16619), True, 'import numpy as np\n'), ((16628, 16643), 'loguru.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (16639, 16643), False, 'from loguru import logger\n'), ((16652, 16675), 'loguru.logger.info', 'logger.info', (['"""Testing:"""'], {}), "('Testing:')\n", (16663, 16675), False, 'from loguru import logger\n'), ((16765, 16897), 'loguru.logger.info', 'logger.info', (['f"""Accuracy {accuracy[0]}/{accuracy[1]} = {accuracy[0] / accuracy[1] * 100:.1f}% Odds: {odds_acc * 100:.1f}%"""'], {}), "(\n f'Accuracy {accuracy[0]}/{accuracy[1]} = {accuracy[0] / accuracy[1] * 100:.1f}% Odds: {odds_acc * 100:.1f}%'\n )\n", (16776, 16897), False, 'from loguru import logger\n'), ((17314, 17327), 'numpy.array', 'np.array', (['tab'], {}), '(tab)\n', (17322, 17327), True, 'import numpy as np\n'), ((17336, 17351), 'loguru.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (17347, 17351), False, 'from loguru import logger\n'), ((17360, 17382), 'loguru.logger.info', 'logger.info', (['"""Actual:"""'], {}), "('Actual:')\n", (17371, 17382), False, 'from loguru import logger\n'), ((17391, 17481), 'loguru.logger.info', 'logger.info', (['f"""Accuracy {actual[0]}/{actual[1]} = {actual[0] / actual[1] * 100:.1f}%"""'], {}), "(\n f'Accuracy {actual[0]}/{actual[1]} = {actual[0] / actual[1] * 100:.1f}%')\n", (17402, 17481), False, 'from loguru import logger\n'), ((18396, 18408), 'cma.CMAOptions', 'CMAOptions', ([], {}), '()\n', (18406, 18408), False, 'from cma import CMAEvolutionStrategy, CMAOptions\n'), ((18484, 18532), 'cma.CMAEvolutionStrategy', 'CMAEvolutionStrategy', (['params', 'sigma'], {'inopts': 'opts'}), '(params, sigma, inopts=opts)\n', (18504, 18532), False, 'from cma import CMAEvolutionStrategy, CMAOptions\n'), ((1357, 1374), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1365, 1374), True, 'import numpy as np\n'), ((3318, 3326), 'trueskill.Rating', 'Rating', ([], {}), '()\n', (3324, 3326), False, 'from trueskill import BETA, global_env, rate_1vs1, Rating\n'), ((4170, 4185), 'loguru.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (4181, 4185), False, 'from loguru import logger\n'), ((5311, 5335), 'collections.Counter', 'Counter', (['wins_losses[f1]'], {}), '(wins_losses[f1])\n', (5318, 5335), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((5443, 5467), 'collections.Counter', 'Counter', (['wins_losses[f2]'], {}), '(wins_losses[f2])\n', (5450, 5467), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((706, 725), 'itertools.chain', 'chain', (['team1', 'team2'], {}), '(team1, team2)\n', (711, 725), False, 'from itertools import chain\n'), ((1437, 1453), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (1445, 1453), True, 'import numpy as np\n'), ((3925, 3969), 'datetime.datetime.strptime', 'datetime.strptime', (["scene['date']", '"""%Y-%m-%d"""'], {}), "(scene['date'], '%Y-%m-%d')\n", (3942, 3969), False, 'from datetime import datetime\n'), ((7904, 7952), 'trueskill.rate_1vs1', 'rate_1vs1', (['ratings[fw]', 'ratings[fl]'], {'drawn': 'drawn'}), '(ratings[fw], ratings[fl], drawn=drawn)\n', (7913, 7952), False, 'from trueskill import BETA, global_env, rate_1vs1, Rating\n'), ((14834, 14900), 'loguru.logger.info', 'logger.info', (['f"""{log_balance} {log_pred} {log_fight} {log_ratings}"""'], {}), 
"(f'{log_balance} {log_pred} {log_fight} {log_ratings}')\n", (14845, 14900), False, 'from loguru import logger\n'), ((16996, 17010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17008, 17010), False, 'from datetime import datetime\n'), ((17581, 17595), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17593, 17595), False, 'from datetime import datetime\n'), ((17598, 17619), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(13)'], {}), '(2019, 7, 13)\n', (17606, 17619), False, 'from datetime import datetime\n'), ((17832, 17849), 'loguru.logger.warning', 'logger.warning', (['l'], {}), '(l)\n', (17846, 17849), False, 'from loguru import logger\n'), ((8735, 8781), 'numpy.polyval', 'np.polyval', (['[bet_pred_a, bet_pred_b]', '[f_pred]'], {}), '([bet_pred_a, bet_pred_b], [f_pred])\n', (8745, 8781), True, 'import numpy as np\n'), ((9200, 9246), 'numpy.polyval', 'np.polyval', (['[bet_odds_a, bet_odds_b]', '[f_odds]'], {}), '([bet_odds_a, bet_odds_b], [f_odds])\n', (9210, 9246), True, 'import numpy as np\n'), ((9680, 9723), 'numpy.polyval', 'np.polyval', (['[bet_wnl_a, bet_wnl_b]', '[f_wnl]'], {}), '([bet_wnl_a, bet_wnl_b], [f_wnl])\n', (9690, 9723), True, 'import numpy as np\n'), ((10110, 10150), 'numpy.polyval', 'np.polyval', (['[bet_ts_a, bet_ts_b]', '[f_ts]'], {}), '([bet_ts_a, bet_ts_b], [f_ts])\n', (10120, 10150), True, 'import numpy as np\n'), ((10558, 10604), 'numpy.polyval', 'np.polyval', (['[bet_tmi_a, bet_tmi_b]', '[f_ts_min]'], {}), '([bet_tmi_a, bet_tmi_b], [f_ts_min])\n', (10568, 10604), True, 'import numpy as np\n'), ((11017, 11063), 'numpy.polyval', 'np.polyval', (['[bet_tma_a, bet_tma_b]', '[f_ts_max]'], {}), '([bet_tma_a, bet_tma_b], [f_ts_max])\n', (11027, 11063), True, 'import numpy as np\n'), ((12116, 12321), 'loguru.logger.warning', 'logger.warning', (['f"""[{pred_exp_winner * 100:.0f}% vs {pred_exp_loser * 100:.0f}%] Bet x{bet_multi} on {exp_winner} to beat {exp_loser} [{ratings[exp_winner].mu:.0f} vs {ratings[exp_loser].mu:.0f}]"""'], {}), "(\n f'[{pred_exp_winner * 100:.0f}% vs {pred_exp_loser * 100:.0f}%] Bet x{bet_multi} on {exp_winner} to beat {exp_loser} [{ratings[exp_winner].mu:.0f} vs {ratings[exp_loser].mu:.0f}]'\n )\n", (12130, 12321), False, 'from loguru import logger\n'), ((12449, 12488), 'loguru.logger.warning', 'logger.warning', (['f"""Pending {f1} vs {f2}"""'], {}), "(f'Pending {f1} vs {f2}')\n", (12463, 12488), False, 'from loguru import logger\n'), ((17167, 17186), 'collections.Counter', 'Counter', (['bet_multis'], {}), '(bet_multis)\n', (17174, 17186), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((17240, 17263), 'collections.Counter', 'Counter', (['bet_multis_cat'], {}), '(bet_multis_cat)\n', (17247, 17263), False, 'from collections import Counter, defaultdict, OrderedDict\n')]
import unittest
import pickle
import tempfile
import os
import math
from datetime import datetime

import numpy as np
import quaternion
import cv2

from visnav.algo.model import Camera
from visnav.algo.odometry import VisualOdometry, Pose
from visnav.algo import tools


class TestOdometry(unittest.TestCase):
    def setUp(self, verbose=False):
        self.cam = get_cam()
        params = {
            'min_keypoint_dist': 10,
            'min_inliers': 12,
            'min_2d2d_inliers': 24,
        }
        self.odo = VisualOdometry(self.cam, self.cam.width/4, verbose=verbose, pause=False,
                                  use_scale_correction=False, est_cam_pose=False, **params)

    def tearDown(self):
        pass

    def assertQuatAlmostEqual(self, quat0, quat1, delta=1e-4, msg=None):
        if quat0 is None and quat1 is None:
            return
        diff = math.degrees(tools.angle_between_q(quat0, quat1))
        self.assertAlmostEqual(0, diff, delta=delta,
                               msg=None if msg is None else (msg + ': angle[deg] %f > %f' % (diff, delta)))

    def assertArrayAlmostEqual(self, arr0, arr1, delta=1e-7, ord=np.inf, msg=None):
        if arr0 is None and arr1 is None:
            return
        norm = np.linalg.norm(np.array(arr0)-np.array(arr1), ord=ord)
        self.assertAlmostEqual(0, norm, delta=delta,
                               msg=None if msg is None else (msg + ': norm(%s) %f > %f' % (ord, norm, delta)))

    def assertPoseAlmostEqual(self, pose0: Pose, pose1: Pose, delta_v=1e-7, delta_q=1e-4, msg=None):
        if pose0 is None and pose1 is None:
            return
        self.assertArrayAlmostEqual(pose0.loc, pose1.loc, delta=delta_v, ord=2,
                                    msg=None if msg is None else (msg + ': loc %s vs %s' % (pose0.loc, pose1.loc)))
        self.assertQuatAlmostEqual(pose0.quat, pose1.quat, delta=delta_q,
                                   msg=None if msg is None else (msg + ': quat %s vs %s' % (pose0.quat, pose1.quat)))

    def assertOdomResultAlmostEqual(self, result0, result1):
        pose0, bias_sds0, scale_sd0 = result0
        pose1, bias_sds1, scale_sd1 = result1
        msg = '%s deviate(s) too much from the expected value(s)'
        self.assertPoseAlmostEqual(pose0, pose1, delta_v=0.02, delta_q=1, msg=msg % 'estimated poses')
        self.assertArrayAlmostEqual(bias_sds0, bias_sds1, delta=0.1, ord=np.inf, msg=msg % 'error estimates')
        self.assertAlmostEqual(scale_sd0, scale_sd1, delta=0.01, msg=msg % 'scale error estimate')

    def test_rotating_object(self, inputs=None, results=None):
        pickle_file = os.path.join(os.path.dirname(__file__), 'data', 'test_rotating_object.pickle')
        record = inputs is not None and results is None
        if not record and results is None:
            inputs, results = self._load_recording(pickle_file)
        else:
            results = []

        cam_q = quaternion.one
        orig_time = datetime.strptime('2020-07-01 15:42:00', '%Y-%m-%d %H:%M:%S').timestamp()
        for i, (img, cam_obj_v, cam_obj_q) in enumerate(inputs):
            time = datetime.fromtimestamp(orig_time + i*60)
            prior = Pose(cam_obj_v, cam_obj_q, np.ones((3,)) * 0.1, np.ones((3,)) * 0.01)
            res = self.odo.process(img, time, prior, cam_q)
            if record:
                results.append(res)
            elif 0:
                self.assertOdomResultAlmostEqual(results[i], res)
            if i > 1 and 0:
                self.assertIsNotNone(res[0], msg='failed to get pose estimate')
                self.assertPoseAlmostEqual(prior, res[0], delta_v=0.1, delta_q=10,
                                           msg='estimated pose deviates too much from the real one')

        if record:
            self._save_recording(pickle_file, inputs, results)

    def _save_recording(self, fname, inputs, results):
        tf = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        tf.close()
        for i in range(len(inputs)):
            cv2.imwrite(tf.name, inputs[i][0], (cv2.IMWRITE_PNG_COMPRESSION, 9))
            with open(tf.name, 'br') as fh:
                inputs[i][0] = fh.read()
        os.unlink(tf.name)
        with open(fname, 'wb') as fh:
            pickle.dump((inputs, results), fh)

    def _load_recording(self, fname):
        with open(fname, 'rb') as fh:
            inputs, results = pickle.load(fh)
        tf = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        tf.close()
        for i in range(len(inputs)):
            with open(tf.name, 'wb') as fh:
                fh.write(inputs[i][0])
            inputs[i][0] = cv2.imread(tf.name, cv2.IMREAD_GRAYSCALE)
        os.unlink(tf.name)
        return inputs, results


def get_rot_imgs():
    pass


def get_cam():
    common_kwargs_worst = {
        'sensor_size': (2048 * 0.0022, 1944 * 0.0022),
        'quantum_eff': 0.30,
        'px_saturation_e': 2200,  # snr_max = 20*log10(sqrt(sat_e)) dB
        'lambda_min': 350e-9,
        'lambda_eff': 580e-9,
        'lambda_max': 800e-9,
        'dark_noise_mu': 40,
        'dark_noise_sd': 6.32,
        'readout_noise_sd': 15,  # dark_noise_sd should be sqrt(dark_noise_mu)
        'emp_coef': 1,  # dynamic range = 20*log10(sat_e/readout_noise))
        'exclusion_angle_x': 55,
        'exclusion_angle_y': 90,
    }
    common_kwargs_best = dict(common_kwargs_worst)
    common_kwargs_best.update({
        'quantum_eff': 0.4,
        'px_saturation_e': 3500,
        'dark_noise_mu': 25,
        'dark_noise_sd': 5,
        'readout_noise_sd': 5,
    })
    common_kwargs = common_kwargs_best

    return Camera(
        2048,    # width in pixels
        1944,    # height in pixels
        7.7,     # x fov in degrees (could be 6 & 5.695, 5.15 & 4.89, 7.7 & 7.309)
        7.309,   # y fov in degrees
        f_stop=5,               # TODO: put better value here
        point_spread_fn=0.50,   # ratio of brightness in center pixel
        scattering_coef=2e-10,  # affects strength of haze/veil when sun shines on the lens
        **common_kwargs
    )


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'record':
        from visnav.algo.model import SystemModel
        from visnav.missions.didymos import DidymosSystemModel
        from visnav.render.render import RenderEngine
        from visnav.settings import *

        sm = DidymosSystemModel(use_narrow_cam=False, target_primary=False, hi_res_shape_model=True)
        re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.05, 2)
        obj = sm.asteroid.real_shape_model
        obj_idx = re.load_object(obj)
        light = np.array([1, 0, -0.5])
        light /= np.linalg.norm(light)
        cam_ast_v0 = np.array([0, 0, -sm.min_med_distance * 0.7])
        cam_ast_q0 = quaternion.one
        dq = tools.angleaxis_to_q((math.radians(1), 0, 1, 0))

        inputs = []
        for i in range(60):
            cam_ast_v = cam_ast_v0
            cam_ast_q = dq**i * cam_ast_q0
            image = re.render(obj_idx, cam_ast_v, cam_ast_q, light, gamma=1.8, get_depth=False)
            cam_ast_cv_v = tools.q_times_v(SystemModel.cv2gl_q, cam_ast_v)
            cam_ast_cv_q = SystemModel.cv2gl_q * cam_ast_q * SystemModel.cv2gl_q.conj()
            inputs.append([image, cam_ast_cv_v, cam_ast_cv_q])

        if 0:
            for image, _, _ in inputs:
                cv2.imshow('t', cv2.resize(image, None, fx=0.5, fy=0.5))
                cv2.waitKey()
        else:
            t = TestOdometry()
            t.setUp(verbose=True)
            t.test_rotating_object(inputs=inputs)
    else:
        unittest.main()
[ "pickle.dump", "os.unlink", "numpy.ones", "pickle.load", "numpy.linalg.norm", "unittest.main", "visnav.render.render.RenderEngine", "math.radians", "cv2.imwrite", "os.path.dirname", "visnav.algo.odometry.VisualOdometry", "visnav.missions.didymos.DidymosSystemModel", "cv2.resize", "cv2.waitKey", "datetime.datetime.strptime", "datetime.datetime.fromtimestamp", "visnav.algo.tools.angle_between_q", "tempfile.NamedTemporaryFile", "visnav.algo.model.SystemModel.cv2gl_q.conj", "visnav.algo.model.Camera", "cv2.imread", "numpy.array", "visnav.algo.tools.q_times_v" ]
[((5755, 5860), 'visnav.algo.model.Camera', 'Camera', (['(2048)', '(1944)', '(7.7)', '(7.309)'], {'f_stop': '(5)', 'point_spread_fn': '(0.5)', 'scattering_coef': '(2e-10)'}), '(2048, 1944, 7.7, 7.309, f_stop=5, point_spread_fn=0.5,\n scattering_coef=2e-10, **common_kwargs)\n', (5761, 5860), False, 'from visnav.algo.model import Camera\n'), ((551, 687), 'visnav.algo.odometry.VisualOdometry', 'VisualOdometry', (['self.cam', '(self.cam.width / 4)'], {'verbose': 'verbose', 'pause': '(False)', 'use_scale_correction': '(False)', 'est_cam_pose': '(False)'}), '(self.cam, self.cam.width / 4, verbose=verbose, pause=False,\n use_scale_correction=False, est_cam_pose=False, **params)\n', (565, 687), False, 'from visnav.algo.odometry import VisualOdometry, Pose\n'), ((3996, 4052), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".png"""', 'delete': '(False)'}), "(suffix='.png', delete=False)\n", (4023, 4052), False, 'import tempfile\n'), ((4289, 4307), 'os.unlink', 'os.unlink', (['tf.name'], {}), '(tf.name)\n', (4298, 4307), False, 'import os\n'), ((4540, 4596), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".png"""', 'delete': '(False)'}), "(suffix='.png', delete=False)\n", (4567, 4596), False, 'import tempfile\n'), ((4823, 4841), 'os.unlink', 'os.unlink', (['tf.name'], {}), '(tf.name)\n', (4832, 4841), False, 'import os\n'), ((6525, 6616), 'visnav.missions.didymos.DidymosSystemModel', 'DidymosSystemModel', ([], {'use_narrow_cam': '(False)', 'target_primary': '(False)', 'hi_res_shape_model': '(True)'}), '(use_narrow_cam=False, target_primary=False,\n hi_res_shape_model=True)\n', (6543, 6616), False, 'from visnav.missions.didymos import DidymosSystemModel\n'), ((6627, 6689), 'visnav.render.render.RenderEngine', 'RenderEngine', (['sm.cam.width', 'sm.cam.height'], {'antialias_samples': '(0)'}), '(sm.cam.width, sm.cam.height, antialias_samples=0)\n', (6639, 6689), False, 'from visnav.render.render import RenderEngine\n'), ((6853, 6875), 'numpy.array', 'np.array', (['[1, 0, -0.5]'], {}), '([1, 0, -0.5])\n', (6861, 6875), True, 'import numpy as np\n'), ((6894, 6915), 'numpy.linalg.norm', 'np.linalg.norm', (['light'], {}), '(light)\n', (6908, 6915), True, 'import numpy as np\n'), ((6938, 6982), 'numpy.array', 'np.array', (['[0, 0, -sm.min_med_distance * 0.7]'], {}), '([0, 0, -sm.min_med_distance * 0.7])\n', (6946, 6982), True, 'import numpy as np\n'), ((7856, 7871), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7869, 7871), False, 'import unittest\n'), ((928, 963), 'visnav.algo.tools.angle_between_q', 'tools.angle_between_q', (['quat0', 'quat1'], {}), '(quat0, quat1)\n', (949, 963), False, 'from visnav.algo import tools\n'), ((2708, 2733), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2723, 2733), False, 'import os\n'), ((3198, 3240), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(orig_time + i * 60)'], {}), '(orig_time + i * 60)\n', (3220, 3240), False, 'from datetime import datetime\n'), ((4124, 4192), 'cv2.imwrite', 'cv2.imwrite', (['tf.name', 'inputs[i][0]', '(cv2.IMWRITE_PNG_COMPRESSION, 9)'], {}), '(tf.name, inputs[i][0], (cv2.IMWRITE_PNG_COMPRESSION, 9))\n', (4135, 4192), False, 'import cv2\n'), ((4362, 4396), 'pickle.dump', 'pickle.dump', (['(inputs, results)', 'fh'], {}), '((inputs, results), fh)\n', (4373, 4396), False, 'import pickle\n'), ((4508, 4523), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (4519, 4523), False, 'import pickle\n'), ((7340, 7387), 
'visnav.algo.tools.q_times_v', 'tools.q_times_v', (['SystemModel.cv2gl_q', 'cam_ast_v'], {}), '(SystemModel.cv2gl_q, cam_ast_v)\n', (7355, 7387), False, 'from visnav.algo import tools\n'), ((1309, 1323), 'numpy.array', 'np.array', (['arr0'], {}), '(arr0)\n', (1317, 1323), True, 'import numpy as np\n'), ((1324, 1338), 'numpy.array', 'np.array', (['arr1'], {}), '(arr1)\n', (1332, 1338), True, 'import numpy as np\n'), ((3036, 3097), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-07-01 15:42:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-07-01 15:42:00', '%Y-%m-%d %H:%M:%S')\n", (3053, 3097), False, 'from datetime import datetime\n'), ((4772, 4813), 'cv2.imread', 'cv2.imread', (['tf.name', 'cv2.IMREAD_GRAYSCALE'], {}), '(tf.name, cv2.IMREAD_GRAYSCALE)\n', (4782, 4813), False, 'import cv2\n'), ((7056, 7071), 'math.radians', 'math.radians', (['(1)'], {}), '(1)\n', (7068, 7071), False, 'import math\n'), ((7450, 7476), 'visnav.algo.model.SystemModel.cv2gl_q.conj', 'SystemModel.cv2gl_q.conj', ([], {}), '()\n', (7474, 7476), False, 'from visnav.algo.model import SystemModel\n'), ((7689, 7702), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (7700, 7702), False, 'import cv2\n'), ((3287, 3300), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (3294, 3300), True, 'import numpy as np\n'), ((3308, 3321), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (3315, 3321), True, 'import numpy as np\n'), ((7631, 7670), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(image, None, fx=0.5, fy=0.5)\n', (7641, 7670), False, 'import cv2\n')]
""" Show differences between WT and STFT """ from scipy import signal import matplotlib.pyplot as plt import numpy as np import pywt waveletname = 'morl' scales = range(1,200) t = np.linspace(-1, 1, 200, endpoint=False) sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) t = np.linspace(-1, 1, 50, endpoint=False) sig1 = np.sin(2 * np.pi * 16 * t)+100*np.sin(2 * np.pi *0.1 * t) for i in range(50): sig[50+i] = sig1[i] + sig[50+i] coeff, freq = pywt.cwt(sig, scales, waveletname, 1) t = np.linspace(0, 200, 200, endpoint=False) plt.plot(t,sig,color='k') plt.title('Transformed signal') plt.ylabel('Amplitude') plt.xlabel('t [s]') plt.figure() plt.pcolormesh(coeff, cmap='plasma') plt.title('Wavelet Transform (Morlett kernel)') plt.ylabel('f [Hz]') plt.xlabel('t [s]') f, t, Zxx = signal.stft(sig, fs=400,nperseg = 8) t = t*400 plt.figure() plt.pcolormesh(t, f, np.abs(Zxx), cmap='plasma') plt.title('Short Time Fourier Transform (STFT)') plt.ylabel('f [Hz]') plt.xlabel('t [s]') plt.show()
[ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "scipy.signal.gausspulse", "matplotlib.pyplot.plot", "numpy.abs", "pywt.cwt", "matplotlib.pyplot.figure", "numpy.sin", "numpy.linspace", "matplotlib.pyplot.pcolormesh", "numpy.cos", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "scipy.signal.stft" ]
[((183, 222), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {'endpoint': '(False)'}), '(-1, 1, 200, endpoint=False)\n', (194, 222), True, 'import numpy as np\n'), ((295, 333), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(50)'], {'endpoint': '(False)'}), '(-1, 1, 50, endpoint=False)\n', (306, 333), True, 'import numpy as np\n'), ((470, 507), 'pywt.cwt', 'pywt.cwt', (['sig', 'scales', 'waveletname', '(1)'], {}), '(sig, scales, waveletname, 1)\n', (478, 507), False, 'import pywt\n'), ((512, 552), 'numpy.linspace', 'np.linspace', (['(0)', '(200)', '(200)'], {'endpoint': '(False)'}), '(0, 200, 200, endpoint=False)\n', (523, 552), True, 'import numpy as np\n'), ((553, 580), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sig'], {'color': '"""k"""'}), "(t, sig, color='k')\n", (561, 580), True, 'import matplotlib.pyplot as plt\n'), ((579, 610), 'matplotlib.pyplot.title', 'plt.title', (['"""Transformed signal"""'], {}), "('Transformed signal')\n", (588, 610), True, 'import matplotlib.pyplot as plt\n'), ((611, 634), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (621, 634), True, 'import matplotlib.pyplot as plt\n'), ((635, 654), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [s]"""'], {}), "('t [s]')\n", (645, 654), True, 'import matplotlib.pyplot as plt\n'), ((655, 667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (665, 667), True, 'import matplotlib.pyplot as plt\n'), ((668, 704), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['coeff'], {'cmap': '"""plasma"""'}), "(coeff, cmap='plasma')\n", (682, 704), True, 'import matplotlib.pyplot as plt\n'), ((705, 752), 'matplotlib.pyplot.title', 'plt.title', (['"""Wavelet Transform (Morlett kernel)"""'], {}), "('Wavelet Transform (Morlett kernel)')\n", (714, 752), True, 'import matplotlib.pyplot as plt\n'), ((753, 773), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (763, 773), True, 'import matplotlib.pyplot as plt\n'), ((774, 793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [s]"""'], {}), "('t [s]')\n", (784, 793), True, 'import matplotlib.pyplot as plt\n'), ((806, 841), 'scipy.signal.stft', 'signal.stft', (['sig'], {'fs': '(400)', 'nperseg': '(8)'}), '(sig, fs=400, nperseg=8)\n', (817, 841), False, 'from scipy import signal\n'), ((853, 865), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (863, 865), True, 'import matplotlib.pyplot as plt\n'), ((915, 963), 'matplotlib.pyplot.title', 'plt.title', (['"""Short Time Fourier Transform (STFT)"""'], {}), "('Short Time Fourier Transform (STFT)')\n", (924, 963), True, 'import matplotlib.pyplot as plt\n'), ((964, 984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (974, 984), True, 'import matplotlib.pyplot as plt\n'), ((985, 1004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [s]"""'], {}), "('t [s]')\n", (995, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1013, 1015), True, 'import matplotlib.pyplot as plt\n'), ((230, 255), 'numpy.cos', 'np.cos', (['(2 * np.pi * 7 * t)'], {}), '(2 * np.pi * 7 * t)\n', (236, 255), True, 'import numpy as np\n'), ((258, 290), 'scipy.signal.gausspulse', 'signal.gausspulse', (['(t - 0.4)'], {'fc': '(2)'}), '(t - 0.4, fc=2)\n', (275, 290), False, 'from scipy import signal\n'), ((342, 368), 'numpy.sin', 'np.sin', (['(2 * np.pi * 16 * t)'], {}), '(2 * np.pi * 16 * t)\n', (348, 368), True, 'import numpy as np\n'), ((887, 898), 
'numpy.abs', 'np.abs', (['Zxx'], {}), '(Zxx)\n', (893, 898), True, 'import numpy as np\n'), ((373, 400), 'numpy.sin', 'np.sin', (['(2 * np.pi * 0.1 * t)'], {}), '(2 * np.pi * 0.1 * t)\n', (379, 400), True, 'import numpy as np\n')]
import math
import numpy as np

#1-a
def function1():
    value=0
    for i in range(1,1000+1):
        value+=i
    return value

#1-b
def function2(m):
    value=0
    for i in range(1,m+1):
        value+=i
    return value

#2
def function3():
    value=0
    for i in range(1,100+1):
        value+=math.sqrt(i*math.pi/100)*math.sin(i*math.pi/100)
    return value

print(function1())
print(function2(1000))
print(function3())
# 500500
# 500500
# 77.51389798916512

# object-oriented programming
class physic_calculation:
    def __init__(self):
        pass

    def function_a(self):
        value1=0
        for i in range(1,1000+1):
            value1+=i
        return value1

    def function_b(self,m):
        self.m=m
        value2=0
        for i in range(1,self.m+1):
            value2+=i
        return value2

    def function_c(self):
        value3=0
        for i in range(1,100+1):
            value3+=math.sqrt(i*math.pi/100)*math.sin(i*math.pi/100)
        return value3

pc=physic_calculation()
print("---------------OOP----------------")
print(pc.function_a())
print(pc.function_b(1000))
print(pc.function_c())
# 500500
# 500500
# 77.51389798916512

print("---------------numpy----------------")
a=np.arange(1,26).reshape(5,5)
print(a)
# [[ 1  2  3  4  5]
#  [ 6  7  8  9 10]
#  [11 12 13 14 15]
#  [16 17 18 19 20]
#  [21 22 23 24 25]]
[ "math.sin", "numpy.arange", "math.sqrt" ]
[((1229, 1245), 'numpy.arange', 'np.arange', (['(1)', '(26)'], {}), '(1, 26)\n', (1238, 1245), True, 'import numpy as np\n'), ((308, 336), 'math.sqrt', 'math.sqrt', (['(i * math.pi / 100)'], {}), '(i * math.pi / 100)\n', (317, 336), False, 'import math\n'), ((333, 360), 'math.sin', 'math.sin', (['(i * math.pi / 100)'], {}), '(i * math.pi / 100)\n', (341, 360), False, 'import math\n'), ((929, 957), 'math.sqrt', 'math.sqrt', (['(i * math.pi / 100)'], {}), '(i * math.pi / 100)\n', (938, 957), False, 'import math\n'), ((954, 981), 'math.sin', 'math.sin', (['(i * math.pi / 100)'], {}), '(i * math.pi / 100)\n', (962, 981), False, 'import math\n')]
import torch
import numpy as np
from time import time
from os.path import join
import lpips
from Hessian.GAN_hessian_compute import hessian_compute
#%%
ImDist = lpips.LPIPS(net='squeeze').cuda()
use_gpu = True if torch.cuda.is_available() else False
model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', 'PGAN', model_name='celebAHQ-256', pretrained=True, useGPU=use_gpu)
num_images = 1
noise, _ = model.buildNoiseData(num_images)
noise.requires_grad_(True)
# with torch.no_grad():
generated_images = model.test(noise)
#%%
img = model.avgG.forward(noise)
#%%
class PGGAN_wrapper():  # nn.Module
    def __init__(self, PGGAN, ):
        self.PGGAN = PGGAN

    def visualize(self, code, scale=1):
        imgs = self.PGGAN.forward(code,)  # Matlab version default to 0.7
        return torch.clamp((imgs + 1.0) / 2.0, 0, 1) * scale

G = PGGAN_wrapper(model.avgG)
#%%
feat = noise.detach().clone().cuda()
EPS = 1E-2
T0 = time()
eva_BI, evc_BI, H_BI = hessian_compute(G, feat, ImDist, hessian_method="BackwardIter")
print("%.2f sec" % (time() - T0))  # 95.7 sec
T0 = time()
eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter")
print("%.2f sec" % (time() - T0))  # 61.8 sec
T0 = time()
eva_BP, evc_BP, H_BP = hessian_compute(G, feat, ImDist, hessian_method="BP")
print("%.2f sec" % (time() - T0))  # 95.4 sec
#%%
print("Correlation of Flattened Hessian matrix BP vs BackwardIter %.3f" % np.corrcoef(H_BP.flatten(), H_BI.flatten())[0, 1])
print("Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f" % np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1])
print("Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter %.3f" % np.corrcoef(H_FI.flatten(), H_BI.flatten())[0, 1])
# Correlation of Flattened Hessian matrix BP vs BackwardIter 1.000
# Correlation of Flattened Hessian matrix BP vs ForwardIter 0.877
# Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter 0.877
#%%
H_col = []
for EPS in [1E-5, 1E-4, 1E-3, 1E-2, 1E-1, 1, 2, 10]:
    T0 = time()
    eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter", EPS=EPS)
    print("%.2f sec" % (time() - T0))  # 325.83 sec
    print("EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f" % (EPS, np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1]))
    H_col.append((eva_FI, evc_FI, H_FI))
# EPS 1.0e-05 Correlation of Flattened Hessian matrix BP vs ForwardIter 1.000
# EPS 1.0e-04 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.999
# EPS 1.0e-03 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.989
# EPS 1.0e-02 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.901
# EPS 1.0e-01 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.398
# EPS 1.0e+00 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.046
# EPS 2.0e+00 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.008
# EPS 1.0e+01 Correlation of Flattened Hessian matrix BP vs ForwardIter -0.003
#%%
#%% Visualize Spectra
figdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\PGGAN"
savedir = r"E:\Cluster_Backup\PGGAN"
# eva_col = []
# evc_col = []
# for triali in tqdm(range(400)):
#     data = np.load(join(savedir, "Hessian_cmp_%d.npz" % triali))
#     eva_BP = data["eva_BP"]
#     evc_BP = data["evc_BP"]
#     eva_col.append(eva_BP)
#     evc_col.append(evc_BP)
#
# eva_col = np.array(eva_col)
from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz
eva_col, evc_col, feat_col, meta = scan_hess_npz(savedir, "Hessian_cmp_(\d*).npz", featkey="feat")
feat_col = np.array(feat_col).squeeze()
H_avg, eva_avg, evc_avg = average_H(eva_col, evc_col)
np.savez(join(figdir, "H_avg_%s.npz"%"PGGAN"), H_avg=H_avg, eva_avg=eva_avg, evc_avg=evc_avg, feats=feat_col)
#%%
fig = plot_spectra(eva_col, figdir=figdir, titstr="PGGAN", )
#%%
corr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=True)
# without cuda 12:11 mins, with cuda 8:21
# corr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=False)
#%%
fig1, fig2 = plot_consistentcy_mat(corr_mat_log, corr_mat_lin, figdir=figdir, titstr="PGGAN")
#%%
fig3 = plot_consistency_example(eva_col, evc_col, figdir=figdir, nsamp=5, titstr="PGGAN",)
fig3.show()
[ "Hessian.hessian_analysis_tools.plot_consistency_example", "Hessian.hessian_analysis_tools.average_H", "Hessian.GAN_hessian_compute.hessian_compute", "Hessian.hessian_analysis_tools.plot_spectra", "Hessian.hessian_analysis_tools.scan_hess_npz", "time.time", "torch.clamp", "torch.cuda.is_available", "lpips.LPIPS", "numpy.array", "Hessian.hessian_analysis_tools.plot_consistentcy_mat", "torch.hub.load", "os.path.join", "Hessian.hessian_analysis_tools.compute_hess_corr" ]
[((259, 386), 'torch.hub.load', 'torch.hub.load', (['"""facebookresearch/pytorch_GAN_zoo:hub"""', '"""PGAN"""'], {'model_name': '"""celebAHQ-256"""', 'pretrained': '(True)', 'useGPU': 'use_gpu'}), "('facebookresearch/pytorch_GAN_zoo:hub', 'PGAN', model_name=\n 'celebAHQ-256', pretrained=True, useGPU=use_gpu)\n", (273, 386), False, 'import torch\n'), ((975, 981), 'time.time', 'time', ([], {}), '()\n', (979, 981), False, 'from time import time\n'), ((1005, 1068), 'Hessian.GAN_hessian_compute.hessian_compute', 'hessian_compute', (['G', 'feat', 'ImDist'], {'hessian_method': '"""BackwardIter"""'}), "(G, feat, ImDist, hessian_method='BackwardIter')\n", (1020, 1068), False, 'from Hessian.GAN_hessian_compute import hessian_compute\n'), ((1120, 1126), 'time.time', 'time', ([], {}), '()\n', (1124, 1126), False, 'from time import time\n'), ((1150, 1212), 'Hessian.GAN_hessian_compute.hessian_compute', 'hessian_compute', (['G', 'feat', 'ImDist'], {'hessian_method': '"""ForwardIter"""'}), "(G, feat, ImDist, hessian_method='ForwardIter')\n", (1165, 1212), False, 'from Hessian.GAN_hessian_compute import hessian_compute\n'), ((1264, 1270), 'time.time', 'time', ([], {}), '()\n', (1268, 1270), False, 'from time import time\n'), ((1294, 1347), 'Hessian.GAN_hessian_compute.hessian_compute', 'hessian_compute', (['G', 'feat', 'ImDist'], {'hessian_method': '"""BP"""'}), "(G, feat, ImDist, hessian_method='BP')\n", (1309, 1347), False, 'from Hessian.GAN_hessian_compute import hessian_compute\n'), ((3660, 3724), 'Hessian.hessian_analysis_tools.scan_hess_npz', 'scan_hess_npz', (['savedir', '"""Hessian_cmp_(\\\\d*).npz"""'], {'featkey': '"""feat"""'}), "(savedir, 'Hessian_cmp_(\\\\d*).npz', featkey='feat')\n", (3673, 3724), False, 'from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz\n'), ((3790, 3817), 'Hessian.hessian_analysis_tools.average_H', 'average_H', (['eva_col', 'evc_col'], {}), '(eva_col, evc_col)\n', (3799, 3817), False, 'from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz\n'), ((3938, 3990), 'Hessian.hessian_analysis_tools.plot_spectra', 'plot_spectra', (['eva_col'], {'figdir': 'figdir', 'titstr': '"""PGGAN"""'}), "(eva_col, figdir=figdir, titstr='PGGAN')\n", (3950, 3990), False, 'from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz\n'), ((4026, 4091), 'Hessian.hessian_analysis_tools.compute_hess_corr', 'compute_hess_corr', (['eva_col', 'evc_col'], {'figdir': 'figdir', 'use_cuda': '(True)'}), '(eva_col, evc_col, figdir=figdir, use_cuda=True)\n', (4043, 4091), False, 'from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz\n'), ((4249, 4334), 'Hessian.hessian_analysis_tools.plot_consistentcy_mat', 'plot_consistentcy_mat', (['corr_mat_log', 'corr_mat_lin'], {'figdir': 'figdir', 'titstr': '"""PGGAN"""'}), "(corr_mat_log, corr_mat_lin, figdir=figdir, titstr='PGGAN'\n )\n", (4270, 4334), False, 'from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz\n'), ((4341, 4428), 'Hessian.hessian_analysis_tools.plot_consistency_example', 'plot_consistency_example', (['eva_col', 'evc_col'], {'figdir': 'figdir', 'nsamp': '(5)', 'titstr': 
'"""PGGAN"""'}), "(eva_col, evc_col, figdir=figdir, nsamp=5, titstr=\n 'PGGAN')\n", (4365, 4428), False, 'from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz\n'), ((214, 239), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (237, 239), False, 'import torch\n'), ((2079, 2085), 'time.time', 'time', ([], {}), '()\n', (2083, 2085), False, 'from time import time\n'), ((2113, 2184), 'Hessian.GAN_hessian_compute.hessian_compute', 'hessian_compute', (['G', 'feat', 'ImDist'], {'hessian_method': '"""ForwardIter"""', 'EPS': 'EPS'}), "(G, feat, ImDist, hessian_method='ForwardIter', EPS=EPS)\n", (2128, 2184), False, 'from Hessian.GAN_hessian_compute import hessian_compute\n'), ((3827, 3865), 'os.path.join', 'join', (['figdir', "('H_avg_%s.npz' % 'PGGAN')"], {}), "(figdir, 'H_avg_%s.npz' % 'PGGAN')\n", (3831, 3865), False, 'from os.path import join\n'), ((162, 188), 'lpips.LPIPS', 'lpips.LPIPS', ([], {'net': '"""squeeze"""'}), "(net='squeeze')\n", (173, 188), False, 'import lpips\n'), ((3735, 3753), 'numpy.array', 'np.array', (['feat_col'], {}), '(feat_col)\n', (3743, 3753), True, 'import numpy as np\n'), ((841, 878), 'torch.clamp', 'torch.clamp', (['((imgs + 1.0) / 2.0)', '(0)', '(1)'], {}), '((imgs + 1.0) / 2.0, 0, 1)\n', (852, 878), False, 'import torch\n'), ((1089, 1095), 'time.time', 'time', ([], {}), '()\n', (1093, 1095), False, 'from time import time\n'), ((1233, 1239), 'time.time', 'time', ([], {}), '()\n', (1237, 1239), False, 'from time import time\n'), ((1368, 1374), 'time.time', 'time', ([], {}), '()\n', (1372, 1374), False, 'from time import time\n'), ((2209, 2215), 'time.time', 'time', ([], {}), '()\n', (2213, 2215), False, 'from time import time\n')]
import torch
from torch.utils.data import Dataset
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import collections

from chr import coverage

import pdb

class RegressionDataset(Dataset):
    def __init__(self, X_data, y_data):
        self.X_data = torch.from_numpy(X_data).float()
        self.y_data = torch.from_numpy(y_data).float()

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)

def evaluate_predictions(pred, Y, X=None):
    # Extract lower and upper prediction bands
    pred_l = np.min(pred,1)
    pred_h = np.max(pred,1)
    # Marginal coverage
    cover = (Y>=pred_l)*(Y<=pred_h)
    marg_coverage = np.mean(cover)
    if X is None:
        wsc_coverage = None
    else:
        # Estimated conditional coverage (worst-case slab)
        wsc_coverage = coverage.wsc_unbiased(X, Y, pred, M=100)
    # Marginal length
    lengths = pred_h-pred_l
    length = np.mean(lengths)
    # Length conditional on coverage
    idx_cover = np.where(cover)[0]
    length_cover = np.mean([lengths for i in idx_cover])
    # Combine results
    out = pd.DataFrame({'Coverage': [marg_coverage], 'Conditional coverage': [wsc_coverage],
                        'Length': [length], 'Length cover': [length_cover]})
    return out

def plot_histogram(breaks, weights, S=None, fig=None, limits=None, i=0,
                   colors=None, linestyles=None, xlim=None, filename=None):
    if colors is None:
        if limits is not None:
            colors = ['tab:blue'] * len(limits)

    if linestyles is None:
        if limits is not None:
            linestyles = ['-'] * len(limits)

    if fig is None:
        fig = plt.figure()

    plt.step(breaks, weights[i], where='pre', color='black')

    if S is not None:
        idx = S[i]
        z = np.zeros(len(breaks),)
        z[idx] = weights[i,idx]
        plt.fill_between(breaks, z, step="pre", alpha=0.4, color='gray')

    if limits is not None:
        for q_idx in range(len(limits[i])):
            q = limits[i][q_idx]
            plt.axvline(q, 0, 1, linestyle=linestyles[q_idx], color=colors[q_idx])

    plt.xlabel('$Y$')
    plt.ylabel('Density')

    if xlim is not None:
        plt.xlim(xlim)

    if filename is not None:
        fig.set_size_inches(4.5, 3)
        plt.savefig(filename, bbox_inches='tight', dpi=300)

    plt.show()
[ "pandas.DataFrame", "matplotlib.pyplot.xlim", "matplotlib.pyplot.axvline", "matplotlib.pyplot.show", "matplotlib.pyplot.step", "numpy.min", "numpy.mean", "numpy.max", "chr.coverage.wsc_unbiased", "numpy.where", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "torch.from_numpy" ]
[((614, 629), 'numpy.min', 'np.min', (['pred', '(1)'], {}), '(pred, 1)\n', (620, 629), True, 'import numpy as np\n'), ((642, 657), 'numpy.max', 'np.max', (['pred', '(1)'], {}), '(pred, 1)\n', (648, 657), True, 'import numpy as np\n'), ((737, 751), 'numpy.mean', 'np.mean', (['cover'], {}), '(cover)\n', (744, 751), True, 'import numpy as np\n'), ((995, 1011), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (1002, 1011), True, 'import numpy as np\n'), ((1103, 1140), 'numpy.mean', 'np.mean', (['[lengths for i in idx_cover]'], {}), '([lengths for i in idx_cover])\n', (1110, 1140), True, 'import numpy as np\n'), ((1174, 1314), 'pandas.DataFrame', 'pd.DataFrame', (["{'Coverage': [marg_coverage], 'Conditional coverage': [wsc_coverage],\n 'Length': [length], 'Length cover': [length_cover]}"], {}), "({'Coverage': [marg_coverage], 'Conditional coverage': [\n wsc_coverage], 'Length': [length], 'Length cover': [length_cover]})\n", (1186, 1314), True, 'import pandas as pd\n'), ((1736, 1792), 'matplotlib.pyplot.step', 'plt.step', (['breaks', 'weights[i]'], {'where': '"""pre"""', 'color': '"""black"""'}), "(breaks, weights[i], where='pre', color='black')\n", (1744, 1792), True, 'import matplotlib.pyplot as plt\n'), ((2166, 2183), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Y$"""'], {}), "('$Y$')\n", (2176, 2183), True, 'import matplotlib.pyplot as plt\n'), ((2188, 2209), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (2198, 2209), True, 'import matplotlib.pyplot as plt\n'), ((2390, 2400), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2398, 2400), True, 'import matplotlib.pyplot as plt\n'), ((890, 930), 'chr.coverage.wsc_unbiased', 'coverage.wsc_unbiased', (['X', 'Y', 'pred'], {'M': '(100)'}), '(X, Y, pred, M=100)\n', (911, 930), False, 'from chr import coverage\n'), ((1065, 1080), 'numpy.where', 'np.where', (['cover'], {}), '(cover)\n', (1073, 1080), True, 'import numpy as np\n'), ((1719, 1731), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1729, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1909, 1973), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['breaks', 'z'], {'step': '"""pre"""', 'alpha': '(0.4)', 'color': '"""gray"""'}), "(breaks, z, step='pre', alpha=0.4, color='gray')\n", (1925, 1973), True, 'import matplotlib.pyplot as plt\n'), ((2244, 2258), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (2252, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2384), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(filename, bbox_inches='tight', dpi=300)\n", (2344, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2160), 'matplotlib.pyplot.axvline', 'plt.axvline', (['q', '(0)', '(1)'], {'linestyle': 'linestyles[q_idx]', 'color': 'colors[q_idx]'}), '(q, 0, 1, linestyle=linestyles[q_idx], color=colors[q_idx])\n', (2101, 2160), True, 'import matplotlib.pyplot as plt\n'), ((276, 300), 'torch.from_numpy', 'torch.from_numpy', (['X_data'], {}), '(X_data)\n', (292, 300), False, 'import torch\n'), ((331, 355), 'torch.from_numpy', 'torch.from_numpy', (['y_data'], {}), '(y_data)\n', (347, 355), False, 'import torch\n')]
from typing import List, Tuple

import numpy as np
from PIL import Image
import pytorch_lightning as pl
import torch
from torchvision.models import resnet18
from torchvision import transforms

from ml_models.model_initializer import ModelInitializer


class PredPostureNet(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.resnet = resnet18(pretrained=True)
        self.fc = torch.nn.Linear(1000, 4)

    def forward(self, x):
        h0 = self.resnet(x)
        h1 = self.fc(h0)
        return h1


class Inference:
    def __init__(self):
        BUCKET_NAME: str = 'classify-posture'
        MODEL_SOURCE_NAME: str = 'posture_4_classes_model.pt'
        MODEL_FILE_PATH: str = f'ml_models/classify_images/{MODEL_SOURCE_NAME}'
        initializer: ModelInitializer = ModelInitializer(
            BUCKET_NAME, MODEL_SOURCE_NAME, MODEL_FILE_PATH
        )
        self.net: PredPostureNet = initializer.init_model(network_class=PredPostureNet)
        self.class_names: List[str] = ['handstand', 'lying_down', 'sit', 'stand']

    def run(self, image_name: str) -> Tuple[np.ndarray, int]:
        path: str = self._image_file_path(image_name)
        image = self._prepare_image(path)
        with torch.no_grad():
            y = self.net(image)
        # NOTE: there is only one row, so index 0 drops the batch dimension
        result: np.ndarray = y.softmax(dim=-1).detach().numpy()[0]
        cls: int = np.argmax(result)
        return np.round(result, decimals=4), self.class_names[cls]

    def _image_file_path(self, image_name: str) -> str:
        return f'media/images/{image_name}'

    def _prepare_image(self, path: str):
        transform = transforms.Compose([
            # For models pretrained on ImageNet, resizing to 256 then center-cropping to 224 is the usual transform
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            # These mean/std values are the ones recommended in the official PyTorch docs
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        img = Image.open(path).convert('RGB')
        transformed_img = transform(img)
        img_torch = torch.stack([transformed_img])
        return img_torch
[ "torchvision.models.resnet18", "torch.stack", "numpy.argmax", "ml_models.model_initializer.ModelInitializer", "torchvision.transforms.ToTensor", "PIL.Image.open", "torch.nn.Linear", "torchvision.transforms.CenterCrop", "torchvision.transforms.Normalize", "torch.no_grad", "numpy.round", "torchvision.transforms.Resize" ]
[((367, 392), 'torchvision.models.resnet18', 'resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (375, 392), False, 'from torchvision.models import resnet18\n'), ((411, 435), 'torch.nn.Linear', 'torch.nn.Linear', (['(1000)', '(4)'], {}), '(1000, 4)\n', (426, 435), False, 'import torch\n'), ((805, 870), 'ml_models.model_initializer.ModelInitializer', 'ModelInitializer', (['BUCKET_NAME', 'MODEL_SOURCE_NAME', 'MODEL_FILE_PATH'], {}), '(BUCKET_NAME, MODEL_SOURCE_NAME, MODEL_FILE_PATH)\n', (821, 870), False, 'from ml_models.model_initializer import ModelInitializer\n'), ((1407, 1424), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (1416, 1424), True, 'import numpy as np\n'), ((2092, 2122), 'torch.stack', 'torch.stack', (['[transformed_img]'], {}), '([transformed_img])\n', (2103, 2122), False, 'import torch\n'), ((1236, 1251), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1249, 1251), False, 'import torch\n'), ((1440, 1468), 'numpy.round', 'np.round', (['result'], {'decimals': '(4)'}), '(result, decimals=4)\n', (1448, 1468), True, 'import numpy as np\n'), ((1741, 1763), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1758, 1763), False, 'from torchvision import transforms\n'), ((1777, 1803), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1798, 1803), False, 'from torchvision import transforms\n'), ((1817, 1838), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1836, 1838), False, 'from torchvision import transforms\n'), ((1897, 1972), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1917, 1972), False, 'from torchvision import transforms\n'), ((1999, 2015), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2009, 2015), False, 'from PIL import Image\n')]
# -*- coding: utf-8 -*-
"""Tests for PCATransformer."""
import numpy as np
import pytest

from sktime.transformations.panel.pca import PCATransformer
from sktime.utils._testing.panel import _make_nested_from_array


@pytest.mark.parametrize("bad_components", ["str", 1.2, -1.2, -1, 11])
def test_bad_input_args(bad_components):
    """Check that exception is raised for bad input args."""
    X = _make_nested_from_array(np.ones(10), n_instances=10, n_columns=1)

    if isinstance(bad_components, str):
        with pytest.raises(TypeError):
            PCATransformer(n_components=bad_components).fit(X)
    else:
        with pytest.raises(ValueError):
            PCATransformer(n_components=bad_components).fit(X)
[ "pytest.mark.parametrize", "pytest.raises", "sktime.transformations.panel.pca.PCATransformer", "numpy.ones" ]
[((217, 286), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bad_components"""', "['str', 1.2, -1.2, -1, 11]"], {}), "('bad_components', ['str', 1.2, -1.2, -1, 11])\n", (240, 286), False, 'import pytest\n'), ((421, 432), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (428, 432), True, 'import numpy as np\n'), ((517, 541), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (530, 541), False, 'import pytest\n'), ((629, 654), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (642, 654), False, 'import pytest\n'), ((555, 598), 'sktime.transformations.panel.pca.PCATransformer', 'PCATransformer', ([], {'n_components': 'bad_components'}), '(n_components=bad_components)\n', (569, 598), False, 'from sktime.transformations.panel.pca import PCATransformer\n'), ((668, 711), 'sktime.transformations.panel.pca.PCATransformer', 'PCATransformer', ([], {'n_components': 'bad_components'}), '(n_components=bad_components)\n', (682, 711), False, 'from sktime.transformations.panel.pca import PCATransformer\n')]
import numpy as np import torch from torch import nn from torch.utils.data import Dataset from tqdm import tqdm class SeqMaskGenerator(object): def __init__(self, seqconfig): self.seqconfig = seqconfig def create_enc_mask(self, enc_inp): #enc_inp = [N, inp_seq_len] # N is total number of input sequences bsize, seq_len = enc_inp.shape # #enc_mask.shape = [1, 1, inp_seq_len, inp_seq_len] # enc_mask = np.ones((1, 1, seq_len, seq_len)) # #enc_mask.shape = [1, 1, inp_seq_len, inp_seq_len] # # enc_mask = enc_mask.reshape(1, 1, seq_len, seq_len) # #enc_mask.shape = [bsize, 1, inp_seq_len, inp_seq_len] # enc_mask = np.repeat(enc_mask, bsize, axis=0) enc_mask = np.full((bsize,1, seq_len, seq_len), 1) return enc_mask def create_enc_dec_mask(self, num_samples): inp_seqlen = self.seqconfig.seq_len outp_seqlen = self.seqconfig.ewindow_end+1 # enc_dec_mask = np.ones((1,1, outp_seqlen, inp_seqlen)) # enc_dec_mask = np.repeat(enc_dec_mask, num_samples, axis=0) enc_dec_mask = np.full((num_samples, 1, outp_seqlen, inp_seqlen), 1) return enc_dec_mask def create_dec_mask(self, mask_targetbase): # dec_inp = [num_haplotypes, outcome_seq_len] # outcome_seq_len is length of haplotype outcome sequence # mask_targetbase = [num_haplotyptes, outcome_seq_len] # generate causal mask seqconfig = self.seqconfig num_haplotypes = mask_targetbase.shape[0] ewindow_st, ewindow_end = seqconfig.ewindow_st, seqconfig.ewindow_end # ewindow_st = 0 # 6-13 # print('ewindow_st:', ewindow_st, 'ewindow_end:', ewindow_end) tm = mask_targetbase[:, ewindow_st:ewindow_end+1] tindx = np.where(tm.astype(np.bool)) # print('tindx:\n', tindx) # tindx (array(), array()) representing row and column indices where mask has 1 entries target_pos_st = tindx[1][0] # give the start of target base occurence in the sequence ew_seqlen = ewindow_end - (target_pos_st + ewindow_st) + 1 # print('ew_seqlen:', ew_seqlen) sub_mask = np.ones((ew_seqlen, ew_seqlen)) sub_mask_ind = np.triu_indices(ew_seqlen, k=0) sub_mask[sub_mask_ind[0], sub_mask_ind[1]] = 0 dec_causal_mask = np.ones((ewindow_end+1,ewindow_end+1)) # print('dec_causal_mask.shape', dec_causal_mask.shape) offset = target_pos_st + ewindow_st # print('offset:',offset) for i in range(ewindow_end+1): if i < offset: dec_causal_mask[i, offset:] = 0 else: dec_causal_mask[i, offset:] = sub_mask[i-offset,:] # print('dec_causal_mask:\n', dec_causal_mask) #dec_causal_mask.shape = [1, 0:ewindow_end+1, 0:ewindow_end+1] dec_causal_mask = dec_causal_mask.reshape(1, dec_causal_mask.shape[0], dec_causal_mask.shape[1]) dec_causal_mask = np.repeat(dec_causal_mask, num_haplotypes, axis=0) return dec_causal_mask class HaplotypeDataTensor(Dataset): def __init__(self, seqconfig): self.seqconfig = seqconfig # def _encode_to_one_hot(self, mask, n_dims=None): # """ turn matrix with labels into one-hot encoding using the max number of classes detected""" # original_mask_shape = mask.shape # mask = mask.type(torch.LongTensor).view(-1, 1) # if n_dims is None: # n_dims = int(torch.max(mask)) + 1 # one_hot = torch.zeros(mask.shape[0], n_dims).scatter_(1, mask, 1) # one_hot = one_hot.view(*original_mask_shape, -1) # return one_hot def generate_tensor_from_df(self, proc_df, tb_cb_nucl, outcome_prop_col): # create the tensors we need # N is total number of input sequences print('Generating tensors using sequence config:\n', self.seqconfig) Xinp_enc = [] # tensor, (N x inp_sequence_len) Xinp_dec = [] # list of tensors, (N x num_haplotypes x outp_sequence_len) mask_inp_targetbase = [] # list of tensors, (N x num_haplotypes x 
outp_sequence_len) target_conv = [] # list of tensors, (N x num_haplotypes x outp_sequence_len) target_conv_onehot = [] # list of tensors (i.e. one-hot encoding), (N x num_haplotypes x outp_sequence_len x 2 x 1) target_prob = [] # list of tensors, (N x num_haplotypes) mask_dec = [] indx_seqid_map = {} # dict, int_id:(seqid, target_seq) inpseq_outpseq_map = {} # dict([]), int_id:[outp_seq1, out_seq2, ....] seqconfig = self.seqconfig mask_generator = SeqMaskGenerator(seqconfig) seq_len = seqconfig.seq_len tb_nucl, cb_nucl = tb_cb_nucl # target base, conversion base (i.e. A->G for ABE base editor) # C->T for CBE base editor # output sequence will be from 0:end of editable window indx for gr_name, gr_df in tqdm(proc_df.groupby(by=['seq_id', 'Inp_seq'])): Xinp_enc.append(gr_df[[f'Inp_B{i}' for i in range(1,seq_len+1)]].values[0,:]) Xinp_dec.append(gr_df[[f'Outp_B{i}' for i in range(1,seq_len+1)]].values[:,0:seqconfig.ewindow_end+1]) mask_inp_targetbase.append(gr_df[[f'Inp_M{i}' for i in range(1,seq_len+1)]].values[:,0:seqconfig.ewindow_end+1]) conv = gr_df[[f'conv{tb_nucl}{cb_nucl}_{i}' for i in range(1,seq_len+1)]].values[:,0:seqconfig.ewindow_end+1] target_conv.append(conv) if outcome_prop_col is not None: target_prob.append(gr_df[outcome_prop_col].values) # print(target_prob[-1]) # compute mask_enc and mask_dec # print(mask_targetbase[-1]) mask_dec.append(mask_generator.create_dec_mask(mask_inp_targetbase[-1])) inpseq_id = len(indx_seqid_map) indx_seqid_map[inpseq_id] = gr_name inpseq_outpseq_map[inpseq_id] = gr_df['Outp_seq'].values.tolist() mask_enc = None mask_encdec = None # tensorize print('--- tensorizing ---') device_cpu = torch.device('cpu') self.Xinp_enc = torch.tensor(Xinp_enc).long().to(device_cpu) self.Xinp_enc = self.Xinp_enc.reshape(self.Xinp_enc.shape[0], 1, self.Xinp_enc.shape[1]) self.Xinp_dec = [torch.from_numpy(arr).long().to(device_cpu) for arr in Xinp_dec] self.mask_inp_targetbase = [torch.from_numpy(arr).long().to(device_cpu) for arr in mask_inp_targetbase] self.target_conv_onehot = [torch.nn.functional.one_hot(torch.from_numpy(arr).long().to(device_cpu), num_classes=2) for arr in target_conv] if outcome_prop_col is not None: self.target_prob = [torch.from_numpy(arr).float().to(device_cpu) for arr in target_prob] else: self.target_prob = None self.mask_enc = mask_enc self.mask_encdec = mask_encdec self.mask_dec = [torch.from_numpy(arr).long().to(device_cpu) for arr in mask_dec] self.num_samples = len(self.Xinp_enc) # int, number of sequences self.indx_seqid_map = indx_seqid_map self.inpseq_outpseq_map = inpseq_outpseq_map print('--- end ---') def hap_collate(self, batch): # pack batches in a list for now # to be used in dataloader object return [item for item in batch] def __getitem__(self, indx): if self.target_prob is None: return_target_prob = None else: return_target_prob = self.target_prob[indx] return(self.Xinp_enc[indx], self.Xinp_dec[indx], self.mask_enc, self.mask_dec[indx], self.mask_encdec, self.mask_inp_targetbase[indx], self.target_conv_onehot[indx], return_target_prob, indx, self.indx_seqid_map[indx], self.inpseq_outpseq_map[indx]) def __len__(self): return(self.num_samples) class PartitionDataTensor(Dataset): def __init__(self, dtensor, partition_ids, dsettype, run_num): self.dtensor = dtensor # instance of :class:`HaplotypeDataTensor` self.partition_ids = partition_ids # list of sequence indices self.dsettype = dsettype # string, dataset type (i.e. 
train, validation, test) self.run_num = run_num # int, run number self.num_samples = len(self.partition_ids[:]) # int, number of docs in the partition def __getitem__(self, indx): target_id = self.partition_ids[indx] return self.dtensor[target_id] def __len__(self): return(self.num_samples) def print_data_example(elm): Xinp_enc, Xinp_dec, mask_enc, mask_dec, mask_encdec, mask_targetbase_enc, target_conv_onehot, target_prob, indx, seqid = elm print('Xinp_enc:\n', Xinp_enc, 'shape:', Xinp_enc.shape) print('Xinp_dec:\n',Xinp_dec, 'shape:',Xinp_dec.shape) if mask_enc is not None: print('mask_enc:\n', mask_enc, 'shape:',mask_enc.shape) print('mask_dec:\n',mask_dec, 'shape:',mask_dec.shape) if mask_encdec is not None: print('mask_encdec:\n', mask_encdec, 'shape:',mask_encdec.shape) print('mask_targetbase_enc:\n', mask_targetbase_enc,'shape:', mask_targetbase_enc.shape) print('target_conv_onehot:\n',target_conv_onehot, 'shape:',target_conv_onehot.shape) if target_prob is not None: print('target_prob:\n',target_prob, 'shape:',target_prob.shape) else: print('target_prob:None') print('indx:', indx) print('seqid:', seqid) def hap_collate(batch): # pack batches in a list for now # to be used in dataloader object return [item for item in batch]
[ "numpy.full", "torch.from_numpy", "numpy.ones", "numpy.triu_indices", "torch.device", "torch.tensor", "numpy.repeat" ]
[((762, 802), 'numpy.full', 'np.full', (['(bsize, 1, seq_len, seq_len)', '(1)'], {}), '((bsize, 1, seq_len, seq_len), 1)\n', (769, 802), True, 'import numpy as np\n'), ((1128, 1181), 'numpy.full', 'np.full', (['(num_samples, 1, outp_seqlen, inp_seqlen)', '(1)'], {}), '((num_samples, 1, outp_seqlen, inp_seqlen), 1)\n', (1135, 1181), True, 'import numpy as np\n'), ((2241, 2272), 'numpy.ones', 'np.ones', (['(ew_seqlen, ew_seqlen)'], {}), '((ew_seqlen, ew_seqlen))\n', (2248, 2272), True, 'import numpy as np\n'), ((2296, 2327), 'numpy.triu_indices', 'np.triu_indices', (['ew_seqlen'], {'k': '(0)'}), '(ew_seqlen, k=0)\n', (2311, 2327), True, 'import numpy as np\n'), ((2418, 2461), 'numpy.ones', 'np.ones', (['(ewindow_end + 1, ewindow_end + 1)'], {}), '((ewindow_end + 1, ewindow_end + 1))\n', (2425, 2461), True, 'import numpy as np\n'), ((3073, 3123), 'numpy.repeat', 'np.repeat', (['dec_causal_mask', 'num_haplotypes'], {'axis': '(0)'}), '(dec_causal_mask, num_haplotypes, axis=0)\n', (3082, 3123), True, 'import numpy as np\n'), ((6317, 6336), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6329, 6336), False, 'import torch\n'), ((6361, 6383), 'torch.tensor', 'torch.tensor', (['Xinp_enc'], {}), '(Xinp_enc)\n', (6373, 6383), False, 'import torch\n'), ((6528, 6549), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (6544, 6549), False, 'import torch\n'), ((6629, 6650), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (6645, 6650), False, 'import torch\n'), ((7178, 7199), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (7194, 7199), False, 'import torch\n'), ((6768, 6789), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (6784, 6789), False, 'import torch\n'), ((6960, 6981), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (6976, 6981), False, 'import torch\n')]
import cv2
import numpy as np


def detect(img):
    # finds and fills the located robots
    img = cv2.convertScaleAbs(img, 1, 1.5)
    structure = np.ones((3, 3))
    canny = np.copy(cv2.Canny(img, 20, 120))
    dilated = cv2.dilate(canny, structure)
    contours, hier = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    filled = cv2.drawContours(np.zeros(img.shape, dtype=np.uint8), contours, -1, 1, -1, 0, hier, 1)
    return np.copy(filled)


def get_large_contours(detect):
    # take a detection mask, and contour information add circles
    contours, hier = cv2.findContours(detect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    large_contours = []
    contour_area_minimum = 2000
    for c in contours:
        if cv2.contourArea(c) > contour_area_minimum:
            large_contours.append(c)
    return large_contours


def get_robot_angle(contour, center):
    contour = np.squeeze(np.copy(contour))
    contour -= center
    theta = np.arctan2(contour[:, 1], contour[:, 0])
    # rho = np.sqrt(contour[:, 0] ** 2 + contour[:, 1] ** 2)
    val, bin_edges = np.histogram(theta, bins=50, range=[-np.pi, np.pi])
    bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2
    return np.nanmean(np.where(val == 0, bin_centers, np.nan))


def get_robots(large_contours, detect, objective):
    # get memory
    robot_control_mask = np.zeros(detect.shape)
    large_contour_image = cv2.drawContours(np.copy(robot_control_mask), large_contours, -1, 1, -1)

    # probably needs more adjustment in the future, so will make a dict for now
    objective_calibration_dict = {'2x': 4, '4x': 2, '10x': 1, '20x': 1, '40x': 1}

    robot_angles = []
    contours_towards_center = []
    contour_range_border_limit = 100 * objective_calibration_dict[objective]
    contours_in_limits = []
    for contour in large_contours:
        xs = np.squeeze(contour)[:, 0]
        ys = np.squeeze(contour)[:, 1]
        # check that our contours are within acceptable limits, draw their circle if they are
        if np.all(xs > contour_range_border_limit) and np.all(
                xs < large_contour_image.shape[0] - contour_range_border_limit):
            if np.all(ys > contour_range_border_limit) and np.all(
                    ys < large_contour_image.shape[0] - contour_range_border_limit):
                contours_in_limits.append(contour)
                M = cv2.moments(contour)
                cx = int(M["m10"] / M["m00"])
                cy = int(M["m01"] / M["m00"])
                contours_towards_center.append(contour)
                angle = get_robot_angle(contour, (cx, cy))
                robot_angles.append(angle)

    return contours_towards_center, robot_angles


def get_robot_control(img, objective):
    detected = detect(img)
    large_contours = get_large_contours(detected)
    robots, robot_angles = get_robots(large_contours, detected, objective)
    return robots, robot_angles
[ "cv2.Canny", "cv2.contourArea", "numpy.arctan2", "numpy.copy", "cv2.dilate", "cv2.moments", "numpy.zeros", "numpy.ones", "numpy.all", "numpy.histogram", "numpy.where", "numpy.diff", "cv2.convertScaleAbs", "numpy.squeeze", "cv2.findContours" ]
[((106, 138), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['img', '(1)', '(1.5)'], {}), '(img, 1, 1.5)\n', (125, 138), False, 'import cv2\n'), ((156, 171), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (163, 171), True, 'import numpy as np\n'), ((233, 261), 'cv2.dilate', 'cv2.dilate', (['canny', 'structure'], {}), '(canny, structure)\n', (243, 261), False, 'import cv2\n'), ((284, 349), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (300, 349), False, 'import cv2\n'), ((463, 478), 'numpy.copy', 'np.copy', (['filled'], {}), '(filled)\n', (470, 478), True, 'import numpy as np\n'), ((604, 668), 'cv2.findContours', 'cv2.findContours', (['detect', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(detect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (620, 668), False, 'import cv2\n'), ((996, 1036), 'numpy.arctan2', 'np.arctan2', (['contour[:, 1]', 'contour[:, 0]'], {}), '(contour[:, 1], contour[:, 0])\n', (1006, 1036), True, 'import numpy as np\n'), ((1121, 1172), 'numpy.histogram', 'np.histogram', (['theta'], {'bins': '(50)', 'range': '[-np.pi, np.pi]'}), '(theta, bins=50, range=[-np.pi, np.pi])\n', (1133, 1172), True, 'import numpy as np\n'), ((1399, 1421), 'numpy.zeros', 'np.zeros', (['detect.shape'], {}), '(detect.shape)\n', (1407, 1421), True, 'import numpy as np\n'), ((193, 216), 'cv2.Canny', 'cv2.Canny', (['img', '(20)', '(120)'], {}), '(img, 20, 120)\n', (202, 216), False, 'import cv2\n'), ((381, 416), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.uint8'}), '(img.shape, dtype=np.uint8)\n', (389, 416), True, 'import numpy as np\n'), ((942, 958), 'numpy.copy', 'np.copy', (['contour'], {}), '(contour)\n', (949, 958), True, 'import numpy as np\n'), ((1257, 1296), 'numpy.where', 'np.where', (['(val == 0)', 'bin_centers', 'np.nan'], {}), '(val == 0, bin_centers, np.nan)\n', (1265, 1296), True, 'import numpy as np\n'), ((1466, 1493), 'numpy.copy', 'np.copy', (['robot_control_mask'], {}), '(robot_control_mask)\n', (1473, 1493), True, 'import numpy as np\n'), ((763, 781), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (778, 781), False, 'import cv2\n'), ((1209, 1227), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (1216, 1227), True, 'import numpy as np\n'), ((2047, 2066), 'numpy.squeeze', 'np.squeeze', (['contour'], {}), '(contour)\n', (2057, 2066), True, 'import numpy as np\n'), ((2087, 2106), 'numpy.squeeze', 'np.squeeze', (['contour'], {}), '(contour)\n', (2097, 2106), True, 'import numpy as np\n'), ((2220, 2259), 'numpy.all', 'np.all', (['(xs > contour_range_border_limit)'], {}), '(xs > contour_range_border_limit)\n', (2226, 2259), True, 'import numpy as np\n'), ((2264, 2334), 'numpy.all', 'np.all', (['(xs < large_contour_image.shape[0] - contour_range_border_limit)'], {}), '(xs < large_contour_image.shape[0] - contour_range_border_limit)\n', (2270, 2334), True, 'import numpy as np\n'), ((2370, 2409), 'numpy.all', 'np.all', (['(ys > contour_range_border_limit)'], {}), '(ys > contour_range_border_limit)\n', (2376, 2409), True, 'import numpy as np\n'), ((2414, 2484), 'numpy.all', 'np.all', (['(ys < large_contour_image.shape[0] - contour_range_border_limit)'], {}), '(ys < large_contour_image.shape[0] - contour_range_border_limit)\n', (2420, 2484), True, 'import numpy as np\n'), ((2581, 2601), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (2592, 2601), False, 'import cv2\n')]
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)

    # Blue color
    low_blue = np.array([94, 80, 2])
    high_blue = np.array([126, 255, 255])
    blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)
    blue = cv2.bitwise_and(frame, frame, mask=blue_mask)

    # Green color
    low_green = np.array([25, 52, 72])
    high_green = np.array([102, 255, 255])
    green_mask = cv2.inRange(hsv_frame, low_green, high_green)
    green = cv2.bitwise_and(frame, frame, mask=green_mask)

    # yellow
    low_yellow = np.array([21, 39, 64])
    high_yellow = np.array([40, 255, 255])
    yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow)
    yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask)

    # Every color except white
    low = np.array([0, 42, 0])
    high = np.array([179, 255, 255])
    mask = cv2.inRange(hsv_frame, low, high)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    #cv2.imshow("Frame", frame)
    #cv2.imshow("Red", red)
    #cv2.imshow("Blue", blue)
    cv2.imshow("Green", green)
    cv2.imshow("Yellow", yellow)
    #cv2.imshow("Result", result)

    key = cv2.waitKey(1)
    if key == 27:
        break
[ "cv2.bitwise_and", "cv2.cvtColor", "cv2.waitKey", "cv2.VideoCapture", "numpy.array", "cv2.imshow", "cv2.inRange" ]
[((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2\n'), ((112, 150), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (124, 150), False, 'import cv2\n'), ((182, 206), 'numpy.array', 'np.array', (['[161, 155, 84]'], {}), '([161, 155, 84])\n', (190, 206), True, 'import numpy as np\n'), ((222, 247), 'numpy.array', 'np.array', (['[179, 255, 255]'], {}), '([179, 255, 255])\n', (230, 247), True, 'import numpy as np\n'), ((263, 304), 'cv2.inRange', 'cv2.inRange', (['hsv_frame', 'low_red', 'high_red'], {}), '(hsv_frame, low_red, high_red)\n', (274, 304), False, 'import cv2\n'), ((315, 359), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'red_mask'}), '(frame, frame, mask=red_mask)\n', (330, 359), False, 'import cv2\n'), ((393, 414), 'numpy.array', 'np.array', (['[94, 80, 2]'], {}), '([94, 80, 2])\n', (401, 414), True, 'import numpy as np\n'), ((431, 456), 'numpy.array', 'np.array', (['[126, 255, 255]'], {}), '([126, 255, 255])\n', (439, 456), True, 'import numpy as np\n'), ((473, 516), 'cv2.inRange', 'cv2.inRange', (['hsv_frame', 'low_blue', 'high_blue'], {}), '(hsv_frame, low_blue, high_blue)\n', (484, 516), False, 'import cv2\n'), ((528, 573), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'blue_mask'}), '(frame, frame, mask=blue_mask)\n', (543, 573), False, 'import cv2\n'), ((609, 631), 'numpy.array', 'np.array', (['[25, 52, 72]'], {}), '([25, 52, 72])\n', (617, 631), True, 'import numpy as np\n'), ((649, 674), 'numpy.array', 'np.array', (['[102, 255, 255]'], {}), '([102, 255, 255])\n', (657, 674), True, 'import numpy as np\n'), ((692, 737), 'cv2.inRange', 'cv2.inRange', (['hsv_frame', 'low_green', 'high_green'], {}), '(hsv_frame, low_green, high_green)\n', (703, 737), False, 'import cv2\n'), ((750, 796), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'green_mask'}), '(frame, frame, mask=green_mask)\n', (765, 796), False, 'import cv2\n'), ((827, 849), 'numpy.array', 'np.array', (['[21, 39, 64]'], {}), '([21, 39, 64])\n', (835, 849), True, 'import numpy as np\n'), ((868, 892), 'numpy.array', 'np.array', (['[40, 255, 255]'], {}), '([40, 255, 255])\n', (876, 892), True, 'import numpy as np\n'), ((911, 958), 'cv2.inRange', 'cv2.inRange', (['hsv_frame', 'low_yellow', 'high_yellow'], {}), '(hsv_frame, low_yellow, high_yellow)\n', (922, 958), False, 'import cv2\n'), ((972, 1019), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'yellow_mask'}), '(frame, frame, mask=yellow_mask)\n', (987, 1019), False, 'import cv2\n'), ((1061, 1081), 'numpy.array', 'np.array', (['[0, 42, 0]'], {}), '([0, 42, 0])\n', (1069, 1081), True, 'import numpy as np\n'), ((1093, 1118), 'numpy.array', 'np.array', (['[179, 255, 255]'], {}), '([179, 255, 255])\n', (1101, 1118), True, 'import numpy as np\n'), ((1130, 1163), 'cv2.inRange', 'cv2.inRange', (['hsv_frame', 'low', 'high'], {}), '(hsv_frame, low, high)\n', (1141, 1163), False, 'import cv2\n'), ((1177, 1217), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (1192, 1217), False, 'import cv2\n'), ((1312, 1338), 'cv2.imshow', 'cv2.imshow', (['"""Green"""', 'green'], {}), "('Green', green)\n", (1322, 1338), False, 'import cv2\n'), ((1343, 1371), 'cv2.imshow', 'cv2.imshow', (['"""Yellow"""', 'yellow'], {}), "('Yellow', yellow)\n", (1353, 1371), False, 'import cv2\n'), ((1417, 1431), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', 
(1428, 1431), False, 'import cv2\n')]
#!/usr/bin/env python import sys import loco import tinymath as tm import numpy as np PHYSICS_BACKEND = loco.sim.PHYSICS_NONE RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0 ] TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2], 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ] TETRAHEDRON_FACES = [ 0, 1, 3, 0, 2, 1, 0, 3, 2, 1, 2, 3 ] COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 / 9.0 ] RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2], -1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ] RAMP_FACES = [ 0, 1, 2, 0, 2, 3, 0, 4, 5, 0, 5, 1, 0, 3, 7, 0, 7, 4, 2, 6, 7, 2, 7, 3, 1, 5, 6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ] def create_path_part( idx ) : height = 1.0 inner_rad = 2.0 outer_rad = 3.0 dtheta = 2.0 * np.pi / 12.0 ctheta = np.cos( dtheta * idx ) stheta = np.sin( dtheta * idx ) ctheta_n = np.cos( dtheta * ( idx + 1 ) ) stheta_n = np.sin( dtheta * ( idx + 1 ) ) half_rad = 0.5* ( inner_rad + outer_rad ) com_position = [ half_rad * np.cos( ( idx + 0.5 ) * dtheta ), half_rad * np.sin( ( idx + 0.5 ) * dtheta ), 0.5 * height ] vertices = [ inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], 0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height, inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5 * height, outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5 * height, outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height, inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height ] faces = [ 0, 1, 2, 0, 2, 3, 0, 4, 5, 0, 5, 1, 0, 3, 7, 0, 7, 4, 2, 6, 7, 2, 7, 3, 1, 5, 6, 1, 6, 2, 4, 7, 6, 4, 6, 5 ] return vertices, faces if __name__ == '__main__' : if len( sys.argv ) > 1 : choice_backend = sys.argv[1] if choice_backend == 'mujoco' : PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO elif choice_backend == 'bullet' : PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET elif choice_backend == 'dart' : PHYSICS_BACKEND = loco.sim.PHYSICS_DART elif choice_backend == 'raisim' : PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) ) print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi / 4, np.pi / 6 ] ) ) #### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] ) ) rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) ) scenario = loco.sim.Scenario() scenario.AddSingleBody( loco.sim.Plane( "floor", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) ) scenario.AddSingleBody( loco.sim.Sphere( "sphere", 0.1, [ 1.0, -1.0, 2.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( 
"tetrahedron_0", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0, [ -1.0, -1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( "tetrahedron_1", TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5, [ -1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( "ramp_0", RAMP_VERTICES, RAMP_FACES, 0.3, [ 1.0, 1.0, 1.0 ], rotation ) ) scenario.AddSingleBody( loco.sim.Mesh( "ramp_1", RAMP_VERTICES, RAMP_FACES, 0.5, [ 1.0, -1.0, 1.0 ], rotation ) ) for i in range( 0, 12 ) : height = 1.0 inner_rad = 2.0 outer_rad = 3.0 half_rad = 0.5* ( inner_rad + outer_rad ) dtheta = 2.0 * np.pi / 12.0 com_position = [ half_rad * np.cos( ( i + 0.5 ) * dtheta ), half_rad * np.sin( ( i + 0.5 ) * dtheta ), 0.5 * height ] vertices, faces = create_path_part( i ) scenario.AddSingleBody( loco.sim.Mesh( "path_part_{}".format( i ), vertices, faces, 1.0, com_position, tm.Matrix3f(), loco.sim.DynamicsType.STATIC ) ) runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND ) simulation = runtime.CreateSimulation( scenario ) visualizer = runtime.CreateVisualizer( scenario ) sphere = scenario.GetSingleBodyByName( "sphere" ) floor = scenario.GetSingleBodyByName( "floor" ) floor.drawable.texture = 'built_in_chessboard' floor.drawable.ambient = [ 0.3, 0.5, 0.7 ] floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ] floor.drawable.specular = [ 0.3, 0.5, 0.7 ] while visualizer.IsActive() : if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) : break elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) : simulation.Reset() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) : simulation.Pause() if simulation.running else simulation.Resume() elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) : sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) : sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) : sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) : sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] ) elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) : sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] ) simulation.Step() visualizer.Update() runtime.DestroySimulation() runtime.DestroyVisualizer()
[ "loco.sim.Scenario", "tinymath.Matrix3f", "numpy.sin", "tinymath.Vector3f", "loco.sim.Runtime", "numpy.cos", "loco.sim.Sphere", "loco.sim.Mesh" ]
[((1846, 1866), 'numpy.cos', 'np.cos', (['(dtheta * idx)'], {}), '(dtheta * idx)\n', (1852, 1866), True, 'import numpy as np\n'), ((1882, 1902), 'numpy.sin', 'np.sin', (['(dtheta * idx)'], {}), '(dtheta * idx)\n', (1888, 1902), True, 'import numpy as np\n'), ((1920, 1946), 'numpy.cos', 'np.cos', (['(dtheta * (idx + 1))'], {}), '(dtheta * (idx + 1))\n', (1926, 1946), True, 'import numpy as np\n'), ((1966, 1992), 'numpy.sin', 'np.sin', (['(dtheta * (idx + 1))'], {}), '(dtheta * (idx + 1))\n', (1972, 1992), True, 'import numpy as np\n'), ((4251, 4270), 'loco.sim.Scenario', 'loco.sim.Scenario', ([], {}), '()\n', (4268, 4270), False, 'import loco\n'), ((6219, 6271), 'loco.sim.Runtime', 'loco.sim.Runtime', (['PHYSICS_BACKEND', 'RENDERING_BACKEND'], {}), '(PHYSICS_BACKEND, RENDERING_BACKEND)\n', (6235, 6271), False, 'import loco\n'), ((4200, 4228), 'tinymath.Vector3f', 'tm.Vector3f', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4211, 4228), True, 'import tinymath as tm\n'), ((4397, 4455), 'loco.sim.Sphere', 'loco.sim.Sphere', (['"""sphere"""', '(0.1)', '[1.0, -1.0, 2.0]', 'rotation'], {}), "('sphere', 0.1, [1.0, -1.0, 2.0], rotation)\n", (4412, 4455), False, 'import loco\n'), ((4490, 4599), 'loco.sim.Mesh', 'loco.sim.Mesh', (['"""tetrahedron_0"""', 'TETRAHEDRON_VERTICES', 'TETRAHEDRON_FACES', '(1.0)', '[-1.0, -1.0, 1.0]', 'rotation'], {}), "('tetrahedron_0', TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 1.0,\n [-1.0, -1.0, 1.0], rotation)\n", (4503, 4599), False, 'import loco\n'), ((4759, 4867), 'loco.sim.Mesh', 'loco.sim.Mesh', (['"""tetrahedron_1"""', 'TETRAHEDRON_VERTICES', 'TETRAHEDRON_FACES', '(0.5)', '[-1.0, 1.0, 1.0]', 'rotation'], {}), "('tetrahedron_1', TETRAHEDRON_VERTICES, TETRAHEDRON_FACES, 0.5,\n [-1.0, 1.0, 1.0], rotation)\n", (4772, 4867), False, 'import loco\n'), ((5027, 5113), 'loco.sim.Mesh', 'loco.sim.Mesh', (['"""ramp_0"""', 'RAMP_VERTICES', 'RAMP_FACES', '(0.3)', '[1.0, 1.0, 1.0]', 'rotation'], {}), "('ramp_0', RAMP_VERTICES, RAMP_FACES, 0.3, [1.0, 1.0, 1.0],\n rotation)\n", (5040, 5113), False, 'import loco\n'), ((5273, 5360), 'loco.sim.Mesh', 'loco.sim.Mesh', (['"""ramp_1"""', 'RAMP_VERTICES', 'RAMP_FACES', '(0.5)', '[1.0, -1.0, 1.0]', 'rotation'], {}), "('ramp_1', RAMP_VERTICES, RAMP_FACES, 0.5, [1.0, -1.0, 1.0],\n rotation)\n", (5286, 5360), False, 'import loco\n'), ((2076, 2104), 'numpy.cos', 'np.cos', (['((idx + 0.5) * dtheta)'], {}), '((idx + 0.5) * dtheta)\n', (2082, 2104), True, 'import numpy as np\n'), ((2142, 2170), 'numpy.sin', 'np.sin', (['((idx + 0.5) * dtheta)'], {}), '((idx + 0.5) * dtheta)\n', (2148, 2170), True, 'import numpy as np\n'), ((4336, 4349), 'tinymath.Vector3f', 'tm.Vector3f', ([], {}), '()\n', (4347, 4349), True, 'import tinymath as tm\n'), ((4351, 4364), 'tinymath.Matrix3f', 'tm.Matrix3f', ([], {}), '()\n', (4362, 4364), True, 'import tinymath as tm\n'), ((5715, 5741), 'numpy.cos', 'np.cos', (['((i + 0.5) * dtheta)'], {}), '((i + 0.5) * dtheta)\n', (5721, 5741), True, 'import numpy as np\n'), ((5783, 5809), 'numpy.sin', 'np.sin', (['((i + 0.5) * dtheta)'], {}), '((i + 0.5) * dtheta)\n', (5789, 5809), True, 'import numpy as np\n'), ((6109, 6122), 'tinymath.Matrix3f', 'tm.Matrix3f', ([], {}), '()\n', (6120, 6122), True, 'import tinymath as tm\n')]
''' Present an interactive function explorer with slider widgets.
Scrub the sliders to change the properties of the ``sin`` curve, or
type into the title text box to update the title of the plot.
Use the ``bokeh serve`` command to run the example by executing:
    bokeh serve sliders.py
at your command prompt. Then navigate to the URL
    http://localhost:5006/sliders
in your browser.
'''
import numpy as np

from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Slider, TextInput
from bokeh.plotting import figure, output_file, show
from sklearn import datasets
import hdbscan
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

### SET UP THE DATA ###
n_samples = 1500
random_state = 170

# Dataset #1
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
X1, Y1 = X[:,0], X[:,1]

# Dataset #2
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
X2, Y2 = X_aniso[:,0], X_aniso[:,1]

# Dataset #3
X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state)
X3, Y3 = X_varied[:,0], X_varied[:,1]

# Dataset #4
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
X4, Y4 = X_filtered[:,0], X_filtered[:,1]

source1 = ColumnDataSource(data=dict(X=X1, Y=Y1))
source2 = ColumnDataSource(data=dict(X=X2, Y=Y2))
source3 = ColumnDataSource(data=dict(X=X3, Y=Y3))
source4 = ColumnDataSource(data=dict(X=X4, Y=Y4))
print(source1, source2, source3, source4)

### Set up Plot
plot = figure(plot_height=400, plot_width=400, title="Clusters",
       tools="crosshair,pan,reset,save,wheel_zoom",
       x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.scatter('X', 'Y', source=source1)  #, line_width=3, line_alpha=0.6)
show(plot)
output_file('clustering.html')

'''
# Set up data
N = 200
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))

# Set up plot
plot = figure(plot_height=400, plot_width=400, title="my sine wave",
       tools="crosshair,pan,reset,save,wheel_zoom",
       x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)

# Set up widgets
text = TextInput(title="title", value='my sine wave')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.1)
phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)

# Set up callbacks
def update_title(attrname, old, new):
    plot.title.text = text.value

text.on_change('value', update_title)

def update_data(attrname, old, new):
    # Get the current slider values
    a = amplitude.value
    b = offset.value
    w = phase.value
    k = freq.value

    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = a*np.sin(k*x + w) + b

    source.data = dict(x=x, y=y)

for w in [offset, amplitude, phase, freq]:
    w.on_change('value', update_data)

# Set up layouts and add to document
inputs = column(text, offset, amplitude, phase, freq)

curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sliders"
'''
[ "bokeh.plotting.figure", "sklearn.datasets.make_blobs", "bokeh.plotting.output_file", "bokeh.plotting.show", "numpy.dot", "numpy.vstack" ]
[((828, 886), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_samples', 'random_state': 'random_state'}), '(n_samples=n_samples, random_state=random_state)\n', (838, 886), False, 'from sklearn.datasets import make_blobs\n'), ((1008, 1033), 'numpy.dot', 'np.dot', (['X', 'transformation'], {}), '(X, transformation)\n', (1014, 1033), True, 'import numpy as np\n'), ((1105, 1197), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_samples', 'cluster_std': '[1.0, 2.5, 0.5]', 'random_state': 'random_state'}), '(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=\n random_state)\n', (1115, 1197), False, 'from sklearn.datasets import make_blobs\n'), ((1323, 1384), 'numpy.vstack', 'np.vstack', (['(X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])'], {}), '((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))\n', (1332, 1384), True, 'import numpy as np\n'), ((1696, 1853), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': '(400)', 'plot_width': '(400)', 'title': '"""Clusters"""', 'tools': '"""crosshair,pan,reset,save,wheel_zoom"""', 'x_range': '[0, 4 * np.pi]', 'y_range': '[-2.5, 2.5]'}), "(plot_height=400, plot_width=400, title='Clusters', tools=\n 'crosshair,pan,reset,save,wheel_zoom', x_range=[0, 4 * np.pi], y_range=\n [-2.5, 2.5])\n", (1702, 1853), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1941, 1951), 'bokeh.plotting.show', 'show', (['plot'], {}), '(plot)\n', (1945, 1951), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1952, 1982), 'bokeh.plotting.output_file', 'output_file', (['"""clustering.html"""'], {}), "('clustering.html')\n", (1963, 1982), False, 'from bokeh.plotting import figure, output_file, show\n')]
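For each row, the apis column appears to hold the distinct fully-qualified names occurring in that row's extract_api entries; the bokeh row above lists six names, matching the six distinct APIs across its seven call records, although the ordering does not obviously follow first occurrence. A minimal sketch of that relationship, assuming only that the qualified name is the second field of each entry:

def unique_apis(extract_api_entries):
    """Return the distinct qualified API names, keeping first-seen order."""
    seen = []
    for entry in extract_api_entries:
        qualified = entry[1]  # second field: the dotted API path
        if qualified not in seen:
            seen.append(qualified)
    return seen

# Truncated entries from the row above, enough for this helper:
entries = [
    ((828, 886), 'sklearn.datasets.make_blobs', 'make_blobs'),
    ((1105, 1197), 'sklearn.datasets.make_blobs', 'make_blobs'),
    ((1696, 1853), 'bokeh.plotting.figure', 'figure'),
]
print(unique_apis(entries))  # ['sklearn.datasets.make_blobs', 'bokeh.plotting.figure']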
import numpy as np import xarray as xr from glob import glob import observation_operators as obs import tropomi_tools as tt import scipy.linalg as la import toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from numpy files raise NotImplementedError else: #Assume list of strings errs = np.array([float(e) for e in err_config]) #Provide a list of observation operator classes in order of the species to assimilate. obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. if data['SIMULATE_NATURE'] == "false": raise NotImplementedError #No support for real observations yet! else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data from GEOS-Chem restart files and #emissions scaling factor netCDFs. After initialization it contains the necessary data #and can output it in useful ways to other functions in the LETKF procedure. class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f"GC_translator number {self.num} has been called for directory {path_to_rundir} and restart {self.filename}; construction beginning") self.emis_ds_list = {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f"GC_translator number {self.num} has loaded scaling factors for {name}") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until state vector is initialized this variable is None if self.testing: print(f"GC_Translator number {self.num} construction complete.") #Since only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f"GC_Translator number {self.num} got 3D conc for species {species} which are of dimension {np.shape(da)}.") return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f"GC_Translator number {self.num} set 3D conc for species {species} which are of dimension {np.shape(conc4d)}.") self.restart_ds[f'SpeciesRst_{species}'] = (["time","lev","lat","lon"],conc4d,{"long_name":f"Dry mixing ratio of species {species}","units":"mol mol-1 dry","averaging_method":"instantaneous"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def 
getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. Rest are just for archival purposes. def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end of the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds = xr.Dataset( {"Scalar": (("time","lat","lon"), np.expand_dims(emis2d,axis = 0),{"long_name": "Scaling factor", "units":"1"})}, coords={ "time": (["time"], np.array([new_last_time]), {"long_name": "time", "calendar": "standard", "units":f"hours since {orig_timestamp} 00:00:00"}), "lat": (["lat"], self.getEmisLat(species),{"long_name": "Latitude", "units":"degrees_north"}), "lon": (["lon"], self.getEmisLon(species),{"long_name": "Longitude", "units":"degrees_east"}) }, attrs={ "Title":"CHEEREIO scaling factors", "Conventions":"COARDS", "Format":"NetCDF-4", "Model":"GENERIC", "NLayers":"1", "History":f"The LETKF utility added new scaling factors on {str(date.today())}", "Start_Date":f"{orig_timestamp}", "Start_Time":"0", "End_Date":f"{end_timestamp}", "End_Time":"0" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print("*****************************************************************") print(f"GC_Translator number {self.num} is starting build of statevector!") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because this is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f"GC_Translator number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.") print("*****************************************************************") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f"GC_Translator is getting localized statevec 
indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f"GC_Translator is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return None #If loop doesn't terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, 
surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.") print(f"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column; values are {dummywhere_match}") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in the column.") print(f"Matched value in the overall flattened and subsetted square is {dummy2dwhere_match}") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec if self.testing: print(f"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.") return statevec_toreturn #Randomize the restart for purposes of testing. Perturbation is 1/2 of range of percent change selected from a uniform distribution. #E.g. 0.1 would range from 90% to 110% of initial values. Bias adds that percent on top of the perturbed fields (0.1 raises everything 10%). #Repeats this procedure for every species in the state vector (excluding emissions). def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis vector and overwrite relevant terms in the xr restart dataset. 
#Also construct new scaling factors and add them as a separate array at the new timestep in each of the scaling factor netCDFs. #However, only do so for species in the control vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just increment. index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds["time"] = (["time"], np.array([0]), {"long_name": "Time", "calendar": "gregorian", "axis":"T", "units":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects them with the main state vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], "%Y%m%d_%H%M") for spc in specconc_list] if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], "%Y%m%d_%H%M") for le in le_list] le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = 
xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface with satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs" subdirs = glob(f"{path_to_ensemble}/*/") subdirs.remove(f"{path_to_ensemble}/logs/") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, "%Y%m%d_%H%M") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', "%Y%m%d_%H%M") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. 
def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']=="True": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']=="True": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']=="True": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs = [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']=="True": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns, update restarts, and diff columns. 
class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs" self.path_to_scratch = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f"{path_to_ensemble}/*/") subdirs.remove(f"{path_to_ensemble}/logs/") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = 
self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every run directory. #In the special case where there is a nature run present (with number 0) #store the nature run in GC_Translator object nature. #Also contains an observation operator (pass in the class you would like to use) for each species to assimilate. #Class contains function to calculate relvant assimilation variables. #SPECIAL NOTE ON FILES: we will be assuming that geos-chem stopped and left a restart at assimilation time in each run directory. #That restart will be overwritten in place (name not changed) so next run starts from the assimilation state vector. #Emissions scaling factors are most recent available (one assimilation timestep ago). New values will be appended to netCDF. 
class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction beginning") print(f"This core will be handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs" self.path_to_scratch = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f"{path_to_ensemble}/*/") subdirs.remove(f"{path_to_ensemble}/logs/") dirnames = [d.split('/')[-2] for d in subdirs] if self.testing: print(f"The following ensemble directories were detected: {dirnames}") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config["MinimumScalingFactorAllowed"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config["MaximumScalingFactorAllowed"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config["InflateScalingsToXOfPreviousStandardDeviation"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config["MaximumScaleFactorRelativeChangePerAssimilationPeriod"]] self.AveragePriorAndPosterior = spc_config["AveragePriorAndPosterior"] == "True" self.PriorWeightinPriorPosteriorAverage = float(spc_config["PriorWeightinPriorPosteriorAverage"]) self.forceOverrideNature=True #Set to true to ignore existing nature directory. Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f"Begin creating GC Translators with state vectors.") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f"GC Translators created. 
Ensemble number list: {self.ensemble_numbers}") if self.nature is None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f"Assimilator construction complete") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who should always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) 
conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f"Making R for lat/lon inds {(latind,lonind)}.") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator. It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = [email protected]@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the background standard deviation, per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f"LETKF called! 
Beginning loop.") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True) analysisSubset = self.applyAnalysisCorrections(analysisSubset,backgroundSubset) self.saveColumn(latval,lonval,analysisSubset)
[ "toolbox.getSpeciesConfig", "numpy.load", "numpy.sum", "toolbox.getLatLonList", "numpy.abs", "numpy.ones", "numpy.isnan", "numpy.shape", "numpy.mean", "numpy.arange", "glob.glob", "toolbox.calcDist_km", "toolbox.getIndsOfInterest", "observation_operators.NatureHelper", "numpy.std", "numpy.transpose", "numpy.identity", "xarray.merge", "scipy.linalg.inv", "numpy.reshape", "numpy.random.choice", "scipy.linalg.block_diag", "toolbox.getLatLonVals", "datetime.date.today", "xarray.concat", "datetime.datetime.strptime", "scipy.linalg.sqrtm", "numpy.concatenate", "tropomi_tools.TROPOMI_Translator", "numpy.datetime64", "numpy.zeros", "numpy.expand_dims", "numpy.where", "numpy.array", "numpy.sign", "xarray.load_dataset", "numpy.in1d" ]
[((261, 289), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['testing'], {}), '(testing)\n', (280, 289), True, 'import toolbox as tx\n'), ((1749, 1779), 'xarray.load_dataset', 'xr.load_dataset', (['self.filename'], {}), '(self.filename)\n', (1764, 1779), True, 'import xarray as xr\n'), ((1807, 1848), 'glob.glob', 'glob', (['f"""{path_to_rundir}*_SCALEFACTOR.nc"""'], {}), "(f'{path_to_rundir}*_SCALEFACTOR.nc')\n", (1811, 1848), False, 'from glob import glob\n'), ((2989, 3005), 'numpy.shape', 'np.shape', (['conc3d'], {}), '(conc3d)\n', (2997, 3005), True, 'import numpy as np\n'), ((3439, 3471), 'numpy.array', 'np.array', (["self.restart_ds['lat']"], {}), "(self.restart_ds['lat'])\n", (3447, 3471), True, 'import numpy as np\n'), ((3500, 3532), 'numpy.array', 'np.array', (["self.restart_ds['lon']"], {}), "(self.restart_ds['lon'])\n", (3508, 3532), True, 'import numpy as np\n'), ((3561, 3593), 'numpy.array', 'np.array', (["self.restart_ds['lev']"], {}), "(self.restart_ds['lev'])\n", (3569, 3593), True, 'import numpy as np\n'), ((3630, 3663), 'numpy.array', 'np.array', (["self.restart_ds['time']"], {}), "(self.restart_ds['time'])\n", (3638, 3663), True, 'import numpy as np\n'), ((3986, 4029), 'numpy.array', 'np.array', (["self.emis_ds_list[species]['lat']"], {}), "(self.emis_ds_list[species]['lat'])\n", (3994, 4029), True, 'import numpy as np\n'), ((4071, 4114), 'numpy.array', 'np.array', (["self.emis_ds_list[species]['lon']"], {}), "(self.emis_ds_list[species]['lon'])\n", (4079, 4114), True, 'import numpy as np\n'), ((4563, 4582), 'numpy.datetime64', 'np.datetime64', (['tstr'], {}), '(tstr)\n', (4576, 4582), True, 'import numpy as np\n'), ((5956, 6011), 'xarray.concat', 'xr.concat', (['[self.emis_ds_list[species], ds]'], {'dim': '"""time"""'}), "([self.emis_ds_list[species], ds], dim='time')\n", (5965, 6011), True, 'import xarray as xr\n'), ((6249, 6282), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (6268, 6282), True, 'import toolbox as tx\n'), ((6933, 6968), 'numpy.concatenate', 'np.concatenate', (['statevec_components'], {}), '(statevec_components)\n', (6947, 6968), True, 'import numpy as np\n'), ((7268, 7326), 'toolbox.getIndsOfInterest', 'tx.getIndsOfInterest', (['latind', 'lonind'], {'testing': 'self.testing'}), '(latind, lonind, testing=self.testing)\n', (7288, 7326), True, 'import toolbox as tx\n'), ((8237, 8270), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (8256, 8270), True, 'import toolbox as tx\n'), ((8663, 8692), 'numpy.concatenate', 'np.concatenate', (['ind_collector'], {}), '(ind_collector)\n', (8677, 8692), True, 'import numpy as np\n'), ((9642, 9675), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (9661, 9675), True, 'import toolbox as tx\n'), ((10076, 10105), 'numpy.concatenate', 'np.concatenate', (['ind_collector'], {}), '(ind_collector)\n', (10090, 10105), True, 'import numpy as np\n'), ((10353, 10386), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (10372, 10386), True, 'import toolbox as tx\n'), ((10740, 10773), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (10759, 10773), True, 'import toolbox as tx\n'), ((11136, 11194), 'toolbox.getIndsOfInterest', 'tx.getIndsOfInterest', (['latind', 'lonind'], {'testing': 'self.testing'}), '(latind, lonind, testing=self.testing)\n', (11156, 11194), True, 'import toolbox as tx\n'), ((12656, 
12689), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (12675, 12689), True, 'import toolbox as tx\n'), ((13128, 13157), 'numpy.concatenate', 'np.concatenate', (['ind_collector'], {}), '(ind_collector)\n', (13142, 13157), True, 'import numpy as np\n'), ((15023, 15056), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (15042, 15056), True, 'import toolbox as tx\n'), ((16926, 16959), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (16945, 16959), True, 'import toolbox as tx\n'), ((17138, 17188), 'glob.glob', 'glob', (['f"""{self.hist_dir}/GEOSChem.SpeciesConc*.nc4"""'], {}), "(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4')\n", (17142, 17188), False, 'from glob import glob\n'), ((18541, 18558), 'xarray.merge', 'xr.merge', (['dataset'], {}), '(dataset)\n', (18549, 18558), True, 'import xarray as xr\n'), ((18825, 18858), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (18844, 18858), True, 'import toolbox as tx\n'), ((18968, 18998), 'glob.glob', 'glob', (['f"""{path_to_ensemble}/*/"""'], {}), "(f'{path_to_ensemble}/*/')\n", (18972, 18998), False, 'from glob import glob\n'), ((19191, 19234), 'datetime.datetime.strptime', 'datetime.strptime', (['timestamp', '"""%Y%m%d_%H%M"""'], {}), "(timestamp, '%Y%m%d_%H%M')\n", (19208, 19234), False, 'from datetime import date, datetime, timedelta\n'), ((19944, 19970), 'numpy.array', 'np.array', (['ensemble_numbers'], {}), '(ensemble_numbers)\n', (19952, 19970), True, 'import numpy as np\n'), ((21071, 21094), 'scipy.linalg.block_diag', 'la.block_diag', (['*errmats'], {}), '(*errmats)\n', (21084, 21094), True, 'import scipy.linalg as la\n'), ((21600, 21611), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (21608, 21611), True, 'import numpy as np\n'), ((21724, 21741), 'numpy.zeros', 'np.zeros', (['shape2D'], {}), '(shape2D)\n', (21732, 21741), True, 'import numpy as np\n'), ((22493, 22540), 'toolbox.getLatLonVals', 'tx.getLatLonVals', (['self.spc_config', 'self.testing'], {}), '(self.spc_config, self.testing)\n', (22509, 22540), True, 'import toolbox as tx\n'), ((23578, 23602), 'numpy.concatenate', 'np.concatenate', (['obsmeans'], {}), '(obsmeans)\n', (23592, 23602), True, 'import numpy as np\n'), ((23621, 23653), 'numpy.concatenate', 'np.concatenate', (['obsperts'], {'axis': '(0)'}), '(obsperts, axis=0)\n', (23635, 23653), True, 'import numpy as np\n'), ((23673, 23697), 'numpy.concatenate', 'np.concatenate', (['obsdiffs'], {}), '(obsdiffs)\n', (23687, 23697), True, 'import numpy as np\n'), ((23992, 24025), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (24011, 24025), True, 'import toolbox as tx\n'), ((24219, 24275), 'glob.glob', 'glob', (['f"""{self.path_to_scratch}/**/*.npy"""'], {'recursive': '(True)'}), "(f'{self.path_to_scratch}/**/*.npy', recursive=True)\n", (24223, 24275), False, 'from glob import glob\n'), ((24471, 24501), 'glob.glob', 'glob', (['f"""{path_to_ensemble}/*/"""'], {}), "(f'{path_to_ensemble}/*/')\n", (24475, 24501), False, 'from glob import glob\n'), ((25083, 25109), 'numpy.array', 'np.array', (['ensemble_numbers'], {}), '(ensemble_numbers)\n', (25091, 25109), True, 'import numpy as np\n'), ((29821, 29868), 'toolbox.getLatLonList', 'tx.getLatLonList', (['ensnum', 'corenum', 'self.testing'], {}), '(ensnum, corenum, self.testing)\n', (29837, 29868), True, 'import toolbox as tx\n'), ((30139, 30172), 
'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (30158, 30172), True, 'import toolbox as tx\n'), ((30426, 30456), 'glob.glob', 'glob', (['f"""{path_to_ensemble}/*/"""'], {}), "(f'{path_to_ensemble}/*/')\n", (30430, 30456), False, 'from glob import glob\n'), ((32092, 32118), 'numpy.array', 'np.array', (['ensemble_numbers'], {}), '(ensemble_numbers)\n', (32100, 32118), True, 'import numpy as np\n'), ((34162, 34188), 'numpy.mean', 'np.mean', (['statevecs'], {'axis': '(1)'}), '(statevecs, axis=1)\n', (34169, 34188), True, 'import numpy as np\n'), ((35065, 35089), 'numpy.concatenate', 'np.concatenate', (['obsmeans'], {}), '(obsmeans)\n', (35079, 35089), True, 'import numpy as np\n'), ((35108, 35140), 'numpy.concatenate', 'np.concatenate', (['obsperts'], {'axis': '(0)'}), '(obsperts, axis=0)\n', (35122, 35140), True, 'import numpy as np\n'), ((35160, 35184), 'numpy.concatenate', 'np.concatenate', (['obsdiffs'], {}), '(obsdiffs)\n', (35174, 35184), True, 'import numpy as np\n'), ((35773, 35784), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (35781, 35784), True, 'import numpy as np\n'), ((35802, 35819), 'numpy.shape', 'np.shape', (['first3D'], {}), '(first3D)\n', (35810, 35819), True, 'import numpy as np\n'), ((35903, 35920), 'numpy.zeros', 'np.zeros', (['shape4D'], {}), '(shape4D)\n', (35911, 35920), True, 'import numpy as np\n'), ((38570, 38588), 'scipy.linalg.inv', 'la.inv', (['(iden + cyb)'], {}), '(iden + cyb)\n', (38576, 38588), True, 'import scipy.linalg as la\n'), ((38814, 38853), 'scipy.linalg.sqrtm', 'la.sqrtm', (['((k - 1) * self.PtildeAnalysis)'], {}), '((k - 1) * self.PtildeAnalysis)\n', (38822, 38853), True, 'import scipy.linalg as la\n'), ((2233, 2254), 'xarray.load_dataset', 'xr.load_dataset', (['file'], {}), '(file)\n', (2248, 2254), True, 'import xarray as xr\n'), ((4902, 4935), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4921, 4935), True, 'import toolbox as tx\n'), ((14320, 14353), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (14339, 14353), True, 'import toolbox as tx\n'), ((15951, 15991), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter]'], {}), '(self.statevec_lengths[0:counter])\n', (15957, 15991), True, 'import numpy as np\n'), ((16007, 16051), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter + 1]'], {}), '(self.statevec_lengths[0:counter + 1])\n', (16013, 16051), True, 'import numpy as np\n'), ((16134, 16173), 'numpy.reshape', 'np.reshape', (['analysis_subset', 'emis_shape'], {}), '(analysis_subset, emis_shape)\n', (16144, 16173), True, 'import numpy as np\n'), ((16362, 16375), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (16370, 16375), True, 'import numpy as np\n'), ((17607, 17660), 'glob.glob', 'glob', (['f"""{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4"""'], {}), "(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4')\n", (17611, 17660), False, 'from glob import glob\n'), ((19313, 19367), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{START_DATE}_0000"""', '"""%Y%m%d_%H%M"""'], {}), "(f'{START_DATE}_0000', '%Y%m%d_%H%M')\n", (19330, 19367), False, 'from datetime import date, datetime, timedelta\n'), ((22729, 22757), 'numpy.where', 'np.where', (['(distvec <= loc_rad)'], {}), '(distvec <= loc_rad)\n', (22737, 22757), True, 'import numpy as np\n'), ((22799, 22849), 'numpy.random.choice', 'np.random.choice', (['inds', 'self.maxobs'], {'replace': '(False)'}), '(inds, self.maxobs, 
replace=False)\n', (22815, 22849), True, 'import numpy as np\n'), ((23312, 23334), 'numpy.mean', 'np.mean', (['gccol'], {'axis': '(1)'}), '(gccol, axis=1)\n', (23319, 23334), True, 'import numpy as np\n'), ((24361, 24374), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (24368, 24374), True, 'import numpy as np\n'), ((32593, 32714), 'observation_operators.NatureHelper', 'obs.NatureHelper', (['self.nature', 'self.observed_species', 'nature_h_functions', 'error_multipliers_or_matrices', 'self.testing'], {}), '(self.nature, self.observed_species, nature_h_functions,\n error_multipliers_or_matrices, self.testing)\n', (32609, 32714), True, 'import observation_operators as obs\n'), ((34233, 34252), 'numpy.shape', 'np.shape', (['statevecs'], {}), '(statevecs)\n', (34241, 34252), True, 'import numpy as np\n'), ((38070, 38093), 'scipy.linalg.block_diag', 'la.block_diag', (['*errmats'], {}), '(*errmats)\n', (38083, 38093), True, 'import scipy.linalg as la\n'), ((38231, 38266), 'numpy.transpose', 'np.transpose', (['self.Ypert_background'], {}), '(self.Ypert_background)\n', (38243, 38266), True, 'import numpy as np\n'), ((38269, 38283), 'scipy.linalg.inv', 'la.inv', (['self.R'], {}), '(self.R)\n', (38275, 38283), True, 'import scipy.linalg as la\n'), ((39558, 39589), 'numpy.shape', 'np.shape', (['self.Xpert_background'], {}), '(self.Xpert_background)\n', (39566, 39589), True, 'import numpy as np\n'), ((2719, 2769), 'numpy.array', 'np.array', (["self.restart_ds[f'SpeciesRst_{species}']"], {}), "(self.restart_ds[f'SpeciesRst_{species}'])\n", (2727, 2769), True, 'import numpy as np\n'), ((4588, 4621), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4607, 4621), True, 'import toolbox as tx\n'), ((4664, 4697), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4683, 4697), True, 'import toolbox as tx\n'), ((4742, 4775), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4761, 4775), True, 'import toolbox as tx\n'), ((6677, 6693), 'numpy.ones', 'np.ones', (['lenones'], {}), '(lenones)\n', (6684, 6693), True, 'import numpy as np\n'), ((7728, 7752), 'numpy.arange', 'np.arange', (['(0)', 'totalcount'], {}), '(0, totalcount)\n', (7737, 7752), True, 'import numpy as np\n'), ((7978, 8011), 'numpy.arange', 'np.arange', (['(0)', '(latcount * loncount)'], {}), '(0, latcount * loncount)\n', (7987, 8011), True, 'import numpy as np\n'), ((9170, 9194), 'numpy.arange', 'np.arange', (['(0)', 'totalcount'], {}), '(0, totalcount)\n', (9179, 9194), True, 'import numpy as np\n'), ((9408, 9441), 'numpy.arange', 'np.arange', (['(0)', '(latcount * loncount)'], {}), '(0, latcount * loncount)\n', (9417, 9441), True, 'import numpy as np\n'), ((9982, 10024), 'numpy.array', 'np.array', (['[dummy2dwhere_flat + cur_offset]'], {}), '([dummy2dwhere_flat + cur_offset])\n', (9990, 10024), True, 'import numpy as np\n'), ((10504, 10548), 'numpy.arange', 'np.arange', (['cur_offset', '(cur_offset + levcount)'], {}), '(cur_offset, cur_offset + levcount)\n', (10513, 10548), True, 'import numpy as np\n'), ((11593, 11617), 'numpy.arange', 'np.arange', (['(0)', 'totalcount'], {}), '(0, totalcount)\n', (11602, 11617), True, 'import numpy as np\n'), ((11815, 11863), 'numpy.in1d', 'np.in1d', (['dummywhere_flat', 'dummywhere_flat_column'], {}), '(dummywhere_flat, dummywhere_flat_column)\n', (11822, 11863), True, 'import numpy as np\n'), ((12138, 12171), 'numpy.arange', 'np.arange', (['(0)', '(latcount * 
loncount)'], {}), '(0, latcount * loncount)\n', (12147, 12171), True, 'import numpy as np\n'), ((12350, 12402), 'numpy.in1d', 'np.in1d', (['dummy2dwhere_flat', 'dummy2dwhere_flat_column'], {}), '(dummy2dwhere_flat, dummy2dwhere_flat_column)\n', (12357, 12402), True, 'import numpy as np\n'), ((15477, 15517), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter]'], {}), '(self.statevec_lengths[0:counter])\n', (15483, 15517), True, 'import numpy as np\n'), ((15534, 15578), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter + 1]'], {}), '(self.statevec_lengths[0:counter + 1])\n', (15540, 15578), True, 'import numpy as np\n'), ((15658, 15700), 'numpy.reshape', 'np.reshape', (['analysis_subset', 'restart_shape'], {}), '(analysis_subset, restart_shape)\n', (15668, 15700), True, 'import numpy as np\n'), ((18269, 18298), 'xarray.merge', 'xr.merge', (['[hist_val, lev_val]'], {}), '([hist_val, lev_val])\n', (18277, 18298), True, 'import xarray as xr\n'), ((20339, 20374), 'tropomi_tools.TROPOMI_Translator', 'tt.TROPOMI_Translator', (['self.testing'], {}), '(self.testing)\n', (20360, 20374), True, 'import tropomi_tools as tt\n'), ((22616, 22652), 'toolbox.calcDist_km', 'tx.calcDist_km', (['latval', 'lonval', 'a', 'b'], {}), '(latval, lonval, a, b)\n', (22630, 22652), True, 'import toolbox as tx\n'), ((23356, 23371), 'numpy.shape', 'np.shape', (['gccol'], {}), '(gccol)\n', (23364, 23371), True, 'import numpy as np\n'), ((26694, 26713), 'numpy.shape', 'np.shape', (['saved_col'], {}), '(saved_col)\n', (26702, 26713), True, 'import numpy as np\n'), ((34271, 34285), 'numpy.shape', 'np.shape', (['bigX'], {}), '(bigX)\n', (34279, 34285), True, 'import numpy as np\n'), ((38512, 38526), 'numpy.identity', 'np.identity', (['k'], {}), '(k)\n', (38523, 38526), True, 'import numpy as np\n'), ((40156, 40199), 'numpy.shape', 'np.shape', (['self.Xpert_background[colinds, :]'], {}), '(self.Xpert_background[colinds, :])\n', (40164, 40199), True, 'import numpy as np\n'), ((40917, 40935), 'numpy.isnan', 'np.isnan', (['inflator'], {}), '(inflator)\n', (40925, 40935), True, 'import numpy as np\n'), ((40956, 40989), 'numpy.std', 'np.std', (['analysisScalefactor[i, :]'], {}), '(analysisScalefactor[i, :])\n', (40962, 40989), True, 'import numpy as np\n'), ((41010, 41045), 'numpy.std', 'np.std', (['backgroundScalefactor[i, :]'], {}), '(backgroundScalefactor[i, :])\n', (41016, 41045), True, 'import numpy as np\n'), ((41539, 41558), 'numpy.isnan', 'np.isnan', (['maxchange'], {}), '(maxchange)\n', (41547, 41558), True, 'import numpy as np\n'), ((41955, 42000), 'numpy.isnan', 'np.isnan', (['self.MinimumScalingFactorAllowed[i]'], {}), '(self.MinimumScalingFactorAllowed[i])\n', (41963, 42000), True, 'import numpy as np\n'), ((42180, 42225), 'numpy.isnan', 'np.isnan', (['self.MaximumScalingFactorAllowed[i]'], {}), '(self.MaximumScalingFactorAllowed[i])\n', (42188, 42225), True, 'import numpy as np\n'), ((3048, 3061), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3056, 3061), True, 'import numpy as np\n'), ((3914, 3926), 'numpy.array', 'np.array', (['da'], {}), '(da)\n', (3922, 3926), True, 'import numpy as np\n'), ((5128, 5158), 'numpy.expand_dims', 'np.expand_dims', (['emis2d'], {'axis': '(0)'}), '(emis2d, axis=0)\n', (5142, 5158), True, 'import numpy as np\n'), ((18150, 18175), 'xarray.load_dataset', 'xr.load_dataset', (['specfile'], {}), '(specfile)\n', (18165, 18175), True, 'import xarray as xr\n'), ((18216, 18239), 'xarray.load_dataset', 'xr.load_dataset', (['lefile'], {}), '(lefile)\n', (18231, 18239), True, 
'import xarray as xr\n'), ((18448, 18473), 'xarray.load_dataset', 'xr.load_dataset', (['specfile'], {}), '(specfile)\n', (18463, 18473), True, 'import xarray as xr\n'), ((23391, 23406), 'numpy.shape', 'np.shape', (['gccol'], {}), '(gccol)\n', (23399, 23406), True, 'import numpy as np\n'), ((41091, 41106), 'numpy.isnan', 'np.isnan', (['ratio'], {}), '(ratio)\n', (41099, 41106), True, 'import numpy as np\n'), ((42021, 42094), 'numpy.where', 'np.where', (['(analysisScalefactor[i, :] < self.MinimumScalingFactorAllowed[i])'], {}), '(analysisScalefactor[i, :] < self.MinimumScalingFactorAllowed[i])\n', (42029, 42094), True, 'import numpy as np\n'), ((42246, 42319), 'numpy.where', 'np.where', (['(analysisScalefactor[i, :] > self.MaximumScalingFactorAllowed[i])'], {}), '(analysisScalefactor[i, :] > self.MaximumScalingFactorAllowed[i])\n', (42254, 42319), True, 'import numpy as np\n'), ((43427, 43458), 'numpy.shape', 'np.shape', (['self.Xpert_background'], {}), '(self.Xpert_background)\n', (43435, 43458), True, 'import numpy as np\n'), ((2900, 2912), 'numpy.shape', 'np.shape', (['da'], {}), '(da)\n', (2908, 2912), True, 'import numpy as np\n'), ((3195, 3211), 'numpy.shape', 'np.shape', (['conc4d'], {}), '(conc4d)\n', (3203, 3211), True, 'import numpy as np\n'), ((5243, 5268), 'numpy.array', 'np.array', (['[new_last_time]'], {}), '([new_last_time])\n', (5251, 5268), True, 'import numpy as np\n'), ((7074, 7097), 'numpy.shape', 'np.shape', (['self.statevec'], {}), '(self.statevec)\n', (7082, 7097), True, 'import numpy as np\n'), ((7464, 7486), 'numpy.shape', 'np.shape', (['surr_latinds'], {}), '(surr_latinds)\n', (7472, 7486), True, 'import numpy as np\n'), ((7489, 7511), 'numpy.shape', 'np.shape', (['surr_loninds'], {}), '(surr_loninds)\n', (7497, 7511), True, 'import numpy as np\n'), ((11329, 11351), 'numpy.shape', 'np.shape', (['surr_latinds'], {}), '(surr_latinds)\n', (11337, 11351), True, 'import numpy as np\n'), ((11354, 11376), 'numpy.shape', 'np.shape', (['surr_loninds'], {}), '(surr_loninds)\n', (11362, 11376), True, 'import numpy as np\n'), ((33909, 33928), 'numpy.shape', 'np.shape', (['statevecs'], {}), '(statevecs)\n', (33917, 33928), True, 'import numpy as np\n'), ((34413, 34433), 'numpy.shape', 'np.shape', (['state_mean'], {}), '(state_mean)\n', (34421, 34433), True, 'import numpy as np\n'), ((34484, 34498), 'numpy.shape', 'np.shape', (['bigX'], {}), '(bigX)\n', (34492, 34498), True, 'import numpy as np\n'), ((35266, 35289), 'numpy.shape', 'np.shape', (['full_obsmeans'], {}), '(full_obsmeans)\n', (35274, 35289), True, 'import numpy as np\n'), ((35343, 35366), 'numpy.shape', 'np.shape', (['full_obsperts'], {}), '(full_obsperts)\n', (35351, 35366), True, 'import numpy as np\n'), ((35424, 35447), 'numpy.shape', 'np.shape', (['full_obsdiffs'], {}), '(full_obsdiffs)\n', (35432, 35447), True, 'import numpy as np\n'), ((37286, 37316), 'numpy.shape', 'np.shape', (['self.ybar_background'], {}), '(self.ybar_background)\n', (37294, 37316), True, 'import numpy as np\n'), ((37395, 37426), 'numpy.shape', 'np.shape', (['self.Ypert_background'], {}), '(self.Ypert_background)\n', (37403, 37426), True, 'import numpy as np\n'), ((37494, 37514), 'numpy.shape', 'np.shape', (['self.ydiff'], {}), '(self.ydiff)\n', (37502, 37514), True, 'import numpy as np\n'), ((37592, 37622), 'numpy.shape', 'np.shape', (['self.xbar_background'], {}), '(self.xbar_background)\n', (37600, 37622), True, 'import numpy as np\n'), ((37701, 37732), 'numpy.shape', 'np.shape', (['self.Xpert_background'], {}), 
'(self.Xpert_background)\n', (37709, 37732), True, 'import numpy as np\n'), ((38163, 38179), 'numpy.shape', 'np.shape', (['self.R'], {}), '(self.R)\n', (38171, 38179), True, 'import numpy as np\n'), ((38355, 38371), 'numpy.shape', 'np.shape', (['self.C'], {}), '(self.C)\n', (38363, 38371), True, 'import numpy as np\n'), ((38671, 38700), 'numpy.shape', 'np.shape', (['self.PtildeAnalysis'], {}), '(self.PtildeAnalysis)\n', (38679, 38700), True, 'import numpy as np\n'), ((38936, 38960), 'numpy.shape', 'np.shape', (['self.WAnalysis'], {}), '(self.WAnalysis)\n', (38944, 38960), True, 'import numpy as np\n'), ((39162, 39189), 'numpy.shape', 'np.shape', (['self.WbarAnalysis'], {}), '(self.WbarAnalysis)\n', (39170, 39189), True, 'import numpy as np\n'), ((39427, 39451), 'numpy.shape', 'np.shape', (['self.WAnalysis'], {}), '(self.WAnalysis)\n', (39435, 39451), True, 'import numpy as np\n'), ((39831, 39862), 'numpy.shape', 'np.shape', (['self.analysisEnsemble'], {}), '(self.analysisEnsemble)\n', (39839, 39862), True, 'import numpy as np\n'), ((14534, 14550), 'numpy.shape', 'np.shape', (['conc3d'], {}), '(conc3d)\n', (14542, 14550), True, 'import numpy as np\n'), ((41689, 41712), 'numpy.abs', 'np.abs', (['relativechanges'], {}), '(relativechanges)\n', (41695, 41712), True, 'import numpy as np\n'), ((41773, 41811), 'numpy.sign', 'np.sign', (['relativechanges[relOverwrite]'], {}), '(relativechanges[relOverwrite])\n', (41780, 41811), True, 'import numpy as np\n'), ((5785, 5797), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5795, 5797), False, 'from datetime import date, datetime, timedelta\n')]
#Python wrapper / library for Einstein Analytics API import sys import browser_cookie3 import requests import json import time import datetime from dateutil import tz import pandas as pd import numpy as np import re from pandas import json_normalize from decimal import Decimal import base64 import csv import unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url = env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect "https://" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime("%I:%M:%S %p") elif timeFORfile == True: return curr_time.strftime("%m_%d_%Y__%I%p") else: return curr_time.strftime("%I:%M:%S %p") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by API name or label name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that they got. Might want to use exact API name if getting multiple matches for label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return not found message or return the dataset ID if dataset_df.empty == True: print('Dataset not found. 
Please check name or API name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes a saql query as an argument and returns a dataframe or saves to csv The query can be in JSON form or can be in the UI SAQL form load statements must have the appropreate spaces: =_load_\"datasetname\"; ''' if verbose == True: start = time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\"','\\"') #convert UI saql query to JSON format #create a dictionary with all datasets used in the query load_stmt_old = re.findall(r"(= load )(.*?)(;)", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\"','\"') if verbose == True: print('Running SAQL Query...') #run query and return dataframe or save as csv payload = {"query":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0 = current version 20 is max oldest version. Typically best practice to run the function and view the history first before supplying a version number. 
''' #get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None: payload = { "historyId": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start = time.time() progress_counter = 0 print('Getting app user list and access details...') print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add a try/except block to handle the error ''' attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") for app in response['folders']: attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { "AppId": app['id'], "AppName": app['name'], "UserId": u['sharedWithId'], "UserName": u['sharedWithLabel'], "AccessType": u['accessType'], "UserType": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") #continue to pull data from next page attempts = 0 # reset attempts for additional pages while next_page is not None: if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") while attempts < max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { "AppId": app['id'], "AppName": app['name'], "UserId": u['sharedWithId'], "UserName": u['sharedWithLabel'], "AccessType": u['accessType'], "UserType": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") elif app_id is not None: if type(app_id) is list or type(app_id) is tuple: for app in app_id: app_user_df = pd.DataFrame() r = 
requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( { "AppId": app, "AppName": response['name'], "UserId": u['sharedWithId'], "UserName": u['sharedWithLabel'], "AccessType": u['accessType'], "UserType": u['shareType'] }, ignore_index=True) else: print('Please input a list or tuple of app Ids') sys.exit(1) if save_path is not None: if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start = time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields in the JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares = shares + user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in user_dict if item["sharedWithId"] == shares[s]['sharedWithId']) #remove fields in the JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else: shares = None print('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None: payload = {"shares": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it easier to update access using dashboard names vs finding all apps needed. 
update dataframe should have the following columns: Dashboard Id, Access Type, and User Id ''' pass def remove_non_ascii(self, df, columns=None): if columns == None: columns = df.columns else: columns = columns for c in columns: if df[c].dtype == "O": df[c] = df[c].apply(lambda x: unidecode(x).replace("?","")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val="0.0", default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(" ","_") fields = [] for c in df.columns: if df[c].dtype == "datetime64[ns]": name = c.replace(" ","_") name = name.replace("__","_") date = { "fullyQualifiedName": name, "name": name, "type": "Date", "label": c, "format": "yyyy-MM-dd HH:mm:ss" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18 scale = 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(" ","_") name = name.replace("__","_") measure = { "fullyQualifiedName": name, "name": name, "type": "Numeric", "label": c, "precision": precision, "defaultValue": default_measure_val, "scale": scale, "format": default_measure_fmt, "decimalSeparator": "." } fields.append(measure) else: name = c.replace(" ","_") name = name.replace("__","_") dimension = { "fullyQualifiedName": name, "name": name, "type": "Text", "label": c } fields.append(dimension) xmd = { "fileFormat": { "charsetName": charset, "fieldsDelimitedBy": deliminator, "linesTerminatedBy": lineterminator }, "objects": [ { "connector": "CSV", "fullyQualifiedName": dataset_api_name, "label": dataset_label, "name": dataset_api_name, "fields": fields } ] } return str(xmd).replace("'",'"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val="0.0", default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show up exactly as the column names in the supplied dataframe ''' if verbose == True: start = time.time() print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(" ","_") if fillna == True: for c in df.columns: if df[c].dtype == "O": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == "datetime64[ns]": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 = 
requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum += 1 if verbose == True: print('\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { "InsightsExternalDataId" : json.loads(r1.text)['id'], "PartNumber" : str(partnum), "DataFile" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True: print('\nDatapart Upload Complete...') payload = { "Action" : "Process" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('Data Upload Process Started. Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec') if __name__ == '__main__': pass
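For orientation, a minimal usage sketch of the wrapper class defined above. The org URL, SAQL text and dataset names are placeholders (not from the source), and it assumes the class is importable and that a live Salesforce session exists in Chrome so the cookie lookup succeeds; the method signatures follow the class definition.

# Hedged usage sketch for salesforceEinsteinAnalytics (placeholder URL, query and names).
ea = salesforceEinsteinAnalytics(env_url='https://yourorg.my.salesforce.com', browser='chrome')

# SAQL in UI form -- load statements are rewritten to dataset/version IDs internally.
saql = '''q = load "Opportunities"; q = group q by 'StageName'; q = foreach q generate 'StageName' as 'Stage', count() as 'n';'''
df = ea.run_saql_query(saql, verbose=True)

# Push the result back to Einstein Analytics as a dataset (default operation is Overwrite).
ea.load_df_to_EA(df, dataset_api_name='Stage_Counts', operation='Overwrite', verbose=True)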
[ "json.dumps", "requests.utils.dict_from_cookiejar", "datetime.datetime.utcnow", "sys.exc_info", "sys.getsizeof", "pandas.DataFrame", "json.loads", "dateutil.tz.tzlocal", "re.findall", "datetime.timedelta", "requests.get", "math.ceil", "pandas.to_datetime", "numpy.issubdtype", "sys.exit", "unidecode.unidecode", "decimal.Decimal", "browser_cookie3.firefox", "time.time", "dateutil.tz.tzutc", "browser_cookie3.chrome" ]
[((1998, 2104), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/datasets')"], {'headers': 'self.header', 'params': 'params'}), "(self.env_url + '/services/data/v48.0/wave/datasets', headers=\n self.header, params=params)\n", (2010, 2104), False, 'import requests\n'), ((3753, 3790), 're.findall', 're.findall', (['"""(= load )(.*?)(;)"""', 'saql'], {}), "('(= load )(.*?)(;)', saql)\n", (3763, 3790), False, 'import re\n'), ((5416, 5539), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/dashboards/' + dashboard_id +\n '/histories')"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/dashboards/' +\n dashboard_id + '/histories', headers=self.header)\n", (5428, 5539), False, 'import requests\n'), ((17615, 17632), 'sys.getsizeof', 'sys.getsizeof', (['df'], {}), '(df)\n', (17628, 17632), False, 'import sys\n'), ((1532, 1544), 'dateutil.tz.tzlocal', 'tz.tzlocal', ([], {}), '()\n', (1542, 1544), False, 'from dateutil import tz\n'), ((2816, 2827), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2824, 2827), False, 'import sys\n'), ((2954, 3052), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/datasets/' + dsid)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/datasets/' + dsid,\n headers=self.header)\n", (2966, 3052), False, 'import requests\n'), ((3473, 3484), 'time.time', 'time.time', ([], {}), '()\n', (3482, 3484), False, 'import time\n'), ((5738, 5800), 'requests.get', 'requests.get', (['(self.env_url + preview_link)'], {'headers': 'self.header'}), '(self.env_url + preview_link, headers=self.header)\n', (5750, 5800), False, 'import requests\n'), ((6326, 6337), 'time.time', 'time.time', ([], {}), '()\n', (6335, 6337), False, 'import time\n'), ((10763, 10774), 'time.time', 'time.time', ([], {}), '()\n', (10772, 10774), False, 'import time\n'), ((12952, 12963), 'time.time', 'time.time', ([], {}), '()\n', (12961, 12963), False, 'import time\n'), ((15916, 15927), 'time.time', 'time.time', ([], {}), '()\n', (15925, 15927), False, 'import time\n'), ((17800, 17836), 'math.ceil', 'math.ceil', (['(df_memory / MAX_FILE_SIZE)'], {}), '(df_memory / MAX_FILE_SIZE)\n', (17809, 17836), False, 'import math\n'), ((19141, 19152), 'time.time', 'time.time', ([], {}), '()\n', (19150, 19152), False, 'import time\n'), ((526, 573), 'browser_cookie3.chrome', 'browser_cookie3.chrome', ([], {'domain_name': 'env_url[8:]'}), '(domain_name=env_url[8:])\n', (548, 573), False, 'import browser_cookie3\n'), ((672, 710), 'requests.utils.dict_from_cookiejar', 'requests.utils.dict_from_cookiejar', (['cj'], {}), '(cj)\n', (706, 710), False, 'import requests\n'), ((1326, 1337), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1334, 1337), False, 'import sys\n'), ((2129, 2158), 'json.loads', 'json.loads', (['dataset_json.text'], {}), '(dataset_json.text)\n', (2139, 2158), False, 'import json\n'), ((3056, 3074), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3066, 3074), False, 'import json\n'), ((4565, 4584), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (4575, 4584), False, 'import json\n'), ((4823, 4834), 'time.time', 'time.time', ([], {}), '()\n', (4832, 4834), False, 'import time\n'), ((4986, 4997), 'time.time', 'time.time', ([], {}), '()\n', (4995, 4997), False, 'import time\n'), ((5560, 5578), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (5570, 5578), False, 'import json\n'), ((10288, 10299), 'time.time', 'time.time', ([], {}), '()\n', (10297, 
10299), False, 'import time\n'), ((10465, 10476), 'time.time', 'time.time', ([], {}), '()\n', (10474, 10476), False, 'import time\n'), ((10981, 11080), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app_id)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app_id,\n headers=self.header)\n", (10993, 11080), False, 'import requests\n'), ((11087, 11105), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (11097, 11105), False, 'import json\n'), ((14157, 14194), 'numpy.issubdtype', 'np.issubdtype', (['df[c].dtype', 'np.number'], {}), '(df[c].dtype, np.number)\n', (14170, 14194), True, 'import numpy as np\n'), ((17281, 17306), 'json.dumps', 'json.dumps', (['upload_config'], {}), '(upload_config)\n', (17291, 17306), False, 'import json\n'), ((17429, 17440), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17437, 17440), False, 'import sys\n'), ((17674, 17710), 'math.ceil', 'math.ceil', (['(df_memory / MAX_FILE_SIZE)'], {}), '(df_memory / MAX_FILE_SIZE)\n', (17683, 17710), False, 'import math\n'), ((18822, 18833), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (18830, 18833), False, 'import sys\n'), ((19089, 19108), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (19099, 19108), False, 'import json\n'), ((866, 914), 'browser_cookie3.firefox', 'browser_cookie3.firefox', ([], {'domain_name': 'env_url[8:]'}), '(domain_name=env_url[8:])\n', (889, 914), False, 'import browser_cookie3\n'), ((938, 976), 'requests.utils.dict_from_cookiejar', 'requests.utils.dict_from_cookiejar', (['cj'], {}), '(cj)\n', (972, 976), False, 'import requests\n'), ((1175, 1186), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1183, 1186), False, 'import sys\n'), ((4609, 4627), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (4619, 4627), False, 'import json\n'), ((6775, 6865), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders')"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders', headers=\n self.header)\n", (6787, 6865), False, 'import requests\n'), ((6875, 6893), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (6885, 6893), False, 'import json\n'), ((6995, 7009), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7007, 7009), True, 'import pandas as pd\n'), ((10097, 10108), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10105, 10108), False, 'import sys\n'), ((11405, 11504), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app_id)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app_id,\n headers=self.header)\n", (11417, 11504), False, 'import requests\n'), ((11511, 11529), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (11521, 11529), False, 'import json\n'), ((12898, 12917), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (12908, 12917), False, 'import json\n'), ((16216, 16253), 'numpy.issubdtype', 'np.issubdtype', (['df[c].dtype', 'np.number'], {}), '(df[c].dtype, np.number)\n', (16229, 16253), True, 'import numpy as np\n'), ((17318, 17337), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (17328, 17337), False, 'import json\n'), ((18462, 18481), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (18472, 18481), False, 'import json\n'), ((18676, 18695), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (18686, 18695), False, 'import json\n'), ((18707, 18726), 'json.loads', 
'json.loads', (['r2.text'], {}), '(r2.text)\n', (18717, 18726), False, 'import json\n'), ((19036, 19055), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (19046, 19055), False, 'import json\n'), ((1467, 1493), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1491, 1493), False, 'import datetime\n'), ((1509, 1519), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (1517, 1519), False, 'from dateutil import tz\n'), ((1604, 1639), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'add_sec'}), '(seconds=add_sec)\n', (1622, 1639), False, 'import datetime\n'), ((6134, 6153), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (6144, 6153), False, 'import json\n'), ((7276, 7379), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'])"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'\n ], headers=self.header)\n", (7288, 7379), False, 'import requests\n'), ((8249, 8308), 'requests.get', 'requests.get', (['(self.env_url + next_page)'], {'headers': 'self.header'}), '(self.env_url + next_page, headers=self.header)\n', (8261, 8308), False, 'import requests\n'), ((8324, 8343), 'json.loads', 'json.loads', (['np.text'], {}), '(np.text)\n', (8334, 8343), False, 'import json\n'), ((9520, 9534), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9532, 9534), True, 'import pandas as pd\n'), ((9544, 9640), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app,\n headers=self.header)\n", (9556, 9640), False, 'import requests\n'), ((9649, 9667), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (9659, 9667), False, 'import json\n'), ((11958, 12057), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app_id)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app_id,\n headers=self.header)\n", (11970, 12057), False, 'import requests\n'), ((12064, 12082), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (12074, 12082), False, 'import json\n'), ((12720, 12731), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12728, 12731), False, 'import sys\n'), ((7385, 7403), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (7395, 7403), False, 'import json\n'), ((8734, 8837), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'])"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'\n ], headers=self.header)\n", (8746, 8837), False, 'import requests\n'), ((19250, 19269), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (19260, 19269), False, 'import json\n'), ((8844, 8862), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (8854, 8862), False, 'import json\n'), ((13543, 13555), 'unidecode.unidecode', 'unidecode', (['x'], {}), '(x)\n', (13552, 13555), False, 'from unidecode import unidecode\n'), ((16350, 16387), 'pandas.to_datetime', 'pd.to_datetime', (['"""1900-01-01 00:00:00"""'], {}), "('1900-01-01 00:00:00')\n", (16364, 16387), True, 'import pandas as pd\n'), ((16635, 16650), 'json.dumps', 'json.dumps', (['xmd'], {}), '(xmd)\n', (16645, 16650), False, 'import json\n'), ((18343, 18379), 'math.ceil', 'math.ceil', (['(df_memory / MAX_FILE_SIZE)'], {}), '(df_memory / MAX_FILE_SIZE)\n', (18352, 18379), False, 'import math\n'), 
((7110, 7124), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7122, 7124), False, 'import sys\n'), ((8455, 8469), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8467, 8469), False, 'import sys\n'), ((7878, 7892), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7890, 7892), False, 'import sys\n'), ((8579, 8593), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8591, 8593), False, 'import sys\n'), ((9345, 9359), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9357, 9359), False, 'import sys\n'), ((14438, 14448), 'decimal.Decimal', 'Decimal', (['x'], {}), '(x)\n', (14445, 14448), False, 'from decimal import Decimal\n')]
import argparse import json import os import numpy as np import utils import util def main(args): config = utils.get_hocon_config(config_path="./config/main.conf", config_name="base") input_file = args.input_file if args.is_training == 0: is_training = False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for idx, genre in enumerate(config["genres"])} dataset = [] with open(input_file, "r") as f: for line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data["doc_key"] # Mentions and clusters clusters = json_data["clusters"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers = json_data["speakers"] speaker_dict = get_speaker_dict(util.flatten(speakers), config["max_num_speakers"]) # Segments segments = json_data["segments"] sentence_map = json_data["sentence_map"] num_words = sum([len(s) for s in segments]) segment_len = np.array([len(s) for s in segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data["tokens"] original_sentence_boundaries = json_data["original_sentence_boundaries"] # XXX gold_clusters = json_data["clusters"] subtoken_map = json_data.get("subtoken_map", None) # DataInstanceに変換 kargs = { "doc_key": doc_key, "tokens": tokens, "original_sentence_boundaries": original_sentence_boundaries, # XXX "segments": segments, "sentence_map": sentence_map, "speakers": speakers, "gold_clusters": gold_clusters, "subtoken_map": subtoken_map, # "input_ids": input_ids, "input_mask": input_mask, "speaker_ids": speaker_ids, "segment_len": segment_len, "genre": genre, "is_training": is_training, "gold_starts": gold_starts, "gold_ends": gold_ends, "gold_mention_cluster_map": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype="O") output_file = os.path.basename(input_file).replace(".jsonlines", ".npy") output_file = os.path.join(config["caches"], output_file) np.save(output_file, dataset) print("Cached %s to %s" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): """ Parameters ---------- speakers: list[str] Returns ------- dict[str, int] """ speaker_dict = {"UNK": 0, "[SPL]": 1} for speaker in speakers: if 
len(speaker_dict) > max_num_speakers: pass # "break" to limit # speakers if speaker not in speaker_dict: speaker_dict[speaker] = len(speaker_dict) return speaker_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, required=True) parser.add_argument("--is_training", type=int, required=True) parser.add_argument('--tokenizer_name', type=str, required=True) parser.add_argument('--seg_len', type=int, required=True) args = parser.parse_args() main(args)
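A hedged invocation sketch for the caching script above; the script filename, jsonlines path and tokenizer name are placeholders rather than values taken from the source.

# Hedged invocation sketch (script filename, paths and tokenizer name are placeholders):
#
#   python cache_dataset.py --input_file ./data/train.english.384.jsonlines \
#                           --is_training 1 --tokenizer_name bert-base-cased --seg_len 384
#
# The same entry point can be driven programmatically, assuming the functions above
# are importable as a module:
import argparse

args = argparse.Namespace(
    input_file="./data/dev.english.384.jsonlines",  # placeholder jsonlines file
    is_training=0,                                   # non-zero marks the instances as training data
    tokenizer_name="bert-base-cased",                # any name util.get_tokenizer accepts
    seg_len=384,                                     # must match the segment length used when the file was built
)
main(args)                                           # writes <input basename>.npy into config["caches"]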
[ "numpy.save", "numpy.sum", "argparse.ArgumentParser", "json.loads", "os.path.basename", "numpy.asarray", "util.get_tokenizer", "utils.DataInstance", "utils.get_hocon_config", "numpy.array", "os.path.join", "util.flatten" ]
[((116, 192), 'utils.get_hocon_config', 'utils.get_hocon_config', ([], {'config_path': '"""./config/main.conf"""', 'config_name': '"""base"""'}), "(config_path='./config/main.conf', config_name='base')\n", (138, 192), False, 'import utils\n'), ((338, 377), 'util.get_tokenizer', 'util.get_tokenizer', (['args.tokenizer_name'], {}), '(args.tokenizer_name)\n', (356, 377), False, 'import util\n'), ((4216, 4246), 'numpy.asarray', 'np.asarray', (['dataset'], {'dtype': '"""O"""'}), "(dataset, dtype='O')\n", (4226, 4246), True, 'import numpy as np\n'), ((4343, 4386), 'os.path.join', 'os.path.join', (["config['caches']", 'output_file'], {}), "(config['caches'], output_file)\n", (4355, 4386), False, 'import os\n'), ((4391, 4420), 'numpy.save', 'np.save', (['output_file', 'dataset'], {}), '(output_file, dataset)\n', (4398, 4420), True, 'import numpy as np\n'), ((4971, 4996), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4994, 4996), False, 'import argparse\n'), ((624, 640), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (634, 640), False, 'import json\n'), ((2444, 2463), 'numpy.array', 'np.array', (['input_ids'], {}), '(input_ids)\n', (2452, 2463), True, 'import numpy as np\n'), ((2489, 2509), 'numpy.array', 'np.array', (['input_mask'], {}), '(input_mask)\n', (2497, 2509), True, 'import numpy as np\n'), ((2536, 2557), 'numpy.array', 'np.array', (['speaker_ids'], {}), '(speaker_ids)\n', (2544, 2557), True, 'import numpy as np\n'), ((2932, 2953), 'numpy.array', 'np.array', (['gold_starts'], {}), '(gold_starts)\n', (2940, 2953), True, 'import numpy as np\n'), ((2978, 2997), 'numpy.array', 'np.array', (['gold_ends'], {}), '(gold_ends)\n', (2986, 2997), True, 'import numpy as np\n'), ((4140, 4167), 'utils.DataInstance', 'utils.DataInstance', ([], {}), '(**kargs)\n', (4158, 4167), False, 'import utils\n'), ((4266, 4294), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (4282, 4294), False, 'import os\n'), ((1356, 1378), 'util.flatten', 'util.flatten', (['speakers'], {}), '(speakers)\n', (1368, 1378), False, 'import util\n'), ((2590, 2608), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (2596, 2608), True, 'import numpy as np\n'), ((2622, 2640), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (2628, 2640), True, 'import numpy as np\n'), ((832, 854), 'util.flatten', 'util.flatten', (['clusters'], {}), '(clusters)\n', (844, 854), False, 'import util\n')]
# Copyright 2022 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import json import os import random import numpy as np import pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels # 1 - fatty # 2 - scattered fibroglandular density # 3 - heterogeneously dense # 4 - extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = [] dc_tags = [] saved_filenames = [] assert len(ids) == len(images) == len(densities) for i, (id, image, density) in enumerate(zip(ids, images, densities)): if (i + 1) % 200 == 0: print(f"processing {i+1} of {len(ids)}...") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, "**", "*.dcm"), recursive=True ) assert len(img_file) == 1, f"No unique dicom image found for {dir_name}!" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + ".npy"): _success = True else: _success = False _dc_tags = [] if _success and density >= 1: # label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { "patient_id": id, "image": dir_name + ".npy", "label": int(density - 1), } ) saved_filenames.append(dir_name + ".npy") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, "w") as f: json.dump(data_set, f, indent=4) print(f"Data list saved at {save_datalist_file}") def get_indices(all_ids, search_ids): indices = [] for _id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True # set False if dicoms have already been preprocessed out_path = "./data/preprocessed" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = "./data/dataset" # Input folders label_root = "/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/" dicom_root = "/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM" n_clients = 3 """ Run preprocessing """ """ 1. 
Load the label data """ random.seed(0) label_files = [ os.path.join(label_root, "mass_case_description_train_set.csv"), os.path.join(label_root, "calc_case_description_train_set.csv"), os.path.join(label_root, "mass_case_description_test_set.csv"), os.path.join(label_root, "calc_case_description_test_set.csv"), ] breast_densities = [] patients_ids = [] image_file_path = [] # read annotations for label_file in label_files: print(f"add {label_file}") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data["image file path"], return_index=True ) print( f"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries" ) try: breast_densities.extend(label_data["breast_density"][unique_indices]) except BaseException: breast_densities.extend(label_data["breast density"][unique_indices]) patients_ids.extend(label_data["patient_id"][unique_indices]) image_file_path.extend(label_data["image file path"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f"Mismatch between label data, breast_densities: " f"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}" ) print(f"Read {len(image_file_path)} data entries.") """ 2. Split the data """ # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f"Found {n_patients} patients.") # generate splits using roughly the same ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge ) # test cases will be removed at this point # use groups to avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f"Splitting into {n_splits} folds for test split. (Only the first fold is used.)" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f"Splitting into {n_splits} folds for train/val splits. 
(Only the first fold is used.)" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), "Overlapping patients in train and validation!" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), "Overlapping patients in train and test!" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), "Overlapping patients in validation and test!" n_total = len(train_images) + len(val_images) + len(test_images) print(20 * "-") print(f"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)") print(f"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)") print(f"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)") print(20 * "-") print(f"Total : {n_total}") assert n_total == len(image_file_path), ( f"mismatch between total split images ({n_total})" f" and length of all images {len(image_file_path)}!" ) """ split train/validation dataset for n_clients """ # Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) """ 3. 
Preprocess the images """ dc_tags = [] saved_filenames = [] for c in range(n_clients): site_name = f"site-{c+1}" print(f"Preprocessing training set of client {site_name}") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f"Converted {len(train_list)} of {len(train_patients_ids)} training images" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print("Preprocessing validation") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f"Converted {len(val_list)} of {len(val_patients_ids)} validation images") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print("Preprocessing testing") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f"Converted {len(test_list)} of {len(test_patients_ids)} testing images") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { "train": train_list, # will stay the same for both phases "test1": val_list, # like phase 1 leaderboard "test2": test_list, # like phase 2 - final leaderboard } write_datalist(f"{out_dataset_prefix}_{site_name}.json", data_set) print(50 * "=") print( f"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images." ) # check that there were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!" print(f"Data lists saved wit prefix {out_dataset_prefix}") print(50 * "=") print("Processed unique DICOM tags", np.unique(dc_tags)) if __name__ == "__main__": main()
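The splits above hinge on GroupKFold with n_splits derived from the desired ratio, keeping only the first fold so that no patient appears on both sides. A small self-contained sketch of that pattern on synthetic IDs (not CBIS-DDSM data) is:

# Self-contained illustration of the patient-disjoint split pattern used above.
import numpy as np
from sklearn.model_selection import GroupKFold

rng = np.random.default_rng(0)
patient_ids = np.repeat(np.arange(100), 3)            # 100 synthetic patients, 3 images each
labels = rng.integers(1, 5, size=len(patient_ids))      # density classes 1-4
test_ratio = 0.4

n_splits = int(np.ceil(1.0 / test_ratio))                 # same n_splits heuristic as above
gkf = GroupKFold(n_splits=n_splits)
train_val_idx, test_idx = next(gkf.split(patient_ids, labels, groups=patient_ids))

# GroupKFold guarantees no patient appears in both partitions.
assert len(np.intersect1d(patient_ids[train_val_idx], patient_ids[test_idx])) == 0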
[ "json.dump", "pandas.read_csv", "random.shuffle", "os.path.dirname", "os.path.isfile", "numpy.where", "numpy.array", "random.seed", "sklearn.model_selection.GroupKFold", "preprocess_dicom.dicom_preprocess", "numpy.array_split", "numpy.intersect1d", "os.path.join", "numpy.unique" ]
[((3213, 3227), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3224, 3227), False, 'import random\n'), ((4820, 4846), 'random.shuffle', 'random.shuffle', (['label_data'], {}), '(label_data)\n', (4834, 4846), False, 'import random\n'), ((4959, 4985), 'numpy.array', 'np.array', (['breast_densities'], {}), '(breast_densities)\n', (4967, 4985), True, 'import numpy as np\n'), ((5005, 5027), 'numpy.array', 'np.array', (['patients_ids'], {}), '(patients_ids)\n', (5013, 5027), True, 'import numpy as np\n'), ((5050, 5075), 'numpy.array', 'np.array', (['image_file_path'], {}), '(image_file_path)\n', (5058, 5075), True, 'import numpy as np\n'), ((5102, 5125), 'numpy.unique', 'np.unique', (['patients_ids'], {}), '(patients_ids)\n', (5111, 5125), True, 'import numpy as np\n'), ((5885, 5914), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (5895, 5914), False, 'from sklearn.model_selection import GroupKFold\n'), ((6643, 6672), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (6653, 6672), False, 'from sklearn.model_selection import GroupKFold\n'), ((8305, 8334), 'numpy.unique', 'np.unique', (['train_patients_ids'], {}), '(train_patients_ids)\n', (8314, 8334), True, 'import numpy as np\n'), ((8366, 8418), 'numpy.array_split', 'np.array_split', (['unique_train_patients_ids', 'n_clients'], {}), '(unique_train_patients_ids, n_clients)\n', (8380, 8418), True, 'import numpy as np\n'), ((8450, 8477), 'numpy.unique', 'np.unique', (['val_patients_ids'], {}), '(val_patients_ids)\n', (8459, 8477), True, 'import numpy as np\n'), ((8507, 8557), 'numpy.array_split', 'np.array_split', (['unique_val_patients_ids', 'n_clients'], {}), '(unique_val_patients_ids, n_clients)\n', (8521, 8557), True, 'import numpy as np\n'), ((8590, 8618), 'numpy.unique', 'np.unique', (['test_patients_ids'], {}), '(test_patients_ids)\n', (8599, 8618), True, 'import numpy as np\n'), ((8649, 8700), 'numpy.array_split', 'np.array_split', (['unique_test_patients_ids', 'n_clients'], {}), '(unique_test_patients_ids, n_clients)\n', (8663, 8700), True, 'import numpy as np\n'), ((1504, 1536), 'os.path.join', 'os.path.join', (['out_path', 'dir_name'], {}), '(out_path, dir_name)\n', (1516, 1536), False, 'import os\n'), ((2323, 2358), 'os.path.dirname', 'os.path.dirname', (['save_datalist_file'], {}), '(save_datalist_file)\n', (2338, 2358), False, 'import os\n'), ((2428, 2460), 'json.dump', 'json.dump', (['data_set', 'f'], {'indent': '(4)'}), '(data_set, f, indent=4)\n', (2437, 2460), False, 'import json\n'), ((2618, 2642), 'numpy.where', 'np.where', (['(all_ids == _id)'], {}), '(all_ids == _id)\n', (2626, 2642), True, 'import numpy as np\n'), ((3257, 3320), 'os.path.join', 'os.path.join', (['label_root', '"""mass_case_description_train_set.csv"""'], {}), "(label_root, 'mass_case_description_train_set.csv')\n", (3269, 3320), False, 'import os\n'), ((3330, 3393), 'os.path.join', 'os.path.join', (['label_root', '"""calc_case_description_train_set.csv"""'], {}), "(label_root, 'calc_case_description_train_set.csv')\n", (3342, 3393), False, 'import os\n'), ((3403, 3465), 'os.path.join', 'os.path.join', (['label_root', '"""mass_case_description_test_set.csv"""'], {}), "(label_root, 'mass_case_description_test_set.csv')\n", (3415, 3465), False, 'import os\n'), ((3475, 3537), 'os.path.join', 'os.path.join', (['label_root', '"""calc_case_description_test_set.csv"""'], {}), "(label_root, 'calc_case_description_test_set.csv')\n", (3487, 
3537), False, 'import os\n'), ((3734, 3757), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {}), '(label_file)\n', (3745, 3757), True, 'import pandas as pd\n'), ((3798, 3857), 'numpy.unique', 'np.unique', (["label_data['image file path']"], {'return_index': '(True)'}), "(label_data['image file path'], return_index=True)\n", (3807, 3857), True, 'import numpy as np\n'), ((11575, 11593), 'numpy.unique', 'np.unique', (['dc_tags'], {}), '(dc_tags)\n', (11584, 11593), True, 'import numpy as np\n'), ((1324, 1373), 'os.path.join', 'os.path.join', (['dicom_root', 'dir_name', '"""**"""', '"""*.dcm"""'], {}), "(dicom_root, dir_name, '**', '*.dcm')\n", (1336, 1373), False, 'import os\n'), ((1596, 1638), 'preprocess_dicom.dicom_preprocess', 'dicom_preprocess', (['img_file[0]', 'save_prefix'], {}), '(img_file[0], save_prefix)\n', (1612, 1638), False, 'from preprocess_dicom import dicom_preprocess\n'), ((1668, 1704), 'os.path.isfile', 'os.path.isfile', (["(save_prefix + '.npy')"], {}), "(save_prefix + '.npy')\n", (1682, 1704), False, 'import os\n'), ((7238, 7290), 'numpy.intersect1d', 'np.intersect1d', (['train_patients_ids', 'val_patients_ids'], {}), '(train_patients_ids, val_patients_ids)\n', (7252, 7290), True, 'import numpy as np\n'), ((7377, 7430), 'numpy.intersect1d', 'np.intersect1d', (['train_patients_ids', 'test_patients_ids'], {}), '(train_patients_ids, test_patients_ids)\n', (7391, 7430), True, 'import numpy as np\n'), ((7511, 7562), 'numpy.intersect1d', 'np.intersect1d', (['val_patients_ids', 'test_patients_ids'], {}), '(val_patients_ids, test_patients_ids)\n', (7525, 7562), True, 'import numpy as np\n'), ((11316, 11342), 'numpy.unique', 'np.unique', (['saved_filenames'], {}), '(saved_filenames)\n', (11325, 11342), True, 'import numpy as np\n'), ((11418, 11444), 'numpy.unique', 'np.unique', (['saved_filenames'], {}), '(saved_filenames)\n', (11427, 11444), True, 'import numpy as np\n')]
from .contribution import Contribution
import numpy as np
from taurex.cache import OpacityCache


class AbsorptionContribution(Contribution):
    """
    Computes the contribution to the optical depth occurring from molecular
    absorption.
    """

    def __init__(self):
        super().__init__('Absorption')
        self._opacity_cache = OpacityCache()

    def prepare_each(self, model, wngrid):
        """
        Prepares each molecular opacity by weighting them by their mixing
        ratio in the atmosphere

        Parameters
        ----------
        model: :class:`~taurex.model.model.ForwardModel`
            Forward model

        wngrid: :obj:`array`
            Wavenumber grid

        Yields
        ------
        component: :obj:`tuple` of type (str, :obj:`array`)
            Name of molecule and weighted opacity
        """
        self.debug('Preparing model with %s', wngrid.shape)
        self._ngrid = wngrid.shape[0]

        sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0]))

        # Get the opacity cache
        self._opacity_cache = OpacityCache()

        # Loop through all active gases
        for gas in model.chemistry.activeGases:

            # Clear sigma array
            sigma_xsec[...] = 0.0

            # Get the mix ratio of the gas
            gas_mix = model.chemistry.get_gas_mix_profile(gas)
            self.info('Recomputing active gas %s opacity', gas)

            # Get the cross section object relating to the gas
            xsec = self._opacity_cache[gas]

            # Loop through the layers
            for idx_layer, tp in enumerate(zip(model.temperatureProfile,
                                               model.pressureProfile)):
                self.debug('Got index,tp %s %s', idx_layer, tp)
                temperature, pressure = tp

                # Place into the array
                sigma_xsec[idx_layer] += \
                    xsec.opacity(temperature, pressure, wngrid) * gas_mix[idx_layer]

            # Temporarily assign to master cross-section
            self.sigma_xsec = sigma_xsec

            yield gas, sigma_xsec

    @property
    def sigma(self):
        """
        Returns the fused weighted cross-section of all active gases
        """
        return self.sigma_xsec
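# A minimal numpy-only sketch of the weighting step performed inside
# prepare_each: the layer cross-section is multiplied element-wise by the
# layer mixing ratio and accumulated. All values below are made up and no
# TauREx API is called; this only illustrates the shape bookkeeping.
import numpy as np

n_layers, n_wn = 3, 5
gas_mix = np.array([1e-4, 2e-4, 3e-4])            # hypothetical mixing ratio per layer
xsec_per_layer = np.full((n_layers, n_wn), 1e-22)  # hypothetical cross-sections

sigma_xsec = np.zeros((n_layers, n_wn))
for idx_layer in range(n_layers):
    sigma_xsec[idx_layer] += xsec_per_layer[idx_layer] * gas_mix[idx_layer]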
[ "numpy.zeros", "taurex.cache.OpacityCache" ]
[((344, 358), 'taurex.cache.OpacityCache', 'OpacityCache', ([], {}), '()\n', (356, 358), False, 'from taurex.cache import OpacityCache\n'), ((975, 1023), 'numpy.zeros', 'np.zeros', ([], {'shape': '(model.nLayers, wngrid.shape[0])'}), '(shape=(model.nLayers, wngrid.shape[0]))\n', (983, 1023), True, 'import numpy as np\n'), ((1087, 1101), 'taurex.cache.OpacityCache', 'OpacityCache', ([], {}), '()\n', (1099, 1101), False, 'from taurex.cache import OpacityCache\n')]
import sys
sys.path.append('../')
import matplotlib; matplotlib.use('macosx')
import time
import numpy as np
import matplotlib.pyplot as plt
import dolfin as dl; dl.set_log_level(40)

# ROMML imports
from fom.forward_solve import Fin
from fom.thermal_fin import get_space
from rom.averaged_affine_ROM import AffineROMFin
from deep_learning.dl_model import load_parametric_model_avg, load_bn_model
from gaussian_field import make_cov_chol

# Tensorflow related imports
from tensorflow.keras.optimizers import Adam


class SolverWrapper:
    def __init__(self, solver, data):
        self.solver = solver
        self.data = data
        self.z = dl.Function(V)

    def cost_function(self, z_v):
        self.z.vector().set_local(z_v)
        w, y, A, B, C = self.solver.forward(self.z)
        y = self.solver.qoi_operator(w)
        reg_cost = dl.assemble(self.solver.reg)
        cost = 0.5 * np.linalg.norm(y - self.data)**2
        # cost = cost + reg_cost
        return cost

    def gradient(self, z_v):
        self.z.vector().set_local(z_v)
        grad = self.solver.gradient(self.z, self.data)
        reg_grad = dl.assemble(self.solver.grad_reg)[:]
        # grad = grad + reg_grad
        return grad


class ROMMLSolverWrapper:
    def __init__(self, err_model, solver_r, solver):
        self.err_model = err_model
        self.solver_r = solver_r
        self.z = dl.Function(V)
        self.solver = solver
        self.data = self.solver_r.data
        self.cost = None
        self.grad = None

    def cost_function(self, z_v):
        self.z.vector().set_local(z_v)
        w_r = self.solver_r.forward_reduced(self.z)
        y_r = self.solver_r.qoi_reduced(w_r)
        e_NN = self.err_model.predict([[z_v]])[0]
        self.solver._k.assign(self.z)
        y_romml = y_r + e_NN
        # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg)
        self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2
        return self.cost

    def gradient(self, z_v):
        self.z.vector().set_local(z_v)
        self.solver._k.assign(self.z)
        self.grad, self.cost = self.solver_r.grad_romml(self.z)
        # self.grad = self.grad + dl.assemble(self.solver.grad_reg)
        return self.grad


class RSolverWrapper:
    def __init__(self, err_model, solver_r, solver):
        self.err_model = err_model
        self.solver_r = solver_r
        self.z = dl.Function(V)
        self.solver = solver
        self.data = self.solver_r.data
        self.cost = None
        self.grad = None

    def cost_function(self, z_v):
        self.z.vector().set_local(z_v)
        w_r = self.solver_r.forward_reduced(self.z)
        y_r = self.solver_r.qoi_reduced(w_r)
        self.solver._k.assign(self.z)
        # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg)
        self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2
        return self.cost

    def gradient(self, z_v):
        self.z.vector().set_local(z_v)
        self.solver._k.assign(self.z)
        self.grad, self.cost = self.solver_r.grad_reduced(self.z)
        # self.grad = self.grad + dl.assemble(self.solver.grad_reg)
        return self.grad


resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.2)
solver = Fin(V, True)

# Generate synthetic observations
z_true = dl.Function(V)
norm = np.random.randn(len(chol))
nodal_vals = np.exp(0.5 * chol.T @ norm)
z_true.vector().set_local(nodal_vals)
w, y, A, B, C = solver.forward(z_true)
data = solver.qoi_operator(w)

# Setup DL error model
# err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
err_model = load_bn_model()

# Initialize reduced order model
phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")
solver_r = AffineROMFin(V, err_model, phi, True)
solver_r.set_data(data)

solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver)
solver_w = RSolverWrapper(err_model, solver_r, solver)
solver_fom = SolverWrapper(solver, data)

# Determine direction of gradient
z = dl.Function(V)
norm = np.random.randn(len(chol))
eps_z = np.exp(0.5 * chol.T @ norm)
z.vector().set_local(eps_z)
eps_norm = np.sqrt(dl.assemble(dl.inner(z, z) * dl.dx))
eps_norm = np.linalg.norm(eps_z)
eps_z = eps_z / eps_norm

# Determine location to evaluate gradient at
norm = np.random.randn(len(chol))
z_ = np.exp(0.5 * chol.T @ norm)

# Evaluate directional derivative using ROMML
dir_grad = np.dot(solver_romml.gradient(z_), eps_z)
print(f"Directional gradient ROMML: {dir_grad}")

n_eps = 32
hs = np.power(2., -np.arange(n_eps))

err_grads = []
grads = []
pi_0 = solver_romml.cost_function(z_)
for h in hs:
    pi_h = solver_romml.cost_function(z_ + h * eps_z)
    a_g = (pi_h - pi_0) / h
    grads.append(a_g)
    err = abs(a_g - dir_grad) / abs(dir_grad)
    # err = abs(a_g - dir_grad)
    err_grads.append(err)

plt.loglog(hs, err_grads, "-ob", label="Error Grad")
plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order")
plt.savefig('grad_test_ROMML.png', dpi=200)
plt.cla()
plt.clf()

plt.semilogx(hs, grads, "-ob")
plt.savefig('gradients_ROMML.png')
plt.cla()
plt.clf()

err_grads = []
grads = []
pi_0 = solver_w.cost_function(z_)
dir_grad = np.dot(solver_w.gradient(z_), eps_z)
for h in hs:
    pi_h = solver_w.cost_function(z_ + h * eps_z)
    a_g = (pi_h - pi_0) / h
    grads.append(a_g)
    err = abs(a_g - dir_grad) / abs(dir_grad)
    # err = abs(a_g - dir_grad)
    err_grads.append(err)

plt.loglog(hs, err_grads, "-ob", label="Error Grad")
plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order")
plt.savefig('grad_test_ROM.png', dpi=200)
plt.cla()
plt.clf()

plt.semilogx(hs, grads, "-ob")
plt.savefig('gradients_ROM.png')
plt.cla()
plt.clf()

err_grads = []
grads = []
pi_0 = solver_fom.cost_function(z_)
dir_grad = np.dot(solver_fom.gradient(z_), eps_z)
for h in hs:
    pi_h = solver_fom.cost_function(z_ + h * eps_z)
    a_g = (pi_h - pi_0) / h
    grads.append(a_g)
    err = abs(a_g - dir_grad) / abs(dir_grad)
    err_grads.append(err)

plt.loglog(hs, err_grads, "-ob", label="Error Grad")
plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order")
plt.savefig('grad_test_FOM.png', dpi=200)
plt.cla()
plt.clf()

plt.semilogx(hs, grads, "-ob")
plt.savefig('gradients_FOM.png')
plt.cla()
plt.clf()

#####
## Examine function behavior
####
hs = np.linspace(0, 1, 500)
pis = []
# grads = []
for h in hs:
    pi_h = solver_w.cost_function(z_ + h * eps_z)
    pis.append(pi_h)
    # grad = solver_w.gradient(z_ + h * eps_z)
    # dir_grad = np.dot(grad, eps_z)
    # grads.append(dir_grad)

pi_foms = []
# grads_fom = []
dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z)
print(f"Direction gradient FOM: {dir_grad_fom}")
for h in hs:
    pi_h = solver_fom.cost_function(z_ + h * eps_z)
    pi_foms.append(pi_h)
    # grad = solver_fom.gradient(z_ + h * eps_z)
    # dir_grad = np.dot(grad, eps_z)
    # grads_fom.append(dir_grad)

pi_rommls = []
# grads_romml = []
for h in hs:
    pi_h = solver_romml.cost_function(z_ + h * eps_z)
    pi_rommls.append(pi_h)
    # grad = solver_romml.gradient(z_ + h * eps_z)
    # dir_grad = np.dot(grad, eps_z)
    # grads_romml.append(dir_grad)

plt.plot(hs, pi_foms)
plt.savefig('func_dir_FOM.png', dpi=200)
plt.cla()
plt.clf()

plt.plot(hs, pis)
plt.savefig('func_dir_ROM.png', dpi=200)
plt.cla()
plt.clf()

plt.plot(hs, pi_rommls)
plt.savefig('func_dir_ROMML.png', dpi=200)
plt.cla()
plt.clf()

# plt.plot(hs, grads_fom)
# plt.plot(hs, grads)
# plt.plot(hs, grads_romml)
# plt.legend(["FOM", "ROM", "ROMML"])
# plt.savefig('grad_dir.png', dpi=200)
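# The finite-difference checks above can be sanity-checked on a toy quadratic
# where the exact directional derivative is known. This sketch is independent
# of the ROMML/FEniCS objects and only illustrates the first-order decay of
# the relative error that the "First Order" reference line encodes.
import numpy as np

f = lambda x: 0.5 * np.dot(x, x)   # toy cost functional
grad_f = lambda x: x               # its exact gradient

x0 = np.random.randn(10)
v = np.random.randn(10)
v /= np.linalg.norm(v)
dir_grad = np.dot(grad_f(x0), v)
for h in np.power(2.0, -np.arange(1, 10)):
    fd = (f(x0 + h * v) - f(x0)) / h
    print(h, abs(fd - dir_grad) / abs(dir_grad))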
[ "matplotlib.pyplot.loglog", "gaussian_field.make_cov_chol", "fom.thermal_fin.get_space", "matplotlib.pyplot.clf", "rom.averaged_affine_ROM.AffineROMFin", "numpy.linalg.norm", "numpy.exp", "numpy.arange", "sys.path.append", "dolfin.inner", "matplotlib.pyplot.cla", "numpy.loadtxt", "numpy.linspace", "fom.forward_solve.Fin", "matplotlib.use", "dolfin.set_log_level", "matplotlib.pyplot.semilogx", "dolfin.assemble", "matplotlib.pyplot.plot", "dolfin.Function", "deep_learning.dl_model.load_bn_model", "matplotlib.pyplot.savefig" ]
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((54, 78), 'matplotlib.use', 'matplotlib.use', (['"""macosx"""'], {}), "('macosx')\n", (68, 78), False, 'import matplotlib\n'), ((163, 183), 'dolfin.set_log_level', 'dl.set_log_level', (['(40)'], {}), '(40)\n', (179, 183), True, 'import dolfin as dl\n'), ((3232, 3253), 'fom.thermal_fin.get_space', 'get_space', (['resolution'], {}), '(resolution)\n', (3241, 3253), False, 'from fom.thermal_fin import get_space\n'), ((3261, 3289), 'gaussian_field.make_cov_chol', 'make_cov_chol', (['V'], {'length': '(1.2)'}), '(V, length=1.2)\n', (3274, 3289), False, 'from gaussian_field import make_cov_chol\n'), ((3299, 3311), 'fom.forward_solve.Fin', 'Fin', (['V', '(True)'], {}), '(V, True)\n', (3302, 3311), False, 'from fom.forward_solve import Fin\n'), ((3356, 3370), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (3367, 3370), True, 'import dolfin as dl\n'), ((3418, 3445), 'numpy.exp', 'np.exp', (['(0.5 * chol.T @ norm)'], {}), '(0.5 * chol.T @ norm)\n', (3424, 3445), True, 'import numpy as np\n'), ((3678, 3693), 'deep_learning.dl_model.load_bn_model', 'load_bn_model', ([], {}), '()\n', (3691, 3693), False, 'from deep_learning.dl_model import load_parametric_model_avg, load_bn_model\n'), ((3734, 3791), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/basis_nine_param.txt"""'], {'delimiter': '""","""'}), "('../data/basis_nine_param.txt', delimiter=',')\n", (3744, 3791), True, 'import numpy as np\n'), ((3802, 3839), 'rom.averaged_affine_ROM.AffineROMFin', 'AffineROMFin', (['V', 'err_model', 'phi', '(True)'], {}), '(V, err_model, phi, True)\n', (3814, 3839), False, 'from rom.averaged_affine_ROM import AffineROMFin\n'), ((4064, 4078), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (4075, 4078), True, 'import dolfin as dl\n'), ((4121, 4148), 'numpy.exp', 'np.exp', (['(0.5 * chol.T @ norm)'], {}), '(0.5 * chol.T @ norm)\n', (4127, 4148), True, 'import numpy as np\n'), ((4241, 4262), 'numpy.linalg.norm', 'np.linalg.norm', (['eps_z'], {}), '(eps_z)\n', (4255, 4262), True, 'import numpy as np\n'), ((4371, 4398), 'numpy.exp', 'np.exp', (['(0.5 * chol.T @ norm)'], {}), '(0.5 * chol.T @ norm)\n', (4377, 4398), True, 'import numpy as np\n'), ((4881, 4933), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', 'err_grads', '"""-ob"""'], {'label': '"""Error Grad"""'}), "(hs, err_grads, '-ob', label='Error Grad')\n", (4891, 4933), True, 'import matplotlib.pyplot as plt\n'), ((4934, 5009), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', '(0.5 * err_grads[0] / hs[0] * hs)', '"""-.k"""'], {'label': '"""First Order"""'}), "(hs, 0.5 * err_grads[0] / hs[0] * hs, '-.k', label='First Order')\n", (4944, 5009), True, 'import matplotlib.pyplot as plt\n'), ((5005, 5048), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grad_test_ROMML.png"""'], {'dpi': '(200)'}), "('grad_test_ROMML.png', dpi=200)\n", (5016, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5049, 5058), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5056, 5058), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5068), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5066, 5068), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5100), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['hs', 'grads', '"""-ob"""'], {}), "(hs, grads, '-ob')\n", (5082, 5100), True, 'import matplotlib.pyplot as plt\n'), ((5101, 5135), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gradients_ROMML.png"""'], {}), "('gradients_ROMML.png')\n", 
(5112, 5135), True, 'import matplotlib.pyplot as plt\n'), ((5136, 5145), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5143, 5145), True, 'import matplotlib.pyplot as plt\n'), ((5146, 5155), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5153, 5155), True, 'import matplotlib.pyplot as plt\n'), ((5481, 5533), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', 'err_grads', '"""-ob"""'], {'label': '"""Error Grad"""'}), "(hs, err_grads, '-ob', label='Error Grad')\n", (5491, 5533), True, 'import matplotlib.pyplot as plt\n'), ((5534, 5609), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', '(0.5 * err_grads[0] / hs[0] * hs)', '"""-.k"""'], {'label': '"""First Order"""'}), "(hs, 0.5 * err_grads[0] / hs[0] * hs, '-.k', label='First Order')\n", (5544, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5646), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grad_test_ROM.png"""'], {'dpi': '(200)'}), "('grad_test_ROM.png', dpi=200)\n", (5616, 5646), True, 'import matplotlib.pyplot as plt\n'), ((5647, 5656), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5654, 5656), True, 'import matplotlib.pyplot as plt\n'), ((5657, 5666), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5664, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5668, 5698), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['hs', 'grads', '"""-ob"""'], {}), "(hs, grads, '-ob')\n", (5680, 5698), True, 'import matplotlib.pyplot as plt\n'), ((5699, 5731), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gradients_ROM.png"""'], {}), "('gradients_ROM.png')\n", (5710, 5731), True, 'import matplotlib.pyplot as plt\n'), ((5732, 5741), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5739, 5741), True, 'import matplotlib.pyplot as plt\n'), ((5742, 5751), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5749, 5751), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6102), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', 'err_grads', '"""-ob"""'], {'label': '"""Error Grad"""'}), "(hs, err_grads, '-ob', label='Error Grad')\n", (6060, 6102), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6178), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', '(0.5 * err_grads[0] / hs[0] * hs)', '"""-.k"""'], {'label': '"""First Order"""'}), "(hs, 0.5 * err_grads[0] / hs[0] * hs, '-.k', label='First Order')\n", (6113, 6178), True, 'import matplotlib.pyplot as plt\n'), ((6174, 6215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grad_test_FOM.png"""'], {'dpi': '(200)'}), "('grad_test_FOM.png', dpi=200)\n", (6185, 6215), True, 'import matplotlib.pyplot as plt\n'), ((6216, 6225), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6223, 6225), True, 'import matplotlib.pyplot as plt\n'), ((6226, 6235), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6233, 6235), True, 'import matplotlib.pyplot as plt\n'), ((6237, 6267), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['hs', 'grads', '"""-ob"""'], {}), "(hs, grads, '-ob')\n", (6249, 6267), True, 'import matplotlib.pyplot as plt\n'), ((6268, 6300), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gradients_FOM.png"""'], {}), "('gradients_FOM.png')\n", (6279, 6300), True, 'import matplotlib.pyplot as plt\n'), ((6301, 6310), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6308, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6311, 6320), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6318, 6320), True, 'import matplotlib.pyplot as plt\n'), ((6369, 6391), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(500)'], {}), '(0, 1, 
500)\n', (6380, 6391), True, 'import numpy as np\n'), ((7221, 7242), 'matplotlib.pyplot.plot', 'plt.plot', (['hs', 'pi_foms'], {}), '(hs, pi_foms)\n', (7229, 7242), True, 'import matplotlib.pyplot as plt\n'), ((7243, 7283), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func_dir_FOM.png"""'], {'dpi': '(200)'}), "('func_dir_FOM.png', dpi=200)\n", (7254, 7283), True, 'import matplotlib.pyplot as plt\n'), ((7284, 7293), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7291, 7293), True, 'import matplotlib.pyplot as plt\n'), ((7294, 7303), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7301, 7303), True, 'import matplotlib.pyplot as plt\n'), ((7305, 7322), 'matplotlib.pyplot.plot', 'plt.plot', (['hs', 'pis'], {}), '(hs, pis)\n', (7313, 7322), True, 'import matplotlib.pyplot as plt\n'), ((7323, 7363), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func_dir_ROM.png"""'], {'dpi': '(200)'}), "('func_dir_ROM.png', dpi=200)\n", (7334, 7363), True, 'import matplotlib.pyplot as plt\n'), ((7364, 7373), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7371, 7373), True, 'import matplotlib.pyplot as plt\n'), ((7374, 7383), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7381, 7383), True, 'import matplotlib.pyplot as plt\n'), ((7385, 7408), 'matplotlib.pyplot.plot', 'plt.plot', (['hs', 'pi_rommls'], {}), '(hs, pi_rommls)\n', (7393, 7408), True, 'import matplotlib.pyplot as plt\n'), ((7409, 7451), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func_dir_ROMML.png"""'], {'dpi': '(200)'}), "('func_dir_ROMML.png', dpi=200)\n", (7420, 7451), True, 'import matplotlib.pyplot as plt\n'), ((7452, 7461), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7459, 7461), True, 'import matplotlib.pyplot as plt\n'), ((7462, 7471), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7469, 7471), True, 'import matplotlib.pyplot as plt\n'), ((647, 661), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (658, 661), True, 'import dolfin as dl\n'), ((847, 875), 'dolfin.assemble', 'dl.assemble', (['self.solver.reg'], {}), '(self.solver.reg)\n', (858, 875), True, 'import dolfin as dl\n'), ((1384, 1398), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (1395, 1398), True, 'import dolfin as dl\n'), ((2421, 2435), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (2432, 2435), True, 'import dolfin as dl\n'), ((4578, 4594), 'numpy.arange', 'np.arange', (['n_eps'], {}), '(n_eps)\n', (4587, 4594), True, 'import numpy as np\n'), ((1127, 1160), 'dolfin.assemble', 'dl.assemble', (['self.solver.grad_reg'], {}), '(self.solver.grad_reg)\n', (1138, 1160), True, 'import dolfin as dl\n'), ((4208, 4222), 'dolfin.inner', 'dl.inner', (['z', 'z'], {}), '(z, z)\n', (4216, 4222), True, 'import dolfin as dl\n'), ((897, 926), 'numpy.linalg.norm', 'np.linalg.norm', (['(y - self.data)'], {}), '(y - self.data)\n', (911, 926), True, 'import numpy as np\n'), ((1930, 1965), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_romml - self.data)'], {}), '(y_romml - self.data)\n', (1944, 1965), True, 'import numpy as np\n'), ((2884, 2915), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_r - self.data)'], {}), '(y_r - self.data)\n', (2898, 2915), True, 'import numpy as np\n')]
""" There are two useful functions: 1. correlationCoef will tell you the coreelation coefficient of two patches of same size the greater this coefficient is, the similar this two patches are. 2. matchTemplate will automatically go through the whole input 'img' with a sliding window and implement correlationCoef function on every window comparing it to template. """ import cv2 import numpy as np from matplotlib import pyplot as plt def correlationCoef(g1,g2): """ Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). """ #1. make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): """ Parameters: img: image, such as a cat, grayscale(0-255) template: your target, such as a cat's paw, grayscale(0-255) Return: a float image consisted of correlation coefficient of each pixel. """ win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w): t_patch=img[row:row+win_h,col:col+win_w] result[row,col]=correlationCoef(template,t_patch) return result
[ "numpy.std", "numpy.cov", "numpy.zeros" ]
[((938, 948), 'numpy.std', 'np.std', (['g1'], {}), '(g1)\n', (944, 948), True, 'import numpy as np\n'), ((960, 970), 'numpy.std', 'np.std', (['g2'], {}), '(g2)\n', (966, 970), True, 'import numpy as np\n'), ((1030, 1052), 'numpy.cov', 'np.cov', (['array1', 'array2'], {}), '(array1, array2)\n', (1036, 1052), True, 'import numpy as np\n'), ((1510, 1529), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1518, 1529), True, 'import numpy as np\n')]
"""Miscellaneous ECG Batch utils.""" import functools import pint import numpy as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): """Return a multiplicative factor to convert a measured quantity from old to new units. Parameters ---------- old_units : str Current units in SI format. new_units : str Target units in SI format. Returns ------- factor : float A factor to convert quantities between units. """ try: # pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + ": " + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): """Wrap a method with partial application of given positional and keyword arguments. Parameters ---------- func : callable A method to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method : callable Wrapped method. """ @functools.wraps(func) def method(self, *args, **kwargs): """Wrapped method.""" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): """Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even for binary problems. """ # pylint: disable=invalid-name def transform(self, y): """Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. """ Y = super().transform(y) if len(self.classes_) == 1: Y = 1 - Y if len(self.classes_) == 2: Y = np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y, threshold=None): """Transform one-hot encoded labels back to class labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The threshold used in the binary and multi-label cases. If ``None``, it is assumed to be half way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. """ if len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else: y = super().inverse_transform(Y, threshold) return y
[ "functools.wraps", "pint.UnitRegistry", "numpy.hstack" ]
[((160, 179), 'pint.UnitRegistry', 'pint.UnitRegistry', ([], {}), '()\n', (177, 179), False, 'import pint\n'), ((1308, 1329), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1323, 1329), False, 'import functools\n'), ((2239, 2260), 'numpy.hstack', 'np.hstack', (['(1 - Y, Y)'], {}), '((1 - Y, Y))\n', (2248, 2260), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 08:38:14 2018

@author: <NAME>

compute how quickly soccer league tables converge to the final distribution
"""

import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.stats import entropy
from scipy.optimize import curve_fit
import seaborn as sns
sns.set()


# function to compute Jensen-Shannon divergence
def JSD(p, q):
    r = 0.5 * (p + q)
    return 0.5 * (entropy(p, r) + entropy(q, r))


# the data files have already been acquired and cleaned
# see get_football-data_data.py
# build a list of filenames
filenames = glob.glob('data/*.csv')

# initialize an array to hold JSD values
# each row will contain the JSD curve data for one season
jsds = np.zeros((len(filenames), 500))

# initialize an array to hold final league tables
finals = np.zeros((len(filenames), 25))

# initialize a season counter
season = 0

# list of columns needed from the data files
cols = ['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG']

for file in filenames:
    # load the season data
    df = pd.read_csv(file, index_col='Date', encoding="ISO-8859-1",
                     usecols=cols).dropna(axis=0, how='any')

    # get the unique team names for that season
    teams = list(df.HomeTeam.unique())

    # set up array for league tables
    # each column corresponds to a team
    # each row corresponds to the league table after that number of games
    tables = np.zeros((df.shape[0] + 1, len(teams)))

    # initialize game counter
    num_games = 1

    # loop through the season data game by game
    for idx, row in df.iterrows():
        # initialize the current league table to be the same as the last
        tables[num_games, :] = tables[num_games - 1, :]

        # get indices for the teams involved in this game
        home_idx = teams.index(row['HomeTeam'])
        away_idx = teams.index(row['AwayTeam'])

        # compute home goals - away goals
        goal_diff = row.FTHG - row.FTAG

        # update the league table based on the result
        if goal_diff > 0:
            tables[num_games, home_idx] += 3
        elif goal_diff < 0:
            tables[num_games, away_idx] += 3
        else:
            tables[num_games, home_idx] += 1
            tables[num_games, away_idx] += 1

        # increment the game counter
        num_games += 1

    # delete first row of the table
    tables = tables[1:, :]

    # compute the probability distribution for the final league table
    p = tables[-1, :] / np.sum(tables[-1, :])

    # store p
    for idx, team in enumerate(p):
        finals[season, idx] = team

    # for each of the running league tables, convert to a distribution
    # and then compute the JSD
    for i in range(len(tables[:, 0])):
        # if np.count_nonzero(tables[idx, :]) == len(tables[idx, :]):
        q = tables[i, :] / np.sum(tables[i, :])
        jsds[season, i] = JSD(p, q)

    # increment the season counter
    season += 1

# compute the average JSD curve
avg = np.sum(jsds, axis=0) / 110

# array of x values for the games
xs = np.array([i for i in range(len(avg))])


# define function for curve-fitting
def f(x, a, b, c):
    return a * np.exp(-b * x) + c


# perform the curve fit
popt, pcov = curve_fit(f, xs, avg)

# plot the individual JSD curves
for i in range(jsds.shape[0]):
    plt.plot(jsds[i, :], alpha=.3, color='gray')

# add title and axis labels
plt.title('Convergence of league tables over time')
plt.xlabel('Number of games played')
plt.ylabel('JSD with final table')

# set axis limits, 461 most games in an individual season
axes = plt.gca()
axes.set_xlim([0, 461])
plt.savefig('allseasons.png')

# zoom in on the first 100 games
axes.set_xlim([0, 100])
plt.savefig('convbegin.png')

# zoom out again
axes.set_xlim([0, 380])

# plot the average curve
plt.plot(xs, avg, 'b-', label='average JSD')

# add a legend
plt.legend()
plt.savefig('convwithavg.png')

# plot the best-fit curve
plt.plot(xs, f(xs, *popt), 'r-', label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

# update the legend
plt.legend()
plt.savefig('conv.png')
plt.show()

plt.clf()
plt.cla()
plt.close()

# compute examples of final probability distributions
# spain 16-17
xd = [i for i in range(18)]
plt.bar(xd, np.sort(finals[5, :18]))
plt.title('La Liga 2016-2017')
plt.xticks([], '')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('sp1617.png')
plt.clf()
plt.cla()
plt.close()

# italy 16-17
xd = [i for i in range(20)]
plt.bar(xd, np.sort(finals[27, :20]))
plt.title('Serie A 2016-2017')
plt.xticks([], '')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('it1617.png')
plt.clf()
plt.cla()
plt.close()

# france 16-17
xd = [i for i in range(20)]
plt.bar(xd, np.sort(finals[49, :20]))
plt.title('Ligue 1 2016-2017')
plt.xticks([], '')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('fr1617.png')
plt.clf()
plt.cla()
plt.close()

# england 16-17
xd = [i for i in range(20)]
plt.bar(xd, np.sort(finals[71, :20]))
plt.title('Premier League 2016-2017')
plt.xticks([], '')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('en1617.png')
plt.clf()
plt.cla()
plt.close()

# germany 16-17
xd = [i for i in range(18)]
plt.bar(xd, np.sort(finals[93, :18]))
plt.title('Bundesliga 2016-2017')
plt.xticks([], '')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('ge1617.png')
plt.clf()
plt.cla()
plt.close()

# generate animation
# code below based on an example by <NAME>:
# email: <EMAIL>
# website: http://jakevdp.github.com
# license: BSD

# set up the figure
fig = plt.figure()

# set up the axes
ax = plt.axes(xlim=(-1, 20), ylim=(0, .12))
line, = ax.plot([], [], 'o', linestyle='None')

# add title, legend, etc.
plt.title('\'99-\'00 Premier League points distribution over time')
plt.xticks([], '')
plt.xlabel('Ranked teams')
plt.ylabel('Proportion of total points')


# draw the background
def init():
    line.set_data([], [])
    plt.bar([i for i in range(20)], np.sort(tables[-1, :] / np.sum(tables[-1, :])), alpha=.3)
    return line,


# animation function, each frame draws a distribution after one more game
def animate(i):
    xd = [i for i in range(20)]
    y = np.sort(tables[i + 40, :] / np.sum(tables[i + 40, :]))
    line.set_data(xd, y)
    return line,


# animate
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=340,
                               interval=20, blit=True, repeat_delay=1000)

# save the animation
anim.save('basic_animation.mp4', fps=50, extra_args=['-vcodec', 'libx264'])

plt.show()
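# A quick numerical check of the JSD helper defined above, on made-up
# distributions: it is zero for identical inputs, symmetric, and bounded by ln 2.
# p = np.array([0.5, 0.3, 0.2]); q = np.array([0.4, 0.4, 0.2])
# JSD(p, p)               # -> 0.0
# JSD(p, q) == JSD(q, p)  # -> True
# JSD(p, q) <= np.log(2)  # -> True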
[ "matplotlib.pyplot.title", "numpy.sum", "matplotlib.pyplot.clf", "matplotlib.pyplot.axes", "pandas.read_csv", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.figure", "numpy.exp", "matplotlib.pyplot.gca", "glob.glob", "matplotlib.pyplot.close", "matplotlib.pyplot.cla", "matplotlib.pyplot.xticks", "seaborn.set", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "scipy.optimize.curve_fit", "numpy.sort", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "scipy.stats.entropy", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ]
[((389, 398), 'seaborn.set', 'sns.set', ([], {}), '()\n', (396, 398), True, 'import seaborn as sns\n'), ((677, 700), 'glob.glob', 'glob.glob', (['"""data/*.csv"""'], {}), "('data/*.csv')\n", (686, 700), False, 'import glob\n'), ((3399, 3420), 'scipy.optimize.curve_fit', 'curve_fit', (['f', 'xs', 'avg'], {}), '(f, xs, avg)\n', (3408, 3420), False, 'from scipy.optimize import curve_fit\n'), ((3572, 3623), 'matplotlib.pyplot.title', 'plt.title', (['"""Convergence of league tables over time"""'], {}), "('Convergence of league tables over time')\n", (3581, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3625, 3661), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of games played"""'], {}), "('Number of games played')\n", (3635, 3661), True, 'import matplotlib.pyplot as plt\n'), ((3663, 3697), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""JSD with final table"""'], {}), "('JSD with final table')\n", (3673, 3697), True, 'import matplotlib.pyplot as plt\n'), ((3767, 3776), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3774, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3804, 3833), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""allseasons.png"""'], {}), "('allseasons.png')\n", (3815, 3833), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3923), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""convbegin.png"""'], {}), "('convbegin.png')\n", (3906, 3923), True, 'import matplotlib.pyplot as plt\n'), ((3997, 4041), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'avg', '"""b-"""'], {'label': '"""average JSD"""'}), "(xs, avg, 'b-', label='average JSD')\n", (4005, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4070), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4068, 4070), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4102), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""convwithavg.png"""'], {}), "('convwithavg.png')\n", (4083, 4102), True, 'import matplotlib.pyplot as plt\n'), ((4255, 4267), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4265, 4267), True, 'import matplotlib.pyplot as plt\n'), ((4269, 4292), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""conv.png"""'], {}), "('conv.png')\n", (4280, 4292), True, 'import matplotlib.pyplot as plt\n'), ((4294, 4304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4302, 4304), True, 'import matplotlib.pyplot as plt\n'), ((4308, 4317), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4315, 4317), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4328), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4326, 4328), True, 'import matplotlib.pyplot as plt\n'), ((4330, 4341), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4339, 4341), True, 'import matplotlib.pyplot as plt\n'), ((4482, 4512), 'matplotlib.pyplot.title', 'plt.title', (['"""La Liga 2016-2017"""'], {}), "('La Liga 2016-2017')\n", (4491, 4512), True, 'import matplotlib.pyplot as plt\n'), ((4514, 4532), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (4524, 4532), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4559), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (4543, 4559), True, 'import matplotlib.pyplot as plt\n'), ((4561, 4593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (4571, 4593), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4620), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sp1617.png"""'], {}), 
"('sp1617.png')\n", (4606, 4620), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4631), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4629, 4631), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4642), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4640, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4644, 4655), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4653, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4740, 4770), 'matplotlib.pyplot.title', 'plt.title', (['"""Serie A 2016-2017"""'], {}), "('Serie A 2016-2017')\n", (4749, 4770), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4790), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (4782, 4790), True, 'import matplotlib.pyplot as plt\n'), ((4791, 4817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (4801, 4817), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (4829, 4851), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4878), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""it1617.png"""'], {}), "('it1617.png')\n", (4864, 4878), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4889), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4887, 4889), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4900), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4898, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4902, 4913), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4911, 4913), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5029), 'matplotlib.pyplot.title', 'plt.title', (['"""Ligue 1 2016-2017"""'], {}), "('Ligue 1 2016-2017')\n", (5008, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5031, 5049), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (5041, 5049), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5076), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (5060, 5076), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (5088, 5110), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5137), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fr1617.png"""'], {}), "('fr1617.png')\n", (5123, 5137), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5148), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5146, 5148), True, 'import matplotlib.pyplot as plt\n'), ((5150, 5159), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5157, 5159), True, 'import matplotlib.pyplot as plt\n'), ((5161, 5172), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5170, 5172), True, 'import matplotlib.pyplot as plt\n'), ((5259, 5296), 'matplotlib.pyplot.title', 'plt.title', (['"""Premier League 2016-2017"""'], {}), "('Premier League 2016-2017')\n", (5268, 5296), True, 'import matplotlib.pyplot as plt\n'), ((5298, 5316), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (5308, 5316), True, 'import matplotlib.pyplot as plt\n'), ((5317, 5343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (5327, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5345, 5377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (5355, 5377), 
True, 'import matplotlib.pyplot as plt\n'), ((5379, 5404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""en1617.png"""'], {}), "('en1617.png')\n", (5390, 5404), True, 'import matplotlib.pyplot as plt\n'), ((5406, 5415), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5413, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5426), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5424, 5426), True, 'import matplotlib.pyplot as plt\n'), ((5428, 5439), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5437, 5439), True, 'import matplotlib.pyplot as plt\n'), ((5526, 5559), 'matplotlib.pyplot.title', 'plt.title', (['"""Bundesliga 2016-2017"""'], {}), "('Bundesliga 2016-2017')\n", (5535, 5559), True, 'import matplotlib.pyplot as plt\n'), ((5561, 5579), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (5571, 5579), True, 'import matplotlib.pyplot as plt\n'), ((5580, 5606), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (5590, 5606), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point distribution"""'], {}), "('Point distribution')\n", (5618, 5640), True, 'import matplotlib.pyplot as plt\n'), ((5642, 5667), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ge1617.png"""'], {}), "('ge1617.png')\n", (5653, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5669, 5678), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5676, 5678), True, 'import matplotlib.pyplot as plt\n'), ((5680, 5689), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5687, 5689), True, 'import matplotlib.pyplot as plt\n'), ((5691, 5702), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5700, 5702), True, 'import matplotlib.pyplot as plt\n'), ((5875, 5887), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5885, 5887), True, 'import matplotlib.pyplot as plt\n'), ((5915, 5954), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '(-1, 20)', 'ylim': '(0, 0.12)'}), '(xlim=(-1, 20), ylim=(0, 0.12))\n', (5923, 5954), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6095), 'matplotlib.pyplot.title', 'plt.title', (['"""\'99-\'00 Premier League points distribution over time"""'], {}), '("\'99-\'00 Premier League points distribution over time")\n', (6039, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6099, 6117), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '""""""'], {}), "([], '')\n", (6109, 6117), True, 'import matplotlib.pyplot as plt\n'), ((6118, 6144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ranked teams"""'], {}), "('Ranked teams')\n", (6128, 6144), True, 'import matplotlib.pyplot as plt\n'), ((6146, 6186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion of total points"""'], {}), "('Proportion of total points')\n", (6156, 6186), True, 'import matplotlib.pyplot as plt\n'), ((6607, 6720), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'init_func': 'init', 'frames': '(340)', 'interval': '(20)', 'blit': '(True)', 'repeat_delay': '(1000)'}), '(fig, animate, init_func=init, frames=340, interval=\n 20, blit=True, repeat_delay=1000)\n', (6630, 6720), False, 'from matplotlib import animation\n'), ((6851, 6861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6859, 6861), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3177), 'numpy.sum', 'np.sum', (['jsds'], {'axis': '(0)'}), '(jsds, axis=0)\n', (3163, 3177), True, 'import numpy as np\n'), ((3494, 
3539), 'matplotlib.pyplot.plot', 'plt.plot', (['jsds[i, :]'], {'alpha': '(0.3)', 'color': '"""gray"""'}), "(jsds[i, :], alpha=0.3, color='gray')\n", (3502, 3539), True, 'import matplotlib.pyplot as plt\n'), ((4457, 4480), 'numpy.sort', 'np.sort', (['finals[5, :18]'], {}), '(finals[5, :18])\n', (4464, 4480), True, 'import numpy as np\n'), ((4714, 4738), 'numpy.sort', 'np.sort', (['finals[27, :20]'], {}), '(finals[27, :20])\n', (4721, 4738), True, 'import numpy as np\n'), ((4973, 4997), 'numpy.sort', 'np.sort', (['finals[49, :20]'], {}), '(finals[49, :20])\n', (4980, 4997), True, 'import numpy as np\n'), ((5233, 5257), 'numpy.sort', 'np.sort', (['finals[71, :20]'], {}), '(finals[71, :20])\n', (5240, 5257), True, 'import numpy as np\n'), ((5500, 5524), 'numpy.sort', 'np.sort', (['finals[93, :18]'], {}), '(finals[93, :18])\n', (5507, 5524), True, 'import numpy as np\n'), ((2639, 2660), 'numpy.sum', 'np.sum', (['tables[-1, :]'], {}), '(tables[-1, :])\n', (2645, 2660), True, 'import numpy as np\n'), ((510, 523), 'scipy.stats.entropy', 'entropy', (['p', 'r'], {}), '(p, r)\n', (517, 523), False, 'from scipy.stats import entropy\n'), ((526, 539), 'scipy.stats.entropy', 'entropy', (['q', 'r'], {}), '(q, r)\n', (533, 539), False, 'from scipy.stats import entropy\n'), ((1146, 1218), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '"""Date"""', 'encoding': '"""ISO-8859-1"""', 'usecols': 'cols'}), "(file, index_col='Date', encoding='ISO-8859-1', usecols=cols)\n", (1157, 1218), True, 'import pandas as pd\n'), ((2993, 3013), 'numpy.sum', 'np.sum', (['tables[i, :]'], {}), '(tables[i, :])\n', (2999, 3013), True, 'import numpy as np\n'), ((3339, 3353), 'numpy.exp', 'np.exp', (['(-b * x)'], {}), '(-b * x)\n', (3345, 3353), True, 'import numpy as np\n'), ((6518, 6543), 'numpy.sum', 'np.sum', (['tables[i + 40, :]'], {}), '(tables[i + 40, :])\n', (6524, 6543), True, 'import numpy as np\n'), ((6309, 6330), 'numpy.sum', 'np.sum', (['tables[-1, :]'], {}), '(tables[-1, :])\n', (6315, 6330), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-
"""
Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018

@author: <NAME>

Run pPXF in data
"""
import os
import yaml

import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import constants
from astropy.table import Table, vstack, hstack
from ppxf.ppxf import ppxf
from ppxf import ppxf_util
from spectres import spectres

import context
import misc
from der_snr import DER_SNR


def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None):
    """ Running pPXF. """
    velscale = context.velscale if velscale is None else velscale
    V0 = context.V if V0 is None else V0
    # Reading templates
    ssp_templates = fits.getdata(templates_file, extname="SSPS").T
    params = Table.read(templates_file, hdu=1)
    nssps = ssp_templates.shape[1]
    logwave_temp = Table.read(templates_file, hdu=2)["loglam"].data
    wave_temp = np.exp(logwave_temp)
    # Use first spectrum to set emission lines
    start0 = [V0, 100., 0., 0.]
    bounds0 = [[V0 - 2000., V0 + 2000], [velscale / 10, 800.]]
    for spec in specs:
        print("Processing spectrum {}".format(spec))
        name = spec.replace(".fits", "")
        outyaml = os.path.join(outdir, "{}.yaml".format(name))
        if os.path.exists(outyaml) and not redo:
            continue
        table = Table.read(spec)
        wave_lin = table["wave"]
        flux = table["flux"]
        fluxerr = table["fluxerr"]
        # Removing red part of the spectrum
        idx = np.where(wave_lin < 7000)[0]
        wave_lin = wave_lin[idx]
        flux = flux[idx]
        fluxerr = fluxerr[idx]
        der_sn = misc.snr(flux)[2]
        data_sn = np.nanmedian(flux / fluxerr)
        ###################################################################
        # Rebinning the data to a logarithmic scale for ppxf
        wave_range = [wave_lin[0], wave_lin[-1]]
        logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1]
        wave = np.exp(logwave)
        wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1]
        flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr)
        ####################################################################
        # Setting up the gas templates
        gas_templates, line_names, line_wave = \
            ppxf_util.emission_lines(logwave_temp,
                                     [wave_lin[0], wave_lin[-1]], 2.95)
        ngas = gas_templates.shape[1]
        ####################################################################
        # Masking bad pixels
        skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863])
        goodpixels = np.arange(len(wave))
        for line in skylines:
            sky = np.argwhere((wave < line - 10) | (wave > line + 10)).ravel()
            goodpixels = np.intersect1d(goodpixels, sky)
        # Making goodpixels mask
        goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0])
        goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(fluxerr))[0])
        # Cleaning input spectrum
        fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr)
        flux[~np.isfinite(flux)] = 0.
        ########################################################################
        # Preparing the fit
        dv = (logwave_temp[0] - logwave[0]) * constants.c.to("km/s").value
        templates = np.column_stack((ssp_templates, gas_templates))
        components = np.hstack((np.zeros(nssps),
                                   np.arange(ngas) + 1)).astype(np.int)
        gas_component = components > 0
        start = [start0[:2]] * (ngas + 1)
        bounds = [bounds0] * (ngas + 1)
        moments = [2] * (ngas + 1)
        ########################################################################
        # Fitting with two components
        pp = ppxf(templates, flux, fluxerr, velscale=velscale, plot=True,
                  moments=moments, start=start, vsyst=dv, lam=wave,
                  component=components, mdegree=-1,
                  gas_component=gas_component, gas_names=line_names,
                  quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels)
        plt.savefig(os.path.join(outdir, "{}.png".format(name)), dpi=250)
        plt.close()
        pp.name = name
        # Saving results and plot
        save(pp, outdir)


def save(pp, outdir):
    """ Save results from pPXF into files excluding fitting arrays. """
    array_keys = ["lam", "galaxy", "noise", "bestfit", "gas_bestfit",
                  "mpoly", "apoly"]
    array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _),
                                                      np.ndarray)]
    table = Table([getattr(pp, key) for key in array_keys], names=array_keys)
    table.write(os.path.join(outdir, "{}_bestfit.fits".format(pp.name)),
                overwrite=True)
    ppdict = {}
    save_keys = ["name", "regul", "degree", "mdegree", "reddening", "clean",
                 "ncomp", "chi2"]
    # Chi2 is an astropy.unit.quantity object, we have to make it a scalar
    pp.chi2 = float(pp.chi2)
    for key in save_keys:
        ppdict[key] = getattr(pp, key)
    klist = ["V", "sigma"]
    for j, sol in enumerate(pp.sol):
        for i in range(len(sol)):
            ppdict["{}_{}".format(klist[i], j)] = float(sol[i])
            ppdict["{}err_{}".format(klist[i], j)] = float(pp.error[j][i])
    with open(os.path.join(outdir, "{}.yaml".format(pp.name)), "w") as f:
        yaml.dump(ppdict, f, default_flow_style=False)
    # Saving table with emission lines
    gas = pp.gas_component
    emtable = []
    for j, comp in enumerate(pp.component[gas]):
        t = Table()
        t["name"] = [pp.gas_names[j]]
        t["flux"] = [pp.gas_flux[j]]
        t["fluxerr"] = [pp.gas_flux_error[j]]
        t["V"] = [pp.sol[comp][0]]
        t["Verr"] = [pp.error[comp][0]]
        t["sigma"] = [pp.sol[comp][1]]
        t["sigmaerr"] = [pp.error[comp][1]]
        emtable.append(t)
    emtable = vstack(emtable)
    emtable.write(os.path.join(outdir, "{}_emission_lines.fits".format(
                  pp.name)), overwrite=True)


def make_table(direc, output):
    """ Read all yaml files in a pPXF directory to make one table for all
    bins. """
    filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(".yaml")])
    keys = ["name", "V_0", "Verr_0", "sigma_0", "sigmaerr_0", "der_sn"]
    names = {"name": "spec", "V_0": "V", "Verr_0": "Verr", "sigma_0": "sigma",
             "sigmaerr_0": "sigmaerr", "der_sn": "SNR"}
    outtable = []
    for fname in filenames:
        with open(os.path.join(direc, fname)) as f:
            props = yaml.load(f)
        data = Table([[props[k]] for k in keys],
                     names=[names[k] for k in keys])
        outtable.append(data)
    outtable = vstack(outtable)
    outtable.write(output, format="fits", overwrite=True)


if __name__ == '__main__':
    targetSN = 100
    sample = "kinematics"
    velscale = context.velscale
    tempfile = os.path.join(context.data_dir, "templates",
                            "emiles_vel{}_{}_fwhm2.95.fits".format(
                                int(velscale), sample))
    wdir = os.path.join(context.data_dir, "MUSE/sn{}/sci".format(targetSN))
    os.chdir(wdir)
    outdir = os.path.join(os.path.split(wdir)[0], "ppxf")
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    specs = sorted([_ for _ in os.listdir(".") if _.endswith(".fits")])
    run_ppxf(specs, tempfile, outdir, redo=False)
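# Hypothetical call collecting the per-spectrum YAML output of run_ppxf into a
# single FITS table; the directory and output file names below are placeholders.
# make_table("ppxf", "ppxf_results.fits")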
[ "os.mkdir", "yaml.load", "numpy.nanmedian", "yaml.dump", "numpy.arange", "numpy.exp", "ppxf.ppxf.ppxf", "os.path.join", "os.chdir", "astropy.constants.c.to", "astropy.io.fits.getdata", "matplotlib.pyplot.close", "os.path.exists", "numpy.isfinite", "spectres.spectres", "ppxf.ppxf_util.log_rebin", "numpy.intersect1d", "numpy.argwhere", "os.listdir", "ppxf.ppxf_util.emission_lines", "numpy.nanmax", "astropy.table.Table.read", "astropy.table.Table", "numpy.zeros", "astropy.table.vstack", "numpy.where", "numpy.array", "numpy.column_stack", "misc.snr", "os.path.split" ]
[((790, 823), 'astropy.table.Table.read', 'Table.read', (['templates_file'], {'hdu': '(1)'}), '(templates_file, hdu=1)\n', (800, 823), False, 'from astropy.table import Table, vstack, hstack\n'), ((946, 966), 'numpy.exp', 'np.exp', (['logwave_temp'], {}), '(logwave_temp)\n', (952, 966), True, 'import numpy as np\n'), ((6153, 6168), 'astropy.table.vstack', 'vstack', (['emtable'], {}), '(emtable)\n', (6159, 6168), False, 'from astropy.table import Table, vstack, hstack\n'), ((6964, 6980), 'astropy.table.vstack', 'vstack', (['outtable'], {}), '(outtable)\n', (6970, 6980), False, 'from astropy.table import Table, vstack, hstack\n'), ((7371, 7385), 'os.chdir', 'os.chdir', (['wdir'], {}), '(wdir)\n', (7379, 7385), False, 'import os\n'), ((729, 773), 'astropy.io.fits.getdata', 'fits.getdata', (['templates_file'], {'extname': '"""SSPS"""'}), "(templates_file, extname='SSPS')\n", (741, 773), False, 'from astropy.io import fits\n'), ((1383, 1399), 'astropy.table.Table.read', 'Table.read', (['spec'], {}), '(spec)\n', (1393, 1399), False, 'from astropy.table import Table, vstack, hstack\n'), ((1736, 1764), 'numpy.nanmedian', 'np.nanmedian', (['(flux / fluxerr)'], {}), '(flux / fluxerr)\n', (1748, 1764), True, 'import numpy as np\n'), ((2049, 2064), 'numpy.exp', 'np.exp', (['logwave'], {}), '(logwave)\n', (2055, 2064), True, 'import numpy as np\n'), ((2163, 2212), 'spectres.spectres', 'spectres', (['wave', 'wave_lin', 'flux'], {'spec_errs': 'fluxerr'}), '(wave, wave_lin, flux, spec_errs=fluxerr)\n', (2171, 2212), False, 'from spectres import spectres\n'), ((2394, 2467), 'ppxf.ppxf_util.emission_lines', 'ppxf_util.emission_lines', (['logwave_temp', '[wave_lin[0], wave_lin[-1]]', '(2.95)'], {}), '(logwave_temp, [wave_lin[0], wave_lin[-1]], 2.95)\n', (2418, 2467), False, 'from ppxf import ppxf_util\n'), ((2673, 2719), 'numpy.array', 'np.array', (['[4785, 5577, 5889, 6300, 6360, 6863]'], {}), '([4785, 5577, 5889, 6300, 6360, 6863])\n', (2681, 2719), True, 'import numpy as np\n'), ((3222, 3240), 'numpy.nanmax', 'np.nanmax', (['fluxerr'], {}), '(fluxerr)\n', (3231, 3240), True, 'import numpy as np\n'), ((3504, 3551), 'numpy.column_stack', 'np.column_stack', (['(ssp_templates, gas_templates)'], {}), '((ssp_templates, gas_templates))\n', (3519, 3551), True, 'import numpy as np\n'), ((3946, 4217), 'ppxf.ppxf.ppxf', 'ppxf', (['templates', 'flux', 'fluxerr'], {'velscale': 'velscale', 'plot': '(True)', 'moments': 'moments', 'start': 'start', 'vsyst': 'dv', 'lam': 'wave', 'component': 'components', 'mdegree': '(-1)', 'gas_component': 'gas_component', 'gas_names': 'line_names', 'quiet': '(False)', 'degree': '(15)', 'bounds': 'bounds', 'goodpixels': 'goodpixels'}), '(templates, flux, fluxerr, velscale=velscale, plot=True, moments=\n moments, start=start, vsyst=dv, lam=wave, component=components, mdegree\n =-1, gas_component=gas_component, gas_names=line_names, quiet=False,\n degree=15, bounds=bounds, goodpixels=goodpixels)\n', (3950, 4217), False, 'from ppxf.ppxf import ppxf\n'), ((4364, 4375), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4373, 4375), True, 'import matplotlib.pyplot as plt\n'), ((5620, 5666), 'yaml.dump', 'yaml.dump', (['ppdict', 'f'], {'default_flow_style': '(False)'}), '(ppdict, f, default_flow_style=False)\n', (5629, 5666), False, 'import yaml\n'), ((5816, 5823), 'astropy.table.Table', 'Table', ([], {}), '()\n', (5821, 5823), False, 'from astropy.table import Table, vstack, hstack\n'), ((6851, 6916), 'astropy.table.Table', 'Table', (['[[props[k]] for k in keys]'], {'names': 
'[names[k] for k in keys]'}), '([[props[k]] for k in keys], names=[names[k] for k in keys])\n', (6856, 6916), False, 'from astropy.table import Table, vstack, hstack\n'), ((7457, 7479), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7471, 7479), False, 'import os\n'), ((7490, 7506), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (7498, 7506), False, 'import os\n'), ((880, 913), 'astropy.table.Table.read', 'Table.read', (['templates_file'], {'hdu': '(2)'}), '(templates_file, hdu=2)\n', (890, 913), False, 'from astropy.table import Table, vstack, hstack\n'), ((1306, 1329), 'os.path.exists', 'os.path.exists', (['outyaml'], {}), '(outyaml)\n', (1320, 1329), False, 'import os\n'), ((1560, 1585), 'numpy.where', 'np.where', (['(wave_lin < 7000)'], {}), '(wave_lin < 7000)\n', (1568, 1585), True, 'import numpy as np\n'), ((1699, 1713), 'misc.snr', 'misc.snr', (['flux'], {}), '(flux)\n', (1707, 1713), False, 'import misc\n'), ((1973, 2029), 'ppxf.ppxf_util.log_rebin', 'ppxf_util.log_rebin', (['wave_range', 'flux'], {'velscale': 'velscale'}), '(wave_range, flux, velscale=velscale)\n', (1992, 2029), False, 'from ppxf import ppxf_util\n'), ((2900, 2931), 'numpy.intersect1d', 'np.intersect1d', (['goodpixels', 'sky'], {}), '(goodpixels, sky)\n', (2914, 2931), True, 'import numpy as np\n'), ((6822, 6834), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (6831, 6834), False, 'import yaml\n'), ((7413, 7432), 'os.path.split', 'os.path.split', (['wdir'], {}), '(wdir)\n', (7426, 7432), False, 'import os\n'), ((3198, 3218), 'numpy.isfinite', 'np.isfinite', (['fluxerr'], {}), '(fluxerr)\n', (3209, 3218), True, 'import numpy as np\n'), ((3256, 3273), 'numpy.isfinite', 'np.isfinite', (['flux'], {}), '(flux)\n', (3267, 3273), True, 'import numpy as np\n'), ((3454, 3476), 'astropy.constants.c.to', 'constants.c.to', (['"""km/s"""'], {}), "('km/s')\n", (3468, 3476), False, 'from astropy import constants\n'), ((6447, 6464), 'os.listdir', 'os.listdir', (['direc'], {}), '(direc)\n', (6457, 6464), False, 'import os\n'), ((6767, 6793), 'os.path.join', 'os.path.join', (['direc', 'fname'], {}), '(direc, fname)\n', (6779, 6793), False, 'import os\n'), ((7539, 7554), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (7549, 7554), False, 'import os\n'), ((2813, 2865), 'numpy.argwhere', 'np.argwhere', (['((wave < line - 10) | (wave > line + 10))'], {}), '((wave < line - 10) | (wave > line + 10))\n', (2824, 2865), True, 'import numpy as np\n'), ((3024, 3041), 'numpy.isfinite', 'np.isfinite', (['flux'], {}), '(flux)\n', (3035, 3041), True, 'import numpy as np\n'), ((3105, 3125), 'numpy.isfinite', 'np.isfinite', (['fluxerr'], {}), '(fluxerr)\n', (3116, 3125), True, 'import numpy as np\n'), ((3585, 3600), 'numpy.zeros', 'np.zeros', (['nssps'], {}), '(nssps)\n', (3593, 3600), True, 'import numpy as np\n'), ((3602, 3617), 'numpy.arange', 'np.arange', (['ngas'], {}), '(ngas)\n', (3611, 3617), True, 'import numpy as np\n')]
from math import exp import cv2 as cv import numpy as np from concurrent.futures import ProcessPoolExecutor from numba import jit from numpy import float32 from tqdm import tqdm from utils import ( get_region_indexes, get_region_centers, associate_index_to_centers, get_window, ) @jit def P(v): return v / 255 @jit def deltaIx(img, channel, x, y): res = 0 if x + 1 < img.shape[0] and y < img.shape[1]: res = abs(img[x + 1][y][channel] - img[x][y][channel]) else: res = 0 return res @jit def deltaIy(img, channel, x, y): res = 0 if y - 1 > 0 and x < img.shape[0]: res = abs(img[x][y - 1][channel] - img[x][y][channel]) else: res = 0 return res def getDetailsRegions(imgs): region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10) M = [] for i in range(len(imgs)): M.append([]) for j in tqdm(range(region_indexes.shape[0])): M_B = 0 M_G = 0 M_R = 0 for x in range(region_indexes[j][0][0], region_indexes[j][0][1]): for y in range(region_indexes[j][1][0], region_indexes[j][1][1]): M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y))) M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y))) M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y))) M[i].append([M_B, M_G, M_R]) return np.array(M), region_indexes def joinBestRegions(imgs, M, region_indexes): res = np.zeros(imgs[0].shape) for channel_indx in range(3): for r_indx in tqdm(range(M.shape[1])): # iterate over each region max_r = {} for i in range(len(imgs)): max_r[np.sum(M[i][r_indx])] = i index_image = max_r[max(max_r)] for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]): for j in range( region_indexes[r_indx][1][0], region_indexes[r_indx][1][1] ): res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx] return res @jit def U(x_c_reg, y_c_reg, x_c, y_c): epsilon = 2 return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon @jit def exp_g(x, y, x_c, y_c) -> float: sigma_x = 100 sigma_y = 100 return exp( -((((x - x_c) ** 2) / (2 * sigma_x)) + (((y - y_c) ** 2) / (2 * sigma_y))) ) @jit def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes): num = exp_g(x, y, x_c, y_c) den = 0.0 for i in range(center_indexes.shape[0]): den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1]) den *= center_indexes.shape[0] return num / den def compute_channel(channel, region_indexes, center_indexes, map_px_center): center_indexes = np.float32(center_indexes) res = np.zeros(shape=channel.shape, dtype=float32) for x in tqdm(range(res.shape[0])): for y in range(res.shape[1]): window = get_window(x, y, channel, 5) # WINDOW VERSION for i in range(window[0][0], window[0][1]): for j in range(window[1][0], window[1][1]): # for i in range(res.shape[0]): # for j in range(res.shape[1]): add = 0 if U( map_px_center[(i, j)][0], map_px_center[(i, j)][1], map_px_center[(x, y)][0], map_px_center[(x, y)][1], ): add = 1 add *= gaussianBlendingFunction( map_px_center[(x, y)][0], map_px_center[(x, y)][1], map_px_center[(i, j)][0], map_px_center[(i, j)][1], region_indexes, center_indexes, ) add *= channel[x][y] res[x][y] += add return res def blend(img, regions_indexes): centers_indexes = get_region_centers(regions_indexes) pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes) b, g, r = cv.split(img) with ProcessPoolExecutor() as excecutor: proc1 = excecutor.submit( compute_channel, b, regions_indexes, centers_indexes, pixel_region_center ) proc2 = excecutor.submit( compute_channel, g, regions_indexes, centers_indexes, pixel_region_center 
) proc3 = excecutor.submit( compute_channel, r, regions_indexes, centers_indexes, pixel_region_center ) b = proc1.result() g = proc2.result() r = proc3.result() return cv.merge((b, g, r)) def compute(imgs): for i in range(len(imgs)): imgs[i] = np.float32(imgs[i]) M, regions_indexes = getDetailsRegions(imgs) res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes) res = res / np.amax(res) res = 255 * res return res
[ "math.exp", "utils.get_region_centers", "utils.get_region_indexes", "numpy.sum", "utils.associate_index_to_centers", "numpy.float32", "concurrent.futures.ProcessPoolExecutor", "numpy.zeros", "utils.get_window", "numpy.amax", "cv2.split", "numpy.array", "cv2.merge" ]
[((787, 845), 'utils.get_region_indexes', 'get_region_indexes', (['imgs[0].shape[0]', 'imgs[0].shape[1]', '(10)'], {}), '(imgs[0].shape[0], imgs[0].shape[1], 10)\n', (805, 845), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((1586, 1609), 'numpy.zeros', 'np.zeros', (['imgs[0].shape'], {}), '(imgs[0].shape)\n', (1594, 1609), True, 'import numpy as np\n'), ((2413, 2484), 'math.exp', 'exp', (['(-((x - x_c) ** 2 / (2 * sigma_x) + (y - y_c) ** 2 / (2 * sigma_y)))'], {}), '(-((x - x_c) ** 2 / (2 * sigma_x) + (y - y_c) ** 2 / (2 * sigma_y)))\n', (2416, 2484), False, 'from math import exp\n'), ((2913, 2939), 'numpy.float32', 'np.float32', (['center_indexes'], {}), '(center_indexes)\n', (2923, 2939), True, 'import numpy as np\n'), ((2950, 2994), 'numpy.zeros', 'np.zeros', ([], {'shape': 'channel.shape', 'dtype': 'float32'}), '(shape=channel.shape, dtype=float32)\n', (2958, 2994), True, 'import numpy as np\n'), ((4215, 4250), 'utils.get_region_centers', 'get_region_centers', (['regions_indexes'], {}), '(regions_indexes)\n', (4233, 4250), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((4277, 4337), 'utils.associate_index_to_centers', 'associate_index_to_centers', (['regions_indexes', 'centers_indexes'], {}), '(regions_indexes, centers_indexes)\n', (4303, 4337), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((4352, 4365), 'cv2.split', 'cv.split', (['img'], {}), '(img)\n', (4360, 4365), True, 'import cv2 as cv\n'), ((4884, 4903), 'cv2.merge', 'cv.merge', (['(b, g, r)'], {}), '((b, g, r))\n', (4892, 4903), True, 'import cv2 as cv\n'), ((1500, 1511), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (1508, 1511), True, 'import numpy as np\n'), ((4376, 4397), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (4395, 4397), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((4975, 4994), 'numpy.float32', 'np.float32', (['imgs[i]'], {}), '(imgs[i])\n', (4985, 4994), True, 'import numpy as np\n'), ((5139, 5151), 'numpy.amax', 'np.amax', (['res'], {}), '(res)\n', (5146, 5151), True, 'import numpy as np\n'), ((3094, 3122), 'utils.get_window', 'get_window', (['x', 'y', 'channel', '(5)'], {}), '(x, y, channel, 5)\n', (3104, 3122), False, 'from utils import get_region_indexes, get_region_centers, associate_index_to_centers, get_window\n'), ((1803, 1823), 'numpy.sum', 'np.sum', (['M[i][r_indx]'], {}), '(M[i][r_indx])\n', (1809, 1823), True, 'import numpy as np\n')]
""" 2018, University of Freiburg. <NAME> <<EMAIL>> """ import os import argparse import pickle import numpy as np import re from sklearn.metrics import accuracy_score, precision_score, recall_score from concept_neuron import split_train_valid_test, process_sentence_pos_tags from concept_neuron import print_pos_tag_statistics, compute_LSTM_states # hidden_states or cell_states of LSTMs state_type = 'cell_states' # List of concepts to analyse - Upenn POS tags # http://www.nltk.org/api/nltk.tag.html # To find the available POS tags: # import nltk.help; nltk.help.upenn_tagset() concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD', 'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB'] concepts.extend(['SPACE', 'OTHER']) def concept_neurons_accuracy(args): """ Computes the accuracy for various logistic regression classifiers for different POS tags, as a multiclass classifier. Args: args (argparse): arguments. Returns: None. """ # Directory with LSTM model. save_dir = args.save_dir # Folder to save results. if not os.path.isdir(args.results_dir): os.makedirs(args.results_dir) results_dir = args.results_dir # Data to analyse. input_file = args.data_file # Get training data, tokenize and POS tag sentences. # X holds the sentences (word1, word2, ...) # Y holds the corresponding ((word1, tag1), (word2, tags), ...) X, Y = process_sentence_pos_tags(input_file, args.group_tags) # Set the concepts to the whole set if no grouping is required. unique_tags, counts = np.unique([y[1] for sublist in Y for y in sublist], return_counts=True) if not args.group_tags: global concepts concepts = unique_tags # Print some statistics about the initial distribution of POS tags. print_pos_tag_statistics(unique_tags, counts) # Computes the LSTM state for each byte in X. X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y) # Compute the overall metrics for the logistic regression classifiers. print('\n-----> Test results') classifiers_id = ['all', 'top1', 'top2', 'top3'] for classifier_id in classifiers_id: print('\n- {}'.format(classifier_id)) concept_classifiers = [] predicted_probs = [] classes = [] for concept in concepts: lr_file = os.path.join( results_dir, 'log_reg_model_' + concept + '_' + classifier_id + '.sav') if not os.path.exists(lr_file): continue concept_classifiers.append(concept) lr_model = pickle.load(open(lr_file, 'rb')) classes.append(lr_model.classes_[0]) # Largest coefficients lr_file_all = os.path.join( results_dir, 'log_reg_model_' + concept + '_all.sav') coef_sorted = np.argsort(-np.abs(np.squeeze( pickle.load(open(lr_file_all, 'rb')).coef_))) x = re.search(r'^top(?P<k>\d)$', classifier_id) if x is None: # all weights X_t_ = X_t else: # top k weights k = int(x.group('k')) X_t_ = [x[coef_sorted[0:k]] for x in X_t] trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_, X_t_pos_tags) predicted_probs.append(lr_model.predict_proba(teX)[:, 0]) # Find the class with largest predicted probability. concept_classifiers = np.array(concept_classifiers) predicted_probs = np.array(predicted_probs) max_prob_ind = np.argmax(predicted_probs, axis=0) pred_classes = concept_classifiers[max_prob_ind].tolist() y_true, y_pred = teY, pred_classes print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred))) print('Test precision: {:.3f}'.format( precision_score(y_true, y_pred, average='weighted'))) print('Test recall: {:.3f}'.format( recall_score(y_true, y_pred, average='weighted'))) if __name__ == '__main__': """ Parse CLI arguments. 
""" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--save_dir', type=str, default='../byte_LSTM_trained_models/wikitext/save/95/', help='directory containing LSTM-model') parser.add_argument('--data_file', type=str, default=None, help="""file to use as input to the classifier. If no file is provided, the nltk.corpus.treebank is used """) parser.add_argument('--results_dir', type=str, default='results', help='directory with saved classifiers') parser.add_argument('--group_tags', action='store_true', help="""group all VB* tags into VB; JJ* into JJ; NN* into NN; NNP* into NNP; RB* into RB. """) args = parser.parse_args() concept_neurons_accuracy(args)
[ "concept_neuron.print_pos_tag_statistics", "argparse.ArgumentParser", "os.makedirs", "numpy.argmax", "os.path.isdir", "sklearn.metrics.accuracy_score", "os.path.exists", "concept_neuron.split_train_valid_test", "sklearn.metrics.recall_score", "numpy.array", "re.search", "concept_neuron.process_sentence_pos_tags", "sklearn.metrics.precision_score", "os.path.join", "concept_neuron.compute_LSTM_states", "numpy.unique" ]
[((1446, 1500), 'concept_neuron.process_sentence_pos_tags', 'process_sentence_pos_tags', (['input_file', 'args.group_tags'], {}), '(input_file, args.group_tags)\n', (1471, 1500), False, 'from concept_neuron import split_train_valid_test, process_sentence_pos_tags\n'), ((1596, 1667), 'numpy.unique', 'np.unique', (['[y[1] for sublist in Y for y in sublist]'], {'return_counts': '(True)'}), '([y[1] for sublist in Y for y in sublist], return_counts=True)\n', (1605, 1667), True, 'import numpy as np\n'), ((1864, 1909), 'concept_neuron.print_pos_tag_statistics', 'print_pos_tag_statistics', (['unique_tags', 'counts'], {}), '(unique_tags, counts)\n', (1888, 1909), False, 'from concept_neuron import print_pos_tag_statistics, compute_LSTM_states\n'), ((1985, 2020), 'concept_neuron.compute_LSTM_states', 'compute_LSTM_states', (['save_dir', 'X', 'Y'], {}), '(save_dir, X, Y)\n', (2004, 2020), False, 'from concept_neuron import print_pos_tag_statistics, compute_LSTM_states\n'), ((4222, 4301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (4245, 4301), False, 'import argparse\n'), ((1099, 1130), 'os.path.isdir', 'os.path.isdir', (['args.results_dir'], {}), '(args.results_dir)\n', (1112, 1130), False, 'import os\n'), ((1140, 1169), 'os.makedirs', 'os.makedirs', (['args.results_dir'], {}), '(args.results_dir)\n', (1151, 1169), False, 'import os\n'), ((3590, 3619), 'numpy.array', 'np.array', (['concept_classifiers'], {}), '(concept_classifiers)\n', (3598, 3619), True, 'import numpy as np\n'), ((3646, 3671), 'numpy.array', 'np.array', (['predicted_probs'], {}), '(predicted_probs)\n', (3654, 3671), True, 'import numpy as np\n'), ((3695, 3729), 'numpy.argmax', 'np.argmax', (['predicted_probs'], {'axis': '(0)'}), '(predicted_probs, axis=0)\n', (3704, 3729), True, 'import numpy as np\n'), ((2410, 2498), 'os.path.join', 'os.path.join', (['results_dir', "('log_reg_model_' + concept + '_' + classifier_id + '.sav')"], {}), "(results_dir, 'log_reg_model_' + concept + '_' + classifier_id +\n '.sav')\n", (2422, 2498), False, 'import os\n'), ((2812, 2878), 'os.path.join', 'os.path.join', (['results_dir', "('log_reg_model_' + concept + '_all.sav')"], {}), "(results_dir, 'log_reg_model_' + concept + '_all.sav')\n", (2824, 2878), False, 'import os\n'), ((3032, 3075), 're.search', 're.search', (['"""^top(?P<k>\\\\d)$"""', 'classifier_id'], {}), "('^top(?P<k>\\\\d)$', classifier_id)\n", (3041, 3075), False, 'import re\n'), ((3319, 3361), 'concept_neuron.split_train_valid_test', 'split_train_valid_test', (['X_t_', 'X_t_pos_tags'], {}), '(X_t_, X_t_pos_tags)\n', (3341, 3361), False, 'from concept_neuron import split_train_valid_test, process_sentence_pos_tags\n'), ((2547, 2570), 'os.path.exists', 'os.path.exists', (['lr_file'], {}), '(lr_file)\n', (2561, 2570), False, 'import os\n'), ((3886, 3916), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3900, 3916), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((3978, 4029), 'sklearn.metrics.precision_score', 'precision_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, average='weighted')\n", (3993, 4029), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n'), ((4088, 4136), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, 
average='weighted')\n", (4100, 4136), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score\n')]
import numpy as np import random as rnd import pdb def dist(loc1,loc2): return np.sqrt((loc1[0]-loc2[0])**2 + (loc2[1]-loc1[1])**2) #### BUG WHEN LEN(x) != LEN(y) class Generate_field(): def __init__(self,a,b,n,x,y,opt=''): self.xlen=len(x) self.ylen=len(y) self.a = a*rnd.uniform(0.7, 1.3) self.b = b*rnd.uniform(0.7, 1.3) self.x = x self.y = y self.n = n self.opt = opt if type(self.n) != list and type(self.n) != tuple: self.eddies = {'eddy_n%s' % ii:{'loc':[[rnd.randint(0,self.xlen-1),\ rnd.randint(0,self.ylen-1)]],'grow':True,\ 'radius':[self.a,self.b],'angle':rnd.uniform(0, 2*np.pi),\ 'amp':rnd.choice([-1,1])*rnd.uniform(0.7, 1.3)} for ii in range(self.n)} else: raise ValueError("No right input.") def go_right(self,indexs,step): return [0,step] def go_upright(self,indexs,step): return [step,step] def go_up(self,indexs,step): return [step,0] def go_upleft(self,indexs,step): return [step,-step] def go_left(self,indexs,step): return [0,-step] def go_downleft(self,indexs,step): return [-step,-step] def go_down(self,indexs,step): return [-step,0] def go_downright(self,indexs,step): return [-step,step] def twoD_Gaussian(self, coords, sigma_x, sigma_y, theta, slopex=0, slopey=0, offset=0): ''' *************** twoD_Gaussian ******************* Build a 2D gaussian. Notes: Remember to do g.ravel().reshape(len(x),len(y)) for plotting purposes. Args: coords [x,y] (list|array): Coordinates in x and y. amplitude (float): Amplitude of gaussian. x0 , yo (float): Center of Gaussian. sigma_x,sigma_y (float): Deviation. theta (Float): Orientation. offset (Float): Gaussian Offset. Returns: g.ravel() (list|array) - Gaussian surface in a list. Usage: Check scan_eddym function. ''' x=coords[0] y=coords[1] amplitude = coords[2] xo = float(coords[3]) yo = float(coords[4]) xo = float(xo) yo = float(yo) if sigma_x != 0 and sigma_y != 0: a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) g = amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2))) else: g = (x-xo)*0 + (y-yo)*0 return g.ravel() def checkposition(self,away_val=5,loc=False): if loc == True: eddies_loc=[[rnd.randint(0,self.xlen-1),rnd.randint(0,self.ylen-1)] for key,item in self.eddies.items()] else: eddies_loc=[item['loc'][-1] for key,item in self.eddies.items()] for key1,item1 in self.eddies.items(): xc1=item1['loc'][0][0] yc1=item1['loc'][0][1] distance=np.array([dist([self.x[xc1],self.y[yc1]],[self.x[ii],self.y[jj]]) for ii,jj in eddies_loc]) distance[distance==0]=away_val*self.a checker = ((distance < away_val*self.a).any() or (distance < away_val*self.b).any() ) or loc==True count = 0 while checker and count < 10000: newx=rnd.randint(0,self.xlen-1) newy=rnd.randint(0,self.ylen-1) self.eddies[key1]['loc']=[[newx, newy]] eddies_loc=[item['loc'][-1] for key,item in self.eddies.items()] #pdb.set_trace() xc1=newx yc1=newy distance=np.array([dist([self.x[xc1],self.y[yc1]],[self.x[ii],self.y[jj]]) for ii,jj in eddies_loc]) numzeros = [ii for ii in distance if ii == 0] if len(numzeros) <= 1: distance[distance==0]=np.inf else: distance[distance==0] = away_val*self.a checker = ((distance < away_val*self.a).any() or (distance < away_val*self.b).any() ) count = count + 1 if loc == True: return self.eddies def make_random_walk(self,indexs, steps): move_dict = { 1: self.go_up, 2: self.go_right, 3: self.go_left, 4: self.go_down, 5: self.go_downleft, 6: self.go_downright, 7:
self.go_upleft, 8: self.go_upright, } #for _ in range(steps): for ii in indexs: move_in_a_direction = move_dict[rnd.randint(1, 8)] movcood=move_in_a_direction(ii,steps) return indexs[0]+movcood[0],indexs[1]+movcood[1] def assemble_field(self, N,margin=50): data=np.zeros((N,self.xlen+2*margin,self.ylen+2*margin)) for t in range(N): #pdb.set_trace() if self.opt == 'no_interaction' or self.opt == 'Nint': self.eddies=self.checkposition(away_val=5,loc=True) else: pass for keys, item in self.eddies.items(): gauss=self.twoD_Gaussian(self.pass_args(keys,margin),item['radius'][0], item['radius'][1], item['angle']).reshape(np.shape(data[0,:,:])) data[t,:,:]=data[t,:,:]+gauss return data def reconstruct_field(self): data=np.zeros((self.xlen,self.ylen)) for keys, item in self.eddies.items(): gauss=self.twoD_Gaussian(self.pass_args(keys),item['radius'][0], item['radius'][1], item['angle']).reshape(np.shape(data)) data=data+gauss return data def pass_args(self,key,margin=50): self.x = np.linspace(min(self.x),max(self.x),self.xlen+2*margin) self.y = np.linspace(min(self.y),max(self.y),self.ylen+2*margin) X,Y=np.meshgrid(self.x,self.y) if self.opt == 'interaction' or self.opt == 'int': xloc=rnd.randint(0,self.xlen-1)+margin yloc=rnd.randint(0,self.ylen-1)+margin eddy_parms=(X,Y,self.eddies[key]['amp'],self.x[xloc],self.y[yloc]) else: eddy_parms=(X,Y,self.eddies[key]['amp'],self.x[self.eddies[key]['loc'][0][0]+margin],self.y[self.eddies[key]['loc'][0][1]+margin]) return eddy_parms
[ "numpy.meshgrid", "random.randint", "random.uniform", "numpy.zeros", "random.choice", "numpy.shape", "numpy.sin", "numpy.exp", "numpy.cos", "numpy.sqrt" ]
[((88, 148), 'numpy.sqrt', 'np.sqrt', (['((loc1[0] - loc2[0]) ** 2 + (loc2[1] - loc1[1]) ** 2)'], {}), '((loc1[0] - loc2[0]) ** 2 + (loc2[1] - loc1[1]) ** 2)\n', (95, 148), True, 'import numpy as np\n'), ((5131, 5192), 'numpy.zeros', 'np.zeros', (['(N, self.xlen + 2 * margin, self.ylen + 2 * margin)'], {}), '((N, self.xlen + 2 * margin, self.ylen + 2 * margin))\n', (5139, 5192), True, 'import numpy as np\n'), ((5730, 5762), 'numpy.zeros', 'np.zeros', (['(self.xlen, self.ylen)'], {}), '((self.xlen, self.ylen))\n', (5738, 5762), True, 'import numpy as np\n'), ((6190, 6217), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (6201, 6217), True, 'import numpy as np\n'), ((308, 329), 'random.uniform', 'rnd.uniform', (['(0.7)', '(1.3)'], {}), '(0.7, 1.3)\n', (319, 329), True, 'import random as rnd\n'), ((349, 370), 'random.uniform', 'rnd.uniform', (['(0.7)', '(1.3)'], {}), '(0.7, 1.3)\n', (360, 370), True, 'import random as rnd\n'), ((2708, 2786), 'numpy.exp', 'np.exp', (['(-(a * (x - xo) ** 2 + 2 * b * (x - xo) * (y - yo) + c * (y - yo) ** 2))'], {}), '(-(a * (x - xo) ** 2 + 2 * b * (x - xo) * (y - yo) + c * (y - yo) ** 2))\n', (2714, 2786), True, 'import numpy as np\n'), ((3609, 3638), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (3620, 3638), True, 'import random as rnd\n'), ((3657, 3686), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (3668, 3686), True, 'import random as rnd\n'), ((4947, 4964), 'random.randint', 'rnd.randint', (['(1)', '(8)'], {}), '(1, 8)\n', (4958, 4964), True, 'import random as rnd\n'), ((5928, 5942), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (5936, 5942), True, 'import numpy as np\n'), ((6293, 6322), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (6304, 6322), True, 'import random as rnd\n'), ((6344, 6373), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (6355, 6373), True, 'import random as rnd\n'), ((715, 740), 'random.uniform', 'rnd.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (726, 740), True, 'import random as rnd\n'), ((2564, 2581), 'numpy.sin', 'np.sin', (['(2 * theta)'], {}), '(2 * theta)\n', (2570, 2581), True, 'import numpy as np\n'), ((2947, 2976), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (2958, 2976), True, 'import random as rnd\n'), ((2974, 3003), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (2985, 3003), True, 'import random as rnd\n'), ((5594, 5617), 'numpy.shape', 'np.shape', (['data[0, :, :]'], {}), '(data[0, :, :])\n', (5602, 5617), True, 'import numpy as np\n'), ((771, 790), 'random.choice', 'rnd.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (781, 790), True, 'import random as rnd\n'), ((790, 811), 'random.uniform', 'rnd.uniform', (['(0.7)', '(1.3)'], {}), '(0.7, 1.3)\n', (801, 811), True, 'import random as rnd\n'), ((2442, 2455), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2448, 2455), True, 'import numpy as np\n'), ((2478, 2491), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2484, 2491), True, 'import numpy as np\n'), ((2529, 2546), 'numpy.sin', 'np.sin', (['(2 * theta)'], {}), '(2 * theta)\n', (2535, 2546), True, 'import numpy as np\n'), ((2613, 2626), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2619, 2626), True, 'import numpy as np\n'), ((2649, 2662), 'numpy.cos', 'np.cos', (['theta'], {}), 
'(theta)\n', (2655, 2662), True, 'import numpy as np\n'), ((562, 591), 'random.randint', 'rnd.randint', (['(0)', '(self.xlen - 1)'], {}), '(0, self.xlen - 1)\n', (573, 591), True, 'import random as rnd\n'), ((615, 644), 'random.randint', 'rnd.randint', (['(0)', '(self.ylen - 1)'], {}), '(0, self.ylen - 1)\n', (626, 644), True, 'import random as rnd\n')]
"""Functions for getting data needed to fit the models.""" import bs4 from datetime import datetime import matplotlib.pyplot as plt import numpy as np import pandas as pd import requests from tqdm import tqdm from typing import Union from urllib.error import HTTPError import urllib.request, json import os from datetime import timedelta, date import pandas as pd pd.options.mode.chained_assignment = None # default='warn' JHU_FILTER_DEFAULTS = {'confirmed': 5, 'recovered': 1, 'deaths': 0} COVIDTRACKER_FILTER_DEFAULTS = {'cum_cases': 5, 'cum_recover': 1, 'cum_deaths': 0} US_STATE_ABBREV = { 'Alabama': 'US_AL', 'Alaska': 'US_AK', 'American Samoa': 'US_AS', 'Arizona': 'US_AZ', 'Arkansas': 'US_AR', 'California': 'US_CA', 'Colorado': 'US_CO', 'Connecticut': 'US_CT', 'Delaware': 'US_DE', 'District of Columbia': 'US_DC', 'Florida': 'US_FL', 'Georgia': 'US_GA', 'Guam': 'US_GU', 'Hawaii': 'US_HI', 'Idaho': 'US_ID', 'Illinois': 'US_IL', 'Indiana': 'US_IN', 'Iowa': 'US_IA', 'Kansas': 'US_KS', 'Kentucky': 'US_KY', 'Louisiana': 'US_LA', 'Maine': 'US_ME', 'Maryland': 'US_MD', 'Massachusetts': 'US_MA', 'Michigan': 'US_MI', 'Minnesota': 'US_MN', 'Mississippi': 'US_MS', 'Missouri': 'US_MO', 'Montana': 'US_MT', 'Nebraska': 'US_NE', 'Nevada': 'US_NV', 'New Hampshire': 'US_NH', 'New Jersey': 'US_NJ', 'New Mexico': 'US_NM', 'New York': 'US_NY', 'North Carolina': 'US_NC', 'North Dakota': 'US_ND', 'Northern Mariana Islands':'US_MP', 'Ohio': 'US_OH', 'Oklahoma': 'US_OK', 'Oregon': 'US_OR', 'Pennsylvania': 'US_PA', 'Puerto Rico': 'US_PR', 'Rhode Island': 'US_RI', 'South Carolina': 'US_SC', 'South Dakota': 'US_SD', 'Tennessee': 'US_TN', 'Texas': 'US_TX', 'Utah': 'US_UT', 'Vermont': 'US_VT', 'Virgin Islands': 'US_VI', 'Virginia': 'US_VA', 'Washington': 'US_WA', 'West Virginia': 'US_WV', 'Wisconsin': 'US_WI', 'Wyoming': 'US_WY' } def get_jhu(data_path: str, filter_: Union[dict, bool] = True) -> None: """Gets data from Johns Hopkins CSSEGIS (countries only). https://coronavirus.jhu.edu/map.html https://github.com/CSSEGISandData/COVID-19 Args: data_path (str): Full path to data directory. 
Returns: None """ # Where JHU stores their data url_template = ("https://raw.githubusercontent.com/CSSEGISandData/" "COVID-19/master/csse_covid_19_data/" "csse_covid_19_time_series/time_series_covid19_%s_%s.csv") # Scrape the data dfs = {} for region in ['global', 'US']: dfs[region] = {} for kind in ['confirmed', 'deaths', 'recovered']: url = url_template % (kind, region) # Create the full data URL try: df = pd.read_csv(url) # Download the data into a dataframe except HTTPError: print("Could not download data for %s, %s" % (kind, region)) else: if region == 'global': has_no_province = df['Province/State'].isnull() # Whole countries only; use country name as index df1 = df[has_no_province].set_index('Country/Region') more_dfs = [] for country in ['China', 'Canada', 'Australia']: if country == 'Canada' and kind in 'recovered': continue is_c = df['Country/Region'] == country df2 = df[is_c].sum(axis=0, skipna=False).to_frame().T df2['Country/Region'] = country df2 = df2.set_index('Country/Region') more_dfs.append(df2) df = pd.concat([df1] + more_dfs) elif region == 'US': # Use state name as index # for k, v in US_STATE_ABBREV.items(): # get US state abbrev # if not US_STATE_ABBREV[k].startswith('US_'): # US_STATE_ABBREV[k] = 'US_' + v # Add 'US_' to abbrev df.replace(US_STATE_ABBREV, inplace=True) df = df.set_index('Province_State') df = df.groupby('Province_State').sum() # combine counties to create state level data df = df[[x for x in df if any(year in x for year in ['20', '21'])]] # Use only data columns # 20 or 21 signifies 2020 or 2021 dfs[region][kind] = df # Add to dictionary of dataframes # Generate a list of countries that have "good" data, # according to these criteria: good_countries = get_countries(dfs['global'], filter_=filter_) # For each "good" country, # reformat and save that data in its own .csv file. 
source = dfs['global'] for country in tqdm(good_countries, desc='Countries'): # For each country if country in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'Samoa', 'Vanuatu', 'Marshall Islands', 'US', 'Micronesia','Kiribati']: print("Skipping {}".format(country)) continue # If we have data in the downloaded JHU files for that country if country in source['confirmed'].index: df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected']) df['dates2'] = source['confirmed'].columns df['dates2'] = df['dates2'].apply(fix_jhu_dates) df['cum_cases'] = source['confirmed'].loc[country].values df['cum_deaths'] = source['deaths'].loc[country].values df['cum_recover'] = source['recovered'].loc[country].values df[['new_cases', 'new_deaths', 'new_recover']] = \ df[['cum_cases', 'cum_deaths', 'cum_recover']].diff() df['new_uninfected'] = df['new_recover'] + df['new_deaths'] try: population = get_population_count(data_path, country) df['population'] = population except: pass # Fill NaN with 0 and convert to int dfs[country] = df.set_index('dates2').fillna(0).astype(int) dfs[country].to_csv(data_path / ('covidtimeseries_%s.csv' % country)) else: print("No data for %s" % country) source = dfs['US'] states = source['confirmed'].index.tolist() us_recovery_data = covid_tracking_recovery(data_path) for state in tqdm(states, desc='US States'): # For each country if state in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'US_AS']: print("Skipping {}".format(state)) continue # If we have data in the downloaded JHU files for that country if state in source['confirmed'].index: df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths', 'new_cases','new_deaths','new_uninfected']) df['dates2'] = source['confirmed'].columns df['dates2'] = df['dates2'].apply(fix_jhu_dates) df['cum_cases'] = source['confirmed'].loc[state].values df['cum_deaths'] = source['deaths'].loc[state].values df[['new_cases', 'new_deaths']] = df[['cum_cases', 'cum_deaths']].diff() # add recovery data df.set_index('dates2', inplace=True) df = df.merge(us_recovery_data[state], on='dates2', how='left') df['tmp_new_recover'] = df['new_recover'].fillna(0).astype(int) # create temp new recover for df['new_uninfected'] = df['tmp_new_recover'] + df['new_deaths'] # new uninfected calculation df = df.fillna(-1).astype(int) df = df.drop(['tmp_new_recover'], axis=1) try: population = get_population_count(data_path, state) df['population'] = population except: pass dfs[state] = df dfs[state].to_csv(data_path / ('covidtimeseries_%s.csv' % state)) else: print("No data for %s" % state) def fix_jhu_dates(x): y = datetime.strptime(x, '%m/%d/%y') return datetime.strftime(y, '%m/%d/%y') def fix_ct_dates(x): return datetime.strptime(str(x), '%Y%m%d') def get_countries(d: pd.DataFrame, filter_: Union[dict, bool] = True): """Get a list of countries from a global dataframe optionally passing a quality check Args: d (pd.DataFrame): Data from JHU tracker (e.g. df['global]). filter (bool, optional): Whether to filter by quality criteria. 
""" good = set(d['confirmed'].index) if filter_ and not isinstance(filter_, dict): filter_ = JHU_FILTER_DEFAULTS if filter_: for key, minimum in filter_.items(): enough = d[key].index[d[key].max(axis=1) >= minimum].tolist() good = good.intersection(enough) bad = set(d['confirmed'].index).difference(good) # print("JHU data acceptable for %s" % ','.join(good)) # print("JHU data not acceptable for %s" % ','.join(bad)) return good def get_population_count(data_path:str, roi): """ Check if we have population count for roi and add to timeseries df if we do. Args: data_path (str): Full path to data directory. roi (str): Region. Returns: population (int): Population count for ROI (if exists). """ try: # open population file df_pop = pd.read_csv(data_path / 'population_estimates.csv') except: print("Missing population_estimates.csv in data-path") try: population = df_pop.query('roi == "{}"'.format(roi))['population'].values except: print("{} population estimate not found in population_estimates.csv".format(args.roi)) return int(population) def covid_tracking_recovery(data_path: str): """Gets archived US recovery data from The COVID Tracking Project. https://covidtracking.com Args: data_path (str): Full path to data directory. Returns: ctp_dfs (dict): Dictionary containing US States (keys) and dataframes containing dates, recovery data (values). """ archived_data = data_path / 'covid-tracking-project-recovery.csv' df_raw = pd.read_csv(archived_data) states = df_raw['state'].unique() ctp_dfs = {} for state in states: # For each country source = df_raw[df_raw['state'] == state] # Only the given state df = pd.DataFrame(columns=['dates2','cum_recover','new_recover']) df['dates2'] = source['date'].apply(fix_ct_dates) # Convert date format # first check if roi reports recovery data as recovered if source['recovered'].isnull().all() == False: df['cum_recover'] = source['recovered'].values # check if roi reports recovery data as hospitalizedDischarged elif source['hospitalizedDischarged'].isnull().all() == False: df['cum_recover'] = source['hospitalizedDischarged'].values else: df['cum_recover'] = np.NaN df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string df = df.set_index('dates2') # Convert to int df['new_recover'] = df['cum_recover'].diff() ctp_dfs['US_'+state] = df return ctp_dfs def get_canada(data_path: str, filter_: Union[dict, bool] = True, fixes: bool = False) -> None: """ Gets data from Canada's Open Covid group for Canadian Provinces. 
https://opencovid.ca/ """ dfs = [] # we will append dfs for cases, deaths, recovered here # URL for API call to get Province-level timeseries data starting on Jan 22 2020 url_template = 'https://api.opencovid.ca/timeseries?stat=%s&loc=prov&date=01-22-2020' for kind in ['cases', 'mortality', 'recovered']: url_path = url_template % kind # Create the full data URL with urllib.request.urlopen(url_path) as url: data = json.loads(url.read().decode()) source = pd.json_normalize(data[kind]) if kind == 'cases': source.drop('cases', axis=1, inplace=True) # removing this column so # we can index into date on all 3 dfs at same position source.rename(columns={source.columns[1]: "date" }, inplace=True) dfs.append(source) cases = dfs[0] deaths = dfs[1] recovered = dfs[2] # combine dfs df_rawtemp = cases.merge(recovered, on=['date', 'province'], how='outer') df_raw = df_rawtemp.merge(deaths, on=['date', 'province'], how='outer') df_raw.fillna(0, inplace=True) provinces = ['Alberta', 'BC', 'Manitoba', 'New Brunswick', 'NL', 'Nova Scotia', 'Nunavut', 'NWT', 'Ontario', 'PEI', 'Quebec', 'Saskatchewan', 'Yukon'] # Export timeseries data for each province for province in tqdm(provinces, desc='Canadian Provinces'): source = df_raw[df_raw['province'] == province] # Only the given province df = pd.DataFrame(columns=['dates2','cum_cases', 'cum_deaths', 'cum_recover', 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected']) df['dates2'] = source['date'].apply(fix_canada_dates) # Convert date format df['cum_cases'] = source['cumulative_cases'].values df['cum_deaths'] = source['cumulative_deaths'].values df['cum_recover'] = source['cumulative_recovered'].values df[['new_cases', 'new_deaths', 'new_recover']] = \ df[['cum_cases', 'cum_deaths', 'cum_recover']].diff() df['new_uninfected'] = df['new_recover'] + df['new_deaths'] try: population = get_population_count(data_path, 'CA_' + province) df['population'] = population except: pass df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string df = df.set_index('dates2').fillna(0).astype(int) # Fill NaN with 0 and convert to int df.to_csv(data_path / ('covidtimeseries_CA_%s.csv' % province)) def fix_canada_dates(x): return datetime.strptime(x, '%d-%m-%Y') def get_brazil(data_path: str, filter_: Union[dict, bool] = True, fixes: bool = False) -> None: """ Get state-level data for Brazil. 
https://github.com/wcota/covid19br (<NAME>) """ url = "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv" try: df_raw = pd.read_csv(url) except HTTPError: print("Could not download state-level data for Brazil") state_code = {'AC':'Acre', 'AL':'Alagoas', 'AM':'Amazonas', 'AP':'Amapa', 'BA':'Bahia','CE':'Ceara', 'DF':'Distrito Federal', 'ES':'Espirito Santo', 'GO':'Goias', 'MA':'Maranhao', 'MG':'Minas Gerais', 'MS':'Mato Grosso do Sul', 'MT':'Mato Grosso', 'PA':'Para', 'PB':'Paraiba', 'PE':'Pernambuco', 'PI':'Piaui', 'PR':'Parana', 'RJ':'Rio de Janeiro', 'RN':'Rio Grande do Norte', 'RO':'Rondonia', 'RR':'Roraima', 'RS':'Rio Grande do Sul', 'SC':'Santa Catarina', 'SE':'Sergipe', 'SP':'Sao Paulo', 'TO':'Tocantins'} for state in tqdm(state_code, desc='Brazilian States'): source = df_raw[df_raw['state'] == state] # Only the given province df = pd.DataFrame(columns=['dates2','cum_cases', 'cum_deaths', 'cum_recover', 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected']) df['dates2'] = source['date'] df['cum_cases'] = source['totalCases'].values df['cum_deaths'] = source['deaths'].values df['cum_recover'] = source['recovered'].values df['new_cases'] = source['newCases'].values df['new_deaths'] = source['newDeaths'].values df['new_recover'] = df['cum_recover'].diff() df['new_uninfected'] = df['new_recover'] + df['new_deaths'] try: roi = 'BR_' + state_code[state] population = get_population_count(data_path, roi) df['population'] = population except: print("Could not add population data for {}".format(state)) pass df.sort_values(by=['dates2'], inplace=True) # sort by datetime obj before converting to string df['dates2'] = pd.to_datetime(df['dates2']).dt.strftime('%m/%d/%y') # convert dates to string df = df.set_index('dates2').fillna(0).astype(int) # Fill NaN with 0 and convert to int df.to_csv(data_path / ('covidtimeseries_BR_%s.csv' % state_code[state])) def get_owid_tests(data_path: str, filter_: Union[dict, bool] = True, fixes: bool = False) -> None: """ Get testing data from Our World In Data https://github.com/owid/covid-19-data Add columns cum_tests and new_tests to csvs in data_path. 
""" url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv' src = pd.read_csv(url) roi_codes = pd.read_csv(data_path / 'country_iso_codes.csv') roi_codes_dict = pd.Series(roi_codes.Country.values,index=roi_codes['Alpha-3 code']).to_dict() # trim down source dataframe src_trim = pd.DataFrame(columns=['dates2','Alpha-3 code','cum_tests']) src_trim['dates2'] = src['Date'].apply(fix_owid_dates).values # fix dates src_trim['Alpha-3 code'] = src['ISO code'].values src_trim['cum_tests'] = src['Cumulative total'].fillna(-1).astype(int).values src_trim.set_index('dates2',inplace=True, drop=True) src_rois = src_trim['Alpha-3 code'].unique() unavailable_testing_data = [] # for appending rois that don't have testing data for roi in roi_codes_dict: if roi not in src_rois: unavailable_testing_data.append(roi) continue if roi_codes_dict[roi] in ["US", "Marshall Islands", "Micronesia", "Samoa", "Vanuatu"]: # skipping because bad data continue try: timeseries_path = data_path / ('covidtimeseries_%s.csv' % roi_codes_dict[roi]) df_timeseries = pd.read_csv(timeseries_path, index_col='dates2') except FileNotFoundError as fnf_error: print(fnf_error, 'Could not add OWID data.') pass for i in df_timeseries.columns: # Check if OWID testng data already included if 'tests' in i: df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new src_roi = src_trim[src_trim['Alpha-3 code'] == roi] # filter rows that match roi df_combined = df_timeseries.merge(src_roi[['cum_tests']], how='left', on='dates2') df_combined['new_tests'] = df_combined['cum_tests'].diff() df_combined.loc[df_combined['new_tests'] < 0, 'new_tests'] = -1 # Handle cases where # cumulative counts decrease and new_tests becomes a large negative number df_combined[['cum_tests', 'new_tests']] = df_combined[['cum_tests', 'new_tests']].fillna(-1).astype(int).values df_combined = df_combined.loc[:, ~df_combined.columns.str.contains('^Unnamed')] df_combined.to_csv(timeseries_path) # overwrite timeseries CSV print("OWID global test results missing for: ") for roi in roi_codes_dict: if roi in unavailable_testing_data: print(roi_codes_dict[roi], end=" ") print("") def get_owid_global_vaccines(data_path: str, filter_: Union[dict, bool] = True, fixes: bool = False) -> None: """ Get global vaccines data from Our World In Data https://github.com/owid/covid-19-data Add columns to global csvs in data_path. 
""" url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv' src = pd.read_csv(url) src_trim = pd.DataFrame(columns=['dates2', 'Alpha-3 code', 'cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated', 'cum_people_fully_vaccinated']) src_trim['dates2'] = src['date'].apply(fix_owid_dates).values # fix dates src_trim['Alpha-3 code'] = src['iso_code'].values src_trim['cum_vaccinations'] = src['total_vaccinations'].values src_trim['daily_vaccinations'] = src['daily_vaccinations'].values src_trim['cum_people_vaccinated'] = src['people_vaccinated'].values src_trim['cum_people_fully_vaccinated'] = src['people_fully_vaccinated'].values roi_codes = pd.read_csv(data_path / 'country_iso_codes.csv') roi_codes_dict = pd.Series(roi_codes.Country.values,index=roi_codes['Alpha-3 code']).to_dict() # trim down source dataframe src_trim.set_index('dates2',inplace=True, drop=True) src_rois = src_trim['Alpha-3 code'].unique() unavailable_testing_data = [] # for appending rois that don't have testing data for roi in roi_codes_dict: if roi not in src_rois: unavailable_testing_data.append(roi) continue if roi_codes_dict[roi] in ["US", "Marshall Islands", "Micronesia", "Samoa", "Vanuatu"]: # skipping because no data continue try: timeseries_path = data_path / ('covidtimeseries_%s.csv' % roi_codes_dict[roi]) df_timeseries = pd.read_csv(timeseries_path, index_col='dates2') except FileNotFoundError as fnf_error: print(fnf_error, 'Could not add OWID global vaccines data.') pass for i in df_timeseries.columns: # Check if OWID testing data already included if 'vaccin' in i: df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new src_roi = src_trim[src_trim['Alpha-3 code'] == roi] # filter rows that match roi df_combined = df_timeseries.merge(src_roi[['cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated', 'cum_people_fully_vaccinated']], how='left', on='dates2') cum_vacc_columns = ['vaccinations', 'people_vaccinated', 'people_fully_vaccinated'] df = dummy_cumulative_new_counts(roi_codes_dict[roi], df_combined, cum_vacc_columns) df = df.loc[:, ~df.columns.str.contains('^Unnamed')] df.to_csv(timeseries_path) # overwrite timeseries CSV print("OWID global vaccine results missing for: ") for roi in roi_codes_dict: if roi in unavailable_testing_data: print(roi_codes_dict[roi], end=" ") print("") def dummy_cumulative_new_counts(roi, df, columns: list): """ There are cases where cum counts go missing and new counts get missed. New counts spike when cumulative counts go to -1 for missing data and the difference is taken between a new cumulative count and -1. We don't want it to spike, and we don't want to miss new counts before the gap. So create a dummy dataframe with forward filled cumulative counts and perform new cases calculation, then merge those new cases back into dataframe. Args: roi (str): Region we are working with; used for print statements. df (pd.DataFrame): DataFrame containing counts but not new counts. columns (list): List of columns (without cum_ prefix) so create new counts for. Returns: df_fixed (pd.DataFrame): DataFrame containing cumulative and now new counts. 
""" dfs = [] df_tmp = df.copy() df_tmp.reset_index(inplace=True) for col in columns: cum_col = 'cum_' + col dummy_cum_col = 'dummy_' + cum_col new_col = 'new_' + col try: start = df_tmp[df_tmp[cum_col] > 0].index.values[0] df_ffill = df_tmp.iloc[start:] df_ffill.set_index('dates2', drop=True, inplace=True) df_ffill[dummy_cum_col] = df_ffill[cum_col].ffill().astype(int).values df_ffill[new_col] = df_ffill[dummy_cum_col].diff().astype('Int64') # If cumulative counts are missing, set new counts to -1 so they don't become 0. df_ffill.loc[df_ffill[cum_col].isnull(), new_col] = -1 except: print(f'No {cum_col} data to add for {roi}.') df_ffill[new_col] = -1 df_ffill = df_ffill[~df_ffill.index.duplicated()] # fix duplication issue dfs.append(df_ffill[new_col]) df_new = pd.concat(dfs, axis=1) df_new = df_new.fillna(-1).astype(int) df_fixed = df.join(df_new) df_fixed = df_fixed.fillna(-1).astype(int) return df_fixed def get_owid_us_vaccines(data_path: str, filter_: Union[dict, bool] = True, fixes: bool = False) -> None: """ Get US vaccines data from Our World In Data https://github.com/owid/covid-19-data Add columns to US csvs in data_path. """ url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv' src = pd.read_csv(url) src_trim = pd.DataFrame(columns=['dates2', 'region', 'cum_vaccinations', 'daily_vaccinations', 'people_vaccinated', 'people_fully_vaccinated']) src_trim['dates2'] = src['date'].apply(fix_owid_dates).values # fix dates src_trim['region'] = src['location'].values src_trim['cum_vaccinations'] = src['total_vaccinations'].values src_trim['daily_vaccinations'] = src['daily_vaccinations'].values src_trim['cum_people_vaccinated'] = src['people_vaccinated'].values src_trim['cum_people_fully_vaccinated'] = src['people_fully_vaccinated'].values src_trim.set_index('dates2', inplace=True, drop=True) src_trim.replace("New York State", "New York", inplace=True) # fix NY name src_rois = src_trim['region'].unique() for roi in src_rois: if roi in US_STATE_ABBREV: try: timeseries_path = data_path / ('covidtimeseries_%s.csv' % US_STATE_ABBREV[roi]) df_timeseries = pd.read_csv(timeseries_path, index_col='dates2') except FileNotFoundError as fnf_error: print(fnf_error, 'Could not add OWID vaccinations data.') pass for i in df_timeseries.columns: # Check if OWID vaccines data already included if 'vaccin' in i: df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new src_roi = src_trim[src_trim['region'] == roi] # filter rows that match roi df_combined = df_timeseries.merge(src_roi[['cum_vaccinations', 'daily_vaccinations', 'cum_people_vaccinated', 'cum_people_fully_vaccinated']], how='left', on='dates2') cum_vacc_columns = ['vaccinations', 'people_vaccinated', 'people_fully_vaccinated'] df = dummy_cumulative_new_counts(US_STATE_ABBREV[roi], df_combined, cum_vacc_columns) df = df.loc[:, ~df.columns.str.contains('^Unnamed')] df.to_csv(timeseries_path) # overwrite timeseries CSV def fix_owid_dates(x): y = datetime.strptime(x, '%Y-%m-%d') return datetime.strftime(y, '%m/%d/%y') def get_jhu_us_states_tests(data_path: str, filter_: Union[dict, bool] = False) -> None: """ Scrape JHU for US State level test results. Data is stored as a collection of CSVs per date containing states and test results. Args: data_path (str): Full path to data directory. 
Returns: None """ url_template = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/%s.csv" # generate a list of dates for scraping start_dt = date(2020, 4, 12) # When JHU starts reporting end_dt = date.today() dates = [] delta = end_dt - start_dt delta = delta.days for dt in daterange(start_dt, end_dt): dates.append(dt.strftime("%m-%d-%Y")) # cumulative tests are named 'People_Tested' for first 200 ish days # then cumulative tests are named 'Total_Test_Results' after 200 ish days dfs = [] for i in tqdm(dates, desc=f'Scraping {delta} days of data across all states'): url = url_template % i try: df = pd.read_csv(url) df_trim = pd.DataFrame(columns=['Province_State', 'cum_tests', 'dates2']) df_trim['Province_State'] = df['Province_State'].values df_trim['dates2'] = fix_jhu_testing_dates(i) # handle cases where column is people_tested and then switches to Total_Test_Results if 'People_Tested' in df.columns: df_trim['cum_tests'] = df['People_Tested'].fillna(-1).astype(int).values dfs.append(df_trim) if 'Total_Test_Results' in df.columns: df_trim['cum_tests'] = df['Total_Test_Results'].fillna(-1).astype(int).values dfs.append(df_trim) except HTTPError: print("Could not download tests data for %s" % i) df_combined = pd.concat(dfs) df_combined.sort_values(by='Province_State', inplace=True) df_combined['Date'] = pd.to_datetime(df_combined['dates2']) rois = df_combined['Province_State'].unique() sorted_dfs = [] for roi in rois: df_roi = df_combined[df_combined['Province_State'] == roi] df_roi = df_roi.sort_values(by="Date") df_roi['new_tests'] = df_roi['cum_tests'].diff().fillna(-1).astype(int) sorted_dfs.append(df_roi) df_tests = pd.concat(sorted_dfs) df_tests.reset_index(inplace=True, drop=True) df_tests.replace(US_STATE_ABBREV, inplace=True) df_tests.rename(columns={'Province_State': 'roi'}, inplace=True) # now open csvs in data_path that match rois and merge on csv to add cum_test and new_tests rois = df_tests.roi.unique().tolist() to_remove = ['Diamond Princess', 'Grand Princess', 'Recovered'] for i in to_remove: if i in rois: rois.remove(i) for roi in rois: csv_path = data_path / f'covidtimeseries_{roi}.csv' try: df_timeseries = pd.read_csv(csv_path) except: print(f"{csv_path} not found in data path.") try: for i in df_timeseries.columns: # Check if testng data already included if 'tests' in i: df_timeseries.drop([i], axis=1, inplace=True) # drop so we can add new df_roi_tests = df_tests[df_tests['roi'] == roi] # filter down to roi df_result = df_timeseries.merge(df_roi_tests, on='dates2', how='left') df_result.fillna(-1, inplace=True) df_result.loc[df_result['new_tests'] < 0, 'new_tests'] = -1 # Handle cases where # cumulative counts decrease and new_tests becomes a large negative number df_result['new_tests'] = df_result['new_tests'].astype(int) df_result[['cum_tests', 'new_tests']] = df_result[['cum_tests', 'new_tests']].astype(int) df_result_trim = df_result[['dates2', 'cum_cases', 'new_cases', 'cum_deaths', 'new_deaths', 'cum_recover', 'new_recover', 'new_uninfected', 'cum_tests', 'new_tests', 'population']].copy() df_result_trim = df_result_trim.loc[:, ~df_result_trim.columns.str.contains('^Unnamed')] df_result_trim.to_csv(csv_path) # overwrite timeseries CSV except: print(f'Could not get tests data for {roi}.') def daterange(date1, date2): for n in range(int ((date2 - date1).days)+1): yield date1 + timedelta(n) def fix_jhu_testing_dates(x): y = datetime.strptime(x, '%m-%d-%Y') return datetime.strftime(y, '%m/%d/%y') def 
fix_negatives(data_path: str, plot: bool = False) -> None: """Fix negative values in daily data. The purpose of this script is to fix spurious negative values in new daily numbers. For example, the cumulative total of cases should not go from N to a value less than N on a subsequent day. This script fixes this by nulling such data and applying a monotonic spline interpolation in between valid days of data. This only affects a small number of regions. It overwrites the original .csv files produced by the functions above. Args: data_path (str): Full path to data directory. plot (bool): Whether to plot the changes. Returns: None """ csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)] for csv in tqdm(csvs, desc="Regions"): roi = str(csv).split('.')[0].split('_')[-1] df = pd.read_csv(csv) # Exclude final day because it is often a partial count. df = df.iloc[:-1] df = fix_neg(df, roi, plot=plot) df.to_csv(data_path / (csv.name.split('.')[0]+'.csv')) def fix_neg(df: pd.DataFrame, roi: str, columns: list = ['cases', 'deaths', 'recover'], plot: bool = False) -> pd.DataFrame: """Used by `fix_negatives` to fix negatives values for a single region. This function uses monotonic spline interpolation to make sure that cumulative counts are non-decreasing. Args: df (pd.DataFrame): DataFrame containing data for one region. roi (str): One region, e.g 'US_MI' or 'Greece'. columns (list, optional): Columns to make non-decreasing. Defaults to ['cases', 'deaths', 'recover']. Returns: pd.DataFrame: [description] """ for c in columns: cum = 'cum_%s' % c new = 'new_%s' % c before = df[cum].copy() non_zeros = df[df[new] > 0].index has_negs = before.diff().min() < 0 if len(non_zeros) and has_negs: first_non_zero = non_zeros[0] maxx = df.loc[first_non_zero, cum].max() # Find the bad entries and null the corresponding # cumulative column, which are: # 1) Cumulative columns which are zero after previously # being non-zero bad = df.loc[first_non_zero:, cum] == 0 df.loc[bad[bad].index, cum] = None # 2) New daily columns which are negative bad = df.loc[first_non_zero:, new] < 0 df.loc[bad[bad].index, cum] = None # Protect against 0 null final value which screws up interpolator if np.isnan(df.loc[df.index[-1], cum]): df.loc[df.index[-1], cum] = maxx # Then run a loop which: while True: # Interpolates the cumulative column nulls to have # monotonic growth after = df[cum].interpolate('pchip') diff = after.diff() if diff.min() < 0: # If there are still negative first-differences at this # point, increase the corresponding cumulative values by 1. neg_index = diff[diff < 0].index df.loc[neg_index, cum] += 1 else: break # Then repeat if plot: plt.figure() plt.plot(df.index, before, label='raw') plt.plot(df.index, after, label='fixed') r = np.corrcoef(before, after)[0, 1] plt.title("%s %s Raw vs Fixed R=%.5g" % (roi, c, r)) plt.legend() else: after = before # Make sure the first differences are now all non-negative assert after.diff().min() >= 0 # Replace the values df[new] = df[cum].diff().fillna(0).astype(int).values return df def negify_missing(data_path: str) -> None: """Fix negative values in daily data. The purpose of this script is to fix spurious negative values in new daily numbers. For example, the cumulative total of cases should not go from N to a value less than N on a subsequent day. This script fixes this by nulling such data and applying a monotonic spline interpolation in between valid days of data. This only affects a small number of regions. 
It overwrites the original .csv files produced by the functions above. Args: data_path (str): Full path to data directory. plot (bool): Whether to plot the changes. Returns: None """ csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)] for csv in tqdm(csvs, desc="Regions"): roi = str(csv).split('.')[0].split('_')[-1] df = pd.read_csv(csv) for kind in ['cases', 'deaths', 'recover']: if df['cum_%s' % kind].sum() == 0: print("Negifying 'new_%s' for %s" % (kind, roi)) df['new_%s' % kind] = -1 out = data_path / (csv.name.split('.')[0]+'.csv') df.to_csv(out) def remove_old_rois(data_path: str): """Delete time-series files for regions no longer tracked, such as: Diamond Princess, MS Zaandam, Samoa, Vanuatu, Marshall Islands, US, US_AS (American Somoa)""" csvs = [x for x in data_path.iterdir() if 'covidtimeseries' in str(x)] rois_to_remove = ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'Samoa', 'Vanuatu', 'Marshall Islands', 'US', 'US_AS', 'Micronesia', 'Kiribati', 'Palau'] for csv in csvs: roi = str(csv).split('.')[0].split('_', 1)[-1] if roi in rois_to_remove: try: if os.path.exists(csv): print("Removing {} from data_path".format(roi)) os.remove(csv) except: print("could not remove {}. Check that path is correct.".format(csv))
[ "matplotlib.pyplot.title", "os.remove", "pandas.read_csv", "numpy.isnan", "matplotlib.pyplot.figure", "pandas.DataFrame", "os.path.exists", "datetime.timedelta", "pandas.concat", "datetime.datetime.strftime", "tqdm.tqdm", "numpy.corrcoef", "matplotlib.pyplot.legend", "datetime.date", "datetime.date.today", "datetime.datetime.strptime", "pandas.to_datetime", "pandas.Series", "matplotlib.pyplot.plot", "pandas.json_normalize" ]
[((4997, 5035), 'tqdm.tqdm', 'tqdm', (['good_countries'], {'desc': '"""Countries"""'}), "(good_countries, desc='Countries')\n", (5001, 5035), False, 'from tqdm import tqdm\n'), ((6806, 6836), 'tqdm.tqdm', 'tqdm', (['states'], {'desc': '"""US States"""'}), "(states, desc='US States')\n", (6810, 6836), False, 'from tqdm import tqdm\n'), ((8490, 8522), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%m/%d/%y"""'], {}), "(x, '%m/%d/%y')\n", (8507, 8522), False, 'from datetime import datetime\n'), ((8534, 8566), 'datetime.datetime.strftime', 'datetime.strftime', (['y', '"""%m/%d/%y"""'], {}), "(y, '%m/%d/%y')\n", (8551, 8566), False, 'from datetime import datetime\n'), ((10644, 10670), 'pandas.read_csv', 'pd.read_csv', (['archived_data'], {}), '(archived_data)\n', (10655, 10670), True, 'import pandas as pd\n'), ((13381, 13423), 'tqdm.tqdm', 'tqdm', (['provinces'], {'desc': '"""Canadian Provinces"""'}), "(provinces, desc='Canadian Provinces')\n", (13385, 13423), False, 'from tqdm import tqdm\n'), ((14800, 14832), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%d-%m-%Y"""'], {}), "(x, '%d-%m-%Y')\n", (14817, 14832), False, 'from datetime import datetime\n'), ((15934, 15975), 'tqdm.tqdm', 'tqdm', (['state_code'], {'desc': '"""Brazilian States"""'}), "(state_code, desc='Brazilian States')\n", (15938, 15975), False, 'from tqdm import tqdm\n'), ((17807, 17823), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (17818, 17823), True, 'import pandas as pd\n'), ((17840, 17888), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'country_iso_codes.csv')"], {}), "(data_path / 'country_iso_codes.csv')\n", (17851, 17888), True, 'import pandas as pd\n'), ((18036, 18097), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'Alpha-3 code', 'cum_tests']"}), "(columns=['dates2', 'Alpha-3 code', 'cum_tests'])\n", (18048, 18097), True, 'import pandas as pd\n'), ((20591, 20607), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (20602, 20607), True, 'import pandas as pd\n'), ((20623, 20777), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'Alpha-3 code', 'cum_vaccinations', 'daily_vaccinations',\n 'cum_people_vaccinated', 'cum_people_fully_vaccinated']"}), "(columns=['dates2', 'Alpha-3 code', 'cum_vaccinations',\n 'daily_vaccinations', 'cum_people_vaccinated',\n 'cum_people_fully_vaccinated'])\n", (20635, 20777), True, 'import pandas as pd\n'), ((21250, 21298), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'country_iso_codes.csv')"], {}), "(data_path / 'country_iso_codes.csv')\n", (21261, 21298), True, 'import pandas as pd\n'), ((25121, 25143), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (25130, 25143), True, 'import pandas as pd\n'), ((25696, 25712), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (25707, 25712), True, 'import pandas as pd\n'), ((25728, 25864), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'region', 'cum_vaccinations', 'daily_vaccinations',\n 'people_vaccinated', 'people_fully_vaccinated']"}), "(columns=['dates2', 'region', 'cum_vaccinations',\n 'daily_vaccinations', 'people_vaccinated', 'people_fully_vaccinated'])\n", (25740, 25864), True, 'import pandas as pd\n'), ((27801, 27833), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (27818, 27833), False, 'from datetime import datetime\n'), ((27845, 27877), 'datetime.datetime.strftime', 'datetime.strftime', (['y', '"""%m/%d/%y"""'], {}), "(y, 
'%m/%d/%y')\n", (27862, 27877), False, 'from datetime import datetime\n'), ((28435, 28452), 'datetime.date', 'date', (['(2020)', '(4)', '(12)'], {}), '(2020, 4, 12)\n', (28439, 28452), False, 'from datetime import timedelta, date\n'), ((28494, 28506), 'datetime.date.today', 'date.today', ([], {}), '()\n', (28504, 28506), False, 'from datetime import timedelta, date\n'), ((28840, 28908), 'tqdm.tqdm', 'tqdm', (['dates'], {'desc': 'f"""Scraping {delta} days of data across all states"""'}), "(dates, desc=f'Scraping {delta} days of data across all states')\n", (28844, 28908), False, 'from tqdm import tqdm\n'), ((29755, 29769), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (29764, 29769), True, 'import pandas as pd\n'), ((29859, 29896), 'pandas.to_datetime', 'pd.to_datetime', (["df_combined['dates2']"], {}), "(df_combined['dates2'])\n", (29873, 29896), True, 'import pandas as pd\n'), ((30232, 30253), 'pandas.concat', 'pd.concat', (['sorted_dfs'], {}), '(sorted_dfs)\n', (30241, 30253), True, 'import pandas as pd\n'), ((32444, 32476), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%m-%d-%Y"""'], {}), "(x, '%m-%d-%Y')\n", (32461, 32476), False, 'from datetime import datetime\n'), ((32488, 32520), 'datetime.datetime.strftime', 'datetime.strftime', (['y', '"""%m/%d/%y"""'], {}), "(y, '%m/%d/%y')\n", (32505, 32520), False, 'from datetime import datetime\n'), ((33325, 33351), 'tqdm.tqdm', 'tqdm', (['csvs'], {'desc': '"""Regions"""'}), "(csvs, desc='Regions')\n", (33329, 33351), False, 'from tqdm import tqdm\n'), ((37222, 37248), 'tqdm.tqdm', 'tqdm', (['csvs'], {'desc': '"""Regions"""'}), "(csvs, desc='Regions')\n", (37226, 37248), False, 'from tqdm import tqdm\n'), ((9845, 9896), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'population_estimates.csv')"], {}), "(data_path / 'population_estimates.csv')\n", (9856, 9896), True, 'import pandas as pd\n'), ((10856, 10918), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_recover', 'new_recover']"}), "(columns=['dates2', 'cum_recover', 'new_recover'])\n", (10868, 10918), True, 'import pandas as pd\n'), ((13521, 13659), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover', 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover',\n 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected'])\n", (13533, 13659), True, 'import pandas as pd\n'), ((15172, 15188), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (15183, 15188), True, 'import pandas as pd\n'), ((16067, 16205), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover', 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover',\n 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected'])\n", (16079, 16205), True, 'import pandas as pd\n'), ((33418, 33434), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (33429, 33434), True, 'import pandas as pd\n'), ((37315, 37331), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (37326, 37331), True, 'import pandas as pd\n'), ((5434, 5572), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover', 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'cum_recover',\n 'new_cases', 'new_deaths', 'new_recover', 'new_uninfected'])\n", (5446, 
5572), True, 'import pandas as pd\n'), ((7144, 7252), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['dates2', 'cum_cases', 'cum_deaths', 'new_cases', 'new_deaths',\n 'new_uninfected']"}), "(columns=['dates2', 'cum_cases', 'cum_deaths', 'new_cases',\n 'new_deaths', 'new_uninfected'])\n", (7156, 7252), True, 'import pandas as pd\n'), ((12529, 12558), 'pandas.json_normalize', 'pd.json_normalize', (['data[kind]'], {}), '(data[kind])\n', (12546, 12558), True, 'import pandas as pd\n'), ((17910, 17978), 'pandas.Series', 'pd.Series', (['roi_codes.Country.values'], {'index': "roi_codes['Alpha-3 code']"}), "(roi_codes.Country.values, index=roi_codes['Alpha-3 code'])\n", (17919, 17978), True, 'import pandas as pd\n'), ((18912, 18960), 'pandas.read_csv', 'pd.read_csv', (['timeseries_path'], {'index_col': '"""dates2"""'}), "(timeseries_path, index_col='dates2')\n", (18923, 18960), True, 'import pandas as pd\n'), ((21320, 21388), 'pandas.Series', 'pd.Series', (['roi_codes.Country.values'], {'index': "roi_codes['Alpha-3 code']"}), "(roi_codes.Country.values, index=roi_codes['Alpha-3 code'])\n", (21329, 21388), True, 'import pandas as pd\n'), ((22033, 22081), 'pandas.read_csv', 'pd.read_csv', (['timeseries_path'], {'index_col': '"""dates2"""'}), "(timeseries_path, index_col='dates2')\n", (22044, 22081), True, 'import pandas as pd\n'), ((28971, 28987), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (28982, 28987), True, 'import pandas as pd\n'), ((29010, 29073), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Province_State', 'cum_tests', 'dates2']"}), "(columns=['Province_State', 'cum_tests', 'dates2'])\n", (29022, 29073), True, 'import pandas as pd\n'), ((30828, 30849), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (30839, 30849), True, 'import pandas as pd\n'), ((35162, 35197), 'numpy.isnan', 'np.isnan', (['df.loc[df.index[-1], cum]'], {}), '(df.loc[df.index[-1], cum])\n', (35170, 35197), True, 'import numpy as np\n'), ((2895, 2911), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (2906, 2911), True, 'import pandas as pd\n'), ((26704, 26752), 'pandas.read_csv', 'pd.read_csv', (['timeseries_path'], {'index_col': '"""dates2"""'}), "(timeseries_path, index_col='dates2')\n", (26715, 26752), True, 'import pandas as pd\n'), ((32392, 32404), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (32401, 32404), False, 'from datetime import timedelta, date\n'), ((35907, 35919), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35917, 35919), True, 'import matplotlib.pyplot as plt\n'), ((35936, 35975), 'matplotlib.pyplot.plot', 'plt.plot', (['df.index', 'before'], {'label': '"""raw"""'}), "(df.index, before, label='raw')\n", (35944, 35975), True, 'import matplotlib.pyplot as plt\n'), ((35992, 36032), 'matplotlib.pyplot.plot', 'plt.plot', (['df.index', 'after'], {'label': '"""fixed"""'}), "(df.index, after, label='fixed')\n", (36000, 36032), True, 'import matplotlib.pyplot as plt\n'), ((36102, 36154), 'matplotlib.pyplot.title', 'plt.title', (["('%s %s Raw vs Fixed R=%.5g' % (roi, c, r))"], {}), "('%s %s Raw vs Fixed R=%.5g' % (roi, c, r))\n", (36111, 36154), True, 'import matplotlib.pyplot as plt\n'), ((36171, 36183), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (36181, 36183), True, 'import matplotlib.pyplot as plt\n'), ((38242, 38261), 'os.path.exists', 'os.path.exists', (['csv'], {}), '(csv)\n', (38256, 38261), False, 'import os\n'), ((3867, 3894), 'pandas.concat', 'pd.concat', (['([df1] + more_dfs)'], {}), '([df1] + 
more_dfs)\n', (3876, 3894), True, 'import pandas as pd\n'), ((11570, 11598), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates2']"], {}), "(df['dates2'])\n", (11584, 11598), True, 'import pandas as pd\n'), ((14517, 14545), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates2']"], {}), "(df['dates2'])\n", (14531, 14545), True, 'import pandas as pd\n'), ((17126, 17154), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates2']"], {}), "(df['dates2'])\n", (17140, 17154), True, 'import pandas as pd\n'), ((36053, 36079), 'numpy.corrcoef', 'np.corrcoef', (['before', 'after'], {}), '(before, after)\n', (36064, 36079), True, 'import numpy as np\n'), ((38351, 38365), 'os.remove', 'os.remove', (['csv'], {}), '(csv)\n', (38360, 38365), False, 'import os\n')]
import ctypes  # needed explicitly: the wrappers below call ctypes.byref, which a star import alone does not provide
from ctypes import *
from athena import ndarray
from athena.stream import *
import numpy as np
from enum import Enum
import os


def _load_nccl_lib():
    """Load library in build/lib."""
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    lib_path = os.path.join(curr_path, '../../../build/lib/')
    path_to_so_file = os.path.join(lib_path, "lib_mpi_nccl_runtime_api.so")
    lib = CDLL(path_to_so_file, RTLD_GLOBAL)
    return lib


lib_mpi_nccl = _load_nccl_lib()
# lib_mpi_nccl = CDLL("./lib_mpi_nccl_runtime_api.so", RTLD_GLOBAL)


class ncclDataType_t(Enum):
    ncclInt8 = 0
    ncclChar = 0
    ncclUint8 = 1
    ncclInt32 = 2
    ncclInt = 2
    ncclUint32 = 3
    ncclInt64 = 4
    ncclUint64 = 5
    ncclFloat16 = 6
    ncclHalf = 6
    ncclFloat32 = 7
    ncclFloat = 7
    ncclFloat64 = 8
    ncclDouble = 8
    ncclNumTypes = 9


class ncclRedOp_t(Enum):
    ncclSum = 0
    ncclProd = 1
    ncclMax = 2
    ncclMin = 3
    ncclNumOps = 4


class ncclUniqueId(Structure):
    _fields_ = [("internal", (c_int8 * 128))]


class MPI_NCCL_Communicator():
    def __init__(self, stream = None):
        '''
        mpicomm: the MPI communicator, to use in MPI_Bcast, MPI_Reduce, MPI_Scatter, etc
        ncclcomm: the NCCL communicator, to use in ncclAllReduce ...
        nRanks: the total number of MPI threads
        myRank: the rank in all MPI threads
        localRank: the rank among the MPI threads in this device
        ncclId: ncclGetUniqueId should be called once when creating a communicator
                and the Id should be distributed to all ranks in the communicator
                before calling ncclCommInitRank.
        stream: the stream for NCCL communication
        '''
        self.mpicomm = c_int64(0)
        self.ncclcomm = c_int64(0)
        self.nRanks = c_int32(0)
        self.myRank = c_int32(0)
        self.localRank = c_int32(-1)
        self.ncclId = ncclUniqueId()
        self.device_id = c_int(0)

        self.MPI_Init()
        self.MPIGetComm()
        self.MPI_Comm_rank()
        self.MPI_Comm_size()
        self.getLocalRank()
        self.device_id.value = self.localRank.value

        if stream is None:
            self.stream = create_stream_handle(ndarray.gpu(self.device_id.value))
        else:
            self.stream = stream

    def MPI_Init(self):
        lib_mpi_nccl.MPIInit()

    def MPI_Finalize(self):
        lib_mpi_nccl.MPIFinalize()

    def MPIGetComm(self):
        lib_mpi_nccl.MPIGetComm(ctypes.byref(self.mpicomm))

    def MPI_Comm_rank(self):
        lib_mpi_nccl.getMPICommRank(ctypes.byref(self.mpicomm), ctypes.byref(self.myRank))

    def MPI_Comm_size(self):
        lib_mpi_nccl.getMPICommSize(ctypes.byref(self.mpicomm), ctypes.byref(self.nRanks))

    def getLocalRank(self):
        lib_mpi_nccl.getLocalRank(ctypes.byref(self.mpicomm), self.nRanks, self.myRank, ctypes.byref(self.localRank))

    def ncclGetUniqueId(self):
        lib_mpi_nccl.getNcclUniqueId(ctypes.byref(self.ncclId), self.mpicomm, self.localRank)

    def dlarrayNcclAllReduce(self, dlarray, datatype, reduceop, executor_stream = None):
        lib_mpi_nccl.dlarrayAllReduce(dlarray.handle, c_int(datatype.value), c_int(reduceop.value), self.ncclcomm,
                                      executor_stream.handle if executor_stream else self.stream.handle)

    def dlarrayBroadcast(self, dlarray, datatype, root, executor_stream = None):
        lib_mpi_nccl.dlarrayBroadcast(dlarray.handle, c_int(datatype.value), c_int(root), self.ncclcomm,
                                      executor_stream.handle if executor_stream else self.stream.handle)

    def dlarrayAllGather(self, input_arr, output_arr, datatype, executor_stream = None):
        lib_mpi_nccl.dlarrayAllGather(input_arr.handle, output_arr.handle, c_int(datatype.value), self.ncclcomm,
                                      executor_stream.handle if executor_stream else self.stream.handle)

    def dlarraySend(self, arr, datatype, target, executor_stream = None):
        lib_mpi_nccl.dlarraySend(arr.handle, c_int(datatype.value), c_int(target), self.ncclcomm,
                                 executor_stream.handle if executor_stream else self.stream.handle)

    def dlarrayRecv(self, arr, datatype, src, executor_stream = None):
        lib_mpi_nccl.dlarrayRecv(arr.handle, c_int(datatype.value), c_int(src), self.ncclcomm,
                                 executor_stream.handle if executor_stream else self.stream.handle)

    def ncclCommInitRank(self):
        '''
        Use partial AllReduce to change here.
        self.nRanks is the number of threads to use ncclallreduce
        self.myRank is the rank among these threads. the value must be in [0, self.nRanks - 1]
        '''
        lib_mpi_nccl.initNcclCommRank(ctypes.byref(self.ncclcomm), self.nRanks, ctypes.byref(self.ncclId), self.myRank, self.localRank)

    def ncclCommDestroy(self):
        lib_mpi_nccl.commDestroyNccl(ctypes.byref(self.ncclcomm))

    def ncclSetDevice(self, device_id):
        self.device_id.value = device_id
        lib_mpi_nccl.setDevice(self.device_id.value)

    def ncclInit(self):
        self.ncclSetDevice(self.device_id.value)
        self.ncclGetUniqueId()
        self.ncclCommInitRank()

    def ncclFinish(self):
        self.MPI_Finalize()


def mpi_nccl_communicator():
    '''
    '''
    return MPI_NCCL_Communicator()


# NCCL_DEBUG=INFO mpirun --allow-run-as-root -np 4 python mpi_nccl_comm.py
if __name__ == "__main__":
    t = mpi_nccl_communicator()
    t.ncclInit()
    arr = np.ones(16)*t.localRank.value
    print("before: = ", arr)
    arr = ndarray.array(arr, ctx = ndarray.gpu(t.device_id.value))
    output_arr = np.zeros(16 * t.nRanks.value)
    output_arr = ndarray.array(output_arr, ctx = ndarray.gpu(t.device_id.value))
    t.dlarrayNcclAllReduce(arr, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
    # t.dlarrayBroadcast(arr, ncclDataType_t.ncclFloat32, 0)
    # t.dlarrayAllGather(arr, output_arr, ncclDataType_t.ncclFloat32)
    print("after: = ", arr.asnumpy())
    t.ncclFinish()
[ "os.path.expanduser", "athena.ndarray.gpu", "numpy.zeros", "numpy.ones", "os.path.join" ]
[((280, 326), 'os.path.join', 'os.path.join', (['curr_path', '"""../../../build/lib/"""'], {}), "(curr_path, '../../../build/lib/')\n", (292, 326), False, 'import os\n'), ((349, 402), 'os.path.join', 'os.path.join', (['lib_path', '"""lib_mpi_nccl_runtime_api.so"""'], {}), "(lib_path, 'lib_mpi_nccl_runtime_api.so')\n", (361, 402), False, 'import os\n'), ((5693, 5722), 'numpy.zeros', 'np.zeros', (['(16 * t.nRanks.value)'], {}), '(16 * t.nRanks.value)\n', (5701, 5722), True, 'import numpy as np\n'), ((5550, 5561), 'numpy.ones', 'np.ones', (['(16)'], {}), '(16)\n', (5557, 5561), True, 'import numpy as np\n'), ((234, 262), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (252, 262), False, 'import os\n'), ((5644, 5674), 'athena.ndarray.gpu', 'ndarray.gpu', (['t.device_id.value'], {}), '(t.device_id.value)\n', (5655, 5674), False, 'from athena import ndarray\n'), ((5773, 5803), 'athena.ndarray.gpu', 'ndarray.gpu', (['t.device_id.value'], {}), '(t.device_id.value)\n', (5784, 5803), False, 'from athena import ndarray\n'), ((2369, 2402), 'athena.ndarray.gpu', 'ndarray.gpu', (['self.device_id.value'], {}), '(self.device_id.value)\n', (2380, 2402), False, 'from athena import ndarray\n')]
import numpy as np import warnings from scipy import stats from six import string_types import matplotlib.pyplot as plt from scipy.integrate import trapz from explore.utils import Proportions try: import statsmodels.nonparametric.api as smnp _has_statsmodels = True except ImportError: _has_statsmodels = False def _univariate_kde(data, shade=False, vertical=False, kernel='gau', bw="scott", gridsize=100, cut=3, clip=None, legend=True, ax=None, cumulative=False, **kwargs): """ Computes the KDE of univariate data. shade : bool, optional If True, shade in the area under the KDE curve (or draw with filled contours when data is bivariate). vertical : bool, optional If True, density is on x-axis. kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional Code for shape of kernel to fit with. Bivariate KDE can only use gaussian kernel. bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional Name of reference method to determine kernel size, scalar factor, or scalar for each dimension of the bivariate plot. Note that the underlying computational libraries have different interperetations for this parameter: ``statsmodels`` uses it directly, but ``scipy`` treats it as a scaling factor for the standard deviation of the data. gridsize : int, optional Number of discrete points in the evaluation grid. cut : scalar, optional Draw the estimate to cut * bw from the extreme data points. clip : pair of scalars, or pair of pair of scalars, optional Lower and upper bounds for datapoints used to fit KDE. Can provide a pair of (low, high) bounds for bivariate plots. legend : bool, optional If True, add a legend or label the axes when possible. cumulative : bool, optional If True, draw the cumulative distribution estimated by the kde. ax : matplotlib axes, optional Axes to plot on, otherwise uses current axes. kwargs : key, value pairings Other keyword arguments are passed to ``plt.plot()`` or ``plt.contour{f}`` depending on whether a univariate or bivariate plot is being drawn. Output ------ x: array-like, (n_grid_points, ) The grid of values where the kde is evaluated. y: array-like, (n_grid_points, ) The values of the KDE. """ # Sort out the clipping if clip is None: clip = (-np.inf, np.inf) # Calculate the KDE if np.nan_to_num(data.var()) == 0: # Don't try to compute KDE on singular data msg = "Data must have variance to compute a kernel density estimate." warnings.warn(msg, UserWarning) x, y = np.array([]), np.array([]) elif _has_statsmodels: # Prefer using statsmodels for kernel flexibility x, y = _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip, cumulative=cumulative) else: # Fall back to scipy if missing statsmodels if kernel != "gau": kernel = "gau" msg = "Kernel other than `gau` requires statsmodels." warnings.warn(msg, UserWarning) if cumulative: raise ImportError("Cumulative distributions are currently " "only implemented in statsmodels. 
" "Please install statsmodels.") x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip) # Make sure the density is nonnegative y = np.amax(np.c_[np.zeros_like(y), y], axis=1) return x, y def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip, cumulative=False): """Compute a univariate kernel density estimate using statsmodels.""" fft = kernel == "gau" kde = smnp.KDEUnivariate(data) kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip) if cumulative: grid, y = kde.support, kde.cdf else: grid, y = kde.support, kde.density return grid, y def _scipy_univariate_kde(data, bw, gridsize, cut, clip): """Compute a univariate kernel density estimate using scipy.""" try: kde = stats.gaussian_kde(data, bw_method=bw) except TypeError: kde = stats.gaussian_kde(data) if bw != "scott": # scipy default msg = ("Ignoring bandwidth choice, " "please upgrade scipy to use a different bandwidth.") warnings.warn(msg, UserWarning) if isinstance(bw, string_types): bw = "scotts" if bw == "scott" else bw bw = getattr(kde, "%s_factor" % bw)() * np.std(data) grid = _kde_support(data, bw, gridsize, cut, clip) y = kde(grid) return grid, y def _kde_support(data, bw, gridsize='default', cut=3, clip=None): """Establish support for a kernel density estimate.""" support_min = max(data.min() - bw * cut, clip[0]) support_max = min(data.max() + bw * cut, clip[1]) return np.linspace(support_min, support_max, gridsize) def get_class_kdes(values, classes, ensure_norm=True, **kde_kws): """ KDEs for values with associated classes. Computes the KDE of each class then weights each KDE by the number of points in each class. Also compute the overall KDE. Output ------ cl_kdes, overall_kde cl_kdes: dict KDE for each class. Keys are class labels. overall_kde: dict Overall KDE (i.e. 
ignoring class labels) """ # TODO: do we really need ensure_norm overall_grid, overall_y = _univariate_kde(values, **kde_kws) if ensure_norm: overall_y = norm_kde(grid=overall_grid, y=overall_y) overall_kde = {'grid': overall_grid, 'y': overall_y} cl_props = Proportions(classes) cl_kdes = {} for cl in np.unique(classes): cl_mask = classes == cl cl_values = values[cl_mask] cl_grid, cl_y = _univariate_kde(cl_values, **kde_kws) if ensure_norm: cl_y = norm_kde(grid=cl_grid, y=cl_y) # weight area under KDE by number of samples cl_y *= cl_props[cl] cl_kdes[cl] = {'grid': cl_grid, 'y': cl_y} return cl_kdes, overall_kde def norm_kde(grid, y): tot = trapz(y=y, x=grid) return y / tot def _univariate_kdeplot(x, y, shade=True, vertical=False, legend=True, ax=None, **kwargs): """Plot a univariate kernel density estimate on one of the axes.""" if ax is None: ax = plt.gca() # Make sure the density is nonnegative y = np.amax(np.c_[np.zeros_like(y), y], axis=1) # Flip the data if the plot should be on the y axis if vertical: x, y = y, x # Check if a label was specified in the call label = kwargs.pop("label", None) # Otherwise check if the data object has a name if label is None and hasattr(x, "name"): label = x.name # Decide if we're going to add a legend legend = label is not None and legend label = "_nolegend_" if label is None else label # Use the active color cycle to find the plot color facecolor = kwargs.pop("facecolor", None) line, = ax.plot(x, y, **kwargs) color = line.get_color() line.remove() kwargs.pop("color", None) facecolor = color if facecolor is None else facecolor # Draw the KDE plot and, optionally, shade ax.plot(x, y, color=color, label=label, **kwargs) shade_kws = dict( facecolor=facecolor, alpha=kwargs.get("alpha", 0.25), clip_on=kwargs.get("clip_on", True), zorder=kwargs.get("zorder", 1), ) if shade: if vertical: ax.fill_betweenx(y, 0, x, **shade_kws) else: ax.fill_between(x, 0, y, **shade_kws) # Set the density axis minimum to 0 if vertical: ax.set_xlim(0, auto=None) else: ax.set_ylim(0, auto=None) # Draw the legend here handles, labels = ax.get_legend_handles_labels() if legend and handles: ax.legend(loc="best") return ax def _univariate_conditional_kdeplot(values, classes, cl_labels=None, cl_palette=None, include_overall=True, shade=True, vertical=False, legend=True, ax=None, kde_kws={}, kde_plt_kws={}): cl_kdes, overall_kde = get_class_kdes(values, classes, **kde_kws) # in case 'overall' is one of the classes if 'overall' in np.unique(classes): overall_name = ''.join(np.unique(classes)) else: overall_name = 'overall' cl_kdes[overall_name] = overall_kde # plot the KDE for each class for cl in cl_kdes.keys(): _kwargs = kde_plt_kws.copy() _kwargs['shade'] = shade x = cl_kdes[cl]['grid'] y = cl_kdes[cl]['y'] if cl_palette is not None and cl in cl_palette: _kwargs['color'] = cl_palette[cl] if cl_labels is not None and cl in cl_labels: _kwargs['label'] = cl_labels[cl] else: _kwargs['label'] = cl if cl == overall_name: if not include_overall: continue _kwargs['ls'] = '--' # _kwargs['alpha'] = .2 _kwargs['zorder'] = 1 _kwargs['label'] = None # 'overall' _kwargs['color'] = 'gray' _kwargs['shade'] = False _univariate_kdeplot(x=x, y=y, vertical=vertical, legend=legend, ax=ax, **_kwargs)
[ "numpy.zeros_like", "matplotlib.pyplot.gca", "numpy.std", "scipy.stats.gaussian_kde", "explore.utils.Proportions", "numpy.array", "numpy.linspace", "scipy.integrate.trapz", "warnings.warn", "statsmodels.nonparametric.api.KDEUnivariate", "numpy.unique" ]
[((4001, 4025), 'statsmodels.nonparametric.api.KDEUnivariate', 'smnp.KDEUnivariate', (['data'], {}), '(data)\n', (4019, 4025), True, 'import statsmodels.nonparametric.api as smnp\n'), ((5167, 5214), 'numpy.linspace', 'np.linspace', (['support_min', 'support_max', 'gridsize'], {}), '(support_min, support_max, gridsize)\n', (5178, 5214), True, 'import numpy as np\n'), ((5927, 5947), 'explore.utils.Proportions', 'Proportions', (['classes'], {}), '(classes)\n', (5938, 5947), False, 'from explore.utils import Proportions\n'), ((5979, 5997), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (5988, 5997), True, 'import numpy as np\n'), ((6430, 6448), 'scipy.integrate.trapz', 'trapz', ([], {'y': 'y', 'x': 'grid'}), '(y=y, x=grid)\n', (6435, 6448), False, 'from scipy.integrate import trapz\n'), ((2786, 2817), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (2799, 2817), False, 'import warnings\n'), ((4375, 4413), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data'], {'bw_method': 'bw'}), '(data, bw_method=bw)\n', (4393, 4413), False, 'from scipy import stats\n'), ((6690, 6699), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6697, 6699), True, 'import matplotlib.pyplot as plt\n'), ((8892, 8910), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (8901, 8910), True, 'import numpy as np\n'), ((2833, 2845), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2841, 2845), True, 'import numpy as np\n'), ((2847, 2859), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2855, 2859), True, 'import numpy as np\n'), ((4450, 4474), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data'], {}), '(data)\n', (4468, 4474), False, 'from scipy import stats\n'), ((4816, 4828), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (4822, 4828), True, 'import numpy as np\n'), ((8943, 8961), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (8952, 8961), True, 'import numpy as np\n'), ((3332, 3363), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (3345, 3363), False, 'import warnings\n'), ((3720, 3736), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (3733, 3736), True, 'import numpy as np\n'), ((4652, 4683), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (4665, 4683), False, 'import warnings\n'), ((6766, 6782), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (6779, 6782), True, 'import numpy as np\n')]
from abc import ABCMeta, abstractmethod

import numpy as np


class ProposalDistribution(metaclass=ABCMeta):
    @abstractmethod
    def __init__(self):
        ...

    @abstractmethod
    def sample(self, x: np.ndarray) -> np.ndarray:
        ...

    @abstractmethod
    def pdf(self, x: np.ndarray, cond: np.ndarray) -> np.ndarray:
        ...


class Normal(ProposalDistribution):
    __slots__ = ['mean', 'std']

    def __init__(self, mean: float, spread: float):
        super().__init__()
        self.mean = mean
        self.std = spread
        assert self.std > 0, "Wrong specification of distribution!"

    def sample(self, x):
        return x + np.random.normal(self.mean, self.std, x.shape)

    def pdf(self, x, cond):
        return 1 / (np.sqrt(2 * np.pi) * self.std) * np.exp(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))


class Uniform(ProposalDistribution):
    __slots__ = ['spread']

    def __init__(self, spread: float):
        super().__init__()
        self.spread = spread
        assert self.spread > 0, "Wrong specification of distribution!"

    def sample(self, x):
        return x + np.random.uniform(low=-self.spread / 2, high=self.spread / 2, size=x.shape)

    def pdf(self, x, cond):
        return np.array(1 / self.spread)
[ "numpy.random.uniform", "numpy.array", "numpy.exp", "numpy.random.normal", "numpy.sqrt" ]
[((1248, 1273), 'numpy.array', 'np.array', (['(1 / self.spread)'], {}), '(1 / self.spread)\n', (1256, 1273), True, 'import numpy as np\n'), ((662, 708), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.std', 'x.shape'], {}), '(self.mean, self.std, x.shape)\n', (678, 708), True, 'import numpy as np\n'), ((791, 849), 'numpy.exp', 'np.exp', (['(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))'], {}), '(-(x - self.mean - cond) ** 2 / (2 * self.std ** 2))\n', (797, 849), True, 'import numpy as np\n'), ((1128, 1203), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.spread / 2)', 'high': '(self.spread / 2)', 'size': 'x.shape'}), '(low=-self.spread / 2, high=self.spread / 2, size=x.shape)\n', (1145, 1203), True, 'import numpy as np\n'), ((758, 776), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (765, 776), True, 'import numpy as np\n')]
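A minimal usage sketch for the ProposalDistribution classes defined above, e.g. as the proposal step of a random-walk sampler. The parameter values and array shapes here are illustrative assumptions, not part of the original file:

import numpy as np

# illustrative only: propose a new state from the current one and evaluate the proposal density
normal_proposal = Normal(mean=0.0, spread=0.5)    # hypothetical parameter choice
uniform_proposal = Uniform(spread=1.0)            # hypothetical parameter choice

x_current = np.zeros(3)
x_proposed = normal_proposal.sample(x_current)          # x + N(mean, std) noise, same shape as x
q_forward = normal_proposal.pdf(x_proposed, x_current)  # density of moving x_current -> x_proposed
x_alt = uniform_proposal.sample(x_current)               # uniform perturbation within +/- spread/2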
from __future__ import print_function
import numpy as np

try:
    from builtins import range, zip
except:
    pass


def fermi_dirac(e_fermi, delta, energy):
    """
    Return fermi-dirac distribution weight.
    """
    x = (energy - e_fermi)/delta
    if x < -200:
        f = 1.
    elif x > 200:
        f = 0.
    else:
        f = 1./(np.exp(x) + 1)
    return f


def num_electron_diff(e_fermi, delta, e_skn, w_k, nb_k, num_elec):
    ne = 0
    for e_kn in e_skn:
        for e_n, w, nb in zip(e_kn, w_k, nb_k):
            f = [fermi_dirac(e_fermi, delta, e) for e in e_n[:nb]]
            ne += np.sum(f)*w
    return ne - num_elec
[ "numpy.sum", "numpy.exp", "builtins.zip" ]
[((500, 520), 'builtins.zip', 'zip', (['e_kn', 'w_k', 'nb_k'], {}), '(e_kn, w_k, nb_k)\n', (503, 520), False, 'from builtins import range, zip\n'), ((607, 616), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (613, 616), True, 'import numpy as np\n'), ((343, 352), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (349, 352), True, 'import numpy as np\n')]
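A short, hypothetical usage sketch for the functions above: num_electron_diff is zero exactly at the Fermi level, so a bracketing root finder can locate it. The toy eigenvalues, weights, and the use of scipy.optimize.brentq are assumptions for illustration only, not part of the original file:

import numpy as np
from scipy.optimize import brentq  # assumed available; not imported by the original file

# toy data: one spin channel, two k-points with equal weights, two bands each (hypothetical values)
e_skn = [np.array([[-1.0, 2.0], [-0.5, 1.5]])]  # eigenvalues per spin / k-point / band
w_k = np.array([0.5, 0.5])                      # k-point weights
nb_k = [2, 2]                                   # number of bands kept per k-point
num_elec = 1.0                                  # target electron count
delta = 0.05                                    # smearing width

# the Fermi level is the root of num_electron_diff in its first argument
e_fermi = brentq(num_electron_diff, -5.0, 5.0, args=(delta, e_skn, w_k, nb_k, num_elec))
print(e_fermi, fermi_dirac(e_fermi, delta, 0.0))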
import numpy as np
import math


def softmax(src):
    # Get size of input vector
    rows, cols = src.shape

    # Checking
    if rows > 1:
        raise Exception("Input rows > 1")

    # Find softmax
    expVec = np.exp(src)
    return expVec / np.sum(expVec)


def softmax_derivative(src):
    # Get size of input vector
    rows, cols = src.shape

    # Checking
    if rows > 1:
        raise Exception("Input rows > 1")

    # Find softmax derivative
    tmpVec = softmax(src)
    retMat = np.zeros((cols, cols))
    for i in range(cols):
        for j in range(cols):
            retMat[i, j] = tmpVec[0, i] * (float((i == j)) - tmpVec[0, j])

    return retMat


def relu(src):
    # Get size of input vector
    rows, cols = src.shape

    # Checking
    if rows > 1:
        raise Exception("Input rows > 1")

    # Find relu
    retVec = np.zeros((1, cols))
    for i in range(cols):
        retVec[0, i] = max(src[0, i], 0.0)

    return retVec


def relu_derivative(src):
    # Get size of input vector
    rows, cols = src.shape

    # Checking
    if rows > 1:
        raise Exception("Input rows > 1")

    # Find relu derivative
    retMat = np.zeros((cols, cols))
    for i in range(cols):
        if src[0, i] < 0.0:
            retMat[i, i] = 0
        else:
            retMat[i, i] = 1

    return retMat
[ "numpy.sum", "numpy.zeros", "numpy.exp" ]
[((216, 227), 'numpy.exp', 'np.exp', (['src'], {}), '(src)\n', (222, 227), True, 'import numpy as np\n'), ((496, 518), 'numpy.zeros', 'np.zeros', (['(cols, cols)'], {}), '((cols, cols))\n', (504, 518), True, 'import numpy as np\n'), ((848, 867), 'numpy.zeros', 'np.zeros', (['(1, cols)'], {}), '((1, cols))\n', (856, 867), True, 'import numpy as np\n'), ((1157, 1179), 'numpy.zeros', 'np.zeros', (['(cols, cols)'], {}), '((cols, cols))\n', (1165, 1179), True, 'import numpy as np\n'), ((248, 262), 'numpy.sum', 'np.sum', (['expVec'], {}), '(expVec)\n', (254, 262), True, 'import numpy as np\n')]
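A small illustrative check of the activation helpers above on a single-row input (the shapes and values are assumptions chosen for the example, not part of the original file):

import numpy as np

# the functions above expect a 1 x n 2-D array
v = np.array([[1.0, 2.0, 3.0]])

s = softmax(v)                        # probabilities, shape (1, 3); entries sum to 1
print(s, s.sum())

J = softmax_derivative(v)             # 3 x 3 Jacobian: J[i, j] = s_i * (delta_ij - s_j)
print(np.allclose(J.sum(axis=1), 0.0))   # each Jacobian row sums to ~0

r = relu(v)                           # elementwise max(x, 0), shape (1, 3)
D = relu_derivative(v)                # diagonal 0/1 indicator matrix, shape (3, 3)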
import numpy as np from scipy import sparse import scipy.sparse.linalg as spla import pylab as plt from scipy.linalg import block_diag # # nSub = 2 def load_matrix_basic(pathToFile,makeSparse,makeSymmetric, offset): f0 = open(pathToFile).readlines() firstLine = f0.pop(0) #removes the first line tmp = np.zeros((len(f0),3), dtype = float) for i in range(len(f0)): line = f0[i] k = line.split() tmp[i,0] = float(k[0]) tmp[i,1] = float(k[1]) tmp[i,2] = float(k[2]) if (tmp.shape[0]==1): tmp = [] else: n = np.int32(tmp[0,0]) m = np.int32(tmp[0,1]) I = tmp[1::,0]-offset; J = tmp[1::,1]-offset; V = tmp[1::,2] # # print str0,i,j if (makeSymmetric): logInd = J != I; I = np.concatenate((I,J[logInd])) J = np.concatenate((J,I[logInd])) V = np.concatenate((V,V[logInd])) if (makeSparse): tmp = sparse.csc_matrix((V,(I,J)),shape=(n,m)).tocoo() else: if (m==1): tmp = V else: tmp = sparse.csc_matrix((V,(I,J)),shape=(n,m)).toarray() return tmp def load_matrix(path,str0,i,j,makeSparse,makeSymmetric,offset): pathToFile = path+'/'+str(i)+'/'+str0+str(j)+'.txt' # tmp = load_matrix_basic(pathToFile,makeSparse,makeSymmetric,offset) return tmp path0 = "../data" if 1: K = [] K_reg = [] Fc = [] R = [] Rf = [] Bc = [] Bf = [] BcT_dense = [] Gc = [] # Gf = [] Gf_p = [] Gc = [] Fc_p = [] rhs = [] xx = [] Kplus_f_test = [] KplusBcT_p = [] Bc_nonzRow = [] KplusBcT = [] BcKplus_tmp = [] # BcK_dense = [] K_UT = [] # x_out = [] # x_out_p = [] # Lumped = [] # Lumped = [] for i in range(nSub): K.append(load_matrix(path0,"dump_K_","",str(i),False,True,1)) K_UT.append(load_matrix(path0,"dump_K_","",str(i),False,False,1)) K_reg.append(load_matrix(path0,"dump_K_reg_","",str(i),False,True,1)) Fc.append(load_matrix(path0,"dump_Fc_","",str(i),False,False,1)) R.append(load_matrix(path0,"dump_R_","",str(i),False,False,1)) Rf.append(load_matrix(path0,"dump_Rf_","",str(i),False,False,1)) Bc.append(load_matrix(path0,"dump_Bc_","",str(i),False,False,1)) Bf.append(load_matrix(path0,"dump_Bf_","",str(i),False,False,1)) Gf_p.append(np.dot(Bf[i],Rf[i])) # Lumped.append(load_matrix(path0,"dump_Lumped_","",str(i),False,False,1)) BcT_dense.append(load_matrix(path0,"dump_BcT_dense_","",str(i),False,False,1)) Gc.append(load_matrix(path0,"dump_Gc_","",str(i),False,False,1)) # Gf.append(load_matrix(path0,"dump_Gf_","",str(i),False,False,1)) indBc = np.abs(Bc[i]).sum(axis=1)>0 Bc_nonzRow.append( Bc[i][indBc,:]) # Fc.append( np.dot(Bc_nonzRow[i], np.linalg.solve(K_reg[i],Bc_nonzRow[i].T))) # Lumped.append( np.dot(Bc_nonzRow[i], np.dot(K[i],Bc_nonzRow[i].T))) rhs.append(load_matrix(path0,"dump_rhs_","",str(i),False,False,1)) # xx.append(load_matrix(path0,"dump_xxTest_","",str(i),False,False,1)) # Kplus_f_test.append(load_matrix(path0,"dump_Kplus_f_test_","",str(i),False,False,1)) # KplusBcT_p = BcKplus_List[i] # BcK_dense.append(load_matrix(path0,"dump_BcK_dense_","",str(i),False,False,1)) # BcK_dense.append(np.dot(K[i],Bc_nonzRow[i].T).T) Gc.append(np.dot(Bc[i], R[i])) KplusBcT.append(load_matrix(path0,"dump_KplusBcT_","",str(i),False,False,1)) KplusBcT_p.append(np.linalg.solve(K_reg[i],Bc_nonzRow[i].T)) # BcKplus_tmp.append(np.linalg.solve(K_reg[i],Bc[i].T).T) # x_out.append(load_matrix(path0,"dump_x_out_","",str(i),False,False,1)) Fc_p.append(np.dot(Bc_nonzRow[i],KplusBcT_p[i])) # iK_K = np.linalg.solve(K_reg[i],K[i]) # K_iK_K = np.dot(K[i],iK_K) # del_ = np.linalg.norm(K_iK_K - K[i] ) / np.linalg.norm(K[i]) # print(del_) # tmp_g = np.dot(Bc[i],np.linalg.solve(K_reg[i], rhs[i])) tmp_e = -np.dot(R[i].T,rhs[i]) if (i == 0): g_p = tmp_g e_p = tmp_e; else: 
g_p += tmp_g; e_p = np.concatenate((e_p,tmp_e)) print(' ...%d '%(i)) # gc_p = np.concatenate((g_p,e_p)) # gc_p = np.concatenate((gc_p,np.zeros(6))) Gc_clust = load_matrix(path0,"dump_Gc_clust_","",str(0),False,False,1) Ac_clust = load_matrix(path0,"dump_Ac_clust_","",str(0),False,True,1) Fc_clust = load_matrix(path0,"dump_Fc_clust_","",str(0),False,True,1) ker_GcTGc = load_matrix(path0,"dump_kerGc_","",str(0),False,False,1) # gc = load_matrix(path0,"dump_gc_","",str(0),False,False,1) # lam_alpha = load_matrix(path0,"dump_lam_alpha_","",str(0),False,False,1) # lam_alpha_p = np.linalg.solve(Ac_clust, gc) # nLam = Bc[0].shape[0] # lam_p = lam_alpha_p[0:nLam] ## alpha_p = lam_alpha[nLam:] # for i in range(nSub): # print (" ! %d " % (i)) # x10 = np.linalg.solve(K_reg[i],rhs[i]) # x11 = np.linalg.solve(K_reg[i],np.dot(Bc[i].T,lam_p)) # # print alpha_p[(6*i):(6*(i+1))] # x2 = np.dot(R[i],alpha_p[(6*i):(6*(i+1))]) # # x_out_p.append(x10 - x11 + x2) # print( "||x_out - x_out_p || = %e " % np.linalg.norm(x_out[i] - x_out_p[i])) Ac_clust_python = np.hstack((Fc_clust,Gc_clust)) Z = np.zeros((Gc_clust.shape[1],Ac_clust_python.shape[1])) print ( Z.shape) Ac_clust_python = np.vstack((Ac_clust_python,Z)) Gf_clust = load_matrix(path0,"dump_Gf_clust_","",str(0),False,False,1) # test = load_matrix(path0,"dump_testXYZ_","",str(0),False,False,1) # KpOnes= load_matrix(path0,"dump_KplusONES_","",str(0),False,False,1) #K_regD = K_reg[0] #frhs = rhs[0] #xxD = xx[0] #RD = R[0] #for i in range(1,nSub): # K_regD = block_diag(K_regD,K_reg[i]); # RD = block_diag(RD,R[i]); # frhs = np.concatenate((frhs,rhs[i])) # xxD = np.concatenate((xxD,xx[i])) # for i in range(nSub - 1): if (i == 0): Bc_g = np.hstack((Bc[0],Bc[1])) else: Bc_g = np.hstack((Bc_g,Bc[i+1])) for i in range(nSub - 1): if (i == 0): Bf_g = np.hstack((Bf[0],Bf[1])) else: Bf_g = np.hstack((Bf_g,Bf[i+1])) for i in range(nSub - 1): if (i == 0): Gf_g = Gf_p[0]+ Gf_p[1] else: Gf_g += Gf_p[i+1] weigth = np.loadtxt(path0+'/dump_weigth.txt') #Fc__ = np.dot(Bc_g,np.linalg.solve(K_regD,Bc_g.T)) # # #gc__ = np.dot(Bc_g,np.linalg.solve(K_regD,frhs)) #ec__ = - np.dot(RD.T,frhs) # #gc__ = np.concatenate((gc__,ec__)) #H = ker_GcTGc #AA0 = np.hstack((Fc__,Gc_clust)) #AB1 = # # #ZZ1 = np.zeros((Gc_clust.shape[0], H.shape[1])) #AA1 = np.vstack((ZZ1,H)) #AA01 = np.hstack((AA0,AA1)) #A0 = np.hstack((K_regD,Bc_g.T)) # #nB = Bc_g.shape[0] #Bc_Z = np.hstack((Bc_g,np.zeros((nB,nB)))) # #crhs = np.zeros(nB); # #A = np.vstack((A0,Bc_Z)) # #b = np.concatenate((frhs,crhs)) # #x = np.linalg.solve(A,b) # #xxD = np.concatenate((xxD,crhs)) #Bc_g = np.hstack((Bc_g,Bc[2])) #Bc_g = np.hstack((Bc_g,Bc[2])) #BcT_dense = load_matrix(path0,"dump_BcT_dense_","",str(0),True,True,1) #Fc_clust = load_matrix(path0,"dump_Fc_clust_","",str(0),True,True,1) #Ac_clust = load_matrix(path0,"dump_Ac_clust_","",str(0),True,True,1) #GcTGc = load_matrix(path0,"dump_GcTGc_clust_","",str(0),False,True,1) #GfTGf = load_matrix(path0,"dump_GfTGf_","",str(0),False,False,1) #iGfTGf = load_matrix(path0,"dump_iGfTGf_","",str(0),False,False,1) #ker_Ac = load_matrix(path0,"dump_ker_Ac_","",str(0),False,False,1) ##KpBcT0 = load_matrix(path0,"dump_KplusBcT_","",str(0),False,False,1) ##KpBcT1 = load_matrix(path0,"dump_KplusBcT_","",str(1),False,False,1) # # #dFc_eig = load_matrix(path0,"dump_Fc_clust_","",str(444),False,False,1) ##dFc_svd = load_matrix(path0,"dump_Fc_clust_","",str(555),False,False,1) #dAc_eig = load_matrix(path0,"dump_Ac_clust_","",str(444),False,False,1) ##dAc_svd = 
load_matrix(path0,"dump_Ac_clust_","",str(555),False,False,1) # # #GfTGf_ = np.zeros((GfTGf.shape[0],GfTGf.shape[0])) # # # # # # #for d in range(nSub): # GfTGf_ += np.dot(Gf[d].T,Gf[d]) # # # # #if False: # plt.subplot(1,3,1) # if GcTGc.shape[0] < 100: # markersize_ = 3 # else: # markersize_ = 0.7 # plt.spy(GcTGc, markersize=markersize_) # plt.xlabel("nnz = %d" % (GcTGc.nonzero()[0].shape[0])) # plt.subplot(1,3,2) # if Fc_clust.shape[0] < 100: # markersize = 3 # else: # markersize = 0.7 # plt.spy(Fc_clust, markersize=markersize_) # plt.xlabel("nnz = %d" % (Fc_clust.nonzero()[0].shape[0])) # plt.subplot(1,3,3) # if Ac_clust.shape[0] < 100: # markersize_ = 3 # else: # markersize_ = 0.7 # plt.spy(Ac_clust, markersize=markersize_) # plt.xlabel("nnz = %d" % (Ac_clust.nonzero()[0].shape[0])) # plt.show() # ##Bc_from_Rt = [] ##for i in range(1,14): ## Bc_from_Rt.append( load_matrix(path0,"dump_Bc_from_Rt_","",str(i),False,False,1) ) ## # ## Gc_ = load_matrix(path0,"dump_Gc_i_","",str(0),False,False,1) # # # # ##BcT_dense = load_matrix(path0,"dump_BcT_dense_","",str(0),True,True,1) # # #K_test= [] #Kplus_K_test = [] #K_Kplus_K_test = [] #K_reg_test = [] #K_reg_SF = [] #x_test = [] # # #for i in range(4): # # K_test.append(load_matrix(path0,"dump_K_dense_","",str(i),False,True,1)) # K_reg_test.append(load_matrix(path0,"dump_K_reg_","",str(i),False,True,1)) # K_reg_SF.append(load_matrix(path0,"dump_K_reg_SF_","",str(i),False,True,1)) # Kplus_K_test.append(load_matrix(path0,"dump_Kplus_K_","",str(i),False,False,1)) # K_Kplus_K_test.append(load_matrix(path0,"dump_K_Kplus_K_","",str(i),False,False,1)) # # #KKpK = np.dot(K_test[i], np.linalg.solve(K_reg_test[i],K_test[i])) # KKpK = np.dot(K[i], np.linalg.solve(K_reg[i],K[i])) # print "norm = %3.8e \n" % np.linalg.norm(KKpK - K[i]) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ##plt.spy(Fc_clust,markersize = .8);plt.show() # ##Gc_ = load_matrix(path0,"dump_Gc_i_","",str(0),True,True,1) # # # ##r = sparse.csgraph.reverse_cuthill_mckee(Ac_clust.tocsr(), symmetric_mode=True) ##Ac_clust = Ac_clust.toarray() ### ##P,L,U= scipy.linalg.lu(Ac_clust) ##nnz0 = L.nonzero()[0].shape[0] + U.nonzero()[0].shape[0] ## ## ### ### ##AcR = Ac_clust[np.ix_(r,r)] ##PR,LR,UR = scipy.linalg.lu(AcR) ##nnzR = LR.nonzero()[0].shape[0] + UR.nonzero()[0].shape[0] ### ### ##plt.subplot(2,2,1) ##plt.spy(L,markersize=0.1); ##plt.subplot(2,2,2) ##plt.spy(U,markersize=0.1); ##plt.subplot(2,2,3) ##plt.spy(LR,markersize=0.1); ##plt.subplot(2,2,4) ##plt.spy(UR,markersize=0.1); # ##print ("nnz = %d, nnz(reordered) = %d ") % (nnz0, nnzR) # # ##plt.show() # ##ker_Ac = load_matrix(path0,"dump_ker_Ac_","",str(0),False,True,1) ##ker_GcTGc = load_matrix(path0,"dump_ker_GcTGc_","",str(0),False,True,1) ##R0 = load_matrix(path0,"dump_R_","",str(0),False,True,1) # ##Gc_H = np.dot(GcTGc.toarray(),ker_GcTGc) # ##r = sparse.csgraph.reverse_cuthill_mckee(Ac_clust.tocsr(), symmetric_mode=True) ##Ac = Ac_clust.toarray()[np.ix_(r,r)] ##plt.subplot(1,2,1) ##plt.spy(Ac_clust ,markersize = 2.0) ##plt.subplot(1,2,2) ##plt.spy(Ac,markersize = 0.125) # # # ##Fc_python_List = [] # ##if 0: ## Fc_clust_python = np.zeros((Bct_list[i].shape[0], Bct_list[i].shape[0])) ## for i in range(nSub): ## Bc = Bct_list[i].toarray() ## indBc = np.abs(Bc).sum(axis=1)>0 ## Bc_red = Bc[indBc,:] ## BcKplus = BcKplus_List[i] ## ## Bf = Bf_List[i].toarray() ## indBf = np.abs(Bf).sum(axis=1)>0 ## Bf_red = Bf[indBf,:] ## ## Rc = RList[i].toarray() ## ## ## ## if (i == 0): ## Gf_clust_python = np.dot(Bf,Rc) ## 
Gc_clust_python = np.dot(Bc,Rc) ## else: ## Gf_clust_python = np.hstack((Gf_clust_python,np.dot(Bf,Rc))) ## Gc_clust_python = np.hstack((Gc_clust_python,np.dot(Bc,Rc))) ## indBcKplus = np.abs(BcKplus).sum(axis=1)>0 ## BcKplus = BcKplus[indBcKplus,:] ## BcKplus_python = np.linalg.solve(K_reg_List[i],Bc_red.T) ## BcKplus_ = np.linalg.solve(K_reg_List[i],Bc.T) ## Fc_i = np.dot(Bc_red,BcKplus_python) ## Fc_clust_python += np.dot(Bc,BcKplus_) ## Fc_python_List.append(Fc_i) ## ## for ii in range(nSub): ## ## ttt = Gc_List[ii][np.abs(Gc_List[ii]).sum(axis=1)>0,:] - GcList[ii] ## print np.linalg.norm(ttt) ## ## ## for ii in range(nSub): ## ddd0 = np.linalg.norm(Fc_python_List[ii] - Fc_List[ii]) ## ddd1 = np.linalg.norm(Fc_python_List[ii]) ## print "|Fc_python - Fc_myAp|/|Fc_python|",ddd0 / ddd1 ## ## ## Fc_clust = load_matrix(path0,"dump_Fc_clust_","",0,False,True,1) ## Gc_clust = load_matrix(path0,"dump_Gc_clust_","",0,False,False,1) ## Gf_clust = load_matrix(path0,"dump_Gf_clust_","",0,False,False,1) ## Ac_clust = load_matrix(path0,"dump_Ac_clust_","",0,False,True,1) ## Ac_clust_python = np.hstack((Fc_clust_python,Gc_clust_python)) ## ## Z = np.zeros((Gc_clust_python.shape[1],Ac_clust.shape[1])) ## print ( Z.shape) ## Ac_clust_python = np.vstack((Ac_clust_python,Z)) ## ## ## ddd0 = np.linalg.norm(Fc_clust - Fc_clust_python) ## ddd1 = np.linalg.norm(Fc_clust) ## print "|Fc_clust_python - Fc_clust_myAp|/|Fc_clust_python|",ddd0 / ddd1 ## ## ddd0 = np.linalg.norm(Gc_clust - Gc_clust_python) ## ddd1 = np.linalg.norm(Gc_clust) ## print "|Gc_clust_python - Gc_clust_myAp|/|Gc_clust_python|",ddd0 / ddd1 ## ## ddd0 = np.linalg.norm(Gf_clust - Gf_clust_python) ## ddd1 = np.linalg.norm(Gf_clust) ## print "|Gf_clust_python - Gf_clust_myAp|/|Gf_clust_python|",ddd0 / ddd1 ## ## ddd0 = np.linalg.norm(Ac_clust - Ac_clust_python) ## ddd1 = np.linalg.norm(Ac_clust) ## print "|Ac_clust_python - Ac_clust_myAp|/|Ac_clust_python|",ddd0 / ddd1 ## ##K = [] # # # ##plt.subplot(1,2,1) ##plt.spy(Gf_clust_python,markersize=1) ##plt.subplot(1,2,2) ##plt.spy(Gf_clust,markersize=1) ##plt.show()
[ "numpy.abs", "numpy.concatenate", "numpy.zeros", "numpy.hstack", "scipy.sparse.csc_matrix", "numpy.loadtxt", "numpy.int32", "numpy.dot", "numpy.linalg.solve", "numpy.vstack" ]
[((6585, 6623), 'numpy.loadtxt', 'np.loadtxt', (["(path0 + '/dump_weigth.txt')"], {}), "(path0 + '/dump_weigth.txt')\n", (6595, 6623), True, 'import numpy as np\n'), ((5559, 5590), 'numpy.hstack', 'np.hstack', (['(Fc_clust, Gc_clust)'], {}), '((Fc_clust, Gc_clust))\n', (5568, 5590), True, 'import numpy as np\n'), ((5599, 5654), 'numpy.zeros', 'np.zeros', (['(Gc_clust.shape[1], Ac_clust_python.shape[1])'], {}), '((Gc_clust.shape[1], Ac_clust_python.shape[1]))\n', (5607, 5654), True, 'import numpy as np\n'), ((5697, 5728), 'numpy.vstack', 'np.vstack', (['(Ac_clust_python, Z)'], {}), '((Ac_clust_python, Z))\n', (5706, 5728), True, 'import numpy as np\n'), ((588, 607), 'numpy.int32', 'np.int32', (['tmp[0, 0]'], {}), '(tmp[0, 0])\n', (596, 607), True, 'import numpy as np\n'), ((622, 641), 'numpy.int32', 'np.int32', (['tmp[0, 1]'], {}), '(tmp[0, 1])\n', (630, 641), True, 'import numpy as np\n'), ((6252, 6277), 'numpy.hstack', 'np.hstack', (['(Bc[0], Bc[1])'], {}), '((Bc[0], Bc[1]))\n', (6261, 6277), True, 'import numpy as np\n'), ((6302, 6330), 'numpy.hstack', 'np.hstack', (['(Bc_g, Bc[i + 1])'], {}), '((Bc_g, Bc[i + 1]))\n', (6311, 6330), True, 'import numpy as np\n'), ((6387, 6412), 'numpy.hstack', 'np.hstack', (['(Bf[0], Bf[1])'], {}), '((Bf[0], Bf[1]))\n', (6396, 6412), True, 'import numpy as np\n'), ((6437, 6465), 'numpy.hstack', 'np.hstack', (['(Bf_g, Bf[i + 1])'], {}), '((Bf_g, Bf[i + 1]))\n', (6446, 6465), True, 'import numpy as np\n'), ((830, 860), 'numpy.concatenate', 'np.concatenate', (['(I, J[logInd])'], {}), '((I, J[logInd]))\n', (844, 860), True, 'import numpy as np\n'), ((876, 906), 'numpy.concatenate', 'np.concatenate', (['(J, I[logInd])'], {}), '((J, I[logInd]))\n', (890, 906), True, 'import numpy as np\n'), ((922, 952), 'numpy.concatenate', 'np.concatenate', (['(V, V[logInd])'], {}), '((V, V[logInd]))\n', (936, 952), True, 'import numpy as np\n'), ((2546, 2566), 'numpy.dot', 'np.dot', (['Bf[i]', 'Rf[i]'], {}), '(Bf[i], Rf[i])\n', (2552, 2566), True, 'import numpy as np\n'), ((3596, 3615), 'numpy.dot', 'np.dot', (['Bc[i]', 'R[i]'], {}), '(Bc[i], R[i])\n', (3602, 3615), True, 'import numpy as np\n'), ((3729, 3771), 'numpy.linalg.solve', 'np.linalg.solve', (['K_reg[i]', 'Bc_nonzRow[i].T'], {}), '(K_reg[i], Bc_nonzRow[i].T)\n', (3744, 3771), True, 'import numpy as np\n'), ((3947, 3983), 'numpy.dot', 'np.dot', (['Bc_nonzRow[i]', 'KplusBcT_p[i]'], {}), '(Bc_nonzRow[i], KplusBcT_p[i])\n', (3953, 3983), True, 'import numpy as np\n'), ((4192, 4225), 'numpy.linalg.solve', 'np.linalg.solve', (['K_reg[i]', 'rhs[i]'], {}), '(K_reg[i], rhs[i])\n', (4207, 4225), True, 'import numpy as np\n'), ((4244, 4266), 'numpy.dot', 'np.dot', (['R[i].T', 'rhs[i]'], {}), '(R[i].T, rhs[i])\n', (4250, 4266), True, 'import numpy as np\n'), ((4405, 4433), 'numpy.concatenate', 'np.concatenate', (['(e_p, tmp_e)'], {}), '((e_p, tmp_e))\n', (4419, 4433), True, 'import numpy as np\n'), ((1008, 1052), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(V, (I, J))'], {'shape': '(n, m)'}), '((V, (I, J)), shape=(n, m))\n', (1025, 1052), False, 'from scipy import sparse\n'), ((2900, 2913), 'numpy.abs', 'np.abs', (['Bc[i]'], {}), '(Bc[i])\n', (2906, 2913), True, 'import numpy as np\n'), ((1174, 1218), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(V, (I, J))'], {'shape': '(n, m)'}), '((V, (I, J)), shape=(n, m))\n', (1191, 1218), False, 'from scipy import sparse\n')]
from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
import os, sys
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable


def bins_from_centers(x):
    xnew = np.zeros(len(x) + 1)
    dx = np.zeros(len(x) + 1)
    dx[1:-1] = x[1:] - x[:-1]
    dx[0] = dx[1]
    dx[-1] = dx[-2]
    xnew[:-1] = x - 0.5*dx[:-1]
    xnew[-1] = x[-1] + 0.5*dx[-1]
    return xnew


def plot_2d_histogram(datafile = 'all_runs_d_12.20.dat'):
    ylabel = r'log(H$^{-}$ Photodetachment Scale Factor)'
    xlabel = "log(LW Scale Factor)"

    data = np.genfromtxt(datafile)  # names = True)
    k27 = data[:,0]
    LW = data[:,1]

    k27_centers = np.linspace(np.log10(np.min(k27)), np.log10(np.max(k27)), int(np.sqrt(np.size(k27))))
    k27_vals = bins_from_centers(k27_centers)
    LW_centers = np.linspace(np.log10(np.min(LW)), np.log10(np.max(LW)), int(np.sqrt(np.size(LW))))
    LW_vals = bins_from_centers(LW_centers)

    k27_mesh, LW_mesh = np.meshgrid(LW_vals, k27_vals)
    k27_center_mesh, LW_center_mesh = np.meshgrid(LW_centers, k27_centers)

    # f_H2[data['k27'] == 1.58489319] = 100.0  # flag to figure out orientation
    f_H2 = data[:,2]
    z_mesh = f_H2.reshape(int(np.sqrt(np.size(k27))), int(np.sqrt(np.size(LW))))
    # z_mesh = z[:-1,:-1]

    fig, ax = plt.subplots()
    fig.set_size_inches(8,8)

    img1 = ax.pcolormesh(10.0**(LW_mesh), 10.0**(k27_mesh), np.log10(z_mesh.T),
                         cmap = 'magma', vmin = -9, vmax = -2.8)
    ax.semilogx()
    ax.semilogy()
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    divider = make_axes_locatable(ax)
    cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
    fig.colorbar(img1, cax=cax1, label = r'log(f$_{\rm H_2}$)')
    ax.contour(10.**(LW_center_mesh), 10.0**(k27_center_mesh), np.log10(z_mesh.T),
               levels = [-8,-7,-6,-5,-4,-3], colors = 'black', linewidths = 3, linestyles = '-.')
    ax.scatter([1,1,100,100], [1,100,1,100], s = 250, marker = "*", color = "white")
    plt.minorticks_on()
    plt.tight_layout(h_pad = 0, w_pad = 0.05)
    fig.savefig("fH2.png")
    plt.close()

    f_H2 = data[:,3]
    z_mesh = f_H2.reshape(int(np.sqrt(np.size(k27))), int(np.sqrt(np.size(LW))))
    # z_mesh = z[:-1,:-1]

    fig, ax = plt.subplots()
    fig.set_size_inches(8,8)

    img1 = ax.pcolormesh(10.0**(LW_mesh), 10.0**(k27_mesh), np.log10(z_mesh.T),
                         cmap = 'RdYlBu_r', vmin = np.min(np.log10(z_mesh)), vmax = np.max(np.log10(z_mesh)))
    ax.semilogx()
    ax.semilogy()
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    divider = make_axes_locatable(ax)
    cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
    fig.colorbar(img1, cax=cax1, label = r'log(Temperature [K])')
    plt.minorticks_on()
    plt.tight_layout(h_pad = 0, w_pad = 0.05)
    fig.savefig("T.png")
    plt.close()

    return


if __name__ == "__main__":
    plot_2d_histogram(datafile = str(sys.argv[1]))
[ "mpl_toolkits.axes_grid1.make_axes_locatable", "matplotlib.pyplot.tight_layout", "numpy.size", "numpy.meshgrid", "matplotlib.pyplot.close", "numpy.genfromtxt", "numpy.min", "numpy.max", "matplotlib.pyplot.minorticks_on", "numpy.log10", "matplotlib.pyplot.subplots" ]
[((580, 603), 'numpy.genfromtxt', 'np.genfromtxt', (['datafile'], {}), '(datafile)\n', (593, 603), True, 'import numpy as np\n'), ((1047, 1077), 'numpy.meshgrid', 'np.meshgrid', (['LW_vals', 'k27_vals'], {}), '(LW_vals, k27_vals)\n', (1058, 1077), True, 'import numpy as np\n'), ((1116, 1152), 'numpy.meshgrid', 'np.meshgrid', (['LW_centers', 'k27_centers'], {}), '(LW_centers, k27_centers)\n', (1127, 1152), True, 'import numpy as np\n'), ((1375, 1389), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1387, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1766), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1762, 1766), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2187, 2206), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (2204, 2206), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2249), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0)', 'w_pad': '(0.05)'}), '(h_pad=0, w_pad=0.05)\n', (2228, 2249), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2296), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2294, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2440, 2454), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2452, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2876), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2872, 2876), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3013, 3032), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (3030, 3032), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3075), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0)', 'w_pad': '(0.05)'}), '(h_pad=0, w_pad=0.05)\n', (3054, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3109, 3120), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3118, 3120), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1548), 'numpy.log10', 'np.log10', (['z_mesh.T'], {}), '(z_mesh.T)\n', (1538, 1548), True, 'import numpy as np\n'), ((1961, 1979), 'numpy.log10', 'np.log10', (['z_mesh.T'], {}), '(z_mesh.T)\n', (1969, 1979), True, 'import numpy as np\n'), ((2595, 2613), 'numpy.log10', 'np.log10', (['z_mesh.T'], {}), '(z_mesh.T)\n', (2603, 2613), True, 'import numpy as np\n'), ((711, 722), 'numpy.min', 'np.min', (['k27'], {}), '(k27)\n', (717, 722), True, 'import numpy as np\n'), ((734, 745), 'numpy.max', 'np.max', (['k27'], {}), '(k27)\n', (740, 745), True, 'import numpy as np\n'), ((889, 899), 'numpy.min', 'np.min', (['LW'], {}), '(LW)\n', (895, 899), True, 'import numpy as np\n'), ((911, 921), 'numpy.max', 'np.max', (['LW'], {}), '(LW)\n', (917, 921), True, 'import numpy as np\n'), ((787, 799), 'numpy.size', 'np.size', (['k27'], {}), '(k27)\n', (794, 799), True, 'import numpy as np\n'), ((963, 974), 'numpy.size', 'np.size', (['LW'], {}), '(LW)\n', (970, 974), True, 'import numpy as np\n'), ((1292, 1304), 'numpy.size', 'np.size', (['k27'], {}), '(k27)\n', (1299, 1304), True, 'import numpy as np\n'), ((1320, 1331), 'numpy.size', 'np.size', (['LW'], {}), '(LW)\n', (1327, 1331), True, 'import numpy as np\n'), ((2357, 2369), 'numpy.size', 'np.size', (['k27'], {}), '(k27)\n', (2364, 2369), True, 'import numpy as np\n'), ((2385, 2396), 'numpy.size', 'np.size', (['LW'], {}), '(LW)\n', (2392, 2396), True, 'import numpy as np\n'), ((2673, 2689), 'numpy.log10', 'np.log10', (['z_mesh'], {}), 
'(z_mesh)\n', (2681, 2689), True, 'import numpy as np\n'), ((2731, 2747), 'numpy.log10', 'np.log10', (['z_mesh'], {}), '(z_mesh)\n', (2739, 2747), True, 'import numpy as np\n')]
# Imports --------------------------------------------------------------------- # Python import argparse import joblib import yaml import os.path as osp from collections import defaultdict import joblib import os # PyTorch import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from torch import autograd from torch.optim import Adam # NumPy import numpy as np from numpy import array from numpy.random import choice, randint # Model Building from gen_models.attentive_vae import AttentiveVAE import rlkit.torch.pytorch_util as ptu # Data from observations import multi_mnist from torch.utils.data import DataLoader, TensorDataset # Logging from rlkit.core import logger from rlkit.launchers.launcher_util import setup_logger, set_seed from rlkit.core.vistools import generate_gif, save_pytorch_tensor_as_img import sys def experiment(exp_specs): ptu.set_gpu_mode(exp_specs['use_gpu']) # Set up logging ---------------------------------------------------------- exp_id = exp_specs['exp_id'] exp_prefix = exp_specs['exp_name'] seed = exp_specs['seed'] set_seed(seed) setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs) # Prep the data ----------------------------------------------------------- path = 'junk_vis/debug_att_vae_shallower_48_64_dim_0p1_kl_stronger_seg_conv' (X_train, Y_train), (X_test, Y_test) = multi_mnist(path, max_digits=2, canvas_size=48, seed=42, use_max=False) convert_dict = {0: [0.,0.], 1: [1.,0.], 2: [1.,1.]} Num_train = np.array([convert_dict[a.shape[0]] for a in Y_train]) Num_test = np.array([convert_dict[a.shape[0]] for a in Y_test]) X_train = X_train[:,None,...] X_test = X_test[:,None,...] X_train, X_test = torch.FloatTensor(X_train)/255.0, torch.FloatTensor(X_test)/255.0 mask_train, mask_test = torch.FloatTensor(Num_train), torch.FloatTensor(Num_test) train_ds = TensorDataset(X_train, Num_train) val_ds = TensorDataset(X_test, Num_test) # Model Definition -------------------------------------------------------- model = AttentiveVAE( [1, 48, 48], exp_specs['vae_specs']['z_dim'], exp_specs['vae_specs']['x_encoder_specs'], exp_specs['vae_specs']['z_seg_conv_specs'], exp_specs['vae_specs']['z_seg_fc_specs'], exp_specs['vae_specs']['z_obj_conv_specs'], exp_specs['vae_specs']['z_obj_fc_specs'], exp_specs['vae_specs']['z_seg_recon_fc_specs'], exp_specs['vae_specs']['z_seg_recon_upconv_specs'], exp_specs['vae_specs']['z_obj_recon_fc_specs'], exp_specs['vae_specs']['z_obj_recon_upconv_specs'], exp_specs['vae_specs']['recon_upconv_part_specs'] ) if ptu.gpu_enabled(): model.cuda() # Optimizer --------------------------------------------------------------- model_optim = Adam(model.parameters(), lr=float(exp_specs['model_lr']), weight_decay=float(exp_specs['model_wd'])) # ------------------------------------------------------------------------- global_iter = 0 for epoch in range(exp_specs['epochs']): train_loader = DataLoader(train_ds, batch_size=exp_specs['batch_size'], shuffle=True, num_workers=4, pin_memory=False, drop_last=True) for iter_num, img_batch in enumerate(train_loader): img_batch, num_batch = img_batch[0], img_batch[1] if ptu.gpu_enabled(): img_batch = img_batch.cuda() what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(img_batch, num_batch) elbo, KL = model.compute_ELBO( what_means + where_means, what_log_covs + where_log_covs, recon_mean, recon_log_cov, img_batch, average_over_batch=True ) loss = -1. * elbo loss = loss + 1. 
* sum([m.mean() for m in masks]) loss.backward() model_optim.step() if global_iter % exp_specs['freq_val'] == 0: with torch.no_grad(): print('\nValidating Iter %d...' % global_iter) model.eval() idxs = np.random.choice(int(X_test.size(0)), size=exp_specs['batch_size'], replace=False) img_batch, num_batch = X_test[idxs], Num_test[idxs] if ptu.gpu_enabled(): img_batch = img_batch.cuda() what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(img_batch, num_batch) elbo, KL = model.compute_ELBO( what_means + where_means, what_log_covs + where_log_covs, recon_mean, recon_log_cov, img_batch, average_over_batch=True ) mse = ((recon_mean - img_batch)**2).mean() print('ELBO:\t%.4f' % elbo) print('MSE:\t%.4f' % mse) print('KL:\t%.4f' % KL) for i in range(1): save_pytorch_tensor_as_img(img_batch[i].data.cpu(), os.path.join(path, '%d_%d_img.png'%(global_iter, i))) save_pytorch_tensor_as_img(recon_mean[i].data.cpu(), os.path.join(path, '%d_%d_recon.png'%(global_iter, i))) save_pytorch_tensor_as_img(masks[0][i].data.cpu(), os.path.join(path, '%d_%d_mask_0.png'%(global_iter, i))) # save_pytorch_tensor_as_img(masks[1][i].data.cpu(), os.path.join(path, '%d_%d_mask_1.png'%(global_iter, i))) model.train() global_iter += 1 if __name__ == '__main__': # Arguments parser = argparse.ArgumentParser() parser.add_argument('-e', '--experiment', help='experiment specification file') args = parser.parse_args() with open(args.experiment, 'r') as spec_file: spec_string = spec_file.read() exp_specs = yaml.load(spec_string) experiment(exp_specs)
[ "yaml.load", "argparse.ArgumentParser", "torch.utils.data.DataLoader", "rlkit.torch.pytorch_util.gpu_enabled", "observations.multi_mnist", "rlkit.launchers.launcher_util.set_seed", "torch.FloatTensor", "numpy.array", "torch.utils.data.TensorDataset", "rlkit.torch.pytorch_util.set_gpu_mode", "gen_models.attentive_vae.AttentiveVAE", "torch.no_grad", "os.path.join", "rlkit.launchers.launcher_util.setup_logger" ]
[((906, 944), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (["exp_specs['use_gpu']"], {}), "(exp_specs['use_gpu'])\n", (922, 944), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1130, 1144), 'rlkit.launchers.launcher_util.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (1138, 1144), False, 'from rlkit.launchers.launcher_util import setup_logger, set_seed\n'), ((1149, 1218), 'rlkit.launchers.launcher_util.setup_logger', 'setup_logger', ([], {'exp_prefix': 'exp_prefix', 'exp_id': 'exp_id', 'variant': 'exp_specs'}), '(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)\n', (1161, 1218), False, 'from rlkit.launchers.launcher_util import setup_logger, set_seed\n'), ((1424, 1495), 'observations.multi_mnist', 'multi_mnist', (['path'], {'max_digits': '(2)', 'canvas_size': '(48)', 'seed': '(42)', 'use_max': '(False)'}), '(path, max_digits=2, canvas_size=48, seed=42, use_max=False)\n', (1435, 1495), False, 'from observations import multi_mnist\n'), ((1568, 1621), 'numpy.array', 'np.array', (['[convert_dict[a.shape[0]] for a in Y_train]'], {}), '([convert_dict[a.shape[0]] for a in Y_train])\n', (1576, 1621), True, 'import numpy as np\n'), ((1637, 1689), 'numpy.array', 'np.array', (['[convert_dict[a.shape[0]] for a in Y_test]'], {}), '([convert_dict[a.shape[0]] for a in Y_test])\n', (1645, 1689), True, 'import numpy as np\n'), ((1945, 1978), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_train', 'Num_train'], {}), '(X_train, Num_train)\n', (1958, 1978), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((1992, 2023), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_test', 'Num_test'], {}), '(X_test, Num_test)\n', (2005, 2023), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2117, 2679), 'gen_models.attentive_vae.AttentiveVAE', 'AttentiveVAE', (['[1, 48, 48]', "exp_specs['vae_specs']['z_dim']", "exp_specs['vae_specs']['x_encoder_specs']", "exp_specs['vae_specs']['z_seg_conv_specs']", "exp_specs['vae_specs']['z_seg_fc_specs']", "exp_specs['vae_specs']['z_obj_conv_specs']", "exp_specs['vae_specs']['z_obj_fc_specs']", "exp_specs['vae_specs']['z_seg_recon_fc_specs']", "exp_specs['vae_specs']['z_seg_recon_upconv_specs']", "exp_specs['vae_specs']['z_obj_recon_fc_specs']", "exp_specs['vae_specs']['z_obj_recon_upconv_specs']", "exp_specs['vae_specs']['recon_upconv_part_specs']"], {}), "([1, 48, 48], exp_specs['vae_specs']['z_dim'], exp_specs[\n 'vae_specs']['x_encoder_specs'], exp_specs['vae_specs'][\n 'z_seg_conv_specs'], exp_specs['vae_specs']['z_seg_fc_specs'],\n exp_specs['vae_specs']['z_obj_conv_specs'], exp_specs['vae_specs'][\n 'z_obj_fc_specs'], exp_specs['vae_specs']['z_seg_recon_fc_specs'],\n exp_specs['vae_specs']['z_seg_recon_upconv_specs'], exp_specs[\n 'vae_specs']['z_obj_recon_fc_specs'], exp_specs['vae_specs'][\n 'z_obj_recon_upconv_specs'], exp_specs['vae_specs'][\n 'recon_upconv_part_specs'])\n", (2129, 2679), False, 'from gen_models.attentive_vae import AttentiveVAE\n'), ((2751, 2768), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (2766, 2768), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5883, 5908), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5906, 5908), False, 'import argparse\n'), ((1872, 1900), 'torch.FloatTensor', 'torch.FloatTensor', (['Num_train'], {}), '(Num_train)\n', (1889, 1900), False, 'import torch\n'), ((1902, 1929), 'torch.FloatTensor', 'torch.FloatTensor', (['Num_test'], {}), '(Num_test)\n', (1919, 1929), False, 'import 
torch\n'), ((3160, 3283), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': "exp_specs['batch_size']", 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(False)', 'drop_last': '(True)'}), "(train_ds, batch_size=exp_specs['batch_size'], shuffle=True,\n num_workers=4, pin_memory=False, drop_last=True)\n", (3170, 3283), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((6133, 6155), 'yaml.load', 'yaml.load', (['spec_string'], {}), '(spec_string)\n', (6142, 6155), False, 'import yaml\n'), ((1778, 1804), 'torch.FloatTensor', 'torch.FloatTensor', (['X_train'], {}), '(X_train)\n', (1795, 1804), False, 'import torch\n'), ((1812, 1837), 'torch.FloatTensor', 'torch.FloatTensor', (['X_test'], {}), '(X_test)\n', (1829, 1837), False, 'import torch\n'), ((3417, 3434), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (3432, 3434), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((4100, 4115), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4113, 4115), False, 'import torch\n'), ((4423, 4440), 'rlkit.torch.pytorch_util.gpu_enabled', 'ptu.gpu_enabled', ([], {}), '()\n', (4438, 4440), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5295, 5349), 'os.path.join', 'os.path.join', (['path', "('%d_%d_img.png' % (global_iter, i))"], {}), "(path, '%d_%d_img.png' % (global_iter, i))\n", (5307, 5349), False, 'import os\n'), ((5426, 5482), 'os.path.join', 'os.path.join', (['path', "('%d_%d_recon.png' % (global_iter, i))"], {}), "(path, '%d_%d_recon.png' % (global_iter, i))\n", (5438, 5482), False, 'import os\n'), ((5557, 5614), 'os.path.join', 'os.path.join', (['path', "('%d_%d_mask_0.png' % (global_iter, i))"], {}), "(path, '%d_%d_mask_0.png' % (global_iter, i))\n", (5569, 5614), False, 'import os\n')]
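A note on the training loop in the row above: each iteration calls loss.backward() and model_optim.step() without ever calling model_optim.zero_grad(), so gradients accumulate across batches. Below is a minimal, self-contained sketch of the conventional Adam update pattern; the linear model, tensors and loss are placeholders chosen for illustration, not the AttentiveVAE interface.

import torch
from torch import nn
from torch.optim import Adam

model = nn.Linear(10, 1)                    # stand-in for the VAE
optim = Adam(model.parameters(), lr=1e-3)

x = torch.randn(32, 10)                  # stand-in minibatch
target = torch.randn(32, 1)

for _ in range(3):
    optim.zero_grad()                    # clear gradients from the previous step
    loss = ((model(x) - target) ** 2).mean()   # stand-in for the -ELBO objective
    loss.backward()                      # accumulate fresh gradients
    optim.step()                         # apply the Adam update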
import os import pickle import numpy as np import PIL.Image import dnnlib import dnnlib.tflib as tflib import config from training import misc synthesis_kwargs = dict(minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] def draw_figure(png, Gs, seeds): avg_dlantents_b = Gs.get_var('dlatent_avg_b') avg_dlantents_c = Gs.get_var('dlatent_avg_c') for seed in seeds: rnd = np.random.RandomState(seed) b1 = rnd.randn(Gs.input_shapes[0][1]) b1 = b1[np.newaxis] b1 = Gs.components.mapping_b.run(b1, None) b1_v = b1[0, 0, :] # b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b # change C for i in range(20): c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c_v = c[0, 0, :] c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c current_png = png + '/seedc_%d_%d' % (seed, i) + '.png' gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) b1_v = b1[0, 0, :] c = rnd.randn(Gs.input_shapes[1][1]) c = c[np.newaxis] c = Gs.components.mapping_c.run(c, None) # [seed, layer, component] c[:, :] = avg_dlantents_c for j in range(80): random_b2 = rnd.randn(Gs.input_shapes[0][1]) random_b2 = random_b2[np.newaxis] random_b2 = Gs.components.mapping_b.run(random_b2, None) b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b print(b2_v.shape) # gram-schmidt process a1 = np.sum(b1_v * b2_v, dtype=np.float32) a2 = np.sum(b1_v * b1_v, dtype=np.float32) print(a1) print(a2) b2_v = b2_v - a1 / a2 * b1_v print(b1_v.shape) print(b2_v.shape) print(np.sum(b1_v * b2_v)) for i in range(10): tmp = np.empty_like(b1) tmp[:, :] = b1_v + 0.1 * i * b2_v current_png = png + '/seedb%d_%d_%d' % (seed, j, i) + '.png' gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1] misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1)) #--------------------------------------------------------------------------- # Main program. def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) network_pkl = 'network-snapshot-010000.pkl' G, D, Gs = misc.load_pkl(network_pkl) draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23]) #---------------------------------------------------------------------------- if __name__ == "__main__": main() #----------------------------------------------------------------------------
[ "numpy.sum", "os.makedirs", "training.misc.load_pkl", "numpy.empty_like", "numpy.random.RandomState", "dnnlib.util.open_url", "pickle.load", "training.misc.save_image_grid", "dnnlib.tflib.init_tf" ]
[((2768, 2783), 'dnnlib.tflib.init_tf', 'tflib.init_tf', ([], {}), '()\n', (2781, 2783), True, 'import dnnlib.tflib as tflib\n'), ((2788, 2833), 'os.makedirs', 'os.makedirs', (['config.result_dir'], {'exist_ok': '(True)'}), '(config.result_dir, exist_ok=True)\n', (2799, 2833), False, 'import os\n'), ((2897, 2923), 'training.misc.load_pkl', 'misc.load_pkl', (['network_pkl'], {}), '(network_pkl)\n', (2910, 2923), False, 'from training import misc\n'), ((596, 623), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (617, 623), True, 'import numpy as np\n'), ((268, 321), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['url'], {'cache_dir': 'config.cache_dir'}), '(url, cache_dir=config.cache_dir)\n', (288, 321), False, 'import dnnlib\n'), ((353, 367), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (364, 367), False, 'import pickle\n'), ((1340, 1412), 'training.misc.save_image_grid', 'misc.save_image_grid', (['gen', 'current_png'], {'drange': '[-1, 1]', 'grid_size': '(1, 1)'}), '(gen, current_png, drange=[-1, 1], grid_size=(1, 1))\n', (1360, 1412), False, 'from training import misc\n'), ((1987, 2024), 'numpy.sum', 'np.sum', (['(b1_v * b2_v)'], {'dtype': 'np.float32'}), '(b1_v * b2_v, dtype=np.float32)\n', (1993, 2024), True, 'import numpy as np\n'), ((2042, 2079), 'numpy.sum', 'np.sum', (['(b1_v * b1_v)'], {'dtype': 'np.float32'}), '(b1_v * b1_v, dtype=np.float32)\n', (2048, 2079), True, 'import numpy as np\n'), ((2243, 2262), 'numpy.sum', 'np.sum', (['(b1_v * b2_v)'], {}), '(b1_v * b2_v)\n', (2249, 2262), True, 'import numpy as np\n'), ((2318, 2335), 'numpy.empty_like', 'np.empty_like', (['b1'], {}), '(b1)\n', (2331, 2335), True, 'import numpy as np\n'), ((2584, 2656), 'training.misc.save_image_grid', 'misc.save_image_grid', (['gen', 'current_png'], {'drange': '[-1, 1]', 'grid_size': '(1, 1)'}), '(gen, current_png, drange=[-1, 1], grid_size=(1, 1))\n', (2604, 2656), False, 'from training import misc\n')]
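The inner loop of draw_figure above orthogonalizes a second latent direction against b1 with one Gram-Schmidt step before interpolating along it. A minimal NumPy sketch of that step; the 512-dimensional latent size is assumed here only for illustration.

import numpy as np

rng = np.random.RandomState(0)
b1 = rng.randn(512)                      # first latent direction
b2 = rng.randn(512)                      # second direction, not yet orthogonal to b1

# remove the component of b2 that lies along b1 (Gram-Schmidt)
b2_orth = b2 - (np.sum(b1 * b2) / np.sum(b1 * b1)) * b1
assert abs(float(np.sum(b1 * b2_orth))) < 1e-6   # now numerically orthogonal

# walk away from b1 along the orthogonal direction, as in the loop above
steps = [b1 + 0.1 * i * b2_orth for i in range(10)]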
# -*- coding: utf-8 -*- import time import numpy as np from classes import Debug, KalmanFilter import smbus bus = smbus.SMBus(2) # bus = smbus.SMBus(0) fuer Revision 1 address = 0x68 # via i2cdetect power_mgmt_1 = 0x6b ACCEL_CONFIG = 0x1C # Reg 28 ACCEL_CONFIG2 = 0x1D # Reg 29 class Imu(Debug, KalmanFilter): def __init__(self, sim_mode=False): self.debug = Debug('imu') self.sim_mode = sim_mode self.kf = self.filter_config() self.raw = self.read_raw() self.offset = self.offset_calc() #self.port = port self.imu_config() def filter_config(self): # paramter for kalman filter dt = 1.0 / 50.0 # state transition model, A F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]) H = np.array([0, 0, 1]).reshape(1, 3) # transponieren #observation model C q = 0.05 Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) # process noise R = np.array([0.8]).reshape(1, 1) # observation noise return KalmanFilter(F=F, H=H, Q=Q, R=R) def imu_config(self): # Aktivieren, um das Modul ansprechen zu koennen bus.write_byte_data(address, power_mgmt_1, 0) # full power mode # bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disabele=1, disabled accel_z, gyro_x bis _z # setzt Accelerometer Full Scale Select (hier auf +-2g) bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000) # setzt den Tiefpass-Filter bus.write_byte_data(address, ACCEL_CONFIG2, 0b00000100) # entspricht dem Wert 4, also 19,8 ms ~50Hz #print("IMU config ready..") def read_word(self, reg): h = bus.read_byte_data(address, reg) l = bus.read_byte_data(address, reg + 1) # h = bus.read_byte_data(self.address, reg) # l = bus.read_byte_data(self.address, reg + 1) value = (h << 8) + l return value def read_word_2c(self, reg): val = self.read_word(reg) if (val >= 0x8000): return -((65535 - val) + 1) else: return val def read_raw(self): if self.sim_mode == True: return 100, 200, 20 else: beschleunigung_xout = self.read_word_2c(0x3b) beschleunigung_yout = self.read_word_2c(0x3d) gyroskop_zout = self.read_word_2c(0x47) beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0 gyroskop_zout_skaliert = gyroskop_zout / 131 return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert def offset_calc(self): init_data = [] print("offset calc start...") for count in range(0, 200): init_data.append(self.read_raw()) offset = np.array(init_data) print("finished calc..") #print("offset:",offset) return np.median(offset, axis=0) def kalman_filter(self, z): # das ist meine C matrix für den Ausgang, also müsste das mittlere die geschwindigkeit sein np.dot(self.kf.H, self.kf.predict()) self.kf.update(z) #print("kalmanfilter: ", self.kf.x[0], self.kf.x[1], self.kf.x[2]) return self.kf.x[1] def process(self): return self.kalman_filter(self.read_raw() - self.offset) ''' def test_imu(save=False, draw=False): print("stat testing...") imu = Imu(sim_mode=False) t_ref = int(round(time.time() * 1000)) if imu.sim_mode: for i in range(0, 1000): try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) time.sleep(0.1) except KeyboardInterrupt: break else: while KeyboardInterrupt is not True: try: imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run() imu.debug.excecute(t_ref) except KeyboardInterrupt: break if save: imu.debug.save() if draw: imu.debug.draw() return # if __name__== "__main": test_imu(save=True) '''
[ "classes.Debug", "numpy.median", "classes.KalmanFilter", "numpy.array", "smbus.SMBus" ]
[((115, 129), 'smbus.SMBus', 'smbus.SMBus', (['(2)'], {}), '(2)\n', (126, 129), False, 'import smbus\n'), ((377, 389), 'classes.Debug', 'Debug', (['"""imu"""'], {}), "('imu')\n", (382, 389), False, 'from classes import Debug, KalmanFilter\n'), ((730, 775), 'numpy.array', 'np.array', (['[[1, dt, 0], [0, 1, dt], [0, 0, 1]]'], {}), '([[1, dt, 0], [0, 1, dt], [0, 0, 1]])\n', (738, 775), True, 'import numpy as np\n'), ((889, 932), 'numpy.array', 'np.array', (['[[q, q, 0], [q, q, 0], [0, 0, 0]]'], {}), '([[q, q, 0], [q, q, 0], [0, 0, 0]])\n', (897, 932), True, 'import numpy as np\n'), ((1028, 1060), 'classes.KalmanFilter', 'KalmanFilter', ([], {'F': 'F', 'H': 'H', 'Q': 'Q', 'R': 'R'}), '(F=F, H=H, Q=Q, R=R)\n', (1040, 1060), False, 'from classes import Debug, KalmanFilter\n'), ((2931, 2950), 'numpy.array', 'np.array', (['init_data'], {}), '(init_data)\n', (2939, 2950), True, 'import numpy as np\n'), ((3032, 3057), 'numpy.median', 'np.median', (['offset'], {'axis': '(0)'}), '(offset, axis=0)\n', (3041, 3057), True, 'import numpy as np\n'), ((788, 807), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (796, 807), True, 'import numpy as np\n'), ((962, 977), 'numpy.array', 'np.array', (['[0.8]'], {}), '([0.8])\n', (970, 977), True, 'import numpy as np\n')]
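filter_config above parameterizes a three-state Kalman filter through the project-specific classes.KalmanFilter, whose internals are not shown. The following is a generic NumPy sketch of the standard predict/update cycle using the same F, H, Q and R values; it is not that class's API.

import numpy as np

dt = 1.0 / 50.0
F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])        # state transition model
H = np.array([[0.0, 0.0, 1.0]])                          # only the last state component is observed
Q = 0.05 * np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])   # process noise
R = np.array([[0.8]])                                    # observation noise

def kf_step(x, P, z):
    # predict
    x_pred = F @ x
    P_pred = F @ P @ F.T + Q
    # update with scalar measurement z
    y = np.array([[z]]) - H @ x_pred          # innovation
    S = H @ P_pred @ H.T + R                   # innovation covariance
    K = P_pred @ H.T @ np.linalg.inv(S)        # Kalman gain
    x_new = x_pred + K @ y
    P_new = (np.eye(3) - K @ H) @ P_pred
    return x_new, P_new

x, P = np.zeros((3, 1)), np.eye(3)
x, P = kf_step(x, P, 0.1)                            # feed one measurement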
from os import path import os import matplotlib.pyplot as plt import numpy as np import autofit as af """ The `analysis.py` module contains the dataset and log likelihood function which given a model instance (set up by the non-linear search) fits the dataset and returns the log likelihood of that model. """ class Analysis(af.Analysis): def __init__(self, data: np.ndarray, noise_map:np.ndarray): """ In this example the `Analysis` object only contains the data and noise-map. It can be easily extended, for more complex data-sets and model fitting problems. Parameters ---------- data A 1D numpy array containing the data (e.g. a noisy 1D Gaussian) fitted in the workspace examples. noise_map A 1D numpy array containing the noise values of the data, used for computing the goodness of fit metric. """ super().__init__() self.data = data self.noise_map = noise_map def log_likelihood_function(self, instance: af.ModelInstance) -> float: """ Determine the log likelihood of a fit of multiple profiles to the dataset. Parameters ---------- instance : af.Collection The model instances of the profiles. Returns ------- The log likelihood value indicating how well this model fit the dataset. """ xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 log_likelihood = -0.5 * sum(chi_squared_map) return log_likelihood def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool): """ During a model-fit, the `visualize` method is called throughout the non-linear search and is used to output images indicating the quality of the fit so far.. The `instance` passed into the visualize method is maximum log likelihood solution obtained by the model-fit so far and it can be used to provide on-the-fly images showing how the model-fit is going. For your model-fitting problem this function will be overwritten with plotting functions specific to your problem. Parameters ---------- paths The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. instance An instance of the model that is being fitted to the data by this analysis (whose parameters have been set via a non-linear search). during_analysis If True the visualization is being performed midway through the non-linear search before it is finished, which may change which images are output. """ xvalues = np.arange(self.data.shape[0]) try: model_data_1d = sum( profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance ) except TypeError: model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, y=self.data, yerr=self.noise_map, color="k", ecolor="k", elinewidth=1, capsize=2, ) plt.plot(range(self.data.shape[0]), model_data_1d, color="r") plt.title("Dynesty model fit to 1D Gaussian + Exponential dataset.") plt.xlabel("x values of profile") plt.ylabel("Profile normalization") os.makedirs(paths.image_path, exist_ok=True) plt.savefig(path.join(paths.image_path, "model_fit.png")) plt.clf()
[ "matplotlib.pyplot.title", "os.makedirs", "matplotlib.pyplot.clf", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "os.path.join", "matplotlib.pyplot.errorbar" ]
[((1484, 1513), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (1493, 1513), True, 'import numpy as np\n'), ((3328, 3357), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (3337, 3357), True, 'import numpy as np\n'), ((3644, 3754), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'xvalues', 'y': 'self.data', 'yerr': 'self.noise_map', 'color': '"""k"""', 'ecolor': '"""k"""', 'elinewidth': '(1)', 'capsize': '(2)'}), "(x=xvalues, y=self.data, yerr=self.noise_map, color='k', ecolor\n ='k', elinewidth=1, capsize=2)\n", (3656, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3933, 4001), 'matplotlib.pyplot.title', 'plt.title', (['"""Dynesty model fit to 1D Gaussian + Exponential dataset."""'], {}), "('Dynesty model fit to 1D Gaussian + Exponential dataset.')\n", (3942, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4044), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x values of profile"""'], {}), "('x values of profile')\n", (4021, 4044), True, 'import matplotlib.pyplot as plt\n'), ((4054, 4089), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Profile normalization"""'], {}), "('Profile normalization')\n", (4064, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4101, 4145), 'os.makedirs', 'os.makedirs', (['paths.image_path'], {'exist_ok': '(True)'}), '(paths.image_path, exist_ok=True)\n', (4112, 4145), False, 'import os\n'), ((4222, 4231), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4229, 4231), True, 'import matplotlib.pyplot as plt\n'), ((4167, 4211), 'os.path.join', 'path.join', (['paths.image_path', '"""model_fit.png"""'], {}), "(paths.image_path, 'model_fit.png')\n", (4176, 4211), False, 'from os import path\n')]
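The goodness-of-fit metric in log_likelihood_function above reduces to a noise-weighted chi-squared. A self-contained numeric sketch with made-up data, noise-map and model values:

import numpy as np

data = np.array([1.0, 2.0, 3.0])
noise_map = np.array([0.1, 0.1, 0.2])
model_data = np.array([1.1, 1.9, 3.2])

residual_map = data - model_data
chi_squared_map = (residual_map / noise_map) ** 2.0
log_likelihood = -0.5 * chi_squared_map.sum()
print(log_likelihood)   # higher (less negative) means a better fit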
import numpy as np import pandas as pd from veneer.pest_runtime import * from veneer.manage import start,kill_all_now import pyapprox as pya from functools import partial from pyapprox.adaptive_sparse_grid import max_level_admissibility_function from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth from pyapprox.variable_transformations import AffineRandomVariableTransformation from funcs.read_data import variables_prep, file_settings from funcs.modeling_funcs import vs_settings, \ modeling_settings, paralell_vs, obtain_initials, change_param_values # Create the copy of models and veneer list project_name = 'MW_BASE_RC10.rsproj' veneer_name = 'vcmd45\\FlowMatters.Source.VeneerCmd.exe' first_port=15000; num_copies = 8 _, things_to_record, _, _, _ = modeling_settings() processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name) vs_list = vs_settings(ports, things_to_record) # obtain the initial values of parameters initial_values = obtain_initials(vs_list[0]) def run_source_lsq(vars, vs_list=vs_list): """ Script used to run_source and return the output file. The function is called by AdaptiveLejaPCE. """ from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble import spotpy as sp print('Read Parameters') parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index') # Define objective functions # Use annual or monthly loads def timeseries_sum(df, temp_scale = 'annual'): """ Obtain the sum of timeseries of different temporal scale. temp_scale: str, default is 'Y', monthly using 'M' """ assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.' if temp_scale == 'monthly': sum_126001A = df.resample('M').sum() else: month_126001A = df.resample('M').sum() sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year), columns=df.columns) for i in range(sum_126001A.shape[0]): sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum() return sum_126001A # End timeseries_sum() # import observation if the output.txt requires the use of obs. 
date_range = pd.to_datetime(['2009/07/01', '2018/06/30']) observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date') observed_din.index = pd.to_datetime(observed_din.index) observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x) # loop over the vars and try to use parallel parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short) for i in range(vars.shape[1]): parameter_df.iloc[i] = vars[:, i] # set the time period of the results retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')] # define the modeling period and the recording variables _, _, criteria, start_date, end_date = modeling_settings() din = generate_observation_ensemble(vs_list, criteria, start_date, end_date, parameter_df, retrieve_time) # obtain the sum at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) din_126001A = timeseries_sum(din, temp_scale = 'annual') obs_din = timeseries_sum(observed_din, temp_scale = 'annual') din_126001A = pd.DataFrame(din_126001A,dtype='float').values obs_din = pd.DataFrame(obs_din,dtype='float').values # breakpoint() resid = din_126001A - obs_din rmse = (np.mean(resid ** 2, axis=0)) ** 0.5 if rmse[0] == 0: rmse[0] = 1e-8 rmse = rmse.reshape(rmse.shape[0], 1) print(f'Finish {rmse.shape[0]} run') return rmse # END run_source_lsq() # read parameter distributions datapath = file_settings()[1] para_info = pd.read_csv(datapath + 'Parameters-PCE.csv') # define the variables for PCE param_file = file_settings()[-1] ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False) var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True) # Create PyApprox model n_candidate_samples = 10000 candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(), n_candidate_samples)) pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples) # Define criteria max_level = 6 err_tol = 1e-8 max_num_samples = 100 max_level_1d = [max_level]*(pce.num_vars) admissibility_function = partial( max_level_admissibility_function, max_level, max_level_1d, max_num_samples, err_tol) refinement_indicator = variance_pce_refinement_indicator pce.set_function(run_source_lsq, var_trans) pce.set_refinement_functions( refinement_indicator, admissibility_function, clenshaw_curtis_rule_growth ) # Generate emulator pce.build() # store PCE import pickle pickle.dump(pce, open(f'{file_settings()[0]}\pce-rmse.pkl', "wb")) # set the parameter values to initial values for vs in vs_list: vs = change_param_values(vs, initial_values, fromList=True) kill_all_now(processes)
[ "pandas.DataFrame", "functools.partial", "pandas.Timestamp", "funcs.modeling_funcs.modeling_settings", "pyapprox.variable_transformations.AffineRandomVariableTransformation", "pandas.read_csv", "funcs.read_data.variables_prep", "funcs.read_data.file_settings", "funcs.modeling_funcs.generate_observation_ensemble", "veneer.manage.kill_all_now", "pandas.to_datetime", "numpy.mean", "numpy.arange", "funcs.modeling_funcs.vs_settings", "funcs.modeling_funcs.change_param_values", "funcs.modeling_funcs.obtain_initials", "funcs.modeling_funcs.paralell_vs" ]
[((872, 891), 'funcs.modeling_funcs.modeling_settings', 'modeling_settings', ([], {}), '()\n', (889, 891), False, 'from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble\n'), ((911, 973), 'funcs.modeling_funcs.paralell_vs', 'paralell_vs', (['first_port', 'num_copies', 'project_name', 'veneer_name'], {}), '(first_port, num_copies, project_name, veneer_name)\n', (922, 973), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((985, 1021), 'funcs.modeling_funcs.vs_settings', 'vs_settings', (['ports', 'things_to_record'], {}), '(ports, things_to_record)\n', (996, 1021), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((1082, 1109), 'funcs.modeling_funcs.obtain_initials', 'obtain_initials', (['vs_list[0]'], {}), '(vs_list[0])\n', (1097, 1109), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((4073, 4117), 'pandas.read_csv', 'pd.read_csv', (["(datapath + 'Parameters-PCE.csv')"], {}), "(datapath + 'Parameters-PCE.csv')\n", (4084, 4117), True, 'import pandas as pd\n'), ((4204, 4270), 'funcs.read_data.variables_prep', 'variables_prep', (['param_file'], {'product_uniform': '"""uniform"""', 'dummy': '(False)'}), "(param_file, product_uniform='uniform', dummy=False)\n", (4218, 4270), False, 'from funcs.read_data import variables_prep, file_settings\n'), ((4283, 4348), 'pyapprox.variable_transformations.AffineRandomVariableTransformation', 'AffineRandomVariableTransformation', (['variable'], {'enforce_bounds': '(True)'}), '(variable, enforce_bounds=True)\n', (4317, 4348), False, 'from pyapprox.variable_transformations import AffineRandomVariableTransformation\n'), ((4746, 4842), 'functools.partial', 'partial', (['max_level_admissibility_function', 'max_level', 'max_level_1d', 'max_num_samples', 'err_tol'], {}), '(max_level_admissibility_function, max_level, max_level_1d,\n max_num_samples, err_tol)\n', (4753, 4842), False, 'from functools import partial\n'), ((5327, 5350), 'veneer.manage.kill_all_now', 'kill_all_now', (['processes'], {}), '(processes)\n', (5339, 5350), False, 'from veneer.manage import start, kill_all_now\n'), ((1431, 1491), 'pandas.read_csv', 'pd.read_csv', (['"""../data/Parameters-PCE.csv"""'], {'index_col': '"""Index"""'}), "('../data/Parameters-PCE.csv', index_col='Index')\n", (1442, 1491), True, 'import pandas as pd\n'), ((2415, 2459), 'pandas.to_datetime', 'pd.to_datetime', (["['2009/07/01', '2018/06/30']"], {}), "(['2009/07/01', '2018/06/30'])\n", (2429, 2459), True, 'import pandas as pd\n'), ((2570, 2604), 'pandas.to_datetime', 'pd.to_datetime', (['observed_din.index'], {}), '(observed_din.index)\n', (2584, 2604), True, 'import pandas as pd\n'), ((3194, 3213), 'funcs.modeling_funcs.modeling_settings', 'modeling_settings', ([], {}), '()\n', (3211, 3213), False, 'from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble\n'), ((3224, 3327), 'funcs.modeling_funcs.generate_observation_ensemble', 'generate_observation_ensemble', (['vs_list', 'criteria', 'start_date', 'end_date', 'parameter_df', 'retrieve_time'], {}), '(vs_list, criteria, start_date, end_date,\n parameter_df, retrieve_time)\n', (3253, 3327), False, 'from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble\n'), ((4042, 4057), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (4055, 4057), False, 
'from funcs.read_data import variables_prep, file_settings\n'), ((4163, 4178), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (4176, 4178), False, 'from funcs.read_data import variables_prep, file_settings\n'), ((5271, 5325), 'funcs.modeling_funcs.change_param_values', 'change_param_values', (['vs', 'initial_values'], {'fromList': '(True)'}), '(vs, initial_values, fromList=True)\n', (5290, 5325), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((3033, 3059), 'pandas.Timestamp', 'pd.Timestamp', (['"""2009-07-01"""'], {}), "('2009-07-01')\n", (3045, 3059), True, 'import pandas as pd\n'), ((3061, 3087), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-06-30"""'], {}), "('2018-06-30')\n", (3073, 3087), True, 'import pandas as pd\n'), ((3633, 3673), 'pandas.DataFrame', 'pd.DataFrame', (['din_126001A'], {'dtype': '"""float"""'}), "(din_126001A, dtype='float')\n", (3645, 3673), True, 'import pandas as pd\n'), ((3694, 3730), 'pandas.DataFrame', 'pd.DataFrame', (['obs_din'], {'dtype': '"""float"""'}), "(obs_din, dtype='float')\n", (3706, 3730), True, 'import pandas as pd\n'), ((3803, 3830), 'numpy.mean', 'np.mean', (['(resid ** 2)'], {'axis': '(0)'}), '(resid ** 2, axis=0)\n', (3810, 3830), True, 'import numpy as np\n'), ((2836, 2860), 'numpy.arange', 'np.arange', (['vars.shape[1]'], {}), '(vars.shape[1])\n', (2845, 2860), True, 'import numpy as np\n'), ((2054, 2100), 'numpy.arange', 'np.arange', (['df.index[0].year', 'df.index[-1].year'], {}), '(df.index[0].year, df.index[-1].year)\n', (2063, 2100), True, 'import numpy as np\n'), ((2494, 2509), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (2507, 2509), False, 'from funcs.read_data import variables_prep, file_settings\n'), ((5155, 5170), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (5168, 5170), False, 'from funcs.read_data import variables_prep, file_settings\n')]
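run_source_lsq above aggregates simulated and observed DIN loads to annual sums (July–June blocks built from monthly sums) and scores each parameter sample by RMSE. A self-contained sketch of that objective on synthetic daily series; resample('A-JUN') is used here only as a shorthand for the July–June grouping, and the series are random placeholders rather than Source model output.

import numpy as np
import pandas as pd

idx = pd.date_range('2009-07-01', '2018-06-30', freq='D')
sim = pd.DataFrame({'din': np.random.rand(len(idx))}, index=idx)   # stand-in simulation
obs = pd.DataFrame({'din': np.random.rand(len(idx))}, index=idx)   # stand-in observations

sim_annual = sim.resample('A-JUN').sum()      # water years ending 30 June
obs_annual = obs.resample('A-JUN').sum()

resid = sim_annual.values - obs_annual.values
rmse = np.sqrt(np.mean(resid ** 2, axis=0))       # one RMSE per column
print(rmse)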
# Source: https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=lbQf5GuZyQ4_ import collections from dataclasses import dataclass from typing import Any, Dict, List, Optional, Union import numpy as np import torch import transformers from audioengine.metrics.wer import Jiwer from datasets import load_metric from torch import nn from torch.cuda.amp import autocast from tqdm import tqdm from transformers import ( Trainer, Wav2Vec2Processor, ) from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). """ processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lenghts and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. 
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() # elif self.use_apex: # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() # add less aggressive smoothing to progress bar for better estimate class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback): def on_train_begin(self, args, state, control, **kwargs): if state.is_local_process_zero: self.training_bar = tqdm(total=state.max_steps, smoothing=0.1) self.current_step = 0 # solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6 class GroupedLengthsTrainer(CTCTrainer): # length_field_name should possibly be part of TrainingArguments instead def __init__(self, train_seq_lengths: List[int], *args, **kwargs): super().__init__(*args, **kwargs) self.train_seq_lengths = train_seq_lengths def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None # Build the sampler. if self.args.group_by_length: # lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None if self.args.world_size <= 1: return LengthGroupedSampler( self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths, model_input_name=model_input_name ) else: return DistributedLengthGroupedSampler( self.train_dataset, self.args.train_batch_size, num_replicas=self.args.world_size, rank=self.args.process_index, lengths=self.train_seq_lengths, model_input_name=model_input_name, ) else: return super()._get_train_sampler() wer_metric = load_metric("wer") def compute_metrics(processor): def __call__(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} return __call__
[ "torch.cuda.amp.autocast", "tqdm.tqdm", "transformers.trainer_pt_utils.DistributedLengthGroupedSampler", "numpy.argmax", "datasets.load_metric", "transformers.trainer_pt_utils.LengthGroupedSampler" ]
[((7687, 7705), 'datasets.load_metric', 'load_metric', (['"""wer"""'], {}), "('wer')\n", (7698, 7705), False, 'from datasets import load_metric\n'), ((7822, 7853), 'numpy.argmax', 'np.argmax', (['pred_logits'], {'axis': '(-1)'}), '(pred_logits, axis=-1)\n', (7831, 7853), True, 'import numpy as np\n'), ((5953, 5995), 'tqdm.tqdm', 'tqdm', ([], {'total': 'state.max_steps', 'smoothing': '(0.1)'}), '(total=state.max_steps, smoothing=0.1)\n', (5957, 5995), False, 'from tqdm import tqdm\n'), ((4658, 4668), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (4666, 4668), False, 'from torch.cuda.amp import autocast\n'), ((7023, 7162), 'transformers.trainer_pt_utils.LengthGroupedSampler', 'LengthGroupedSampler', (['self.train_dataset', 'self.args.train_batch_size'], {'lengths': 'self.train_seq_lengths', 'model_input_name': 'model_input_name'}), '(self.train_dataset, self.args.train_batch_size,\n lengths=self.train_seq_lengths, model_input_name=model_input_name)\n', (7043, 7162), False, 'from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler\n'), ((7258, 7484), 'transformers.trainer_pt_utils.DistributedLengthGroupedSampler', 'DistributedLengthGroupedSampler', (['self.train_dataset', 'self.args.train_batch_size'], {'num_replicas': 'self.args.world_size', 'rank': 'self.args.process_index', 'lengths': 'self.train_seq_lengths', 'model_input_name': 'model_input_name'}), '(self.train_dataset, self.args.\n train_batch_size, num_replicas=self.args.world_size, rank=self.args.\n process_index, lengths=self.train_seq_lengths, model_input_name=\n model_input_name)\n', (7289, 7484), False, 'from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler\n')]
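The key trick in DataCollatorCTCWithPadding above is replacing padded label positions with -100 so the loss ignores them. A torch-only sketch of that masking, with a hand-made pad id (0) and attention mask rather than a real tokenizer:

import torch

# two label sequences of different lengths, padded to the same width
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [3, 4, 8, 9, 2]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                              [1, 1, 1, 1, 1]])

# positions where the attention mask is 0 are padding -> set them to -100
labels = input_ids.masked_fill(attention_mask.ne(1), -100)
print(labels)
# tensor([[   5,    6,    7, -100, -100],
#         [   3,    4,    8,    9,    2]])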
import numpy, sys, math, batman import matplotlib.pyplot as plt from scipy import interpolate file = numpy.load('GJ436b_Trans_SED.npz') SEDarray = file['SEDarray'] print(SEDarray.shape) plt.imshow(SEDarray) plt.show() stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800) stellarwave /= 10000. # to um relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5)) stellarwave = stellarwave[relevant] stellarspec = stellarspec[relevant] StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic') planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True) PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic') time = numpy.linspace(0.0,0.1,5000) f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r') params = batman.TransitParams params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has a fixed format params.per = float(f.readline().split('=')[1]) params.inc = float(f.readline().split('=')[1]) params.rp = float(f.readline().split('=')[1]) params.a = float(f.readline().split('=')[1]) params.w = float(f.readline().split('=')[1]) params.ecc = float(f.readline().split('=')[1]) params.fp = float(f.readline().split('=')[1]) params.t_secondary = float(f.readline().split('=')[1]) limbdark = f.readline().split('=')[1] # ugh u1 = float(limbdark.split(',')[0][2:]) u2 = float(limbdark.split(',')[1][1:-2]) params.u = [u1, u2] params.limb_dark = "quadratic" transitmodel = batman.TransitModel(params, time) # creates a transit model object using the time array; we can change the depth now by changing what's in params SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this wave = numpy.linspace(1.75,5.25,3500) for waveval in wave: params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but batman wants rp/rs fluxtransit = transitmodel.light_curve(params) actualflux = fluxtransit * StellarInterp(waveval) SEDarray = numpy.vstack((SEDarray, actualflux)) SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row with all zeroes numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave) plt.imshow(SEDarray) plt.show()
[ "numpy.load", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "numpy.zeros", "numpy.where", "numpy.loadtxt", "numpy.linspace", "scipy.interpolate.interp1d", "numpy.savez", "batman.TransitModel", "numpy.delete", "numpy.vstack" ]
[((102, 136), 'numpy.load', 'numpy.load', (['"""GJ436b_Trans_SED.npz"""'], {}), "('GJ436b_Trans_SED.npz')\n", (112, 136), False, 'import numpy, sys, math, batman\n'), ((187, 207), 'matplotlib.pyplot.imshow', 'plt.imshow', (['SEDarray'], {}), '(SEDarray)\n', (197, 207), True, 'import matplotlib.pyplot as plt\n'), ((208, 218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (216, 218), True, 'import matplotlib.pyplot as plt\n'), ((247, 308), 'numpy.loadtxt', 'numpy.loadtxt', (['"""ODFNEW_GJ436.spec"""'], {'unpack': '(True)', 'skiprows': '(800)'}), "('ODFNEW_GJ436.spec', unpack=True, skiprows=800)\n", (260, 308), False, 'import numpy, sys, math, batman\n'), ((350, 404), 'numpy.where', 'numpy.where', (['((stellarwave > 1.5) & (stellarwave < 5.5))'], {}), '((stellarwave > 1.5) & (stellarwave < 5.5))\n', (361, 404), False, 'import numpy, sys, math, batman\n'), ((489, 549), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['stellarwave', 'stellarspec'], {'kind': '"""cubic"""'}), "(stellarwave, stellarspec, kind='cubic')\n", (509, 549), False, 'from scipy import interpolate\n'), ((576, 661), 'numpy.loadtxt', 'numpy.loadtxt', (['"""../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt"""'], {'unpack': '(True)'}), "('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True\n )\n", (589, 661), False, 'import numpy, sys, math, batman\n'), ((672, 730), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['planetwave', 'planetspec'], {'kind': '"""cubic"""'}), "(planetwave, planetspec, kind='cubic')\n", (692, 730), False, 'from scipy import interpolate\n'), ((739, 769), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(0.1)', '(5000)'], {}), '(0.0, 0.1, 5000)\n', (753, 769), False, 'import numpy, sys, math, batman\n'), ((1553, 1586), 'batman.TransitModel', 'batman.TransitModel', (['params', 'time'], {}), '(params, time)\n', (1572, 1586), False, 'import numpy, sys, math, batman\n'), ((1711, 1737), 'numpy.zeros', 'numpy.zeros', (['time.shape[0]'], {}), '(time.shape[0])\n', (1722, 1737), False, 'import numpy, sys, math, batman\n'), ((1791, 1823), 'numpy.linspace', 'numpy.linspace', (['(1.75)', '(5.25)', '(3500)'], {}), '(1.75, 5.25, 3500)\n', (1805, 1823), False, 'import numpy, sys, math, batman\n'), ((2108, 2136), 'numpy.delete', 'numpy.delete', (['SEDarray', '(0)', '(0)'], {}), '(SEDarray, 0, 0)\n', (2120, 2136), False, 'import numpy, sys, math, batman\n'), ((2178, 2250), 'numpy.savez', 'numpy.savez', (['"""GJ436b_Trans_SED"""'], {'SEDarray': 'SEDarray', 'time': 'time', 'wave': 'wave'}), "('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave)\n", (2189, 2250), False, 'import numpy, sys, math, batman\n'), ((2252, 2272), 'matplotlib.pyplot.imshow', 'plt.imshow', (['SEDarray'], {}), '(SEDarray)\n', (2262, 2272), True, 'import matplotlib.pyplot as plt\n'), ((2273, 2283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2281, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2095), 'numpy.vstack', 'numpy.vstack', (['(SEDarray, actualflux)'], {}), '((SEDarray, actualflux))\n', (2071, 2095), False, 'import numpy, sys, math, batman\n')]
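The script above takes math.sqrt of the interpolated transmission spectrum because the spectrum file stores transit depth while batman's params.rp expects the radius ratio Rp/Rs, and depth ≈ (Rp/Rs)**2 for an opaque planet. A tiny numeric check of that conversion, with an example depth chosen for illustration:

import math

depth = 0.0025                    # example transit depth (2500 ppm)
rp_over_rs = math.sqrt(depth)     # value assigned to params.rp above
print(rp_over_rs)                 # 0.05
assert abs(rp_over_rs ** 2 - depth) < 1e-12   # squaring recovers the depth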
import math import sklearn.cluster as clstr import cv2 import numpy as np from PIL import Image, ImageOps, ImageDraw import os, glob import matplotlib.pyplot as pyplt import scipy.cluster.vq as vq import argparse import glob # We can specify these if need be. brodatz = "D:\\ImageProcessing\\project\\OriginalBrodatz\\" concatOut = "D:\\ImageProcessing\\project\\concat.png" # This is the function that checks boundaries when performing spatial convolution. def getRanges_for_window_with_adjust(row, col, height, width, W): mRange = [] nRange = [] mRange.append(0) mRange.append(W-1) nRange.append(0) nRange.append(W-1) initm = int(round(row - math.floor(W / 2))) initn = int(round(col - math.floor(W / 2))) if (initm < 0): mRange[1] += initm initm = 0 if (initn < 0): nRange[1] += initn initn = 0 if(initm + mRange[1] > (height - 1)): diff = ((initm + mRange[1]) - (height - 1)) mRange[1] -= diff if(initn + nRange[1] > (width-1)): diff = ((initn + nRange[1]) - (width - 1)) nRange[1] -= diff windowHeight = mRange[1] - mRange[0] windowWidth = nRange[1] - nRange[0] return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn)) # Used to normalize data before clustering occurs. # Whiten sets the variance to be 1 (unit variance), # spatial weighting also takes place here. # The mean can be subtracted if specified by the implementation. def normalizeData(featureVectors, setMeanToZero, spatialWeight=1): means = [] for col in range(0, len(featureVectors[0])): colMean = 0 for row in range(0, len(featureVectors)): colMean += featureVectors[row][col] colMean /= len(featureVectors) means.append(colMean) for col in range(2, len(featureVectors[0])): for row in range(0, len(featureVectors)): featureVectors[row][col] -= means[col] copy = vq.whiten(featureVectors) if (setMeanToZero): for row in range(0, len(featureVectors)): for col in range(0, len(featureVectors[0])): copy[row][col] -= means[col] for row in range(0, len(featureVectors)): copy[row][0] *= spatialWeight copy[row][1] *= spatialWeight return copy # Create the feature vectors and add in row and column data def constructFeatureVectors(featureImages, img): featureVectors = [] height, width = img.shape for row in range(height): for col in range(width): featureVector = [] featureVector.append(row) featureVector.append(col) for featureImage in featureImages: featureVector.append(featureImage[row][col]) featureVectors.append(featureVector) return featureVectors # An extra function if we are looking to save our feature vectors for later def printFeatureVectors(outDir, featureVectors): f = open(outDir, 'w') for vector in featureVectors: for item in vector: f.write(str(item) + " ") f.write("\n") f.close() # If we want to read in some feature vectors instead of creating them. 
def readInFeatureVectorsFromFile(dir): list = [line.rstrip('\n') for line in open(dir)] list = [i.split() for i in list] newList = [] for row in list: newRow = [] for item in row: floatitem = float(item) newRow.append(floatitem) newList.append(newRow) return newList # Print the intermediate results before clustering occurs def printFeatureImages(featureImages, naming, printlocation): i =0 for image in featureImages: # Normalize to intensity values imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) cv2.imwrite(printlocation + "\\" + naming + str(i) + ".png", imageToPrint) i+=1 # Print the final result, the user can also choose to make the output grey def printClassifiedImage(labels, k, img, outdir, greyOutput): if(greyOutput): labels = labels.reshape(img.shape) for row in range(0, len(labels)): for col in range(0, len(labels[0])): outputIntensity = (255/k)*labels[row][col] labels[row][col] = outputIntensity cv2.imwrite(outdir, labels.reshape(img.shape)) else: pyplt.imsave(outdir, labels.reshape(img.shape)) # Call the k means algorithm for classification def clusterFeatureVectors(featureVectors, k): kmeans = clstr.KMeans(n_clusters=k) kmeans.fit(featureVectors) labels = kmeans.labels_ return labels # To clean up old filter and feature images if the user chose to print them. def deleteExistingSubResults(outputPath): for filename in os.listdir(outputPath): if (filename.startswith("filter") or filename.startswith("feature")): os.remove(filename) # Checks user input (i.e. cannot have a negative mask size value) def check_positive_int(n): int_n = int(n) if int_n < 0: raise argparse.ArgumentTypeError("%s is negative" % n) return int_n # Checks user input (i.e. cannot have a negative weighting value) def check_positive_float(n): float_n = float(n) if float_n < 0: raise argparse.ArgumentTypeError("%s is negative " % n) return float_n #-------------------------------------------------------------------------- # All of the functions below were left here to demonstrate how I went about # cropping the input images. I left them here, in the case that Brodatz # textures were downloaded and cropped as new input images. 
#-------------------------------------------------------------------------- def cropTexture(x_offset, Y_offset, width, height, inDir, outDir): box = (x_offset, Y_offset, width, height) image = Image.open(inDir) crop = image.crop(box) crop.save(outDir, "PNG") def deleteCroppedImages(): for filename in glob.glob(brodatz + "*crop*"): os.remove(filename) def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType): images = [] for thisImage in pathsToImages: images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)) cv2.imwrite(outdir, np.concatenate(images, axis=axisType)) outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE) return outimg def createGrid(listOfBrodatzInts, outName, howManyPerRow): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + "D" + str(listOfBrodatzInts[i]) + ".png" brodatzCropOutput = brodatz + "cropD" + str(listOfBrodatzInts[i]) + ".png" # 128x128 crops, in order to generate a 512x512 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)] dests = [] for i in range(len(subOuts)): dest = brodatz + "cropRow" + str(i) + ".png" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + "cropRow" + str(i) + ".png", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0) # Destroy all sub crops (we can make this optional if we want!) deleteCroppedImages() def createGridWithCircle(listOfBrodatzInts, circleInt, outName): listOfRowOutputs = [] for i in range(len(listOfBrodatzInts)): brodatzCropInput = brodatz + "D" + str(listOfBrodatzInts[i]) + ".png" brodatzCropOutput = brodatz + "cropD" + str(listOfBrodatzInts[i]) + ".png" # 128x128 crops, in order to generate a 256x256 image cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput) listOfRowOutputs.append(brodatzCropOutput) subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)] dests = [] for i in range(len(subOuts)): dest = brodatz + "cropRow" + str(i) + ".png" dests.append(dest) concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + "cropRow" + str(i) + ".png", 1) concatentationOfBrodatzTexturesIntoRows(dests, brodatz + "Nat5crop.png", 0) size = (128, 128) mask = Image.new('L', size, color=255) draw = ImageDraw.Draw(mask) draw.ellipse((0, 0) + size, fill=0) im = Image.open(brodatz + "D" + str(circleInt) + ".png") output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5)) output.paste(0, mask=mask) output.save(brodatz + 'circlecrop.png', transparency=0) img = Image.open(brodatz + 'circlecrop.png').convert("RGBA") img_w, img_h = img.size background = Image.open(brodatz + "Nat5crop.png") bg_w, bg_h = background.size offset = ((bg_w - img_w) / 2, (bg_h - img_h) / 2) background.paste(output, offset, img) background.save(brodatz + outName, format="png") deleteCroppedImages() def createTexturePair(pair, outName): pathsToTemp = [brodatz + "D" + str(pair[0]) + ".png", brodatz + "D" + str(pair[1]) + ".png"] cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + "outcrop1.png") cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + "outcrop2.png") cropsToConcat = [brodatz + "outcrop1.png", brodatz + "outcrop2.png"] concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1) deleteCroppedImages() #-------------------------------------------------------------------------- # Create test images 
#-------------------------------------------------------------------------- # Note that I did not write this to have an exhaustive approach in mind, # where I pair all of the textures to every other texture. If I did so, # I would have made it a little more efficient, instead I just decided to # use the images that were in the papers already. # # We can use any of the 112 images from the Brodatz album here # nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54] # howManyPerRow = 4 # outName = "Nat16.png" # createGrid(nat16, outName, howManyPerRow) # # grid4 = [3,68,17,77] # howManyPerRow = 2 # outName = "grid4.png" # createGrid(grid4, outName, howManyPerRow) # #the last int is the circle in the middle of the image! # nat5 = [77,55,84,17] # circleInt = 24 # outName = 'Nat5.png' # createGridWithCircle(nat5, circleInt, outName) # # texturePairs = [[17,77],[3,68],[3,17],[55,68]] # count = 0 # for pair in texturePairs: # outName = brodatz + "pair" + str(count) + ".png" # createTexturePair(pair, outName) # count += 1
[ "PIL.Image.new", "os.remove", "numpy.concatenate", "PIL.ImageOps.fit", "sklearn.cluster.KMeans", "math.floor", "PIL.Image.open", "cv2.imread", "cv2.normalize", "glob.glob", "PIL.ImageDraw.Draw", "os.listdir", "scipy.cluster.vq.whiten", "argparse.ArgumentTypeError" ]
[((1992, 2017), 'scipy.cluster.vq.whiten', 'vq.whiten', (['featureVectors'], {}), '(featureVectors)\n', (2001, 2017), True, 'import scipy.cluster.vq as vq\n'), ((4589, 4615), 'sklearn.cluster.KMeans', 'clstr.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (4601, 4615), True, 'import sklearn.cluster as clstr\n'), ((4834, 4856), 'os.listdir', 'os.listdir', (['outputPath'], {}), '(outputPath)\n', (4844, 4856), False, 'import os, glob\n'), ((5891, 5908), 'PIL.Image.open', 'Image.open', (['inDir'], {}), '(inDir)\n', (5901, 5908), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((6013, 6042), 'glob.glob', 'glob.glob', (["(brodatz + '*crop*')"], {}), "(brodatz + '*crop*')\n", (6022, 6042), False, 'import glob\n'), ((6354, 6401), 'cv2.imread', 'cv2.imread', (['outdir', 'cv2.CV_LOAD_IMAGE_GRAYSCALE'], {}), '(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n', (6364, 6401), False, 'import cv2\n'), ((8332, 8363), 'PIL.Image.new', 'Image.new', (['"""L"""', 'size'], {'color': '(255)'}), "('L', size, color=255)\n", (8341, 8363), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((8375, 8395), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (8389, 8395), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((8510, 8559), 'PIL.ImageOps.fit', 'ImageOps.fit', (['im', 'mask.size'], {'centering': '(0.5, 0.5)'}), '(im, mask.size, centering=(0.5, 0.5))\n', (8522, 8559), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((8762, 8798), 'PIL.Image.open', 'Image.open', (["(brodatz + 'Nat5crop.png')"], {}), "(brodatz + 'Nat5crop.png')\n", (8772, 8798), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((3775, 3864), 'cv2.normalize', 'cv2.normalize', (['image'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_32F'}), '(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=\n cv2.CV_32F)\n', (3788, 3864), False, 'import cv2\n'), ((5114, 5162), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is negative' % n)"], {}), "('%s is negative' % n)\n", (5140, 5162), False, 'import argparse\n'), ((5334, 5383), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is negative ' % n)"], {}), "('%s is negative ' % n)\n", (5360, 5383), False, 'import argparse\n'), ((6052, 6071), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (6061, 6071), False, 'import os, glob\n'), ((6301, 6338), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': 'axisType'}), '(images, axis=axisType)\n', (6315, 6338), True, 'import numpy as np\n'), ((4948, 4967), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (4957, 4967), False, 'import os, glob\n'), ((6225, 6275), 'cv2.imread', 'cv2.imread', (['thisImage', 'cv2.CV_LOAD_IMAGE_GRAYSCALE'], {}), '(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n', (6235, 6275), False, 'import cv2\n'), ((8662, 8700), 'PIL.Image.open', 'Image.open', (["(brodatz + 'circlecrop.png')"], {}), "(brodatz + 'circlecrop.png')\n", (8672, 8700), False, 'from PIL import Image, ImageOps, ImageDraw\n'), ((679, 696), 'math.floor', 'math.floor', (['(W / 2)'], {}), '(W / 2)\n', (689, 696), False, 'import math\n'), ((727, 744), 'math.floor', 'math.floor', (['(W / 2)'], {}), '(W / 2)\n', (737, 744), False, 'import math\n')]
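The classification stage above whitens per-pixel feature vectors, weights the prepended (row, col) coordinates, and clusters them with k-means to form a label image. A compact sketch of that pipeline on a tiny synthetic feature image; the image size and the 0.5 spatial weight are arbitrary here.

import numpy as np
import scipy.cluster.vq as vq
from sklearn.cluster import KMeans

h, w = 8, 8
feature_img = np.random.rand(h, w)                 # stand-in for one Gabor feature image

# one vector per pixel: (row, col, feature value)
vectors = np.array([[r, c, feature_img[r, c]]
                    for r in range(h) for c in range(w)], dtype=float)
vectors = vq.whiten(vectors)                      # unit variance per column
vectors[:, :2] *= 0.5                           # spatial weighting, as in normalizeData

labels = KMeans(n_clusters=2, n_init=10).fit(vectors).labels_
label_img = labels.reshape(h, w)              # segmented output image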
# Copyright 2018 @<NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import sys sys.path.insert(0, os.getcwd()) import time import random import shutil import dill import numpy as np import tensorflow as tf from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper from helpers import Indexer, batch, checkpoint_model from itertools import chain, product from collections import defaultdict from kmedoids import kMedoids from scipy.spatial.distance import pdist, squareform from sklearn.metrics import accuracy_score from pairwise_classifier import * class MixtureReader: def __init__(self, data_dir, data_type, context): assert data_type in ['nyt', 'wiki'] self.data_dir = data_dir self.data_type = data_type self.context = context # int: 0 or context-length. def get_mixture(self, filename): if self.data_type == 'nyt': return self.__get_nyt_mixture(filename) else: # == wiki return self.__get_wiki_mixture(filename) def __get_nyt_mixture(self, filename): da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb')) doc_lbs = [] for sentcode in doc_mix: if sentcode in da: doc_lbs.append(0) else: doc_lbs.append(1) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs def __get_wiki_mixture(self, filename): doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb')) if self.context: CTX_LEN = self.context doc_mix_flat = list(chain.from_iterable(doc_mix)) doc_mix_len = len(doc_mix_flat) ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)]) return doc_mix, doc_lbs, ctx return doc_mix, doc_lbs class PscKMedoids: def __init__(self, psc_clf, data_type): self.psc_clf = psc_clf self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'], data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki', context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0) self.out_file_path = psc_clf.config['out_file_path'] def __to_sentence(self, indices): words = [] for index in indices: word = self.psc_clf.indexer.get_object(index) if word is None: words.append('UNK') else: words.append(word) return ' '.join(words) def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...} lbs = [0]*doc_len for idx in C[1]: lbs[idx] = 1 return lbs def __flip_clust(self, clust): return np.array([0 if i==1 else 1 for i in clust]) def __clust_accuracy(self, true, pred): return max(accuracy_score(true, pred), accuracy_score(true, self.__flip_clust(pred))) def __dist(self, x1, x2): x1, x1_len = batch([x1]) x2, x2_len = batch([x2]) fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len, self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len, 
self.psc_clf.keep_prob:1.0} if self.psc_clf.config['context']: fd[self.psc_clf.input_ctx] = self.ctx conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd) return 1-conf[0] def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True): if ctx is not None: self.ctx = ctx doc_mix_sq, _ = batch(doc_mix) doc_mix_sq = doc_mix_sq.T _, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2) doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix)) acc = self.__clust_accuracy(doc_lbs, doc_prd) if return_pred: return acc, doc_prd return acc def evaluate_rand(self, k=100, verbose=True): accs = [] filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False) if self.out_file_path is not None: # clear out file for new writing. out_file = open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, return_pred=self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, return_pred=self.out_file_path is not None) if self.out_file_path is None: acc = result else: acc, prd = result out_file.write('FILE ID: ' + str(filename) + '\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\n') out_file.write('\n\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc)) if self.out_file_path is not None: out_file.close() avg_acc = np.mean(accs) print('\nAverage accuracy = {}'.format(avg_acc)) return avg_acc def evaluate_given(self, filenames, verbose=True): accs = [] if self.out_file_path is not None: # clear out file for new writing.
out_file = open(self.out_file_path, 'w') for filename in filenames: if self.mix_reader.context: doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, ctx, return_pred=self.out_file_path is not None) else: doc_mix, doc_lbs = self.mix_reader.get_mixture(filename) result = self.evaluate_single(doc_mix, doc_lbs, return_pred=self.out_file_path is not None) if self.out_file_path is None: acc = result else: acc, prd = result out_file.write('FILE ID: ' + str(filename) + '\n') for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix): out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\n') out_file.write('\n\n') accs.append(acc) if verbose: print('File {}: acc = {}'.format(filename, acc)) if self.out_file_path is not None: out_file.close() avg_acc = np.mean(accs) print('\nAverage accuracy = {}'.format(avg_acc)) return avg_acc if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--batch_size', type=int) parser.add_argument('--vocab_size', type=int) parser.add_argument('--emb_size', type=int) parser.add_argument('--n_layer', type=int) parser.add_argument('--hid_size', type=int) parser.add_argument('--keep_prob', type=float) parser.add_argument('--learning_rate', type=float) parser.add_argument('--n_epoch', type=int) parser.add_argument('--train_size', type=int) parser.add_argument('--verbose', type=int) parser.add_argument('--save_freq', type=int) parser.add_argument('--data_dir', type=str) parser.add_argument('--info_path', type=str) parser.add_argument('--init_with_glove', type=bool) parser.add_argument('--save_dir', type=str) parser.add_argument('--save_name', type=str) parser.add_argument('--restore_dir', type=str) parser.add_argument('--restore_name', type=str) parser.add_argument('--load_from_saved', type=bool) parser.add_argument('--track_dir', type=str) parser.add_argument('--new_track', type=bool) parser.add_argument('--session_id', type=str) parser.add_argument('--mutual_attention', type=bool) parser.add_argument('--context', type=bool) parser.add_argument('--context_length', type=int) parser.add_argument('--out_file_path', type=str) args = parser.parse_args() config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size, 'n_layer': args.n_layer, 'hid_size': args.hid_size, 'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate, 'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose, 'save_freq': args.save_freq, 'data_dir': args.data_dir, 'info_path': args.info_path, 'init_with_glove': args.init_with_glove, 'save_dir': args.save_dir, 'save_name': args.save_name, 'restore_dir': args.restore_dir, 'restore_name': args.restore_name, 'load_from_saved': args.load_from_saved, 'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id, 'mutual_attention': args.mutual_attention, 'context': args.context, 'context_length': args.context_length, 'out_file_path': args.out_file_path} psc_clf = PairwiseSentenceClassifier(config) kmed = PscKMedoids(psc_clf, data_type='nyt') print('\n') sample_files = os.listdir('nyt_sample/') kmed.evaluate_given(sample_files)
[ "helpers.batch", "argparse.ArgumentParser", "os.getcwd", "sklearn.metrics.accuracy_score", "numpy.mean", "numpy.array", "scipy.spatial.distance.pdist", "numpy.random.choice", "itertools.chain.from_iterable", "os.listdir" ]
[((715, 726), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (724, 726), False, 'import os\n'), ((7932, 7957), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7955, 7957), False, 'import argparse\n'), ((10449, 10474), 'os.listdir', 'os.listdir', (['"""nyt_sample/"""'], {}), "('nyt_sample/')\n", (10459, 10474), False, 'import os\n'), ((3887, 3934), 'numpy.array', 'np.array', (['[(0 if i == 1 else 1) for i in clust]'], {}), '([(0 if i == 1 else 1) for i in clust])\n', (3895, 3934), True, 'import numpy as np\n'), ((4162, 4173), 'helpers.batch', 'batch', (['[x1]'], {}), '([x1])\n', (4167, 4173), False, 'from helpers import Indexer, batch, checkpoint_model\n'), ((4195, 4206), 'helpers.batch', 'batch', (['[x2]'], {}), '([x2])\n', (4200, 4206), False, 'from helpers import Indexer, batch, checkpoint_model\n'), ((4783, 4797), 'helpers.batch', 'batch', (['doc_mix'], {}), '(doc_mix)\n', (4788, 4797), False, 'from helpers import Indexer, batch, checkpoint_model\n'), ((5217, 5280), 'numpy.random.choice', 'np.random.choice', (['self.psc_clf.FILENAMES'], {'size': 'k', 'replace': '(False)'}), '(self.psc_clf.FILENAMES, size=k, replace=False)\n', (5233, 5280), True, 'import numpy as np\n'), ((6436, 6449), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (6443, 6449), True, 'import numpy as np\n'), ((7759, 7772), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (7766, 7772), True, 'import numpy as np\n'), ((3995, 4021), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true', 'pred'], {}), '(true, pred)\n', (4009, 4021), False, 'from sklearn.metrics import accuracy_score\n'), ((2073, 2101), 'itertools.chain.from_iterable', 'chain.from_iterable', (['doc_mix'], {}), '(doc_mix)\n', (2092, 2101), False, 'from itertools import chain, product\n'), ((2165, 2199), 'numpy.array', 'np.array', (['[doc_mix_flat[:CTX_LEN]]'], {}), '([doc_mix_flat[:CTX_LEN]])\n', (2173, 2199), True, 'import numpy as np\n'), ((2229, 2285), 'numpy.array', 'np.array', (['[doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)]'], {}), '([doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)])\n', (2237, 2285), True, 'import numpy as np\n'), ((2588, 2616), 'itertools.chain.from_iterable', 'chain.from_iterable', (['doc_mix'], {}), '(doc_mix)\n', (2607, 2616), False, 'from itertools import chain, product\n'), ((2680, 2714), 'numpy.array', 'np.array', (['[doc_mix_flat[:CTX_LEN]]'], {}), '([doc_mix_flat[:CTX_LEN]])\n', (2688, 2714), True, 'import numpy as np\n'), ((2744, 2800), 'numpy.array', 'np.array', (['[doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)]'], {}), '([doc_mix_flat + [0] * (CTX_LEN - doc_mix_len)])\n', (2752, 2800), True, 'import numpy as np\n'), ((4879, 4916), 'scipy.spatial.distance.pdist', 'pdist', (['doc_mix_sq'], {'metric': 'self.__dist'}), '(doc_mix_sq, metric=self.__dist)\n', (4884, 4916), False, 'from scipy.spatial.distance import pdist, squareform\n')]
import pandas as pd import numpy as np from sklearn.metrics import accuracy_score import torch from torch.utils.data import DataLoader import torch.optim as optim from model import CassavaModel from loss import DenseCrossEntropy import dataset from config import * def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) valid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS): model.train() t_loss = 0 for step, batch in enumerate(train_dataloader): img = batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None val_labels = None for step, batch in enumerate(valid_dataloader): img = batch[0] label = batch[1] if val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cpu() if val_preds is None: val_preds = preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss / len(valid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for i in range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['param_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f"./model/baseline/val_loss {train_results[-1]['valid_loss']}.pth") if __name__ == '__main__': k_fold_train(5)
[ "loss.DenseCrossEntropy", "torch.utils.data.DataLoader", "dataset.CassavaDataset", "pandas.read_csv", "torch.argmax", "numpy.zeros", "torch.cat", "torch.softmax", "torch.optim.Adam", "torch.device", "torch.no_grad", "model.CassavaModel" ]
[((319, 355), 'pandas.read_csv', 'pd.read_csv', (['"""./input/train_ohe.csv"""'], {}), "('./input/train_ohe.csv')\n", (330, 355), True, 'import pandas as pd\n'), ((487, 534), 'dataset.CassavaDataset', 'dataset.CassavaDataset', (['train_df'], {'device': 'DEVICE'}), '(train_df, device=DEVICE)\n', (509, 534), False, 'import dataset\n'), ((552, 599), 'dataset.CassavaDataset', 'dataset.CassavaDataset', (['valid_df'], {'device': 'DEVICE'}), '(valid_df, device=DEVICE)\n', (574, 599), False, 'import dataset\n'), ((621, 698), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'BATCH_SIZE', 'num_workers': '(4)', 'shuffle': '(True)'}), '(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)\n', (631, 698), False, 'from torch.utils.data import DataLoader\n'), ((718, 795), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'BATCH_SIZE', 'num_workers': '(4)', 'shuffle': '(True)'}), '(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)\n', (728, 795), False, 'from torch.utils.data import DataLoader\n'), ((807, 827), 'torch.device', 'torch.device', (['DEVICE'], {}), '(DEVICE)\n', (819, 827), False, 'import torch\n'), ((845, 864), 'loss.DenseCrossEntropy', 'DenseCrossEntropy', ([], {}), '()\n', (862, 864), False, 'from loss import DenseCrossEntropy\n'), ((2392, 2406), 'model.CassavaModel', 'CassavaModel', ([], {}), '()\n', (2404, 2406), False, 'from model import CassavaModel\n'), ((2491, 2508), 'torch.optim.Adam', 'optim.Adam', (['plist'], {}), '(plist)\n', (2501, 2508), True, 'import torch.optim as optim\n'), ((2516, 2552), 'pandas.read_csv', 'pd.read_csv', (['"""./input/train_ohe.csv"""'], {}), "('./input/train_ohe.csv')\n", (2527, 2552), True, 'import pandas as pd\n'), ((2566, 2587), 'numpy.zeros', 'np.zeros', (['df.shape[0]'], {}), '(df.shape[0])\n', (2574, 2587), True, 'import numpy as np\n'), ((2038, 2068), 'torch.argmax', 'torch.argmax', (['val_preds'], {'dim': '(1)'}), '(val_preds, dim=1)\n', (2050, 2068), False, 'import torch\n'), ((1735, 1750), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1748, 1750), False, 'import torch\n'), ((1987, 2023), 'torch.cat', 'torch.cat', (['(val_preds, preds)'], {'dim': '(0)'}), '((val_preds, preds), dim=0)\n', (1996, 2023), False, 'import torch\n'), ((1868, 1897), 'torch.softmax', 'torch.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (1881, 1897), False, 'import torch\n')]
import numpy as np from ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 } # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type': 1 } # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = { 'disabled': 10, } # disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): """ initialise cnn layers with 3 bytes IP :return: """ # convolutional layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20 } # convolutional layer subnet conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14 # total bits: 19 } # pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total bits: 19 } # fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields = { 'disabled': 19, } # disabled layer subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): """ initialise cnn layers with xavier weight initialisation :return: """ # convolutional layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits: 12 } # convolutional layer subnet conv_subnet = '0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6 # total bits: 11 } # pooling layer subnet pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, # total bits: 11 } # fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields = { 'disabled': 11, } # disabled layer subnet disabled_subnet = '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: """ BaseCNNLayer class """ def __init__(self, str_subnet, fields): """ constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict """ self.str_subnet = str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): """ encode filed values to an IP interface :param field_values: field values :type field_values: a dict of (field_name, field_value) pairs :return: the layer interface :rtype: Interface """ interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): """ decode an IP interface to field values :param interface: an IP interface :type interface: Interface :return: a dict of (field_name, field_value) pairs :rtype: dict """ field_values = self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): """ generate an IP interface with random settings :rtype: Interface :return: an IP interface """ field_values = {} for field_name in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): """ check whether the interface belongs to this type :param interface: an IP interface :type interface: Interface :return: boolean :rtype: bool """ return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): """ ConvLayer class """ def __init__(self, str_subnet=None, fields=None): """ constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict """ if str_subnet is None: str_subnet = CONV_SUBNET if fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): """ PoolingLayer class """ def __init__(self, str_subnet=None, fields=None): """ constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict """ if str_subnet is None: str_subnet = POOLING_SUBNET if fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): """ FullyConnectedLayer class """ def __init__(self, str_subnet=None, fields=None): """ constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict """ if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): """ DisabledLayer class """ def __init__(self, str_subnet=None, fields=None): """ constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict """ if str_subnet is None: str_subnet = DISABLED_SUBNET if fields is None: fields = DISABLED_FIELDS super(DisabledLayer, self).__init__(str_subnet, fields)
[ "ipec.ip.core.IPStructure", "ipec.ip.core.max_decimal_value_of_binary", "ipec.ip.core.parse_subnet_str", "numpy.random.randint", "ipec.ip.encoder.Encoder", "ipec.ip.decoder.Decoder" ]
[((3939, 3967), 'ipec.ip.core.parse_subnet_str', 'parse_subnet_str', (['str_subnet'], {}), '(str_subnet)\n', (3955, 3967), False, 'from ipec.ip.core import parse_subnet_str\n'), ((3996, 4015), 'ipec.ip.core.IPStructure', 'IPStructure', (['fields'], {}), '(fields)\n', (4007, 4015), False, 'from ipec.ip.core import IPStructure\n'), ((4039, 4078), 'ipec.ip.encoder.Encoder', 'Encoder', (['self.ip_structure', 'self.subnet'], {}), '(self.ip_structure, self.subnet)\n', (4046, 4078), False, 'from ipec.ip.encoder import Encoder\n'), ((4102, 4111), 'ipec.ip.decoder.Decoder', 'Decoder', ([], {}), '()\n', (4109, 4111), False, 'from ipec.ip.decoder import Decoder\n'), ((5193, 5233), 'ipec.ip.core.max_decimal_value_of_binary', 'max_decimal_value_of_binary', (['num_of_bits'], {}), '(num_of_bits)\n', (5220, 5233), False, 'from ipec.ip.core import max_decimal_value_of_binary\n'), ((5259, 5294), 'numpy.random.randint', 'np.random.randint', (['(0)', '(max_value + 1)'], {}), '(0, max_value + 1)\n', (5276, 5294), True, 'import numpy as np\n')]
""" This file holds common functions across all database processing such as calculating statistics. """ import numpy as np from src import em_constants as emc def is_outlier(wav, lower, upper): """ Checks if an audio sample is an outlier. Bounds are inclusive. :param wav: The audio time series data points :param lower: The lower bound :param upper: The upper bound :return: Boolean """ return False if lower <= len(wav) <= upper else True def get_label(filename, delimiter, index, db_emo_map): """ Gets the k-hot encoded label from a sample's filename. :param filename: The sample's filename :param delimiter: The delimiter used in the filename :param index: Where in the filename the label/emotion is located :param db_emo_map: The database-specific emotion mapping :return: The label k-hot encoded to this program's standard emotion map or False if the label doesn't map to the standard emotions """ label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): """ Represents a label in a filename-friendly format. Mostly used in the "read_to_melspecgram()" function to write out labels in the filename. Sample input: [1. 0. 0. 0. 0. 0. 0.] Sample output: "1_0_0_0_0_0_0" :param label: Numpy array representing the k-hot encoded label :return: String representation of the label """ return "_".join(str(emo) for emo in label) def k_hot_encode_label(label): """ K-hot encodes a label. Takes a list of emotion IDs and returns a list encoding the most voted for emotion. Sample input: [0, 1, 2, 0, 6, 2] Sample output: [1, 0, 1, 0, 0, 0, 0] :param label: List of labels to encode :return: List of k-hot encoded labels or False if the label is unused """ # If there's only one label/vote, then use the quicker method of encoding if len(label) == 1: return _one_hot_encode_label(label) # Convert the emotion numbers into an array where the index is the emotion # and the value is the number of votes for that emotion unique, counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count # Only count the emotions with the highest amount of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all zero, then this sample doesn't fit with the set of labels # that we're considering so drop it if not np.any(k_hot_label): print("No usable label.") return False return k_hot_label def _one_hot_encode_label(label): """ One hot encodes a label. Private function to quickly one-hot encode a label. Sample input: [4] Sample output: [0, 0, 0, 0, 1, 0, 0] :param label: A list with one label (length is one) :return: One-hot encoding of the label """ one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): """ Inverses a k-hot encoded label back into emotion ids. Sample input: [1, 0, 0, 0, 1, 0, 0] Sample output: [0, 4] :param k_hot_label: A list of the k-hot encoded label :return: A list of the emotion ids in the label """ return np.where(k_hot_label == 1)[0]
[ "numpy.floor", "numpy.zeros", "numpy.any", "numpy.max", "numpy.where", "numpy.unique" ]
[((2301, 2337), 'numpy.unique', 'np.unique', (['label'], {'return_counts': '(True)'}), '(label, return_counts=True)\n', (2310, 2337), True, 'import numpy as np\n'), ((2356, 2382), 'numpy.zeros', 'np.zeros', (['emc.NUM_EMOTIONS'], {}), '(emc.NUM_EMOTIONS)\n', (2364, 2382), True, 'import numpy as np\n'), ((3212, 3249), 'numpy.zeros', 'np.zeros', (['emc.NUM_EMOTIONS'], {'dtype': 'int'}), '(emc.NUM_EMOTIONS, dtype=int)\n', (3220, 3249), True, 'import numpy as np\n'), ((2575, 2594), 'numpy.max', 'np.max', (['k_hot_label'], {}), '(k_hot_label)\n', (2581, 2594), True, 'import numpy as np\n'), ((2778, 2797), 'numpy.any', 'np.any', (['k_hot_label'], {}), '(k_hot_label)\n', (2784, 2797), True, 'import numpy as np\n'), ((3634, 3660), 'numpy.where', 'np.where', (['(k_hot_label == 1)'], {}), '(k_hot_label == 1)\n', (3642, 3660), True, 'import numpy as np\n'), ((2613, 2634), 'numpy.floor', 'np.floor', (['k_hot_label'], {}), '(k_hot_label)\n', (2621, 2634), True, 'import numpy as np\n')]
import unittest from time import time from pickle import load, dump from tempfile import mkstemp from random import choice, randint from string import ascii_letters from numpy import corrcoef, random, abs, max, asarray, round, zeros_like from trlda.models import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102 D = 1010 K = 11 alpha = .27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20) # set alpha to wrong values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample a beta with the given eta model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) # optimization should at least walk in the right direction and not explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model (pickle requires binary mode) with open(tmp_file, 'wb') as handle: dump({'model': model0}, handle) # load model with open(tmp_file, 'rb') as handle: model1 = load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20) if __name__ == '__main__': unittest.main()
[ "unittest.main", "trlda.models.BatchLDA", "pickle.dump", "numpy.zeros_like", "numpy.abs", "random.randint", "tempfile.mkstemp", "pickle.load", "numpy.random.rand" ]
[((2869, 2884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2882, 2884), False, 'import unittest\n'), ((438, 495), 'trlda.models.BatchLDA', 'BatchLDA', ([], {'num_words': 'W', 'num_topics': 'K', 'alpha': 'alpha', 'eta': 'eta'}), '(num_words=W, num_topics=K, alpha=alpha, eta=eta)\n', (446, 495), False, 'from trlda.models import BatchLDA\n'), ((805, 822), 'numpy.random.rand', 'random.rand', (['K', '(1)'], {}), '(K, 1)\n', (816, 822), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((969, 1032), 'trlda.models.BatchLDA', 'BatchLDA', ([], {'num_words': '(4)', 'num_topics': '(2)', 'alpha': '[0.2, 0.05]', 'eta': '(0.2)'}), '(num_words=4, num_topics=2, alpha=[0.2, 0.05], eta=0.2)\n', (977, 1032), False, 'from trlda.models import BatchLDA\n'), ((775, 793), 'numpy.random.rand', 'random.rand', (['(K + 1)'], {}), '(K + 1)\n', (786, 793), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((1682, 1755), 'trlda.models.BatchLDA', 'BatchLDA', ([], {'num_words': '(100)', 'num_topics': '(10)', 'alpha': '[0.1, 0.1]', 'eta': 'initial_eta'}), '(num_words=100, num_topics=10, alpha=[0.1, 0.1], eta=initial_eta)\n', (1690, 1755), False, 'from trlda.models import BatchLDA\n'), ((2312, 2321), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (2319, 2321), False, 'from tempfile import mkstemp\n'), ((2382, 2413), 'pickle.dump', 'dump', (["{'model': model0}", 'handle'], {}), "({'model': model0}, handle)\n", (2386, 2413), False, 'from pickle import load, dump\n'), ((2801, 2829), 'numpy.abs', 'abs', (['(model0.eta - model1.eta)'], {}), '(model0.eta - model1.eta)\n', (2804, 2829), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((662, 679), 'random.randint', 'randint', (['(0)', '(K - 1)'], {}), '(0, K - 1)\n', (669, 679), False, 'from random import choice, randint\n'), ((1839, 1864), 'numpy.zeros_like', 'zeros_like', (['model.lambdas'], {}), '(model.lambdas)\n', (1849, 1864), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2116, 2136), 'numpy.abs', 'abs', (['(model.eta - eta)'], {}), '(model.eta - eta)\n', (2119, 2136), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2138, 2166), 'numpy.abs', 'abs', (['(model.eta - initial_eta)'], {}), '(model.eta - initial_eta)\n', (2141, 2166), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2261, 2274), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (2272, 2274), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2283, 2296), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (2294, 2296), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2475, 2487), 'pickle.load', 'load', (['handle'], {}), '(handle)\n', (2479, 2487), False, 'from pickle import load, dump\n'), ((2673, 2709), 'numpy.abs', 'abs', (['(model0.lambdas - model1.lambdas)'], {}), '(model0.lambdas - model1.lambdas)\n', (2676, 2709), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n'), ((2741, 2773), 'numpy.abs', 'abs', (['(model0.alpha - model1.alpha)'], {}), '(model0.alpha - model1.alpha)\n', (2744, 2773), False, 'from numpy import corrcoef, random, abs, max, asarray, round, zeros_like\n')]
#!/usr/bin/env python import math import numpy as np from . import linalg as la from . import eulang #Euler angle sequence: XYZ (world). First rotation about X, second rotation #about Y, and the third rotation about Z axis of the world(i.e. fixed) frame. #This is the same as the sequence used in Blender. #In contrast, the XYZ sequence is understood in the Aerospace community as: #First rotation about Z-axis, second rotation about Y-axis, and the third #rotation about X-axis of the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi) if angle < 0.0: angle = -angle axis = -axis if angle > math.pi: angle = 2*math.pi - angle axis = -axis return (axis, angle) def get_rand_axis_angle(): ''' Generates a random pair of axis-angle. The axis is a random vector from the surface of a unit sphere. Algorithm from Allen & Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate angle: A uniform random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2 if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis by angle. 
''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0: if angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find the largest entry in the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat = shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction cosine matrix of axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters ---------- A : (3,3) ndarray The rows of A represent the orthonormal basis vectors of frame A. B : (3,3) ndarray The rows of B represent the orthonormal basis vectors of frame B. Returns ------- (3,3) ndarray The dcm of frame B w.r.t. frame A. 
''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward: shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about axis by angle. 
''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T else: shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle < math.pi: axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' 
Conjugates a quaternion in-place. ''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a quaternion and returns a copy. ''' p = np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q): ''' Inverts a quaternion in-place. ''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and returns it as a new instance. ''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place. ''' q /= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a quaternion and returns it as a copy. ''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False def get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle between two quaternions p and q. ''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3, q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace by delta. ''' n = v.shape[0] for i in range(n): v[i,:] += delta return v def align(v, old, new): ''' old and new represent coordinate axes. They must be unit vectors. ''' assert old.shape[0] == new.shape[0] n = old.shape[0] if n == 1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n == 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3)) return is_orthogonal and det_is_one
[ "numpy.trace", "numpy.empty", "numpy.einsum", "numpy.sin", "numpy.linalg.norm", "numpy.diag", "math.fmod", "numpy.copy", "numpy.identity", "math.cos", "numpy.linalg.det", "math.sqrt", "numpy.cross", "math.sin", "numpy.cos", "numpy.dot", "numpy.vstack", "numpy.zeros", "math.acos", "numpy.random.random", "numpy.array", "math.isclose", "numpy.sqrt" ]
[((784, 813), 'math.fmod', 'math.fmod', (['angle', '(2 * math.pi)'], {}), '(angle, 2 * math.pi)\n', (793, 813), False, 'import math\n'), ((1187, 1201), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (1195, 1201), True, 'import numpy as np\n'), ((1561, 1582), 'numpy.sqrt', 'np.sqrt', (['(1.0 - zetasq)'], {}), '(1.0 - zetasq)\n', (1568, 1582), True, 'import numpy as np\n'), ((1752, 1771), 'math.cos', 'math.cos', (['(angle / 2)'], {}), '(angle / 2)\n', (1760, 1771), False, 'import math\n'), ((1809, 1840), 'numpy.array', 'np.array', (['[w, v[0], v[1], v[2]]'], {}), '([w, v[0], v[1], v[2]])\n', (1817, 1840), True, 'import numpy as np\n'), ((3076, 3095), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (3082, 3095), True, 'import numpy as np\n'), ((3146, 3162), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3154, 3162), True, 'import numpy as np\n'), ((3172, 3185), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3178, 3185), True, 'import numpy as np\n'), ((3196, 3209), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3202, 3209), True, 'import numpy as np\n'), ((3710, 3726), 'numpy.trace', 'np.trace', (['rotmat'], {}), '(rotmat)\n', (3718, 3726), True, 'import numpy as np\n'), ((3739, 3765), 'math.acos', 'math.acos', (['((trace - 1) / 2)'], {}), '((trace - 1) / 2)\n', (3748, 3765), False, 'import math\n'), ((4924, 4945), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (4930, 4945), True, 'import numpy as np\n'), ((5089, 5133), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (5098, 5133), True, 'import numpy as np\n'), ((5277, 5335), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (5286, 5335), True, 'import numpy as np\n'), ((6047, 6061), 'numpy.dot', 'np.dot', (['B', 'A.T'], {}), '(B, A.T)\n', (6053, 6061), True, 'import numpy as np\n'), ((7306, 7325), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (7312, 7325), True, 'import numpy as np\n'), ((7482, 7503), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (7488, 7503), True, 'import numpy as np\n'), ((7617, 7661), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (7626, 7661), True, 'import numpy as np\n'), ((7775, 7833), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (7784, 7833), True, 'import numpy as np\n'), ((9877, 9896), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (9883, 9896), True, 'import numpy as np\n'), ((10176, 10197), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (10182, 10197), True, 'import numpy as np\n'), ((10319, 10363), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (10328, 10363), True, 'import numpy as np\n'), ((10485, 10543), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (10494, 10543), True, 'import numpy as np\n'), ((10877, 10899), 'numpy.random.random', 'np.random.random', (['(4,)'], {}), '((4,))\n', (10893, 10899), True, 'import numpy as np\n'), ((10967, 10997), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], 
{}), '([1.0, 0.0, 0.0, 0.0])\n', (10975, 10997), True, 'import numpy as np\n'), ((11232, 11258), 'math.sqrt', 'math.sqrt', (['(1.0 - q[0] ** 2)'], {}), '(1.0 - q[0] ** 2)\n', (11241, 11258), False, 'import math\n'), ((12539, 12558), 'numpy.dot', 'np.dot', (['v', 'rotmat.T'], {}), '(v, rotmat.T)\n', (12545, 12558), True, 'import numpy as np\n'), ((12598, 12614), 'numpy.empty', 'np.empty', (['(3, 3)'], {}), '((3, 3))\n', (12606, 12614), True, 'import numpy as np\n'), ((13270, 13291), 'numpy.dot', 'np.dot', (['v', 'shiftmat.T'], {}), '(v, shiftmat.T)\n', (13276, 13291), True, 'import numpy as np\n'), ((13409, 13453), 'numpy.einsum', 'np.einsum', (['"""ip,jq,pq"""', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,pq', shiftmat, shiftmat, a)\n", (13418, 13453), True, 'import numpy as np\n'), ((13571, 13629), 'numpy.einsum', 'np.einsum', (['"""ip,jq,kr,pqr"""', 'shiftmat', 'shiftmat', 'shiftmat', 'a'], {}), "('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)\n", (13580, 13629), True, 'import numpy as np\n'), ((14032, 14042), 'numpy.copy', 'np.copy', (['q'], {}), '(q)\n', (14039, 14042), True, 'import numpy as np\n'), ((14292, 14302), 'numpy.copy', 'np.copy', (['q'], {}), '(q)\n', (14299, 14302), True, 'import numpy as np\n'), ((14421, 14438), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (14435, 14438), True, 'import numpy as np\n'), ((14561, 14571), 'numpy.copy', 'np.copy', (['q'], {}), '(q)\n', (14568, 14571), True, 'import numpy as np\n'), ((14641, 14658), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (14655, 14658), True, 'import numpy as np\n'), ((14666, 14704), 'math.isclose', 'math.isclose', (['norm', '(1.0)'], {'rel_tol': '(1e-14)'}), '(norm, 1.0, rel_tol=1e-14)\n', (14678, 14704), False, 'import math\n'), ((14829, 14922), 'numpy.array', 'np.array', (['[[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]'], {}), '([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -\n p2, p1, p0]])\n', (14837, 14922), True, 'import numpy as np\n'), ((15483, 15500), 'numpy.dot', 'np.dot', (['mat', 'qdot'], {}), '(mat, qdot)\n', (15489, 15500), True, 'import numpy as np\n'), ((15794, 15814), 'numpy.dot', 'np.dot', (['mat', 'ang_vel'], {}), '(mat, ang_vel)\n', (15800, 15814), True, 'import numpy as np\n'), ((656, 676), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (670, 676), True, 'import numpy as np\n'), ((1288, 1306), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1304, 1306), True, 'import numpy as np\n'), ((1778, 1797), 'math.sin', 'math.sin', (['(angle / 2)'], {}), '(angle / 2)\n', (1786, 1797), False, 'import math\n'), ((2286, 2315), 'numpy.array', 'np.array', (["orientation['quat']"], {}), "(orientation['quat'])\n", (2294, 2315), True, 'import numpy as np\n'), ((4735, 4757), 'numpy.array', 'np.array', (['[u0, u1, u2]'], {}), '([u0, u1, u2])\n', (4743, 4757), True, 'import numpy as np\n'), ((6611, 6640), 'numpy.array', 'np.array', (["orientation['quat']"], {}), "(orientation['quat'])\n", (6619, 6640), True, 'import numpy as np\n'), ((8962, 8991), 'numpy.array', 'np.array', (["orientation['quat']"], {}), "(orientation['quat'])\n", (8970, 8991), True, 'import numpy as np\n'), ((11206, 11221), 'math.acos', 'math.acos', (['q[0]'], {}), '(q[0])\n', (11215, 11221), False, 'import math\n'), ((11477, 11502), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (11485, 11502), True, 'import numpy as np\n'), ((11872, 11901), 'numpy.array', 'np.array', (["orientation['quat']"], {}), 
"(orientation['quat'])\n", (11880, 11901), True, 'import numpy as np\n'), ((15023, 15042), 'numpy.dot', 'np.dot', (['prod_mat', 'q'], {}), '(prod_mat, q)\n', (15029, 15042), True, 'import numpy as np\n'), ((15208, 15223), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (15216, 15223), False, 'import math\n'), ((15382, 15394), 'numpy.dot', 'np.dot', (['p', 'q'], {}), '(p, q)\n', (15388, 15394), True, 'import numpy as np\n'), ((15580, 15650), 'numpy.array', 'np.array', (['[[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]'], {}), '([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]])\n', (15588, 15650), True, 'import numpy as np\n'), ((15912, 15984), 'numpy.array', 'np.array', (['[[-q1, -q2, -q3], [q0, q3, -q2], [-q3, q0, q1], [q2, -q1, q0]]'], {}), '([[-q1, -q2, -q3], [q0, q3, -q2], [-q3, q0, q1], [q2, -q1, q0]])\n', (15920, 15984), True, 'import numpy as np\n'), ((17150, 17168), 'numpy.linalg.det', 'np.linalg.det', (['mat'], {}), '(mat)\n', (17163, 17168), True, 'import numpy as np\n'), ((17237, 17255), 'numpy.dot', 'np.dot', (['mat', 'mat.T'], {}), '(mat, mat.T)\n', (17243, 17255), True, 'import numpy as np\n'), ((17257, 17271), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (17268, 17271), True, 'import numpy as np\n'), ((692, 745), 'math.isclose', 'math.isclose', (['norm', '(1.0)'], {'abs_tol': '(1e-14)', 'rel_tol': '(1e-14)'}), '(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14)\n', (704, 745), False, 'import math\n'), ((2409, 2439), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (2417, 2439), True, 'import numpy as np\n'), ((6719, 6749), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (6727, 6749), True, 'import numpy as np\n'), ((9102, 9132), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (9110, 9132), True, 'import numpy as np\n'), ((11948, 11978), 'numpy.array', 'np.array', (["orientation['euler']"], {}), "(orientation['euler'])\n", (11956, 11978), True, 'import numpy as np\n'), ((16516, 16532), 'numpy.dot', 'np.dot', (['old', 'new'], {}), '(old, new)\n', (16522, 16532), True, 'import numpy as np\n'), ((16561, 16579), 'numpy.cross', 'np.cross', (['old', 'new'], {}), '(old, new)\n', (16569, 16579), True, 'import numpy as np\n'), ((16789, 16812), 'numpy.vstack', 'np.vstack', (['(old, z_old)'], {}), '((old, z_old))\n', (16798, 16812), True, 'import numpy as np\n'), ((16832, 16855), 'numpy.vstack', 'np.vstack', (['(new, z_new)'], {}), '((new, z_new))\n', (16841, 16855), True, 'import numpy as np\n'), ((1401, 1419), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1417, 1419), True, 'import numpy as np\n'), ((1446, 1464), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1462, 1464), True, 'import numpy as np\n'), ((2631, 2660), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (2639, 2660), True, 'import numpy as np\n'), ((4039, 4054), 'numpy.diag', 'np.diag', (['rotmat'], {}), '(rotmat)\n', (4046, 4054), True, 'import numpy as np\n'), ((6926, 6955), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (6934, 6955), True, 'import numpy as np\n'), ((9321, 9350), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (9329, 9350), True, 'import numpy as np\n'), ((12157, 12186), 'numpy.array', 'np.array', (["orientation['axis']"], {}), "(orientation['axis'])\n", (12165, 12186), True, 'import numpy as np\n'), ((15148, 15175), 
'math.sin', 'math.sin', (['((1.0 - t) * theta)'], {}), '((1.0 - t) * theta)\n', (15156, 15175), False, 'import math\n'), ((15189, 15208), 'math.sin', 'math.sin', (['(t * theta)'], {}), '(t * theta)\n', (15197, 15208), False, 'import math\n'), ((16682, 16712), 'numpy.cross', 'np.cross', (['old[0, :]', 'old[1, :]'], {}), '(old[0, :], old[1, :])\n', (16690, 16712), True, 'import numpy as np\n'), ((16740, 16770), 'numpy.cross', 'np.cross', (['new[0, :]', 'new[1, :]'], {}), '(new[0, :], new[1, :])\n', (16748, 16770), True, 'import numpy as np\n'), ((4100, 4157), 'math.sqrt', 'math.sqrt', (['(rotmat[0, 0] - rotmat[1, 1] - rotmat[2, 2] + 1)'], {}), '(rotmat[0, 0] - rotmat[1, 1] - rotmat[2, 2] + 1)\n', (4109, 4157), False, 'import math\n'), ((4298, 4355), 'math.sqrt', 'math.sqrt', (['(rotmat[1, 1] - rotmat[0, 0] - rotmat[2, 2] + 1)'], {}), '(rotmat[1, 1] - rotmat[0, 0] - rotmat[2, 2] + 1)\n', (4307, 4355), False, 'import math\n'), ((4496, 4553), 'math.sqrt', 'math.sqrt', (['(rotmat[2, 2] - rotmat[0, 0] - rotmat[1, 1] + 1)'], {}), '(rotmat[2, 2] - rotmat[0, 0] - rotmat[1, 1] + 1)\n', (4505, 4553), False, 'import math\n')]
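The call pattern recorded above for this row's code — math.cos(angle / 2), math.sin(angle / 2), np.array([w, v[0], v[1], v[2]]) — is the usual axis-angle-to-quaternion construction in w-first order (the record also shows the axis norm being checked with np.linalg.norm and math.isclose). A minimal standalone sketch of that construction; the function and variable names here are illustrative, not taken from the original file:

import math
import numpy as np

def quat_from_axis_angle(axis, angle):
    # Unit quaternion [w, x, y, z] for a rotation of `angle` radians about `axis`.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)   # the original instead asserts the axis is already unit-norm
    w = math.cos(angle / 2)                # scalar part
    v = math.sin(angle / 2) * axis         # vector part
    return np.array([w, v[0], v[1], v[2]])

print(quat_from_axis_angle([0.0, 0.0, 1.0], math.pi / 2))   # approx [0.7071, 0, 0, 0.7071]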
import sklearn.datasets as datasets from numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest import numpy as np import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix("test_0", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded = BigMatrix("test_1", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded = BigMatrix("test_2", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded = BigMatrix("test_3", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded = BigMatrix("test_4", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11] X_sharded = BigMatrix("test_5", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67, 44:53] == X_sharded.submatrix(0, 4, 4).numpy()))
[ "numpywren.matrix_init.shard_matrix", "numpywren.matrix.BigMatrix", "numpy.all", "numpy.random.randn" ]
[((329, 354), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (344, 354), True, 'import numpy as np\n'), ((375, 430), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_0"""'], {'shape': 'X.shape', 'shard_sizes': 'X.shape'}), "('test_0', shape=X.shape, shard_sizes=X.shape)\n", (384, 430), False, 'from numpywren.matrix import BigMatrix\n'), ((439, 465), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (451, 465), False, 'from numpywren.matrix_init import shard_matrix\n'), ((545, 573), 'numpy.all', 'np.all', (['(X_sharded_local == X)'], {}), '(X_sharded_local == X)\n', (551, 573), True, 'import numpy as np\n'), ((631, 656), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (646, 656), True, 'import numpy as np\n'), ((677, 732), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_1"""'], {'shape': 'X.shape', 'shard_sizes': 'X.shape'}), "('test_1', shape=X.shape, shard_sizes=X.shape)\n", (686, 732), False, 'from numpywren.matrix import BigMatrix\n'), ((885, 910), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (900, 910), True, 'import numpy as np\n'), ((962, 1021), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_2"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_2', shape=X.shape, shard_sizes=shard_sizes)\n", (971, 1021), False, 'from numpywren.matrix import BigMatrix\n'), ((1030, 1056), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (1042, 1056), False, 'from numpywren.matrix_init import shard_matrix\n'), ((1496, 1521), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (1511, 1521), True, 'import numpy as np\n'), ((1573, 1632), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_3"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_3', shape=X.shape, shard_sizes=shard_sizes)\n", (1582, 1632), False, 'from numpywren.matrix import BigMatrix\n'), ((1641, 1667), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (1653, 1667), False, 'from numpywren.matrix_init import shard_matrix\n'), ((2035, 2060), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (2050, 2060), True, 'import numpy as np\n'), ((2112, 2171), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_4"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_4', shape=X.shape, shard_sizes=shard_sizes)\n", (2121, 2171), False, 'from numpywren.matrix import BigMatrix\n'), ((2180, 2206), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (2192, 2206), False, 'from numpywren.matrix_init import shard_matrix\n'), ((2670, 2697), 'numpy.random.randn', 'np.random.randn', (['(21)', '(67)', '(53)'], {}), '(21, 67, 53)\n', (2685, 2697), True, 'import numpy as np\n'), ((2753, 2812), 'numpywren.matrix.BigMatrix', 'BigMatrix', (['"""test_5"""'], {'shape': 'X.shape', 'shard_sizes': 'shard_sizes'}), "('test_5', shape=X.shape, shard_sizes=shard_sizes)\n", (2762, 2812), False, 'from numpywren.matrix import BigMatrix\n'), ((2821, 2847), 'numpywren.matrix_init.shard_matrix', 'shard_matrix', (['X_sharded', 'X'], {}), '(X_sharded, X)\n', (2833, 2847), False, 'from numpywren.matrix_init import shard_matrix\n')]
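Each element of the extract_api lists in this dump looks like the first entry above: ((329, 354), 'numpy.random.randn', 'np.random.randn', (['(128)', '(128)'], {}), '(128, 128)\n', (344, 354), True, 'import numpy as np\n'). The field meanings below are inferred from the data, not from a documented schema, so treat this as an educated guess rather than the extractor's own definition:

# Inferred layout: (call span, dotted API name, name as written in the source,
# (positional arg strings, keyword arg dict), argument text, argument span,
# aliased-import flag, import statement).
def check_entry(code: str, entry) -> bool:
    (start, end), _dotted, written, _parsed, arg_text, (a0, a1), _alias, _imp = entry
    # For simple single-line calls the first span appears to cover the whole
    # call expression and the second span just the parenthesised arguments.
    call_text = written + arg_text.rstrip("\n")
    return code[start:end] == call_text and code[a0:a1] == arg_text.rstrip("\n")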
from gensim import models import json import numpy as np MODEL_VERSION = "glove-wiki-gigaword-300" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): """ This method will get the vector of the given word :param word_list: list of a single word string :return: the vector list of this word """ result = {"status_code": "0000"} if len(word_list) > 1: result["status_code"] = "0001" result["result_info"] = "Expect one wordString for getVec" return result word = word_list[0] try: vec = model.get_vector(word) result["vec"] = str(np.array(vec).tolist()) except Exception as e: result["status_code"] = "0001" result["result_info"] = str(e) return result def get_sim_by_word(word_list): """ This method will return a list of the similar words by the given word :param word_list: list of a single word string :return: the sim words list of the given word """ result = {"status_code": "0000"} if len(word_list) > 1: result["status_code"] = "0001" result["result_info"] = "Expect one wordString for getSim" return result word = word_list[0] try: sim_words = model.similar_by_word(word) result["sim_words"] = sim_words except Exception as e: result["status_code"] = "0001" result["result_info"] = str(e) return result def get_similarity_between(word_list): """ This method will get the similarity of two given words :param word_list: list of two words A B for similarity calculation :return: cosine similarity of the two given words """ result = {"status_code": "0000"} if len(word_list) != 2: result["status_code"] = "0001" result["result_info"] = "Expect two wordString for getSimBetween" return result try: word_a = word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b) result["similarity"] = str(similarity) except Exception as e: result["status_code"] = "0001" result["result_info"] = str(e) return result method_dispatcher = { "getVec": lambda word_list,: get_word_vec(word_list), "getSim": lambda word_list,: get_sim_by_word(word_list), "getSimBetween": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): """ This function will validate the event send from API gateway to Lambda and raise exception if exists :param event: :return: """ params = event["multiValueQueryStringParameters"] if "method" not in params.keys() or "wordString" not in params.keys(): raise Exception('"method" and "wordString" are expected as the Query Params') # flag = False method = params.get("method") if len(method) != 1: # flag = False raise Exception('Expect one value for method param') method = method[0] if method not in method_dispatcher.keys(): # flag = False raise Exception('method must be in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response = { 'statusCode': 200, 'body': "" } try: validate_event(event) except Exception as e: result["status_code"] = "0001" result["result_info"] = str(e) result["request_info"] = event["multiValueQueryStringParameters"] result["model_version"] = MODEL_VERSION response["body"] = json.dumps(result) return response params = event["multiValueQueryStringParameters"] method = params["method"][0] word_list = params["wordString"] result = method_dispatcher[method](word_list) result["request_info"] = event["multiValueQueryStringParameters"] result["model_version"] = MODEL_VERSION response["body"] = json.dumps(result) print(response) return response if __name__ == "__main__": f = open('mock_event.json') mock_event = json.load(f) f.close() print(lambda_handler(mock_event, 
context=""))
[ "json.load", "numpy.array", "gensim.models.KeyedVectors.load_word2vec_format", "json.dumps" ]
[((109, 164), 'gensim.models.KeyedVectors.load_word2vec_format', 'models.KeyedVectors.load_word2vec_format', (['MODEL_VERSION'], {}), '(MODEL_VERSION)\n', (149, 164), False, 'from gensim import models\n'), ((3927, 3945), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3937, 3945), False, 'import json\n'), ((4064, 4076), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4073, 4076), False, 'import json\n'), ((3572, 3590), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3582, 3590), False, 'import json\n'), ((649, 662), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (657, 662), True, 'import numpy as np\n')]
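One caveat about the row above: "glove-wiki-gigaword-300" is a gensim-data model name rather than a word2vec-format file on disk, so KeyedVectors.load_word2vec_format(MODEL_VERSION) only works if a file with that literal name happens to exist locally. If the intent is to fetch the pretrained GloVe vectors, the gensim downloader is the usual route — a sketch assuming network access on first run:

import gensim.downloader as api

# Downloads (and caches) the pretrained vectors, returning a KeyedVectors object
# with the same get_vector / similar_by_word / similarity methods used above.
model = api.load("glove-wiki-gigaword-300")

print(model.similarity("king", "queen"))
print(model.similar_by_word("king")[:3])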
#!/usr/bin/env python # coding: utf-8 # In[1]: import os project_name = "reco-tut-mlh"; branch = "main"; account = "sparsh-ai" project_path = os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = "/content/" + project_name; get_ipython().system(u'mkdir "{path}"') get_ipython().magic(u'cd "{path}"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email "<EMAIL>"') get_ipython().system(u'git config --global user.name "reco-tut"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git') get_ipython().system(u'git pull origin "{branch}"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd "{project_path}"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . && git commit -m \'commit\' && git push origin "{branch}"') # In[7]: import sys sys.path.insert(0, './code') # --- # # Collaborative Filtering Comparison # # In this notebook we compare different recommendation systems starting with the state-of-the-art LightGCN and going back to the winning algorithm for 2009's Netflix Prize competition, SVD++. # # Models include in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model has their own individual notebooks where we go more indepth, especially LightGCN and NGCF, where we implemented them from scratch in Tensorflow. # # The last cell compares the performance of the different models using ranking metrics: # # # * Precision@k # * Recall@k # * Mean Average Precision (MAP) # * Normalized Discounted Cumulative Gain (NDCG) # # where $k=10$ # # # # Imports # In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import math import numpy as np import os import pandas as pd import random import requests import scipy.sparse as sp import surprise import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from utils import stratified_split, numpy_stratified_split import build_features import metrics from models import SVAE from models.GCN import LightGCN, NGCF # # Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: # Create DataFrame with reset index of 0-n_movies. 
movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset index to 0-n_users. train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we can convert to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph (sparse matix where users are rows and movies are columns. # 1 if a user reviewed that movie, 0 if they didn't). R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny matrix with the user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) # List of lists. adj_mat.tolil() R = R.tolil() # Put together adjacency matrix. Movies and users are nodes/vertices. # 1 if the movie and user are connected. adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: # Calculate degree matrix D (for every row count the number of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with the values of D^(-0.5) are the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5). 
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell SparseTensor where the non-zero points are indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models # ## Graph Convoultional Networks (GCNs) # ### Light Graph Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and NGCF # In[24]: # Convert test user ids to the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) recs = [] for model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column with the predicted movie's rank for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that are also in the test set recs.append(top_k) # ## Standard Variational Autoencoder (SVAE) # In[26]: # Binarize the data (only keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both usercount and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for train, validation and test sets # use list of unique items from training set for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for train, validation and test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # 
Split validation and test data into training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and test data train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test data: training part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test data: testing part (save non-binary version in the separate object, will be used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE # In[28]: # Model prediction on the training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings # Create column with the predicted movie's rank for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that are also in the test set recs.append(top_k) # ## Singular Value Decomposition (SVD) # ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and SVD # In[31]: for model in [svdpp, svd]: predictions = [] users = train['userId'].unique() items = train['movieId'].unique() for user in users: for item in items: predictions.append([user, item, model.predict(user, item).est]) predictions = 
pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already seen by users # Create column of all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join and remove movies that have alread been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how="outer") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that appear in both the train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so they have the same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train and test are NOT equal') print(f"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}") print(f"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}") continue # From the predictions, we want only the top k for each user, # not all the recommendations. # Extract the top k recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that are also in the test set recs.append(top_k) # # Compare performance # Looking at all 5 of our models, we can see that the state-of-the-art model LightGCN vastly outperforms all other models. When compared to SVD++, a widely used algorithm during the Netflix Prize competition, LightGCN achieves an increase in **Percision@k by 29%, Recall@k by 18%, MAP by 12%, and NDCG by 35%**. # # NGCF is the older sister model to LightGCN, but only by a single year. We can see how LightGCN improves in ranking metrics compared to NGCF by simply removing unnecessary operations. # # In conclusion, this demonstrates how far recommendation systems have advanced since 2009, and how new model architectures with notable performance increases can be developed in the span of just 1-2 years. # In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user ids to the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) for rec, name in zip(recs, model_names): tester = test_df if name == 'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] # In[33]: comparison # # References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. 
<NAME>, Netflix Prize and SVD, 2014, https://www.semanticscholar.org/paper/Netflix-Prize-and-SVD-Gower/ce7b81b46939d7852dbb30538a7796e69fdd407c #
[ "tensorflow.python.framework.ops.disable_eager_execution", "numpy.random.seed", "models.GCN.LightGCN", "pandas.read_csv", "build_features.AffinityMatrix", "os.path.join", "metrics.mean_average_precision", "numpy.mat", "pandas.DataFrame", "sys.path.append", "numpy.power", "pandas.merge", "surprise.Reader", "os.path.exists", "metrics.recall_at_k", "tensorflow.keras.optimizers.Adam", "models.GCN.NGCF", "tensorflow.SparseTensor", "metrics.ndcg", "scipy.sparse.dok_matrix", "surprise.SVDpp", "surprise.SVD", "scipy.sparse.diags", "numpy.isinf", "metrics.precision_at_k", "utils.numpy_stratified_split", "numpy.random.permutation", "models.SVAE.StandardVAE", "utils.stratified_split", "sys.path.insert", "pandas.unique", "numpy.where" ]
[((145, 183), 'os.path.join', 'os.path.join', (['"""/content"""', 'project_name'], {}), "('/content', project_name)\n", (157, 183), False, 'import os\n'), ((1194, 1222), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./code"""'], {}), "(0, './code')\n", (1209, 1222), False, 'import sys\n'), ((2517, 2556), 'os.path.join', 'os.path.join', (['"""./data/bronze"""', '"""u.data"""'], {}), "('./data/bronze', 'u.data')\n", (2529, 2556), False, 'import os\n'), ((2568, 2645), 'pandas.read_csv', 'pd.read_csv', (['fp'], {'sep': '"""\t"""', 'names': "['userId', 'movieId', 'rating', 'timestamp']"}), "(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp'])\n", (2579, 2645), True, 'import pandas as pd\n'), ((2758, 2797), 'os.path.join', 'os.path.join', (['"""./data/bronze"""', '"""u.item"""'], {}), "('./data/bronze', 'u.item')\n", (2770, 2797), False, 'import os\n'), ((3035, 3083), 'utils.stratified_split', 'stratified_split', (['raw_data', '"""userId"""', 'train_size'], {}), "(raw_data, 'userId', train_size)\n", (3051, 3083), False, 'from utils import stratified_split, numpy_stratified_split\n'), ((3622, 3674), 'pandas.merge', 'pd.merge', (['train', 'movie_new'], {'on': '"""movieId"""', 'how': '"""left"""'}), "(train, movie_new, on='movieId', how='left')\n", (3630, 3674), True, 'import pandas as pd\n'), ((3850, 3901), 'pandas.merge', 'pd.merge', (['test', 'movie_new'], {'on': '"""movieId"""', 'how': '"""left"""'}), "(test, movie_new, on='movieId', how='left')\n", (3858, 3901), True, 'import pandas as pd\n'), ((4540, 4592), 'scipy.sparse.dok_matrix', 'sp.dok_matrix', (['(n_users, n_movies)'], {'dtype': 'np.float32'}), '((n_users, n_movies), dtype=np.float32)\n', (4553, 4592), True, 'import scipy.sparse as sp\n'), ((4724, 4797), 'scipy.sparse.dok_matrix', 'sp.dok_matrix', (['(n_users + n_movies, n_users + n_movies)'], {'dtype': 'np.float32'}), '((n_users + n_movies, n_users + n_movies), dtype=np.float32)\n', (4737, 4797), True, 'import scipy.sparse as sp\n'), ((5380, 5402), 'scipy.sparse.diags', 'sp.diags', (['D_inv_values'], {}), '(D_inv_values)\n', (5388, 5402), True, 'import scipy.sparse as sp\n'), ((5769, 5814), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'coo.data', 'coo.shape'], {}), '(indices, coo.data, coo.shape)\n', (5784, 5814), True, 'import tensorflow as tf\n'), ((5961, 6025), 'models.GCN.LightGCN', 'LightGCN', (['A_tilde'], {'n_users': 'n_users', 'n_items': 'n_movies', 'n_layers': '(3)'}), '(A_tilde, n_users=n_users, n_items=n_movies, n_layers=3)\n', (5969, 6025), False, 'from models.GCN import LightGCN, NGCF\n'), ((6109, 6153), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (6133, 6153), True, 'import tensorflow as tf\n'), ((6297, 6357), 'models.GCN.NGCF', 'NGCF', (['A_tilde'], {'n_users': 'n_users', 'n_items': 'n_movies', 'n_layers': '(3)'}), '(A_tilde, n_users=n_users, n_items=n_movies, n_layers=3)\n', (6301, 6357), False, 'from models.GCN import LightGCN, NGCF\n'), ((7997, 8016), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (8011, 8016), True, 'import numpy as np\n'), ((8032, 8067), 'numpy.random.permutation', 'np.random.permutation', (['unique_users'], {}), '(unique_users)\n', (8053, 8067), True, 'import numpy as np\n'), ((8521, 8552), 'pandas.unique', 'pd.unique', (["train_set['movieId']"], {}), "(train_set['movieId'])\n", (8530, 8552), True, 'import pandas as pd\n'), ((8839, 8913), 'build_features.AffinityMatrix', 'build_features.AffinityMatrix', ([], {'df': 
'train_set', 'items_list': 'unique_train_items'}), '(df=train_set, items_list=unique_train_items)\n', (8868, 8913), False, 'import build_features\n'), ((8923, 8995), 'build_features.AffinityMatrix', 'build_features.AffinityMatrix', ([], {'df': 'val_set', 'items_list': 'unique_train_items'}), '(df=val_set, items_list=unique_train_items)\n', (8952, 8995), False, 'import build_features\n'), ((9006, 9079), 'build_features.AffinityMatrix', 'build_features.AffinityMatrix', ([], {'df': 'test_set', 'items_list': 'unique_train_items'}), '(df=test_set, items_list=unique_train_items)\n', (9035, 9079), False, 'import build_features\n'), ((9431, 9485), 'utils.numpy_stratified_split', 'numpy_stratified_split', (['val_data'], {'ratio': '(0.75)', 'seed': '(123)'}), '(val_data, ratio=0.75, seed=123)\n', (9453, 9485), False, 'from utils import stratified_split, numpy_stratified_split\n'), ((9515, 9570), 'utils.numpy_stratified_split', 'numpy_stratified_split', (['test_data'], {'ratio': '(0.75)', 'seed': '(123)'}), '(test_data, ratio=0.75, seed=123)\n', (9537, 9570), False, 'from utils import stratified_split, numpy_stratified_split\n'), ((9628, 9664), 'numpy.where', 'np.where', (['(train_data > 3.5)', '(1.0)', '(0.0)'], {}), '(train_data > 3.5, 1.0, 0.0)\n', (9636, 9664), True, 'import numpy as np\n'), ((9676, 9710), 'numpy.where', 'np.where', (['(val_data > 3.5)', '(1.0)', '(0.0)'], {}), '(val_data > 3.5, 1.0, 0.0)\n', (9684, 9710), True, 'import numpy as np\n'), ((9723, 9758), 'numpy.where', 'np.where', (['(test_data > 3.5)', '(1.0)', '(0.0)'], {}), '(test_data > 3.5, 1.0, 0.0)\n', (9731, 9758), True, 'import numpy as np\n'), ((9801, 9838), 'numpy.where', 'np.where', (['(val_data_tr > 3.5)', '(1.0)', '(0.0)'], {}), '(val_data_tr > 3.5, 1.0, 0.0)\n', (9809, 9838), True, 'import numpy as np\n'), ((9894, 9931), 'numpy.where', 'np.where', (['(val_data_te > 3.5)', '(1.0)', '(0.0)'], {}), '(val_data_te > 3.5, 1.0, 0.0)\n', (9902, 9931), True, 'import numpy as np\n'), ((9985, 10023), 'numpy.where', 'np.where', (['(test_data_tr > 3.5)', '(1.0)', '(0.0)'], {}), '(test_data_tr > 3.5, 1.0, 0.0)\n', (9993, 10023), True, 'import numpy as np\n'), ((10202, 10240), 'numpy.where', 'np.where', (['(test_data_te > 3.5)', '(1.0)', '(0.0)'], {}), '(test_data_te > 3.5, 1.0, 0.0)\n', (10210, 10240), True, 'import numpy as np\n'), ((10309, 10343), 'pandas.DataFrame', 'pd.DataFrame', (['test_data_te_ratings'], {}), '(test_data_te_ratings)\n', (10321, 10343), True, 'import pandas as pd\n'), ((10364, 10397), 'pandas.DataFrame', 'pd.DataFrame', (['val_data_te_ratings'], {}), '(val_data_te_ratings)\n', (10376, 10397), True, 'import pandas as pd\n'), ((11259, 11284), 'tensorflow.python.framework.ops.disable_eager_execution', 'disable_eager_execution', ([], {}), '()\n', (11282, 11284), False, 'from tensorflow.python.framework.ops import disable_eager_execution\n'), ((11298, 11547), 'models.SVAE.StandardVAE', 'SVAE.StandardVAE', ([], {'n_users': 'train_data.shape[0]', 'original_dim': 'train_data.shape[1]', 'intermediate_dim': '(200)', 'latent_dim': '(64)', 'n_epochs': '(400)', 'batch_size': '(100)', 'k': '(10)', 'verbose': '(0)', 'seed': '(123)', 'drop_encoder': '(0.5)', 'drop_decoder': '(0.5)', 'annealing': '(False)', 'beta': '(1.0)'}), '(n_users=train_data.shape[0], original_dim=train_data.shape\n [1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100,\n k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5,\n annealing=False, beta=1.0)\n', (11314, 11547), False, 'from models import SVAE\n'), ((13050, 13121), 
'surprise.SVDpp', 'surprise.SVDpp', ([], {'random_state': '(0)', 'n_factors': '(64)', 'n_epochs': '(10)', 'verbose': '(True)'}), '(random_state=0, n_factors=64, n_epochs=10, verbose=True)\n', (13064, 13121), False, 'import surprise\n'), ((13179, 13248), 'surprise.SVD', 'surprise.SVD', ([], {'random_state': '(0)', 'n_factors': '(64)', 'n_epochs': '(10)', 'verbose': '(True)'}), '(random_state=0, n_factors=64, n_epochs=10, verbose=True)\n', (13191, 13248), False, 'import surprise\n'), ((16177, 16254), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']"}), "(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG'])\n", (16189, 16254), True, 'import pandas as pd\n'), ((204, 232), 'os.path.exists', 'os.path.exists', (['project_path'], {}), '(project_path)\n', (218, 232), False, 'import os\n'), ((517, 538), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (532, 538), False, 'import sys\n'), ((5262, 5284), 'numpy.isinf', 'np.isinf', (['D_inv_values'], {}), '(D_inv_values)\n', (5270, 5284), True, 'import numpy as np\n'), ((13600, 13670), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {'columns': "['userId', 'movieId', 'prediction']"}), "(predictions, columns=['userId', 'movieId', 'prediction'])\n", (13612, 13670), True, 'import pandas as pd\n'), ((13897, 13963), 'pandas.merge', 'pd.merge', (['temp', 'predictions'], {'on': "['userId', 'movieId']", 'how': '"""outer"""'}), "(temp, predictions, on=['userId', 'movieId'], how='outer')\n", (13905, 13963), True, 'import pandas as pd\n'), ((16461, 16525), 'metrics.precision_at_k', 'metrics.precision_at_k', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16483, 16525), False, 'import metrics\n'), ((16536, 16597), 'metrics.recall_at_k', 'metrics.recall_at_k', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16555, 16597), False, 'import metrics\n'), ((16608, 16680), 'metrics.mean_average_precision', 'metrics.mean_average_precision', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16638, 16680), False, 'import metrics\n'), ((16692, 16746), 'metrics.ndcg', 'metrics.ndcg', (['rec', 'tester', '"""userId"""', '"""movieId"""', '"""rank"""'], {}), "(rec, tester, 'userId', 'movieId', 'rank')\n", (16704, 16746), False, 'import metrics\n'), ((5206, 5238), 'numpy.power', 'np.power', (['(D_values + 1e-09)', '(-0.5)'], {}), '(D_values + 1e-09, -0.5)\n', (5214, 5238), True, 'import numpy as np\n'), ((5693, 5719), 'numpy.mat', 'np.mat', (['[coo.row, coo.col]'], {}), '([coo.row, coo.col])\n', (5699, 5719), True, 'import numpy as np\n'), ((12992, 13018), 'surprise.Reader', 'surprise.Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (13007, 13018), False, 'import surprise\n')]
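The comparison cell in the notebook above reports Precision@k, Recall@k, MAP and NDCG at k=10 through the repo's metrics module; as a worked reminder of what the first two measure per user, here is a toy sketch (not the repo's implementation, and the ids are made up):

def precision_recall_at_k(ranked_items, relevant_items, k=10):
    # Fraction of the top-k that is relevant, and fraction of relevant items recovered.
    hits = len(set(ranked_items[:k]) & set(relevant_items))
    return hits / k, hits / len(relevant_items)

# 3 of the 10 recommended ids appear in this user's 5 held-out test items.
p_at_10, r_at_10 = precision_recall_at_k(
    [50, 181, 100, 258, 1, 7, 294, 286, 288, 121],
    [181, 258, 288, 302, 313],
    k=10,
)
print(p_at_10, r_at_10)   # 0.3 0.6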
import yaml import numpy as np import logging logger = logging.getLogger("cm.conf") class ControlModelParameters: """ Load parameters from .yaml file. """ def __init__(self): self._config = None self.wind_farm = None self.turbine = None self.simulation = None self.flow = None self.ssc = None self.mode = None def load(self, file): logger.info("Loading configuration from: {}".format(file)) self._load_configuration_from_yaml(file) try: self._assign_configuration() except KeyError as ke: message = "Missing definition in config file, did not find {}".format(ke) logger.error(message, exc_info=1) raise KeyError("Missing definition in config file, did not find {}".format(ke)) logger.info("Loaded configuration.") def _load_configuration_from_yaml(self, file): stream = open(file, "r") self._config = yaml.load(stream=stream, Loader=yaml.SafeLoader) def print(self): print(yaml.dump(self._config)) def _assign_configuration(self): self.mode = self._config["mode"] if self.mode == "simulation": self.wind_farm = self.WindFarm(self._config["wind_farm"]) self.turbine = self.Turbine(self._config["turbine"]) self.simulation = self.Simulation(self._config["simulation"]) self.flow = self.Flow(self._config["flow"]) if self.mode == "supercontroller": self.ssc = self.SSC(self._config["ssc"]) self.turbine = self.Turbine(self._config["turbine"]) # if self.ssc.type == "gradient_step": self.wind_farm = self.WindFarm(self._config["wind_farm"]) self.simulation = self.Simulation(self._config["simulation"]) self.flow = self.Flow(self._config["flow"]) # else: # self.simulation = self.Simulation(self._config["simulation"]) if "estimator" in self._config.keys(): self.estimator = self.Estimator(self._config["estimator"]) class WindFarm: def __init__(self, config_dict): self.size = config_dict["size"] self.cells = config_dict["cells"] self.positions = config_dict["positions"] self.yaw_angles = np.deg2rad(config_dict["yaw_angles"]) # self.yaw_angles = [np.array(x) for x in self.yaw_angles] self.do_refine_turbines = config_dict["do_refine_turbines"] if self.do_refine_turbines: self.refine_radius = config_dict["refine_radius"] else: self.refine_radius = None self.controller = self.FarmController(config_dict["farm_controller"]) class FarmController: def __init__(self, config_dict): self.control_discretisation = config_dict["control_discretisation"] self.controls = config_dict["controls"] self.with_external_controller = False for control in self.controls.values(): if control['type'] == 'external': self.with_external_controller = True self.external_controls = config_dict["external_controller"]["controls"] self.port = config_dict["external_controller"]["port"] break # todo: refine control settings class Turbine: """ Turbine configuration class """ def __init__(self,config_dict): self.axial_induction = config_dict["axial_induction"] self.diameter = config_dict["diameter"] self.radius = self.diameter / 2 self.thickness = config_dict["thickness"] self.hub_height = config_dict["hub_height"] self.kernel = config_dict["kernel"] self.force_scale_axial = config_dict.get("force_scale_axial",1.) self.force_scale_transverse = config_dict.get("force_scale_transverse",1.) self.power_scale = config_dict.get("power_scale",1.) self.yaw_rate_limit = config_dict.get("yaw_rate_limit",-1) self.coefficients = config_dict.get("coefficients", "induction") self.pitch = config_dict.get("pitch", 0.) self.torque = config_dict.get("torque", 0.) 
class Simulation: def __init__(self, config_dict): self.is_dynamic = config_dict["is_dynamic"] # if not self.is_dynamic: # raise NotImplementedError("Steady flow currently not implemented") if self.is_dynamic: self.total_time = config_dict["total_time"] self.time_step = config_dict["time_step"] self.write_time_step = config_dict["write_time_step"] self.name = config_dict["name"] self.save_logs = config_dict["save_logs"] self.dimensions = config_dict["dimensions"] self.probes = config_dict.get("probes",[]) class Flow: def __init__(self, config_dict): self.kinematic_viscosity = config_dict["kinematic_viscosity"] self.tuning_viscosity = config_dict["tuning_viscosity"] self.density = config_dict["density"] self.mixing_length = config_dict["mixing_length"] self.wake_mixing_length = config_dict["wake_mixing_length"] self.wake_mixing_width = config_dict["wake_mixing_width"] self.wake_mixing_offset = config_dict["wake_mixing_offset"] self.wake_mixing_ml_max = config_dict["wake_mixing_ml_max"] self.continuity_correction = config_dict["continuity_correction"] self.type = config_dict["type"] if self.type == "steady": self.inflow_velocity = config_dict["inflow_velocity"] elif self.type == "series": self.inflow_velocity_series = np.array(config_dict["inflow_velocity_series"]) self.inflow_velocity = self.inflow_velocity_series[0, 1:3] self.finite_element = config_dict.get("finite_element","TH") class SSC: def __init__(self, config_dict): self.port = config_dict["port"] self.controls = config_dict["controls"] self.external_controls = config_dict["external_controls"] self.external_measurements = config_dict["external_measurements"] self.control_discretisation = config_dict["control_discretisation"] self.prediction_horizon = config_dict["prediction_horizon"] self.control_horizon = config_dict["control_horizon"] self.transient_time = config_dict.get("transient_time",-1) # self.objective = config_dict["objective"] # if self.objective == "tracking": # self.power_reference = np.array(config_dict["power_reference"]) # self.power_reference[:, 1] *= 1e6 # # if self.mode == "pitch_torque": # # raise NotImplementedError("gradient step pitch torque control not implemented.") self.plant = config_dict.get("plant", "cm") if self.plant == "sowfa": self.sowfa_time_step = config_dict["sowfa_time_step"] class Estimator: def __init__(self, config_dict): try: self.source = config_dict["source"] except KeyError as ke: logger.error("Only SOWFA as data source implemented") self.estimation_type = config_dict["type"] self.assimilation_window = config_dict["assimilation_window"] self.forward_step = config_dict.get("forward_step", 1) self.transient_period = config_dict.get("transient_period", -1) self.prediction_period = config_dict.get("prediction_period", 0) self.cost_function_weights = config_dict["cost_function_weights"] self.data = config_dict["data"] par = ControlModelParameters() wind_farm = par.wind_farm turbine = par.turbine flow = par.flow simulation = par.simulation with_adjoint = True if __name__ == '__main__': par = ControlModelParameters() par.load("../config/test_config.yaml") # par.print() # par.turbine.print()
[ "yaml.load", "numpy.deg2rad", "yaml.dump", "numpy.array", "logging.getLogger" ]
[((56, 84), 'logging.getLogger', 'logging.getLogger', (['"""cm.conf"""'], {}), "('cm.conf')\n", (73, 84), False, 'import logging\n'), ((991, 1039), 'yaml.load', 'yaml.load', ([], {'stream': 'stream', 'Loader': 'yaml.SafeLoader'}), '(stream=stream, Loader=yaml.SafeLoader)\n', (1000, 1039), False, 'import yaml\n'), ((1076, 1099), 'yaml.dump', 'yaml.dump', (['self._config'], {}), '(self._config)\n', (1085, 1099), False, 'import yaml\n'), ((2350, 2387), 'numpy.deg2rad', 'np.deg2rad', (["config_dict['yaw_angles']"], {}), "(config_dict['yaw_angles'])\n", (2360, 2387), True, 'import numpy as np\n'), ((5981, 6028), 'numpy.array', 'np.array', (["config_dict['inflow_velocity_series']"], {}), "(config_dict['inflow_velocity_series'])\n", (5989, 6028), True, 'import numpy as np\n')]
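The loader in the row above is a thin wrapper around yaml.load(..., Loader=yaml.SafeLoader) followed by manual assignment of nested dict sections onto config classes. A minimal sketch of that pattern with placeholder keys — the real schema expects many more fields (wind_farm, turbine, flow, ...) than shown here:

import yaml

raw = yaml.load(
    "mode: simulation\n"
    "simulation:\n"
    "  is_dynamic: true\n"
    "  time_step: 0.5\n",
    Loader=yaml.SafeLoader,
)

class Section:
    # Expose one config sub-dict as attributes, like the nested classes above.
    def __init__(self, config_dict):
        for key, value in config_dict.items():
            setattr(self, key, value)

simulation = Section(raw["simulation"])
print(raw["mode"], simulation.is_dynamic, simulation.time_step)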
# !/usr/bin/python # -*- coding:UTF-8 -*- # -----------------------------------------------------------------------# # File Name: textrank_keyword # Author: <NAME> # Mail: <EMAIL> # Created Time: 2021-09-04 # Description: # -----------------------------------------------------------------------# import networkx as nx import numpy as np from knlp.common.constant import sentence_delimiters, allow_speech_tags from knlp.information_extract.keywords_extraction.textrank import TextRank from knlp.utils.util import get_default_stop_words_file, AttrDict class TextRank4Keyword(TextRank): """ 这个函数实现了利用Text rank算法来实现关键词提取的功能。 基础的思路就是先分词,然后计算每个词语的权重,最后按照顺序排列,得到重要性 暂时不考虑英文的需求 介绍请见 https://www.jiqizhixin.com/articles/2018-12-28-18 ref https://github.com/letiantian/TextRank4ZH/blob/master/textrank4zh/ """ def __init__(self, stop_words_file=get_default_stop_words_file(), private_vocab=None, allow_speech_tags=allow_speech_tags, delimiters="|".join(sentence_delimiters)): """ Args: stop_words_file: 停用词的文件路径 label_set: allow_speech_tags: 要保留的词性 delimiters: 默认值是`?!;?!。;…\n`,用来将文本拆分为句子。 """ super().__init__(stop_words_file=stop_words_file, private_vocab=private_vocab, allow_speech_tags=allow_speech_tags, delimiters=delimiters) def get_keywords(self, num=6, window=2, word_min_len=1, page_rank_config={'alpha': 0.85, }): """ 获取最重要的num个长度大于等于word_min_len的关键词。 Args: num: window: word_min_len: page_rank_config: Returns: 关键词列表。AttriDict {} """ # 获取按照text rank的方式得到的关键词,并排序 keywords = self.sort_words(self._vertex_source, self._edge_source, window=window, page_rank_config=page_rank_config) result = [] count = 0 for item in keywords: if count >= num: break if len(item.word) >= word_min_len: result.append(item) count += 1 return result def get_keyphrases(self, keywords_num=12, min_occur_num=2): """ 获取关键短语。 获取 keywords_num 个关键词构造的可能出现的短语,要求这个短语在原文本中至少出现的次数为min_occur_num。 使用有限的keywords_num 个关键词来构造短语 Args: keywords_num: 关键词的个数 min_occur_num: 最少出现次数 Returns: 关键短语的列表。 """ keywords_set = set([item.word for item in self.get_keywords(num=keywords_num, word_min_len=1)]) keyphrases = set() for sentence in self.words_no_filter: one = [] for word in sentence: if word in keywords_set: one.append(word) else: if len(one) > 1: keyphrases.add(''.join(one)) # concat在一起 if len(one) == 0: continue else: one = [] # 兜底 if len(one) > 1: keyphrases.add(''.join(one)) return [phrase for phrase in keyphrases if self.text.count(phrase) >= min_occur_num or phrase in self.label_set] @staticmethod def sort_words(vertex_source, edge_source, window=2, page_rank_config=None): """ 将单词按关键程度从大到小排序 Args: vertex_source: 二维列表,子列表代表句子,子列表的元素是单词,这些单词用来构造pagerank中的节点 edge_source: 二维列表,子列表代表句子,子列表的元素是单词,根据单词位置关系构造pagerank中的边 window: 一个句子中相邻的window个单词,两两之间认为有边 page_rank_config: pagerank的设置 Returns: """ page_rank_config = {'alpha': 0.85, } if not page_rank_config else page_rank_config sorted_words = [] word_index = {} index_word = {} _vertex_source = vertex_source _edge_source = edge_source words_number = 0 for word_list in _vertex_source: for word in word_list: if word not in word_index: word_index[word] = words_number index_word[words_number] = word # MAP WORD TO AN INDEX words_number += 1 graph = np.zeros((words_number, words_number)) # words_number X words_number MATRIX def combine(word_list, window=2): """ 构造在window下的单词组合,用来构造单词之间的边。 Args: word_list: list of str, 由单词组成的列表。 window: int, 窗口大小。 Returns: """ if window < 2: window = 2 for x in range(1, window): if x >= len(word_list): break word_list2 = word_list[x:] res = zip(word_list, word_list2) for r in res: yield 
r for word_list in _edge_source: for w1, w2 in combine(word_list, window): if w1 in word_index and w2 in word_index: index1 = word_index[w1] index2 = word_index[w2] # 有链接的位置 = 1。0 graph[index1][index2] = 1.0 graph[index2][index1] = 1.0 nx_graph = nx.from_numpy_matrix(graph) scores = nx.pagerank(nx_graph, **page_rank_config) # this is a dict DIRECTLY GET THE SCORE FOR ALL THIS MATRIX sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) for index, score in sorted_scores: item = AttrDict(word=index_word[index], weight=score) sorted_words.append(item) return sorted_words if __name__ == '__main__': text = "测试分词的结果是否符合预期" window = 5 num = 20 word_min_len = 2 need_key_phrase = True tr4w = TextRank4Keyword() tr4w.analyze(text=text, lower=True) output = {"key_words": [], "key_phrase": []} res_keywords = tr4w.get_keywords(num=num, word_min_len=word_min_len, window=window) for item in res_keywords: kw_count = tr4w.text.count(item.word) output["key_words"].append([item.word, item.weight, kw_count]) if need_key_phrase: for phrase in tr4w.get_keyphrases(keywords_num=10, min_occur_num=2): output['key_phrase'].append(phrase) print(output)
[ "knlp.utils.util.get_default_stop_words_file", "networkx.from_numpy_matrix", "networkx.pagerank", "knlp.utils.util.AttrDict", "numpy.zeros" ]
[((874, 903), 'knlp.utils.util.get_default_stop_words_file', 'get_default_stop_words_file', ([], {}), '()\n', (901, 903), False, 'from knlp.utils.util import get_default_stop_words_file, AttrDict\n'), ((4301, 4339), 'numpy.zeros', 'np.zeros', (['(words_number, words_number)'], {}), '((words_number, words_number))\n', (4309, 4339), True, 'import numpy as np\n'), ((5321, 5348), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['graph'], {}), '(graph)\n', (5341, 5348), True, 'import networkx as nx\n'), ((5366, 5407), 'networkx.pagerank', 'nx.pagerank', (['nx_graph'], {}), '(nx_graph, **page_rank_config)\n', (5377, 5407), True, 'import networkx as nx\n'), ((5618, 5664), 'knlp.utils.util.AttrDict', 'AttrDict', ([], {'word': 'index_word[index]', 'weight': 'score'}), '(word=index_word[index], weight=score)\n', (5626, 5664), False, 'from knlp.utils.util import get_default_stop_words_file, AttrDict\n')]
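Two details worth noting in the row above: the windowed co-occurrence graph is scored with nx.pagerank, and it is built via nx.from_numpy_matrix, which was removed in NetworkX 3.0 (the replacement is from_numpy_array), so the snippet as written is tied to NetworkX 2.x. A minimal version of the same idea that skips the dense matrix entirely (illustrative toy input, not the original pipeline):

import networkx as nx

words = ["rank", "graph", "rank", "word", "rank", "text"]
window = 2

g = nx.Graph()
for i, w1 in enumerate(words):
    for w2 in words[i + 1:i + window]:   # neighbours inside the co-occurrence window
        if w1 != w2:
            g.add_edge(w1, w2)

scores = nx.pagerank(g, alpha=0.85)
print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True))   # "rank" scores highest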
import os,sys from PIL import Image import numpy LETTER_NB = 5 LETTER_SPACE = 1 LETTER_SIZE = 8 LETTER_LEFT = 10 LETTER_RIGHT = 16 class CaptchaReader(object): """docstring for CaptchaReader""" def __init__(self, folderDico): super(CaptchaReader, self).__init__() self.folderDico = folderDico + "/" def read(self, filename): # Extract symbol from targetted captcha symb_extractor = captchaSymbolExtractor() listSymb = symb_extractor.extractSymbol(filename) cap_string = "" nb_unread = 0 for symb in listSymb: succes = False for f in os.listdir(self.folderDico): if f.endswith(".png"): pil_image = Image.open(self.folderDico + f) dic_symb = numpy.array(pil_image) if self.compare(symb, dic_symb): succes = True if f[0].isdigit(): cap_string += f[0] else: cap_string += f[3] break if not succes: # If you go there, then the symbol has not been read Image.fromarray(symb).save("error/symb" + str(nb_unread) + ".png") nb_unread += 1 #return the string return cap_string def compare(self, symb_np, im_dic): #print symb_np return numpy.array_equal(symb_np, im_dic/255) class captchaSymbolExtractor(object): """docstring for captchaSymbolExtractor""" def __init__(self): super(captchaSymbolExtractor, self).__init__() def extractSymbol(self, filename): # mat_pix is a numpy array mat_pix = self.openImage(filename) list_im = [] for i in range(5): left = LETTER_LEFT + i * (LETTER_SIZE + LETTER_SPACE) right = LETTER_LEFT + (i + 1) * (LETTER_SIZE + LETTER_SPACE) - 1 symb = mat_pix[6:19, left:right] list_im.append(symb) im = Image.fromarray(symb*255) im = im.convert('1') return list_im def openImage(self, filename): pil_image = Image.open(filename) return numpy.array(pil_image)
[ "PIL.Image.open", "PIL.Image.fromarray", "numpy.array", "numpy.array_equal", "os.listdir" ]
[((1149, 1189), 'numpy.array_equal', 'numpy.array_equal', (['symb_np', '(im_dic / 255)'], {}), '(symb_np, im_dic / 255)\n', (1166, 1189), False, 'import numpy\n'), ((1794, 1814), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1804, 1814), False, 'from PIL import Image\n'), ((1824, 1846), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (1835, 1846), False, 'import numpy\n'), ((566, 593), 'os.listdir', 'os.listdir', (['self.folderDico'], {}), '(self.folderDico)\n', (576, 593), False, 'import os, sys\n'), ((1678, 1705), 'PIL.Image.fromarray', 'Image.fromarray', (['(symb * 255)'], {}), '(symb * 255)\n', (1693, 1705), False, 'from PIL import Image\n'), ((639, 670), 'PIL.Image.open', 'Image.open', (['(self.folderDico + f)'], {}), '(self.folderDico + f)\n', (649, 670), False, 'from PIL import Image\n'), ((687, 709), 'numpy.array', 'numpy.array', (['pil_image'], {}), '(pil_image)\n', (698, 709), False, 'import numpy\n'), ((953, 974), 'PIL.Image.fromarray', 'Image.fromarray', (['symb'], {}), '(symb)\n', (968, 974), False, 'from PIL import Image\n')]
import xlrd # from xlutils.copy import copy as xlscopy import shutil import os from numpy import sqrt, abs import sys sys.path.append('../..') # 如果最终要从main.py调用,则删掉这句 from GeneralMethod.PyCalcLib import Fitting from GeneralMethod.PyCalcLib import Method from reportwriter.ReportWriter import ReportWriter class thermology: report_data_keys = [ '1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20', '21','22','23','24','25','26','27','28','29', '101','102','103','104','105','106','107','108','109','110','111','112','113','114','115','116','117', '118','119','120','121','122','123','124','125','126','127','128','129', 'L','K','J', 'Ua','UJ' ] PREVIEW_FILENAME = "Preview.pdf" DATA_SHEET_FILENAME = "data.xlsx" REPORT_TEMPLATE_FILENAME = "thermology_empty.docx" REPORT_OUTPUT_FILENAME = "thermology_out.docx" def __init__(self): self.data = {} # 存放实验中的各个物理量 self.uncertainty = {} # 存放物理量的不确定度 self.report_data = {} # 存放需要填入实验报告的 print("1021 测量水的溶解热+焦耳热功当量\n1. 实验预习\n2. 数据处理") while True: try: oper = input("请选择: ").strip() except EOFError: sys.exit(0) if oper != '1' and oper != '2': print("输入内容非法!请输入一个数字1或2") else: break if oper == '1': print("现在开始实验预习") print("正在打开预习报告......") os.startfile(self.PREVIEW_FILENAME) elif oper == '2': print("现在开始数据处理") print("即将打开数据输入文件......") # 打开数据输入文件 os.startfile(self.DATA_SHEET_FILENAME) input("输入数据完成后请保存并关闭excel文件,然后按回车键继续") # 从excel中读取数据 self.input_data("./"+self.DATA_SHEET_FILENAME) # './' is necessary when running this file, but should be removed if run main.py print("数据读入完毕,处理中......") # 计算物理量 self.calc_data1() self.calc_data2() # 计算不确定度 self.calc_uncertainty() print("正在生成实验报告......") # 生成实验报告 self.fill_report() print("实验报告生成完毕,正在打开......") os.startfile(self.REPORT_OUTPUT_FILENAME) print("Done!") ''' 从excel表格中读取数据 @param filename: 输入excel的文件名 @return none ''' def input_data(self, filename): ws = xlrd.open_workbook(filename).sheet_by_name('thermology1') # 从excel中读取数据 list_time = [] list_resistance = [] list_temperature = [] list_weight = [] for row in [1, 4, 7]: for col in range(1, 8): list_time.append(float(ws.cell_value(row, col))) #时间 self.data['list_time'] = list_time for row in [2, 5, 8]: for col in range(1, 8): list_resistance.append(float(ws.cell_value(row, col))) #电阻值 self.data['list_resistance'] = list_resistance for row in [3, 6, 9]: for col in range(1, 8): list_temperature.append(float(ws.cell_value(row, col))) #温度 self.data['list_temperature'] = list_temperature col = 1 for row in range(10, 14): list_weight.append(float(ws.cell_value(row,col))) #几种质量 self.data['list_weight'] = list_weight row = 14 temp_ice = float(ws.cell_value(row, col)) #冰温度 self.data['temp_ice'] = temp_ice row = 15 temp_env = float(ws.cell_value(row, col)) #环境温度 self.data['temp_env'] = temp_env ws = xlrd.open_workbook(filename).sheet_by_name('thermology2') list_time2 = [] list_resistance2 = [] list_temperature2 = [] for row in [1, 4, 7, 10]: for col in range(1, 9): if isinstance(ws.cell_value(row, col), str): continue else: list_time2.append(float(ws.cell_value(row, col))) self.data['list_time2'] = list_time2 for row in [2, 5, 8, 11]: for col in range(1, 9): if isinstance(ws.cell_value(row, col), str): continue else: list_resistance2.append(float(ws.cell_value(row, col))) self.data['list_resistance2'] = list_resistance2 for row in [3, 6, 9, 12]: for col in range(1, 9): if isinstance(ws.cell_value(row, col), str): continue else: list_temperature2.append(float(ws.cell_value(row, col))) self.data['list_temperature2'] = list_temperature2 col = 1 row 
= 13 temp_env2 = float(ws.cell_value(row, col)) self.data['temp_env2'] = temp_env2 row = 14 voltage_begin = float(ws.cell_value(row, col)) self.data['voltage_begin'] = voltage_begin row = 15 voltage_end = float(ws.cell_value(row, col)) self.data['voltage_end'] = voltage_end row = 16 resitence = float(ws.cell_value(row, col)) self.data['resitence'] = resitence self.data['c1'] = 0.389e3 self.data['c2'] = 0.389e3 self.data['c0'] = 4.18e3 self.data['ci'] = 1.80e3 def calc_data1(self): list_weight = self.data['list_weight'] list_time1 = self.data['list_time'] list_temperature1 = self.data['list_temperature'] temp_ice = self.data['temp_ice'] temp_env = self.data['temp_env'] c1 = self.data['c1'] c2 = self.data['c2'] c0 = self.data['c0'] ci = self.data['ci'] m_water = list_weight[1] - list_weight[0] m_ice = list_weight[2] - list_weight[1] list_graph = Fitting.linear(list_time1, list_temperature1, show_plot=False) self.data['list_graph'] = list_graph temp_begin = list_graph[0] * list_time1[0] + list_graph[1] temp_end = list_graph[0] * list_time1[(len(list_time1)-1)] + list_graph[1] self.data['temp_begin'] = temp_begin self.data['temp_end'] = temp_end self.data['m_water'] = m_water self.data['m_ice'] = m_ice ''' print(temp_begin) print('\n',temp_end) print('\n',m_water) print('\n',m_ice) print('!1!\n',c0*m_water*0.001+c1*list_weight[3]*0.001+c2*(list_weight[0]-list_weight[3])*0.001) print('\n!2!\n',temp_begin-temp_end) print('\n!3!\n',c0*temp_end) print('\n!4!\n',ci*temp_ice) ''' L = 1/(m_ice*0.001) * (c0*m_water*0.001+c1*list_weight[3]*0.001+c2*(list_weight[0]-list_weight[3])*0.001) * (temp_begin-temp_end)- c0*temp_end + ci*temp_ice K = c0 * m_water*0.001 * (list_temperature1[15]-list_temperature1[8]) / ((list_time1[15]-list_time1[8])*(list_temperature1[15]-temp_env)) self.data['L'] = L self.data['K'] = K def calc_data2(self): c1 = self.data['c1'] c0 = self.data['c0'] list_temperature2 = self.data['list_temperature2'] list_weight = self.data['list_weight'] temp_env2 = self.data['temp_env2'] list_time2 = self.data['list_time2'] voltage_begin = self.data['voltage_begin'] voltage_end = self.data['voltage_end'] resitence = self.data['resitence'] m_water = list_weight[1] - list_weight[0] list_x = [] list_y = [] for i in range(len(list_temperature2)): if i==len(list_temperature2)-1: break list_x.append((list_temperature2[i]+list_temperature2[i+1])/2-temp_env2) for i in range(len(list_temperature2)): if i == len(list_temperature2)-1: break list_y.append((list_temperature2[i+1]-list_temperature2[i])/((list_time2[i+1]-list_time2[i])*60)) self.data['list_x'] = list_x self.data['list_y'] = list_y list_graph2 = Fitting.linear(list_x, list_y, show_plot=False) self.data['list_graph2'] = list_graph2 J = ((voltage_begin+voltage_end)/2)**2/(list_graph2[1]*resitence*(c0*m_water*0.001+c1*list_weight[3]*0.001+64.38)) self.data['J'] = J ''' print('b',list_graph2[0]) print('\n a',list_graph2[1]) print('\n r',list_graph2[2]) ''' def calc_uncertainty(self): list_a = [] list_x = self.data['list_x'] list_y = self.data['list_y'] list_graph2 = self.data['list_graph2'] voltage_begin = self.data['voltage_begin'] voltage_end = self.data['voltage_end'] resitence = self.data['resitence'] c1 = self.data['c1'] c0 = self.data['c0'] list_weight = self.data['list_weight'] m_water = list_weight[1] - list_weight[0] for i in range(len(list_x)): list_a.append(list_y[i]-list_graph2[1]*list_x[i]) self.data['list_a'] = list_a Ua = Method.a_uncertainty(self.data['list_a']) self.data['Ua'] = Ua UJ = 
abs(((voltage_begin+voltage_end)/2)**2/(Ua*resitence*(c0*m_water*0.001+c1*list_weight[3]*0.001 + 64.38))) self.data['UJ'] = UJ def fill_report(self): # 表格:xy for i, x_i in enumerate(self.data['list_x']): self.report_data[str(i + 1)] = "%.5f" % (x_i) for i, y_i in enumerate(self.data['list_y']): self.report_data[str(i + 101)] = "%.5f" % (y_i) # 最终结果 self.report_data['L'] = self.data['L'] self.report_data['K'] = self.data['K'] self.report_data['J'] = self.data['J'] self.report_data['Ua'] = self.data['Ua'] self.report_data['UJ'] = self.data['UJ'] RW = ReportWriter() RW.load_replace_kw(self.report_data) RW.fill_report(self.REPORT_TEMPLATE_FILENAME, self.REPORT_OUTPUT_FILENAME) if __name__ == '__main__': mc = thermology()
[ "sys.path.append", "numpy.abs", "GeneralMethod.PyCalcLib.Fitting.linear", "reportwriter.ReportWriter.ReportWriter", "xlrd.open_workbook", "GeneralMethod.PyCalcLib.Method.a_uncertainty", "sys.exit", "os.startfile" ]
[((126, 150), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (141, 150), False, 'import sys\n'), ((6025, 6087), 'GeneralMethod.PyCalcLib.Fitting.linear', 'Fitting.linear', (['list_time1', 'list_temperature1'], {'show_plot': '(False)'}), '(list_time1, list_temperature1, show_plot=False)\n', (6039, 6087), False, 'from GeneralMethod.PyCalcLib import Fitting\n'), ((8321, 8368), 'GeneralMethod.PyCalcLib.Fitting.linear', 'Fitting.linear', (['list_x', 'list_y'], {'show_plot': '(False)'}), '(list_x, list_y, show_plot=False)\n', (8335, 8368), False, 'from GeneralMethod.PyCalcLib import Fitting\n'), ((9355, 9396), 'GeneralMethod.PyCalcLib.Method.a_uncertainty', 'Method.a_uncertainty', (["self.data['list_a']"], {}), "(self.data['list_a'])\n", (9375, 9396), False, 'from GeneralMethod.PyCalcLib import Method\n'), ((9441, 9572), 'numpy.abs', 'abs', (['(((voltage_begin + voltage_end) / 2) ** 2 / (Ua * resitence * (c0 * m_water *\n 0.001 + c1 * list_weight[3] * 0.001 + 64.38)))'], {}), '(((voltage_begin + voltage_end) / 2) ** 2 / (Ua * resitence * (c0 *\n m_water * 0.001 + c1 * list_weight[3] * 0.001 + 64.38)))\n', (9444, 9572), False, 'from numpy import sqrt, abs\n'), ((10136, 10150), 'reportwriter.ReportWriter.ReportWriter', 'ReportWriter', ([], {}), '()\n', (10148, 10150), False, 'from reportwriter.ReportWriter import ReportWriter\n'), ((1550, 1585), 'os.startfile', 'os.startfile', (['self.PREVIEW_FILENAME'], {}), '(self.PREVIEW_FILENAME)\n', (1562, 1585), False, 'import os\n'), ((1720, 1758), 'os.startfile', 'os.startfile', (['self.DATA_SHEET_FILENAME'], {}), '(self.DATA_SHEET_FILENAME)\n', (1732, 1758), False, 'import os\n'), ((2306, 2347), 'os.startfile', 'os.startfile', (['self.REPORT_OUTPUT_FILENAME'], {}), '(self.REPORT_OUTPUT_FILENAME)\n', (2318, 2347), False, 'import os\n'), ((2520, 2548), 'xlrd.open_workbook', 'xlrd.open_workbook', (['filename'], {}), '(filename)\n', (2538, 2548), False, 'import xlrd\n'), ((3750, 3778), 'xlrd.open_workbook', 'xlrd.open_workbook', (['filename'], {}), '(filename)\n', (3768, 3778), False, 'import xlrd\n'), ((1301, 1312), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1309, 1312), False, 'import sys\n')]
# Authors: <NAME> (lambertt) and <NAME> (odafaluy) import numpy import scipy import scipy.linalg import plot class Matrix: """ Provides Methods for operations with an hilbert- or a special triangular matrix. """ def __init__(self, mtype, dim, dtype): """ Initializes the class instance. :param mtype: The matrix type ("hilbert" or "saite" for triangular) :param dim: The dimension. Must be > 0. :param dtype: The type to use. Can be "float16", "float32" or "flaot64" """ if mtype not in ["saite", "hilbert"]: raise Exception("Unknown mtype. Allowed are 'hilbert' and 'saite'.") self.mtype = mtype if dim <= 0: raise Exception("dim must be > 0") self.dim = dim if dtype not in ["float16", "float32", "float64"]: raise Exception("Unknown dtype. Allowed are 'float16', 'float32' and 'float64'.") self.dtype = dtype self.dtype_constructor = None self.matrix = None self.inv = None self.l = None self.u = None self.create_matrix_and_inv() def create_matrix_and_inv(self): """ Calculates the matrix from the values given to the constructor and its inverse. :return: Nothing. """ arr = [] if self.mtype == "saite": for row in xrange(0, self.dim): arr.append([]) for col in xrange(0, self.dim): if row == col: arr[row].append(2) elif row - 1 == col or col - 1 == row: arr[row].append(-1) else: arr[row].append(0) if self.mtype == "hilbert": arr = scipy.linalg.hilbert(self.dim).tolist() self.matrix = numpy.array(arr, dtype=self.dtype) self.inv = scipy.linalg.inv(self.matrix) def condition(self): """ Calculates the condition of the matrix. :return: The condition of the matrix. """ return numpy.linalg.norm(self.matrix, ord=numpy.inf) * numpy.linalg.norm(self.inv, ord=numpy.inf) def lu(self): """ Splits the matrix into l (left lower) and u (right upper) matrices. (Matrix A = LU) :return: A Tuple l,u of matrices """ if self.l is None or self.u is None: self.l, self.u = scipy.linalg.lu(self.matrix, permute_l=True) return self.l, self.u def solve(self, b): """ Solves the equation Ax=b for x and the matrix A. :param b: The vector b to solve the Matrix for. :return: The vector x from Ax=b. """ l, u = self.lu() x = scipy.linalg.solve_triangular(l, b, lower=True) x = scipy.linalg.solve_triangular(u, x, lower=False) return x def main_31b(mtypes, dims, dtypes): """ Executes experiments as described in 3.1B. :param mtypes: The mtype-values to use. :param dims: The dimensions to use. :param dtypes: The dtype-values to use. :return: Nothing. """ for mtype in mtypes: for dim in dims: for dtype in dtypes: print("") print("Experiment for mtype={0}, dim={1}, dtype={2}".format(mtype, dim, dtype)) identity = numpy.identity(dim, dtype) matrix = Matrix(mtype, dim, dtype) m = identity - (numpy.dot(matrix.matrix, matrix.inv)) try: m_inv = scipy.linalg.inv(m) except (numpy.linalg.linalg.LinAlgError, ValueError) as ex: print("Cannot calculate inverse of M: " + ex.message) continue condition = numpy.linalg.norm(m, ord=numpy.inf) * numpy.linalg.norm(m_inv, ord=numpy.inf) print("cond(M) = {1} || I - M M^(-1) || = {0}".format(condition, matrix.condition())) def main_32b_saite(n): plot.plot(n) def main_32b_hilbert(i_max, dtype, n): """ Executes experiments as described in 3.2B B. (Hilbert) :param i_max: The maximum i to use :param dtype: the data-type to use (float16, float32 or float64) :param n: The dimension to use. :return: Nothing. 
""" matrix = Matrix("hilbert", n, dtype) print("Hilbert Matrix with n={0} and type {1}".format(n, dtype)) result = numpy.identity(n, dtype=dtype) for i in xrange(1, i_max + 1): result = numpy.dot(result, matrix.matrix) print("i = {0}, x^{0} = ".format(i)) print(result) def main_32b(dtypes, n_iterable, i_iterable): """ Executes experiments as described in 3.2B :param dtypes: the data-type to use (float16, float32 or float64) :param n_iterable: The n-values to use. :param i_iterable: The i-values to use. (if i>n it will be ignored). :return: Nothing. """ for dtype in dtypes: for n in n_iterable: for i_max in i_iterable: if i_max > n: continue main_32b_hilbert(i_max, dtype, n) def main(experiment, mtypes=None, dims=None, dtypes=None, n_iterable=None, i_iterable=None): """ Executes experiments as described. See start.py for more information. :return: Nothing. """ if experiment == "3.1B": main_31b(mtypes, dims, dtypes) elif experiment == "3.2B - A": for n in n_iterable: main_32b_saite(n) elif experiment == "3.2B - B": main_32b(dtypes, n_iterable, i_iterable) else: print("Unknown experiment")
[ "scipy.linalg.hilbert", "scipy.linalg.solve_triangular", "numpy.identity", "scipy.linalg.lu", "scipy.linalg.inv", "numpy.array", "numpy.linalg.norm", "numpy.dot", "plot.plot" ]
[((4016, 4028), 'plot.plot', 'plot.plot', (['n'], {}), '(n)\n', (4025, 4028), False, 'import plot\n'), ((4435, 4465), 'numpy.identity', 'numpy.identity', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (4449, 4465), False, 'import numpy\n'), ((1860, 1894), 'numpy.array', 'numpy.array', (['arr'], {'dtype': 'self.dtype'}), '(arr, dtype=self.dtype)\n', (1871, 1894), False, 'import numpy\n'), ((1914, 1943), 'scipy.linalg.inv', 'scipy.linalg.inv', (['self.matrix'], {}), '(self.matrix)\n', (1930, 1943), False, 'import scipy\n'), ((2762, 2809), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['l', 'b'], {'lower': '(True)'}), '(l, b, lower=True)\n', (2791, 2809), False, 'import scipy\n'), ((2822, 2870), 'scipy.linalg.solve_triangular', 'scipy.linalg.solve_triangular', (['u', 'x'], {'lower': '(False)'}), '(u, x, lower=False)\n', (2851, 2870), False, 'import scipy\n'), ((4518, 4550), 'numpy.dot', 'numpy.dot', (['result', 'matrix.matrix'], {}), '(result, matrix.matrix)\n', (4527, 4550), False, 'import numpy\n'), ((2104, 2149), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.matrix'], {'ord': 'numpy.inf'}), '(self.matrix, ord=numpy.inf)\n', (2121, 2149), False, 'import numpy\n'), ((2152, 2194), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.inv'], {'ord': 'numpy.inf'}), '(self.inv, ord=numpy.inf)\n', (2169, 2194), False, 'import numpy\n'), ((2446, 2490), 'scipy.linalg.lu', 'scipy.linalg.lu', (['self.matrix'], {'permute_l': '(True)'}), '(self.matrix, permute_l=True)\n', (2461, 2490), False, 'import scipy\n'), ((3373, 3399), 'numpy.identity', 'numpy.identity', (['dim', 'dtype'], {}), '(dim, dtype)\n', (3387, 3399), False, 'import numpy\n'), ((1797, 1827), 'scipy.linalg.hilbert', 'scipy.linalg.hilbert', (['self.dim'], {}), '(self.dim)\n', (1817, 1827), False, 'import scipy\n'), ((3483, 3519), 'numpy.dot', 'numpy.dot', (['matrix.matrix', 'matrix.inv'], {}), '(matrix.matrix, matrix.inv)\n', (3492, 3519), False, 'import numpy\n'), ((3571, 3590), 'scipy.linalg.inv', 'scipy.linalg.inv', (['m'], {}), '(m)\n', (3587, 3590), False, 'import scipy\n'), ((3799, 3834), 'numpy.linalg.norm', 'numpy.linalg.norm', (['m'], {'ord': 'numpy.inf'}), '(m, ord=numpy.inf)\n', (3816, 3834), False, 'import numpy\n'), ((3837, 3876), 'numpy.linalg.norm', 'numpy.linalg.norm', (['m_inv'], {'ord': 'numpy.inf'}), '(m_inv, ord=numpy.inf)\n', (3854, 3876), False, 'import numpy\n')]
"""Trajectory Generator for in-place stepping motion for quadruped robot.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np TWO_PI = 2 * math.pi def _get_actions_asymmetric_sine(phase, tg_params): """Returns the leg extension given current phase of TG and parameters. Args: phase: a number in [0, 2pi) representing current leg phase tg_params: a dictionary of tg parameters: stance_lift_cutoff -- switches the TG between stance (phase < cutoff) and lift (phase > cutoff) phase amplitude_swing -- amplitude in swing phase amplitude_lift -- amplitude in lift phase center_extension -- center of leg extension """ stance_lift_cutoff = tg_params['stance_lift_cutoff'] a_prime = np.where(phase < stance_lift_cutoff, tg_params['amplitude_stance'], tg_params['amplitude_lift']) scaled_phase = np.where( phase > stance_lift_cutoff, np.pi + (phase - stance_lift_cutoff) / (TWO_PI - stance_lift_cutoff) * np.pi, phase / stance_lift_cutoff * np.pi) return tg_params['center_extension'] + a_prime * np.sin(scaled_phase) def step(current_phases, leg_frequencies, dt, tg_params): """Steps forward the in-place trajectory generator. Args: current_phases: phases of each leg. leg_frequencies: the frequency to proceed the phase of each leg. dt: amount of time (sec) between consecutive time steps. tg_params: a set of parameters for trajectory generator, see the docstring of "_get_actions_asymmetric_sine" for details. Returns: actions: leg swing/extensions as output by the trajectory generator. new_state: new swing/extension. """ new_phases = np.fmod(current_phases + TWO_PI * leg_frequencies * dt, TWO_PI) extensions = [] for leg_id in range(4): extensions.append( _get_actions_asymmetric_sine(new_phases[..., leg_id], tg_params)) return new_phases, extensions def reset(): return np.array([0, np.pi * 0.5, np.pi, np.pi * 1.5])
[ "numpy.sin", "numpy.where", "numpy.array", "numpy.fmod" ]
[((843, 943), 'numpy.where', 'np.where', (['(phase < stance_lift_cutoff)', "tg_params['amplitude_stance']", "tg_params['amplitude_lift']"], {}), "(phase < stance_lift_cutoff, tg_params['amplitude_stance'],\n tg_params['amplitude_lift'])\n", (851, 943), True, 'import numpy as np\n'), ((978, 1132), 'numpy.where', 'np.where', (['(phase > stance_lift_cutoff)', '(np.pi + (phase - stance_lift_cutoff) / (TWO_PI - stance_lift_cutoff) * np.pi)', '(phase / stance_lift_cutoff * np.pi)'], {}), '(phase > stance_lift_cutoff, np.pi + (phase - stance_lift_cutoff) /\n (TWO_PI - stance_lift_cutoff) * np.pi, phase / stance_lift_cutoff * np.pi)\n', (986, 1132), True, 'import numpy as np\n'), ((1781, 1844), 'numpy.fmod', 'np.fmod', (['(current_phases + TWO_PI * leg_frequencies * dt)', 'TWO_PI'], {}), '(current_phases + TWO_PI * leg_frequencies * dt, TWO_PI)\n', (1788, 1844), True, 'import numpy as np\n'), ((2042, 2088), 'numpy.array', 'np.array', (['[0, np.pi * 0.5, np.pi, np.pi * 1.5]'], {}), '([0, np.pi * 0.5, np.pi, np.pi * 1.5])\n', (2050, 2088), True, 'import numpy as np\n'), ((1193, 1213), 'numpy.sin', 'np.sin', (['scaled_phase'], {}), '(scaled_phase)\n', (1199, 1213), True, 'import numpy as np\n')]
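The row above defines the complete generator; since its only inputs are the phase array and the tg_params keys read in _get_actions_asymmetric_sine, a short usage sketch may help. The numeric values below are placeholders chosen for illustration, not values taken from the source:

import numpy as np

# Hypothetical parameters -- only the key names come from the code above.
tg_params = {
    'stance_lift_cutoff': np.pi,   # first half of the cycle is treated as stance
    'amplitude_stance': 0.05,
    'amplitude_lift': 0.3,
    'center_extension': 0.0,
}

phases = reset()                    # [0, pi/2, pi, 3*pi/2]
for _ in range(100):
    phases, extensions = step(phases, leg_frequencies=np.full(4, 1.5),
                              dt=0.01, tg_params=tg_params)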
# Adapted from:
# https://www.analyticsvidhya.com/blog/2016/08/beginners-guide-to-topic-modeling-in-python/
import read_bibtex
import os, shutil
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
import string
import gensim
from gensim import corpora
from gensim.test.utils import datapath
import numpy as np

stop = set(stopwords.words('english'))
stop.add("exist")
stop.add("because")
stop.add("via")
stop.add("interest")
stop.add("therefore")
stop.add("hence")
stop.add("this")

exclude = set(string.punctuation)
exclude.add("-")
exclude.add("_")
exclude.add(".")
exclude.add(";")

lemma = WordNetLemmatizer()
stemmer = PorterStemmer()

ntopics = 30
npasses = 400
result_dir = "doc_results_all_500_30"
model_dir = "model_all_500_30"
year_from = 1980

# Creating the object for LDA model using gensim library
Lda = gensim.models.ldamodel.LdaModel


def clean(doc):
    punc_free = ''.join(ch for ch in doc if ch not in exclude)
    lemmatized = " ".join(lemma.lemmatize(word) + " " for word in punc_free.lower().split())
    stemmed = " ".join(stemmer.stem(word) for word in lemmatized.split())
    stop_free = " ".join([i for i in stemmed.split() if i not in stop])
    return stop_free


def main():
    if result_dir in os.listdir("."):
        shutil.rmtree("./" + result_dir)
    os.mkdir("./" + result_dir)

    # Read and clean data
    doc_set = read_bibtex.bibtex_tostring_from(year_from)
    doc_clean = [clean(doc).split() for doc in doc_set]

    # Creating the term dictionary of our corpus, where every unique term is assigned an index.
    dictionary = corpora.Dictionary(doc_clean)

    # Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.
    doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]

    # Loading the LDA model
    ldamodel = Lda.load("./" + model_dir + "/all")

    # Infer topic distribution for each doc
    topic_dist = [ldamodel.get_document_topics(dictionary.doc2bow(doc)) for doc in doc_clean]

    # Save results
    np.save("./" + result_dir + "/all", np.array(topic_dist))

    dist_array = np.array(topic_dist)
    transpose_array = [[] for x in range(ntopics)]
    for itr in range(len(dist_array)):
        for top, weight in dist_array[itr]:
            transpose_array[top].append((itr, weight))
    for row in transpose_array:
        row.sort(key=lambda x: x[1], reverse=True)
    np.save("./" + result_dir + "/all_transpose", np.array(transpose_array))


main()
[ "os.mkdir", "shutil.rmtree", "nltk.stem.porter.PorterStemmer", "gensim.corpora.Dictionary", "numpy.array", "nltk.corpus.stopwords.words", "nltk.stem.wordnet.WordNetLemmatizer", "read_bibtex.bibtex_tostring_from", "os.listdir" ]
[((670, 689), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (687, 689), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((700, 715), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (713, 715), False, 'from nltk.stem.porter import PorterStemmer\n'), ((397, 423), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (412, 423), False, 'from nltk.corpus import stopwords\n'), ((1343, 1370), 'os.mkdir', 'os.mkdir', (["('./' + result_dir)"], {}), "('./' + result_dir)\n", (1351, 1370), False, 'import os, shutil\n'), ((1410, 1453), 'read_bibtex.bibtex_tostring_from', 'read_bibtex.bibtex_tostring_from', (['year_from'], {}), '(year_from)\n', (1442, 1453), False, 'import read_bibtex\n'), ((1625, 1654), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (['doc_clean'], {}), '(doc_clean)\n', (1643, 1654), False, 'from gensim import corpora\n'), ((2144, 2164), 'numpy.array', 'np.array', (['topic_dist'], {}), '(topic_dist)\n', (2152, 2164), True, 'import numpy as np\n'), ((1291, 1306), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (1301, 1306), False, 'import os, shutil\n'), ((1308, 1340), 'shutil.rmtree', 'shutil.rmtree', (["('./' + result_dir)"], {}), "('./' + result_dir)\n", (1321, 1340), False, 'import os, shutil\n'), ((2103, 2123), 'numpy.array', 'np.array', (['topic_dist'], {}), '(topic_dist)\n', (2111, 2123), True, 'import numpy as np\n'), ((2485, 2510), 'numpy.array', 'np.array', (['transpose_array'], {}), '(transpose_array)\n', (2493, 2510), True, 'import numpy as np\n')]
import json import numpy as np import pandas as pd pd.options.mode.chained_assignment = None from sklearn.preprocessing import Imputer, StandardScaler import DataSource import os.path class NYK(DataSource.DataSource): def __init__(self, app, dsrc_name='', dsrc_type='csv', dsrc_path='data/', file_name='', header_rows=None, date_cols=None, skip_rows=None, lat1=None, long1=None, lat2=None, long2=None): DataSource.DataSource.__init__(self, app, dsrc_name) self.dsrc_type = dsrc_type self.dsrc_path = dsrc_path self.file_name = file_name self.header_rows = header_rows self.date_cols = date_cols self.skip_rows = skip_rows self.lat1 = lat1 self.long1 = long1 self.lat2 = lat2 self.long2 = long2 self.read_prepare_data() self.init_dsrc() """These methods are fine-tuned for the current data sets. I need to generalize them once I know more about different types of data coming in""" @classmethod def clean(cls, df, name): """Find all empty space or all NaN columns and drops them from the DataFrame""" df.replace(r'\s+', np.nan, regex=True, inplace=True) df.replace(r'-', np.nan, regex=True, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.columns = [str(x) for x in df.columns] df.reset_index(level=[0], inplace=True) df.rename(columns={'index': 'ind'}, inplace=True) """This is to find coordinate columns etc. manually, because we don't know anything about the structure of our data!""" # df.to_csv('data/'+name+'_clean.csv') return df @classmethod def scale_impute(cls, df, method): """Find float columns, impute their NaN values with 'method', and then min-max scale the column/feature""" fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1) df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) scaler = StandardScaler() df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) return df @classmethod def convert_coordinate(cls, df, col_in, col_out): """Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd""" ##FIXME! 
This is assuming all coordinates are E and N df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return df @classmethod def wgs84_to_web_mercator(cls, df, lon, lat): """Convert decimal longitude/latitude to Web Mercator format""" k = 6378137 df['wm%s'%lon] = df[lon] * (k * np.pi/180.0) df['wm%s'%lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k return df def read_prepare_data(self): """Use all data tools above to deliver the final cleaned DataFrame""" self.data = self.dsrc_types[self.dsrc_type]( os.path.join(self.dsrc_path, self.file_name), header = self.header_rows, parse_dates = self.date_cols, skiprows = self.skip_rows, error_bad_lines = False, low_memory = False ) self.data['timestamp2'] = pd.to_datetime(self.data[0]) self.data['timestamp1'] = pd.to_datetime(self.data[1]) self.clean(self.data, self.dsrc_name) self.convert_coordinate(self.data, str(self.lat1), 'lat1') self.convert_coordinate(self.data, str(self.long1), 'long1') self.convert_coordinate(self.data, str(self.lat2), 'lat2') self.convert_coordinate(self.data, str(self.long2), 'long2') self.scale_impute(self.data, 'mean') self.wgs84_to_web_mercator(self.data, 'long1', 'lat1') self.wgs84_to_web_mercator(self.data, 'long2', 'lat2') self.data['timestamp_date'] = self.data['timestamp1'].dt.strftime('%Y-%m-%d') DataSource.DataSource.types['NYK'] = NYK
[ "DataSource.DataSource.__init__", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.Imputer", "numpy.tan", "pandas.to_datetime" ]
[((417, 469), 'DataSource.DataSource.__init__', 'DataSource.DataSource.__init__', (['self', 'app', 'dsrc_name'], {}), '(self, app, dsrc_name)\n', (447, 469), False, 'import DataSource\n'), ((1863, 1918), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': 'np.nan', 'strategy': 'method', 'axis': '(1)'}), '(missing_values=np.nan, strategy=method, axis=1)\n', (1870, 1918), False, 'from sklearn.preprocessing import Imputer, StandardScaler\n'), ((2182, 2198), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2196, 2198), False, 'from sklearn.preprocessing import Imputer, StandardScaler\n'), ((3579, 3607), 'pandas.to_datetime', 'pd.to_datetime', (['self.data[0]'], {}), '(self.data[0])\n', (3593, 3607), True, 'import pandas as pd\n'), ((3642, 3670), 'pandas.to_datetime', 'pd.to_datetime', (['self.data[1]'], {}), '(self.data[1])\n', (3656, 3670), True, 'import pandas as pd\n'), ((3055, 3093), 'numpy.tan', 'np.tan', (['((90 + df[lat]) * np.pi / 360.0)'], {}), '((90 + df[lat]) * np.pi / 360.0)\n', (3061, 3093), True, 'import numpy as np\n')]
import numpy as np import torch from .base_wrapper import BaseWrapper from torch.autograd.functional import hvp, vhp, hessian from typing import List, Tuple, Dict, Union, Callable from torch import nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func = func # Not very clean... if 'device' in dir(func): self.device = func.device else: self.device = torch.device(device) if precision == 'float32': self.precision = torch.float32 elif precision == 'float64': self.precision = torch.float64 else: raise ValueError self.hvp_func = hvp if hvp_type == 'hvp' else vhp def get_value_and_grad(self, input_var): assert 'shapes' in dir( self), 'You must first call get input to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict): grads = {k: v for k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector): assert 'shapes' in dir( self), 'You must first call get input to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes) if isinstance(input_var_, dict): input_var_ = tuple(input_var_.values()) if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert 'shapes' in dir( self), 'You must first call get input to define the tensors shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes)) hess = hessian(func, input_var_, vectorize=False) return hess.cpu().detach().numpy().astype(np.float64) def get_ctr_jac(self, input_var): assert 'shapes' in dir( self), 'You must first call get input to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self, t_list, dim=0): if torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else: raise NotImplementedError def _gather(self, t, i, j): if isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): """ A factory to create a function of the torch parameter model. 
:param model: torch model :type model: torch.nn.Modle] :param loss: a function with signature loss_value = loss(pred_y, true_y). :type loss: function :param train_x: dataset used as input of the model :type train_x: np.ndarray :param train_y: dataset used as ground truth input of the loss :type train_y: np.ndarray :return: (function of the parameters, list of parameters, names of parameters) :rtype: tuple """ # named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()} params, names = extract_weights(model) device = params[0].device prec_ = torch.float32 if precision == 'float32' else torch.float64 if isinstance(train_x, np.ndarray): train_x = torch.tensor(train_x, dtype=prec_, device=device) if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v for k, v in zip(names, new_params)}) out = apply_func(model, train_x) return loss(out, train_y) func.device = device return func, [p.cpu().detach().numpy() for p in params], names def apply_func(func, input_): if isinstance(input_, dict): return func(**input_) elif isinstance(input_, list) or isinstance(input_, tuple): return func(*input_) else: return func(input_) # Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to make nn.Module "functional" # In particular the goal is to be able to provide a function that takes as input # the parameters and evaluate the nn.Module using fixed inputs. def _del_nested_attr(obj: nn.Module, names: List[str]) -> None: """ Deletes the attribute specified by the given list of names. For example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) """ if len(names) == 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None: """ Set the attribute specified by the given list of names to value. For example, to set the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight'], value) """ if len(names) == 1: setattr(obj, names[0], value) else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]: """ This function removes all the Parameters from the model and return them as a tuple as well as their original attribute names. The weights must be re-loaded with `load_weights` before the model can be used again. Note that this function modifies the model in place and after this call, mod.parameters() will be empty. """ orig_params = [p for p in mod.parameters() if p.requires_grad] # Remove all the parameters in the model names = [] for name, p in list(mod.named_parameters()): if p.requires_grad: _del_nested_attr(mod, name.split(".")) names.append(name) # Make params regular Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_() for p in orig_params) return params, names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: """ Reload a set of weights so that `mod` can be used again to perform a forward pass. Note that the `params` are regular Tensors (that can have history) and so are left as Tensors. This means that mod.parameters() will still be empty after this call. """ for name, p in params.items(): _set_nested_attr(mod, name.split("."), p)
[ "torch.autograd.functional.hessian", "numpy.concatenate", "torch.autograd.grad", "torch.cat", "numpy.reshape", "torch.device", "torch.is_tensor", "torch.tensor" ]
[((1248, 1289), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'input_var_grad'], {}), '(loss, input_var_grad)\n', (1267, 1289), False, 'import torch\n'), ((2621, 2686), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'device': 'self.device'}), '(input_var, dtype=self.precision, device=self.device)\n', (2633, 2686), False, 'import torch\n'), ((2809, 2851), 'torch.autograd.functional.hessian', 'hessian', (['func', 'input_var_'], {'vectorize': '(False)'}), '(func, input_var_, vectorize=False)\n', (2816, 2851), False, 'from torch.autograd.functional import hvp, vhp, hessian\n'), ((3390, 3434), 'torch.autograd.grad', 'torch.autograd.grad', (['ctr_val', 'input_var_grad'], {}), '(ctr_val, input_var_grad)\n', (3409, 3434), False, 'import torch\n'), ((3542, 3560), 'torch.is_tensor', 'torch.is_tensor', (['t'], {}), '(t)\n', (3557, 3560), False, 'import torch\n'), ((3775, 3801), 'torch.is_tensor', 'torch.is_tensor', (['t_list[0]'], {}), '(t_list[0])\n', (3790, 3801), False, 'import torch\n'), ((5094, 5143), 'torch.tensor', 'torch.tensor', (['train_x'], {'dtype': 'prec_', 'device': 'device'}), '(train_x, dtype=prec_, device=device)\n', (5106, 5143), False, 'import torch\n'), ((5202, 5251), 'torch.tensor', 'torch.tensor', (['train_y'], {'dtype': 'prec_', 'device': 'device'}), '(train_y, dtype=prec_, device=device)\n', (5214, 5251), False, 'import torch\n'), ((491, 511), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (503, 511), False, 'import torch\n'), ((969, 1059), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'requires_grad': '(True)', 'device': 'self.device'}), '(input_var, dtype=self.precision, requires_grad=True, device=\n self.device)\n', (981, 1059), False, 'import torch\n'), ((1741, 1806), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'device': 'self.device'}), '(input_var, dtype=self.precision, device=self.device)\n', (1753, 1806), False, 'import torch\n'), ((1867, 1929), 'torch.tensor', 'torch.tensor', (['vector'], {'dtype': 'self.precision', 'device': 'self.device'}), '(vector, dtype=self.precision, device=self.device)\n', (1879, 1929), False, 'import torch\n'), ((3104, 3194), 'torch.tensor', 'torch.tensor', (['input_var'], {'dtype': 'self.precision', 'requires_grad': '(True)', 'device': 'self.device'}), '(input_var, dtype=self.precision, requires_grad=True, device=\n self.device)\n', (3116, 3194), False, 'import torch\n'), ((3822, 3844), 'torch.cat', 'torch.cat', (['t_list', 'dim'], {}), '(t_list, dim)\n', (3831, 3844), False, 'import torch\n'), ((4065, 4083), 'torch.is_tensor', 'torch.is_tensor', (['t'], {}), '(t)\n', (4080, 4083), False, 'import torch\n'), ((3654, 3671), 'numpy.reshape', 'np.reshape', (['t', 'sh'], {}), '(t, sh)\n', (3664, 3671), True, 'import numpy as np\n'), ((3912, 3939), 'numpy.concatenate', 'np.concatenate', (['t_list', 'dim'], {}), '(t_list, dim)\n', (3926, 3939), True, 'import numpy as np\n')]
import numpy as np

from utils import pick_discrete


class PseudoMarginalData(object):
    def __init__(self, data, interim_prior):
        # Data should have dims [NOBJ, NSAMPLE, NDIM] or [NOBJ, NSAMPLE] if NDIM is 1
        # interim_prior should have dims [NOBJ, NSAMPLE]
        self.data = data
        self.interim_prior = interim_prior

        if self.data.ndim == 2:
            self.nobj, self.nsample = self.data.shape
        else:
            self.nobj, self.nsample, self.ndim = self.data.shape

        if self.interim_prior.shape != (self.nobj, self.nsample):
            ds = self.data.shape
            ips = self.interim_prior.shape
            raise ValueError(("data shape [NOBJ, NSAMPLE, NDIM] = [{}, {}, {}]" +
                              " inconsistent with interim_prior shape [NOBJ, NSAMPLE] = [{}, {}]")
                             .format(ds[0], ds[1], ds[2], ips[0], ips[1]))

    def __len__(self):
        return self.nobj

    def __getitem__(self, index):
        import numbers
        cls = type(self)
        # *Leave* a shallow axis in the case a single object is requested.
        if isinstance(index, numbers.Integral):
            return cls(self.data[np.newaxis, index], self.interim_prior[np.newaxis, index])
        else:
            return cls(self.data[index], self.interim_prior[index])

    def random_sample(self):
        """Return a [NOBJ, NDIM] numpy array sampling over NSAMPLE using inverse
        interim_prior weights.  Needed to compute a posterior object."""
        ps = 1. / self.interim_prior
        ps /= np.sum(ps, axis=1)[:, np.newaxis]
        return np.array([self.data[i, pick_discrete(p)] for i, p in enumerate(ps)])


class NullManip(object):
    def init(self, D):
        pass

    def __call__(self, D):
        return D

    def unmanip(self, D):
        return D

    def update(self, D, phi, c, prior):
        pass
[ "numpy.sum", "utils.pick_discrete" ]
[((1570, 1588), 'numpy.sum', 'np.sum', (['ps'], {'axis': '(1)'}), '(ps, axis=1)\n', (1576, 1588), True, 'import numpy as np\n'), ((1642, 1658), 'utils.pick_discrete', 'pick_discrete', (['p'], {}), '(p)\n', (1655, 1658), False, 'from utils import pick_discrete\n')]
import pefile import numpy as np # import os execs = [ "1F2EB7B090018D975E6D9B40868C94CA", "33DE5067A433A6EC5C328067DC18EC37", "65018CD542145A3792BA09985734C12A", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "A316D5AECA269CA865077E7FFF356E7D", "<KEY>", "AL65_DB05DF0498B59B42A8E493CF3C10C578", "B07322743778B5868475DBE66EEDAC4F", "B98hX8E8622C393D7E832D39E620EAD5D3B49", "BVJ2D9FBF759F527AF373E34673DC3ACA462", "DS22_A670D13D4D014169C4080328B8FEB86", "EEE99EC8AA67B05407C01094184C33D2B5A44", "F6655E39465C2FF5B016980D918EA028", "F8437E44748D2C3FCF84019766F4E6DC", "<KEY>", "FGTR43_EF8E0FB20E7228C7492CCDC59D87C690", "<KEY>", "FTTR9EA3C16194CE354C244C1B74C46CD92E", "<KEY>", "GFT4_7DDD3D72EAD03C7518F5D47650C8572", "<KEY>", "<KEY>", "JKK8CA6FE7A1315AF5AFEAC2961460A80569", "<KEY>", "<KEY>", "L11_1415EB8519D13328091CC5C76A624E3D", "NBV_8B75BCBFF174C25A0161F30758509A44", "NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4", "PL98_BD8B082B7711BC980252F988BB0CA936", "POL55_A4F1ECC4D25B33395196B5D51A06790", "QW2_4C6BDDCCA2695D6202DF38708E14FC7E", "RTC_7F85D7F628CE62D1D8F7B39D8940472", "SAM_B659D71AE168E774FAAF38DB30F4A84", "TG78Z__727A6800991EEAD454E53E8AF164A99C", "VBMM9_149B7BD7218AAB4E257D28469FDDB0D", "VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E", ] prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [], "sectionVS": [], "sectionSR": [], "kernel32": [], "msvcrt": [], "shell32": [], "user32": [], "ws232": [], "ADVAPI32": [], "GDI32": [], "KERNEL32": [], "NETAPI32": [], "PSAPI": [], "WININET": [], "ntdll": [], "TimeStamp": None} # pe = pefile.PE("65018CD542145A3792BA09985734C12A") # algo = [10, 20, 30, 40, 50] granPrueba = [] entrysList = [] for a in execs: sectionNames = [] sectionVA = [] sectionVS = [] sectionSR = [] kernel32 = [] msvcrt = [] shell32 = [] user32 = [] ws232 = [] ADVAPI32 = [] GDI32 = [] KERNEL32 = [] NETAPI32 = [] PSAPI = [] WININET = [] ntdll = [] # print(execs.index(a) + 1) print("a") print(a) c = execs.index(a) + 1 pe = pefile.PE(a) prueba["correlativo"] = c prueba["nameExec"] = a print(c) print("Secciones") for section in pe.sections: print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData) b = section.Name sectionNames.append(b.decode('utf-8')) sectionVA.append(section.VirtualAddress) sectionVS.append(section.Misc_VirtualSize) sectionSR.append(section.SizeOfRawData) prueba["sectionName"] = sectionNames prueba["sectionVA"] = sectionVA prueba["sectionVS"] = sectionVS prueba["sectionSR"] = sectionSR print() print() print("Entradas") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll) l = entry.dll print('Llamadas a funciones:') entrysList.append(str(l.decode('utf-8'))) if str(entry.dll) == "b'KERNEL32.DLL'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba["kernel32"] = kernel32 elif str(entry.dll) == "b'ADVAPI32.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) ADVAPI32.append(x.decode('utf-8')) prueba["ADVAPI32"] = ADVAPI32 elif str(entry.dll) == "b'GDI32.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) GDI32.append(x.decode('utf-8')) prueba["GDI32"] = GDI32 elif str(entry.dll) == "b'KERNEL32.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) KERNEL32.append(x.decode('utf-8')) prueba["KERNEL32"] = KERNEL32 elif str(entry.dll) == "b'NETAPI32.dll'": for function in entry.imports: x = function.name print('\t', 
x.decode('utf-8')) NETAPI32.append(x.decode('utf-8')) prueba["NETAPI32"] = NETAPI32 elif str(entry.dll) == "b'PSAPI.DLL'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba["PSAPI"] = PSAPI elif str(entry.dll) == "b'WININET.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba["WININET"] = WININET elif str(entry.dll) == "b'ntdll.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) ntdll.append(x.decode('utf-8')) prueba["ntdll"] = ntdll elif str(entry.dll) == "b'MSVCRT.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) msvcrt.append(x.decode('utf-8')) prueba["msvcrt"] = msvcrt elif str(entry.dll) == "b'SHELL32.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) shell32.append(x.decode('utf-8')) prueba["shell32"] = shell32 elif str(entry.dll) == "b'USER32.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) user32.append(x.decode('utf-8')) prueba["user32"] = user32 elif str(entry.dll) == "b'WS2_32.dll'": for function in entry.imports: x = function.name print('\t', x.decode('utf-8')) ws232.append(x.decode('utf-8')) prueba["ws232"] = ws232 # listamalware = os.listdir(path) print() print() print("TimeStamp") print("TimeDateStamp : " + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba["TimeStamp"] = z print(c) # print() # print() # print(pe.FILE_HEADER.NumberOfSections) granPrueba.append(prueba) prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [], "sectionVS": [], "sectionSR": None, "kernel32": None, "msvcrt": None, "shell32": None, "user32": None, "ws232": None, "TimeStamp": None} # print(granPrueba) import pandas as pd df = pd.DataFrame(granPrueba) print(df) # print(entrysList) def unique(list1): x = np.array(list1) print(np.unique(x)) unique(entrysList) df.to_csv("dataset.csv")
[ "pandas.DataFrame", "pefile.PE", "numpy.array", "numpy.unique" ]
[((5899, 5923), 'pandas.DataFrame', 'pd.DataFrame', (['granPrueba'], {}), '(granPrueba)\n', (5911, 5923), True, 'import pandas as pd\n'), ((2017, 2029), 'pefile.PE', 'pefile.PE', (['a'], {}), '(a)\n', (2026, 2029), False, 'import pefile\n'), ((5981, 5996), 'numpy.array', 'np.array', (['list1'], {}), '(list1)\n', (5989, 5996), True, 'import numpy as np\n'), ((6004, 6016), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (6013, 6016), True, 'import numpy as np\n')]
import numpy as np #TODO: #1. create a streamlined and replicable gif creation set of functions in this file. #2. implement these functions into the generation algorithms available. def convert_2d(index, cols): return (index // cols, index % cols) def bounds_check(index, rows, cols): if index[0] < 0 or index[0] > rows - 1: return True if index[1] < 0 or index[1] > cols - 1: return True return False def neighborCheck(grid, curr, rows, cols): #order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret = [] for i in range(4): #bounds checking x = curr.index[1] + ops[i][1] y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y][x].visited == False: if curr.walls[i] != 'X': ret.append(i) return ret def nbr_index(index, dir): if dir == 'L': return (index[0], index[1] - 1) elif dir == 'R': return (index[0], index[1] + 1) elif dir == 'T': return (index[0] - 1, index[1]) return (index[0] + 1, index[1]) def conv_nbr_wall(dir): if dir == 'L': return 1 elif dir == 'R': return 0 elif dir == 'T': return 2 return 3 def conv_idx_dir(index, nbr_index): y = index[0] - nbr_index[0] x = index[1] - nbr_index[1] if x == 1: return 'R' if x == -1: return 'L' if y == 1: return 'T' if y == -1: return 'D' def print_grid(grid): for i in range(len(grid)): print("[", end="") for j in range(len(grid[i])): print(grid[i][j].walls, end=", ") print("]") def print_index(grid): for i in range(len(grid)): print("[", end="") for j in range(len(grid[i])): print(grid[i][j].index, end=", ") print("]") def print_visited(grid): for i in range(len(grid)): print("[", end="") for j in range(len(grid[i])): if grid[i][j].visited == True: print('X', end=", ") else: print('O', end=", ") print("]") def maze_index(index, dir): if dir == 0: return (index[0], index[1] - 1) elif dir == 1: return (index[0], index[1] + 1) elif dir == 2: return (index[0] - 1, index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image, index, direction, color=None): # set marking color to 255 (white) if none provided if color == None: color = 255 # assign the given color to the cell to mark it as active new_image[index[0], index[1]] = color if direction < 0: return new_image # find the index of the wall to break remove mark_as_white = maze_index(index, direction) # remove the wall (set it to the provided color) new_image[mark_as_white[0], mark_as_white[1]] = color return new_image def grid_to_image(index): return (index[0] * 2 + 1, index[1] * 2 + 1) def mark_change(idx, gif_arr, wall_idx, secondIdx = None, color = None): # mark one or two changes, algorithm specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color = None): if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous): #order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret = [] for i in range(4): #bounds checking x = curr.index[1] + ops[i][1] y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols) or (y,x) == previous.index: continue 
ret.append(grid[y][x]) return ret def print_maze(grid): maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1)) maze[:,:] = '@' for i in range(len(grid)): for j in range(len(grid[i])): for k in range(4): idx = maze_index((i * 2 + 1,j * 2 + 1), k) maze[i * 2 + 1, j * 2 + 1] = '+' if grid[i][j].walls[k] == 'X': if k == 0 or k == 1: maze[idx[0], idx[1]] = '-' else: maze[idx[0], idx[1]] = '|' for i in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=" ") print() def countNeighbors(grid, index, rows, cols): #order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for operations count = 0 for i in range(8): #bounds checking x = index[1] + ops[i][1] y = index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y,x] == 255: count += 1 return count def checkRules(grid, index, rule): c = countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in rule: if c == int(character): return True return False def start_cells(grid, y, x, random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] dirs = random.sample(ops, k=len(ops)) count = 0 for index in dirs: if count == len(dirs): break if not bounds_check((y + index[0], x + index[1]), grid.shape[0], grid.shape[1]): if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]: continue grid[y + index[0], x + index[1]] = 255 visited.add((y + index[0], x + index[1])) update_set(y + index[0], x + index[1], visited, grid, unvisited) count += 1 if count == 0: return False return True def check_visited(y, x, visited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if (y + index[0], x + index[1]) in visited: return True return False def update_set(y, x, all_nodes, grid, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]: continue all_nodes.add((y,x)) if (y,x) in unvisited: unvisited.remove((y,x))
[ "numpy.array_equal" ]
[((3427, 3462), 'numpy.array_equal', 'np.array_equal', (['newIMG', 'gif_arr[-1]'], {}), '(newIMG, gif_arr[-1])\n', (3441, 3462), True, 'import numpy as np\n'), ((3805, 3840), 'numpy.array_equal', 'np.array_equal', (['newIMG', 'gif_arr[-1]'], {}), '(newIMG, gif_arr[-1])\n', (3819, 3840), True, 'import numpy as np\n')]
import contextlib import logging import logging.config import random import time from pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy as np import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger("hp_transfer_aa_experiments.run") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith("eval_reference"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open("r") as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if step == 1 and args.runtype.type in ["eval_dim", "eval_reference"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f"Using {trials_per_task} trials per task") if step > 1 and args.runtype.type.startswith("eval_reference"): trials_until_loss = reference_losses[step][f"{args.runtype.dim_factor}_loss"] logger.info( f"Also performing trials until loss {trials_until_loss :.4f}" f" (max {10 * trials_per_task})" ) else: trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path("results"), args.experiment_group, f"results/{args.experiment_name.replace('/', ',')}.csv", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open("a") as result_stream: result_stream.write("\t".join([str(value) for value in batch_result_row]) + "\n") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith("transfer") previous_results = result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f"Running on task {task.identifier}") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == "reference" and step == 1: continue logger.info(f"Step ------- {step :04d}") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f"Using 
configspace\n{configspace}".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task = None # pylint: disable=unused-argument def compute( self, config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier = kwargs["task_identifier"] development_stage = kwargs["development_stage"] task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed: # Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if "development_step" in config: del config["development_step"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay to make sure the nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger("worker"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start a background worker for the master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger("worker"), ) w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger("master"), ) # Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path="configs", config_name="run") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general information logger.info(f"Using working_directory={working_directory}") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f"Commit hash: {git_info['commit']}") logger.info(f"Commit date: {git_info['author_date']}") logger.info(f"Arguments:\n{OmegaConf.to_yaml(args)}") # Construct benchmark if "data_path" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f"Run finished") if __name__ == "__main__": run() # pylint: disable=no-value-for-parameter
[ "omegaconf.OmegaConf.to_yaml", "hp_transfer_aa_experiments.analyse.read_results.get_batch_result_row", "numpy.random.seed", "hydra.utils.to_absolute_path", "hydra.utils.instantiate", "contextlib.suppress", "time.sleep", "hp_transfer_optimizers.core.result.TrajectoryResult", "pathlib.Path", "random.seed", "gitinfo.gitinfo.get_git_info", "hydra.main", "yaml.safe_load", "hp_transfer_optimizers.core.nameserver.nic_name_to_host", "logging.getLogger", "hp_transfer_optimizers.core.result.BatchResult" ]
[((608, 659), 'logging.getLogger', 'logging.getLogger', (['"""hp_transfer_aa_experiments.run"""'], {}), "('hp_transfer_aa_experiments.run')\n", (625, 659), False, 'import logging\n'), ((7222, 7274), 'hydra.main', 'hydra.main', ([], {'config_path': '"""configs"""', 'config_name': '"""run"""'}), "(config_path='configs', config_name='run')\n", (7232, 7274), False, 'import hydra\n'), ((2038, 2260), 'hp_transfer_aa_experiments.analyse.read_results.get_batch_result_row', 'get_batch_result_row', (['args.benchmark.name', 'args.runtype.dim_factor_pre_adjustment', 'args.approach.name', 'args.benchmark.benchmark.trajectory_id', 'args.benchmark.benchmark.adjustment_id', 'args.run_id', 'result_batch'], {}), '(args.benchmark.name, args.runtype.\n dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark\n .trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id,\n result_batch)\n', (2058, 2260), False, 'from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row\n'), ((2991, 3034), 'hp_transfer_optimizers.core.result.BatchResult', 'result_utils.BatchResult', (['step', 'configspace'], {}), '(step, configspace)\n', (3015, 3034), True, 'from hp_transfer_optimizers.core import result as result_utils\n'), ((3631, 3662), 'hp_transfer_optimizers.core.result.TrajectoryResult', 'result_utils.TrajectoryResult', ([], {}), '()\n', (3660, 3662), True, 'from hp_transfer_optimizers.core import result as result_utils\n'), ((5665, 5678), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5675, 5678), False, 'import time\n'), ((5763, 5799), 'hp_transfer_optimizers.core.nameserver.nic_name_to_host', 'hpns.nic_name_to_host', (['args.nic_name'], {}), '(args.nic_name)\n', (5784, 5799), True, 'from hp_transfer_optimizers.core import nameserver as hpns\n'), ((7022, 7039), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7033, 7039), False, 'import random\n'), ((7044, 7064), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7058, 7064), True, 'import numpy as np\n'), ((7933, 7982), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['args.benchmark.benchmark'], {}), '(args.benchmark.benchmark)\n', (7956, 7982), False, 'import hydra\n'), ((811, 867), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['args.reference_losses_path'], {}), '(args.reference_losses_path)\n', (839, 867), False, 'import hydra\n'), ((2342, 2381), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['"""results"""'], {}), "('results')\n", (2370, 2381), False, 'import hydra\n'), ((7457, 7487), 'contextlib.suppress', 'contextlib.suppress', (['TypeError'], {}), '(TypeError)\n', (7476, 7487), False, 'import contextlib\n'), ((7508, 7530), 'gitinfo.gitinfo.get_git_info', 'gitinfo.get_git_info', ([], {}), '()\n', (7528, 7530), False, 'from gitinfo import gitinfo\n'), ((7830, 7894), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['args.benchmark.benchmark.data_path'], {}), '(args.benchmark.benchmark.data_path)\n', (7858, 7894), False, 'import hydra\n'), ((961, 983), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (975, 983), False, 'import yaml\n'), ((5901, 5928), 'logging.getLogger', 'logging.getLogger', (['"""worker"""'], {}), "('worker')\n", (5918, 5928), False, 'import logging\n'), ((6505, 6532), 'logging.getLogger', 'logging.getLogger', (['"""worker"""'], {}), "('worker')\n", (6522, 6532), False, 'import logging\n'), ((6765, 6792), 'logging.getLogger', 'logging.getLogger', (['"""master"""'], {}), "('master')\n", 
(6782, 6792), False, 'import logging\n'), ((7340, 7346), 'pathlib.Path', 'Path', ([], {}), '()\n', (7344, 7346), False, 'from pathlib import Path\n'), ((7683, 7706), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['args'], {}), '(args)\n', (7700, 7706), False, 'from omegaconf import OmegaConf\n'), ((881, 908), 'pathlib.Path', 'Path', (['reference_losses_path'], {}), '(reference_losses_path)\n', (885, 908), False, 'from pathlib import Path\n')]
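A side note on the repeated `hydra.utils.to_absolute_path` calls in the script above: `hydra.main` runs the decorated function inside a per-run output directory, so any path given relative to the original invocation has to be resolved back explicitly. Below is a minimal sketch of that pattern, not a reproduction of the script; the `configs/run.yaml` config and the `data_path` key are stand-ins borrowed from it.

# Minimal hydra sketch; assumes a hypothetical configs/run.yaml defining a data_path key.
from pathlib import Path

import hydra
from omegaconf import DictConfig, OmegaConf


@hydra.main(config_path="configs", config_name="run")
def main(cfg: DictConfig) -> None:
    # Hydra (in the versions this script targets) switches the cwd to its run
    # directory, so relative CLI paths must be converted back to absolute ones.
    data_path = hydra.utils.to_absolute_path(cfg.data_path)
    print(OmegaConf.to_yaml(cfg))
    print("run dir:", Path.cwd(), "| data:", data_path)


if __name__ == "__main__":
    main()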
import numpy as np
import pytest
from packaging.utils import Version

import fast_numpy_loops

old_numpy = Version(np.__version__) < Version('1.18')


@pytest.fixture(scope='session')
def initialize_fast_numpy_loops():
    fast_numpy_loops.initialize()


@pytest.fixture(scope='function')
def rng():
    if old_numpy:
        class OldRNG(np.random.RandomState):
            pass
        rng = OldRNG(1234)
        rng.random = rng.random_sample
        rng.integers = rng.randint
        return rng
    else:
        return np.random.default_rng(1234)
[ "numpy.random.default_rng", "fast_numpy_loops.initialize", "pytest.fixture", "packaging.utils.Version" ]
[((150, 181), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (164, 181), False, 'import pytest\n'), ((253, 285), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (267, 285), False, 'import pytest\n'), ((106, 129), 'packaging.utils.Version', 'Version', (['np.__version__'], {}), '(np.__version__)\n', (113, 129), False, 'from packaging.utils import Version\n'), ((132, 147), 'packaging.utils.Version', 'Version', (['"""1.18"""'], {}), "('1.18')\n", (139, 147), False, 'from packaging.utils import Version\n'), ((221, 250), 'fast_numpy_loops.initialize', 'fast_numpy_loops.initialize', ([], {}), '()\n', (248, 250), False, 'import fast_numpy_loops\n'), ((522, 549), 'numpy.random.default_rng', 'np.random.default_rng', (['(1234)'], {}), '(1234)\n', (543, 549), True, 'import numpy as np\n')]
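For context, the two fixtures above are consumed by name: pytest injects `initialize_fast_numpy_loops` and `rng` into any test that lists them as parameters, and the old-numpy shim makes `rng.random`/`rng.integers` available either way. A hypothetical consumer (the assertion itself is not part of the original file):

# Hypothetical test using the fixtures defined above.
import numpy as np


def test_add_matches_numpy(initialize_fast_numpy_loops, rng):
    a = rng.random(1000)   # works with both the legacy RandomState shim and default_rng
    b = rng.random(1000)
    assert np.allclose(a + b, np.add(a, b))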
# -*- encoding: utf-8 -*-
def extract_feature_pixel(img, mask_1, mask_0=[], mask_2=[], mask_3=[], mask_4=[], mask_5=[], dim_prof=0):
    import numpy as np
    # Reads the image and masks, and returns a per-pixel array of features
    n = img.shape[dim_prof]
    # floor division so the pixel counts below are ints (valid array shapes)
    t1 = img[mask_1].size//n
    t0 = img[mask_0].size//n
    t2 = img[mask_2].size//n
    t3 = img[mask_3].size//n
    t4 = img[mask_4].size//n
    t5 = img[mask_5].size//n
    ones = np.ones((t1,1))
    eval1 = img[mask_1].reshape(n,-1).T
    atr_slice = np.concatenate((eval1,ones), axis=1)
    if mask_0!=[]:
        zeros = np.zeros((t0,1))
        eval0 = img[mask_0].reshape(n,-1).T
        atr0 = np.concatenate((eval0,zeros), axis=1)
        atr_slice = np.vstack([atr0,atr_slice])
    if mask_2!=[]:
        twos = np.ones((t2,1))*2
        eval2 = img[mask_2].reshape(n,-1).T
        atr2 = np.concatenate((eval2,twos), axis=1)
        atr_slice = np.vstack([atr_slice,atr2])
    if mask_3!=[]:
        threes = np.ones((t3,1))*3
        eval3 = img[mask_3].reshape(n,-1).T
        atr3 = np.concatenate((eval3,threes), axis=1)
        atr_slice = np.vstack([atr_slice,atr3])
    if mask_4!=[]:
        fours = np.ones((t4,1))*4
        eval4 = img[mask_4].reshape(n,-1).T
        atr4 = np.concatenate((eval4,fours), axis=1)
        atr_slice = np.vstack([atr_slice,atr4])
    if mask_5!=[]:
        fives = np.ones((t5,1))*5
        eval5 = img[mask_5].reshape(n,-1).T
        atr5 = np.concatenate((eval5,fives), axis=1)
        atr_slice = np.vstack([atr_slice,atr5])
    return atr_slice
[ "numpy.vstack", "numpy.zeros", "numpy.ones", "numpy.concatenate" ]
[((451, 467), 'numpy.ones', 'np.ones', (['(t1, 1)'], {}), '((t1, 1))\n', (458, 467), True, 'import numpy as np\n'), ((523, 560), 'numpy.concatenate', 'np.concatenate', (['(eval1, ones)'], {'axis': '(1)'}), '((eval1, ones), axis=1)\n', (537, 560), True, 'import numpy as np\n'), ((596, 613), 'numpy.zeros', 'np.zeros', (['(t0, 1)'], {}), '((t0, 1))\n', (604, 613), True, 'import numpy as np\n'), ((672, 710), 'numpy.concatenate', 'np.concatenate', (['(eval0, zeros)'], {'axis': '(1)'}), '((eval0, zeros), axis=1)\n', (686, 710), True, 'import numpy as np\n'), ((730, 758), 'numpy.vstack', 'np.vstack', (['[atr0, atr_slice]'], {}), '([atr0, atr_slice])\n', (739, 758), True, 'import numpy as np\n'), ((870, 907), 'numpy.concatenate', 'np.concatenate', (['(eval2, twos)'], {'axis': '(1)'}), '((eval2, twos), axis=1)\n', (884, 907), True, 'import numpy as np\n'), ((927, 955), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr2]'], {}), '([atr_slice, atr2])\n', (936, 955), True, 'import numpy as np\n'), ((1069, 1108), 'numpy.concatenate', 'np.concatenate', (['(eval3, threes)'], {'axis': '(1)'}), '((eval3, threes), axis=1)\n', (1083, 1108), True, 'import numpy as np\n'), ((1128, 1156), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr3]'], {}), '([atr_slice, atr3])\n', (1137, 1156), True, 'import numpy as np\n'), ((1269, 1307), 'numpy.concatenate', 'np.concatenate', (['(eval4, fours)'], {'axis': '(1)'}), '((eval4, fours), axis=1)\n', (1283, 1307), True, 'import numpy as np\n'), ((1327, 1355), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr4]'], {}), '([atr_slice, atr4])\n', (1336, 1355), True, 'import numpy as np\n'), ((1468, 1506), 'numpy.concatenate', 'np.concatenate', (['(eval5, fives)'], {'axis': '(1)'}), '((eval5, fives), axis=1)\n', (1482, 1506), True, 'import numpy as np\n'), ((1526, 1554), 'numpy.vstack', 'np.vstack', (['[atr_slice, atr5]'], {}), '([atr_slice, atr5])\n', (1535, 1554), True, 'import numpy as np\n'), ((793, 809), 'numpy.ones', 'np.ones', (['(t2, 1)'], {}), '((t2, 1))\n', (800, 809), True, 'import numpy as np\n'), ((992, 1008), 'numpy.ones', 'np.ones', (['(t3, 1)'], {}), '((t3, 1))\n', (999, 1008), True, 'import numpy as np\n'), ((1192, 1208), 'numpy.ones', 'np.ones', (['(t4, 1)'], {}), '((t4, 1))\n', (1199, 1208), True, 'import numpy as np\n'), ((1391, 1407), 'numpy.ones', 'np.ones', (['(t5, 1)'], {}), '((t5, 1))\n', (1398, 1407), True, 'import numpy as np\n')]
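A short usage sketch for `extract_feature_pixel`: each mask selects one labelled region (replicated across the bands stacked along `dim_prof`), and the returned array holds one row per pixel with the class label appended as the last column. The image and masks below are made up purely for illustration.

# Hypothetical call showing the output layout of extract_feature_pixel.
import numpy as np

img = np.random.rand(5, 64, 64)            # 5 feature bands along dim_prof=0
mask_1 = np.zeros(img.shape, dtype=bool)   # class-1 region, same in every band
mask_1[:, :32, :] = True
mask_0 = np.zeros(img.shape, dtype=bool)   # class-0 region
mask_0[:, 32:, :] = True

atr = extract_feature_pixel(img, mask_1, mask_0=mask_0)
X, y = atr[:, :-1], atr[:, -1]             # per-pixel features and class labels
print(X.shape, np.unique(y))               # (4096, 5) [0. 1.]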
#!/usr/bin/env python3

###################################
# Mastering ML Python Mini Course #
# Inspired by the project here:   #
# https://s3.amazonaws.com/MLMastery/machine_learning_mastery_with_python_mini_course.pdf?__s=mxhvphowryg2sfmzus2q #
# By <NAME>                       #
# Project will soon be found at:  #
# https://www.inertia7.com/projects/ #
####################################

# Welcome to my repo for the Mastering Machine Learning Python Mini Course
# Here I will be going through each part of the course
# So you can get a feel of the different parts

import numpy as np
import pandas as pd
from pandas import read_csv, Series
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier
from sklearn.model_selection import cross_val_score, KFold, train_test_split

# Define url and columns
url = 'https://goo.gl/bDdBiA'
columns = np.array(['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'])

# Read in data
data = read_csv(url, names = columns)
array = data.values

# Divide data into attributes and predictor
X = array[:, 0:8]
y = array[:, 8]

####################################
# Lesson 11: Improve Accuracy with Ensemble Methods
####################################

'''
Here in the course would have been a section to do some ensemble model training,
as it represents an extra layer on top of traditional models

But since I have already done this, I will instead invoke the one ensemble method
I haven't tried: The Voting Classifier

This method involves literally combining different models
(such as Logistic Regression + Decision Tree)
versus many of the same models
(many Decision Trees in a Random Forest or Gradient Boosted Machine)

Here I will try out a bunch of different things and see where it goes!
Will use cross validation metrics here, nothing too fancy
'''

# Make list for models
models = np.empty([3, 2], dtype = object)

# Voting ensembles
# Number 1: Hard Vote (Predicted class labels used for majority rule voting)
models[0] = ['Voting Classifier 1', VotingClassifier(estimators = [
    ('lr', LogisticRegression(random_state = 1)),
    ('gbm', GradientBoostingClassifier(random_state = 1)),],
    voting = 'hard')]

# Number 2: Soft Vote (Argmax of sums of predicted probabilities used)
# Recommended for ensemble of well-calibrated classifiers
models[1] = ['Voting Classifier 2', VotingClassifier(estimators = [
    ('lda', LinearDiscriminantAnalysis()),
    ('lr', LogisticRegression(random_state = 1))],
    voting = 'soft')]

# Number 3: Soft Vote with weights
# Some models will be more valuable than others
models[2] = ['Voting Classifier 3', VotingClassifier(estimators = [
    ('lr', LogisticRegression(random_state = 1)),
    ('gbm', GradientBoostingClassifier(random_state = 1)),],
    voting = 'soft', weights = (0.25, 0.75))]

# Iterate through models, then fit & evaluate
for name, model in models:
    k_fold = KFold(n_splits = 10, random_state = 1)
    for scoring in ('accuracy', 'roc_auc', 'neg_log_loss'):
        try:
            result = cross_val_score(model, X, y, cv = k_fold, scoring = scoring)
            if scoring == 'accuracy':
                print("\n%s of %s model:\n %.3f%% (+\-%.3f%%)" % (scoring, name,
                    result.mean() * 100.0, result.std() * 100.0))
            else:
                print("\n%s of %s model:\n %.3f (+\-%.3f)" % (scoring, name,
                    result.mean(), result.std()))
        except AttributeError:
            print("The %s model cannot perform cross validation with the %s metric" % (name, scoring))
[ "pandas.read_csv", "numpy.empty", "sklearn.model_selection.cross_val_score", "sklearn.model_selection.KFold", "sklearn.ensemble.GradientBoostingClassifier", "sklearn.linear_model.LogisticRegression", "numpy.array", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis" ]
[((963, 1049), 'numpy.array', 'np.array', (["['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']"], {}), "(['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age',\n 'class'])\n", (971, 1049), True, 'import numpy as np\n'), ((1069, 1097), 'pandas.read_csv', 'read_csv', (['url'], {'names': 'columns'}), '(url, names=columns)\n', (1077, 1097), False, 'from pandas import read_csv, Series\n'), ((1969, 1999), 'numpy.empty', 'np.empty', (['[3, 2]'], {'dtype': 'object'}), '([3, 2], dtype=object)\n', (1977, 1999), True, 'import numpy as np\n'), ((3016, 3050), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'random_state': '(1)'}), '(n_splits=10, random_state=1)\n', (3021, 3050), False, 'from sklearn.model_selection import cross_val_score, KFold, train_test_split\n'), ((3150, 3206), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'cv': 'k_fold', 'scoring': 'scoring'}), '(model, X, y, cv=k_fold, scoring=scoring)\n', (3165, 3206), False, 'from sklearn.model_selection import cross_val_score, KFold, train_test_split\n'), ((2178, 2212), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2196, 2212), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2229, 2271), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2255, 2271), False, 'from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier\n'), ((2510, 2538), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (2536, 2538), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((2552, 2586), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2570, 2586), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2777, 2811), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2795, 2811), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2828, 2870), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2854, 2870), False, 'from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier\n')]
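To make the hard/soft-vote distinction above concrete, here is a small worked sketch of how a weighted soft vote combines class probabilities; the numbers are toy values, not outputs from the dataset above.

# Toy illustration of weighted soft voting: class probabilities are averaged
# with the given weights and the argmax of the combined distribution wins.
import numpy as np

proba_lr  = np.array([0.60, 0.40])   # P(class 0), P(class 1) from model 1
proba_gbm = np.array([0.30, 0.70])   # same sample, model 2
weights = np.array([0.25, 0.75])     # as in 'Voting Classifier 3' above

combined = (weights[0] * proba_lr + weights[1] * proba_gbm) / weights.sum()
print(combined)           # [0.375 0.625]
print(combined.argmax())  # 1 -> the weighted soft vote picks class 1

# A hard vote would instead count predicted labels: model 1 says 0, model 2
# says 1, so the outcome would hinge on the ensemble's tie-breaking rule.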
import numpy as np

from kinematics import to_robot_velocities
from viz.env import Viz


class ControlSignalsViz(Viz):
    def __init__(self, marxbot, time_window=10):
        super().__init__()

        self.marxbot = marxbot
        self.marxbot_max_vel = 30
        self.time_window = time_window

    def _show(self, env):
        self.ax = env.get_axes()
        self.ax.set_title('Control signals over time')
        self.ax.set_xlabel("time [s]")
        self.ax.set_xlim(-self.time_window, 0)
        self.ax.grid(True)

        self.n_dims = 2
        self.n_samples = round(self.time_window / env.refresh_interval)

        self.time = np.linspace(-self.time_window, 0, self.n_samples)
        self.readings = np.full((self.n_dims, self.n_samples), np.nan)

        labels = ["linear velocity [cm/s]", "angular velocity [rad/s]"]
        colors = ["tab:blue", "tab:orange"]
        mins = [-self.marxbot_max_vel, -10]
        maxs = [+self.marxbot_max_vel, +10]

        self.plots = []
        for i in range(self.n_dims):
            ax = self.ax
            if i > 0:
                ax = ax.twinx()

            ax.set_ylabel(labels[i], color=colors[i])
            ax.tick_params(axis='y', labelcolor=colors[i])
            ax.tick_params(labelsize=8)

            plot = ax.plot(self.time, self.readings[i], color=colors[i])[0]
            ax.set_ylim(
                mins[i] - 0.1 * abs(mins[i]),
                maxs[i] + 0.1 * abs(maxs[i])
            )
            self.plots.append(plot)

    def _update(self):
        robot_velocities = to_robot_velocities(*self.marxbot.wheel_target_speeds)

        self.readings = np.roll(self.readings, -1, axis=1)
        self.readings[:, -1] = robot_velocities

        for i in range(self.n_dims):
            self.plots[i].set_ydata(self.readings[i])
[ "numpy.full", "kinematics.to_robot_velocities", "numpy.linspace", "numpy.roll" ]
[((648, 697), 'numpy.linspace', 'np.linspace', (['(-self.time_window)', '(0)', 'self.n_samples'], {}), '(-self.time_window, 0, self.n_samples)\n', (659, 697), True, 'import numpy as np\n'), ((722, 768), 'numpy.full', 'np.full', (['(self.n_dims, self.n_samples)', 'np.nan'], {}), '((self.n_dims, self.n_samples), np.nan)\n', (729, 768), True, 'import numpy as np\n'), ((1567, 1621), 'kinematics.to_robot_velocities', 'to_robot_velocities', (['*self.marxbot.wheel_target_speeds'], {}), '(*self.marxbot.wheel_target_speeds)\n', (1586, 1621), False, 'from kinematics import to_robot_velocities\n'), ((1647, 1681), 'numpy.roll', 'np.roll', (['self.readings', '(-1)'], {'axis': '(1)'}), '(self.readings, -1, axis=1)\n', (1654, 1681), True, 'import numpy as np\n')]
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

classes = ('beaver','dolphin','otter','seal','whale','aquarium fish','flatfish','ray','shark','trout','orchids','poppies','roses','sunflowers','tulips','bottles','bowls','cans','cups','plates','apples','mushrooms','oranges','pears','sweet peppers','clock','computer keyboard','lamp','telephone','television','bed','chair','couch','table','wardrobe','bee','beetle','butterfly','caterpillar','cockroach','bear','leopard','lion','tiger','wolf','bridge','castle','house','road','skyscraper','cloud','forest','mountain','plain','sea','camel','cattle','chimpanzee','elephant','kangaroo','fox','porcupine','possum','raccoon','skunk','crab','lobster','snail','spider','worm','baby','boy','girl','man','woman','crocodile','dinosaur','lizard','snake','turtle','hamster','mouse','rabbit','shrew','squirrel','maple','oak','palm','pine','willow','bicycle','bus','motorcycle','pickup truck','train','lawn-mower','rocket','streetcar','tank','tractor')


def _get_transform():
    return transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])


def get_train_data_loader():
    transform = _get_transform()
    trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
                                            download=True, transform=transform)
    return torch.utils.data.DataLoader(trainset, batch_size=4,
                                       shuffle=True, num_workers=2)


def get_test_data_loader():
    transform = _get_transform()
    testset = torchvision.datasets.CIFAR100(root='./data', train=False,
                                           download=True, transform=transform)
    return torch.utils.data.DataLoader(testset, batch_size=4,
                                       shuffle=False, num_workers=2)


# function to show an image
def imshow(img):
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
[ "torch.utils.data.DataLoader", "numpy.transpose", "torchvision.datasets.CIFAR100", "torchvision.transforms.Normalize", "torchvision.transforms.ToTensor" ]
[((1293, 1389), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (1322, 1389), False, 'import torchvision\n'), ((1437, 1522), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=4, shuffle=True, num_workers=2\n )\n', (1464, 1522), False, 'import torch\n'), ((1641, 1738), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (1670, 1738), False, 'import torchvision\n'), ((1785, 1870), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(4)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=4, shuffle=False, num_workers=2\n )\n', (1812, 1870), False, 'import torch\n'), ((2039, 2069), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (2051, 2069), True, 'import numpy as np\n'), ((1125, 1146), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1144, 1146), True, 'import torchvision.transforms as transforms\n'), ((1153, 1207), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1173, 1207), True, 'import torchvision.transforms as transforms\n')]
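The helpers above follow the standard torchvision loader pattern; a short usage sketch showing how `imshow` and `classes` would typically be combined with one batch from the train loader (the plotting sequence below is an assumption about intended use, not part of the original file):

# Hypothetical usage of the loaders and imshow helper defined above.
import torchvision
import matplotlib.pyplot as plt

trainloader = get_train_data_loader()
images, labels = next(iter(trainloader))        # one batch of 4 images

imshow(torchvision.utils.make_grid(images))    # grid is unnormalized inside imshow
print(' '.join(classes[labels[j]] for j in range(4)))
plt.show()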
import os
import sys

ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
if ROOT_DIR not in sys.path:
    sys.path.append(ROOT_DIR)

import numpy as np
import tensorflow as tf

from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer

"""
Test for activity triggered analysis
NOTE: Should be executed from the repository's root directory
"""


class ActivityTriggeredAverageTest(tf.test.TestCase):
    def testBasic(self):
        rand_state = np.random.RandomState(1234)
        rand_mean = 2.0
        rand_var = 10
        num_images = 50
        num_pixels = 12
        num_neurons = 24
        base_analyzer = Analyzer()
        model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons))
        images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels])
        # Batch size is greater than num images (shouldn't use batches)
        batch_size = 100
        atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
        # Batch size is less than num images, but divides evenly
        batch_size = 10
        atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
        # Batch size is less than num_images, but does not divide evenly
        batch_size = 13
        atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
        self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06)
        self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06)


if __name__ == "__main__":
    tf.test.main()
[ "sys.path.append", "tensorflow.test.main", "os.getcwd", "numpy.random.RandomState", "DeepSparseCoding.tf1x.analysis.base_analyzer.Analyzer", "numpy.dot" ]
[((108, 133), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (123, 133), False, 'import sys\n'), ((1484, 1498), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1496, 1498), True, 'import tensorflow as tf\n'), ((65, 76), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (74, 76), False, 'import os\n'), ((448, 475), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (469, 475), True, 'import numpy as np\n'), ((596, 606), 'DeepSparseCoding.tf1x.analysis.base_analyzer.Analyzer', 'Analyzer', ([], {}), '()\n', (604, 606), False, 'from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer\n'), ((928, 957), 'numpy.dot', 'np.dot', (['images', 'model_weights'], {}), '(images, model_weights)\n', (934, 957), True, 'import numpy as np\n'), ((1101, 1130), 'numpy.dot', 'np.dot', (['images', 'model_weights'], {}), '(images, model_weights)\n', (1107, 1130), True, 'import numpy as np\n'), ((1282, 1311), 'numpy.dot', 'np.dot', (['images', 'model_weights'], {}), '(images, model_weights)\n', (1288, 1311), True, 'import numpy as np\n')]
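The test above only checks that `compute_atas` is invariant to batch size; the actual implementation lives in the analyzer class. As a rough illustration of why batching should not change the result, here is a generic batched activity-triggered average in which per-batch sums are accumulated and the normalization happens once at the end. This is an assumption about the idea being tested, not DeepSparseCoding's code.

# Generic sketch of a batched activity-triggered average (ATA).
import numpy as np

def batched_ata(images, activities, batch_size):
    num_images, num_pixels = images.shape
    num_neurons = activities.shape[1]
    weighted_sum = np.zeros((num_pixels, num_neurons))
    for start in range(0, num_images, batch_size):
        img_b = images[start:start + batch_size]
        act_b = activities[start:start + batch_size]
        weighted_sum += img_b.T @ act_b    # accumulate activity-weighted images
    return weighted_sum / num_images       # normalization is independent of batching

rng = np.random.RandomState(0)
imgs = rng.normal(size=(50, 12))
acts = rng.normal(size=(50, 24))
assert np.allclose(batched_ata(imgs, acts, 100), batched_ata(imgs, acts, 13))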
import numpy as np
import matplotlib.pyplot as plt

def spectrum(f, x):
    # Discrete Fourier transform
    A = np.fft.rfft(f(x))
    A_amplitude = np.abs(A)
    # Compute the corresponding frequencies
    dx = x[1] - x[0]
    freqs = np.linspace(0, np.pi/dx, A_amplitude.size)
    # floor division so the slice index is an int under Python 3
    plt.plot(freqs[:len(freqs)//2], A_amplitude[:len(freqs)//2])

# Mesh
L = 10; Nx = 100
x = np.linspace(0, L, Nx+1)

spectrum(lambda x: np.where(x < 5, 1, 0), x)
spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x)
s = 0.5
spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x)

def f(x):
    r = np.zeros_like(x)
    r[len(x)//2] = 1   # floor division: the index must be an int
    return r

spectrum(f, x)

figfile = 'tmp'
plt.legend(['step', '2sin', 'gauss', 'peak'])
plt.savefig(figfile + '.pdf')
plt.savefig(figfile + '.png')
plt.show()
[ "numpy.zeros_like", "numpy.abs", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "numpy.where", "numpy.exp", "numpy.linspace", "matplotlib.pyplot.savefig", "numpy.sqrt" ]
[((373, 398), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (384, 398), True, 'import numpy as np\n'), ((707, 752), 'matplotlib.pyplot.legend', 'plt.legend', (["['step', '2sin', 'gauss', 'peak']"], {}), "(['step', '2sin', 'gauss', 'peak'])\n", (717, 752), True, 'import matplotlib.pyplot as plt\n'), ((753, 782), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figfile + '.pdf')"], {}), "(figfile + '.pdf')\n", (764, 782), True, 'import matplotlib.pyplot as plt\n'), ((783, 812), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figfile + '.png')"], {}), "(figfile + '.png')\n", (794, 812), True, 'import matplotlib.pyplot as plt\n'), ((813, 823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (821, 823), True, 'import matplotlib.pyplot as plt\n'), ((149, 158), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (155, 158), True, 'import numpy as np\n'), ((237, 281), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / dx)', 'A_amplitude.size'], {}), '(0, np.pi / dx, A_amplitude.size)\n', (248, 281), True, 'import numpy as np\n'), ((624, 640), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (637, 640), True, 'import numpy as np\n'), ((417, 438), 'numpy.where', 'np.where', (['(x < 5)', '(1)', '(0)'], {}), '(x < 5, 1, 0)\n', (425, 438), True, 'import numpy as np\n'), ((572, 611), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - L / 2.0) / s) ** 2)'], {}), '(-0.5 * ((x - L / 2.0) / s) ** 2)\n', (578, 611), True, 'import numpy as np\n'), ((552, 570), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (559, 570), True, 'import numpy as np\n')]
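One detail worth checking in `spectrum` is the frequency axis: `np.linspace(0, np.pi/dx, A_amplitude.size)` spans angular frequencies from 0 up to the Nyquist value pi/dx, which agrees with numpy's own `np.fft.rfftfreq` (converted to angular frequency) up to the endpoint convention. A standalone cross-check sketch, reusing the same mesh parameters but not part of the original script:

# Cross-check of the angular frequency axis used in spectrum() against rfftfreq.
import numpy as np

L, Nx = 10, 100
x = np.linspace(0, L, Nx + 1)
dx = x[1] - x[0]

n_bins = np.fft.rfft(np.sin(x)).size               # same length as A_amplitude
freqs_linspace = np.linspace(0, np.pi / dx, n_bins)
freqs_rfft = 2 * np.pi * np.fft.rfftfreq(Nx + 1, d=dx)

print(freqs_linspace[-1], freqs_rfft[-1])        # ~31.42 vs ~31.10 for dx = 0.1
print(np.max(np.abs(freqs_linspace - freqs_rfft)))  # ~1% mismatch at the high end:
# linspace pins the last bin to exactly pi/dx, which rfftfreq only reaches for
# an even number of samples.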
# -*- coding: utf-8 -*- #VecMap0.1 #The first versio of VecMap from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import * from PyQt5.QtCore import * import matplotlib matplotlib.use('Qt5Agg') from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from matplotlib.figure import Figure class Ui_VecMap(QtWidgets.QMainWindow): def __init__(self): super(Ui_VecMap,self).__init__() self.setupUi(self) self.retranslateUi(self) def setupUi(self, VecMap): VecMap.setObjectName("VecMap") VecMap.resize(402, 876) VecMap.setMinimumSize(QtCore.QSize(402, 836)) VecMap.setMaximumSize(QtCore.QSize(1024, 1024)) self.pushButton = QtWidgets.QPushButton(VecMap) self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41)) self.pushButton.setObjectName("pushButton") self.checkBox = QtWidgets.QCheckBox(VecMap) self.checkBox.setGeometry(QtCore.QRect(150, 10, 111, 20)) self.checkBox.setObjectName("checkBox") self.line = QtWidgets.QFrame(VecMap) self.line.setGeometry(QtCore.QRect(20, 90, 371, 21)) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.label = QtWidgets.QLabel(VecMap) self.label.setGeometry(QtCore.QRect(20, 10, 121, 16)) self.label.setObjectName("label") self.label_2 = QtWidgets.QLabel(VecMap) self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51)) self.label_2.setTextFormat(QtCore.Qt.AutoText) self.label_2.setScaledContents(False) self.label_2.setWordWrap(True) self.label_2.setObjectName("label_2") self.lineEdit = QtWidgets.QLineEdit(VecMap) self.lineEdit.setGeometry(QtCore.QRect(130, 130, 30, 20)) self.lineEdit.setObjectName("lineEdit") self.label_3 = QtWidgets.QLabel(VecMap) self.label_3.setGeometry(QtCore.QRect(20, 110, 191, 16)) self.label_3.setObjectName("label_3") self.label_4 = QtWidgets.QLabel(VecMap) self.label_4.setGeometry(QtCore.QRect(20, 130, 111, 16)) self.label_4.setObjectName("label_4") self.pushButton_2 = QtWidgets.QPushButton(VecMap) self.pushButton_2.setGeometry(QtCore.QRect(20, 170, 91, 41)) self.pushButton_2.setObjectName("pushButton_2") self.pushButton_3 = QtWidgets.QPushButton(VecMap) self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 91, 41)) self.pushButton_3.setObjectName("pushButton_3") self.label_5 = QtWidgets.QLabel(VecMap) self.label_5.setGeometry(QtCore.QRect(130, 160, 251, 51)) self.label_5.setTextFormat(QtCore.Qt.AutoText) self.label_5.setScaledContents(False) self.label_5.setWordWrap(True) self.label_5.setObjectName("label_5") self.label_6 = QtWidgets.QLabel(VecMap) self.label_6.setGeometry(QtCore.QRect(130, 230, 251, 51)) self.label_6.setTextFormat(QtCore.Qt.AutoText) self.label_6.setScaledContents(False) self.label_6.setWordWrap(True) self.label_6.setObjectName("label_6") self.line_2 = QtWidgets.QFrame(VecMap) self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21)) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.label_9 = QtWidgets.QLabel(VecMap) self.label_9.setGeometry(QtCore.QRect(20, 300, 191, 16)) self.label_9.setObjectName("label_9") self.checkBox_2 = QtWidgets.QCheckBox(VecMap) self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20)) self.checkBox_2.setObjectName("checkBox_2") self.checkBox_3 = QtWidgets.QCheckBox(VecMap) self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20)) self.checkBox_3.setObjectName("checkBox_3") self.pushButton_4 = 
QtWidgets.QPushButton(VecMap) self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91, 41)) self.pushButton_4.setObjectName("pushButton_4") self.label_10 = QtWidgets.QLabel(VecMap) self.label_10.setGeometry(QtCore.QRect(130, 360, 251, 51)) self.label_10.setTextFormat(QtCore.Qt.AutoText) self.label_10.setScaledContents(False) self.label_10.setWordWrap(True) self.label_10.setObjectName("label_10") self.checkBox_4 = QtWidgets.QCheckBox(VecMap) self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20)) self.checkBox_4.setObjectName("checkBox_4") self.line_3 = QtWidgets.QFrame(VecMap) self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21)) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.label_11 = QtWidgets.QLabel(VecMap) self.label_11.setGeometry(QtCore.QRect(20, 440, 191, 16)) self.label_11.setObjectName("label_11") self.label_12 = QtWidgets.QLabel(VecMap) self.label_12.setGeometry(QtCore.QRect(170, 130, 191, 16)) self.label_12.setObjectName("label_12") self.label_14 = QtWidgets.QLabel(VecMap) self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16)) self.label_14.setObjectName("label_14") self.lineEdit_4 = QtWidgets.QLineEdit(VecMap) self.lineEdit_4.setGeometry(QtCore.QRect(20, 550, 251, 22)) self.lineEdit_4.setObjectName("lineEdit_4") self.label_15 = QtWidgets.QLabel(VecMap) self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16)) self.label_15.setObjectName("label_15") self.label_16 = QtWidgets.QLabel(VecMap) self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16)) self.label_16.setObjectName("label_16") self.label_17 = QtWidgets.QLabel(VecMap) self.label_17.setGeometry(QtCore.QRect(20, 600, 181, 16)) self.label_17.setObjectName("label_17") self.lineEdit_5 = QtWidgets.QLineEdit(VecMap) self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22)) self.lineEdit_5.setObjectName("lineEdit_5") self.pushButton_5 = QtWidgets.QPushButton(VecMap) self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91)) self.pushButton_5.setObjectName("pushButton_5") self.pushButton_6 = QtWidgets.QPushButton(VecMap) self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41)) self.pushButton_6.setObjectName("pushButton_6") self.label_18 = QtWidgets.QLabel(VecMap) self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51)) self.label_18.setTextFormat(QtCore.Qt.AutoText) self.label_18.setScaledContents(False) self.label_18.setWordWrap(True) self.label_18.setObjectName("label_18") self.pushButton_7 = QtWidgets.QPushButton(VecMap) self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51)) self.pushButton_7.setObjectName("pushButton_7") self.line_4 = QtWidgets.QFrame(VecMap) self.line_4.setGeometry(QtCore.QRect(20, 730, 371, 21)) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.pushButton_8 = QtWidgets.QPushButton(VecMap) self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28)) self.pushButton_8.setObjectName("pushButton_8") self.label_19 = QtWidgets.QLabel(VecMap) self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16)) self.label_19.setObjectName("label_19") self.label_20 = QtWidgets.QLabel(VecMap) self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16)) self.label_20.setObjectName("label_20") self.pushButton_9 = QtWidgets.QPushButton(VecMap) self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28)) self.pushButton_9.setObjectName("pushButton_9") self.pushButton_10 = QtWidgets.QPushButton(VecMap) 
self.pushButton_10.setGeometry(QtCore.QRect(20, 810, 120, 28)) self.pushButton_10.setObjectName("pushButton_10") self.pushButton_11 = QtWidgets.QPushButton(VecMap) self.pushButton_11.setGeometry(QtCore.QRect(150, 810, 120, 28)) self.pushButton_11.setObjectName("pushButton_11") self.pushButton_12 = QtWidgets.QPushButton(VecMap) self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58)) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.pushButton_12.setFont(font) self.pushButton_12.setObjectName("pushButton_12") self.radioButton = QtWidgets.QRadioButton(VecMap) self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20)) self.radioButton.setChecked(True) self.radioButton.setObjectName("radioButton") self.radioButton_2 = QtWidgets.QRadioButton(VecMap) self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20)) self.radioButton_2.setObjectName("radioButton_2") self.label_21 = QtWidgets.QLabel(VecMap) self.label_21.setGeometry(QtCore.QRect(20, 460, 171, 16)) self.label_21.setObjectName("label_21") self.pushButton_13 = QtWidgets.QPushButton(VecMap) self.pushButton_13.setGeometry(QtCore.QRect(200, 460, 81, 51)) self.pushButton_13.setObjectName("pushButton_13") self.label_7 = QtWidgets.QLabel(VecMap) self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16)) self.label_7.setObjectName("label_7") self.lineEdit_2 = QtWidgets.QLineEdit(VecMap) self.lineEdit_2.setGeometry(QtCore.QRect(60, 650, 30, 20)) self.lineEdit_2.setObjectName("lineEdit_2") self.pushButton_14 = QtWidgets.QPushButton(VecMap) self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41)) self.pushButton_14.setObjectName("pushButton_14") self.lineEdit_3 = QtWidgets.QLineEdit(VecMap) self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20)) self.lineEdit_3.setObjectName("lineEdit_3") self.label_13 = QtWidgets.QLabel(VecMap) self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16)) self.label_13.setObjectName("label_13") self.checkBox_5 = QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName("checkBox_5") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap) #=======Connect all the functions============================================= self.pushButton.clicked.connect(self.openfile) self.pushButton_2.clicked.connect(self.ini_atom_position) self.pushButton_3.clicked.connect(self.find_separation) self.pushButton_4.clicked.connect(self.refine_atom_position) self.pushButton_13.clicked.connect(self.cal_disp) self.pushButton_5.clicked.connect(self.vec_ang_dist) self.pushButton_6.clicked.connect(self.show_vec_map) self.pushButton_14.clicked.connect(self.show_O_vec_map) self.pushButton_7.clicked.connect(self.load_from_csv) self.pushButton_8.clicked.connect(self.disclaimer) self.pushButton_9.clicked.connect(self.show_about) self.pushButton_10.clicked.connect(self.acknowledgments) self.pushButton_11.clicked.connect(self.show_contact) self.pushButton_12.clicked.connect(self.donate) def retranslateUi(self, VecMap): _translate = QtCore.QCoreApplication.translate VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1")) #VecMap.setWindowIcon(QtGui.QIcon('icon.png')) self.pushButton.setText(_translate("VecMap", "Load Image")) self.checkBox.setText(_translate("VecMap", "ABF/BF image")) self.label.setText(_translate("VecMap", "Step 1. Load image")) self.label_2.setText(_translate("VecMap", "<html><head/><body><p>Load a HR-STEM image with a perovskite structure. Support [001] and [011] zone axes. 
Filtered image is preferred.</p><p><br/></p></body></html>")) self.lineEdit.setText(_translate("VecMap", "8")) self.label_3.setText(_translate("VecMap", "Step 2. Initialize atom positions")) self.label_4.setText(_translate("VecMap", "Separation factor")) self.pushButton_2.setText(_translate("VecMap", "Initialize")) self.pushButton_3.setText(_translate("VecMap", "Find \n" "separation")) self.label_5.setText(_translate("VecMap", "<html><head/><body><p>Input an appropriate separation factor to initialize the atom positions for refining. Adding/removing atoms by left-click.</p></body></html>")) self.label_6.setText(_translate("VecMap", "<html><head/><body><p>Try a few separation factors around the given number to determine the best separation factor.</p></body></html>")) self.label_9.setText(_translate("VecMap", "Step 3. Refine atom positions")) self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen")) self.checkBox_3.setText(_translate("VecMap", "Save result plots")) self.pushButton_4.setText(_translate("VecMap", "Refine")) self.label_10.setText(_translate("VecMap", "<html><head/><body><p>Refine atom positions. Check [001] or [011] zone. Only check Refine Oxygen if O columns are visible.</p></body></html>")) self.checkBox_4.setText(_translate("VecMap", "[011] Zone")) self.label_11.setText(_translate("VecMap", "Step 4. Generate a vector map")) self.label_12.setText(_translate("VecMap", "e.g., something around 8-12")) self.label_14.setText(_translate("VecMap", "List of angles (degrees) of vectors that will be colored differently:")) self.lineEdit_4.setText(_translate("VecMap", "45")) self.label_15.setText(_translate("VecMap", "e.g., 45 135 225 315")) self.label_16.setText(_translate("VecMap", "List of colors (should match the angles):")) self.label_17.setText(_translate("VecMap", "e.g., yellow blue red green")) self.lineEdit_5.setText(_translate("VecMap", "yellow")) self.pushButton_5.setText(_translate("VecMap", "Vector angle\n" "distrubution")) self.pushButton_6.setText(_translate("VecMap", "Show \n" "map")) self.label_18.setText(_translate("VecMap", "<html><head/><body><p>Generate a vector map. Set the coloring pattern by checking the vector angle distribution.</p></body></html>")) self.pushButton_7.setText(_translate("VecMap", "Load from csv")) self.pushButton_8.setText(_translate("VecMap", "Disclaimer")) self.label_19.setText(_translate("VecMap", "VecMap 0.1.1 Released: 06/13/2020 by Dr. <NAME>")) self.label_20.setText(_translate("VecMap", "Check here for more information!")) self.pushButton_9.setText(_translate("VecMap", "About")) self.pushButton_10.setText(_translate("VecMap", "Acknoledgments")) self.pushButton_11.setText(_translate("VecMap", "Contact")) self.pushButton_12.setText(_translate("VecMap", "Donate me!")) self.radioButton.setText(_translate("VecMap", "A-site")) self.radioButton_2.setText(_translate("VecMap", "B-site")) self.label_21.setText(_translate("VecMap", "Select which site to calculate")) self.pushButton_13.setText(_translate("VecMap", "Calculate")) self.label_7.setText(_translate("VecMap", "Scale:")) self.lineEdit_2.setText(_translate("VecMap", "10")) self.pushButton_14.setText(_translate("VecMap", "Oxygen\n" " map")) self.lineEdit_3.setText(_translate("VecMap", "6")) self.label_13.setText(_translate("VecMap", "Scale:")) self.checkBox_5.setText(_translate("VecMap", "Scale bar")) #===== Open file and set up global variables such as path etc. 
====================== #===== Connected to self.pushButton ================================================= def openfile(self): openfile_name = QFileDialog.getOpenFileName(self,'Select Image','','DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)') global file, my_path, file_path, title, scale, units, s, image, ABF, img_110 file = openfile_name[0] if self.checkBox.isChecked(): #Set ABF toggle from the checkbox ABF = 1 else: ABF = 0 if self.checkBox_4.isChecked(): img_110 = 1 else: img_110 = 0 if file: print('{} has been loaded!'.format(file)) my_path = getDirectory(file) #Set the working path file_path = getDirectory(file, '/') #Set the parent path if not os.path.exists(my_path): os.makedirs(my_path) s = readImage(file) title = s.metadata.General.title scale = s.axes_manager[0].scale #Read scale data from the image units = s.axes_manager[0].units #Read units s.save(my_path + 'Original image.hspy', overwrite=True) #Save a backup file in hspy format image = s.data if ABF == 1: s.data = np.divide(1, s.data) #Inverse the ABF contrast to make a ADF-like image # Draw an image global f_original_img f_original_img = PlotCanvas() f_original_img.setWindowTitle(file) f_original_img.axes.imshow(image) f_original_img.axes.set_axis_off() f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title)) f_original_img.show() #==== Initialize atom position module =============================================== #==== Connected to self.pushButton_2 ================================================ def ini_atom_position(self): sep = int(self.lineEdit.text()) try: A_positions_ini = get_atom_positions(s,separation=sep) global A_positions, f_ini A_positions = A_positions_ini.tolist() f_ini = PlotCanvas() f_ini.setWindowTitle('Initial atom positions for refining') f_ini.axes.imshow(s.data) f_ini.axes.set_axis_off() f_ini.axes.set_title('Left click to add or remove atoms') f_ini.show() def onclick(event): if event.inaxes != f_ini.axes: return if event.button == 1: # Left mouse button x = np.float(event.xdata) y = np.float(event.ydata) atom_nearby = closest_node((x,y), A_positions)[0] if distance.euclidean((x,y), A_positions[atom_nearby]) > 5: A_positions.append([x, y]) else: A_positions.pop(atom_nearby) replot(f_ini) def get_xy_pos_lists(atom_lst): return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1] def replot(f): x_pos, y_pos = get_xy_pos_lists(A_positions) dp.set_xdata(x_pos) dp.set_ydata(y_pos) f.fig.canvas.draw() f.fig.canvas.flush_events() xy_positions = get_xy_pos_lists(A_positions) dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='') cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick) except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("Please load the image file first!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() #==== Find separation module ======================================================== #==== Connected to self.pushButton_3 ================================================ def find_separation(self): #sep_range = (int(self.lineEdit_2.text()), int(self.lineEdit_3.text())) #s_peaks=am.get_feature_separation(s, separation_range=sep_range) #Range might be changed for different images #s_peaks.metadata.General.title = 'Use Arrow keys to find an appropriate separation factor' #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True) sep = int(self.lineEdit.text()) sep_range = list(range(sep - 4, sep 
+ 5)) # Create canvas for drawing try: global f_sep f_sep = SeparationCanvas() for i in range(9): s_factor = sep - 4 + i f_sep.axes[i].set_aspect('equal') f_sep.axes[i].set_axis_off() if s_factor < 1: continue ini_position = get_atom_positions(s, separation=s_factor) f_sep.axes[i].imshow(s.data) f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r') f_sep.axes[i].set_title('Separation = {}'.format(s_factor)) f_sep.show() except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("Please load the image file first!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() #==== Refine atom position module =================================================== #==== Connected to self.pushButton_4 ================================================ def refine_atom_position(self): #Global variables: global ap_A, ap_B, ap_O, Ua, Uc, find_O #Read checkboxes if self.checkBox_2.isChecked(): find_O = 1 else: find_O = 0 if self.checkBox_3.isChecked(): plotpos = 1 else: plotpos = 0 try: #Refine atom positions print('='*50) print('Refining atom positions for A-site atoms...') print('This may take time...') sublattice_A = find_atom(s.data, A_positions, 'A-site atoms') print('Refining A-site atoms done!') ap_A = sublattice_A.atom_positions #Refined atoms positions for A-site. NumPy array. #lattice_list = [] #lattice_list.append(sublattice_A) print('='*50) print('Finding the initial positions for B-site atoms...') sublattice_A.construct_zone_axes() #Find the zone axis for the initial position of B: typically 3 for [001] and 1 for [110] if img_110 == 1: zone_axis = sublattice_A.zones_axis_average_distances[1] else: zone_axis = sublattice_A.zones_axis_average_distances[2] #Calculate lattice parameter z0 = sublattice_A.zones_axis_average_distances[0] z1 = sublattice_A.zones_axis_average_distances[1] Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale print('='*50) print('Estimated lattice parameters (average) from the image:') print('a = {:.3f} {}'.format(Ua, units)) print('c = {:.3f} {}'.format(Uc, units)) B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis) #Reomve A-site atoms from the image print('='*50) print('Subtracting sublattice A from the image using 2D gaussian fit...') print('This may take time...') image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False) #Refine B-site atoms print('='*50) print('Refining atom positions for sublattice B...') print('Almost there...') sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue') ap_B = sublattice_B.atom_positions ##Refined atoms positions for B-site. NumPy array. 
print('Refining B-site atoms done!') #lattice_list.append(sublattice_B) #Find the position of O atoms if find_O == 1: #Find initial positions for O AB_positions = ap_A.tolist() + ap_B.tolist() sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A + B') sublattice_AB.construct_zone_axes() zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]#Only work for [001] currently O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O print('='*50) print('Subtracting sublattice A and B from the image using 2D gaussian fit...') print('This may take time...') image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and B from the original image #Refine O positions print('='*50) print('Refining atom positions for sublattice O...') sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g') ap_O = sublattice_O.atom_positions #Refined atoms positions for O. NumPy array. print('Refining O atoms done!') #lattice_list.append(sublattice_O) print('Refining atoms done!') #Construct atom position results with sublattice A and B. #atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list) #Save the refined positions and original image as hdf5 file. This file can be called later. #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True) #======================= #Plot and save figures #======================= if plotpos == 1: print('='*50) print('Saving result plots...') global f_A_site, f_B_site, f_AB #Plot A-site atom positions with the original image overlayed. f_A_site = PlotCanvas() f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms') f_A_site.axes.imshow(image) f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_A_site.axes.set_axis_off() f_A_site.show() f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot B-site atom positions with the original image overlayed. f_B_site = PlotCanvas() f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms') f_B_site.axes.imshow(image) f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_B_site.axes.set_axis_off() f_B_site.show() f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot both A-site and B-site on the image f_AB = PlotCanvas() f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms') f_AB.axes.imshow(image) f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_AB.axes.set_axis_off() f_AB.show() f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot O atoms if available if find_O == 1: global f_O_site, f_all f_O_site = PlotCanvas() f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms') f_O_site.axes.imshow(image) f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g') f_O_site.axes.set_axis_off() f_O_site.show() f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot all the atoms on the image f_all = PlotCanvas() f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. 
O atoms') f_all.axes.imshow(image) f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g') f_all.axes.set_axis_off() f_all.show() f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif',dpi=600,bbox_inches='tight') if plotpos == 1: print('All figures have been saved to '+ my_path) except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("Please initialize the atom positions first!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() #==================== Calculate displacement module ================================= #==================== Connected to self.pushButton_13 =============================== def cal_disp(self): try: #Global variables global U_avg, disp, disp_O, disp_atom # Read cal_site from the radio button # 0 to calculate A site in relative to B site; 1 to calculate B site in relative to A site if self.radioButton.isChecked(): cal_site = 0 if self.radioButton_2.isChecked(): cal_site = 1 cal_110 = img_110 #If the input image is [110], turn this on. O map is not supported for [110] yet. O_map = find_O #If enabled, will calculate the displacement of O atoms in relation to sublattice B. U_avg = (Ua + Uc)/2 #Unit cell parameter estimated from the image. #========================================================================= #The main scripts start from here if cal_site == 0:#Calculate A site disp_atom = 'A-site' rel_atom = 'B-site' ap_0 = ap_A.tolist() ap_1 = ap_B.tolist() else: disp_atom = 'B-site' rel_atom = 'A-site' ap_0 = ap_B.tolist() ap_1 = ap_A.tolist() print('='*50) print('====Calculate {} in relative to {}===='.format(disp_atom, rel_atom)) ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale) disp = find_displacement(ap_0, ideal_pos, scale) #Save the displacement data with open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w') as disp_data: disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n') for data in disp: disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\n') #Save the neigboring atoms as well with open(my_path + 'neighboring atoms.csv','w') as neighbor_data: for data in neighbor_pos: n = len(data) for idx in range(n): neighbor_data.write('{0}, {1}, '.format(*data[idx])) neighbor_data.write('\n') #Calculate O map and save if O_map == 1: ap_2 = ap_O.tolist() ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale) disp_O = find_displacement(ap_2, ideal_O_pos, scale) with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom),'w') as disp_data: disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n') for data in disp_O: disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\n') print('Atomic displacement data saved to ' + my_path + title + '-disp.csv.') except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("Please refine the atom positions first!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() #======== Display angle distribution of the vectors module =========================== #======== Connected to self.pushButton_5 ============================================= def vec_ang_dist(self): try: disp_angles = [lst[5] for lst in disp] global f_vec_ang_dist f_vec_ang_dist = PlotCanvas() f_vec_ang_dist.setWindowTitle('Histogram of Displacement 
Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)') f_vec_ang_dist.axes.set_xticks(list(range(0,390,30))) f_vec_ang_dist.axes.set_ylabel('Frequency') f_vec_ang_dist.axes.set_title('Put your cursor on the peak(s) to see the\n displacement directions') f_vec_ang_dist.show() except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("Please calculate the displacement first!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() print('') #========= Generate vector map module ============================================= #========= Connected to self.pushButton_6 =========================================== def show_vec_map(self): a_len = int(self.lineEdit_2.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0 try: # Read from lineEdits: ang_lst = str(self.lineEdit_4.text()).split() #A list of displacement directions. This is used to determine the coloring pattern. For single color rendering, just leave it as [0]. ang_lst = [int(a) for a in ang_lst] color_lst = str(self.lineEdit_5.text()).split() #====Plot==== disp_color = set_arrow_color(disp, ang_lst, color_lst) global f_vec_map f_vec_map = PlotCanvas() f_vec_map.setWindowTitle('VecMap0.1: Vector Map') f_vec_map.axes.imshow(image) f_vec_map.axes.set_axis_off() for vec in disp_color: f_vec_map.axes.arrow(vec[0],vec[1],vec[2]*a_len,vec[3]*a_len,color=vec[6], linewidth=1, head_width=a_len/3, head_length=a_len/3) #Add a scale bar if s_bar == 1: scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2) f_vec_map.axes.add_artist(scalebar) f_vec_map.show() f_vec_map.fig.savefig(my_path + title + "_{}_vec_map.tif".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True) print('The vector map has been saved to ' + my_path + title + "_{}_vec_map.tif! Enjoy!".format(disp_atom)) except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("Please calculate the displacement first!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() except IndexError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("The list of colors should match the list of angles!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() #========= Generate O vector map module ============================================= #========= Connected to self.pushButton_14 =========================================== def show_O_vec_map(self): O_len = int(self.lineEdit_3.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0 try: global f_vec_map_O f_vec_map_O = PlotCanvas() f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of Oxygen atoms') f_vec_map_O.axes.imshow(image) f_vec_map_O.axes.set_axis_off() for vec in disp_O: f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3) #Add a scale bar if s_bar == 1: scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2) f_vec_map_O.axes.add_artist(scalebar) f_vec_map_O.show() f_vec_map_O.fig.savefig(my_path + title + "_O_vec_map_by_{}.tif".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True) print('The O vector map has been saved to ' + my_path + title + "_O_vec_map_by_{}.tif! 
Enjoy!".format(disp_atom)) except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText("No O displacement data exist!") msg.setWindowTitle("Hey guys") returnValue = msg.exec() #============ Load displacement from csv module ==================================== #============ Connected to self.pushButton_7 ======================================= def load_from_csv(self): # Load displacement data from the csv file saved previously global s, my_path, title, scale, units, disp, disp_O, image, disp_atom openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement data','','CSV (*.csv);;All Files (*)') file = openfile_name[0] if file: my_path = getDirectory(file,'/') s = readImage(my_path + 'Original image.hspy') title = s.metadata.General.title scale = s.axes_manager[0].scale units = s.axes_manager[0].units image = s.data disp = load_disp_data_from_csv(file) # Look for the O data disp_atom = file[-15:-9] file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv' if os.path.isfile(file_O_disp): disp_O = load_disp_data_from_csv(file_O_disp) find_O = 1 print('Found O displacement data!') else: find_O = 0 print('No O displacement data was found! Will do {} atom displacement only!'.format(disp_atom)) #============ Disclaimer button ==================================================== #============ Connected to self.pushButton_8 ======================================= def disclaimer(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText("<b>Disclaimer</b><br>" \ "This app was designed by Dr <NAME>. Redistribution and use in source, " \ "with or without modification, are permitted. Any redistribution must remain "\ "the above copyright. When a scientific publication is reached through the "\ "app, please add the following reference: <br>"\ "1. Ma, T. et al. <a href=\"https://doi.org/10.1103/PhysRevLett.123.217602\">Phys. Rev. Lett. 123, 217602 (2019).</a>"\ "<br>"\ "2. Ma, T. et al. <a href=\"https://doi.org/10.1063/1.5115039\">Appl. Phys. Lett. 115, 122902 (2019).</a>" "<br>" \ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.<br>") msg.setWindowTitle("VecMap0.1: Disclaimer") def disclaimerButtonClick(): msg = QMessageBox() msg.setText('Thanks for using VecMap') msg.setWindowTitle('Thank you!') returnValue = msg.exec() msg.buttonClicked.connect(disclaimerButtonClick) returnValue = msg.exec() #============ About button ==================================================== #============ Connected to self.pushButton_9 ======================================= def show_about(self): msg = QMessageBox() # msg.setIcon(QMessageBox.Information) msg.setText("VecMap v0.1.1"\ "<br>"\ "Designed by Dr. <NAME>"\ "<br>"\ "06/13/2020"\ "<br>" "First version release!<br>" "Get more information and<br> source code from my <a href=\"http://www-personal.umich.edu/~taoma/VectorMap.html\">website</a>.") msg.setWindowTitle("VecMap0.1: About") returnValue = msg.exec() #============ Acknowledgments button ==================================================== #============ Connected to self.pushButton_10 ======================================= def acknowledgments(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText("This program was written with Python 3. The author " \ "acknowledges the HyperSpy and Atomap packages which "\ "are partially incorporated in the program. Please "\ "consider citing/adding acknowledgement for Hyperspy "\ "and Atomap packages in your publication:"\ "<br>" "<NAME> la et al. 
<a href=\"http://doi.org/10.5281/zenodo.3396791\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>" \ "<br>" "<NAME>. et al. <a href=\"https://doi.org/10.1186/s40679-017-0042-5\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>") msg.setWindowTitle("VecMap0.1: Acknowledgments") returnValue = msg.exec() #============ Contact button ==================================================== #============ Connected to self.pushButton_11 ======================================= def show_contact(self): msg = QMessageBox() msg.setText("Ask questions and report bugs to:"\ "<br>" "<a href=\"mailto:<EMAIL>\"><EMAIL></a>") msg.setWindowTitle("VecMap0.1: Contact") returnValue = msg.exec() #============ Donate me button ==================================================== #============ Connected to self.pushButton_12 ======================================= def donate(self): msg = QMessageBox() msg.setText("I will make this app freely available for the society.<br>"\ "If you like this app, show your appreciation by <a href=\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\">donating me!</a>"\ "<br>"\ "Your support is my motivation!<br>") msg.setWindowTitle("VecMap0.1: Donate me!") returnValue = msg.exec() #=========== Define figure canvas =================================================== class PlotCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Plot') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create the mpl Figure and FigCanvas objects. # 5x4 inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((5.0, 4.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Since we have only one plot, we can use add_axes # instead of add_subplot, but then the subplot # configuration tool in the navigation toolbar wouldn't # work. # self.axes = self.fig.add_subplot(111) # Create the navigation toolbar, tied to the canvas # self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame) vbox = QVBoxLayout() vbox.addWidget(self.mpl_toolbar) vbox.addWidget(self.canvas) self.main_frame.setLayout(vbox) self.setCentralWidget(self.main_frame) #==================== Find separation canvas ========================================= class SeparationCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Find separation factors') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create the mpl Figure and FigCanvas objects. 
# 10x10 inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((10.0, 10.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Add a 9x9 axes layout # self.axes = [self.fig.add_subplot(3,3,n) for n in range(1,10)] self.fig.set_tight_layout(True) # Create the navigation toolbar, tied to the canvas # self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame) vbox = QVBoxLayout() vbox.addWidget(self.mpl_toolbar) vbox.addWidget(self.canvas) self.main_frame.setLayout(vbox) self.setCentralWidget(self.main_frame) #==================== Modules and helper functions =================================== from hyperspy.io import load from atomap.atom_finding_refining import get_atom_positions from atomap.sublattice import Sublattice from atomap.tools import remove_atoms_from_image_using_2d_gaussian import os import numpy as np import matplotlib.pyplot as plt import math import copy from scipy.spatial import distance from matplotlib_scalebar.scalebar import ScaleBar #====Helper functions, do not change==== def readImage(file): #Load raw image file for process. #Require Hyperspy package s = load(file) return s def getDirectory(file, s='.'): #Make the working directory and return the path. for idx in range(-1, -len(file), -1): if file[idx] == s: #find the file extension and remove it. '/' for parent path path = file[:idx] + '/' return path def find_atom(img, ini_pos, atom_name, atom_color='r'): #Refine atom positions for a sublattice #img: an array of image data; ini_pos: initial positions; atom_name: a string for name; atom_color: a string for color #img_110: For [110] image sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name) sublattice.find_nearest_neighbors() sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False) sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False) return sublattice #Return an atomap sublattice object def find_neighboring_atoms(P, A, Ua, tol=1.2): # Define a function to find the neighboring atoms of P(x,y) from a list of atoms A. 
# P:a given atom (x,y); A: a list of atoms; Ua: A threashold in px, 0.707*a for [001] and 0.5*a for [110] x, y = P N = [a for a in A if (a[0]-x)**2 + (a[1]-y)**2 < (Ua * tol) **2] #A list to store the neighboring atoms N = sorted(N, key=lambda x: (x[0] ** 2 + x[1] ** 2) ** 0.5) return N def closest_node(node, nodes): #A function to find the closest node in an array closest_index = distance.cdist([node], nodes).argmin() return closest_index,nodes[closest_index] def line(p1, p2): #Find a line function from two points A = (p1[1] - p2[1]) B = (p2[0] - p1[0]) C = (p1[0]*p2[1] - p2[0]*p1[1]) return A, B, -C def intersection(L1, L2): #A function to find the intersection point of two lines D = L1[0] * L2[1] - L1[1] * L2[0] Dx = L1[2] * L2[1] - L1[1] * L2[2] Dy = L1[0] * L2[2] - L1[2] * L2[0] if D != 0: x = Dx / D y = Dy / D return x,y else: return False def math_center(a, b, c, d): #Define a function to find the mathematical center of four points, a, b, c, d #Find the diagonal of a M = [b,c,d] diag_idx = distance.cdist([a],M).argmax() L1 = line(a,M[diag_idx]) del M[diag_idx] L2 = line(M[0],M[1]) center = intersection(L1, L2) return center def find_ideal_pos(A, B, Ua, scale, img_110=False): #calculate the ideal atomic positions for A in a un-distorted perovskite structure #A, B are lists of atom coordinates; Ua is the estimated lattice paramter in nm; scale is the image pixel size #return a list of tuples ideal_positions = [] Neighbor_positions = [] if not img_110: #calculate image [001] for atom in A: Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707) if len(Neighbor) == 4: ap_center = math_center(*Neighbor) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) #Save neighbors for plotting return ideal_positions, Neighbor_positions for atom in A: Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.5) if len(Neighbor) == 2: ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) return ideal_positions, Neighbor_positions def find_ideal_O_pos(A, B, Ua, scale): #calculate the ideal atomic positions for O in a un-distorted perovskite structure #only support [001] images ideal_O_positions = [] for atom in A: Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707) if len(Neighbor) == 4: n_0 = Neighbor.pop(0) n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0]) n_3 = Neighbor.pop() o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2 ideal_O_positions.append(o_0) o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2 ideal_O_positions.append(o_1) o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2 ideal_O_positions.append(o_2) o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2 ideal_O_positions.append(o_3) ideal_O_positions = list(dict.fromkeys(ideal_O_positions)) return ideal_O_positions def find_displacement(A, A_com, scale): #find atomic displacement of A #A_com, A are lists of atom coordinates; Ua is the estimated lattice paramter in nm; scale is the image pixel size disp = [] for atom in A_com: arrow_end = closest_node(atom,A)[1] vec_len = distance.euclidean(arrow_end,atom) if vec_len > 0.14 / scale: continue dx = arrow_end[0]-atom[0] dy = arrow_end[1]-atom[1] #calculate the displacement vector angle according to dx, dy. 
if dy >= 0 and dx >= 0: vec_ang = math.degrees(math.atan(dy/dx)) elif dy >= 0 and dx < 0: vec_ang = math.degrees(math.atan(dy/dx)) + 180 elif dx < 0 and dy < 0: vec_ang = math.degrees(math.atan(dy/dx)) + 180 else: vec_ang = 360 + math.degrees(math.atan(dy/dx)) disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang]) return disp def set_arrow_color(vec_data, ang_lst, color_lst): color_lst = color_lst vec_data_color = copy.deepcopy(vec_data) #Make a copy so it does not modify the original list if len(ang_lst) == 1: for vec in vec_data_color: vec.append(color_lst[0]) #set yellow for single-color rendering return vec_data_color ang_lst_mod = [a - ang_lst[0] for a in ang_lst] ang_bond = [] for idx in range(len(ang_lst_mod)-1): ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx]) ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1]) for vec in vec_data_color: ang = vec[5] - ang_lst[0] if ang < 0: ang = ang + 360 for i in range(len(ang_bond)-1): if round(ang) in range(ang_bond[i], ang_bond[i+1]): vec.append(color_lst[i+1]) for vec in vec_data_color: if len(vec) == 6: vec.append(color_lst[0]) return vec_data_color def load_disp_data_from_csv(file): with open(file,'r') as disp: disp_data = [] lines = disp.readlines() print('Displacement data:\n') print(lines[0]) for lin in lines[1:]: lin_data = lin.strip().split(', ') disp_data.append([float(data) for data in lin_data]) return disp_data #====Application entry================================== def main(): print('='*50) print(''' Welcome to the first version of VecMap --- a convenient tool to calculate atomic displacements in perovskite structures This app was designed by Dr. <NAME>. Address your questions and suggestions to <EMAIL>. Please see the "Disclaimer" before use! Hope you get good results and publications from it! Version 0.1.1 06/13/2020 ''') print('='*50) import sys app = QtWidgets.QApplication(sys.argv) VecMap = QtWidgets.QWidget() ui = Ui_VecMap() ui.setupUi(VecMap) VecMap.show() sys.exit(app.exec_()) if __name__ == "__main__": main()
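The quadrant branches used for the displacement angle in find_displacement above reduce to a single math.atan2 call folded onto the 0-360 degree range (for dx != 0, since the branch form divides by dx). The snippet below is an illustrative cross-check, not part of the original VecMap script; it uses only the Python standard library.

import math
import random

def vec_angle_branches(dx, dy):
    # Same quadrant logic as in find_displacement above (requires dx != 0).
    if dy >= 0 and dx >= 0:
        return math.degrees(math.atan(dy / dx))
    elif dy >= 0 and dx < 0:
        return math.degrees(math.atan(dy / dx)) + 180
    elif dx < 0 and dy < 0:
        return math.degrees(math.atan(dy / dx)) + 180
    else:
        return 360 + math.degrees(math.atan(dy / dx))

def vec_angle_atan2(dx, dy):
    # Single-call equivalent, mapped into the 0-360 degree range.
    return math.degrees(math.atan2(dy, dx)) % 360

for _ in range(1000):
    dx = random.uniform(-5, 5) or 1e-9   # skip dx == 0, which the branch form cannot handle
    dy = random.uniform(-5, 5)
    assert abs(vec_angle_branches(dx, dy) - vec_angle_atan2(dx, dy)) < 1e-6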
[ "PyQt5.QtWidgets.QPushButton", "os.path.isfile", "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "atomap.tools.remove_atoms_from_image_using_2d_gaussian", "PyQt5.QtWidgets.QApplication", "PyQt5.QtWidgets.QLabel", "PyQt5.QtWidgets.QWidget", "PyQt5.QtWidgets.QRadioButton", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "scipy.spatial.distance.euclidean", "PyQt5.QtWidgets.QCheckBox", "atomap.sublattice.Sublattice", "os.path.exists", "matplotlib.figure.Figure", "scipy.spatial.distance.cdist", "numpy.divide", "copy.deepcopy", "PyQt5.QtWidgets.QFrame", "PyQt5.QtCore.QRect", "math.sqrt", "numpy.asarray", "hyperspy.io.load", "numpy.float", "atomap.atom_finding_refining.get_atom_positions", "matplotlib.use", "PyQt5.QtCore.QMetaObject.connectSlotsByName", "matplotlib_scalebar.scalebar.ScaleBar", "math.atan", "os.makedirs", "PyQt5.QtWidgets.QLineEdit", "PyQt5.QtCore.QSize", "PyQt5.QtGui.QFont" ]
[((192, 216), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (206, 216), False, 'import matplotlib\n'), ((48135, 48145), 'hyperspy.io.load', 'load', (['file'], {}), '(file)\n', (48139, 48145), False, 'from hyperspy.io import load\n'), ((48725, 48789), 'atomap.sublattice.Sublattice', 'Sublattice', (['ini_pos'], {'image': 'img', 'color': 'atom_color', 'name': 'atom_name'}), '(ini_pos, image=img, color=atom_color, name=atom_name)\n', (48735, 48789), False, 'from atomap.sublattice import Sublattice\n'), ((53787, 53810), 'copy.deepcopy', 'copy.deepcopy', (['vec_data'], {}), '(vec_data)\n', (53800, 53810), False, 'import copy\n'), ((55699, 55731), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (55721, 55731), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((55746, 55765), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (55763, 55765), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((857, 886), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (878, 886), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1032, 1059), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (1051, 1059), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1197, 1221), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (1213, 1221), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1463, 1487), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (1479, 1487), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1618, 1642), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (1634, 1642), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1924, 1951), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (1943, 1951), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2092, 2116), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (2108, 2116), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2254, 2278), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (2270, 2278), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2421, 2450), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (2442, 2450), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2607, 2636), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (2628, 2636), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2788, 2812), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (2804, 2812), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3094, 3118), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (3110, 3118), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3399, 3423), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (3415, 3423), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3678, 3702), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (3694, 3702), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3843, 3870), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (3862, 3870), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4020, 4047), 'PyQt5.QtWidgets.QCheckBox', 
'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (4039, 4047), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4200, 4229), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (4221, 4229), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4382, 4406), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (4398, 4406), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4697, 4724), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (4716, 4724), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4870, 4894), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (4886, 4894), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5150, 5174), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5166, 5174), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5316, 5340), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5332, 5340), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5483, 5507), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5499, 5507), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5651, 5678), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (5670, 5678), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5826, 5850), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (5842, 5850), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5992, 6016), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (6008, 6016), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6158, 6182), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (6174, 6182), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6326, 6353), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (6345, 6353), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6505, 6534), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (6526, 6534), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6693, 6722), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (6714, 6722), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6875, 6899), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (6891, 6899), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7192, 7221), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (7213, 7221), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7373, 7397), 'PyQt5.QtWidgets.QFrame', 'QtWidgets.QFrame', (['VecMap'], {}), '(VecMap)\n', (7389, 7397), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7657, 7686), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (7678, 7686), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7840, 7864), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (7856, 7864), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8006, 8030), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (8022, 8030), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8176, 8205), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), 
'(VecMap)\n', (8197, 8205), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8365, 8394), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8386, 8394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8556, 8585), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8577, 8585), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8748, 8777), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (8769, 8777), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8867, 8880), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (8878, 8880), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9066, 9096), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['VecMap'], {}), '(VecMap)\n', (9088, 9096), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9294, 9324), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['VecMap'], {}), '(VecMap)\n', (9316, 9324), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9480, 9504), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (9496, 9504), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9651, 9680), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (9672, 9680), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9836, 9860), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (9852, 9860), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10000, 10027), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (10019, 10027), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10179, 10208), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['VecMap'], {}), '(VecMap)\n', (10200, 10208), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10367, 10394), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['VecMap'], {}), '(VecMap)\n', (10386, 10394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10542, 10566), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['VecMap'], {}), '(VecMap)\n', (10558, 10566), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10710, 10737), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['VecMap'], {}), '(VecMap)\n', (10729, 10737), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10950, 10995), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['VecMap'], {}), '(VecMap)\n', (10987, 10995), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((45486, 45518), 'matplotlib.figure.Figure', 'Figure', (['(5.0, 4.0)'], {'dpi': 'self.dpi'}), '((5.0, 4.0), dpi=self.dpi)\n', (45492, 45518), False, 'from matplotlib.figure import Figure\n'), ((45542, 45564), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (45554, 45564), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((45989, 46036), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self.main_frame'], {}), '(self.canvas, self.main_frame)\n', (46006, 46036), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((46824, 46858), 'matplotlib.figure.Figure', 'Figure', (['(10.0, 10.0)'], {'dpi': 'self.dpi'}), '((10.0, 10.0), dpi=self.dpi)\n', (46830, 46858), False, 'from 
matplotlib.figure import Figure\n'), ((46882, 46904), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (46894, 46904), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((47230, 47277), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self.main_frame'], {}), '(self.canvas, self.main_frame)\n', (47247, 47277), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((53002, 53037), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['arrow_end', 'atom'], {}), '(arrow_end, atom)\n', (53020, 53037), False, 'from scipy.spatial import distance\n'), ((749, 771), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(402)', '(836)'], {}), '(402, 836)\n', (761, 771), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((804, 828), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (816, 828), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((924, 952), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(40)', '(91)', '(41)'], {}), '(20, 40, 91, 41)\n', (936, 952), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1095, 1125), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(10)', '(111)', '(20)'], {}), '(150, 10, 111, 20)\n', (1107, 1125), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1253, 1282), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(90)', '(371)', '(21)'], {}), '(20, 90, 371, 21)\n', (1265, 1282), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1520, 1549), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(10)', '(121)', '(16)'], {}), '(20, 10, 121, 16)\n', (1532, 1549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1677, 1707), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(40)', '(251)', '(51)'], {}), '(130, 40, 251, 51)\n', (1689, 1707), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1987, 2017), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(130)', '(30)', '(20)'], {}), '(130, 130, 30, 20)\n', (1999, 2017), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2151, 2181), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(110)', '(191)', '(16)'], {}), '(20, 110, 191, 16)\n', (2163, 2181), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2313, 2343), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(130)', '(111)', '(16)'], {}), '(20, 130, 111, 16)\n', (2325, 2343), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2490, 2519), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(170)', '(91)', '(41)'], {}), '(20, 170, 91, 41)\n', (2502, 2519), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2676, 2705), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(230)', '(91)', '(41)'], {}), '(20, 230, 91, 41)\n', (2688, 2705), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2847, 2878), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(160)', '(251)', '(51)'], {}), '(130, 160, 251, 51)\n', (2859, 2878), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3153, 3184), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(230)', '(251)', '(51)'], {}), '(130, 230, 251, 51)\n', (3165, 3184), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3457, 3487), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(280)', '(371)', '(21)'], {}), '(20, 280, 371, 21)\n', (3469, 3487), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3737, 3767), 
'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(300)', '(191)', '(16)'], {}), '(20, 300, 191, 16)\n', (3749, 3767), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3908, 3938), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(330)', '(111)', '(20)'], {}), '(20, 330, 111, 20)\n', (3920, 3938), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4085, 4116), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(330)', '(131)', '(20)'], {}), '(150, 330, 131, 20)\n', (4097, 4116), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4269, 4298), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(370)', '(91)', '(41)'], {}), '(20, 370, 91, 41)\n', (4281, 4298), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4442, 4473), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(360)', '(251)', '(51)'], {}), '(130, 360, 251, 51)\n', (4454, 4473), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4762, 4792), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(260)', '(10)', '(111)', '(20)'], {}), '(260, 10, 111, 20)\n', (4774, 4792), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4928, 4958), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(420)', '(371)', '(21)'], {}), '(20, 420, 371, 21)\n', (4940, 4958), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5210, 5240), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(440)', '(191)', '(16)'], {}), '(20, 440, 191, 16)\n', (5222, 5240), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5376, 5407), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(170)', '(130)', '(191)', '(16)'], {}), '(170, 130, 191, 16)\n', (5388, 5407), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5543, 5573), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(510)', '(381)', '(16)'], {}), '(20, 510, 381, 16)\n', (5555, 5573), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5716, 5746), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(550)', '(251)', '(22)'], {}), '(20, 550, 251, 22)\n', (5728, 5746), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5886, 5916), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(530)', '(181)', '(16)'], {}), '(20, 530, 181, 16)\n', (5898, 5916), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6052, 6082), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(580)', '(381)', '(16)'], {}), '(20, 580, 381, 16)\n', (6064, 6082), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6218, 6248), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(600)', '(181)', '(16)'], {}), '(20, 600, 181, 16)\n', (6230, 6248), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6391, 6421), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(620)', '(251)', '(22)'], {}), '(20, 620, 251, 22)\n', (6403, 6421), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6574, 6605), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(280)', '(550)', '(101)', '(91)'], {}), '(280, 550, 101, 91)\n', (6586, 6605), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6762, 6791), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(680)', '(80)', '(41)'], {}), '(20, 680, 80, 41)\n', (6774, 6791), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6935, 6966), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(680)', '(191)', '(51)'], {}), '(200, 680, 191, 51)\n', (6947, 6966), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7261, 7291), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(290)', '(460)', '(91)', '(51)'], {}), '(290, 460, 91, 51)\n', (7273, 7291), False, 'from PyQt5 import 
QtCore, QtGui, QtWidgets\n'), ((7431, 7461), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(730)', '(371)', '(21)'], {}), '(20, 730, 371, 21)\n', (7443, 7461), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7726, 7756), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(780)', '(120)', '(28)'], {}), '(20, 780, 120, 28)\n', (7738, 7756), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7900, 7930), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(850)', '(291)', '(16)'], {}), '(60, 850, 291, 16)\n', (7912, 7930), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8066, 8096), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(750)', '(211)', '(16)'], {}), '(20, 750, 211, 16)\n', (8078, 8096), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8245, 8276), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(780)', '(120)', '(28)'], {}), '(150, 780, 120, 28)\n', (8257, 8276), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8435, 8465), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(810)', '(120)', '(28)'], {}), '(20, 810, 120, 28)\n', (8447, 8465), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8626, 8657), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(810)', '(120)', '(28)'], {}), '(150, 810, 120, 28)\n', (8638, 8657), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8818, 8849), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(280)', '(780)', '(101)', '(58)'], {}), '(280, 780, 101, 58)\n', (8830, 8849), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9135, 9164), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(480)', '(95)', '(20)'], {}), '(20, 480, 95, 20)\n', (9147, 9164), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9365, 9394), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(480)', '(95)', '(20)'], {}), '(90, 480, 95, 20)\n', (9377, 9394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9540, 9570), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(460)', '(171)', '(16)'], {}), '(20, 460, 171, 16)\n', (9552, 9570), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9721, 9751), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(460)', '(81)', '(51)'], {}), '(200, 460, 81, 51)\n', (9733, 9751), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9895, 9924), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(650)', '(41)', '(16)'], {}), '(20, 650, 41, 16)\n', (9907, 9924), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10065, 10094), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(650)', '(30)', '(20)'], {}), '(60, 650, 30, 20)\n', (10077, 10094), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10249, 10279), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(110)', '(680)', '(80)', '(41)'], {}), '(110, 680, 80, 41)\n', (10261, 10279), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10432, 10462), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(650)', '(30)', '(20)'], {}), '(150, 650, 30, 20)\n', (10444, 10462), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10602, 10632), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(110)', '(650)', '(41)', '(16)'], {}), '(110, 650, 41, 16)\n', (10614, 10632), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10775, 10806), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(210)', '(650)', '(111)', '(20)'], {}), '(210, 650, 111, 20)\n', (10787, 10806), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18589, 18626), 'atomap.atom_finding_refining.get_atom_positions', 'get_atom_positions', (['s'], 
{'separation': 'sep'}), '(s, separation=sep)\n', (18607, 18626), False, 'from atomap.atom_finding_refining import get_atom_positions\n'), ((24648, 24751), 'atomap.tools.remove_atoms_from_image_using_2d_gaussian', 'remove_atoms_from_image_using_2d_gaussian', (['sublattice_A.image', 'sublattice_A'], {'show_progressbar': '(False)'}), '(sublattice_A.image, sublattice_A,\n show_progressbar=False)\n', (24689, 24751), False, 'from atomap.tools import remove_atoms_from_image_using_2d_gaussian\n'), ((40146, 40173), 'os.path.isfile', 'os.path.isfile', (['file_O_disp'], {}), '(file_O_disp)\n', (40160, 40173), False, 'import os\n'), ((49616, 49645), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[node]', 'nodes'], {}), '([node], nodes)\n', (49630, 49645), False, 'from scipy.spatial import distance\n'), ((50372, 50394), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[a]', 'M'], {}), '([a], M)\n', (50386, 50394), False, 'from scipy.spatial import distance\n'), ((17353, 17376), 'os.path.exists', 'os.path.exists', (['my_path'], {}), '(my_path)\n', (17367, 17376), False, 'import os\n'), ((17395, 17415), 'os.makedirs', 'os.makedirs', (['my_path'], {}), '(my_path)\n', (17406, 17415), False, 'import os\n'), ((17828, 17848), 'numpy.divide', 'np.divide', (['(1)', 's.data'], {}), '(1, s.data)\n', (17837, 17848), True, 'import numpy as np\n'), ((21678, 21720), 'atomap.atom_finding_refining.get_atom_positions', 'get_atom_positions', (['s'], {'separation': 's_factor'}), '(s, separation=s_factor)\n', (21696, 21720), False, 'from atomap.atom_finding_refining import get_atom_positions\n'), ((24009, 24043), 'math.sqrt', 'math.sqrt', (['(z0[0] ** 2 + z0[1] ** 2)'], {}), '(z0[0] ** 2 + z0[1] ** 2)\n', (24018, 24043), False, 'import math\n'), ((24066, 24100), 'math.sqrt', 'math.sqrt', (['(z1[0] ** 2 + z1[1] ** 2)'], {}), '(z1[0] ** 2 + z1[1] ** 2)\n', (24075, 24100), False, 'import math\n'), ((25433, 25507), 'atomap.sublattice.Sublattice', 'Sublattice', (['AB_positions'], {'image': 's.data', 'color': '"""y"""', 'name': '"""Sublattice A + B"""'}), "(AB_positions, image=s.data, color='y', name='Sublattice A + B')\n", (25443, 25507), False, 'from atomap.sublattice import Sublattice\n'), ((25997, 26100), 'atomap.tools.remove_atoms_from_image_using_2d_gaussian', 'remove_atoms_from_image_using_2d_gaussian', (['sublattice_B.image', 'sublattice_B'], {'show_progressbar': '(False)'}), '(sublattice_B.image, sublattice_B,\n show_progressbar=False)\n', (26038, 26100), False, 'from atomap.tools import remove_atoms_from_image_using_2d_gaussian\n'), ((36556, 36624), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['scale', '"""nm"""'], {'location': '"""lower left"""', 'scale_loc': '"""top"""', 'sep': '(2)'}), "(scale, 'nm', location='lower left', scale_loc='top', sep=2)\n", (36564, 36624), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((38420, 38488), 'matplotlib_scalebar.scalebar.ScaleBar', 'ScaleBar', (['scale', '"""nm"""'], {'location': '"""lower left"""', 'scale_loc': '"""top"""', 'sep': '(2)'}), "(scale, 'nm', location='lower left', scale_loc='top', sep=2)\n", (38428, 38488), False, 'from matplotlib_scalebar.scalebar import ScaleBar\n'), ((53302, 53320), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53311, 53320), False, 'import math\n'), ((19206, 19227), 'numpy.float', 'np.float', (['event.xdata'], {}), '(event.xdata)\n', (19214, 19227), True, 'import numpy as np\n'), ((19253, 19274), 'numpy.float', 'np.float', (['event.ydata'], {}), '(event.ydata)\n', (19261, 19274), True, 
'import numpy as np\n'), ((19370, 19422), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['(x, y)', 'A_positions[atom_nearby]'], {}), '((x, y), A_positions[atom_nearby])\n', (19388, 19422), False, 'from scipy.spatial import distance\n'), ((19700, 19720), 'numpy.asarray', 'np.asarray', (['atom_lst'], {}), '(atom_lst)\n', (19710, 19720), True, 'import numpy as np\n'), ((19727, 19747), 'numpy.asarray', 'np.asarray', (['atom_lst'], {}), '(atom_lst)\n', (19737, 19747), True, 'import numpy as np\n'), ((21806, 21830), 'numpy.asarray', 'np.asarray', (['ini_position'], {}), '(ini_position)\n', (21816, 21830), True, 'import numpy as np\n'), ((21837, 21861), 'numpy.asarray', 'np.asarray', (['ini_position'], {}), '(ini_position)\n', (21847, 21861), True, 'import numpy as np\n'), ((53391, 53409), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53400, 53409), False, 'import math\n'), ((53484, 53502), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53493, 53502), False, 'import math\n'), ((53565, 53583), 'math.atan', 'math.atan', (['(dy / dx)'], {}), '(dy / dx)\n', (53574, 53583), False, 'import math\n')]
import numpy as np


def count_subset_occurrences(array, subset_array):
    occurrences = 0
    for idx in range(len(array) - len(subset_array) + 1):
        if np.array_equal(array[idx:(idx + len(subset_array))], subset_array):
            occurrences += 1
    return occurrences


def test_base_case():
    assert count_subset_occurrences(
        np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]),
        np.array([1, 1])
    ) == 3


test_base_case()
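The loop above can also be written in vectorized form with NumPy's sliding-window view (available in NumPy 1.20+). This is an illustrative alternative, not part of the original sample, and it reuses the np import from the sample above.

from numpy.lib.stride_tricks import sliding_window_view

def count_subset_occurrences_vectorized(array, subset_array):
    # Build all contiguous windows of the subset's length, then compare row-wise.
    windows = sliding_window_view(array, len(subset_array))
    return int((windows == subset_array).all(axis=1).sum())

# Should agree with the loop-based version on the same test case.
assert count_subset_occurrences_vectorized(
    np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]),
    np.array([1, 1])
) == 3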
[ "numpy.array" ]
[((348, 394), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]'], {}), '([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3])\n', (356, 394), True, 'import numpy as np\n'), ((405, 421), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (413, 421), True, 'import numpy as np\n')]
# --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# modify this to our VQA dataset
# --------------------------------------------------------

import os
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib import colors

from cfgs.base_cfgs import Cfgs
from core.exec import Execution
import argparse, yaml


def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='MCAN Args')

    parser.add_argument('--run', dest='run_mode',
                        choices=['train', 'val', 'test', 'visualize'],
                        type=str, default='train')
    parser.add_argument('--model', dest='model',
                        choices=['small', 'large'],
                        default='small', type=str)
    parser.add_argument('--split', dest='train_split',
                        choices=['train', 'train+val', 'train+val+vg'],
                        help="set training split, "
                             "eg.'train', 'train+val+vg'"
                             "set 'train' can trigger the "
                             "eval after every epoch",
                        type=str)
    parser.add_argument('--eval_every_epoch', default=False,
                        help='set True to evaluate the '
                             'val split when an epoch finished'
                             "(only work when train with "
                             "'train' split)",
                        type=bool)
    parser.add_argument('--test_save_pred',
                        help='set True to save the '
                             'prediction vectors'
                             '(only work in testing)',
                        type=bool)
    parser.add_argument('--batch_size', default=1,  # was 256
                        help='batch size during training',
                        type=int)
    parser.add_argument('--max_epoch',
                        help='max training epoch',
                        type=int)
    parser.add_argument('--preload',
                        help='pre-load the features into memory'
                             'to increase the I/O speed',
                        type=bool)
    parser.add_argument('--gpu', default='0,1',
                        help="gpu select, eg.'0, 1, 2'",
                        type=str)
    parser.add_argument('--seed', default=444,
                        help='fix random seed',
                        type=int)
    parser.add_argument('--version',
                        help='version control',
                        type=str)
    parser.add_argument('--resume',
                        help='resume training',
                        type=bool)
    parser.add_argument('--ckpt_version',
                        help='checkpoint version',
                        type=str)
    parser.add_argument('--ckpt_epoch',
                        help='checkpoint epoch',
                        type=int)
    parser.add_argument('--ckpt_path',
                        help='load checkpoint path, we '
                             'recommend that you use '
                             'ckpt_version and ckpt_epoch '
                             'instead',
                        type=str)
    parser.add_argument('--grad_accu_steps',
                        help='reduce gpu memory usage',
                        type=int)
    parser.add_argument('--num_workers',
                        help='multithreaded loading',
                        type=int)
    parser.add_argument('--pin_mem',
                        help='use pin memory',
                        type=bool)
    parser.add_argument('--verbose',
                        help='verbose print',
                        type=bool)
    parser.add_argument('--dataset_path',
                        help='vqav2 dataset root path',
                        type=str)
    parser.add_argument('--feature_path',
                        help='bottom up features root path',
                        type=str)

    args = parser.parse_args()
    return args


def main():
    opt = Cfgs()

    args = parse_args()
    args_dict = opt.parse_to_dict(args)

    cfg_file = "cfgs/{}_model.yml".format(args.model)
    with open(cfg_file, 'r') as f:
        yaml_dict = yaml.load(f, Loader=yaml.FullLoader)

    args_dict = {**yaml_dict, **args_dict}
    opt.add_args(args_dict)
    opt.proc()

    print('Hyper Parameters:')
    print(opt)

    opt.check_path()

    execution = Execution(opt)
    execution.run(opt.run_mode)


def text_layout():
    # compute some interesting data
    x0, x1 = -5, 5
    y0, y1 = -3, 3
    x = np.linspace(x0, x1, 500)
    y = np.linspace(y0, y1, 500)
    X, Y = np.meshgrid(x, y)
    Z1 = np.exp(-X**2 - Y**2)
    Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
    Z = (Z1 - Z2) * 2

    # Set up a colormap:
    # use copy so that we do not mutate the global colormap instance
    palette = copy(plt.cm.gray)
    palette.set_over('r', 1.0)
    palette.set_under('g', 1.0)
    palette.set_bad('b', 1.0)
    # Alternatively, we could use
    # palette.set_bad(alpha = 0.0)
    # to make the bad region transparent. This is the default.
    # If you comment out all the palette.set* lines, you will see
    # all the defaults; under and over will be colored with the
    # first and last colors in the palette, respectively.
    Zm = np.ma.masked_where(Z > 1.2, Z)

    # By setting vmin and vmax in the norm, we establish the
    # range to which the regular palette color scale is applied.
    # Anything above that range is colored based on palette.set_over, etc.

    # set up the Axes objects
    fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4))

    # plot using 'continuous' color map
    im = ax1.imshow(Zm, interpolation='bilinear',
                    cmap=palette,
                    norm=colors.Normalize(vmin=-1.0, vmax=1.0),
                    aspect='auto',
                    origin='lower',
                    extent=[x0, x1, y0, y1])
    ax1.set_title('Green=low, Red=high, Blue=masked')
    cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1)
    cbar.set_label('uniform')
    for ticklabel in ax1.xaxis.get_ticklabels():
        ticklabel.set_visible(False)

    # Plot using a small number of colors, with unevenly spaced boundaries.
    im = ax2.imshow(Zm, interpolation='nearest',
                    cmap=palette,
                    norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                             ncolors=palette.N),
                    aspect='auto',
                    origin='lower',
                    extent=[x0, x1, y0, y1])
    ax2.set_title('With BoundaryNorm')
    cbar = fig.colorbar(im, extend='both', spacing='proportional',
                        shrink=0.9, ax=ax2)
    cbar.set_label('proportional')

    fig.suptitle('imshow, with out-of-range and masked data')
    f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg')
    plt.savefig(f1)
    plt.close()


if __name__ == '__main__':
    main()
    # text_layout()
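For illustration only (this is not part of the original file), the parser above can be exercised without a real command line by overriding sys.argv. The flag names come from the add_argument calls above; the script name 'run.py' is just a placeholder.

import sys

# Hypothetical invocation; assumes parse_args from the script above is in scope.
sys.argv = ['run.py', '--run', 'train', '--model', 'small', '--gpu', '0']
args = parse_args()
print(args.run_mode, args.model, args.gpu)   # expected: train small 0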
[ "matplotlib.pyplot.savefig", "numpy.meshgrid", "yaml.load", "argparse.ArgumentParser", "numpy.ma.masked_where", "matplotlib.colors.Normalize", "os.getcwd", "matplotlib.pyplot.close", "matplotlib.colors.BoundaryNorm", "copy.copy", "numpy.exp", "numpy.linspace", "cfgs.base_cfgs.Cfgs", "matplotlib.pyplot.subplots", "core.exec.Execution" ]
[((516, 564), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MCAN Args"""'}), "(description='MCAN Args')\n", (539, 564), False, 'import argparse, yaml\n'), ((4103, 4109), 'cfgs.base_cfgs.Cfgs', 'Cfgs', ([], {}), '()\n', (4107, 4109), False, 'from cfgs.base_cfgs import Cfgs\n'), ((4495, 4509), 'core.exec.Execution', 'Execution', (['opt'], {}), '(opt)\n', (4504, 4509), False, 'from core.exec import Execution\n'), ((4645, 4669), 'numpy.linspace', 'np.linspace', (['x0', 'x1', '(500)'], {}), '(x0, x1, 500)\n', (4656, 4669), True, 'import numpy as np\n'), ((4678, 4702), 'numpy.linspace', 'np.linspace', (['y0', 'y1', '(500)'], {}), '(y0, y1, 500)\n', (4689, 4702), True, 'import numpy as np\n'), ((4714, 4731), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4725, 4731), True, 'import numpy as np\n'), ((4741, 4765), 'numpy.exp', 'np.exp', (['(-X ** 2 - Y ** 2)'], {}), '(-X ** 2 - Y ** 2)\n', (4747, 4765), True, 'import numpy as np\n'), ((4771, 4807), 'numpy.exp', 'np.exp', (['(-(X - 1) ** 2 - (Y - 1) ** 2)'], {}), '(-(X - 1) ** 2 - (Y - 1) ** 2)\n', (4777, 4807), True, 'import numpy as np\n'), ((4935, 4952), 'copy.copy', 'copy', (['plt.cm.gray'], {}), '(plt.cm.gray)\n', (4939, 4952), False, 'from copy import copy\n'), ((5376, 5406), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(Z > 1.2)', 'Z'], {}), '(Z > 1.2, Z)\n', (5394, 5406), True, 'import numpy as np\n'), ((5662, 5701), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(6, 5.4)'}), '(nrows=2, figsize=(6, 5.4))\n', (5674, 5701), True, 'import matplotlib.pyplot as plt\n'), ((6912, 6927), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f1'], {}), '(f1)\n', (6923, 6927), True, 'import matplotlib.pyplot as plt\n'), ((6932, 6943), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6941, 6943), True, 'import matplotlib.pyplot as plt\n'), ((4285, 4321), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (4294, 4321), False, 'import argparse, yaml\n'), ((6860, 6871), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6869, 6871), False, 'import os\n'), ((5840, 5877), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(-1.0)', 'vmax': '(1.0)'}), '(vmin=-1.0, vmax=1.0)\n', (5856, 5877), False, 'from matplotlib import colors\n'), ((6383, 6455), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['[-1, -0.5, -0.2, 0, 0.2, 0.5, 1]'], {'ncolors': 'palette.N'}), '([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=palette.N)\n', (6402, 6455), False, 'from matplotlib import colors\n')]
r""" =========== Transport laws =========== Create a plot comparing the different transport laws. """ import matplotlib.pyplot as plt import numpy as np from PyDune.physics.sedtransport import transport_laws as TL theta = np.linspace(0, 0.4, 1000) theta_d = 0.035 omega = 8 plt.figure() plt.plot(theta, TL.quadratic_transport_law(theta, theta_d, omega), label='quadratic transport law') plt.plot(theta, TL.cubic_transport_law(theta, theta_d, omega), label='cubic transport law') plt.plot(theta, TL.quartic_transport_law(theta, theta_d), label='cubic transport law') plt.xlabel(r'Shield number, $\theta$') plt.ylabel('Non dimensional saturated flux') plt.legend() plt.tight_layout() plt.show()
[ "PyDune.physics.sedtransport.transport_laws.quartic_transport_law", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "PyDune.physics.sedtransport.transport_laws.quadratic_transport_law", "matplotlib.pyplot.figure", "numpy.linspace", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.tight_layout", "PyDune.physics.sedtransport.transport_laws.cubic_transport_law" ]
[((226, 251), 'numpy.linspace', 'np.linspace', (['(0)', '(0.4)', '(1000)'], {}), '(0, 0.4, 1000)\n', (237, 251), True, 'import numpy as np\n'), ((280, 292), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (290, 292), True, 'import matplotlib.pyplot as plt\n'), ((572, 610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Shield number, $\\\\theta$"""'], {}), "('Shield number, $\\\\theta$')\n", (582, 610), True, 'import matplotlib.pyplot as plt\n'), ((611, 655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Non dimensional saturated flux"""'], {}), "('Non dimensional saturated flux')\n", (621, 655), True, 'import matplotlib.pyplot as plt\n'), ((656, 668), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (666, 668), True, 'import matplotlib.pyplot as plt\n'), ((669, 687), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (696, 698), True, 'import matplotlib.pyplot as plt\n'), ((309, 358), 'PyDune.physics.sedtransport.transport_laws.quadratic_transport_law', 'TL.quadratic_transport_law', (['theta', 'theta_d', 'omega'], {}), '(theta, theta_d, omega)\n', (335, 358), True, 'from PyDune.physics.sedtransport import transport_laws as TL\n'), ((409, 454), 'PyDune.physics.sedtransport.transport_laws.cubic_transport_law', 'TL.cubic_transport_law', (['theta', 'theta_d', 'omega'], {}), '(theta, theta_d, omega)\n', (431, 454), True, 'from PyDune.physics.sedtransport import transport_laws as TL\n'), ((501, 541), 'PyDune.physics.sedtransport.transport_laws.quartic_transport_law', 'TL.quartic_transport_law', (['theta', 'theta_d'], {}), '(theta, theta_d)\n', (525, 541), True, 'from PyDune.physics.sedtransport import transport_laws as TL\n')]
import random
import numpy as np
import time


class Signalgenerator():
    def __init__(self):
        self.Fs = 8000
        self.f = 2
        self.sample = 8000
        self.x = np.arange(1, self.sample + 1)
        self.y = np.empty(self.sample)
        self.level = 0
        self.filename = ''

    def set_filename(self, name):
        self.filename = name

    def configure_device(self, level):
        self.level = level

    def measure_signal(self):
        for i in range(0, self.sample):
            delta = random.randint(1, self.level * 10) / 10 - self.level
            self.y[i] = self.level * 10 * np.cos(2 * np.pi * self.f * i / self.Fs) + delta

    def get_signal(self):
        return self.y

    def save_signal(self):
        with open(self.filename, 'w') as f:
            f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\n')
            f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\n')
            f.write('Spectrum:\n')
            for i in range(0, self.sample):
                f.write(str(self.x[i]) + '\t' + str(self.y[i]) + '\n')
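A short usage sketch for the class above (illustrative only; the output filename is a placeholder and the level value is arbitrary):

gen = Signalgenerator()
gen.configure_device(level=5)          # amplitude/noise level used by measure_signal
gen.set_filename('signal_dump.txt')    # placeholder output path
gen.measure_signal()                   # fills gen.y with a noisy cosine at f = 2 Hz, Fs = 8000
signal = gen.get_signal()
print(signal.shape)                    # (8000,)
gen.save_signal()                      # writes the Time, Intensity and x/y spectrum rows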
[ "random.randint", "numpy.empty", "time.time", "numpy.arange", "numpy.cos" ]
[((181, 210), 'numpy.arange', 'np.arange', (['(1)', '(self.sample + 1)'], {}), '(1, self.sample + 1)\n', (190, 210), True, 'import numpy as np\n'), ((226, 247), 'numpy.empty', 'np.empty', (['self.sample'], {}), '(self.sample)\n', (234, 247), True, 'import numpy as np\n'), ((523, 557), 'random.randint', 'random.randint', (['(1)', '(self.level * 10)'], {}), '(1, self.level * 10)\n', (537, 557), False, 'import random\n'), ((618, 658), 'numpy.cos', 'np.cos', (['(2 * np.pi * self.f * i / self.Fs)'], {}), '(2 * np.pi * self.f * i / self.Fs)\n', (624, 658), True, 'import numpy as np\n'), ((922, 956), 'random.randint', 'random.randint', (['(1)', '(self.level * 10)'], {}), '(1, self.level * 10)\n', (936, 956), False, 'import random\n'), ((860, 871), 'time.time', 'time.time', ([], {}), '()\n', (869, 871), False, 'import time\n')]
# Core functions for Vireo model # Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools import numpy as np from scipy.stats import entropy from scipy.special import digamma from .vireo_base import normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True, min_iter=20, max_iter=100, min_GP=0.00001, epsilon_conv=1e-2, random_seed=None, verbose=False): """ Vireo core function to cluster the cells into donors. """ if random_seed is not None: np.random.seed(random_seed) if n_donor is None: if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2: print("Error: no n_donor and GT_prior has < 2 donors.") sys.exit(1) else: n_donor = GT_prior.shape[1] n_var = AD.shape[0] # n_variants ## initialize thete if theta_prior is None: #theta_prior = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]]) theta_shapes = theta_prior.copy() if ASE_mode and len(theta_prior.shape) == 2: theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt = theta_shapes.shape[0] # number of genotype categories ## initialize Psi if Psi is None: Psi = np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob = normalize(ID_prob_init.copy()) ## initialize GT if GT_prior is None: GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False: print("As GT_prior is not given, we change learn_GT to True.") learn_GT = True else: GT_prob = GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP GT_prior = normalize(GT_prior) #TODO: check if there is a better way to deal with GT imcompleteness if GT_prior.shape[1] < n_donor: _add_n = n_donor - GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var, n_gt, _add_n)), axis=1)) GT_prob = GT_prior.copy() if learn_GT is False: print("As GT_prior is not complete, we change learn_GT to True.") learn_GT = True elif GT_prior.shape[1] > n_donor: print("Warning: n_donor is smaller than samples in GT_prior, hence we " "ignore n_donor.") n_donor = GT_prior.shape[1] # check if n_gt is matched to GT_prior if GT_prior.shape[2] != n_gt: print("Error: number of GT categories not matched: theta and GT_prior") sys.exit(1) ## VB interations LB = np.zeros(max_iter) for it in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it] < LB[it - 1]: if verbose: print("Warning: Lower bound decreases!\n") elif it == max_iter - 1: if verbose: print("Warning: VB did not converge!\n") elif LB[it] - LB[it - 1] < epsilon_conv: break ## one-off check doublet if check_doublet: ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob = ID_prob2[:, :n_donor] doublet_prob = ID_prob2[:, n_donor:] else: LB_doublet = LB[it] n_donor_doublt = 
int(n_donor * (n_donor - 1) / 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): """ Update the parameters of each component of the variantional distribution. The doublet probability can be created by doublet genotypes """ if check_doublet: GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1] if doublet_prior is None: doublet_prior = min(0.5, AD.shape[1] / 100000) Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else: Psi_both = Psi.copy() GT_both = GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both) ID_prob = ID_prob2[:, :GT_prob.shape[1]] if learn_GT: GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check how to calculate lower bound for when detecting doublets LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior): """ """ S1_gt = AD * ID_prob SS_gt = DP * ID_prob S2_gt = SS_gt - S1_gt theta_shapes = theta_prior.copy() for ig in range(theta_shapes.shape[0]): _axis = 1 if len(theta_shapes.shape) == 3 else None theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): """ """ if Psi is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP - AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose() * (GT_prob[:, :, ig] * _digmma1) S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2) SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas) logLik_ID += (S1 + S2 - SS) Psi_norm = np.log(Psi / np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1)) ID_prob = normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None): """ """ if GT_prior is None: GT_prior = np.ones((AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0])) GT_prior = GT_prior / theta_shapes.shape[0] S1_gt = AD * ID_prob SS_gt = DP * ID_prob S2_gt = SS_gt - S1_gt logLik_GT = np.zeros(GT_prior.shape) for ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :, ig] = (S1_gt * _digmma1 + S2_gt * _digmma2 - SS_gt * _digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2) GT_prob = normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): """ """ if GT_prior 
is None: GT_prior = normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1)) KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p - KL_ID - KL_GT - KL_theta def add_doublet_theta(theta_shapes): """ calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by averaging thire beta paramters Example ------- theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) add_doublet_theta(theta_shapes) """ # TODO: support reduced GT for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) / 2.0 _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) * np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean * _theta_sum return np.append(theta_shapes, theta_shapes_db, axis=0) def add_doublet_GT(GT_prob): """ Add doublet genotype by summarizing their probability: New GT has five categories: 0, 1, 2, 1.5, 2.5 TODO: New GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2 """ combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in combn_iter]) # GT combination g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for x in combn_iter]) # sample combination s_idx1 = sp_idx[:, 0] s_idx2 = sp_idx[:, 1] ## GT_prob has three genotypes: 0, 1, 2; n_gt = GT_prob.shape[2] GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])) GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2, :]) GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] * GT_prob[:, s_idx2, :][:, :, g_idx2] + GT_prob[:, s_idx1, :][:, :, g_idx2] * GT_prob[:, s_idx2, :][:, :, g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1 = np.append(GT_prob, np.zeros((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])), axis=2) return np.append(GT_prob1, GT_prob2, axis=1)
[ "numpy.sum", "numpy.random.seed", "numpy.log", "scipy.stats.entropy", "numpy.zeros", "numpy.ones", "numpy.expand_dims", "numpy.append", "scipy.special.digamma", "numpy.array", "numpy.exp", "numpy.random.rand", "sys.exit" ]
[((3246, 3264), 'numpy.zeros', 'np.zeros', (['max_iter'], {}), '(max_iter)\n', (3254, 3264), True, 'import numpy as np\n'), ((6889, 6930), 'numpy.zeros', 'np.zeros', (['(AD.shape[1], GT_prob.shape[1])'], {}), '((AD.shape[1], GT_prob.shape[1]))\n', (6897, 6930), True, 'import numpy as np\n'), ((7960, 7984), 'numpy.zeros', 'np.zeros', (['GT_prior.shape'], {}), '(GT_prior.shape)\n', (7968, 7984), True, 'import numpy as np\n'), ((8964, 8991), 'numpy.sum', 'np.sum', (['(logLik_ID * ID_prob)'], {}), '(logLik_ID * ID_prob)\n', (8970, 8991), True, 'import numpy as np\n'), ((9672, 9705), 'numpy.array', 'np.array', (['[x for x in combn_iter]'], {}), '([x for x in combn_iter])\n', (9680, 9705), True, 'import numpy as np\n'), ((10102, 10150), 'numpy.append', 'np.append', (['theta_shapes', 'theta_shapes_db'], {'axis': '(0)'}), '(theta_shapes, theta_shapes_db, axis=0)\n', (10111, 10150), True, 'import numpy as np\n'), ((10448, 10481), 'numpy.array', 'np.array', (['[x for x in combn_iter]'], {}), '([x for x in combn_iter])\n', (10456, 10481), True, 'import numpy as np\n'), ((10633, 10666), 'numpy.array', 'np.array', (['[x for x in combn_iter]'], {}), '([x for x in combn_iter])\n', (10641, 10666), True, 'import numpy as np\n'), ((10833, 10902), 'numpy.zeros', 'np.zeros', (['(GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])'], {}), '((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0]))\n', (10841, 10902), True, 'import numpy as np\n'), ((11479, 11516), 'numpy.append', 'np.append', (['GT_prob1', 'GT_prob2'], {'axis': '(1)'}), '(GT_prob1, GT_prob2, axis=1)\n', (11488, 11516), True, 'import numpy as np\n'), ((758, 785), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (772, 785), True, 'import numpy as np\n'), ((1211, 1257), 'numpy.array', 'np.array', (['[[0.1, 99.9], [50, 50], [99.9, 0.1]]'], {}), '([[0.1, 99.9], [50, 50], [99.9, 0.1]])\n', (1219, 1257), True, 'import numpy as np\n'), ((3202, 3213), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3210, 3213), False, 'import sys\n'), ((4376, 4420), 'numpy.zeros', 'np.zeros', (['(ID_prob.shape[0], n_donor_doublt)'], {}), '((ID_prob.shape[0], n_donor_doublt))\n', (4384, 4420), True, 'import numpy as np\n'), ((6554, 6599), 'numpy.sum', 'np.sum', (['(S1_gt * GT_prob[:, :, ig])'], {'axis': '_axis'}), '(S1_gt * GT_prob[:, :, ig], axis=_axis)\n', (6560, 6599), True, 'import numpy as np\n'), ((6631, 6676), 'numpy.sum', 'np.sum', (['(S2_gt * GT_prob[:, :, ig])'], {'axis': '_axis'}), '(S2_gt * GT_prob[:, :, ig], axis=_axis)\n', (6637, 6676), True, 'import numpy as np\n'), ((7708, 7771), 'numpy.ones', 'np.ones', (['(AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0])'], {}), '((AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0]))\n', (7715, 7771), True, 'import numpy as np\n'), ((8512, 8527), 'numpy.exp', 'np.exp', (['GT_prob'], {}), '(GT_prob)\n', (8518, 8527), True, 'import numpy as np\n'), ((11395, 11458), 'numpy.zeros', 'np.zeros', (['(GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])'], {}), '((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0]))\n', (11403, 11458), True, 'import numpy as np\n'), ((952, 963), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (960, 963), False, 'import sys\n'), ((1377, 1407), 'numpy.expand_dims', 'np.expand_dims', (['theta_prior', '(2)'], {}), '(theta_prior, 2)\n', (1391, 1407), True, 'import numpy as np\n'), ((1457, 1488), 'numpy.expand_dims', 'np.expand_dims', (['theta_shapes', '(2)'], {}), '(theta_shapes, 2)\n', (1471, 1488), True, 'import numpy as np\n'), ((1627, 1643), 
'numpy.ones', 'np.ones', (['n_donor'], {}), '(n_donor)\n', (1634, 1643), True, 'import numpy as np\n'), ((1694, 1715), 'numpy.sum', 'np.sum', (['Psi[:n_donor]'], {}), '(Psi[:n_donor])\n', (1700, 1715), True, 'import numpy as np\n'), ((1773, 1809), 'numpy.random.rand', 'np.random.rand', (['AD.shape[1]', 'n_donor'], {}), '(AD.shape[1], n_donor)\n', (1787, 1809), True, 'import numpy as np\n'), ((1950, 1981), 'numpy.ones', 'np.ones', (['(n_var, n_donor, n_gt)'], {}), '((n_var, n_donor, n_gt))\n', (1957, 1981), True, 'import numpy as np\n'), ((6810, 6835), 'numpy.ones', 'np.ones', (['GT_prob.shape[1]'], {}), '(GT_prob.shape[1])\n', (6817, 6835), True, 'import numpy as np\n'), ((7423, 7434), 'numpy.sum', 'np.sum', (['Psi'], {}), '(Psi)\n', (7429, 7434), True, 'import numpy as np\n'), ((8462, 8478), 'numpy.log', 'np.log', (['GT_prior'], {}), '(GT_prior)\n', (8468, 8478), True, 'import numpy as np\n'), ((8751, 8773), 'numpy.ones', 'np.ones', (['GT_prob.shape'], {}), '(GT_prob.shape)\n', (8758, 8773), True, 'import numpy as np\n'), ((8822, 8844), 'numpy.ones', 'np.ones', (['ID_prob.shape'], {}), '(ID_prob.shape)\n', (8829, 8844), True, 'import numpy as np\n'), ((8893, 8915), 'numpy.ones', 'np.ones', (['ID_prob.shape'], {}), '(ID_prob.shape)\n', (8900, 8915), True, 'import numpy as np\n'), ((9012, 9046), 'scipy.stats.entropy', 'entropy', (['ID_prob', 'ID_prior'], {'axis': '(1)'}), '(ID_prob, ID_prior, axis=1)\n', (9019, 9046), False, 'from scipy.stats import entropy\n'), ((9068, 9102), 'scipy.stats.entropy', 'entropy', (['GT_prob', 'GT_prior'], {'axis': '(2)'}), '(GT_prob, GT_prior, axis=2)\n', (9075, 9102), False, 'from scipy.stats import entropy\n'), ((9926, 9966), 'numpy.sum', 'np.sum', (['_theta_p1'], {'axis': '(1)', 'keepdims': '(True)'}), '(_theta_p1, axis=1, keepdims=True)\n', (9932, 9966), True, 'import numpy as np\n'), ((9996, 10036), 'numpy.sum', 'np.sum', (['_theta_p2'], {'axis': '(1)', 'keepdims': '(True)'}), '(_theta_p2, axis=1, keepdims=True)\n', (10002, 10036), True, 'import numpy as np\n'), ((2638, 2668), 'numpy.ones', 'np.ones', (['(n_var, n_gt, _add_n)'], {}), '((n_var, n_gt, _add_n))\n', (2645, 2668), True, 'import numpy as np\n'), ((6989, 7017), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 0]'], {}), '(theta_shapes[ig, 0])\n', (6996, 7017), False, 'from scipy.special import digamma\n'), ((7052, 7080), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 1]'], {}), '(theta_shapes[ig, 1])\n', (7059, 7080), False, 'from scipy.special import digamma\n'), ((8053, 8081), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 0]'], {}), '(theta_shapes[ig, 0])\n', (8060, 8081), False, 'from scipy.special import digamma\n'), ((8116, 8144), 'scipy.special.digamma', 'digamma', (['theta_shapes[ig, 1]'], {}), '(theta_shapes[ig, 1])\n', (8123, 8144), False, 'from scipy.special import digamma\n'), ((5345, 5368), 'numpy.ones', 'np.ones', (['n_doublet_pair'], {}), '(n_doublet_pair)\n', (5352, 5368), True, 'import numpy as np\n'), ((8931, 8942), 'numpy.sum', 'np.sum', (['Psi'], {}), '(Psi)\n', (8937, 8942), True, 'import numpy as np\n')]
import numpy as np
import matplotlib.pyplot as plt

## preliminary tests
#inputs: A, P, Q, R
# A is the discrete representation of epsilon
#number of spatial harmonics (or orders)
P = 6; Q = 6; R = 6;
Nx = 20; Ny = 20; Nz = 1; #this is fundamentally 3D...not sure how to make general for 2D
N = np.array([Nx, Ny, Nz]);

## generalize two 2D geometries;
A = np.ones(N+1)
A[2:18, 2:18, 0] = 12;
plt.imshow(A[:,:,0]);
plt.show()

# deal with different dimensionalities
if(len(N) == 1):
    Q = 1; R = 1;
elif(len(N) == 2):
    R = 1;

NH = P*Q*R;
p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1));
print(p)
q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1));
r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1));

Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A));

#central indices;
p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2));

C = np.zeros((NH, NH))
C = C.astype(complex);

for rrow in range(R):
    for qrow in range(Q):
        for prow in range(P):
            #first term locates z plane, 2nd locates y column, prow locates x
            row = (rrow)*Q*P+(qrow)*P + prow;
            for rcol in range(R):
                for qcol in range(Q):
                    for pcol in range(P):
                        col = (rcol)*Q*P + (qcol)*P + pcol;
                        pfft = p[prow] - p[pcol];
                        qfft = q[qrow] - q[qcol];
                        rfft = r[rrow] - r[rrow]
                        C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft];

plt.imshow(np.abs(Af[:, :, 0]));
plt.show()
plt.imshow(np.abs(C));
plt.show()
plt.plot(np.diag(abs(C)))
plt.show()
[ "matplotlib.pyplot.show", "numpy.abs", "matplotlib.pyplot.imshow", "numpy.floor", "numpy.fft.fftn", "numpy.zeros", "numpy.ones", "numpy.array", "numpy.prod" ]
[((296, 318), 'numpy.array', 'np.array', (['[Nx, Ny, Nz]'], {}), '([Nx, Ny, Nz])\n', (304, 318), True, 'import numpy as np\n'), ((359, 373), 'numpy.ones', 'np.ones', (['(N + 1)'], {}), '(N + 1)\n', (366, 373), True, 'import numpy as np\n'), ((395, 417), 'matplotlib.pyplot.imshow', 'plt.imshow', (['A[:, :, 0]'], {}), '(A[:, :, 0])\n', (405, 417), True, 'import matplotlib.pyplot as plt\n'), ((417, 427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (425, 427), True, 'import matplotlib.pyplot as plt\n'), ((890, 908), 'numpy.zeros', 'np.zeros', (['(NH, NH)'], {}), '((NH, NH))\n', (898, 908), True, 'import numpy as np\n'), ((1561, 1571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1569, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1595, 1605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1603, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1640, 1642), True, 'import matplotlib.pyplot as plt\n'), ((816, 832), 'numpy.floor', 'np.floor', (['(Nx / 2)'], {}), '(Nx / 2)\n', (824, 832), True, 'import numpy as np\n'), ((842, 858), 'numpy.floor', 'np.floor', (['(Ny / 2)'], {}), '(Ny / 2)\n', (850, 858), True, 'import numpy as np\n'), ((868, 884), 'numpy.floor', 'np.floor', (['(Nz / 2)'], {}), '(Nz / 2)\n', (876, 884), True, 'import numpy as np\n'), ((1539, 1558), 'numpy.abs', 'np.abs', (['Af[:, :, 0]'], {}), '(Af[:, :, 0])\n', (1545, 1558), True, 'import numpy as np\n'), ((1583, 1592), 'numpy.abs', 'np.abs', (['C'], {}), '(C)\n', (1589, 1592), True, 'import numpy as np\n'), ((743, 753), 'numpy.prod', 'np.prod', (['N'], {}), '(N)\n', (750, 753), True, 'import numpy as np\n'), ((771, 785), 'numpy.fft.fftn', 'np.fft.fftn', (['A'], {}), '(A)\n', (782, 785), True, 'import numpy as np\n'), ((565, 580), 'numpy.floor', 'np.floor', (['(P / 2)'], {}), '(P / 2)\n', (573, 580), True, 'import numpy as np\n'), ((585, 600), 'numpy.floor', 'np.floor', (['(P / 2)'], {}), '(P / 2)\n', (593, 600), True, 'import numpy as np\n'), ((634, 649), 'numpy.floor', 'np.floor', (['(Q / 2)'], {}), '(Q / 2)\n', (642, 649), True, 'import numpy as np\n'), ((654, 669), 'numpy.floor', 'np.floor', (['(Q / 2)'], {}), '(Q / 2)\n', (662, 669), True, 'import numpy as np\n'), ((694, 709), 'numpy.floor', 'np.floor', (['(R / 2)'], {}), '(R / 2)\n', (702, 709), True, 'import numpy as np\n'), ((714, 729), 'numpy.floor', 'np.floor', (['(R / 2)'], {}), '(R / 2)\n', (722, 729), True, 'import numpy as np\n')]
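A small illustrative sketch of the index bookkeeping in the convolution-matrix script above: for P = Q = R = 6 the harmonic-order list for each axis runs from -floor(P/2) to +floor(P/2), and C is allocated as an NH x NH complex matrix with NH = P*Q*R.

import numpy as np

P = Q = R = 6
p = list(range(-int(np.floor(P/2)), int(np.floor(P/2)) + 1))
print(p)           # [-3, -2, -1, 0, 1, 2, 3]: the spatial-harmonic orders along one axis
NH = P * Q * R
print(NH)          # 216: number of rows (and columns) of the convolution matrix C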
''' <NAME> (<EMAIL>) Department of Physics University of Bath, UK May 1st, 2020 Conductance model of an RVLM neuron for use with reservoir computing using a modified Hodgkin-Huxley framework of ion channel gating. Model parameters are chosen so as to replicate the behaviour of the thalamocortical relay neuron presented in Huguenard J, McCormick DA, Shepherd GM (1997) 'Electrophysiology of the Neuron'. The neuron model consists of three ionic currents: a passive leak current, a transient sodium current (NaT), and a potassium current (K). The sodium current is controlled by an activation gating variable (m) and an inactivation gating variable (h). The potassium channel is non-inactivating and is controlld by a single activation gating variable (n). The full model state x comprises four state variables - the membrane voltage and the three gating varibales m, h, and n, and is thus described as: x = [V,m,h,n] The only state variable that it is possible to measure experimentally is the membrane voltage. This is the state variable output by the python script. ''' import scipy as sp import numpy as np import matplotlib.pyplot as plt from scipy.integrate import odeint # Define constants TEMP_C = 35 FARADAY = 96480 PI = 3.14159265359 # Model duration (ms) T = 7400 dt = 0.025 # Generate array of time points, from zero to T t = np.arange(0,T,dt) ############################################################################## # Model Equations of Motion ############################################################################## # Define functions for gating kinetics of ion channels # Effect of temperature is accounted for by the Q10 coeff def mm_inf(VV): return 0.5*(1 + sp.tanh((VV - amV1)/ amV2)) def mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1 + sp.tanh((VV - ahV1)/ ahV2)) def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV - ahV1)/ ahV3))) / 3.0**((TEMP_C-23.5)/10) def nn_inf(VV): return 0.5*(1 + sp.tanh((VV - anV1)/ anV2)) def nn_tau(VV): return (tn0 + epsn*(1 - sp.tanh((VV - anV1)/ anV3)*sp.tanh((VV - anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10) # Define functions for ionic currents (in uA/cm^2) # Currents correspond to passive leak, delayed-rectifier potassium, # and transient sodium currents def I_Leak(VV): return gLeak * (VV - EL) def I_K(VV,nn): return gK * nn**4 * (VV - EK) def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV - ENa) # Define equations of motion for full neuron state x = [V,m,h,n] # Use idx to read in correct current stimulation data point # Function reads in system state and returns its derivative def dXdt(X,t): VV, mm, hh, nn, idx = X soma_area = soma_len*soma_diam*PI idx = int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) / Cm dmmdt = (mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt = (nn_inf(VV) - nn)/nn_tau(VV) return dVVdt, dmmdt, dhhdt, dnndt, idx ############################################################################## # Model Parameters ############################################################################## # Soma dimensions (cm) soma_len = 0.01 soma_diam = 0.029/PI # Define model parameters # conductances: gX; reversal potentials: EX; # thresholds: aXV1; membrane capacitance: Cm; # time constants: tx0, epsx Cm = 1 gNaT = 69 ENa = 41 gK = 6.9 EK = -100 EL = -65 gLeak = 0.465 amV1 = -39.92 amV2 = 10 amV3 = 23.39 tm0 = 0.143 epsm = 1.099 ahV1 = -65.37 ahV2 = 
-17.65 ahV3 = 27.22 th0 = 0.701 epsh = 12.90 anV1 = -34.58 anV2 = 22.17 anV3 = 23.58 tn0 = 1.291 epsn = 4.314 ############################################################################## # Preparing current stimulation to be injected into the neuron ############################################################################## # Function for injected a current step (uA/cm^2) # Args: amplitude, init time, final time def i_inj(t): return amp*(t>t_i) - amp*(t>t_f) # Function for loading current injection protocol (uA/cm^2) # Args: file path, amplitude scale (default = 0.02), sample every 'n'th point def load_stim(name, scale, n): stim = [] with open(name, "r") as ins: count = 0 for line in ins: count+=1 if count % n == 0: stim.append(scale*(float(line.rstrip('\n')))) ins.close() return stim # Initialise stim or load external stimulation files # If not loading in external stim, uncomment line below #stim = np.zeros(int(2*T/dt)) stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) # Current step (uA/cm^2) # Define amplitude, init time and end time amp = 0 #0.003 t_i = 100 t_f = 300 ############################################################################## # Initializing the neuron model ############################################################################## # Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)] # Default vals correspond to neuron at steady-state resting potential # Final value in the init array is idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0] ############################################################################## # Running model: forward-integrating the equations of motion ############################################################################## # Integrate model equations # Arguments: state derivative, initial neuron state x(0), time point array X = odeint(dXdt, init, t) # Define variables to simplify analysis VV = X[:,0] mm = X[:,1] hh = X[:,2] nn = X[:,3] # Adding Gaussian error to voltage trace (mV) sigma_obs = 0.1 obs_error = np.random.normal(0, sigma_obs, len(VV)) VV_obs = VV + obs_error ############################################################################## # Plotting and saving model output ############################################################################## # Define total current stimulation = stim[0:len(VV)] + i_inj(t) # Plotting membrane voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel("Membrane Potential (mV)") plt.subplot(2,1,2) plt.ylabel("Current (uA)") plt.plot(t,stimulation,'b',linewidth=0.8) plt.show() # Save voltage data (without gaussian noise) f = open('output/voltage_clean.csv', 'w') for i in range(int(len(VV))): f.write('%f \n' % VV[i]) f.close() # Save voltage data (with gaussian noise) f = open('output/voltage.csv', 'w') for i in range(int(len(VV))): f.write('%f \n' % VV_obs[i]) f.close() # Save current stimulation data f = open('output/stimulation.csv', 'w') for i in range(int(len(VV))): f.write('%f\n' % stimulation[i]) f.close()
[ "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "scipy.integrate.odeint", "scipy.tanh", "numpy.arange", "matplotlib.pyplot.ylabel" ]
[((1399, 1418), 'numpy.arange', 'np.arange', (['(0)', 'T', 'dt'], {}), '(0, T, dt)\n', (1408, 1418), True, 'import numpy as np\n'), ((6104, 6125), 'scipy.integrate.odeint', 'odeint', (['dXdt', 'init', 't'], {}), '(dXdt, init, t)\n', (6110, 6125), False, 'from scipy.integrate import odeint\n'), ((6691, 6711), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6702, 6711), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6750), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'VV_obs', '"""k"""'], {'linewidth': '(0.8)'}), "(t, VV_obs, 'k', linewidth=0.8)\n", (6719, 6750), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Membrane Potential (mV)"""'], {}), "('Membrane Potential (mV)')\n", (6759, 6786), True, 'import matplotlib.pyplot as plt\n'), ((6788, 6808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6799, 6808), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Current (uA)"""'], {}), "('Current (uA)')\n", (6818, 6834), True, 'import matplotlib.pyplot as plt\n'), ((6836, 6880), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'stimulation', '"""b"""'], {'linewidth': '(0.8)'}), "(t, stimulation, 'b', linewidth=0.8)\n", (6844, 6880), True, 'import matplotlib.pyplot as plt\n'), ((6881, 6891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6889, 6891), True, 'import matplotlib.pyplot as plt\n'), ((1758, 1785), 'scipy.tanh', 'sp.tanh', (['((VV - amV1) / amV2)'], {}), '((VV - amV1) / amV2)\n', (1765, 1785), True, 'import scipy as sp\n'), ((1942, 1969), 'scipy.tanh', 'sp.tanh', (['((VV - ahV1) / ahV2)'], {}), '((VV - ahV1) / ahV2)\n', (1949, 1969), True, 'import scipy as sp\n'), ((2126, 2153), 'scipy.tanh', 'sp.tanh', (['((VV - anV1) / anV2)'], {}), '((VV - anV1) / anV2)\n', (2133, 2153), True, 'import scipy as sp\n'), ((1827, 1854), 'scipy.tanh', 'sp.tanh', (['((VV - amV1) / amV3)'], {}), '((VV - amV1) / amV3)\n', (1834, 1854), True, 'import scipy as sp\n'), ((1854, 1881), 'scipy.tanh', 'sp.tanh', (['((VV - amV1) / amV3)'], {}), '((VV - amV1) / amV3)\n', (1861, 1881), True, 'import scipy as sp\n'), ((2011, 2038), 'scipy.tanh', 'sp.tanh', (['((VV - ahV1) / ahV3)'], {}), '((VV - ahV1) / ahV3)\n', (2018, 2038), True, 'import scipy as sp\n'), ((2038, 2065), 'scipy.tanh', 'sp.tanh', (['((VV - ahV1) / ahV3)'], {}), '((VV - ahV1) / ahV3)\n', (2045, 2065), True, 'import scipy as sp\n'), ((2195, 2222), 'scipy.tanh', 'sp.tanh', (['((VV - anV1) / anV3)'], {}), '((VV - anV1) / anV3)\n', (2202, 2222), True, 'import scipy as sp\n'), ((2222, 2249), 'scipy.tanh', 'sp.tanh', (['((VV - anV1) / anV3)'], {}), '((VV - anV1) / anV3)\n', (2229, 2249), True, 'import scipy as sp\n')]
#!/usr/bin/python import sys import os import numpy as np import pandas as pd import argparse import tensorflow as tf from importlib.machinery import SourceFileLoader import math import psutil import time from scipy.sparse import csr_matrix import gc import matplotlib matplotlib.use('Agg') import scimpute def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1): '''Save mse curves to csv files Parameters: ----------- skip: epoch_log: mse_batch_vec: mse_valid_vec: stage: step1 or step2 ''' print('> plotting learning curves') scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec, title="Learning Curve MSE.{}".format(stage), ylabel='MSE (X vs Y, nz)', dir=stage, skip=skip ) _ = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec))) _ = pd.DataFrame(data=_, index=epoch_log, columns=['Epoch', 'MSE_batch', 'MSE_valid'] ).set_index('Epoch') _.to_csv("./{}/mse.csv".format(stage)) #def learning_curve_mse_nz(skip=1): def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1): '''Save mse curves to csv files Parameters: ----------- skip: epoch_log: mse_nz_batch_vec: mse_nz_valid_vec: stage: ''' print('> plotting learning curves') scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, title="Learning Curve MSE_NZ.{}".format(stage), ylabel='MSE_NZ (X vs Y, nz)', dir=stage, skip=skip ) _ = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec))) _ = pd.DataFrame(data=_, index=epoch_log, columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid'] ).set_index('Epoch') _.to_csv("./{}/mse_nz.csv".format(stage)) def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids): '''Calculate /and save/ the snapshot results of the current model on the whole dataset Parameters: ----------- ''' Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1}) # save sample imputation Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids) return Y_input_df #def save_whole_imputation: def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder,pHidden_holder, input_matrix, gene_ids, cell_ids, p, m): ''' calculate and save imputation results for an input matrix at the 'impute' mode. If the number of cells is larger than a threshold (large_size: 1e5), save results of m//p.sample_size 'folds'. 
Parameters ---------- ''' if m > p.large_size: #impute on small data blocks to avoid high memory cost n_out_batches = m//p.sample_size print('num_out_batches:', n_out_batches) handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w') with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle: for i_ in range(n_out_batches+1): start_idx = i_*p.sample_size end_idx = min((i_+1)*p.sample_size, m) print('saving:', start_idx, end_idx) x_out_batch = input_matrix[start_idx:end_idx, :].todense() y_out_batch = sess.run( h, feed_dict={ X: x_out_batch, pIn_holder: 1, pHidden_holder: 1 } ) df_out_batch = pd.DataFrame( data=y_out_batch, columns=gene_ids, index=cell_ids[range(start_idx, end_idx)] ) latent_code = sess.run( a_bottleneck, feed_dict={ X: x_out_batch, pIn_holder: 1, pHidden_holder: 1 } ) latent_code_df = pd.DataFrame( data=latent_code, index=cell_ids[range(start_idx, end_idx)] ) if i_ == 0: df_out_batch.to_csv(handle, float_format='%.6f') latent_code_df.to_csv(handle2, float_format='%.6f') print('RAM usage during mini-batch imputation and saving output: ', '{} M'.format(usage())) else: df_out_batch.to_csv(handle, header=None) latent_code_df.to_csv(handle2, header=None) handle2.close() else: # if m the # of cells is less than large_size (1e5)) Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder: 1}) # save sample imputation Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids) latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder: 1}) latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids) print('RAM usage during whole data imputation and saving output: ', '{} M'.format(usage())) scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage)) scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage, p.stage)) def visualize_weight(sess, stage, w_name, b_name): w = eval(w_name) b = eval(b_name) w_arr = sess.run(w) b_arr = sess.run(b) b_arr = b_arr.reshape(len(b_arr), 1) b_arr_T = b_arr.T scimpute.visualize_weights_biases(w_arr, b_arr_T, '{},{}.{}'.format(w_name, b_name, stage), dir=stage) def visualize_weights(sess, stage, en_de_layers): for l1 in range(1, en_de_layers+1): encoder_weight = 'e_w'+str(l1) encoder_bias = 'e_b'+str(l1) visualize_weight(sess, stage, encoder_weight, encoder_bias) decoder_bias = 'd_b'+str(l1) decoder_weight = 'd_w'+str(l1) visualize_weight(sess, stage, decoder_weight, decoder_bias) def save_weights(sess, stage, en_de_layers): print('save weights in npy') for l1 in range(1, en_de_layers+1): encoder_weight_name = 'e_w'+str(l1) encoder_bias_name = 'e_b'+str(l1) decoder_bias_name = 'd_b'+str(l1) decoder_weight_name = 'd_w'+str(l1) np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage), sess.run(eval(encoder_weight_name))) np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage), sess.run(eval(decoder_weight_name))) np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage), sess.run(eval(encoder_bias_name))) np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage), sess.run(eval(decoder_bias_name))) def usage(): process = psutil.Process(os.getpid()) ram = process.memory_info()[0] / float(2 ** 20) ram = round(ram, 1) return ram # sys.path.append('./bin') # print('sys.path', sys.path) #print('python version:', sys.version) #print('tf.__version__', tf.__version__) def late_main(p, log_dir, rand_state=3): ##0. 
read data and extract gene IDs and cell IDs input_matrix, gene_ids, cell_ids = read_data(p) ##1. split data and save indexes #input p, input_matrix, cell_ids #return cell_ids_train, cell_ids_valid, cell_ids_test m, n = input_matrix.shape input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \ scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c) cell_ids_train = cell_ids[train_idx] cell_ids_valid = cell_ids[valid_idx] cell_ids_test = cell_ids[test_idx] np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s') np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s') np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s') print('RAM usage after splitting input data is: {} M'.format(usage())) # todo: for backward support for older parameter files only # sample_size is 1000 in default; if sample_size is less than the number of cells (m), # we reconstruct the training and validation sets by randomly sampling. try: p.sample_size sample_size = p.sample_size except: sample_size = int(9e4) if sample_size < m: np.random.seed(1) rand_idx = np.random.choice( range(len(cell_ids_train)), min(sample_size, len(cell_ids_train))) sample_train = input_train[rand_idx, :].todense() sample_train_cell_ids = cell_ids_train[rand_idx] rand_idx = np.random.choice( range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid))) sample_valid = input_valid[rand_idx, :].todense() sample_valid_cell_ids = cell_ids_valid[rand_idx] #?? the following sample_input is a matrix sampled randomly, and should it be a matrix containing # sample_training and sample_valid rand_idx = np.random.choice(range(m), min(sample_size, m)) sample_input = input_matrix[rand_idx, :].todense() sample_input_cell_ids = cell_ids[rand_idx] del rand_idx gc.collect() np.random.seed() else: sample_input = input_matrix.todense() sample_train = input_train.todense() sample_valid = input_valid.todense() sample_input_cell_ids = cell_ids sample_train_cell_ids = cell_ids_train sample_valid_cell_ids = cell_ids_valid print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format( len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids) )) ##2. 
model training and validation #2.1 init --> keep this in the main tf.reset_default_graph() # define placeholders and variables X = tf.placeholder(tf.float32, [None, n], name='X_input') # input pIn_holder = tf.placeholder(tf.float32, name='p.pIn') #keep_prob for dropout pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')#keep_prob for dropout #2.2 define layers and variables # input p, X, pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1) a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3) #2.3 define loss # input X, h, p # return mse_nz, mse, reg_term mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef) #2.4 costruct the trainer --> keep this section in the main optimizer = tf.train.AdamOptimizer(p.learning_rate) if p.mse_mode in ('mse_omega', 'mse_nz'): print('training on mse_nz') trainer = optimizer.minimize(mse_nz + reg_term) elif p.mse_mode == 'mse': print('training on mse') trainer = optimizer.minimize(mse + reg_term) else: raise Exception('mse_mode spelled wrong') #2.5 Init a session accoding to the run_flag sess = tf.Session() # restore variables saver = tf.train.Saver() if p.run_flag == 'load_saved': print('*** In TL Mode') saver.restore(sess, "./step1/step1.ckpt") elif p.run_flag == 'rand_init': print('*** In Rand Init Mode') init = tf.global_variables_initializer() sess.run(init) elif p.run_flag == 'impute': print('*** In impute mode loading "step2.ckpt"..') saver.restore(sess, './step2/step2.ckpt') p.max_training_epochs = 0 p.learning_rate = 0.0 ## save_whole_imputation save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder, input_matrix, gene_ids, cell_ids, p, m) print('imputation finished') #toc_stop = time.time() #print("reading took {:.1f} seconds".format(toc_stop - tic_start)) exit() else: raise Exception('run_flag err') # define tensor_board writer batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph) valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph) # prep mini-batch, and reporter vectors num_batch = int(math.floor(len(train_idx) // p.batch_size)) # floor epoch_log = [] mse_nz_batch_vec, mse_nz_valid_vec = [], [] #, mse_nz_train_vec = [], [], [] mse_batch_vec, mse_valid_vec = [], [] # mse = MSE(X, h) #msej_batch_vec, msej_valid_vec = [], [] # msej = MSE(X, h), for genej, nz_cells print('RAM usage after building the model is: {} M'.format(usage())) epoch = 0 #2.6. pre-training epoch (0) #save imputation results before training steps print("Evaluation: epoch{}".format(epoch)) epoch_log.append(epoch) mse_train, mse_nz_train = sess.run([mse, mse_nz], feed_dict={X: sample_train,pHidden_holder: 1.0, pIn_holder: 1.0}) mse_valid, mse_nz_valid = sess.run([mse, mse_nz],feed_dict={X: sample_valid,pHidden_holder: 1.0, pIn_holder: 1.0}) print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=",round(mse_nz_valid, 3)) print("mse_train=", round(mse_train, 3),"mse_valid=", round(mse_valid, 3)) mse_batch_vec.append(mse_train) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_train) mse_nz_valid_vec.append(mse_nz_valid) #2.7. 
training epochs (1-) for epoch in range(1, p.max_training_epochs+1): tic_cpu, tic_wall = time.clock(), time.time() ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False) #2.7.1 training model on mini-batches for i in range(num_batch): # x_batch indices = np.arange(p.batch_size * i, p.batch_size*(i+1)) ridx_batch = ridx_full[indices] # x_batch = df1_train.ix[ridx_batch, :] x_batch = input_train[ridx_batch, :].todense() sess.run(trainer, feed_dict={X: x_batch, pIn_holder: p.pIn, pHidden_holder: p.pHidden}) toc_cpu, toc_wall = time.clock(), time.time() #2.7.2 save the results of epoch 1 and all display steps (epochs) if (epoch == 1) or (epoch % p.display_step == 0): tic_log = time.time() print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format( epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2) )) print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1))) print('RAM usage: {:0.1f} M'.format(usage())) # debug # print('d_w1', sess.run(d_w1[1, 0:4])) # verified when GradDescent used # training mse and mse_nz of the last batch mse_batch, mse_nz_batch, h_batch = sess.run( [mse, mse_nz, h], feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0} ) # validation mse and mse_nz of the sample validation set (1000) mse_valid, mse_nz_valid, Y_valid = sess.run( [mse, mse_nz, h], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0} ) toc_log = time.time() print('mse_nz_batch:{}; mse_omage_valid: {}'. format(mse_nz_batch, mse_nz_valid)) print('mse_batch:', mse_batch, '; mse_valid:', mse_valid) print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1))) mse_batch_vec.append(mse_batch) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_batch) mse_nz_valid_vec.append(mse_nz_valid) epoch_log.append(epoch) #2.7.3 save snapshot step if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs): tic_log2 = time.time() #1.save imputation results #if the input matrix is large (m > p.large_size), only save the #imputation results of a small sample set (sample_input) print("> Impute and save.. 
") if m > p.large_size: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, sample_input, gene_ids, sample_input_cell_ids) scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage, p.stage)) else: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids) scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage)) #2.save model print('> Saving model..') save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage)) print("Model saved in: %s" % save_path) #3.save the training and test curve if p.mse_mode in ('mse_nz', 'mse_omega'): #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step)) learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, p.stage, skip=math.floor(epoch / 5 / p.display_step)) elif p.mse_mode == 'mse': #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step)) learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, p.stage, skip=math.floor(epoch / 5 / p.display_step)) #4.save save_bottleneck_representation print("> save bottleneck_representation") code_bottleneck_input = sess.run(a_bottleneck, feed_dict={ X: sample_input, pIn_holder: 1, pHidden_holder: 1}) np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input) #save_weights() save_weights(sess, p.stage, en_de_layers=p.l) #visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l) toc_log2 = time.time() log2_time = round(toc_log2 - tic_log2, 1) min_mse_valid = min(mse_nz_valid_vec) # os.system( # '''for file in {0}/*npy # do python -u weight_clustmap.py $file {0} # done'''.format(p.stage) # ) print('min_mse_nz_valid till now: {}'.format(min_mse_valid)) print('snapshot_step: {}s'.format(log2_time)) batch_writer.close() valid_writer.close() sess.close() def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3): #5.2 define layers and variables # input p, X, pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1) tf.set_random_seed(rand_state) # seed global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3 global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3 if p.L == 7: # change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Encoder_L2'): e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd) e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder) with tf.name_scope('Encoder_L3'): e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd) e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder) # # with tf.name_scope('Encoder_L4'): # # e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd) # # e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder) # # with tf.name_scope('Decoder_L4'): # # d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd) # # d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder) with tf.name_scope('Decoder_L3'): d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd) d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder) with tf.name_scope('Decoder_L2'): d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd) d_a2 = scimpute.dense_layer('decoder2', 
d_a3, d_w2, d_b2, pHidden_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed # define input/output a_bottleneck = e_a3 elif p.L == 5: # change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Encoder_L2'): e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd) e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder) with tf.name_scope('Decoder_L2'): d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd) d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed # define input/output a_bottleneck = e_a2 elif p.L == 3: # change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed # define input/output a_bottleneck = e_a1 else: raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L)) h = d_a1 return a_bottleneck, h def build_metrics(X, h, coef): with tf.name_scope("Metrics"): omega = tf.sign(X) # 0 if 0, 1 if > 0; not possibly < 0 in our data mse_nz = tf.reduce_mean( tf.multiply( tf.pow(X-h, 2), omega ) ) mse = tf.reduce_mean(tf.pow(X-h, 2)) reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef tf.summary.scalar('mse_nz__Y_vs_X', mse_nz) mse = tf.reduce_mean(tf.pow(X - h, 2)) # for report tf.summary.scalar('mse__Y_vs_X', mse) return mse_nz, mse, reg_term def load_params(mode, infile): '''load the 'global_params.py' file ''' cwd = os.getcwd() param_file = 'global_params.py' param_name = param_file.rstrip('.py') p = SourceFileLoader(param_name, cwd + '/' + param_file).load_module() p.fname_input = infile p.mode = mode if mode == 'pre-training': # step1/rand_init for pre-training on reference p.stage = 'step1' p.run_flag = 'rand_init' p.learning_rate = 3e-4 # step1: 3e-4 for 3-7L, 3e-5 for 9L elif mode == 'translate': # step2/load_saved from step1, for transfer learning p.stage = 'step2' # step1/step2 (not others) p.run_flag = 'load_saved' # rand_init/load_saved p.learning_rate = 3e-5 # step2: 3e-5 for 3-7L, 3e-6 for 9L elif mode == 'late': # step2/rand_init for one step training p.stage = 'step2' p.run_flag = 'rand_init' p.learning_rate = 3e-4 # step1: 3e-4 for 3-7L, 3e-5 for 9L elif mode == 'impute': # step2/load_saved/learning_rate=0, just impute and output p.stage = 'impute' p.run_flag = 'impute' p.learning_rate = 0.0 elif mode == 'analysis': p.tag = 'Eval' p.stage = 'Eval' else: print('The mode you entered cannot be recognized.') print('Valid mode options: pre-training | late | translate | impute | analysis') p.mode = 'invalid' return p if p.test_flag: p.max_training_epochs = 10 # 3L:100, 5L:1000, 7L:1000, 9L:3000 p.display_step = 1 # 
interval on learning curve p.snapshot_step = 5 # interval of saving session, imputation p.m = 1000 p.n = 300 p.sample_size = int(240) print('in test mode\n', 'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m), 'sample size set to {}'.format(p.sample_size)) return p # to do: modify to display based on mode # def display_params(p): # PRINT PARAMETERS print('\nmode:', p.mode) print('\nData:') print('fname_input:', p.fname_input) print('name_input:', p.name_input) print('ori_input:', p.ori_input) print('transformation_input:', p.transformation_input) if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'): print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c)) print('\nParameters:') print('mse_mode:', p.mse_mode) print('stage:', p.stage) print('init:', p.run_flag) print('test_mode:', p.test_flag) print('total number of layers: {}'.format(p.L)) for l_tmp in range(1, p.l+1): print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_'+str(l_tmp)))) print('learning_rate:', p.learning_rate) print('reg_coef:', p.reg_coef) print('batch_size:', p.batch_size) print('sample_zie: ', p.sample_size) print('pIn:', p.pIn) print('pHidden:', p.pHidden) print('max_training_epochs:', p.max_training_epochs) print('display_step', p.display_step) print('snapshot_step', p.snapshot_step) elif p.mode == 'analysis': print('fname_imputation:', p.fname_imputation) print('transformation_imputation', p.transformation_imputation) print('fname_ground_truth: ', p.fname_ground_truth) print('transformation_ground_truth', p.transformation_ground_truth) print('gene_pair_list: ', p.gene_pair_list) print('\n') def read_data(p): '''READ DATA Parameters ------------ p: Return ----------- ''' print('>READING DATA..') print('RAM usage before reading data: {} M'.format(usage())) if p.fname_input.endswith('h5'): # for 10x genomics large h5 files input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input) # gene_be_matrix.matrix = input_obj.matrix.log1p() input_matrix = input_obj.matrix gene_ids = input_obj.gene_ids cell_ids = input_obj.barcodes print('RAM usage after reading sparse matrix: {} M'.format(usage())) gc.collect() # Data Transformation print('> DATA TRANSFORMATION..') input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input) del(input_obj) gc.collect() print('RAM usage after {} transformation: {} M'.format(p.transformation_input, usage())) # Test or not: m*n subset (1000 * 300). 
        # Delete later
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:  # For smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))

        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(),
            transformation=p.transformation_input
        ).transpose()  # [genes, cells] in df_trans()
        print('pandas input_df mem usage: ')
        input_df.info(memory_usage='deep')

        # Test or not
        if p.test_flag:
            print('in test mode')
            input_df = input_df.ix[:p.m, :p.n]
            gc.collect()

        # To sparse
        input_matrix = csr_matrix(input_df)  # todo: directly read into csr, get rid of input_df
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del(input_df)
        gc.collect()  # working on mac
        print('RAM usage after deleting input_df: {} M'.format(usage()))

    # Summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(),
                     index=cell_ids[:20],
                     columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: n_cells; n: n_genes
    print('input_matrix: {} cells, {} genes\n'.format(m, n))

    return input_matrix, gene_ids, cell_ids


def load_results(p):
    '''READ DATA

    Parameters
    ------------
        p: parameters from global_params.py and example.py

    Return
    -----------
        X: input data matrix; genes in columns (same below)
        Y: imputed data matrix
        G: ground truth
    '''
    # print('>READING DATA..')
    # X = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
    X, gene_ids, cell_ids = read_data(p)
    X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids)
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)

    # print('> DATA TRANSFORMATION..')
    Y = scimpute.df_transformation(Y.transpose(), transformation=p.transformation_imputation).transpose()
    # X = scimpute.df_transformation(X.transpose(), transformation=p.transformation_input).transpose()
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose()

    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]

    # TEST MODE OR NOT
    if p.test_flag:
        print('in test mode')
        Y = Y.ix[0:p.m, 0:p.n]
        G = G.ix[0:p.m, 0:p.n]
        X = X.ix[0:p.m, 0:p.n]

    # INPUT SUMMARY
    print('\nIn this code, matrices should have already been transformed into cell_row')
    print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,
          '\n', Y.ix[0:20, 0:3])
    print('X (input):', p.fname_input, p.ori_input, p.transformation_input,
          '\n', X.ix[0:20, 0:3])
    print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,
          '\n', G.ix[0:20, 0:3])
    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)

    return X, Y, G


def calculate_MSEs(X, Y, G):
    '''calculate MSEs
    MSE between imputation and input
    MSE between imputation and ground truth

    Parameters
    ------------
        X: input data matrix; genes in columns (same below)
        Y: imputed data matrix
        G: ground truth

    Return
    -----------
        4 MSEs
    '''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))

    mse1_nz = scimpute.mse_omega(Y, X)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)

    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)

    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)

    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)

    return mse1_nz, mse1, mse2_nz, mse2


def analyze_variation_in_genes(X, Y, G, p):
    '''calculate and visualize standard deviation in each gene
    write SDs to files
    plot histograms of SDs

    Parameters
    ------------
        X: input data matrix; genes in columns (same below)
        Y: imputed data matrix
        G: ground truth
        p: parameters

    Return
    -----------
        None
    '''
    print('\n calculating standard deviation in each gene for input and imputed matrix')
    x_std_df, y_std_df = scimpute.nz_std(X, Y)
    x_std_df, g_std_df = scimpute.nz_std(X, G)  # purpose: compare G with Y

    # std_ratio_yx_df = pd.DataFrame(data= y_std_df.values / x_std_df.values, index=X.columns, columns=['sd_ratio'])
    # std_ratio_yg_df = pd.DataFrame(data= y_std_df.values / g_std_df.values, index=X.columns, columns=['sd_ratio'])
    std_ratio_yx_data = [(y/x if x != 0 else None) for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
    std_ratio_yg_data = [(y/x if x != 0 else None) for y, x in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])

    std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
    std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())

    print('generating histograms of standard deviations')
    scimpute.hist_df(
        y_std_df, xlab='Standard Deviation',
        title='Imputation({})'.format(p.name_imputation),
        range=(std_min, std_max),
        dir=p.tag)
    scimpute.hist_df(
        x_std_df, xlab='Standard Deviation',
        title='Input({})'.format(p.name_input),
        range=(std_min, std_max),
        dir=p.tag)
    scimpute.hist_df(
        g_std_df, xlab='Standard Deviation',
        title='Ground Truth({})'.format(p.name_input),
        range=(std_min, std_max),
        dir=p.tag)
    scimpute.hist_df(
        std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
        title='',
        range=(std_min, std_max),
        dir=p.tag)
    scimpute.hist_df(
        std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
        title='',
        range=(std_min, std_max),
        dir=p.tag)

    std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
    std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')


def visualize_all_genes(X, Y, G, p):
    '''generate plots using all genes

    Parameters
    ------------
        X: input data matrix; genes in columns (same below)
        Y: imputed data matrix
        G: ground truth
        p: parameters

    Return
    -----------
        None
    '''
    # histograms of gene expression
    max_expression = max(G.values.max(), X.values.max(), Y.values.max())
    min_expression = min(G.values.min(), X.values.min(), Y.values.min())
    print('\n max expression:', max_expression)
    print('\n min expression:', min_expression)

    scimpute.hist_df(
        Y, xlab='Expression',
        title='Imputation({})'.format(p.name_imputation),
        dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(
        X, xlab='Expression',
        title='Input({})'.format(p.name_input),
        dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(
        G, xlab='Expression',
        title='Ground Truth({})'.format(p.name_ground_truth),
        dir=p.tag, range=[min_expression, max_expression])

    # histograms of correlations between genes in imputation and ground truth
    # and of correlations between cells in imputation and ground truth
    # when ground truth is not provided,
    # input is used as ground truth
    print('\n> Correlations between ground truth and imputation')
    print('ground truth dimension: ', G.shape, 'imputation dimension: ', Y.shape)
    print('generating histogram for correlations of genes between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each gene\n(Ground_truth vs Imputation)\n{}\n{}".
            format(p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='column-wise', nz_mode='first'  # or ignore
    )
    print('generating histogram for correlations of cells between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each cell\n(Ground_truth vs Imputation)\n{}\n{}".
            format(p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='row-wise', nz_mode='first'
    )

    # heatmaps of data matrices
    print('\n> Generating heatmaps of data matrices')
    range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])
    print('\nrange:', range_max, ' ', range_min)
    scimpute.heatmap_vis(Y.values,
                         title='Imputation ({})'.format(p.name_imputation),
                         xlab='Genes', ylab='Cells',
                         vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(X.values,
                         title='Input ({})'.format(p.name_input),
                         xlab='Genes', ylab='Cells',
                         vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(G.values,
                         title='Ground_truth ({})'.format(p.name_ground_truth),
                         xlab='Genes', ylab='Cells',
                         vmax=range_max, vmin=range_min, dir=p.tag)

    # PCA and tSNE plots
    print('\n> Generating PCA and tSNE plots')
    if p.cluster_file is not None:
        cluster_info = scimpute.read_data_into_cell_row(p.cluster_file)
        # cluster_info = cluster_info.astype('str')
    else:
        cluster_info = None

    scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info,
                      title=p.name_imputation, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info,
                      title=p.name_input, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info,
                      title=p.name_ground_truth, dir=p.tag)


def visualize_selected_genes(X, Y, G, p):
    '''generate plots for genes specified by the user

    Parameters
    ------------
        X: input data matrix; genes in columns (same below)
        Y: imputed data matrix
        G: ground truth
        p: parameters

    Return
    -----------
        None
    '''
    gene_pair_dir = p.tag + '/pairs'
    List = p.gene_pair_list
    print("\n> Scatterplots of selected gene pairs")
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)

    print("\n> Scatterplots for selected genes")
    print("ground truth vs imputation, ground truth vs input")
    gene_dir = p.tag + '/genes'
    # generate a list of genes using the gene_pair_list
    gene_list = [gene for pair in List for gene in pair]
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue

        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Imputation) '),
                              xlabel='Ground Truth',
                              ylabel='Imputation',
                              dir=gene_dir
                              )
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Input) '),
                              xlabel='Ground Truth',
                              ylabel='Input',
                              dir=gene_dir
                              )

    # Discretize gene expression values
    # and re-generate pairwise plots
    Y = scimpute.df_exp_discretize_log10(Y)

    print('\n> Discrete gene pair relationship in imputation')
    gene_pair_dir = p.tag + '/pairs_discrete'
    # List = p.gene_pair_list
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ', dir=gene_pair_dir)

    print("\n> Discrete imputation vs ground truth")
    gene_dir = p.tag + '/genes_discrete'
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue

        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground_truth vs Imputation) '),
                              xlabel='Ground Truth',
                              ylabel='Imputation',
                              dir=gene_dir
                              )
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground_truth vs Input) '),
                              xlabel='Ground Truth',
                              ylabel='Input',
                              dir=gene_dir
                              )


def result_analysis_main(p):
    '''analyzing imputation output

    Parameters
    ------------
        p: parameters from global_params.py and example.py

    Return
    -----------
        None
    '''
    # load imputation results and input data
    X, Y, G = load_results(p)
    # calculate MSEs
    mse1_nz, mse1, mse2_nz, mse2 = calculate_MSEs(X, Y, G)
    # calculate and visualize variation in genes
    analyze_variation_in_genes(X, Y, G, p)
    # visualize results using all genes
    visualize_all_genes(X, Y, G, p)
    # visualize selected genes
    visualize_selected_genes(X, Y, G, p)


def parse_args(argv):
    parser = argparse.ArgumentParser(description='Help information')
    parser.add_argument('-mode', help='mode options: pre-training | late | translate | impute | analysis')
    parser.add_argument('-infile', help='file path of input data')
    return parser.parse_args(argv)


if __name__ == '__main__':
    ##1. load parameter module and use name 'p'
    #print("Usage: python late.py -mode <late> -infile <xx.hd5>")
    argms = parse_args(sys.argv[1:])
    p = load_params(argms.mode, argms.infile)
    if p.mode == 'invalid':
        exit(0)

    ##2. refresh folder
    log_dir = './{}'.format(p.stage)
    scimpute.refresh_logfolder(log_dir)

    tic_start = time.time()
    #3. load data
    input_matrix, gene_ids, cell_ids = read_data(p)
    #4. call late
    late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state=3)
    toc_stop = time.time()
    time_finish = round((toc_stop - tic_start), 2)
    print("Imputation Finished!")
    print("Wall Time Used: {} seconds".format(time_finish))
[ "numpy.random.seed", "argparse.ArgumentParser", "tensorflow.reset_default_graph", "scimpute.max_min_element_in_arrs", "gc.collect", "scimpute.read_data_into_cell_row", "numpy.arange", "scimpute.read_sparse_matrix_from_h5", "pandas.DataFrame", "scimpute.weight_bias_variable", "scimpute.split__csr_matrix", "tensorflow.sign", "tensorflow.set_random_seed", "time.clock", "tensorflow.placeholder", "importlib.machinery.SourceFileLoader", "tensorflow.summary.FileWriter", "scimpute.gene_pair_plot", "tensorflow.name_scope", "scimpute.sparse_matrix_transformation", "scimpute.pca_tsne", "scimpute.mse", "scimpute.refresh_logfolder", "tensorflow.train.Saver", "tensorflow.summary.scalar", "tensorflow.global_variables_initializer", "scimpute.mse_omega", "scimpute.nz_std", "scimpute.df_exp_discretize_log10", "tensorflow.Session", "matplotlib.use", "scipy.sparse.csr_matrix", "scimpute.hist_df", "scimpute.dense_layer", "os.getpid", "os.getcwd", "math.floor", "tensorflow.pow", "time.time", "tensorflow.train.AdamOptimizer" ]
[((269, 290), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (283, 290), False, 'import matplotlib\n'), ((2503, 2567), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Y_input_arr', 'columns': 'gene_ids', 'index': 'cell_ids'}), '(data=Y_input_arr, columns=gene_ids, index=cell_ids)\n', (2515, 2567), True, 'import pandas as pd\n'), ((8291, 8352), 'scimpute.split__csr_matrix', 'scimpute.split__csr_matrix', (['input_matrix'], {'a': 'p.a', 'b': 'p.b', 'c': 'p.c'}), '(input_matrix, a=p.a, b=p.b, c=p.c)\n', (8317, 8352), False, 'import scimpute\n'), ((10418, 10442), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10440, 10442), True, 'import tensorflow as tf\n'), ((10485, 10538), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n]'], {'name': '"""X_input"""'}), "(tf.float32, [None, n], name='X_input')\n", (10499, 10538), True, 'import tensorflow as tf\n'), ((10562, 10602), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""p.pIn"""'}), "(tf.float32, name='p.pIn')\n", (10576, 10602), True, 'import tensorflow as tf\n'), ((10645, 10689), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""p.pHidden"""'}), "(tf.float32, name='p.pHidden')\n", (10659, 10689), True, 'import tensorflow as tf\n'), ((11109, 11148), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['p.learning_rate'], {}), '(p.learning_rate)\n', (11131, 11148), True, 'import tensorflow as tf\n'), ((11479, 11491), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11489, 11491), True, 'import tensorflow as tf\n'), ((11522, 11538), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11536, 11538), True, 'import tensorflow as tf\n'), ((12337, 12390), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(log_dir + '/batch')", 'sess.graph'], {}), "(log_dir + '/batch', sess.graph)\n", (12358, 12390), True, 'import tensorflow as tf\n'), ((12407, 12460), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(log_dir + '/valid')", 'sess.graph'], {}), "(log_dir + '/valid', sess.graph)\n", (12428, 12460), True, 'import tensorflow as tf\n'), ((18266, 18296), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['rand_state'], {}), '(rand_state)\n', (18284, 18296), True, 'import tensorflow as tf\n'), ((22359, 22370), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22368, 22370), False, 'import os\n'), ((28388, 28458), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.fname_imputation', 'p.ori_imputation'], {}), '(p.fname_imputation, p.ori_imputation)\n', (28420, 28458), False, 'import scimpute\n'), ((30132, 30176), 'scimpute.max_min_element_in_arrs', 'scimpute.max_min_element_in_arrs', (['[Y.values]'], {}), '([Y.values])\n', (30164, 30176), False, 'import scimpute\n'), ((30254, 30298), 'scimpute.max_min_element_in_arrs', 'scimpute.max_min_element_in_arrs', (['[G.values]'], {}), '([G.values])\n', (30286, 30298), False, 'import scimpute\n'), ((30372, 30396), 'scimpute.mse_omega', 'scimpute.mse_omega', (['Y', 'X'], {}), '(Y, X)\n', (30390, 30396), False, 'import scimpute\n'), ((30493, 30511), 'scimpute.mse', 'scimpute.mse', (['Y', 'X'], {}), '(Y, X)\n', (30505, 30511), False, 'import scimpute\n'), ((30599, 30623), 'scimpute.mse_omega', 'scimpute.mse_omega', (['Y', 'G'], {}), '(Y, G)\n', (30617, 30623), False, 'import scimpute\n'), ((30727, 30745), 'scimpute.mse', 'scimpute.mse', (['Y', 'G'], {}), '(Y, G)\n', (30739, 30745), False, 'import scimpute\n'), ((31315, 31336), 
'scimpute.nz_std', 'scimpute.nz_std', (['X', 'Y'], {}), '(X, Y)\n', (31330, 31336), False, 'import scimpute\n'), ((31359, 31380), 'scimpute.nz_std', 'scimpute.nz_std', (['X', 'G'], {}), '(X, G)\n', (31374, 31380), False, 'import scimpute\n'), ((31753, 31828), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'std_ratio_yx_data', 'index': 'X.columns', 'columns': "['sd_ratio']"}), "(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])\n", (31765, 31828), True, 'import pandas as pd\n'), ((31948, 32023), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'std_ratio_yg_data', 'index': 'X.columns', 'columns': "['sd_ratio']"}), "(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])\n", (31960, 32023), True, 'import pandas as pd\n'), ((32650, 32777), 'scimpute.hist_df', 'scimpute.hist_df', (['std_ratio_yx_df'], {'xlab': '"""Ratio of Imputation SD vs Input SD"""', 'title': '""""""', 'range': '(std_min, std_max)', 'dir': 'p.tag'}), "(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',\n title='', range=(std_min, std_max), dir=p.tag)\n", (32666, 32777), False, 'import scimpute\n'), ((32788, 32927), 'scimpute.hist_df', 'scimpute.hist_df', (['std_ratio_yg_df'], {'xlab': '"""Ratio of Imputation SD vs Ground Truth SD"""', 'title': '""""""', 'range': '(std_min, std_max)', 'dir': 'p.tag'}), "(std_ratio_yg_df, xlab=\n 'Ratio of Imputation SD vs Ground Truth SD', title='', range=(std_min,\n std_max), dir=p.tag)\n", (32804, 32927), False, 'import scimpute\n'), ((35239, 35303), 'scimpute.max_min_element_in_arrs', 'scimpute.max_min_element_in_arrs', (['[Y.values, G.values, X.values]'], {}), '([Y.values, G.values, X.values])\n', (35271, 35303), False, 'import scimpute\n'), ((36063, 36163), 'scimpute.pca_tsne', 'scimpute.pca_tsne', ([], {'df_cell_row': 'Y', 'cluster_info': 'cluster_info', 'title': 'p.name_imputation', 'dir': 'p.tag'}), '(df_cell_row=Y, cluster_info=cluster_info, title=p.\n name_imputation, dir=p.tag)\n', (36080, 36163), False, 'import scimpute\n'), ((36188, 36283), 'scimpute.pca_tsne', 'scimpute.pca_tsne', ([], {'df_cell_row': 'X', 'cluster_info': 'cluster_info', 'title': 'p.name_input', 'dir': 'p.tag'}), '(df_cell_row=X, cluster_info=cluster_info, title=p.\n name_input, dir=p.tag)\n', (36205, 36283), False, 'import scimpute\n'), ((36308, 36410), 'scimpute.pca_tsne', 'scimpute.pca_tsne', ([], {'df_cell_row': 'G', 'cluster_info': 'cluster_info', 'title': 'p.name_ground_truth', 'dir': 'p.tag'}), '(df_cell_row=G, cluster_info=cluster_info, title=p.\n name_ground_truth, dir=p.tag)\n', (36325, 36410), False, 'import scimpute\n'), ((36838, 36914), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['Y'], {'list': 'List', 'tag': '"""(Imputation)"""', 'dir': 'gene_pair_dir'}), "(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)\n", (36861, 36914), False, 'import scimpute\n'), ((36916, 36987), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['X'], {'list': 'List', 'tag': '"""(Input)"""', 'dir': 'gene_pair_dir'}), "(X, list=List, tag='(Input)', dir=gene_pair_dir)\n", (36939, 36987), False, 'import scimpute\n'), ((36989, 37067), 'scimpute.gene_pair_plot', 'scimpute.gene_pair_plot', (['G'], {'list': 'List', 'tag': '"""(Ground_truth)"""', 'dir': 'gene_pair_dir'}), "(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)\n", (37012, 37067), False, 'import scimpute\n'), ((38145, 38180), 'scimpute.df_exp_discretize_log10', 'scimpute.df_exp_discretize_log10', (['Y'], {}), '(Y)\n', (38177, 38180), False, 'import scimpute\n'), ((38310, 38401), 'scimpute.gene_pair_plot', 
'scimpute.gene_pair_plot', (['Y'], {'list': 'List', 'tag': '"""(Imputation Discrete) """', 'dir': 'gene_pair_dir'}), "(Y, list=List, tag='(Imputation Discrete) ', dir=\n gene_pair_dir)\n", (38333, 38401), False, 'import scimpute\n'), ((39897, 39952), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Help information"""'}), "(description='Help information')\n", (39920, 39952), False, 'import argparse\n'), ((40464, 40499), 'scimpute.refresh_logfolder', 'scimpute.refresh_logfolder', (['log_dir'], {}), '(log_dir)\n', (40490, 40499), False, 'import scimpute\n'), ((40514, 40525), 'time.time', 'time.time', ([], {}), '()\n', (40523, 40525), False, 'import time\n'), ((40692, 40703), 'time.time', 'time.time', ([], {}), '()\n', (40701, 40703), False, 'import time\n'), ((5223, 5287), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Y_input_arr', 'columns': 'gene_ids', 'index': 'cell_ids'}), '(data=Y_input_arr, columns=gene_ids, index=cell_ids)\n', (5235, 5287), True, 'import pandas as pd\n'), ((5540, 5586), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'latent_code', 'index': 'cell_ids'}), '(data=latent_code, index=cell_ids)\n', (5552, 5586), True, 'import pandas as pd\n'), ((7678, 7689), 'os.getpid', 'os.getpid', ([], {}), '()\n', (7687, 7689), False, 'import os\n'), ((9147, 9164), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (9161, 9164), True, 'import numpy as np\n'), ((9902, 9914), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9912, 9914), False, 'import gc\n'), ((9917, 9933), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (9931, 9933), True, 'import numpy as np\n'), ((21820, 21844), 'tensorflow.name_scope', 'tf.name_scope', (['"""Metrics"""'], {}), "('Metrics')\n", (21833, 21844), True, 'import tensorflow as tf\n'), ((21856, 21866), 'tensorflow.sign', 'tf.sign', (['X'], {}), '(X)\n', (21863, 21866), True, 'import tensorflow as tf\n'), ((22105, 22148), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse_nz__Y_vs_X"""', 'mse_nz'], {}), "('mse_nz__Y_vs_X', mse_nz)\n", (22122, 22148), True, 'import tensorflow as tf\n'), ((22207, 22244), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse__Y_vs_X"""', 'mse'], {}), "('mse__Y_vs_X', mse)\n", (22224, 22244), True, 'import tensorflow as tf\n'), ((25745, 25824), 'scimpute.read_sparse_matrix_from_h5', 'scimpute.read_sparse_matrix_from_h5', (['p.fname_input', 'p.genome_input', 'p.ori_input'], {}), '(p.fname_input, p.genome_input, p.ori_input)\n', (25780, 25824), False, 'import scimpute\n'), ((26063, 26075), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26073, 26075), False, 'import gc\n'), ((26153, 26228), 'scimpute.sparse_matrix_transformation', 'scimpute.sparse_matrix_transformation', (['input_matrix', 'p.transformation_input'], {}), '(input_matrix, p.transformation_input)\n', (26190, 26228), False, 'import scimpute\n'), ((26264, 26276), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26274, 26276), False, 'import gc\n'), ((26664, 26724), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.fname_input', 'p.ori_input'], {}), '(p.fname_input, p.ori_input)\n', (26696, 26724), False, 'import scimpute\n'), ((27227, 27247), 'scipy.sparse.csr_matrix', 'csr_matrix', (['input_df'], {}), '(input_df)\n', (27237, 27247), False, 'from scipy.sparse import csr_matrix\n'), ((27445, 27457), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27455, 27457), False, 'import gc\n'), ((28523, 28597), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', 
(['p.fname_ground_truth', 'p.ori_ground_truth'], {}), '(p.fname_ground_truth, p.ori_ground_truth)\n', (28555, 28597), False, 'import scimpute\n'), ((35937, 35985), 'scimpute.read_data_into_cell_row', 'scimpute.read_data_into_cell_row', (['p.cluster_file'], {}), '(p.cluster_file)\n', (35969, 35985), False, 'import scimpute\n'), ((983, 1069), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_', 'index': 'epoch_log', 'columns': "['Epoch', 'MSE_batch', 'MSE_valid']"}), "(data=_, index=epoch_log, columns=['Epoch', 'MSE_batch',\n 'MSE_valid'])\n", (995, 1069), True, 'import pandas as pd\n'), ((1940, 2032), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_', 'index': 'epoch_log', 'columns': "['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid']"}), "(data=_, index=epoch_log, columns=['Epoch', 'MSE_NZ_batch',\n 'MSE_NZ_valid'])\n", (1952, 2032), True, 'import pandas as pd\n'), ((11716, 11749), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11747, 11749), True, 'import tensorflow as tf\n'), ((13688, 13700), 'time.clock', 'time.clock', ([], {}), '()\n', (13698, 13700), False, 'import time\n'), ((13702, 13713), 'time.time', 'time.time', ([], {}), '()\n', (13711, 13713), False, 'import time\n'), ((13893, 13944), 'numpy.arange', 'np.arange', (['(p.batch_size * i)', '(p.batch_size * (i + 1))'], {}), '(p.batch_size * i, p.batch_size * (i + 1))\n', (13902, 13944), True, 'import numpy as np\n'), ((14212, 14224), 'time.clock', 'time.clock', ([], {}), '()\n', (14222, 14224), False, 'import time\n'), ((14226, 14237), 'time.time', 'time.time', ([], {}), '()\n', (14235, 14237), False, 'import time\n'), ((14372, 14383), 'time.time', 'time.time', ([], {}), '()\n', (14381, 14383), False, 'import time\n'), ((15174, 15185), 'time.time', 'time.time', ([], {}), '()\n', (15183, 15185), False, 'import time\n'), ((15720, 15731), 'time.time', 'time.time', ([], {}), '()\n', (15729, 15731), False, 'import time\n'), ((17666, 17677), 'time.time', 'time.time', ([], {}), '()\n', (17675, 17677), False, 'import time\n'), ((18472, 18499), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L1"""'], {}), "('Encoder_L1')\n", (18485, 18499), True, 'import tensorflow as tf\n'), ((18517, 18581), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder1"""', 'n', 'p.n_hidden_1', 'p.sd'], {}), "('encoder1', n, p.n_hidden_1, p.sd)\n", (18546, 18581), False, 'import scimpute\n'), ((18592, 18651), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder1"""', 'X', 'e_w1', 'e_b1', 'pIn_holder'], {}), "('encoder1', X, e_w1, e_b1, pIn_holder)\n", (18612, 18651), False, 'import scimpute\n'), ((18659, 18686), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L2"""'], {}), "('Encoder_L2')\n", (18672, 18686), True, 'import tensorflow as tf\n'), ((18704, 18779), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder2"""', 'p.n_hidden_1', 'p.n_hidden_2', 'p.sd'], {}), "('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)\n", (18733, 18779), False, 'import scimpute\n'), ((18790, 18856), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder2"""', 'e_a1', 'e_w2', 'e_b2', 'pHidden_holder'], {}), "('encoder2', e_a1, e_w2, e_b2, pHidden_holder)\n", (18810, 18856), False, 'import scimpute\n'), ((18864, 18891), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L3"""'], {}), "('Encoder_L3')\n", (18877, 18891), True, 'import tensorflow as tf\n'), ((18909, 18984), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder3"""', 
'p.n_hidden_2', 'p.n_hidden_3', 'p.sd'], {}), "('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)\n", (18938, 18984), False, 'import scimpute\n'), ((18995, 19061), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder3"""', 'e_a2', 'e_w3', 'e_b3', 'pHidden_holder'], {}), "('encoder3', e_a2, e_w3, e_b3, pHidden_holder)\n", (19015, 19061), False, 'import scimpute\n'), ((19515, 19542), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L3"""'], {}), "('Decoder_L3')\n", (19528, 19542), True, 'import tensorflow as tf\n'), ((19560, 19635), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder3"""', 'p.n_hidden_3', 'p.n_hidden_2', 'p.sd'], {}), "('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)\n", (19589, 19635), False, 'import scimpute\n'), ((19646, 19712), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder3"""', 'e_a3', 'd_w3', 'd_b3', 'pHidden_holder'], {}), "('decoder3', e_a3, d_w3, d_b3, pHidden_holder)\n", (19666, 19712), False, 'import scimpute\n'), ((19720, 19747), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L2"""'], {}), "('Decoder_L2')\n", (19733, 19747), True, 'import tensorflow as tf\n'), ((19765, 19840), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder2"""', 'p.n_hidden_2', 'p.n_hidden_1', 'p.sd'], {}), "('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)\n", (19794, 19840), False, 'import scimpute\n'), ((19851, 19917), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder2"""', 'd_a3', 'd_w2', 'd_b2', 'pHidden_holder'], {}), "('decoder2', d_a3, d_w2, d_b2, pHidden_holder)\n", (19871, 19917), False, 'import scimpute\n'), ((19925, 19952), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L1"""'], {}), "('Decoder_L1')\n", (19938, 19952), True, 'import tensorflow as tf\n'), ((19970, 20034), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder1"""', 'p.n_hidden_1', 'n', 'p.sd'], {}), "('decoder1', p.n_hidden_1, n, p.sd)\n", (19999, 20034), False, 'import scimpute\n'), ((20045, 20111), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder1"""', 'd_a2', 'd_w1', 'd_b1', 'pHidden_holder'], {}), "('decoder1', d_a2, d_w1, d_b1, pHidden_holder)\n", (20065, 20111), False, 'import scimpute\n'), ((22038, 22054), 'tensorflow.pow', 'tf.pow', (['(X - h)', '(2)'], {}), '(X - h, 2)\n', (22044, 22054), True, 'import tensorflow as tf\n'), ((22173, 22189), 'tensorflow.pow', 'tf.pow', (['(X - h)', '(2)'], {}), '(X - h, 2)\n', (22179, 22189), True, 'import tensorflow as tf\n'), ((22448, 22500), 'importlib.machinery.SourceFileLoader', 'SourceFileLoader', (['param_name', "(cwd + '/' + param_file)"], {}), "(param_name, cwd + '/' + param_file)\n", (22464, 22500), False, 'from importlib.machinery import SourceFileLoader\n'), ((26589, 26601), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26599, 26601), False, 'import gc\n'), ((27182, 27194), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27192, 27194), False, 'import gc\n'), ((20254, 20281), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L1"""'], {}), "('Encoder_L1')\n", (20267, 20281), True, 'import tensorflow as tf\n'), ((20299, 20363), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder1"""', 'n', 'p.n_hidden_1', 'p.sd'], {}), "('encoder1', n, p.n_hidden_1, p.sd)\n", (20328, 20363), False, 'import scimpute\n'), ((20374, 20433), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder1"""', 'X', 'e_w1', 'e_b1', 'pIn_holder'], {}), "('encoder1', X, e_w1, e_b1, pIn_holder)\n", (20394, 20433), 
False, 'import scimpute\n'), ((20441, 20468), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L2"""'], {}), "('Encoder_L2')\n", (20454, 20468), True, 'import tensorflow as tf\n'), ((20486, 20561), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder2"""', 'p.n_hidden_1', 'p.n_hidden_2', 'p.sd'], {}), "('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)\n", (20515, 20561), False, 'import scimpute\n'), ((20572, 20638), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder2"""', 'e_a1', 'e_w2', 'e_b2', 'pHidden_holder'], {}), "('encoder2', e_a1, e_w2, e_b2, pHidden_holder)\n", (20592, 20638), False, 'import scimpute\n'), ((20646, 20673), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L2"""'], {}), "('Decoder_L2')\n", (20659, 20673), True, 'import tensorflow as tf\n'), ((20691, 20766), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder2"""', 'p.n_hidden_2', 'p.n_hidden_1', 'p.sd'], {}), "('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)\n", (20720, 20766), False, 'import scimpute\n'), ((20777, 20843), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder2"""', 'e_a2', 'd_w2', 'd_b2', 'pHidden_holder'], {}), "('decoder2', e_a2, d_w2, d_b2, pHidden_holder)\n", (20797, 20843), False, 'import scimpute\n'), ((20851, 20878), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L1"""'], {}), "('Decoder_L1')\n", (20864, 20878), True, 'import tensorflow as tf\n'), ((20896, 20960), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder1"""', 'p.n_hidden_1', 'n', 'p.sd'], {}), "('decoder1', p.n_hidden_1, n, p.sd)\n", (20925, 20960), False, 'import scimpute\n'), ((20971, 21037), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder1"""', 'd_a2', 'd_w1', 'd_b1', 'pHidden_holder'], {}), "('decoder1', d_a2, d_w1, d_b1, pHidden_holder)\n", (20991, 21037), False, 'import scimpute\n'), ((21970, 21986), 'tensorflow.pow', 'tf.pow', (['(X - h)', '(2)'], {}), '(X - h, 2)\n', (21976, 21986), True, 'import tensorflow as tf\n'), ((22082, 22094), 'tensorflow.pow', 'tf.pow', (['h', '(2)'], {}), '(h, 2)\n', (22088, 22094), True, 'import tensorflow as tf\n'), ((21180, 21207), 'tensorflow.name_scope', 'tf.name_scope', (['"""Encoder_L1"""'], {}), "('Encoder_L1')\n", (21193, 21207), True, 'import tensorflow as tf\n'), ((21225, 21289), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""encoder1"""', 'n', 'p.n_hidden_1', 'p.sd'], {}), "('encoder1', n, p.n_hidden_1, p.sd)\n", (21254, 21289), False, 'import scimpute\n'), ((21300, 21359), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""encoder1"""', 'X', 'e_w1', 'e_b1', 'pIn_holder'], {}), "('encoder1', X, e_w1, e_b1, pIn_holder)\n", (21320, 21359), False, 'import scimpute\n'), ((21367, 21394), 'tensorflow.name_scope', 'tf.name_scope', (['"""Decoder_L1"""'], {}), "('Decoder_L1')\n", (21380, 21394), True, 'import tensorflow as tf\n'), ((21412, 21476), 'scimpute.weight_bias_variable', 'scimpute.weight_bias_variable', (['"""decoder1"""', 'p.n_hidden_1', 'n', 'p.sd'], {}), "('decoder1', p.n_hidden_1, n, p.sd)\n", (21441, 21476), False, 'import scimpute\n'), ((21487, 21553), 'scimpute.dense_layer', 'scimpute.dense_layer', (['"""decoder1"""', 'e_a1', 'd_w1', 'd_b1', 'pHidden_holder'], {}), "('decoder1', e_a1, d_w1, d_b1, pHidden_holder)\n", (21507, 21553), False, 'import scimpute\n'), ((16856, 16894), 'math.floor', 'math.floor', (['(epoch / 5 / p.display_step)'], {}), '(epoch / 5 / p.display_step)\n', (16866, 16894), False, 'import math\n'), 
((17096, 17134), 'math.floor', 'math.floor', (['(epoch / 5 / p.display_step)'], {}), '(epoch / 5 / p.display_step)\n', (17106, 17134), False, 'import math\n')]
import tqdm
import networkx as nx
import argparse
import numpy as np
import multiprocessing
import graph_tool as gt
from graph_tool.centrality import betweenness

parser = argparse.ArgumentParser()
parser.add_argument("-g", "--graph", help='bundled graph')
parser.add_argument("-l", "--length", help="contig length")
parser.add_argument("-o", "--output", help="output file")
args = parser.parse_args()

G = nx.Graph()
cpus = multiprocessing.cpu_count()
print('Using {} cpus'.format(cpus))

print('Loading bundled graph...')
with open(args.graph, 'r') as f:
    for line in tqdm.tqdm(f, desc='Reading bundled'):
        attrs = line.split()
        G.add_edge(attrs[0], attrs[2], mean=float(attrs[4]), stdev=float(attrs[5]), bsize=int(attrs[6]), ori=attrs[1]+attrs[3])

node_set = set(G.nodes())
print('Loading contig lengths...')
contig_length = {}
with open(args.length, 'r') as f:
    for line in tqdm.tqdm(f, desc='Reading lengths'):
        attrs = line.split()
        if attrs[0] in node_set:
            contig_length[attrs[0]] = int(attrs[1])

del node_set
nx.set_node_attributes(G, 'length', contig_length)

repeat_nodes = {}


def get_prop_type(value, key=None):
    """
    Performs typing and value conversion for the graph_tool PropertyMap class.
    If a key is provided, it also ensures the key is in a format that can be
    used with the PropertyMap. Returns a tuple, (type name, value, key)
    """
    if isinstance(key, unicode):
        # Encode the key as ASCII
        key = key.encode('ascii', errors='replace')

    # Deal with the value
    if isinstance(value, bool):
        tname = 'bool'
    elif isinstance(value, int):
        tname = 'float'
        value = float(value)
    elif isinstance(value, float):
        tname = 'float'
    elif isinstance(value, unicode):
        tname = 'string'
        value = value.encode('ascii', errors='replace')
    elif isinstance(value, dict):
        tname = 'object'
    else:
        tname = 'string'
        value = str(value)

    return tname, value, key


def nx2gt(nxG):
    """
    Converts a networkx graph to a graph-tool graph.
    """
    # Phase 0: Create a directed or undirected graph-tool Graph
    gtG = gt.Graph(directed=nxG.is_directed())

    # Add the Graph properties as "internal properties"
    for key, value in nxG.graph.items():
        # Convert the value and key into a type for graph-tool
        tname, value, key = get_prop_type(value, key)

        prop = gtG.new_graph_property(tname)  # Create the PropertyMap
        gtG.graph_properties[key] = prop      # Set the PropertyMap
        gtG.graph_properties[key] = value     # Set the actual value

    # Phase 1: Add the vertex and edge property maps
    # Go through all nodes and edges and add seen properties
    # Add the node properties first
    nprops = set()  # cache keys to only add properties once
    for node, data in nxG.nodes_iter(data=True):
        # Go through all the properties if not seen and add them.
        for key, val in data.items():
            if key in nprops:
                continue  # Skip properties already added

            # Convert the value and key into a type for graph-tool
            tname, _, key = get_prop_type(val, key)

            prop = gtG.new_vertex_property(tname)  # Create the PropertyMap
            gtG.vertex_properties[key] = prop      # Set the PropertyMap

            # Add the key to the already seen properties
            nprops.add(key)

    # Also add the node id: in NetworkX a node can be any hashable type, but
    # in graph-tool node are defined as indices. So we capture any strings
    # in a special PropertyMap called 'id' -- modify as needed!
    gtG.vertex_properties['id'] = gtG.new_vertex_property('string')

    # Add the edge properties second
    eprops = set()  # cache keys to only add properties once
    for src, dst, data in nxG.edges_iter(data=True):
        # Go through all the edge properties if not seen and add them.
        for key, val in data.items():
            if key in eprops:
                continue  # Skip properties already added

            # Convert the value and key into a type for graph-tool
            tname, _, key = get_prop_type(val, key)

            prop = gtG.new_edge_property(tname)  # Create the PropertyMap
            gtG.edge_properties[key] = prop      # Set the PropertyMap

            # Add the key to the already seen properties
            eprops.add(key)

    # Phase 2: Actually add all the nodes and vertices with their properties
    # Add the nodes
    vertices = {}  # vertex mapping for tracking edges later
    for node, data in nxG.nodes_iter(data=True):
        # Create the vertex and annotate for our edges later
        v = gtG.add_vertex()
        vertices[node] = v

        # Set the vertex properties, not forgetting the id property
        data['id'] = str(node)
        for key, value in data.items():
            gtG.vp[key][v] = value  # vp is short for vertex_properties

    # Add the edges
    for src, dst, data in nxG.edges_iter(data=True):
        # Look up the vertex structs from our vertices mapping and add edge.
        e = gtG.add_edge(vertices[src], vertices[dst])

        # Add the edge properties
        for key, value in data.items():
            gtG.ep[key][e] = value  # ep is short for edge_properties

    # Done, finally!
    return gtG


def get_centrality(subg):
    # centralities = nx.betweenness_centrality(subg)
    # print(centralities)
    _g = nx2gt(subg)
    centralities, _ = betweenness(_g)
    v = centralities.get_array()
    mean = float(np.mean(v))
    stdev = float(np.std(v))
    for node in _g.vertices():
        if centralities[node] >= mean + 3*stdev:
            repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node]


def centrality_wrapper(graph):
    n_comp = nx.number_connected_components(graph)
    print('The graph has {} components'.format(n_comp))
    for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'):
        if len(subg.nodes()) >= 50:
            get_centrality(subg)


G_copy = G.copy()
print('Writing output...')
ofile = open(args.output, 'w')
for i in xrange(3):
    centrality_wrapper(G_copy)
    for node in tqdm.tqdm(repeat_nodes, desc='Checking repeats'):
        if G_copy.has_node(node):
            G_copy.remove_node(node)
            ofile.write(str(node)+'\t'+str(repeat_nodes[node])+'\n')

#for u,v,data in G_copy.edges(data=True):
#    print u +"\t"+data[u][v]['ori'][0]+v+"\t"+data[u][v]['ori'][1]+"\t"+str(data[u][v]["mean"])+"\t"+str(data[u][v]["stdev"])+"\t"+str(data[u][v]["bsize"])
#nx.write_gml(G_copy,args.output)
[ "tqdm.tqdm", "argparse.ArgumentParser", "networkx.set_node_attributes", "numpy.std", "numpy.mean", "networkx.Graph", "graph_tool.centrality.betweenness", "networkx.connected_component_subgraphs", "networkx.number_connected_components", "multiprocessing.cpu_count" ]
[((172, 197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (195, 197), False, 'import argparse\n'), ((402, 412), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (410, 412), True, 'import networkx as nx\n'), ((420, 447), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (445, 447), False, 'import multiprocessing\n'), ((1055, 1105), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', '"""length"""', 'contig_length'], {}), "(G, 'length', contig_length)\n", (1077, 1105), True, 'import networkx as nx\n'), ((567, 603), 'tqdm.tqdm', 'tqdm.tqdm', (['f'], {'desc': '"""Reading bundled"""'}), "(f, desc='Reading bundled')\n", (576, 603), False, 'import tqdm\n'), ((888, 924), 'tqdm.tqdm', 'tqdm.tqdm', (['f'], {'desc': '"""Reading lengths"""'}), "(f, desc='Reading lengths')\n", (897, 924), False, 'import tqdm\n'), ((5481, 5496), 'graph_tool.centrality.betweenness', 'betweenness', (['_g'], {}), '(_g)\n', (5492, 5496), False, 'from graph_tool.centrality import betweenness\n'), ((5794, 5831), 'networkx.number_connected_components', 'nx.number_connected_components', (['graph'], {}), '(graph)\n', (5824, 5831), True, 'import networkx as nx\n'), ((6202, 6250), 'tqdm.tqdm', 'tqdm.tqdm', (['repeat_nodes'], {'desc': '"""Checking repeats"""'}), "(repeat_nodes, desc='Checking repeats')\n", (6211, 6250), False, 'import tqdm\n'), ((5547, 5557), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (5554, 5557), True, 'import numpy as np\n'), ((5577, 5586), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (5583, 5586), True, 'import numpy as np\n'), ((5914, 5953), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['graph'], {}), '(graph)\n', (5946, 5953), True, 'import networkx as nx\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 14:11:07 2019

@author: mimbres
"""
import pandas as pd
import numpy as np
from tqdm import trange

LASTFM_FILEPATH = './data/final_mapping.json'
OUTPUT_FILEPATH1 = './data/lastfm_top50_tagmtx.npy'
OUTPUT_FILEPATH2 = './data/lastfm_top50_featmtx.npy'
OUTPUT_FILEPATH3 = './data/lastfm_top50_track_ids.npy'
OUTPUT_FILEPATH4 = './data/lastfm_top50_tag_avail_cnt.npy'
SAVED_SCALER_FILEPATH = './data/std_scaler.sav'

TOP50A = ['rock', 'pop', 'alternative', 'indie', 'favorites', 'female vocalists',
          'Love', 'alternative rock', 'electronic', 'beautiful', 'jazz', '00s',
          'singer-songwriter', 'metal', 'male vocalists', 'Awesome', 'american',
          'Mellow', 'classic rock', '90s', 'soul', 'chillout', 'punk', '80s',
          'chill', 'indie rock', 'folk', 'dance', 'instrumental', 'hard rock',
          'oldies', 'seen live', 'Favorite', 'country', 'blues', 'guitar', 'cool',
          'british', 'acoustic', 'electronica', '70s', 'Favourites', 'Hip-Hop',
          'experimental', 'easy listening', 'female vocalist', 'ambient',
          'punk rock', 'funk', 'hardcore']

_dict = {'major': 1, 'minor': 0}

# Load .json file...
df = pd.read_json(LASTFM_FILEPATH)
num_items = len(df)

# Shuffle (we can split train/test later)
df = df.sample(frac=1).reset_index(drop=True)

# Create an empty result matrix
tag_mtx = np.zeros((num_items, 50))
feat_mtx = np.zeros((num_items, 29))
track_ids = np.ndarray((num_items,), dtype=object)
tag_avail_cnt = np.zeros((num_items,))

for i in trange(num_items):
    item = np.asarray(df[0][i])  # Get one item
    tag_cnt = 0
    for tag in TOP50A:
        # Check availability of each tag in this item
        _idx = np.where(tag == item)[0]
        if len(_idx) is not 0:  # If top50-tag available...
            tag_cnt += 1
            column_idx = _idx[0]
            #print(i, item[column_idx,:])
            tag_mtx[i, TOP50A.index(tag)] = item[column_idx, 1].astype(np.float)

    tag_avail_cnt[i] = tag_cnt
    track_ids[i] = df[1][i][0]
    if tag_cnt is not 0:
        _feat = np.asarray(df[1][i])
        _feat[20] = _dict.get(_feat[20])  # {'major', 'minor'} --> {0,1}
        _feat[5] = _feat[5][:4]  # '2005-01-01' --> '2005'
        feat_mtx[i, :] = _feat[[4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
                                21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]]

print('max available tags =', np.max(tag_avail_cnt), '\n',
      'avg available tags =', np.mean(tag_avail_cnt[np.where(tag_avail_cnt != 0)]), '\n',
      'items with top50 unavailable =', len(np.where(tag_avail_cnt == 0)[0]), '\n',
      'items with top50 available =', len(np.where(tag_avail_cnt != 0)[0]))
'''
max available tags = 31.0
avg available tags = 4.705301775916366
items with top50 unavailable = 38595
items with top50 available = 123204
'''

# Reduce top50 unavailable items
tag_mtx = tag_mtx[tag_avail_cnt != 0, :]
feat_mtx = feat_mtx[tag_avail_cnt != 0, :]
track_ids = track_ids[tag_avail_cnt != 0]

# Feature normalization
import pickle
#from sklearn.preprocessing import StandardScaler
scaler = pickle.load(open(SAVED_SCALER_FILEPATH, 'rb'))
feat_mtx_new = scaler.fit_transform(feat_mtx)
feat_mtx_new[:, 15] = feat_mtx[:, 15]

# Save results as .npy
np.save(OUTPUT_FILEPATH1, tag_mtx.astype(np.int8))
#np.save(OUTPUT_FILEPATH2, feat_mtx.astype(np.int8))
np.save(OUTPUT_FILEPATH2, feat_mtx_new.astype(np.float32))
np.save(OUTPUT_FILEPATH3, track_ids)
np.save(OUTPUT_FILEPATH4, tag_avail_cnt.astype(np.int8))
[ "numpy.save", "tqdm.trange", "numpy.asarray", "numpy.zeros", "pandas.read_json", "numpy.max", "numpy.where", "numpy.ndarray" ]
[((1219, 1248), 'pandas.read_json', 'pd.read_json', (['LASTFM_FILEPATH'], {}), '(LASTFM_FILEPATH)\n', (1231, 1248), True, 'import pandas as pd\n'), ((1402, 1427), 'numpy.zeros', 'np.zeros', (['(num_items, 50)'], {}), '((num_items, 50))\n', (1410, 1427), True, 'import numpy as np\n'), ((1438, 1463), 'numpy.zeros', 'np.zeros', (['(num_items, 29)'], {}), '((num_items, 29))\n', (1446, 1463), True, 'import numpy as np\n'), ((1475, 1513), 'numpy.ndarray', 'np.ndarray', (['(num_items,)'], {'dtype': 'object'}), '((num_items,), dtype=object)\n', (1485, 1513), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.zeros', 'np.zeros', (['(num_items,)'], {}), '((num_items,))\n', (1538, 1552), True, 'import numpy as np\n'), ((1564, 1581), 'tqdm.trange', 'trange', (['num_items'], {}), '(num_items)\n', (1570, 1581), False, 'from tqdm import trange\n'), ((3414, 3450), 'numpy.save', 'np.save', (['OUTPUT_FILEPATH3', 'track_ids'], {}), '(OUTPUT_FILEPATH3, track_ids)\n', (3421, 3450), True, 'import numpy as np\n'), ((1594, 1614), 'numpy.asarray', 'np.asarray', (['df[0][i]'], {}), '(df[0][i])\n', (1604, 1614), True, 'import numpy as np\n'), ((2422, 2443), 'numpy.max', 'np.max', (['tag_avail_cnt'], {}), '(tag_avail_cnt)\n', (2428, 2443), True, 'import numpy as np\n'), ((2117, 2137), 'numpy.asarray', 'np.asarray', (['df[1][i]'], {}), '(df[1][i])\n', (2127, 2137), True, 'import numpy as np\n'), ((1744, 1765), 'numpy.where', 'np.where', (['(tag == item)'], {}), '(tag == item)\n', (1752, 1765), True, 'import numpy as np\n'), ((2503, 2531), 'numpy.where', 'np.where', (['(tag_avail_cnt != 0)'], {}), '(tag_avail_cnt != 0)\n', (2511, 2531), True, 'import numpy as np\n'), ((2583, 2611), 'numpy.where', 'np.where', (['(tag_avail_cnt == 0)'], {}), '(tag_avail_cnt == 0)\n', (2591, 2611), True, 'import numpy as np\n'), ((2663, 2691), 'numpy.where', 'np.where', (['(tag_avail_cnt != 0)'], {}), '(tag_avail_cnt != 0)\n', (2671, 2691), True, 'import numpy as np\n')]
#Programmer: <NAME>
#This file contains a test step function for debugging the swept rule

import numpy, h5py, mpi4py.MPI as MPI
try:
    import pycuda.driver as cuda
    from pycuda.compiler import SourceModule
except Exception as e:
    pass

def step(state,iidx,arrayTimeIndex,globalTimeStep):
    """This is the method that will be called by the swept solver.
    state - 4D numpy array(t,v,x,y (v is variables length))
    iidx - an iterable of indexs
    arrayTimeIndex - the current time step
    globalTimeStep - a step counter that allows implementation of the scheme
    """
    if scheme:
        checkerOneStep(state,iidx,arrayTimeIndex,globalTimeStep)
    else:
        checkerTwoStep(state,iidx,arrayTimeIndex,globalTimeStep)

def checkerOneStep(state,iidx,arrayTimeIndex,globalTimeStep):
    """Use this function as the one step checker pattern"""
    vs = slice(0,state.shape[1],1)
    for idx,idy in iidx:
        ntidx = (arrayTimeIndex+1,vs,idx,idy)  #next step index
        state[ntidx] = state[arrayTimeIndex,vs,idx+1,idy]
        state[ntidx] += state[arrayTimeIndex,vs,idx-1,idy]
        state[ntidx] += state[arrayTimeIndex,vs,idx,idy+1]
        state[ntidx] += state[arrayTimeIndex,vs,idx,idy-1]
        state[ntidx] /= 4

def checkerTwoStep(state,iidx,arrayTimeIndex,globalTimeStep):
    """Use this function as the two step checker pattern"""
    vs = slice(0,state.shape[1],1)
    for idx,idy in iidx:
        ntidx = (arrayTimeIndex+1,vs,idx,idy)  #next step index
        state[ntidx] = state[arrayTimeIndex,vs,idx+1,idy]
        state[ntidx] += state[arrayTimeIndex,vs,idx-1,idy]
        state[ntidx] += state[arrayTimeIndex,vs,idx,idy+1]
        state[ntidx] += state[arrayTimeIndex,vs,idx,idy-1]
        state[ntidx] /= 4

def createInitialConditions(nv,nx,ny,filename="checkerConditions.hdf5"):
    """Use this function to create a set of initial conditions in an hdf5 file."""
    comm = MPI.COMM_WORLD
    data = numpy.zeros((nv,nx,ny))
    for i in range(0,nx,2):
        for j in range(0,ny,2):
            data[:,i,j]=1
    for i in range(1,nx,2):
        for j in range(1,ny,2):
            data[:,i,j]=1
    with h5py.File(filename,"w",driver="mpio",comm=comm) as hf:
        hf.create_dataset("data",data.shape,data=data)
    return filename

def set_globals(*args,source_mod=None):
    """Use this function to set cpu global variables"""
    global dt,dx,dy,scheme  #true for one step
    t0,tf,dt,dx,dy,scheme = args
    if source_mod is not None:
        keys = "<KEY>"
        nargs = args[2:]
        fc = lambda x:numpy.float64(x)
        for i,key in enumerate(keys):
            ckey,_ = source_mod.get_global(key)
            cuda.memcpy_htod(ckey,fc(nargs[i]))
        ckey,_ = source_mod.get_global("SCHEME")
        cuda.memcpy_htod(ckey,bytes(scheme))
[ "numpy.float64", "h5py.File", "numpy.zeros" ]
[((1951, 1976), 'numpy.zeros', 'numpy.zeros', (['(nv, nx, ny)'], {}), '((nv, nx, ny))\n', (1962, 1976), False, 'import numpy, h5py, mpi4py.MPI as MPI\n'), ((2156, 2206), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {'driver': '"""mpio"""', 'comm': 'comm'}), "(filename, 'w', driver='mpio', comm=comm)\n", (2165, 2206), False, 'import numpy, h5py, mpi4py.MPI as MPI\n'), ((2563, 2579), 'numpy.float64', 'numpy.float64', (['x'], {}), '(x)\n', (2576, 2579), False, 'import numpy, h5py, mpi4py.MPI as MPI\n')]
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T

import treeano
import treeano.nodes as tn

fX = theano.config.floatX


def test_aggregator_node_serialization():
    tn.check_serialization(tn.AggregatorNode("a"))


def test_elementwise_cost_node_serialization():
    tn.check_serialization(tn.ElementwiseCostNode(
        "foo",
        {"pred": tn.IdentityNode("foo"),
         "target": tn.IdentityNode("bar")}))


def test_total_cost_node_serialization():
    tn.check_serialization(tn.TotalCostNode(
        "foo",
        {"pred": tn.IdentityNode("foo"),
         "target": tn.IdentityNode("bar")}))


def test_auxilliary_cost_node_serialization():
    tn.check_serialization(tn.AuxiliaryCostNode(
        "foo",
        {"target": tn.IdentityNode("bar")}))


def test_total_cost_node():
    network = tn.TotalCostNode(
        "cost",
        {"pred": tn.InputNode("x", shape=(3, 4, 5)),
         "target": tn.InputNode("y", shape=(3, 4, 5))},
        cost_function=treeano.utils.squared_error).network()
    fn = network.function(["x", "y"], ["cost"])
    x = np.random.rand(3, 4, 5).astype(fX)
    y = np.random.rand(3, 4, 5).astype(fX)
    np.testing.assert_allclose(fn(x, y)[0],
                               ((x - y) ** 2).mean(),
                               rtol=1e-5)
    np.testing.assert_allclose(fn(x, x)[0], 0)
    np.testing.assert_allclose(fn(y, y)[0], 0)


def test_total_cost_node_with_weight():
    network = tn.TotalCostNode(
        "cost",
        {"pred": tn.InputNode("x", shape=(3, 4, 5)),
         "weight": tn.InputNode("w", shape=(3, 4, 5)),
         "target": tn.InputNode("y", shape=(3, 4, 5))},
        cost_function=treeano.utils.squared_error).network()
    fn = network.function(["x", "y", "w"], ["cost"])
    x = np.random.rand(3, 4, 5).astype(fX)
    w = np.random.rand(3, 4, 5).astype(fX)
    y = np.random.rand(3, 4, 5).astype(fX)
    np.testing.assert_allclose(fn(x, y, w)[0],
                               (((x - y) ** 2) * w).mean(),
                               rtol=1e-5)
    np.testing.assert_allclose(fn(x, x, w)[0], 0)
    np.testing.assert_allclose(fn(y, y, w)[0], 0)


def test_auxiliary_cost_node():
    network = tn.HyperparameterNode(
        "hp",
        tn.SequentialNode(
            "seq",
            [tn.InputNode("x", shape=(3, 4, 5)),
             tn.AuxiliaryCostNode(
                 "cost1",
                 {"target": tn.InputNode("y1", shape=(3, 4, 5))}),
             tn.AddConstantNode("a1", value=2),
             tn.AuxiliaryCostNode(
                 "cost2",
                 {"target": tn.InputNode("y2", shape=(3, 4, 5))}),
             tn.MultiplyConstantNode("m1", value=2),
             tn.AuxiliaryCostNode(
                 "cost3",
                 {"target": tn.InputNode("y3", shape=(3, 4, 5))}),
             tn.ConstantNode("const", value=0),
             tn.InputElementwiseSumNode("cost")]
        ),
        cost_reference="cost",
        cost_function=treeano.utils.squared_error,
    ).network()
    fn = network.function(["x", "y1", "y2", "y3"], ["cost"])
    x = np.random.rand(3, 4, 5).astype(fX)
    ys = [np.random.rand(3, 4, 5).astype(fX)
          for _ in range(3)]

    def mse(x, y):
        return ((x - y) ** 2).mean()

    expected_output = (mse(x, ys[0])
                       + mse(x + 2, ys[1])
                       + mse(2 * (x + 2), ys[2]))
    np.testing.assert_allclose(fn(x, *ys)[0],
                               expected_output,
                               rtol=1e-5)
[ "treeano.nodes.ConstantNode", "treeano.nodes.MultiplyConstantNode", "treeano.nodes.InputElementwiseSumNode", "treeano.nodes.AddConstantNode", "treeano.nodes.AggregatorNode", "treeano.nodes.InputNode", "numpy.random.rand", "treeano.nodes.IdentityNode" ]
[((224, 246), 'treeano.nodes.AggregatorNode', 'tn.AggregatorNode', (['"""a"""'], {}), "('a')\n", (241, 246), True, 'import treeano.nodes as tn\n'), ((1102, 1125), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1116, 1125), True, 'import numpy as np\n'), ((1145, 1168), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1159, 1168), True, 'import numpy as np\n'), ((1852, 1875), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1866, 1875), True, 'import numpy as np\n'), ((1895, 1918), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1909, 1918), True, 'import numpy as np\n'), ((1938, 1961), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1952, 1961), True, 'import numpy as np\n'), ((3224, 3247), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (3238, 3247), True, 'import numpy as np\n'), ((381, 403), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""foo"""'], {}), "('foo')\n", (396, 403), True, 'import treeano.nodes as tn\n'), ((424, 446), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""bar"""'], {}), "('bar')\n", (439, 446), True, 'import treeano.nodes as tn\n'), ((571, 593), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""foo"""'], {}), "('foo')\n", (586, 593), True, 'import treeano.nodes as tn\n'), ((614, 636), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""bar"""'], {}), "('bar')\n", (629, 636), True, 'import treeano.nodes as tn\n'), ((772, 794), 'treeano.nodes.IdentityNode', 'tn.IdentityNode', (['"""bar"""'], {}), "('bar')\n", (787, 794), True, 'import treeano.nodes as tn\n'), ((3269, 3292), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (3283, 3292), True, 'import numpy as np\n'), ((893, 927), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""x"""'], {'shape': '(3, 4, 5)'}), "('x', shape=(3, 4, 5))\n", (905, 927), True, 'import treeano.nodes as tn\n'), ((948, 982), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y"""'], {'shape': '(3, 4, 5)'}), "('y', shape=(3, 4, 5))\n", (960, 982), True, 'import treeano.nodes as tn\n'), ((1583, 1617), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""x"""'], {'shape': '(3, 4, 5)'}), "('x', shape=(3, 4, 5))\n", (1595, 1617), True, 'import treeano.nodes as tn\n'), ((1638, 1672), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""w"""'], {'shape': '(3, 4, 5)'}), "('w', shape=(3, 4, 5))\n", (1650, 1672), True, 'import treeano.nodes as tn\n'), ((1693, 1727), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y"""'], {'shape': '(3, 4, 5)'}), "('y', shape=(3, 4, 5))\n", (1705, 1727), True, 'import treeano.nodes as tn\n'), ((2428, 2462), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""x"""'], {'shape': '(3, 4, 5)'}), "('x', shape=(3, 4, 5))\n", (2440, 2462), True, 'import treeano.nodes as tn\n'), ((2605, 2638), 'treeano.nodes.AddConstantNode', 'tn.AddConstantNode', (['"""a1"""'], {'value': '(2)'}), "('a1', value=2)\n", (2623, 2638), True, 'import treeano.nodes as tn\n'), ((2781, 2819), 'treeano.nodes.MultiplyConstantNode', 'tn.MultiplyConstantNode', (['"""m1"""'], {'value': '(2)'}), "('m1', value=2)\n", (2804, 2819), True, 'import treeano.nodes as tn\n'), ((2962, 2995), 'treeano.nodes.ConstantNode', 'tn.ConstantNode', (['"""const"""'], {'value': '(0)'}), "('const', value=0)\n", (2977, 2995), True, 'import treeano.nodes as tn\n'), ((3010, 3044), 'treeano.nodes.InputElementwiseSumNode', 
'tn.InputElementwiseSumNode', (['"""cost"""'], {}), "('cost')\n", (3036, 3044), True, 'import treeano.nodes as tn\n'), ((2553, 2588), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y1"""'], {'shape': '(3, 4, 5)'}), "('y1', shape=(3, 4, 5))\n", (2565, 2588), True, 'import treeano.nodes as tn\n'), ((2729, 2764), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y2"""'], {'shape': '(3, 4, 5)'}), "('y2', shape=(3, 4, 5))\n", (2741, 2764), True, 'import treeano.nodes as tn\n'), ((2910, 2945), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""y3"""'], {'shape': '(3, 4, 5)'}), "('y3', shape=(3, 4, 5))\n", (2922, 2945), True, 'import treeano.nodes as tn\n')]
# NOTE WARNING NEVER CHANGE THIS FIRST LINE!!!! NEVER EVER import cudf from collections import OrderedDict from enum import Enum from urllib.parse import urlparse from threading import Lock from weakref import ref from pyblazing.apiv2.filesystem import FileSystem from pyblazing.apiv2 import DataType from .hive import * import time import datetime import socket import errno import subprocess import os import re import pandas import numpy as np import pyarrow from urllib.parse import urlparse from urllib.parse import ParseResult from pathlib import PurePath import cio import pyblazing import cudf import dask_cudf import dask import jpype import dask.distributed import netifaces as ni import random jpype.addClassPath( os.path.join( os.getenv("CONDA_PREFIX"), 'lib/blazingsql-algebra.jar')) jpype.addClassPath( os.path.join( os.getenv("CONDA_PREFIX"), 'lib/blazingsql-algebra-core.jar')) jpype.startJVM(jpype.getDefaultJVMPath(), '-ea', convertStrings=False) ArrayClass = jpype.JClass('java.util.ArrayList') ColumnTypeClass = jpype.JClass( 'com.blazingdb.calcite.catalog.domain.CatalogColumnDataType') dataType = ColumnTypeClass.fromString("GDF_INT8") ColumnClass = jpype.JClass( 'com.blazingdb.calcite.catalog.domain.CatalogColumnImpl') TableClass = jpype.JClass( 'com.blazingdb.calcite.catalog.domain.CatalogTableImpl') DatabaseClass = jpype.JClass( 'com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl') BlazingSchemaClass = jpype.JClass('com.blazingdb.calcite.schema.BlazingSchema') RelationalAlgebraGeneratorClass = jpype.JClass( 'com.blazingdb.calcite.application.RelationalAlgebraGenerator') def get_np_dtype_to_gdf_dtype_str(dtype): dtypes = { np.dtype('float64'): 'GDF_FLOAT64', np.dtype('float32'): 'GDF_FLOAT32', np.dtype('int64'): 'GDF_INT64', np.dtype('int32'): 'GDF_INT32', np.dtype('int16'): 'GDF_INT16', np.dtype('int8'): 'GDF_INT8', np.dtype('bool_'): 'GDF_BOOL8', np.dtype('datetime64[s]'): 'GDF_DATE64', np.dtype('datetime64[ms]'): 'GDF_DATE64', np.dtype('datetime64[ns]'): 'GDF_TIMESTAMP', np.dtype('datetime64[us]'): 'GDF_TIMESTAMP', np.dtype('datetime64'): 'GDF_DATE64', np.dtype('object_'): 'GDF_STRING', np.dtype('str_'): 'GDF_STRING', np.dtype('<M8[s]'): 'GDF_DATE64', np.dtype('<M8[ms]'): 'GDF_DATE64', np.dtype('<M8[ns]'): 'GDF_TIMESTAMP', np.dtype('<M8[us]'): 'GDF_TIMESTAMP' } ret = dtypes[np.dtype(dtype)] return ret def checkSocket(socketNum): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) socket_free = False try: s.bind(("127.0.0.1", socketNum)) socket_free = True except socket.error as e: if e.errno == errno.EADDRINUSE: socket_free = False else: # something else raised the socket.error exception print("ERROR: Something happened when checking socket " + str(socketNum)) #print(e) s.close() return socket_free def initializeBlazing(ralId=0, networkInterface='lo', singleNode=False, allocator="managed", pool=True,initial_pool_size=None, enable_logging=False): #print(networkInterface) workerIp = ni.ifaddresses(networkInterface)[ni.AF_INET][0]['addr'] ralCommunicationPort = random.randint(10000, 32000) + ralId while checkSocket(ralCommunicationPort) == False: ralCommunicationPort = random.randint(10000, 32000) + ralId cudf.set_allocator(allocator=allocator, pool=pool, initial_pool_size=initial_pool_size,# Default is 1/2 total GPU memory enable_logging=enable_logging) cio.initializeCaller( ralId, 0, networkInterface.encode(), workerIp.encode(), ralCommunicationPort, singleNode) cwd = os.getcwd() return ralCommunicationPort, 
workerIp, cwd def getNodePartitions(df, client): df = df.persist() workers = client.scheduler_info()['workers'] connectionToId = {} for worker in workers: connectionToId[worker] = workers[worker]['name'] dask.distributed.wait(df) #print(client.who_has(df)) worker_part = client.who_has(df) worker_partitions = {} for key in worker_part: worker = worker_part[key][0] partition = int(key[key.find(",") + 2:(len(key) - 1)]) if connectionToId[worker] not in worker_partitions: worker_partitions[connectionToId[worker]] = [] worker_partitions[connectionToId[worker]].append(partition) #print("worker partitions") #print(worker_partitions) return worker_partitions def collectPartitionsRunQuery( masterIndex, nodes, tables, fileTypes, ctxToken, algebra, accessToken): import dask.distributed worker_id = dask.distributed.get_worker().name for table_name in tables: if(isinstance(tables[table_name].input, dask_cudf.core.DataFrame)): partitions = tables[table_name].get_partitions(worker_id) if (len(partitions) == 0): tables[table_name].input = tables[table_name].input.get_partition( 0).head(0) elif (len(partitions) == 1): tables[table_name].input = tables[table_name].input.get_partition( partitions[0]).compute(scheduler='threads') else: table_partitions = [] for partition in partitions: table_partitions.append( tables[table_name].input.get_partition(partition).compute()) tables[table_name].input = cudf.concat(table_partitions) return cio.runQueryCaller( masterIndex, nodes, tables, fileTypes, ctxToken, algebra, accessToken) # returns a map of table names to the indices of the columns needed. If there are more than one table scan for one table, it merged the needed columns # if the column list is empty, it means we want all columns def mergeTableScans(tableScanInfo): table_names = tableScanInfo.keys() table_columns = {} for table_name in table_names: table_columns[table_name] = [] for table_name in table_names: for index in range(0, len(tableScanInfo[table_name]['table_columns'])): if len(tableScanInfo[table_name]['table_columns'][index]) > 0: table_columns[table_name] = list(set(table_columns[table_name] + tableScanInfo[table_name]['table_columns'][index])) table_columns[table_name].sort() else: # if the column list is empty, it means we want all columns table_columns[table_name] = [] break return table_columns def modifyAlegebraAndTablesForArrowBasedOnColumnUsage(algebra, tableScanInfo, originalTables, table_columns_in_use): newTables={} for table_name in tableScanInfo: if originalTables[table_name].fileType == DataType.ARROW: newTables[table_name] = originalTables[table_name].filterAndRemapColumns(table_columns_in_use[table_name]) for index in range(0,len(tableScanInfo[table_name]['table_scans'])): orig_scan = tableScanInfo[table_name]['table_scans'][index] orig_col_indexes = tableScanInfo[table_name]['table_columns'][index] table_columns_we_want = table_columns_in_use[table_name] new_col_indexes = [] if len(table_columns_we_want) > 0: if orig_col_indexes == table_columns_we_want: new_col_indexes = list(range(0, len(orig_col_indexes))) else: for new_index, merged_col_index in enumerate(table_columns_we_want): if merged_col_index in orig_col_indexes: new_col_indexes.append(new_index) orig_project = 'projects=[' + str(orig_col_indexes) + ']' new_project = 'projects=[' + str(new_col_indexes) + ']' new_scan = orig_scan.replace(orig_project, new_project) algebra = algebra.replace(orig_scan, new_scan) else: newTables[table_name] = originalTables[table_name] return newTables, algebra class BlazingTable(object): def 
__init__( self, input, fileType, files=None, datasource=[], calcite_to_file_indices=None, num_row_groups=None, args={}, convert_gdf_to_dask=False, convert_gdf_to_dask_partitions=1, client=None, uri_values=[], in_file=[], force_conversion=False, metadata=None): self.fileType = fileType if fileType == DataType.ARROW: if force_conversion: #converts to cudf for querying self.input = cudf.DataFrame.from_arrow(input) self.fileType = DataType.CUDF else: self.input = cudf.DataFrame.from_arrow(input.schema.empty_table()) self.arrow_table = input else: self.input = input self.calcite_to_file_indices = calcite_to_file_indices self.files = files self.datasource = datasource # TODO, cc @percy, @cristian! # num_row_groups: this property is computed in create_table.parse_schema, but not used in run_query. self.num_row_groups = num_row_groups self.args = args if fileType == DataType.CUDF or DataType.DASK_CUDF: if(convert_gdf_to_dask and isinstance(self.input, cudf.DataFrame)): self.input = dask_cudf.from_cudf( self.input, npartitions=convert_gdf_to_dask_partitions) if(isinstance(self.input, dask_cudf.core.DataFrame)): self.dask_mapping = getNodePartitions(self.input, client) self.uri_values = uri_values self.in_file = in_file # slices, this is computed in create table, and then reused in sql method self.slices = None # metadata, this is computed in create table, after call get_metadata self.metadata = metadata # row_groups_ids, vector<vector<int>> one vector of row_groups per file self.row_groups_id = [] # a pair of values with the startIndex and batchSize info for each slice self.offset = (0,0) def has_metadata(self) : if isinstance(self.metadata, dask_cudf.core.DataFrame): return not self.metadata.compute().empty if self.metadata is not None : return not self.metadata.empty return False def filterAndRemapColumns(self,tableColumns): #only used for arrow if len(tableColumns) == 0: # len = 0 means all columns return BlazingTable(self.arrow_table,DataType.ARROW,force_conversion=True) new_table = self.arrow_table columns = [] names = [] i = 0 for column in new_table.itercolumns(): for index in tableColumns: if i == index: names.append(self.arrow_table.field(i).name) columns.append(column) i = i + 1 new_table = pyarrow.Table.from_arrays(columns,names=names) new_table = BlazingTable(new_table,DataType.ARROW,force_conversion=True) return new_table def convertForQuery(self): return BlazingTable(self.arrow_table,DataType.ARROW,force_conversion=True) # until this is implemented we cant do self join with arrow tables # def unionColumns(self,otherTable): def getSlices(self, numSlices): nodeFilesList = [] if self.files is None: for i in range(0, numSlices): nodeFilesList.append(BlazingTable(self.input, self.fileType)) return nodeFilesList remaining = len(self.files) startIndex = 0 for i in range(0, numSlices): batchSize = int(remaining / (numSlices - i)) # #print(batchSize) # #print(startIndex) tempFiles = self.files[startIndex: startIndex + batchSize] uri_values = self.uri_values[startIndex: startIndex + batchSize] if isinstance(self.metadata, cudf.DataFrame) or self.metadata is None: slice_metadata = self.metadata else: slice_metadata = self.metadata.get_partition(i).compute() if self.num_row_groups is not None: bt = BlazingTable(self.input, self.fileType, files=tempFiles, calcite_to_file_indices=self.calcite_to_file_indices, num_row_groups=self.num_row_groups[startIndex: startIndex + batchSize], uri_values=uri_values, args=self.args, metadata=slice_metadata) bt.offset = (startIndex, batchSize) 
nodeFilesList.append(bt) else: bt = BlazingTable( self.input, self.fileType, files=tempFiles, calcite_to_file_indices=self.calcite_to_file_indices, uri_values=uri_values, args=self.args, metadata=slice_metadata) bt.offset = (startIndex, batchSize) nodeFilesList.append(bt) startIndex = startIndex + batchSize remaining = remaining - batchSize return nodeFilesList def get_partitions(self, worker): return self.dask_mapping[worker] class BlazingContext(object): def __init__(self, dask_client=None, # if None, it will run in single node network_interface=None, allocator="managed", # options are "default" or "managed". Where "managed" uses Unified Virtual Memory (UVM) and may use system memory if GPU memory runs out pool=True, # if True, it will allocate a memory pool in the beginning. This can greatly improve performance initial_pool_size=None, # Initial size of memory pool in bytes (if pool=True). If None, it will default to using half of the GPU memory enable_logging=False): # If set to True the memory allocator logging will be enabled, but can negatively impact perforamance """ :param connection: BlazingSQL cluster URL to connect to (e.g. 172.16.17.32:8889, blazingsql-gateway:7887). """ self.lock = Lock() self.finalizeCaller = ref(cio.finalizeCaller) self.dask_client = dask_client self.nodes = [] self.node_cwds = [] self.finalizeCaller = lambda: NotImplemented if(dask_client is not None): if network_interface is None: network_interface = 'eth0' worker_list = [] dask_futures = [] masterIndex = 0 i = 0 ##print(network_interface) for worker in list(self.dask_client.scheduler_info()["workers"]): dask_futures.append( self.dask_client.submit( initializeBlazing, ralId=i, networkInterface=network_interface, singleNode=False, allocator=allocator, pool=pool, initial_pool_size=initial_pool_size, enable_logging=enable_logging, workers=[worker])) worker_list.append(worker) i = i + 1 i = 0 for connection in dask_futures: ralPort, ralIp, cwd = connection.result() node = {} node['worker'] = worker_list[i] node['ip'] = ralIp node['communication_port'] = ralPort #print("ralport is") #print(ralPort) self.nodes.append(node) self.node_cwds.append(cwd) i = i + 1 else: ralPort, ralIp, cwd = initializeBlazing( ralId=0, networkInterface='lo', singleNode=True, allocator=allocator, pool=pool, initial_pool_size=initial_pool_size, enable_logging=enable_logging) node = {} node['ip'] = ralIp node['communication_port'] = ralPort self.nodes.append(node) self.node_cwds.append(cwd) # NOTE ("//"+) is a neat trick to handle ip:port cases #internal_api.SetupOrchestratorConnection(orchestrator_host_ip, orchestrator_port) self.fs = FileSystem() self.db = DatabaseClass("main") self.schema = BlazingSchemaClass(self.db) self.generator = RelationalAlgebraGeneratorClass(self.schema) self.tables = {} self.logs_initialized = False # waitForPingSuccess(self.client) print("BlazingContext ready") def ready(self, wait=False): if wait: waitForPingSuccess(self.client) return True else: return self.client.ping() def __del__(self): self.finalizeCaller() def __repr__(self): return "BlazingContext('%s')" % (self.connection) def __str__(self): return self.connection # BEGIN FileSystem interface def localfs(self, prefix, **kwargs): return self.fs.localfs(self.dask_client, prefix, **kwargs) # Use result, error_msg = hdfs(args) where result can be True|False def hdfs(self, prefix, **kwargs): return self.fs.hdfs(self.dask_client, prefix, **kwargs) def s3(self, prefix, **kwargs): return self.fs.s3(self.dask_client, prefix, **kwargs) def gs(self, prefix, 
**kwargs): return self.fs.gs(self.dask_client, prefix, **kwargs) def show_filesystems(self): print(self.fs) # END FileSystem interface def _to_url(self, str_input): url = urlparse(str_input) return url def _to_path(self, url): path = PurePath(url.path) return path # BEGIN SQL interface def explain(self, sql): return str(self.generator.getRelationalAlgebraString(sql)) def add_remove_table(self, tableName, addTable, table=None): self.lock.acquire() try: if(addTable): self.db.removeTable(tableName) self.tables[tableName] = table arr = ArrayClass() order = 0 for column in table.input.columns: if(isinstance(table.input, dask_cudf.core.DataFrame)): dataframe_column = table.input.head(0)._data[column] else: dataframe_column = table.input._data[column] data_sz = len(dataframe_column) dtype = get_np_dtype_to_gdf_dtype_str( dataframe_column.dtype) dataType = ColumnTypeClass.fromString(dtype) column = ColumnClass(column, dataType, order) arr.add(column) order = order + 1 tableJava = TableClass(tableName, self.db, arr) self.db.addTable(tableJava) self.schema = BlazingSchemaClass(self.db) self.generator = RelationalAlgebraGeneratorClass(self.schema) else: self.db.removeTable(tableName) self.schema = BlazingSchemaClass(self.db) self.generator = RelationalAlgebraGeneratorClass(self.schema) del self.tables[tableName] finally: self.lock.release() def create_table(self, table_name, input, **kwargs): table = None extra_columns = [] uri_values = [] file_format_hint = kwargs.get( 'file_format', 'undefined') # See datasource.file_format extra_kwargs = {} in_file = [] if(isinstance(input, hive.Cursor)): hive_table_name = kwargs.get('hive_table_name', table_name) folder_list, uri_values, file_format_hint, extra_kwargs, extra_columns, in_file = get_hive_table( input, hive_table_name) kwargs.update(extra_kwargs) input = folder_list if isinstance(input, str): input = [input, ] if isinstance(input, pandas.DataFrame): input = cudf.DataFrame.from_pandas(input) if isinstance(input, pyarrow.Table): if (self.dask_client is not None): input = cudf.DataFrame.from_arrow(input) else: table = BlazingTable( input, DataType.ARROW) if isinstance(input, cudf.DataFrame): if (self.dask_client is not None): table = BlazingTable( input, DataType.DASK_CUDF, convert_gdf_to_dask=True, convert_gdf_to_dask_partitions=len( self.nodes), client=self.dask_client) else: table = BlazingTable(input, DataType.CUDF) elif isinstance(input, list): parsedSchema = self._parseSchema( input, file_format_hint, kwargs, extra_columns) file_type = parsedSchema['file_type'] table = BlazingTable( parsedSchema['columns'], file_type, files=parsedSchema['files'], datasource=parsedSchema['datasource'], calcite_to_file_indices=parsedSchema['calcite_to_file_indices'], num_row_groups=parsedSchema['num_row_groups'], args=parsedSchema['args'], uri_values=uri_values, in_file=in_file) table.slices = table.getSlices(len(self.nodes)) if parsedSchema['file_type'] == DataType.PARQUET : parsedMetadata = self._parseMetadata(input, file_format_hint, table.slices, parsedSchema, kwargs, extra_columns) if isinstance(parsedMetadata, cudf.DataFrame): table.metadata = parsedMetadata else: table.metadata = parsedMetadata elif isinstance(input, dask_cudf.core.DataFrame): table = BlazingTable( input, DataType.DASK_CUDF, client=self.dask_client) if table is not None: self.add_remove_table(table_name, True, table) return table def drop_table(self, table_name): self.add_remove_table(table_name, False) def _parseSchema(self, input, file_format_hint, kwargs, extra_columns): if 
self.dask_client: worker = tuple(self.dask_client.scheduler_info()['workers'])[0] connection = self.dask_client.submit( cio.parseSchemaCaller, input, file_format_hint, kwargs, extra_columns, workers=[worker]) return connection.result() else: return cio.parseSchemaCaller( input, file_format_hint, kwargs, extra_columns) def _parseMetadata(self, input, file_format_hint, currentTableNodes, schema, kwargs, extra_columns): if self.dask_client: dask_futures = [] workers = tuple(self.dask_client.scheduler_info()['workers']) worker_id = 0 for worker in workers: file_subset = [ file.decode() for file in currentTableNodes[worker_id].files] connection = self.dask_client.submit( cio.parseMetadataCaller, file_subset, currentTableNodes[worker_id].offset, schema, file_format_hint, kwargs, extra_columns, workers=[worker]) dask_futures.append(connection) worker_id += 1 return dask.dataframe.from_delayed(dask_futures) else: return cio.parseMetadataCaller( input, currentTableNodes[0].offset, schema, file_format_hint, kwargs, extra_columns) def _optimize_with_skip_data(self, masterIndex, table_name, table_files, nodeTableList, scan_table_query, fileTypes): if self.dask_client is None: current_table = nodeTableList[0][table_name] table_tuple = (table_name, current_table) file_indices_and_rowgroup_indices = cio.runSkipDataCaller(masterIndex, self.nodes, table_tuple, fileTypes, 0, scan_table_query, 0) if not file_indices_and_rowgroup_indices.empty: file_and_rowgroup_indices = file_indices_and_rowgroup_indices.to_pandas() files = file_and_rowgroup_indices['file_handle_index'].values.tolist() grouped = file_and_rowgroup_indices.groupby('file_handle_index') actual_files = [] current_table.row_groups_ids = [] for group_id in grouped.groups: row_indices = grouped.groups[group_id].values.tolist() actual_files.append(table_files[group_id]) row_groups_col = file_and_rowgroup_indices['row_group_index'].values.tolist() row_group_ids = [row_groups_col[i] for i in row_indices] current_table.row_groups_ids.append(row_group_ids) current_table.files = actual_files else: dask_futures = [] i = 0 for node in self.nodes: worker = node['worker'] current_table = nodeTableList[i][table_name] table_tuple = (table_name, current_table) dask_futures.append( self.dask_client.submit( cio.runSkipDataCaller, masterIndex, self.nodes, table_tuple, fileTypes, 0, scan_table_query, 0, workers=[worker])) i = i + 1 result = dask.dataframe.from_delayed(dask_futures) for index in range(len(self.nodes)): file_indices_and_rowgroup_indices = result.get_partition(index).compute() if file_indices_and_rowgroup_indices.empty : continue file_and_rowgroup_indices = file_indices_and_rowgroup_indices.to_pandas() files = file_and_rowgroup_indices['file_handle_index'].values.tolist() grouped = file_and_rowgroup_indices.groupby('file_handle_index') actual_files = [] current_table.row_groups_ids = [] for group_id in grouped.groups: row_indices = grouped.groups[group_id].values.tolist() actual_files.append(table_files[group_id]) row_groups_col = file_and_rowgroup_indices['row_group_index'].values.tolist() row_group_ids = [row_groups_col[i] for i in row_indices] current_table.row_groups_ids.append(row_group_ids) current_table.files = actual_files def sql(self, sql, table_list=[], algebra=None): # TODO: remove hardcoding masterIndex = 0 nodeTableList = [{} for _ in range(len(self.nodes))] fileTypes = [] if (algebra is None): algebra = self.explain(sql) if self.dask_client is None: relational_algebra_steps = cio.getTableScanInfoCaller(algebra) else: worker = 
tuple(self.dask_client.scheduler_info()['workers'])[0] connection = self.dask_client.submit( cio.getTableScanInfoCaller, algebra, workers=[worker]) relational_algebra_steps = connection.result() table_columns = mergeTableScans(relational_algebra_steps) new_tables, algebra = modifyAlegebraAndTablesForArrowBasedOnColumnUsage(algebra, relational_algebra_steps,self.tables, table_columns) for table in new_tables: fileTypes.append(new_tables[table].fileType) ftype = new_tables[table].fileType if(ftype == DataType.PARQUET or ftype == DataType.ORC or ftype == DataType.JSON or ftype == DataType.CSV): currentTableNodes = new_tables[table].getSlices(len(self.nodes)) elif(new_tables[table].fileType == DataType.DASK_CUDF): currentTableNodes = [] for node in self.nodes: currentTableNodes.append(new_tables[table]) elif(new_tables[table].fileType == DataType.CUDF or new_tables[table].fileType == DataType.ARROW): currentTableNodes = [] for node in self.nodes: currentTableNodes.append(new_tables[table]) j = 0 for nodeList in nodeTableList: nodeList[table] = currentTableNodes[j] j = j + 1 if new_tables[table].has_metadata(): scan_table_query = relational_algebra_steps[table]['table_scans'][0] self._optimize_with_skip_data(masterIndex, table, new_tables[table].files, nodeTableList, scan_table_query, fileTypes) ctxToken = random.randint(0, 64000) accessToken = 0 if (len(table_list) > 0): print("NOTE: You no longer need to send a table list to the .sql() funtion") if self.dask_client is None: result = cio.runQueryCaller( masterIndex, self.nodes, nodeTableList[0], fileTypes, ctxToken, algebra, accessToken) else: dask_futures = [] i = 0 for node in self.nodes: worker = node['worker'] dask_futures.append( self.dask_client.submit( collectPartitionsRunQuery, masterIndex, self.nodes, nodeTableList[i], fileTypes, ctxToken, algebra, accessToken, workers=[worker])) i = i + 1 result = dask.dataframe.from_delayed(dask_futures) return result # END SQL interface # BEGIN LOG interface def log(self, query, logs_table_name='bsql_logs'): if not self.logs_initialized: self.logs_table_name = logs_table_name log_files = [self.node_cwds[i] + '/RAL.' + \ str(i) + '.log' for i in range(0, len(self.node_cwds))] #print(log_files) dtypes = [ 'date64', 'int32', 'str', 'int32', 'int16', 'int16', 'str', 'float32', 'str', 'int32', 'str', 'int32'] names = [ 'log_time', 'node_id', 'type', 'query_id', 'step', 'substep', 'info', 'duration', 'extra1', 'data1', 'extra2', 'data2'] t = self.create_table( self.logs_table_name, log_files, delimiter='|', dtype=dtypes, names=names, file_format='csv') #print("table created") #print(t) self.logs_initialized = True return self.sql(query)
[ "socket.socket", "cudf.DataFrame.from_arrow", "cudf.set_allocator", "weakref.ref", "urllib.parse.urlparse", "random.randint", "dask_cudf.from_cudf", "dask.distributed.wait", "dask.dataframe.from_delayed", "pyarrow.Table.from_arrays", "pyblazing.apiv2.filesystem.FileSystem", "threading.Lock", "cio.runSkipDataCaller", "cio.parseMetadataCaller", "cudf.concat", "cudf.DataFrame.from_pandas", "dask.distributed.get_worker", "cio.getTableScanInfoCaller", "jpype.getDefaultJVMPath", "os.getenv", "cio.parseSchemaCaller", "jpype.JClass", "os.getcwd", "cio.runQueryCaller", "numpy.dtype", "netifaces.ifaddresses", "pathlib.PurePath" ]
[((1027, 1062), 'jpype.JClass', 'jpype.JClass', (['"""java.util.ArrayList"""'], {}), "('java.util.ArrayList')\n", (1039, 1062), False, 'import jpype\n'), ((1081, 1155), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogColumnDataType"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogColumnDataType')\n", (1093, 1155), False, 'import jpype\n'), ((1225, 1295), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogColumnImpl"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogColumnImpl')\n", (1237, 1295), False, 'import jpype\n'), ((1314, 1383), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogTableImpl"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogTableImpl')\n", (1326, 1383), False, 'import jpype\n'), ((1405, 1477), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl"""'], {}), "('com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl')\n", (1417, 1477), False, 'import jpype\n'), ((1504, 1562), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.schema.BlazingSchema"""'], {}), "('com.blazingdb.calcite.schema.BlazingSchema')\n", (1516, 1562), False, 'import jpype\n'), ((1597, 1673), 'jpype.JClass', 'jpype.JClass', (['"""com.blazingdb.calcite.application.RelationalAlgebraGenerator"""'], {}), "('com.blazingdb.calcite.application.RelationalAlgebraGenerator')\n", (1609, 1673), False, 'import jpype\n'), ((957, 982), 'jpype.getDefaultJVMPath', 'jpype.getDefaultJVMPath', ([], {}), '()\n', (980, 982), False, 'import jpype\n'), ((2627, 2679), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM', '(0)'], {}), '(socket.AF_INET, socket.SOCK_STREAM, 0)\n', (2640, 2679), False, 'import socket\n'), ((3617, 3740), 'cudf.set_allocator', 'cudf.set_allocator', ([], {'allocator': 'allocator', 'pool': 'pool', 'initial_pool_size': 'initial_pool_size', 'enable_logging': 'enable_logging'}), '(allocator=allocator, pool=pool, initial_pool_size=\n initial_pool_size, enable_logging=enable_logging)\n', (3635, 3740), False, 'import cudf\n'), ((4016, 4027), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4025, 4027), False, 'import os\n'), ((4295, 4320), 'dask.distributed.wait', 'dask.distributed.wait', (['df'], {}), '(df)\n', (4316, 4320), False, 'import dask\n'), ((5894, 5987), 'cio.runQueryCaller', 'cio.runQueryCaller', (['masterIndex', 'nodes', 'tables', 'fileTypes', 'ctxToken', 'algebra', 'accessToken'], {}), '(masterIndex, nodes, tables, fileTypes, ctxToken, algebra,\n accessToken)\n', (5912, 5987), False, 'import cio\n'), ((758, 783), 'os.getenv', 'os.getenv', (['"""CONDA_PREFIX"""'], {}), "('CONDA_PREFIX')\n", (767, 783), False, 'import os\n'), ((870, 895), 'os.getenv', 'os.getenv', (['"""CONDA_PREFIX"""'], {}), "('CONDA_PREFIX')\n", (879, 895), False, 'import os\n'), ((1746, 1765), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (1754, 1765), True, 'import numpy as np\n'), ((1790, 1809), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (1798, 1809), True, 'import numpy as np\n'), ((1834, 1851), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (1842, 1851), True, 'import numpy as np\n'), ((1874, 1891), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (1882, 1891), True, 'import numpy as np\n'), ((1914, 1931), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (1922, 1931), True, 'import numpy as np\n'), ((1954, 1970), 'numpy.dtype', 
'np.dtype', (['"""int8"""'], {}), "('int8')\n", (1962, 1970), True, 'import numpy as np\n'), ((1992, 2009), 'numpy.dtype', 'np.dtype', (['"""bool_"""'], {}), "('bool_')\n", (2000, 2009), True, 'import numpy as np\n'), ((2032, 2057), 'numpy.dtype', 'np.dtype', (['"""datetime64[s]"""'], {}), "('datetime64[s]')\n", (2040, 2057), True, 'import numpy as np\n'), ((2081, 2107), 'numpy.dtype', 'np.dtype', (['"""datetime64[ms]"""'], {}), "('datetime64[ms]')\n", (2089, 2107), True, 'import numpy as np\n'), ((2131, 2157), 'numpy.dtype', 'np.dtype', (['"""datetime64[ns]"""'], {}), "('datetime64[ns]')\n", (2139, 2157), True, 'import numpy as np\n'), ((2184, 2210), 'numpy.dtype', 'np.dtype', (['"""datetime64[us]"""'], {}), "('datetime64[us]')\n", (2192, 2210), True, 'import numpy as np\n'), ((2237, 2259), 'numpy.dtype', 'np.dtype', (['"""datetime64"""'], {}), "('datetime64')\n", (2245, 2259), True, 'import numpy as np\n'), ((2283, 2302), 'numpy.dtype', 'np.dtype', (['"""object_"""'], {}), "('object_')\n", (2291, 2302), True, 'import numpy as np\n'), ((2326, 2342), 'numpy.dtype', 'np.dtype', (['"""str_"""'], {}), "('str_')\n", (2334, 2342), True, 'import numpy as np\n'), ((2366, 2384), 'numpy.dtype', 'np.dtype', (['"""<M8[s]"""'], {}), "('<M8[s]')\n", (2374, 2384), True, 'import numpy as np\n'), ((2408, 2427), 'numpy.dtype', 'np.dtype', (['"""<M8[ms]"""'], {}), "('<M8[ms]')\n", (2416, 2427), True, 'import numpy as np\n'), ((2451, 2470), 'numpy.dtype', 'np.dtype', (['"""<M8[ns]"""'], {}), "('<M8[ns]')\n", (2459, 2470), True, 'import numpy as np\n'), ((2497, 2516), 'numpy.dtype', 'np.dtype', (['"""<M8[us]"""'], {}), "('<M8[us]')\n", (2505, 2516), True, 'import numpy as np\n'), ((2557, 2572), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2565, 2572), True, 'import numpy as np\n'), ((3453, 3481), 'random.randint', 'random.randint', (['(10000)', '(32000)'], {}), '(10000, 32000)\n', (3467, 3481), False, 'import random\n'), ((5027, 5056), 'dask.distributed.get_worker', 'dask.distributed.get_worker', ([], {}), '()\n', (5054, 5056), False, 'import dask\n'), ((11570, 11617), 'pyarrow.Table.from_arrays', 'pyarrow.Table.from_arrays', (['columns'], {'names': 'names'}), '(columns, names=names)\n', (11595, 11617), False, 'import pyarrow\n'), ((15156, 15162), 'threading.Lock', 'Lock', ([], {}), '()\n', (15160, 15162), False, 'from threading import Lock\n'), ((15193, 15216), 'weakref.ref', 'ref', (['cio.finalizeCaller'], {}), '(cio.finalizeCaller)\n', (15196, 15216), False, 'from weakref import ref\n'), ((17335, 17347), 'pyblazing.apiv2.filesystem.FileSystem', 'FileSystem', ([], {}), '()\n', (17345, 17347), False, 'from pyblazing.apiv2.filesystem import FileSystem\n'), ((18669, 18688), 'urllib.parse.urlparse', 'urlparse', (['str_input'], {}), '(str_input)\n', (18677, 18688), False, 'from urllib.parse import urlparse\n'), ((18753, 18771), 'pathlib.PurePath', 'PurePath', (['url.path'], {}), '(url.path)\n', (18761, 18771), False, 'from pathlib import PurePath\n'), ((30345, 30369), 'random.randint', 'random.randint', (['(0)', '(64000)'], {}), '(0, 64000)\n', (30359, 30369), False, 'import random\n'), ((3575, 3603), 'random.randint', 'random.randint', (['(10000)', '(32000)'], {}), '(10000, 32000)\n', (3589, 3603), False, 'import random\n'), ((21160, 21193), 'cudf.DataFrame.from_pandas', 'cudf.DataFrame.from_pandas', (['input'], {}), '(input)\n', (21186, 21193), False, 'import cudf\n'), ((23913, 23982), 'cio.parseSchemaCaller', 'cio.parseSchemaCaller', (['input', 'file_format_hint', 'kwargs', 'extra_columns'], {}), 
'(input, file_format_hint, kwargs, extra_columns)\n', (23934, 23982), False, 'import cio\n'), ((24851, 24892), 'dask.dataframe.from_delayed', 'dask.dataframe.from_delayed', (['dask_futures'], {}), '(dask_futures)\n', (24878, 24892), False, 'import dask\n'), ((24927, 25039), 'cio.parseMetadataCaller', 'cio.parseMetadataCaller', (['input', 'currentTableNodes[0].offset', 'schema', 'file_format_hint', 'kwargs', 'extra_columns'], {}), '(input, currentTableNodes[0].offset, schema,\n file_format_hint, kwargs, extra_columns)\n', (24950, 25039), False, 'import cio\n'), ((25389, 25487), 'cio.runSkipDataCaller', 'cio.runSkipDataCaller', (['masterIndex', 'self.nodes', 'table_tuple', 'fileTypes', '(0)', 'scan_table_query', '(0)'], {}), '(masterIndex, self.nodes, table_tuple, fileTypes, 0,\n scan_table_query, 0)\n', (25410, 25487), False, 'import cio\n'), ((27050, 27091), 'dask.dataframe.from_delayed', 'dask.dataframe.from_delayed', (['dask_futures'], {}), '(dask_futures)\n', (27077, 27091), False, 'import dask\n'), ((28555, 28590), 'cio.getTableScanInfoCaller', 'cio.getTableScanInfoCaller', (['algebra'], {}), '(algebra)\n', (28581, 28590), False, 'import cio\n'), ((30576, 30684), 'cio.runQueryCaller', 'cio.runQueryCaller', (['masterIndex', 'self.nodes', 'nodeTableList[0]', 'fileTypes', 'ctxToken', 'algebra', 'accessToken'], {}), '(masterIndex, self.nodes, nodeTableList[0], fileTypes,\n ctxToken, algebra, accessToken)\n', (30594, 30684), False, 'import cio\n'), ((31465, 31506), 'dask.dataframe.from_delayed', 'dask.dataframe.from_delayed', (['dask_futures'], {}), '(dask_futures)\n', (31492, 31506), False, 'import dask\n'), ((3370, 3402), 'netifaces.ifaddresses', 'ni.ifaddresses', (['networkInterface'], {}), '(networkInterface)\n', (3384, 3402), True, 'import netifaces as ni\n'), ((9182, 9214), 'cudf.DataFrame.from_arrow', 'cudf.DataFrame.from_arrow', (['input'], {}), '(input)\n', (9207, 9214), False, 'import cudf\n'), ((9964, 10039), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (['self.input'], {'npartitions': 'convert_gdf_to_dask_partitions'}), '(self.input, npartitions=convert_gdf_to_dask_partitions)\n', (9983, 10039), False, 'import dask_cudf\n'), ((21311, 21343), 'cudf.DataFrame.from_arrow', 'cudf.DataFrame.from_arrow', (['input'], {}), '(input)\n', (21336, 21343), False, 'import cudf\n'), ((5853, 5882), 'cudf.concat', 'cudf.concat', (['table_partitions'], {}), '(table_partitions)\n', (5864, 5882), False, 'import cudf\n')]
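For reference, the mergeTableScans helper in the row above unions the column indices required by every scan of the same table, with an empty list short-circuiting to "all columns". A minimal stand-alone sketch of that merging rule; the sample tableScanInfo dict is illustrative only and not part of the pyblazing API.

def merge_table_scans(table_scan_info):
    # per table: union the column indices needed by each of its scans
    merged = {}
    for table_name, info in table_scan_info.items():
        merged[table_name] = []
        for columns in info['table_columns']:
            if len(columns) == 0:
                # an empty column list means the scan needs every column
                merged[table_name] = []
                break
            merged[table_name] = sorted(set(merged[table_name] + columns))
    return merged


if __name__ == "__main__":
    example = {
        'orders': {'table_columns': [[0, 2], [2, 5]]},  # two scans -> union of columns
        'nation': {'table_columns': [[1], []]},         # second scan wants all columns
    }
    print(merge_table_scans(example))  # {'orders': [0, 2, 5], 'nation': []}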
from __future__ import annotations __copyright__ = """ Copyright (C) 2020 <NAME> Copyright (C) 2020 <NAME> Copyright (C) 2020 <NAME> Copyright (C) 2021 <NAME> """ __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # {{{ docs __doc__ = """ .. currentmodule:: pytato .. autofunction:: abs .. autofunction:: sqrt .. autofunction:: sin .. autofunction:: cos .. autofunction:: tan .. autofunction:: arcsin .. autofunction:: arccos .. autofunction:: arctan .. autofunction:: conj .. autofunction:: arctan2 .. autofunction:: sinh .. autofunction:: cosh .. autofunction:: tanh .. autofunction:: exp .. autofunction:: log .. autofunction:: log10 .. autofunction:: isnan .. autofunction:: real .. autofunction:: imag """ # }}} import numpy as np import pymbolic.primitives as prim from typing import Tuple, Optional from pytato.array import Array, ArrayOrScalar, IndexLambda, _dtype_any from pytato.scalar_expr import SCALAR_CLASSES from pymbolic import var def _apply_elem_wise_func(inputs: Tuple[ArrayOrScalar], func_name: str, ret_dtype: Optional[_dtype_any] = None ) -> ArrayOrScalar: if all(isinstance(x, SCALAR_CLASSES) for x in inputs): np_func = getattr(np, func_name) return np_func(*inputs) # type: ignore if not inputs: raise ValueError("at least one argument must be present") shape = None sym_args = [] bindings = {} for index, inp in enumerate(inputs): if isinstance(inp, Array): if inp.dtype.kind not in ["f", "c"]: raise ValueError("only floating-point or complex " "arguments supported") if shape is None: shape = inp.shape elif inp.shape != shape: # FIXME: merge this logic with arithmetic, so that broadcasting # is implemented properly raise NotImplementedError("broadcasting in function application") if ret_dtype is None: ret_dtype = inp.dtype bindings[f"in_{index}"] = inp sym_args.append( prim.Subscript(var(f"in_{index}"), tuple(var(f"_{i}") for i in range(len(shape))))) else: sym_args.append(inp) assert shape is not None assert ret_dtype is not None return IndexLambda( prim.Call(var(f"pytato.c99.{func_name}"), tuple(sym_args)), shape, ret_dtype, bindings) def abs(x: Array) -> ArrayOrScalar: if x.dtype.kind == "c": result_dtype = np.empty(0, dtype=x.dtype).real.dtype else: result_dtype = x.dtype return _apply_elem_wise_func((x,), "abs", ret_dtype=result_dtype) def sqrt(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "sqrt") def sin(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "sin") def cos(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "cos") def tan(x: Array) -> ArrayOrScalar: return 
_apply_elem_wise_func((x,), "tan") def arcsin(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "asin") def arccos(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "acos") def arctan(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "atan") def conj(x: Array) -> ArrayOrScalar: if x.dtype.kind != "c": return x return _apply_elem_wise_func((x,), "conj") def arctan2(y: Array, x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((y, x), "atan2") # type:ignore def sinh(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "sinh") def cosh(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "cosh") def tanh(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "tanh") def exp(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "exp") def log(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "log") def log10(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "log10") def isnan(x: Array) -> ArrayOrScalar: return _apply_elem_wise_func((x,), "isnan", np.dtype(np.int32)) def real(x: Array) -> ArrayOrScalar: if x.dtype.kind == "c": result_dtype = np.empty(0, dtype=x.dtype).real.dtype else: return x return _apply_elem_wise_func((x,), "real", ret_dtype=result_dtype) def imag(x: Array) -> ArrayOrScalar: if x.dtype.kind == "c": result_dtype = np.empty(0, dtype=x.dtype).real.dtype else: import pytato as pt return pt.zeros(x.shape, dtype=x.dtype) return _apply_elem_wise_func((x,), "imag", ret_dtype=result_dtype) # vim: fdm=marker
[ "pytato.zeros", "numpy.dtype", "pymbolic.var", "numpy.empty" ]
[((5205, 5223), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (5213, 5223), True, 'import numpy as np\n'), ((5632, 5664), 'pytato.zeros', 'pt.zeros', (['x.shape'], {'dtype': 'x.dtype'}), '(x.shape, dtype=x.dtype)\n', (5640, 5664), True, 'import pytato as pt\n'), ((3424, 3454), 'pymbolic.var', 'var', (['f"""pytato.c99.{func_name}"""'], {}), "(f'pytato.c99.{func_name}')\n", (3427, 3454), False, 'from pymbolic import var\n'), ((3603, 3629), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'x.dtype'}), '(0, dtype=x.dtype)\n', (3611, 3629), True, 'import numpy as np\n'), ((5315, 5341), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'x.dtype'}), '(0, dtype=x.dtype)\n', (5323, 5341), True, 'import numpy as np\n'), ((5541, 5567), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'x.dtype'}), '(0, dtype=x.dtype)\n', (5549, 5567), True, 'import numpy as np\n'), ((3174, 3192), 'pymbolic.var', 'var', (['f"""in_{index}"""'], {}), "(f'in_{index}')\n", (3177, 3192), False, 'from pymbolic import var\n'), ((3224, 3236), 'pymbolic.var', 'var', (['f"""_{i}"""'], {}), "(f'_{i}')\n", (3227, 3236), False, 'from pymbolic import var\n')]
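The pytato row above falls back to plain NumPy when every argument is a scalar, and derives the real result dtype of a complex input via np.empty(0, dtype=...).real.dtype. A tiny stand-alone illustration of both tricks; the helper names below are my own, not pytato's.

import numpy as np

def real_dtype_of(dtype):
    # e.g. complex128 -> float64, float32 -> float32
    return np.empty(0, dtype=dtype).real.dtype

def scalar_elem_wise(func_name, *scalars):
    # same NumPy fallback the pytato helper uses when all arguments are scalars
    return getattr(np, func_name)(*scalars)

if __name__ == "__main__":
    print(real_dtype_of(np.complex128))           # float64
    print(real_dtype_of(np.float32))              # float32
    print(scalar_elem_wise("arctan2", 1.0, 1.0))  # ~0.785398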
#!/usr/bin/env python3

import numpy as np
import math
import random


def compute_z(theta, x):
    z = 0
    for j in range(len(x)):
        z += theta[j] * x[j]
    z += theta[len(x)]
    return z


def compute_g(z):
    return (1)/(1 + math.exp(-z))


def compute_h(z):
    return compute_g(z)


def binary_cross_entropy_loss(Y_train, Y_predict):
    total = 0
    for i in range(len(Y_train)):
        total -= (Y_train[i] * math.log(Y_predict[i])) + \
            ((1 - Y_train[i]) * math.log(1-Y_predict[i]))
    average = total / len(Y_train)
    return average


def compute_loss_gradients(theta, X_train, Y_train, Y_predict):
    delta_theta = []
    for j in range(len(X_train[0])):
        grad = 0
        for i in range(len(Y_train)):
            grad += ((Y_predict[i] - Y_train[i]) * X_train[i][j])/len(Y_train)
        delta_theta.append(grad)
    return delta_theta


def main():
    # f = int(input("no of features: "))
    n = int(input("no of rows: "))
    X_train = []
    Y_train = []
    for i in range(n):
        row = [int(r) for r in input().split()]
        X_train.append(row[0:-1])
        Y_train.append(row[-1])
    theta = [np.random.randn() for i in range(len(X_train))]
    print("theta", theta)
    for i in range(n):
        print(X_train[i], Y_train[i])
    epochs = 5
    epsilon = 0.00000000000000001
    alpha = 0.001
    for e in range(epochs):
        Y_predict = []
        for i in range(n):
            print(X_train[i])
            Y_predict.append(compute_h(compute_z(theta, X_train[i])))
        current_loss = binary_cross_entropy_loss(Y_train, Y_predict)
        print("=========> Epoch number:", e, "Current Loss: ", current_loss)
        print("Y_predict", Y_predict)
        if current_loss <= epsilon:
            break
        delta_theta = compute_loss_gradients(
            theta, X_train, Y_train, Y_predict)
        print("delta_theta", delta_theta)
        for j in range(len(theta) - 1):
            theta[j] = theta[j] - alpha * delta_theta[j]


if __name__ == "__main__":
    main()
[ "math.log", "math.exp", "numpy.random.randn" ]
[((1164, 1181), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1179, 1181), True, 'import numpy as np\n'), ((238, 250), 'math.exp', 'math.exp', (['(-z)'], {}), '(-z)\n', (246, 250), False, 'import math\n'), ((429, 451), 'math.log', 'math.log', (['Y_predict[i]'], {}), '(Y_predict[i])\n', (437, 451), False, 'import math\n'), ((489, 515), 'math.log', 'math.log', (['(1 - Y_predict[i])'], {}), '(1 - Y_predict[i])\n', (497, 515), False, 'import math\n')]
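The row above trains logistic regression with explicit Python loops and stdin input. The same cross-entropy gradient step can be written in vectorized NumPy; the OR-gate data and hyper-parameters below are made up purely for illustration, not taken from the original script.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def train_logistic(X, y, alpha=0.5, epochs=5000):
    X = np.hstack([X, np.ones((len(X), 1))])  # append a bias column
    theta = np.zeros(X.shape[1])
    for _ in range(epochs):
        p = sigmoid(X @ theta)
        grad = X.T @ (p - y) / len(y)          # gradient of binary cross-entropy
        theta -= alpha * grad
    return theta

if __name__ == "__main__":
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    y = np.array([0, 1, 1, 1], dtype=float)    # OR gate
    theta = train_logistic(X, y)
    probs = sigmoid(np.hstack([X, np.ones((4, 1))]) @ theta)
    print(np.round(probs))  # typically [0. 1. 1. 1.]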
''' Functions to compute fast distance covariance using mergesort. ''' import warnings from numba import float64, int64, boolean import numba import numpy as np from ._utils import CompileMode, _transform_to_2d def _compute_weight_sums(y, weights): n_samples = len(y) weight_sums = np.zeros((n_samples,) + weights.shape[1:], dtype=y.dtype) # Buffer that contains the indexes of the current and # last iterations indexes = np.arange(2 * n_samples).reshape((2, n_samples)) indexes[1] = 0 # Remove this previous_indexes = indexes[0] current_indexes = indexes[1] weights_cumsum = np.zeros( (n_samples + 1,) + weights.shape[1:], dtype=weights.dtype) merged_subarray_len = 1 # For all lengths that are a power of two while merged_subarray_len < n_samples: gap = 2 * merged_subarray_len indexes_idx = 0 # Numba does not support axis, nor out parameter. for var in range(weights.shape[1]): weights_cumsum[1:, var] = np.cumsum( weights[previous_indexes, var]) # Select the subarrays in pairs for subarray_pair_idx in range(0, n_samples, gap): subarray_1_idx = subarray_pair_idx subarray_2_idx = subarray_pair_idx + merged_subarray_len subarray_1_idx_last = min( subarray_1_idx + merged_subarray_len - 1, n_samples - 1) subarray_2_idx_last = min( subarray_2_idx + merged_subarray_len - 1, n_samples - 1) # Merge the subarrays while (subarray_1_idx <= subarray_1_idx_last and subarray_2_idx <= subarray_2_idx_last): previous_index_1 = previous_indexes[subarray_1_idx] previous_index_2 = previous_indexes[subarray_2_idx] if y[previous_index_1].item() >= y[previous_index_2].item(): current_indexes[indexes_idx] = previous_index_1 subarray_1_idx += 1 else: current_indexes[indexes_idx] = previous_index_2 subarray_2_idx += 1 weight_sums[previous_index_2] += ( weights_cumsum[subarray_1_idx_last + 1] - weights_cumsum[subarray_1_idx]) indexes_idx += 1 # Join the remaining elements of one of the arrays (already sorted) if subarray_1_idx <= subarray_1_idx_last: n_remaining = subarray_1_idx_last - subarray_1_idx + 1 indexes_idx_next = indexes_idx + n_remaining current_indexes[indexes_idx:indexes_idx_next] = ( previous_indexes[subarray_1_idx:subarray_1_idx_last + 1]) indexes_idx = indexes_idx_next elif subarray_2_idx <= subarray_2_idx_last: n_remaining = subarray_2_idx_last - subarray_2_idx + 1 indexes_idx_next = indexes_idx + n_remaining current_indexes[indexes_idx:indexes_idx_next] = ( previous_indexes[subarray_2_idx:subarray_2_idx_last + 1]) indexes_idx = indexes_idx_next merged_subarray_len = gap # Swap buffer previous_indexes, current_indexes = (current_indexes, previous_indexes) return weight_sums _compute_weight_sums_compiled = numba.njit( float64[:, :](float64[:, :], float64[:, :]), cache=True)(_compute_weight_sums) def _generate_compute_aijbij_term(compiled): def _compute_aijbij_term(x, y): compute_weight_sums = (_compute_weight_sums_compiled if compiled else _compute_weight_sums) # x must be sorted n = len(x) weights = np.hstack((np.ones_like(y), y, x, x * y)) weight_sums = compute_weight_sums(y, weights) x = x.ravel() y = y.ravel() term_1 = (x * y).T @ weight_sums[:, 0].ravel() term_2 = x.T @ weight_sums[:, 1].ravel() term_3 = y.T @ weight_sums[:, 2].ravel() term_4 = np.sum(weight_sums[:, 3]) # First term in the equation sums_term = term_1 - term_2 - term_3 + term_4 # Second term in the equation sum_x = np.sum(x) sum_y = np.sum(y) cov_term = n * x.T @ y - np.sum(sum_x * y + sum_y * x) + sum_x * sum_y d = 4 * sums_term - 2 * cov_term return d.item() return _compute_aijbij_term _compute_aijbij_term = 
_generate_compute_aijbij_term(compiled=False) _compute_aijbij_term_compiled = numba.njit( float64(float64[:, :], float64[:, :]), cache=True)( _generate_compute_aijbij_term(compiled=True)) def _compute_row_sums(x): # x must be sorted x = x.ravel() n_samples = len(x) term_1 = (2 * np.arange(1, n_samples + 1) - n_samples) * x sums = np.cumsum(x) term_2 = sums[-1] - 2 * sums return term_1 + term_2 _compute_row_sums_compiled = numba.njit( float64[:](float64[:]), cache=True)(_compute_row_sums) def _generate_distance_covariance_sqr_mergesort_generic_impl( compiled): def _distance_covariance_sqr_mergesort_generic_impl(x, y, unbiased): compute_aijbij_term = (_compute_aijbij_term_compiled if compiled else _compute_aijbij_term) compute_row_sums = (_compute_row_sums_compiled if compiled else _compute_row_sums) n = len(x) # Sort x in ascending order ordered_indexes = np.argsort(x.ravel()) x = x[ordered_indexes] y = y[ordered_indexes] aijbij = compute_aijbij_term(x, y) a_i = compute_row_sums(x.ravel()) ordered_indexes_y = np.argsort(y.ravel()) b_i_perm = compute_row_sums(y.ravel()[ordered_indexes_y]) b_i = np.empty_like(b_i_perm) b_i[ordered_indexes_y] = b_i_perm a_dot_dot = np.sum(a_i) b_dot_dot = np.sum(b_i) sum_ab = a_i.ravel().T @ b_i.ravel() if unbiased: d3 = (n - 3) d2 = (n - 2) d1 = (n - 1) else: d3 = d2 = d1 = n d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 + a_dot_dot / n * b_dot_dot / d1 / d2 / d3) return d_cov return _distance_covariance_sqr_mergesort_generic_impl _distance_covariance_sqr_mergesort_generic_impl = ( _generate_distance_covariance_sqr_mergesort_generic_impl( compiled=False)) _distance_covariance_sqr_mergesort_generic_impl_compiled = numba.njit( float64(float64[:, :], float64[:, :], boolean), cache=True)( _generate_distance_covariance_sqr_mergesort_generic_impl( compiled=True)) impls_dict = { CompileMode.AUTO: ( _distance_covariance_sqr_mergesort_generic_impl_compiled, _distance_covariance_sqr_mergesort_generic_impl), CompileMode.NO_COMPILE: (_distance_covariance_sqr_mergesort_generic_impl,), CompileMode.COMPILE_CPU: ( _distance_covariance_sqr_mergesort_generic_impl_compiled,) } def _distance_covariance_sqr_mergesort_generic(x, y, *, exponent=1, unbiased=False, compile_mode=CompileMode.AUTO): if exponent != 1: raise ValueError(f"Exponent should be 1 but is {exponent} instead.") x = _transform_to_2d(x) y = _transform_to_2d(y) if compile_mode not in (CompileMode.AUTO, CompileMode.COMPILE_CPU, CompileMode.NO_COMPILE): return NotImplementedError( f"Compile mode {compile_mode} not implemented.") for impl in impls_dict[compile_mode]: try: return impl(x, y, unbiased) except TypeError as e: if compile_mode is not CompileMode.AUTO: raise e warnings.warn(f"Falling back to uncompiled MERGESORT fast " f"distance covariance because of TypeError " f"exception raised: {e}. Rembember: only floating " f"point values can be used in the compiled " f"implementations.")
[ "numpy.sum", "numpy.ones_like", "numpy.empty_like", "numpy.zeros", "numpy.cumsum", "numpy.arange", "numba.float64", "warnings.warn" ]
[((298, 355), 'numpy.zeros', 'np.zeros', (['((n_samples,) + weights.shape[1:])'], {'dtype': 'y.dtype'}), '((n_samples,) + weights.shape[1:], dtype=y.dtype)\n', (306, 355), True, 'import numpy as np\n'), ((624, 691), 'numpy.zeros', 'np.zeros', (['((n_samples + 1,) + weights.shape[1:])'], {'dtype': 'weights.dtype'}), '((n_samples + 1,) + weights.shape[1:], dtype=weights.dtype)\n', (632, 691), True, 'import numpy as np\n'), ((4847, 4859), 'numpy.cumsum', 'np.cumsum', (['x'], {}), '(x)\n', (4856, 4859), True, 'import numpy as np\n'), ((4065, 4090), 'numpy.sum', 'np.sum', (['weight_sums[:, 3]'], {}), '(weight_sums[:, 3])\n', (4071, 4090), True, 'import numpy as np\n'), ((4238, 4247), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4244, 4247), True, 'import numpy as np\n'), ((4264, 4273), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (4270, 4273), True, 'import numpy as np\n'), ((4572, 4609), 'numba.float64', 'float64', (['float64[:, :]', 'float64[:, :]'], {}), '(float64[:, :], float64[:, :])\n', (4579, 4609), False, 'from numba import float64, int64, boolean\n'), ((5820, 5843), 'numpy.empty_like', 'np.empty_like', (['b_i_perm'], {}), '(b_i_perm)\n', (5833, 5843), True, 'import numpy as np\n'), ((5907, 5918), 'numpy.sum', 'np.sum', (['a_i'], {}), '(a_i)\n', (5913, 5918), True, 'import numpy as np\n'), ((5939, 5950), 'numpy.sum', 'np.sum', (['b_i'], {}), '(b_i)\n', (5945, 5950), True, 'import numpy as np\n'), ((6557, 6603), 'numba.float64', 'float64', (['float64[:, :]', 'float64[:, :]', 'boolean'], {}), '(float64[:, :], float64[:, :], boolean)\n', (6564, 6603), False, 'from numba import float64, int64, boolean\n'), ((451, 475), 'numpy.arange', 'np.arange', (['(2 * n_samples)'], {}), '(2 * n_samples)\n', (460, 475), True, 'import numpy as np\n'), ((1023, 1064), 'numpy.cumsum', 'np.cumsum', (['weights[previous_indexes, var]'], {}), '(weights[previous_indexes, var])\n', (1032, 1064), True, 'import numpy as np\n'), ((3764, 3779), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (3776, 3779), True, 'import numpy as np\n'), ((4307, 4336), 'numpy.sum', 'np.sum', (['(sum_x * y + sum_y * x)'], {}), '(sum_x * y + sum_y * x)\n', (4313, 4336), True, 'import numpy as np\n'), ((4790, 4817), 'numpy.arange', 'np.arange', (['(1)', '(n_samples + 1)'], {}), '(1, n_samples + 1)\n', (4799, 4817), True, 'import numpy as np\n'), ((7888, 8104), 'warnings.warn', 'warnings.warn', (['f"""Falling back to uncompiled MERGESORT fast distance covariance because of TypeError exception raised: {e}. Rembember: only floating point values can be used in the compiled implementations."""'], {}), "(\n f'Falling back to uncompiled MERGESORT fast distance covariance because of TypeError exception raised: {e}. Rembember: only floating point values can be used in the compiled implementations.'\n )\n", (7901, 8104), False, 'import warnings\n')]
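The mergesort implementation above computes the same quantity as the textbook O(n^2) double-centering formula for the (biased, unbiased=False) squared distance covariance of 1-d samples, so a naive NumPy version is a handy cross-check on small inputs; this sketch is a reference implementation, not the package's fast path.

import numpy as np

def distance_covariance_sqr_naive(x, y):
    x = np.asarray(x, dtype=float).reshape(-1, 1)
    y = np.asarray(y, dtype=float).reshape(-1, 1)
    a = np.abs(x - x.T)  # pairwise distance matrices
    b = np.abs(y - y.T)
    A = a - a.mean(axis=0) - a.mean(axis=1)[:, None] + a.mean()  # double centering
    B = b - b.mean(axis=0) - b.mean(axis=1)[:, None] + b.mean()
    return (A * B).mean()

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.normal(size=100)
    y = 2.0 * x + rng.normal(size=100)
    print(distance_covariance_sqr_naive(x, y))  # clearly positive for dependent data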
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2014-10-28 04:41:23
# @Last Modified by: marinheiro
# @Last Modified time: 2014-12-08 23:30:01

"""
Auxiliary functions to convert between different rotation representations.
"""

import numpy
import numpy.linalg
import scipy
import math


# Axis-Angle <-> Log Conversion
def axis_angle_to_log(n, theta):
    """Converts from the axis-angle representation to the log representation
    """
    return n*theta


def log_to_axis_angle(w):
    """OI
    """
    theta = numpy.linalg.norm(w)
    n = numpy.zeros((3,))
    if theta != 0.0:
        n = w/theta
    return (n, theta)


# Quaternion <-> Axis-Angle conversion
def quaternion_to_axis_angle(quat):
    """OI
    """
    theta = 2.0*math.atan2(numpy.linalg.norm(quat[1:]), quat[0])
    n = numpy.zeros((3, 1))
    if theta != 0.0:
        n = quat[1:]/math.sin(theta/2)
    return (n, theta)


def axis_angle_to_quaternion(n, theta):
    """OI
    """
    c = math.cos(theta/2)
    s = math.sin(theta/2)
    quat = numpy.zeros((4, 1))
    quat[0] = c
    quat[1:] = n*s
    return quat


# Matrix <-> Quaternion conversion
def matrix_to_quaternion(rot):
    """OI
    """
    s = math.sqrt(numpy.trace(rot) + 1.0)/2
    quat = numpy.array([[s],
                        [(rot[2, 1]-rot[1, 2])/(4*s)],
                        [(rot[0, 2]-rot[2, 0])/(4*s)],
                        [(rot[1, 0]-rot[0, 1])/(4*s)],
                        ])
    return quat


def quaternion_to_matrix(quat):
    """OI
    """
    qw = quat[0][0]
    qx = quat[1][0]
    qy = quat[2][0]
    qz = quat[3][0]
    rot = numpy.array([[1 - 2*qy*qy - 2*qz*qz, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
                       [2*qx*qy + 2*qz*qw, 1 - 2*qx*qx - 2*qz*qz, 2*qy*qz - 2*qx*qw],
                       [2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx*qx - 2*qy*qy]])
    return rot


# Matrix <-> Axis-Angle conversion
def matrix_to_axis_angle(rot):
    """OI
    """
    return quaternion_to_axis_angle(matrix_to_quaternion(rot))


def axis_angle_to_matrix(n, theta):
    """OI
    """
    # print n.shape, theta
    return quaternion_to_matrix(axis_angle_to_quaternion(n, theta))
[ "numpy.trace", "numpy.zeros", "math.sin", "numpy.linalg.norm", "math.cos", "numpy.array" ]
[((525, 545), 'numpy.linalg.norm', 'numpy.linalg.norm', (['w'], {}), '(w)\n', (542, 545), False, 'import numpy\n'), ((551, 568), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (562, 568), False, 'import numpy\n'), ((776, 795), 'numpy.zeros', 'numpy.zeros', (['(3, 1)'], {}), '((3, 1))\n', (787, 795), False, 'import numpy\n'), ((924, 943), 'math.cos', 'math.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (932, 943), False, 'import math\n'), ((947, 966), 'math.sin', 'math.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (955, 966), False, 'import math\n'), ((973, 992), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (984, 992), False, 'import numpy\n'), ((1165, 1298), 'numpy.array', 'numpy.array', (['[[s], [(rot[2, 1] - rot[1, 2]) / (4 * s)], [(rot[0, 2] - rot[2, 0]) / (4 *\n s)], [(rot[1, 0] - rot[0, 1]) / (4 * s)]]'], {}), '([[s], [(rot[2, 1] - rot[1, 2]) / (4 * s)], [(rot[0, 2] - rot[2,\n 0]) / (4 * s)], [(rot[1, 0] - rot[0, 1]) / (4 * s)]])\n', (1176, 1298), False, 'import numpy\n'), ((1437, 1723), 'numpy.array', 'numpy.array', (['[[1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 * qx * qz + 2 *\n qy * qw], [2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 * qz * qz, 2 *\n qy * qz - 2 * qx * qw], [2 * qx * qz - 2 * qy * qw, 2 * qy * qz + 2 *\n qx * qw, 1 - 2 * qx * qx - 2 * qy * qy]]'], {}), '([[1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 *\n qx * qz + 2 * qy * qw], [2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 *\n qz * qz, 2 * qy * qz - 2 * qx * qw], [2 * qx * qz - 2 * qy * qw, 2 * qy *\n qz + 2 * qx * qw, 1 - 2 * qx * qx - 2 * qy * qy]])\n', (1448, 1723), False, 'import numpy\n'), ((733, 760), 'numpy.linalg.norm', 'numpy.linalg.norm', (['quat[1:]'], {}), '(quat[1:])\n', (750, 760), False, 'import numpy\n'), ((828, 847), 'math.sin', 'math.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (836, 847), False, 'import math\n'), ((1131, 1147), 'numpy.trace', 'numpy.trace', (['rot'], {}), '(rot)\n', (1142, 1147), False, 'import numpy\n')]
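A quick numerical sanity check for the conventions used above (w, x, y, z quaternion layout, unit rotation axis): an axis-angle rotation turned into a quaternion and then into a matrix should be orthonormal with determinant +1, and its trace should recover the original angle. Re-implemented inline with a hypothetical helper name so it runs standalone.

import numpy as np

def aa_to_matrix(n, theta):
    # quaternion w = cos(theta/2), (x, y, z) = n * sin(theta/2), then the same
    # matrix formula as quaternion_to_matrix above (valid for unit quaternions)
    w, (x, y, z) = np.cos(theta / 2), np.sin(theta / 2) * np.asarray(n, dtype=float)
    return np.array([
        [1 - 2*y*y - 2*z*z, 2*x*y - 2*z*w,     2*x*z + 2*y*w],
        [2*x*y + 2*z*w,     1 - 2*x*x - 2*z*z, 2*y*z - 2*x*w],
        [2*x*z - 2*y*w,     2*y*z + 2*x*w,     1 - 2*x*x - 2*y*y],
    ])

if __name__ == "__main__":
    n = np.array([1.0, 1.0, 0.0]) / np.sqrt(2.0)
    theta = 0.7
    R = aa_to_matrix(n, theta)
    print(np.allclose(R @ R.T, np.eye(3)), np.isclose(np.linalg.det(R), 1.0))
    print(np.isclose(np.arccos((np.trace(R) - 1) / 2), theta))  # angle recovered from trace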
import flappybird as fb import random import time from keras.models import Sequential from keras.layers import Dense from keras.optimizers import SGD import numpy as np import copy SCALE_FACTOR = 200 class GeneticBrain(fb.Brain): def __init__(self,n_input,n_hidden): ''' self.model = Sequential() self.model.add(Dense(n_hidden,activation='sigmoid',input_shape=(n_input,))) self.model.add(Dense(1,activation='sigmoid')) #print(self.getModel()) ''' self.model = NeuralNetwork([n_input,n_hidden],'logistic') def decideFlap(self,params): #print(params) distance = params['distance'] + params['pipeWidth'] deltaHeight = (params['bottomPipeHeight'] + params['topPipeHeight'])/2 - params['height'] velY = params['velY'] data = [distance * SCALE_FACTOR, deltaHeight * SCALE_FACTOR] pred = self.model.predict(data) #print(pred) return pred[0] > 0.5 def getModel(self): return self.model.getWeights() def setModel(self,weights): self.model.setWeights(weights) return True class GeneticAlgorithm(): def __init__(self,max_units,top_units): self.max_units = max_units self.top_units = top_units if max_units < top_units: self.top_units = max_units self.population = [] self.best_brain = None def reset(self): self.iteration = 1 self.mutateRate = 1 self.best_population = 0 self.best_fitness = 0 self.best_score = 0 def createPopulation(self): self.population = [] for i in range(self.max_units): newUnit = GeneticBrain(2,6) newUnit.index = i newUnit.fitness = 0 newUnit.score = 0 newUnit.isWinner = False self.population.append(newUnit) return self.population def evolvePopulation(self,results): winners = self.selection(results) for w in winners: print("%d: fitness = %f score = %d" %(w.index,w.fitness,w.score)) if self.mutateRate == 1 and winners[0].fitness < 0: # all is bad # create another population print("recreate popultation") return self.createPopulation() else: self.mutateRate = 0.2 if winners[0].fitness > self.best_fitness: self.best_fitness = winners[0].fitness self.best_score = winners[0].score winners[0].model.save('best.h5') for i in range(self.top_units,self.max_units): if i == self.top_units: parantA = winners[0].getModel() parantB = winners[1].getModel() offspring = self.crossOver(parantA,parantB) elif i < self.max_units - 2: parantA = self.getRandomUnit(winners).getModel() parantB = self.getRandomUnit(winners).getModel() offspring = self.crossOver(parantA,parantB) else: offspring = winners[0].getModel() offspring = self.mutation(offspring) newUnit = self.population[i] newUnit.setModel(offspring) newUnit.score = 0 newUnit.isWinner = False return self.population def selection(self,results): for i in range(self.top_units): self.population[results[i].index].isWinner = True return results[:self.top_units] def crossOver(self,parantA,parantB): length = np.size(parantA[1],0) cutPoint = random.randint(0,length-1) for i in range(cutPoint,length): tmp = parantA[1][0][i] parantA[1][0][i] = parantB[1][0][i] parantB[1][0][i] = tmp if random.randint(0,1): return parantA else: return parantB def mutation(self,offspring): for i in offspring[1]: for bias in i: bias = self.mutate(bias) for i in offspring[0]: for weight in i: weight = self.mutate(weight) return offspring def mutate(self,gene): if random.random() < self.mutateRate: mutateFactor = 1 + (random.random() - 0.5) * 3 + (random.random() - 0.5) gene *= mutateFactor return gene def getRandomUnit(self,array): return array[random.randint(0,len(array)-1)] def normalize(self,value,maxValue): if value < -maxValue: value = -maxValue elif value > maxValue: value = maxValue return 
value/maxValue def saveBestBird(self): pass import pygame class PlayerBrain(fb.Brain): # 玩家大脑 def decideFlap(self,params): #print(params) return params['playerClick'] class HappyBrain(fb.Brain): def __init__(self): random.seed(2000) def decideFlap(self,params): #print(params) pygame.event.get() if params['height'] < 40: return False r = random.randint(0,1000) return r > 940 def train(): bird_num = 10 GA = GeneticAlgorithm(bird_num,4) GA.reset() brains = GA.createPopulation() #brains = [HappyBrain()] * bird_num g = fb.FlappyBirdGame(30,bird_num,brains) train_time = 200 for i in range(train_time): g.run() results = g.result() print("Generation %d:" %(i)) sorted_brains = [] for r in results[::-1]: b = r[0].brain b.fitness = (r[1]['score']) * r[1]['interval'] - r[1]['distance'] b.score = r[1]['score'] sorted_brains.append(b) brains = GA.evolvePopulation(sorted_brains) print("best score = %d best fitness = %d" % (GA.best_score,GA.best_fitness)) g.reset(bird_num,brains) GA.saveBestBird() print("GA end!") from simpleNeuralNetwork import NeuralNetwork class simpleNNBrain(fb.Brain): def __init__(self): self.model = NeuralNetwork([2,6,1],'logistic') print(self.model.getWeights()) def decideFlap(self,params): distance = params['distance'] + params['pipeWidth'] deltaHeight = (params['bottomPipeHeight'] + params['topPipeHeight'])/2 - params['height'] velY = params['velY'] data = [distance * SCALE_FACTOR, deltaHeight * SCALE_FACTOR] pred = self.model.predict(data) #print(pred) print(pred) return pred[0] > 0.5 def train_test(): bird_num = 10 brains = [] for i in range(bird_num): brains.append(simpleNNBrain()) g = fb.FlappyBirdGame(30,bird_num,brains) for i in range(10): g.run() result = g.result() brains = [] for i in range(bird_num): brains.append(simpleNNBrain()) g.reset(10,brains) if __name__ == '__main__': train()
[ "numpy.size", "random.randint", "flappybird.FlappyBirdGame", "pygame.event.get", "random.random", "random.seed", "simpleNeuralNetwork.NeuralNetwork" ]
[((5525, 5564), 'flappybird.FlappyBirdGame', 'fb.FlappyBirdGame', (['(30)', 'bird_num', 'brains'], {}), '(30, bird_num, brains)\n', (5542, 5564), True, 'import flappybird as fb\n'), ((6932, 6971), 'flappybird.FlappyBirdGame', 'fb.FlappyBirdGame', (['(30)', 'bird_num', 'brains'], {}), '(30, bird_num, brains)\n', (6949, 6971), True, 'import flappybird as fb\n'), ((529, 575), 'simpleNeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['[n_input, n_hidden]', '"""logistic"""'], {}), "([n_input, n_hidden], 'logistic')\n", (542, 575), False, 'from simpleNeuralNetwork import NeuralNetwork\n'), ((3740, 3762), 'numpy.size', 'np.size', (['parantA[1]', '(0)'], {}), '(parantA[1], 0)\n', (3747, 3762), True, 'import numpy as np\n'), ((3781, 3810), 'random.randint', 'random.randint', (['(0)', '(length - 1)'], {}), '(0, length - 1)\n', (3795, 3810), False, 'import random\n'), ((4005, 4025), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4019, 4025), False, 'import random\n'), ((5096, 5113), 'random.seed', 'random.seed', (['(2000)'], {}), '(2000)\n', (5107, 5113), False, 'import random\n'), ((5178, 5196), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5194, 5196), False, 'import pygame\n'), ((5268, 5291), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (5282, 5291), False, 'import random\n'), ((6311, 6347), 'simpleNeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['[2, 6, 1]', '"""logistic"""'], {}), "([2, 6, 1], 'logistic')\n", (6324, 6347), False, 'from simpleNeuralNetwork import NeuralNetwork\n'), ((4396, 4411), 'random.random', 'random.random', ([], {}), '()\n', (4409, 4411), False, 'import random\n'), ((4493, 4508), 'random.random', 'random.random', ([], {}), '()\n', (4506, 4508), False, 'import random\n'), ((4463, 4478), 'random.random', 'random.random', ([], {}), '()\n', (4476, 4478), False, 'import random\n')]
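The GeneticAlgorithm in the row above evolves network weights with single-point crossover and a multiplicative mutation factor. A stripped-down sketch of those two operators on flat NumPy weight vectors; the cut-point range, mutation rate, and demo vectors are illustrative choices, not an exact copy of the original class.

import numpy as np
import random

def cross_over(parent_a, parent_b):
    cut = random.randint(1, len(parent_a) - 1)
    child = parent_a.copy()
    child[cut:] = parent_b[cut:]  # take the genes after the cut point from the other parent
    return child

def mutate(genes, mutate_rate=0.2):
    out = genes.copy()
    for i in range(len(out)):
        if random.random() < mutate_rate:
            # same multiplicative factor shape as the original mutate()
            out[i] *= 1 + (random.random() - 0.5) * 3 + (random.random() - 0.5)
    return out

if __name__ == "__main__":
    random.seed(0)
    a = np.full(8, -1.0)
    b = np.ones(8)
    print(mutate(cross_over(a, b)))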
#!/usr/bin/python import roslib import rospy import cv2 import numpy as np import cv_bridge import time from sensor_msgs.msg import Image from std_msgs.msg import String from common import * from jupiter.msg import BallPosition class Detector: current_camera = None camera_subscription = None bridge = None processed_image_publisher = None processed_image_bw_publisher = None offset = 100 wheel_publisher = None state = "" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm = "" ball_position = None def __init__(self): init_arguments(self) self.state = "NO_SEARCH" rospy.Subscriber("/jupiter/detector/current_camera", String, self.camera_change) rospy.Subscriber("/jupiter/detector/state_change", String, self.state_change) self.robot_movement_publisher = rospy.Publisher("/jupiter/robot_movement/command", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher("/jupiter/robot_movement/result", String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher("/jupiter/processed_image", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher("/jupiter/processed_image_bw", Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher("/jupiter/ball_position", BallPosition, queue_size = 10) self.ball_position = BallPosition() self.ball_position.detected = False def camera_change(self, command): self.current_camera = command.data rospy.loginfo("Detector: current camera changed to %s", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == "ASUS_CAMERA": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, "/Asus_Camera/rgb/image_raw"), Image, self.process_image) elif self.current_camera == "ARM_CAMERA": self.camera_subscription = rospy.Subscriber("/Creative_Camera/rgb/image_raw" if self.is_simulation else "/komodo_1/arm_cam_node/image_raw", Image, self.process_image) self.move_robot_or_arm = "MOVE_ROBOT" def state_change(self, command): if command.data == "SEARCH": self.state = "SEARCH" rospy.loginfo("Detector: starting to search for ball") elif command.data == "NO_SEARCH": self.state = "NO_SEARCH" rospy.loginfo("Detector: stopped searching for ball") def process_image(self, image): if self.state == "NO_SEARCH": return image_cv = self.bridge.imgmsg_to_cv2(image, "bgr8") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras have different sensors, so their color rendition varies. Adjust for this issue when trying to filter the red colors in the image. 
if self.current_camera == "ASUS_CAMERA": (lower, upper) = ([0, 0, 100], [55, 55, 255]) # dark red lower = np.array(lower, dtype = "uint8") upper = np.array(upper, dtype = "uint8") mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0, 0, 100], [70, 100, 255]) lower = np.array(lower, dtype = "uint8") upper = np.array(upper, dtype = "uint8") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) = ([65, 50, 170], [100, 70, 255]) lower2 = np.array(lower2, dtype = "uint8") upper2 = np.array(upper2, dtype = "uint8") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor = False params.filterByCircularity = True params.filterByArea = True params.minArea = 30 if self.current_camera == "ASUS_CAMERA" else 15 params.maxArea = 2500 if self.current_camera == "ASUS_CAMERA" else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera == "FRONT_CAMERA": params.minDistBetweenBlobs = 20.0 # Create a detector with the parameters, according to your OpenCV version (2 or 3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles = [] for keypoint in keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y, r]) target = None if circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for circle in circles: if circle[2] > max_r and (circle[1] >= (image.height * 0.5) if self.current_camera == "ASUS_CAMERA" else True): max_r = circle[2] target = circle if target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0) # publish the keypoints and target circle superimposed on the source image from the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, "bgr8")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, "bgr8")) if target[2]: rospy.loginfo("x: %d, y: %d, radius: %d", target[0], target[1], target[2]) if self.current_camera == "ASUS_CAMERA" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish("STOP-BALL_FOUND") rospy.loginfo("Detector: ball found") elif target != None and self.current_camera == "ASUS_CAMERA" and abs(target[1] - 
(image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish("STOP-BALL_AT_BOTTOM_OF_FRAME") rospy.loginfo("Detector: ball is at bottom of Asus Camera frame") elif target != None and self.current_camera == "ARM_CAMERA" and self.move_robot_or_arm == "MOVE_ROBOT": if self.is_simulation: # the real arm cam emits an upside-down image, so adjust for orientation if target[1] < 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish("FORWARD-LEFT") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish("FORWARD-RIGHT") else: self.robot_movement_publisher.publish("FORWARD_ARM") else: self.move_robot_or_arm = "MOVE_ARM" self.robot_movement_publisher.publish("STOP-READY_TO_GRAB") else: if target[1] > 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish("FORWARD-RIGHT") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish("FORWARD-LEFT") else: self.robot_movement_publisher.publish("FORWARD_ARM") else: self.move_robot_or_arm = "MOVE_ARM" self.robot_movement_publisher.publish("STOP-READY_TO_GRAB") elif target != None and self.current_camera == "ARM_CAMERA" and self.move_robot_or_arm == "MOVE_ARM": rospy.loginfo("Detector: publishing ball position") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = "NO_SEARCH" def asus_ballpark(self, x, image): return (image.width * 0.65) <= x and x <= (image.width * 0.85) if __name__ == "__main__": rospy.init_node("detector") detector = Detector() rospy.spin()
[ "cv2.GaussianBlur", "rospy.Subscriber", "cv2.bitwise_and", "numpy.around", "cv2.__version__.split", "cv2.inRange", "cv2.cvtColor", "rospy.init_node", "jupiter.msg.BallPosition", "cv2.circle", "rospy.loginfo", "cv2.SimpleBlobDetector_create", "cv2.SimpleBlobDetector", "cv2.bitwise_or", "cv_bridge.CvBridge", "cv2.SimpleBlobDetector_Params", "cv2.threshold", "rospy.Publisher", "numpy.array", "rospy.spin" ]
[((10179, 10206), 'rospy.init_node', 'rospy.init_node', (['"""detector"""'], {}), "('detector')\n", (10194, 10206), False, 'import rospy\n'), ((10237, 10249), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (10247, 10249), False, 'import rospy\n'), ((742, 827), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/jupiter/detector/current_camera"""', 'String', 'self.camera_change'], {}), "('/jupiter/detector/current_camera', String, self.camera_change\n )\n", (758, 827), False, 'import rospy\n'), ((831, 908), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/jupiter/detector/state_change"""', 'String', 'self.state_change'], {}), "('/jupiter/detector/state_change', String, self.state_change)\n", (847, 908), False, 'import rospy\n'), ((949, 1022), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/robot_movement/command"""', 'String'], {'queue_size': '(10)'}), "('/jupiter/robot_movement/command', String, queue_size=10)\n", (964, 1022), False, 'import rospy\n'), ((1064, 1136), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/robot_movement/result"""', 'String'], {'queue_size': '(10)'}), "('/jupiter/robot_movement/result', String, queue_size=10)\n", (1079, 1136), False, 'import rospy\n'), ((1161, 1181), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (1179, 1181), False, 'import cv_bridge\n'), ((1223, 1288), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/processed_image"""', 'Image'], {'queue_size': '(10)'}), "('/jupiter/processed_image', Image, queue_size=10)\n", (1238, 1288), False, 'import rospy\n'), ((1335, 1403), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/processed_image_bw"""', 'Image'], {'queue_size': '(10)'}), "('/jupiter/processed_image_bw', Image, queue_size=10)\n", (1350, 1403), False, 'import rospy\n'), ((1445, 1515), 'rospy.Publisher', 'rospy.Publisher', (['"""/jupiter/ball_position"""', 'BallPosition'], {'queue_size': '(10)'}), "('/jupiter/ball_position', BallPosition, queue_size=10)\n", (1460, 1515), False, 'import rospy\n'), ((1547, 1561), 'jupiter.msg.BallPosition', 'BallPosition', ([], {}), '()\n', (1559, 1561), False, 'from jupiter.msg import BallPosition\n'), ((1696, 1772), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: current camera changed to %s"""', 'self.current_camera'], {}), "('Detector: current camera changed to %s', self.current_camera)\n", (1709, 1772), False, 'import rospy\n'), ((3019, 3056), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image_cv', '(9, 9)', '(0)'], {}), '(image_cv, (9, 9), 0)\n', (3035, 3056), False, 'import cv2\n'), ((4464, 4504), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_BGR2GRAY'], {}), '(output, cv2.COLOR_BGR2GRAY)\n', (4476, 4504), False, 'import cv2\n'), ((4538, 4615), 'cv2.threshold', 'cv2.threshold', (['image_grayscale', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (4551, 4615), False, 'import cv2\n'), ((4633, 4664), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (4662, 4664), False, 'import cv2\n'), ((5357, 5383), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (5378, 5383), False, 'import cv2\n'), ((2641, 2695), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: starting to search for ball"""'], {}), "('Detector: starting to search for ball')\n", (2654, 2695), False, 'import rospy\n'), ((3351, 3381), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (3359, 3381), True, 'import numpy as np\n'), ((3404, 
3434), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (3412, 3434), True, 'import numpy as np\n'), ((3456, 3496), 'cv2.inRange', 'cv2.inRange', (['blurred_image', 'lower', 'upper'], {}), '(blurred_image, lower, upper)\n', (3467, 3496), False, 'import cv2\n'), ((3518, 3574), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred_image', 'blurred_image'], {'mask': 'mask'}), '(blurred_image, blurred_image, mask=mask)\n', (3533, 3574), False, 'import cv2\n'), ((3633, 3670), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image_cv', '(9, 9)', '(0)'], {}), '(image_cv, (9, 9), 0)\n', (3649, 3670), False, 'import cv2\n'), ((3750, 3780), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (3758, 3780), True, 'import numpy as np\n'), ((3803, 3833), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (3811, 3833), True, 'import numpy as np\n'), ((3855, 3895), 'cv2.inRange', 'cv2.inRange', (['blurred_image', 'lower', 'upper'], {}), '(blurred_image, lower, upper)\n', (3866, 3895), False, 'import cv2\n'), ((3929, 3985), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred_image', 'blurred_image'], {'mask': 'mask'}), '(blurred_image, blurred_image, mask=mask)\n', (3944, 3985), False, 'import cv2\n'), ((4072, 4103), 'numpy.array', 'np.array', (['lower2'], {'dtype': '"""uint8"""'}), "(lower2, dtype='uint8')\n", (4080, 4103), True, 'import numpy as np\n'), ((4127, 4158), 'numpy.array', 'np.array', (['upper2'], {'dtype': '"""uint8"""'}), "(upper2, dtype='uint8')\n", (4135, 4158), True, 'import numpy as np\n'), ((4181, 4224), 'cv2.inRange', 'cv2.inRange', (['blurred_image2', 'lower2', 'upper2'], {}), '(blurred_image2, lower2, upper2)\n', (4192, 4224), False, 'import cv2\n'), ((4259, 4318), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred_image2', 'blurred_image2'], {'mask': 'mask2'}), '(blurred_image2, blurred_image2, mask=mask2)\n', (4274, 4318), False, 'import cv2\n'), ((4374, 4437), 'cv2.bitwise_or', 'cv2.bitwise_or', (['output_dark_orange', 'output_light_orange', 'output'], {}), '(output_dark_orange, output_light_orange, output)\n', (4388, 4437), False, 'import cv2\n'), ((5437, 5467), 'cv2.SimpleBlobDetector', 'cv2.SimpleBlobDetector', (['params'], {}), '(params)\n', (5459, 5467), False, 'import cv2\n'), ((5506, 5543), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (5535, 5543), False, 'import cv2\n'), ((6445, 6516), 'cv2.circle', 'cv2.circle', (['processed_image_bw', 'center', 'target[2]', '(255, 0, 0)', '(1)', '(8)', '(0)'], {}), '(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0)\n', (6455, 6516), False, 'import cv2\n'), ((6669, 6737), 'cv2.circle', 'cv2.circle', (['processed_image', 'center', 'target[2]', '(255, 0, 0)', '(1)', '(8)', '(0)'], {}), '(processed_image, center, target[2], (255, 0, 0), 1, 8, 0)\n', (6679, 6737), False, 'import cv2\n'), ((2330, 2473), 'rospy.Subscriber', 'rospy.Subscriber', (["('/Creative_Camera/rgb/image_raw' if self.is_simulation else\n '/komodo_1/arm_cam_node/image_raw')", 'Image', 'self.process_image'], {}), "('/Creative_Camera/rgb/image_raw' if self.is_simulation else\n '/komodo_1/arm_cam_node/image_raw', Image, self.process_image)\n", (2346, 2473), False, 'import rospy\n'), ((2787, 2840), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: stopped searching for ball"""'], {}), "('Detector: stopped searching for ball')\n", (2800, 2840), False, 'import rospy\n'), ((5887, 5905), 'numpy.around', 
'np.around', (['circles'], {}), '(circles)\n', (5896, 5905), True, 'import numpy as np\n'), ((6318, 6330), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6326, 6330), True, 'import numpy as np\n'), ((6586, 6598), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6594, 6598), True, 'import numpy as np\n'), ((7116, 7190), 'rospy.loginfo', 'rospy.loginfo', (['"""x: %d, y: %d, radius: %d"""', 'target[0]', 'target[1]', 'target[2]'], {}), "('x: %d, y: %d, radius: %d', target[0], target[1], target[2])\n", (7129, 7190), False, 'import rospy\n'), ((7479, 7516), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: ball found"""'], {}), "('Detector: ball found')\n", (7492, 7516), False, 'import rospy\n'), ((7891, 7956), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: ball is at bottom of Asus Camera frame"""'], {}), "('Detector: ball is at bottom of Asus Camera frame')\n", (7904, 7956), False, 'import rospy\n'), ((9547, 9598), 'rospy.loginfo', 'rospy.loginfo', (['"""Detector: publishing ball position"""'], {}), "('Detector: publishing ball position')\n", (9560, 9598), False, 'import rospy\n')]
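For quick reference, here is a self-contained sketch of the colour-mask plus blob-detection pipeline the node above builds, including the same OpenCV 2 vs 3+ constructor split; the threshold values are placeholders rather than the tuned ones from the original code.

import cv2
import numpy as np

def detect_red_blobs(bgr_image):
    # isolate reddish pixels, then look for roughly circular blobs in the mask
    blurred = cv2.GaussianBlur(bgr_image, (9, 9), 0)
    lower = np.array([0, 0, 100], dtype="uint8")
    upper = np.array([55, 55, 255], dtype="uint8")
    mask = cv2.inRange(blurred, lower, upper)

    params = cv2.SimpleBlobDetector_Params()
    params.filterByColor = False
    params.filterByArea = True
    params.minArea = 30
    params.filterByCircularity = True
    params.minCircularity = 0.25

    # OpenCV 2 and OpenCV 3+ expose different constructors
    if int(cv2.__version__.split('.')[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)
    return detector.detect(mask)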
import json import matplotlib.pyplot as plt import numpy as np import pickle import tensorflow as tf import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = [] with open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for t in CLASSES} for track in tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for k in infos: tracks = infos[k] fcount = np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {} for tag in tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use = [] for track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, file=f) f.write('\nTraining configuration:\n' + training_config_text + '\n') f.write('\nModel configuration:\n' + model_config_text + '\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\n' print(details) print(' Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation("relu")(x) def compute_scores(tp, fp, fn): if tp != 0: precision = tp / (tp + fp) recall = tp / (tp + fn) fscore = 2. 
* precision * recall / (precision + recall) return precision, recall, fscore else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving checkpoints to {config_copy["filepath"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) * 100 + 11 for i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training ' + legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label']) plt.xlabel('Epoch') plt.legend(legends, loc=plot['caption-loc'], framealpha=.5) plt.savefig(f'{save_directory}/history.png') plt.close()
[ "matplotlib.pyplot.title", "numpy.sum", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.ModelCheckpoint", "pickle.load", "tensorflow.keras.callbacks.EarlyStopping", "traceback.print_exc", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.callbacks.ReduceLROnPlateau", "matplotlib.pyplot.close", "tensorflow.keras.utils.plot_model", "tensorflow.keras.layers.Activation", "numpy.random.shuffle", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ]
[((4864, 4908), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{save_directory}/history.png"""'], {}), "(f'{save_directory}/history.png')\n", (4875, 4908), True, 'import matplotlib.pyplot as plt\n'), ((4913, 4924), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4922, 4924), True, 'import matplotlib.pyplot as plt\n'), ((937, 976), 'numpy.sum', 'np.sum', (['[t.frame_count for t in tracks]'], {}), '([t.frame_count for t in tracks])\n', (943, 976), True, 'import numpy as np\n'), ((2215, 2308), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': 'f"""{save_directory}/model.png"""', 'show_shapes': '(True)'}), "(model, to_file=f'{save_directory}/model.png',\n show_shapes=True)\n", (2240, 2308), True, 'import tensorflow as tf\n'), ((2347, 2386), 'numpy.sum', 'np.sum', (['[t.frame_count for t in tracks]'], {}), '([t.frame_count for t in tracks])\n', (2353, 2386), True, 'import numpy as np\n'), ((2934, 2990), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n'], {'kernel_initializer': '"""he_normal"""'}), "(n, kernel_initializer='he_normal')\n", (2955, 2990), True, 'import tensorflow as tf\n'), ((3002, 3038), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3036, 3038), True, 'import tensorflow as tf\n'), ((3053, 3087), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3079, 3087), True, 'import tensorflow as tf\n'), ((3752, 3801), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {}), '(**config_copy)\n', (3786, 3801), True, 'import tensorflow as tf\n'), ((4278, 4307), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(plt_position + i)'], {}), '(plt_position + i)\n', (4289, 4307), True, 'import matplotlib.pyplot as plt\n'), ((4316, 4340), 'matplotlib.pyplot.title', 'plt.title', (["plot['title']"], {}), "(plot['title'])\n", (4325, 4340), True, 'import matplotlib.pyplot as plt\n'), ((4685, 4701), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(1)'}), '(left=1)\n', (4693, 4701), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4728), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4718, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4736, 4763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["plot['y-label']"], {}), "(plot['y-label'])\n", (4746, 4763), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4782, 4791), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4860), 'matplotlib.pyplot.legend', 'plt.legend', (['legends'], {'loc': "plot['caption-loc']", 'framealpha': '(0.5)'}), "(legends, loc=plot['caption-loc'], framealpha=0.5)\n", (4810, 4860), True, 'import matplotlib.pyplot as plt\n'), ((1267, 1292), 'numpy.random.shuffle', 'np.random.shuffle', (['tracks'], {}), '(tracks)\n', (1284, 1292), True, 'import numpy as np\n'), ((3858, 3909), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'tf.keras.callbacks.ReduceLROnPlateau', ([], {}), '(**config_copy)\n', (3894, 3909), True, 'import tensorflow as tf\n'), ((4411, 4443), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[value]'], {}), '(history.history[value])\n', (4419, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4625), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[value]'], {}), '(history.history[value])\n', (4601, 4625), True, 'import matplotlib.pyplot as plt\n'), ((377, 
398), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (396, 398), False, 'import traceback\n'), ((3972, 4019), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {}), '(**config_copy)\n', (4004, 4019), True, 'import tensorflow as tf\n'), ((318, 332), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (329, 332), False, 'import pickle\n')]
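A short usage sketch for the config-driven build_callback factory defined in the module above; the filename pattern, monitor key, and save directory are illustrative values only.

callback_configs = [
    {'name': 'checkpoint_callback', 'filepath': 'best-{epoch:02d}.h5',
     'monitor': 'val_loss', 'save_best_only': True},
    {'name': 'lr_callback', 'monitor': 'val_loss', 'factor': 0.5, 'patience': 3},
    {'name': 'stopping_callback', 'monitor': 'val_loss', 'patience': 10},
]
callbacks = [build_callback(cfg, save_directory='runs/exp1') for cfg in callback_configs]
# model.fit(train_ds, validation_data=val_ds, epochs=50, callbacks=callbacks)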
""" demo05_gridsearch.py 网格搜索 """ import numpy as np import sklearn.model_selection as ms import sklearn.svm as svm import sklearn.metrics as sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y = data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y = \ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l, r = x[:, 0].min() - 1, x[:, 0].max() + 1 b, t = x[:, 1].min() - 1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6', 'fc': 'orange', 'alpha': 0.8}) mp.show()
[ "matplotlib.pyplot.title", "sklearn.model_selection.GridSearchCV", "matplotlib.pyplot.show", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter", "sklearn.metrics.classification_report", "matplotlib.pyplot.figure", "numpy.array", "numpy.loadtxt", "matplotlib.pyplot.pcolormesh", "sklearn.svm.SVC", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.ylabel", "numpy.linspace", "matplotlib.pyplot.xlabel" ]
[((185, 250), 'numpy.loadtxt', 'np.loadtxt', (['"""../ml_data/multiple2.txt"""'], {'delimiter': '""","""', 'dtype': '"""f8"""'}), "('../ml_data/multiple2.txt', delimiter=',', dtype='f8')\n", (195, 250), True, 'import numpy as np\n'), ((338, 395), 'sklearn.model_selection.train_test_split', 'ms.train_test_split', (['x', 'y'], {'test_size': '(0.25)', 'random_state': '(5)'}), '(x, y, test_size=0.25, random_state=5)\n', (357, 395), True, 'import sklearn.model_selection as ms\n'), ((410, 435), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (417, 435), True, 'import sklearn.svm as svm\n'), ((649, 685), 'sklearn.model_selection.GridSearchCV', 'ms.GridSearchCV', (['model', 'params'], {'cv': '(5)'}), '(model, params, cv=5)\n', (664, 685), True, 'import sklearn.model_selection as ms\n'), ((1045, 1130), 'numpy.array', 'np.array', (['[[2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]'], {}), '([[2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]\n )\n', (1053, 1130), True, 'import numpy as np\n'), ((1576, 1623), 'matplotlib.pyplot.figure', 'mp.figure', (['"""Probability"""'], {'facecolor': '"""lightgray"""'}), "('Probability', facecolor='lightgray')\n", (1585, 1623), True, 'import matplotlib.pyplot as mp\n'), ((1624, 1660), 'matplotlib.pyplot.title', 'mp.title', (['"""Probability"""'], {'fontsize': '(20)'}), "('Probability', fontsize=20)\n", (1632, 1660), True, 'import matplotlib.pyplot as mp\n'), ((1661, 1688), 'matplotlib.pyplot.xlabel', 'mp.xlabel', (['"""x"""'], {'fontsize': '(14)'}), "('x', fontsize=14)\n", (1670, 1688), True, 'import matplotlib.pyplot as mp\n'), ((1689, 1716), 'matplotlib.pyplot.ylabel', 'mp.ylabel', (['"""y"""'], {'fontsize': '(14)'}), "('y', fontsize=14)\n", (1698, 1716), True, 'import matplotlib.pyplot as mp\n'), ((1717, 1745), 'matplotlib.pyplot.tick_params', 'mp.tick_params', ([], {'labelsize': '(10)'}), '(labelsize=10)\n', (1731, 1745), True, 'import matplotlib.pyplot as mp\n'), ((1746, 1802), 'matplotlib.pyplot.pcolormesh', 'mp.pcolormesh', (['grid_x[0]', 'grid_x[1]', 'grid_y'], {'cmap': '"""gray"""'}), "(grid_x[0], grid_x[1], grid_y, cmap='gray')\n", (1759, 1802), True, 'import matplotlib.pyplot as mp\n'), ((1804, 1870), 'matplotlib.pyplot.scatter', 'mp.scatter', (['test_x[:, 0]', 'test_x[:, 1]'], {'c': 'test_y', 'cmap': '"""brg"""', 's': '(80)'}), "(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)\n", (1814, 1870), True, 'import matplotlib.pyplot as mp\n'), ((1871, 1960), 'matplotlib.pyplot.scatter', 'mp.scatter', (['prob_x[:, 0]', 'prob_x[:, 1]'], {'c': 'pred_prob_y', 'cmap': '"""jet_r"""', 's': '(80)', 'marker': '"""D"""'}), "(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y, cmap='jet_r', s=80,\n marker='D')\n", (1881, 1960), True, 'import matplotlib.pyplot as mp\n'), ((2390, 2399), 'matplotlib.pyplot.show', 'mp.show', ([], {}), '()\n', (2397, 2399), True, 'import matplotlib.pyplot as mp\n'), ((981, 1026), 'sklearn.metrics.classification_report', 'sm.classification_report', (['test_y', 'pred_test_y'], {}), '(test_y, pred_test_y)\n', (1005, 1026), True, 'import sklearn.metrics as sm\n'), ((1369, 1389), 'numpy.linspace', 'np.linspace', (['l', 'r', 'n'], {}), '(l, r, n)\n', (1380, 1389), True, 'import numpy as np\n'), ((1412, 1432), 'numpy.linspace', 'np.linspace', (['b', 't', 'n'], {}), '(b, t, n)\n', (1423, 1432), True, 'import numpy as np\n')]
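A natural follow-up to the grid search above is persisting the refitted best estimator; a minimal sketch using joblib (the file name is arbitrary).

import joblib

# GridSearchCV refits the best parameter combination on the training split by
# default, so best_estimator_ can be saved and reloaded directly.
joblib.dump(model.best_estimator_, 'svc_best.pkl')
restored = joblib.load('svc_best.pkl')
print(restored.predict(test_x[:5]))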
import struct
import numpy as np
from .nbt import NBTFile
import io


class BufferDecoder(object):
    def __init__(self, bytes) -> None:
        self.bytes = bytes
        self.curr = 0

    def read_var_uint32(self):
        # Do I really need to pinch bytes this hard?? A var-uint32 is at most 5 bytes anyway??
        i, v = 0, 0
        while i < 35:
            b = self.read_byte()
            v |= (b & 0x7f) << i
            if b & 0x80 == 0:
                return v
            i += 7
        assert False, f'read_var_uint32 fail i:{i} v:{v} {self}'

    def read_var_int32(self):
        v_ = self.read_var_uint32()
        v = np.int32(v_ >> 1)
        if (v_ & 1) != 0:
            v = ~v
        return int(v)

    def read_var_uint64(self):
        i, v = 0, 0
        while i < 70:
            b = self.read_byte()
            v |= (b & 0x7f) << i
            if b & 0x80 == 0:
                return v
            i += 7
        assert False, f'read_var_uint64 fail i:{i} v:{v} {self}'

    def read_var_int64(self):
        v_ = self.read_var_uint64()
        v = np.int64(v_ >> 1)
        if (v_ & 1) != 0:
            v = ~v
        return int(v)

    def read_vec3(self):
        self.curr += 12
        return struct.unpack('fff', self.bytes[self.curr - 12:self.curr])

    def read_float32(self):
        self.curr += 4
        return struct.unpack('f', self.bytes[self.curr - 4:self.curr])[0]

    def read_tail(self):
        return self.bytes[self.curr:]

    def read_byte(self):
        self.curr += 1
        return struct.unpack('B', self.bytes[self.curr - 1:self.curr])[0]

    def read_boolen(self):
        return self.read_byte() == 1

    def read_str(self):
        length = self.read_var_uint32()
        self.curr += length
        return self.bytes[self.curr - length:self.curr].decode(encoding='utf-8')

    @staticmethod
    def reverseUUIDBytes(bytes):
        # swap the two 8-byte halves (the original computed this but discarded the result)
        return bytes[8:] + bytes[:8]

    def read_UUID(self):
        self.curr += 16
        uuid_bytes = self.bytes[self.curr - 16:self.curr]
        return self.reverseUUIDBytes(uuid_bytes)

    def read_uint8(self):
        self.curr += 1
        return struct.unpack('B', self.bytes[self.curr - 1:self.curr])[0]

    def read_int16(self):
        self.curr += 2
        return struct.unpack('h', self.bytes[self.curr - 2:self.curr])[0]

    def read_int32(self):
        self.curr += 4
        return struct.unpack('i', self.bytes[self.curr - 4:self.curr])[0]

    def read_uint32(self):
        self.curr += 4
        return struct.unpack('I', self.bytes[self.curr - 4:self.curr])[0]

    def read_bytes(self, _len):
        self.curr += _len
        return self.bytes[self.curr - _len:self.curr]

    def read(self, _len):
        self.curr += _len
        return self.bytes[self.curr - _len:self.curr]

    def read_nbt(self, _len=None):
        if _len is None:
            nbt = NBTFile(self)
            return nbt.to_py()
        else:
            self.curr += _len
            bio = io.BytesIO(self.bytes[self.curr - _len:self.curr])
            nbt = NBTFile(bio)
            return nbt.to_py()


class BufferEncoder(object):
    def __init__(self) -> None:
        self._bytes_elements = []
        self._bytes_elements_count = 0
        self._bytes = b''

    @property
    def bytes(self):
        # join any elements appended since the last access
        if len(self._bytes_elements) != self._bytes_elements_count:
            self._bytes += b''.join(self._bytes_elements[self._bytes_elements_count:])
            self._bytes_elements_count = len(self._bytes_elements)
        return self._bytes

    def append(self, bs: bytes):
        self._bytes_elements.append(bs)

    def write_float32(self, f):
        self.append(struct.pack('f', f))

    def write_byte(self, b):
        self.append(struct.pack('B', b))

    def write_boolen(self, b: bool):
        self.append(struct.pack('B', b))

    def write_uint32(self, i: int):
        self.append(struct.pack('I', i))

    def write_var_uint32(self, x):
        while x >= 0x80:
            self.write_byte(int((x % 128) + 0x80))
            x >>= 7
        self.write_byte(x)

    def write_var_int32(self, x):
        uv = np.uint32(np.uint32(x) << 1)
        if x < 0:
            uv = ~uv
        self.write_var_uint32(uv)

    def write_var_uint64(self, x):
        while x >= 0x80:
            self.write_byte(int((x % 128) + 0x80))
            x //= 128
        self.write_byte(int(x))

    def write_var_int64(self, x):
        uv = np.uint64(np.uint64(x) * 2)
        if x < 0:
            uv = ~uv
        self.write_var_uint64(uv)

    def write_str(self, s: str):
        es = s.encode(encoding='utf-8')
        self.write_var_uint32(len(es))
        self.append(es)

    def write_UUID_bytes(self, uuid_bytes: bytes):
        self.append(uuid_bytes)
[ "numpy.uint32", "io.BytesIO", "numpy.uint64", "struct.unpack", "struct.pack", "numpy.int32", "numpy.int64" ]
[((574, 591), 'numpy.int32', 'np.int32', (['(v_ >> 1)'], {}), '(v_ >> 1)\n', (582, 591), True, 'import numpy as np\n'), ((1036, 1053), 'numpy.int64', 'np.int64', (['(v_ >> 1)'], {}), '(v_ >> 1)\n', (1044, 1053), True, 'import numpy as np\n'), ((1175, 1233), 'struct.unpack', 'struct.unpack', (['"""fff"""', 'self.bytes[self.curr - 12:self.curr]'], {}), "('fff', self.bytes[self.curr - 12:self.curr])\n", (1188, 1233), False, 'import struct\n'), ((1295, 1350), 'struct.unpack', 'struct.unpack', (['"""f"""', 'self.bytes[self.curr - 4:self.curr]'], {}), "('f', self.bytes[self.curr - 4:self.curr])\n", (1308, 1350), False, 'import struct\n'), ((1480, 1535), 'struct.unpack', 'struct.unpack', (['"""B"""', 'self.bytes[self.curr - 1:self.curr]'], {}), "('B', self.bytes[self.curr - 1:self.curr])\n", (1493, 1535), False, 'import struct\n'), ((2077, 2132), 'struct.unpack', 'struct.unpack', (['"""B"""', 'self.bytes[self.curr - 1:self.curr]'], {}), "('B', self.bytes[self.curr - 1:self.curr])\n", (2090, 2132), False, 'import struct\n'), ((2195, 2250), 'struct.unpack', 'struct.unpack', (['"""h"""', 'self.bytes[self.curr - 2:self.curr]'], {}), "('h', self.bytes[self.curr - 2:self.curr])\n", (2208, 2250), False, 'import struct\n'), ((2313, 2368), 'struct.unpack', 'struct.unpack', (['"""i"""', 'self.bytes[self.curr - 4:self.curr]'], {}), "('i', self.bytes[self.curr - 4:self.curr])\n", (2326, 2368), False, 'import struct\n'), ((2432, 2487), 'struct.unpack', 'struct.unpack', (['"""I"""', 'self.bytes[self.curr - 4:self.curr]'], {}), "('I', self.bytes[self.curr - 4:self.curr])\n", (2445, 2487), False, 'import struct\n'), ((2876, 2926), 'io.BytesIO', 'io.BytesIO', (['self.bytes[self.curr - _len:self.curr]'], {}), '(self.bytes[self.curr - _len:self.curr])\n', (2886, 2926), False, 'import io\n'), ((3559, 3578), 'struct.pack', 'struct.pack', (['"""f"""', 'f'], {}), "('f', f)\n", (3570, 3578), False, 'import struct\n'), ((3632, 3651), 'struct.pack', 'struct.pack', (['"""B"""', 'b'], {}), "('B', b)\n", (3643, 3651), False, 'import struct\n'), ((3712, 3731), 'struct.pack', 'struct.pack', (['"""B"""', 'b'], {}), "('B', b)\n", (3723, 3731), False, 'import struct\n'), ((3791, 3810), 'struct.pack', 'struct.pack', (['"""I"""', 'i'], {}), "('I', i)\n", (3802, 3810), False, 'import struct\n'), ((4028, 4040), 'numpy.uint32', 'np.uint32', (['x'], {}), '(x)\n', (4037, 4040), True, 'import numpy as np\n'), ((4342, 4354), 'numpy.uint64', 'np.uint64', (['x'], {}), '(x)\n', (4351, 4354), True, 'import numpy as np\n')]
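A small round-trip check for the encoder/decoder pair above (assuming both classes are importable from the module shown); it exercises the var-int and length-prefixed string paths.

enc = BufferEncoder()
enc.write_var_uint32(300)   # encoded as 0xAC 0x02
enc.write_var_int32(-5)     # zig-zag style signed encoding
enc.write_str('hello')      # var-uint32 length prefix + UTF-8 bytes

dec = BufferDecoder(enc.bytes)
assert dec.read_var_uint32() == 300
assert dec.read_var_int32() == -5
assert dec.read_str() == 'hello'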
import numpy as np from pycocotools_local.coco import * import os.path as osp from .utils import to_tensor, random_scale from mmcv.parallel import DataContainer as DC import mmcv from .custom import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label = { cat_id: i + 1 for i, cat_id in enumerate(self.cat_ids) } self.img_ids = self.coco.getImgIds() img_infos = [] for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info) return img_infos def get_ann_info(self, idx): img_id = self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32): """Filter images too small or without ground truths.""" valid_inds = [] ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) for i, img_info in enumerate(self.img_infos): if self.img_ids[i] not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info, with_mask=True): """Parse bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to parse mask annotations. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, mask_polys, poly_lens. """ slices_ann_info = {'r': [], 'g': [], 'b': []} for info in ann_info: if info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] # Two formats are provided. # 1. mask: a binary map of the same size of the image. # 2. polys: each mask consists of one or several polys, each poly is a # list of float. 
if with_mask: gt_masks = [] gt_mask_polys = [] gt_poly_lens = [] for key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes = [] cur_slice_labels = [] cur_slice_bboxes_ignore = [] cur_masks = [] cur_mask_polys = [] cur_poly_lens = [] for i, ann in enumerate(cur_ann_info): if ann.get('ignore', False): continue x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w < 1 or h < 1: continue bbox = [x1, y1, x1 + w - 1, y1 + h - 1] if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox) cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p for p in ann['segmentation'] if len(p) >= 6 ] # valid polygons have >= 3 points (6 coordinates) poly_lens = [len(p) for p in mask_polys] cur_mask_polys.append(mask_polys) cur_poly_lens.extend(poly_lens) if cur_slice_bboxes: cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore) gt_masks.append(cur_masks) gt_mask_polys.append(cur_mask_polys) gt_poly_lens.append(cur_poly_lens) ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore) if with_mask: ann['masks'] = gt_masks # poly format is not used in the current implementation ann['mask_polys'] = gt_mask_polys ann['poly_lens'] = gt_poly_lens return ann def insert_to_dict(self, data, key, tensors): if key in data: data[key].append(tensors) else: data[key] = [tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx] # load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if self.proposals is not None: proposals = self.proposals[idx][:self.num_max_proposals] # TODO: Handle empty proposals properly. Currently images with # no proposals are just ignored, but they can be used for # training in concept. 
if len(proposals) == 0: return None if not (proposals.shape[1] == 4 or proposals.shape[1] == 5): raise AssertionError( 'proposals should have shapes (n, 4) or (n, 5), ' 'but found {}'.format(proposals.shape)) if proposals.shape[1] == 5: scores = proposals[:, 4, None] proposals = proposals[:, :4] else: scores = None ann = self.get_ann_info(idx) gt_bboxes_list = ann['bboxes'] gt_labels_list = ann['labels'] # if self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore'] gt_masks_list = ann['masks'] # apply transforms flip = True if np.random.rand() < self.flip_ratio else False data = None for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the image if there is no valid gt bbox if len(gt_bboxes) == 0: return None # extra augmentation if self.extra_aug is not None: img, gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes, gt_labels) else: img = orig_img # randomly sample a scale img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor = self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img = img.copy() if self.with_seg: gt_seg = mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')), flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg = gt_seg[None, ...] if self.proposals is not None: proposals = self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals = np.hstack( [proposals, scores]) if scores is not None else proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip) if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip) if self.with_mask: gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip) if data is None: ori_shape = (img_info['height'], img_info['width'], 3) img_meta = dict( ori_shape=ori_shape, img_shape=img_shape, pad_shape=pad_shape, scale_factor=scale_factor, flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask: self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True)) if self.with_seg: self.insert_to_dict(data, 'gt_semantic_seg', DC(to_tensor(gt_seg), stack=True)) return data
[ "numpy.zeros", "numpy.hstack", "numpy.array", "mmcv.parallel.DataContainer", "numpy.random.rand", "mmcv.imrescale", "os.path.join" ]
[((5455, 5502), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", (5463, 5502), True, 'import os.path as osp\n'), ((4019, 4063), 'numpy.array', 'np.array', (['cur_slice_bboxes'], {'dtype': 'np.float32'}), '(cur_slice_bboxes, dtype=np.float32)\n', (4027, 4063), True, 'import numpy as np\n'), ((4099, 4141), 'numpy.array', 'np.array', (['cur_slice_labels'], {'dtype': 'np.int64'}), '(cur_slice_labels, dtype=np.int64)\n', (4107, 4141), True, 'import numpy as np\n'), ((4195, 4229), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (4203, 4229), True, 'import numpy as np\n'), ((4265, 4293), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (4273, 4293), True, 'import numpy as np\n'), ((4377, 4428), 'numpy.array', 'np.array', (['cur_slice_bboxes_ignore'], {'dtype': 'np.float32'}), '(cur_slice_bboxes_ignore, dtype=np.float32)\n', (4385, 4428), True, 'import numpy as np\n'), ((4489, 4523), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (4497, 4523), True, 'import numpy as np\n'), ((6612, 6628), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6626, 6628), True, 'import numpy as np\n'), ((7833, 7903), 'mmcv.imrescale', 'mmcv.imrescale', (['gt_seg', 'self.seg_scale_factor'], {'interpolation': '"""nearest"""'}), "(gt_seg, self.seg_scale_factor, interpolation='nearest')\n", (7847, 7903), False, 'import mmcv\n'), ((8177, 8207), 'numpy.hstack', 'np.hstack', (['[proposals, scores]'], {}), '([proposals, scores])\n', (8186, 8207), True, 'import numpy as np\n'), ((9823, 9850), 'mmcv.parallel.DataContainer', 'DC', (['gt_masks'], {'cpu_only': '(True)'}), '(gt_masks, cpu_only=True)\n', (9825, 9850), True, 'from mmcv.parallel import DataContainer as DC\n'), ((9254, 9281), 'mmcv.parallel.DataContainer', 'DC', (['img_meta'], {'cpu_only': '(True)'}), '(img_meta, cpu_only=True)\n', (9256, 9281), True, 'from mmcv.parallel import DataContainer as DC\n')]
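The _parse_ann_info method above converts COCO-style [x, y, w, h] boxes into corner form; a tiny standalone illustration of the same conversion:

import numpy as np

def xywh_to_xyxy(boxes):
    # COCO stores [x1, y1, width, height]; the dataset class expects [x1, y1, x2, y2]
    boxes = np.asarray(boxes, dtype=np.float32)
    x1, y1, w, h = boxes.T
    return np.stack([x1, y1, x1 + w - 1, y1 + h - 1], axis=1)

print(xywh_to_xyxy([[10, 20, 30, 40]]))  # -> [[10. 20. 39. 59.]]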
#!/usr/bin/env python # -*- coding: utf-8 -*- """Python toolkit for generating and analyzing nanostructure data""" from __future__ import absolute_import, division, print_function, \ unicode_literals __docformat__ = 'restructuredtext en' import os import sys import shutil import subprocess from distutils.command.clean import clean as Clean if sys.version_info[0] < 3: raise RuntimeError("Python version 3.4+ required.\n\n" "Sorry, but there are features of Python 3\n" "that I want to take advantage of and without\n" "worrying about Python 2 compatibility.\n" "Therefore, Python 2 support was removed starting\n" "in v0.3.7. Once/if I learn how to automate the\n" "backporting process from the setup script,\n" "I will restore Python 2 support that way.\n" "Until then, if you must install this for Python 2\n" "you're on your own. It shouldn't be difficult\n" "but you'll have to manually backport the package\n" "source code using a Python 3 to Python 2\n" "compatibility library such as the python `future`\n" "module, which provides a python script called\n" "`pasteurize` that can be run on the source\n" "directory to automate the backporting process.\n" "You'll also need to hack this setup script\n" "to remove any exceptions that are raised when\n" "executed under Python 2.") #if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4): if (3, 0) <= sys.version_info[:2] < (3, 4): raise RuntimeError("Python 3.4+ required.") if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins try: import setuptools except ImportError: sys.exit("setuptools required for Python3 install.\n" "`pip install --upgrade setuptools`") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = """\ Development Status :: 4 - Beta Intended Audience :: Science/Research Intended Audience :: Developers License :: OSI Approved :: BSD License Operating System :: Microsoft :: Windows Operating System :: POSIX Operating System :: Unix Operating System :: MacOS Programming Language :: Python Programming Language :: Python :: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization Topic :: Software Development Topic :: Software Development :: Libraries :: Python Modules """ MAJOR = 0 MINOR = 3 MICRO = 21 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) # Return the GIT version as a string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = "Unknown" return GIT_REVISION # BEFORE importing 
distutils, remove MANIFEST. distutils doesn't properly # update it when the contents of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!) hackish: we are setting a global variable so that the main # sknano __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \ "Remove build directories, __pycache__ directories, " \ ".ropeproject directories, and compiled files in the source tree." def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git rev number needs to be done inside # write_version_py(), otherwise the import of sknano.version messes # up the build under Python 3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be a source distribution, use existing version file # load it as a separate module to not load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = "Unknown" if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = """ # THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if not release: version = full_version """ FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the version file everytime write_version_py() # Figure out whether to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. We don't want to do that unconditionally, # because we risk updating an installed numpy/scipy which fails too often. # Just if the minimum version is not installed, we may give it a try. 
build_requires = [] try: import numpy numpy_version = \ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version = \ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six module to install_requires (used in numpydoc git submodule) # install_requires += ['six>=1.9'] # # Add future module to install requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run out of an .egg file include_package_data=True, ) if len(sys.argv) >= 2 and \ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy are not required. # They are required to succeed without them when, for example, # pip is used to install Scipy when Numpy is not yet present in # the system. try: from setuptools import setup except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ == '__main__': setup_package()
[ "os.remove", "subprocess.Popen", "distutils.core.setup", "scipy.version.short_version.split", "distutils.command.clean.clean.run", "os.path.exists", "os.walk", "os.environ.get", "imp.load_source", "shutil.rmtree", "numpy.distutils.misc_util.Configuration", "os.path.join", "numpy.version.short_version.split", "sys.exit" ]
[((4346, 4372), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (4360, 4372), False, 'import os\n'), ((4378, 4399), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (4387, 4399), False, 'import os\n'), ((5804, 5826), 'os.path.exists', 'os.path.exists', (['""".git"""'], {}), "('.git')\n", (5818, 5826), False, 'import os\n'), ((7225, 7270), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (7238, 7270), False, 'from numpy.distutils.misc_util import Configuration\n'), ((10846, 10863), 'distutils.core.setup', 'setup', ([], {}), '(**metadata)\n', (10851, 10863), False, 'from distutils.core import setup\n'), ((2029, 2130), 'sys.exit', 'sys.exit', (['"""setuptools required for Python3 install.\n`pip install --upgrade setuptools`"""'], {}), '(\n """setuptools required for Python3 install.\n`pip install --upgrade setuptools`"""\n )\n', (2037, 2130), False, 'import sys\n'), ((4866, 4881), 'distutils.command.clean.clean.run', 'Clean.run', (['self'], {}), '(self)\n', (4875, 4881), True, 'from distutils.command.clean import clean as Clean\n'), ((4893, 4916), 'os.path.exists', 'os.path.exists', (['"""build"""'], {}), "('build')\n", (4907, 4916), False, 'import os\n'), ((4997, 5014), 'os.walk', 'os.walk', (['"""sknano"""'], {}), "('sknano')\n", (5004, 5014), False, 'import os\n'), ((5400, 5414), 'os.walk', 'os.walk', (['"""doc"""'], {}), "('doc')\n", (5407, 5414), False, 'import os\n'), ((5874, 5909), 'os.path.exists', 'os.path.exists', (['"""sknano/version.py"""'], {}), "('sknano/version.py')\n", (5888, 5909), False, 'import os\n'), ((3702, 3719), 'os.environ.get', 'os.environ.get', (['k'], {}), '(k)\n', (3716, 3719), False, 'import os\n'), ((4930, 4952), 'shutil.rmtree', 'shutil.rmtree', (['"""build"""'], {}), "('build')\n", (4943, 4952), False, 'import shutil\n'), ((6085, 6139), 'imp.load_source', 'imp.load_source', (['"""sknano.version"""', '"""sknano/version.py"""'], {}), "('sknano.version', 'sknano/version.py')\n", (6100, 6139), False, 'import imp\n'), ((3911, 3965), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'env': 'env'}), '(cmd, stdout=subprocess.PIPE, env=env)\n', (3927, 3965), False, 'import subprocess\n'), ((5156, 5187), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (5168, 5187), False, 'import os\n'), ((5323, 5353), 'os.path.join', 'os.path.join', (['dirpath', 'dirname'], {}), '(dirpath, dirname)\n', (5335, 5353), False, 'import os\n'), ((5550, 5580), 'os.path.join', 'os.path.join', (['dirpath', 'dirname'], {}), '(dirpath, dirname)\n', (5562, 5580), False, 'import os\n'), ((8092, 8130), 'numpy.version.short_version.split', 'numpy.version.short_version.split', (['"""."""'], {}), "('.')\n", (8125, 8130), False, 'import numpy\n'), ((8455, 8493), 'scipy.version.short_version.split', 'scipy.version.short_version.split', (['"""."""'], {}), "('.')\n", (8488, 8493), False, 'import scipy\n')]
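The version logic in the setup script above appends a dev suffix plus a short git hash when the build is not a release; a tiny illustration with a made-up revision string:

MAJOR, MINOR, MICRO = 0, 3, 21
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
GIT_REVISION = '1a2b3c4d5e6f'  # placeholder, not a real commit hash

FULLVERSION = VERSION
if not ISRELEASED:
    FULLVERSION += '.dev0+' + GIT_REVISION[:7]
print(FULLVERSION)  # 0.3.21.dev0+1a2b3c4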
import os,random os.environ["KERAS_BACKEND"] = "tensorflow" from PIL import Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as np from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from keras.optimizers import * from keras import initializers import matplotlib.pyplot as plt import cPickle, random, sys, keras from keras.models import Model from functools import partial normal = partial(initializers.normal, scale=.02) ## load and preprocess the dataset (use FERG for example) ## batch_size = 256 num_ep = 7 num_pp = 6 epochs = 1000 img_rows, img_cols = 64, 64 clipvalue = 20 noise_dim = 10 c_dim = num_pp n_dim = 10 z_dim = 128 date = 2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1 -= 1 label2 = f['ep'] label2 = np.asarray(label2) label2 -= 1 label3 = f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape) print('label 2 train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori = (x - 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) * epsilon ############ Build the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x = Input(input_shape) h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k), strides = (2,2), 
border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim, name="encoder_mu")(h) mean = Dense(z_dim, name="encoder_mean")(h) logvar = Dense(z_dim, name="encoder_sigma", activation = 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k = 5 x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 * 128, activation = 'relu')(h) h = Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name="Decoder") # #### reload the trained weights to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x for xx in xrange(0,1): idx1 = 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## 
loc = 'bottom' for pp in xrange(0,1): for xx in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ### def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var / 2) * epsilon # mean and variance of the prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('synthesis_no_input_'+'pp_'+str(i)+'.tif')
[ "keras.layers.core.Reshape", "numpy.ones", "keras.models.Model", "matplotlib.pyplot.figure", "numpy.exp", "numpy.random.normal", "keras.layers.core.Flatten", "keras.layers.Input", "keras.layers.concatenate", "numpy.transpose", "numpy.reshape", "numpy.linspace", "keras.layers.core.Dropout", "keras.utils.to_categorical", "functools.partial", "h5py.File", "numpy.uint8", "keras.layers.core.Dense", "keras.layers.convolutional.Conv2DTranspose", "numpy.asarray", "keras.layers.Conv2D", "numpy.squeeze", "numpy.zeros", "numpy.expand_dims", "numpy.where", "keras.layers.advanced_activations.LeakyReLU", "keras.layers.Lambda", "PIL.Image.fromarray" ]
[((725, 765), 'functools.partial', 'partial', (['initializers.normal'], {'scale': '(0.02)'}), '(initializers.normal, scale=0.02)\n', (732, 765), False, 'from functools import partial\n'), ((1065, 1098), 'h5py.File', 'h5py.File', (['"""FERG_64_64_color.mat"""'], {}), "('FERG_64_64_color.mat')\n", (1074, 1098), False, 'import h5py\n'), ((1176, 1194), 'numpy.asarray', 'np.asarray', (['label1'], {}), '(label1)\n', (1186, 1194), True, 'import numpy as np\n'), ((1233, 1251), 'numpy.asarray', 'np.asarray', (['label2'], {}), '(label2)\n', (1243, 1251), True, 'import numpy as np\n'), ((1291, 1309), 'numpy.asarray', 'np.asarray', (['label3'], {}), '(label3)\n', (1301, 1309), True, 'import numpy as np\n'), ((1340, 1360), 'numpy.asarray', 'np.asarray', (['FrameNum'], {}), '(FrameNum)\n', (1350, 1360), True, 'import numpy as np\n'), ((1390, 1403), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1400, 1403), True, 'import numpy as np\n'), ((1413, 1442), 'numpy.transpose', 'np.transpose', (['x', '[3, 2, 1, 0]'], {}), '(x, [3, 2, 1, 0])\n', (1425, 1442), True, 'import numpy as np\n'), ((2047, 2091), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train1', 'num_pp'], {}), '(y_train1, num_pp)\n', (2073, 2091), False, 'import cPickle, random, sys, keras\n'), ((2103, 2146), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test1', 'num_pp'], {}), '(y_test1, num_pp)\n', (2129, 2146), False, 'import cPickle, random, sys, keras\n'), ((2158, 2202), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train2', 'num_ep'], {}), '(y_train2, num_ep)\n', (2184, 2202), False, 'import cPickle, random, sys, keras\n'), ((2214, 2257), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test2', 'num_ep'], {}), '(y_test2, num_ep)\n', (2240, 2257), False, 'import cPickle, random, sys, keras\n'), ((5749, 5779), 'keras.layers.Input', 'Input', (['(img_rows, img_cols, 3)'], {}), '((img_rows, img_cols, 3))\n', (5754, 5779), False, 'from keras.layers import Input, merge, Lambda\n'), ((5837, 5878), 'keras.layers.Input', 'Input', ([], {'shape': '(c_dim,)', 'name': '"""aux_input_c"""'}), "(shape=(c_dim,), name='aux_input_c')\n", (5842, 5878), False, 'from keras.layers import Input, merge, Lambda\n'), ((5893, 5934), 'keras.layers.Input', 'Input', ([], {'shape': '(n_dim,)', 'name': '"""aux_input_z"""'}), "(shape=(n_dim,), name='aux_input_z')\n", (5898, 5934), False, 'from keras.layers import Input, merge, Lambda\n'), ((9090, 9108), 'numpy.zeros', 'np.zeros', (['(1, 128)'], {}), '((1, 128))\n', (9098, 9108), True, 'import numpy as np\n'), ((9126, 9143), 'numpy.ones', 'np.ones', (['(1, 128)'], {}), '((1, 128))\n', (9133, 9143), True, 'import numpy as np\n'), ((1527, 1548), 'numpy.where', 'np.where', (['(label3 == 0)'], {}), '(label3 == 0)\n', (1535, 1548), True, 'import numpy as np\n'), ((1573, 1594), 'numpy.where', 'np.where', (['(label3 == 1)'], {}), '(label3 == 1)\n', (1581, 1594), True, 'import numpy as np\n'), ((3238, 3256), 'keras.layers.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (3243, 3256), False, 'from keras.layers import Input, merge, Lambda\n'), ((4440, 4480), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[mean, logvar]'], {}), '([mean, logvar])\n', (4464, 4480), False, 'import cPickle, random, sys, keras\n'), ((4491, 4524), 'keras.models.Model', 'Model', (['x', '[z, h2]'], {'name': '"""Encoder"""'}), "(x, [z, h2], name='Encoder')\n", (4496, 4524), False, 'from keras.models import Model\n'), ((4578, 4599), 'keras.layers.Input', 'Input', 
([], {'shape': '(z_dim,)'}), '(shape=(z_dim,))\n', (4583, 4599), False, 'from keras.layers import Input, merge, Lambda\n'), ((4620, 4661), 'keras.layers.Input', 'Input', ([], {'shape': '(c_dim,)', 'name': '"""aux_input_c"""'}), "(shape=(c_dim,), name='aux_input_c')\n", (4625, 4661), False, 'from keras.layers import Input, merge, Lambda\n'), ((4732, 4774), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[x, auxiliary_c]'], {}), '([x, auxiliary_c])\n', (4756, 4774), False, 'import cPickle, random, sys, keras\n'), ((5612, 5654), 'keras.models.Model', 'Model', (['[x, auxiliary_c]', 'h'], {'name': '"""Decoder"""'}), "([x, auxiliary_c], h, name='Decoder')\n", (5617, 5654), False, 'from keras.models import Model\n'), ((6554, 6586), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx1, :, :, :]'], {}), '(x_ori[idx1, :, :, :])\n', (6564, 6586), True, 'import numpy as np\n'), ((6598, 6630), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx2, :, :, :]'], {}), '(x_ori[idx2, :, :, :])\n', (6608, 6630), True, 'import numpy as np\n'), ((6776, 6802), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (6786, 6802), True, 'import matplotlib.pyplot as plt\n'), ((6813, 6845), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx1, :, :, :]'], {}), '(x_ori[idx1, :, :, :])\n', (6823, 6845), True, 'import numpy as np\n'), ((6854, 6884), 'numpy.uint8', 'np.uint8', (['(img1 * 127.5 + 127.5)'], {}), '(img1 * 127.5 + 127.5)\n', (6862, 6884), True, 'import numpy as np\n'), ((6893, 6921), 'PIL.Image.fromarray', 'Image.fromarray', (['img1', '"""RGB"""'], {}), "(img1, 'RGB')\n", (6908, 6921), False, 'from PIL import Image\n'), ((6961, 6993), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx2, :, :, :]'], {}), '(x_ori[idx2, :, :, :])\n', (6971, 6993), True, 'import numpy as np\n'), ((7002, 7032), 'numpy.uint8', 'np.uint8', (['(img2 * 127.5 + 127.5)'], {}), '(img2 * 127.5 + 127.5)\n', (7010, 7032), True, 'import numpy as np\n'), ((7064, 7092), 'PIL.Image.fromarray', 'Image.fromarray', (['img2', '"""RGB"""'], {}), "(img2, 'RGB')\n", (7079, 7092), False, 'from PIL import Image\n'), ((7131, 7162), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(1000)'}), '(0.0, 1.0, num=1000)\n', (7142, 7162), True, 'import numpy as np\n'), ((8894, 8969), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'epsilon_std', 'size': '(z_mean.shape[0], z_dim)'}), '(loc=0.0, scale=epsilon_std, size=(z_mean.shape[0], z_dim))\n', (8910, 8969), True, 'import numpy as np\n'), ((3265, 3326), 'keras.layers.Conv2D', 'Conv2D', (['(units / 8)', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units / 8, (k, k), strides=(2, 2), border_mode='same')\n", (3271, 3326), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((3382, 3398), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3389, 3398), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((3454, 3468), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3463, 3468), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3480, 3541), 'keras.layers.Conv2D', 'Conv2D', (['(units / 4)', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units / 4, (k, k), strides=(2, 2), border_mode='same')\n", (3486, 3541), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((3597, 3613), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), 
'(dropout)\n', (3604, 3613), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((3669, 3683), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3678, 3683), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3695, 3756), 'keras.layers.Conv2D', 'Conv2D', (['(units / 2)', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units / 2, (k, k), strides=(2, 2), border_mode='same')\n", (3701, 3756), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((3813, 3829), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3820, 3829), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((3885, 3899), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3894, 3899), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3911, 3968), 'keras.layers.Conv2D', 'Conv2D', (['units', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units, (k, k), strides=(2, 2), border_mode='same')\n", (3917, 3968), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((4026, 4042), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (4033, 4042), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4054, 4068), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4063, 4068), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4117, 4126), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (4124, 4126), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4193, 4226), 'keras.layers.core.Dense', 'Dense', (['z_dim'], {'name': '"""encoder_mean"""'}), "(z_dim, name='encoder_mean')\n", (4198, 4226), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4243, 4299), 'keras.layers.core.Dense', 'Dense', (['z_dim'], {'name': '"""encoder_sigma"""', 'activation': '"""sigmoid"""'}), "(z_dim, name='encoder_sigma', activation='sigmoid')\n", (4248, 4299), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4375, 4414), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(z_dim,)'}), '(sampling, output_shape=(z_dim,))\n', (4381, 4414), False, 'from keras.layers import Input, merge, Lambda\n'), ((4783, 4820), 'keras.layers.core.Dense', 'Dense', (['(4 * 4 * 128)'], {'activation': '"""relu"""'}), "(4 * 4 * 128, activation='relu')\n", (4788, 4820), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4834, 4854), 'keras.layers.core.Reshape', 'Reshape', (['(4, 4, 128)'], {}), '((4, 4, 128))\n', (4841, 4854), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4894, 4980), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['units', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(units, (k, k), strides=(2, 2), padding='same', activation=\n 'relu')\n", (4909, 4980), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((5106, 5195), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['(units / 2)', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': 
'"""relu"""'}), "(units / 2, (k, k), strides=(2, 2), padding='same',\n activation='relu')\n", (5121, 5195), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((5320, 5409), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['(units / 2)', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(units / 2, (k, k), strides=(2, 2), padding='same',\n activation='relu')\n", (5335, 5409), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((5505, 5582), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['(3)', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(3, (k, k), strides=(2, 2), padding='same', activation='tanh')\n", (5520, 5582), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((6671, 6699), 'numpy.expand_dims', 'np.expand_dims', (['img1'], {'axis': '(0)'}), '(img1, axis=0)\n', (6685, 6699), True, 'import numpy as np\n'), ((6741, 6769), 'numpy.expand_dims', 'np.expand_dims', (['img2'], {'axis': '(0)'}), '(img2, axis=0)\n', (6755, 6769), True, 'import numpy as np\n'), ((7233, 7270), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c', 'num_pp'], {}), '(c, num_pp)\n', (7259, 7270), False, 'import cPickle, random, sys, keras\n'), ((7341, 7373), 'numpy.reshape', 'np.reshape', (['z_interp', '(1, z_dim)'], {}), '(z_interp, (1, z_dim))\n', (7351, 7373), True, 'import numpy as np\n'), ((7430, 7445), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (7440, 7445), True, 'import numpy as np\n'), ((7460, 7489), 'numpy.uint8', 'np.uint8', (['(img * 127.5 + 127.5)'], {}), '(img * 127.5 + 127.5)\n', (7468, 7489), True, 'import numpy as np\n'), ((7502, 7529), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (7517, 7529), False, 'from PIL import Image\n'), ((7733, 7764), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx, :, :, :]'], {}), '(x_ori[idx, :, :, :])\n', (7743, 7764), True, 'import numpy as np\n'), ((7776, 7811), 'numpy.uint8', 'np.uint8', (['(input_img * 127.5 + 127.5)'], {}), '(input_img * 127.5 + 127.5)\n', (7784, 7811), True, 'import numpy as np\n'), ((7824, 7851), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (7839, 7851), False, 'from PIL import Image\n'), ((7911, 7942), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx, :, :, :]'], {}), '(x_ori[idx, :, :, :])\n', (7921, 7942), True, 'import numpy as np\n'), ((8174, 8211), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c', 'num_pp'], {}), '(c, num_pp)\n', (8200, 8211), False, 'import cPickle, random, sys, keras\n'), ((8301, 8320), 'numpy.squeeze', 'np.squeeze', (['img_rec'], {}), '(img_rec)\n', (8311, 8320), True, 'import numpy as np\n'), ((8336, 8374), 'numpy.uint8', 'np.uint8', (['(impanted_img * 127.5 + 127.5)'], {}), '(impanted_img * 127.5 + 127.5)\n', (8344, 8374), True, 'import numpy as np\n'), ((8387, 8414), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (8402, 8414), False, 'from PIL import Image\n'), ((8476, 8509), 'numpy.uint8', 'np.uint8', (['(img_rec * 127.5 + 127.5)'], {}), '(img_rec * 127.5 + 127.5)\n', (8484, 8509), True, 'import numpy as np\n'), ((8522, 8549), 'PIL.Image.fromarray', 
'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (8537, 8549), False, 'from PIL import Image\n'), ((8668, 8706), 'numpy.uint8', 'np.uint8', (['(impanted_img * 127.5 + 127.5)'], {}), '(impanted_img * 127.5 + 127.5)\n', (8676, 8706), True, 'import numpy as np\n'), ((8719, 8746), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (8734, 8746), False, 'from PIL import Image\n'), ((9316, 9353), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c', 'num_pp'], {}), '(c, num_pp)\n', (9342, 9353), False, 'import cPickle, random, sys, keras\n'), ((9406, 9421), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (9416, 9421), True, 'import numpy as np\n'), ((9436, 9465), 'numpy.uint8', 'np.uint8', (['(img * 127.5 + 127.5)'], {}), '(img * 127.5 + 127.5)\n', (9444, 9465), True, 'import numpy as np\n'), ((9478, 9505), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (9493, 9505), False, 'from PIL import Image\n'), ((7205, 7218), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (7212, 7218), True, 'import numpy as np\n'), ((8096, 8132), 'numpy.expand_dims', 'np.expand_dims', (['impanted_img'], {'axis': '(0)'}), '(impanted_img, axis=0)\n', (8110, 8132), True, 'import numpy as np\n'), ((8146, 8159), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (8153, 8159), True, 'import numpy as np\n'), ((8991, 9012), 'numpy.exp', 'np.exp', (['(z_log_var / 2)'], {}), '(z_log_var / 2)\n', (8997, 9012), True, 'import numpy as np\n'), ((9290, 9300), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (9297, 9300), True, 'import numpy as np\n')]
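# Illustrative NumPy sketch (not part of the original FERG script above): its KL_loss and
# sampling functions implement the standard VAE reparameterization, z = mean + exp(log_var / 2) * eps
# with eps ~ N(0, I), and KL(N(mean, sigma^2) || N(0, I)) = -0.5 * sum(1 + log_var - mean^2 - exp(log_var)).
# The same two steps restated in plain NumPy, with illustrative shapes:
import numpy as np

def reparameterize(z_mean, z_log_var, rng=np.random):
    # z = mu + sigma * eps, where sigma = exp(log_var / 2)
    eps = rng.normal(size=z_mean.shape)
    return z_mean + np.exp(z_log_var / 2.0) * eps

def kl_to_unit_gaussian(z_mean, z_log_var):
    # KL divergence to a unit Gaussian, summed over latent dims, averaged over the batch
    kl = -0.5 * np.sum(1 + z_log_var - np.square(z_mean) - np.exp(z_log_var), axis=-1)
    return kl.mean()

# example: a batch of 4 latent codes of dimension 128 (z_dim in the script above)
mu, logvar = np.zeros((4, 128)), np.zeros((4, 128))
print(reparameterize(mu, logvar).shape, kl_to_unit_gaussian(mu, logvar))  # (4, 128) 0.0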
import zmq from zmq import ssh import numpy as np from environments.inmoov.inmoov_p2p_client_ready import InmoovGymEnv from .inmoov_server import server_connection, client_ssh_connection, client_connection SERVER_PORT = 7777 HOSTNAME = 'localhost' def send_array(socket, A, flags=0, copy=True, track=False): """send a numpy array with metadata""" md = dict( dtype = str(A.dtype), shape = A.shape, ) socket.send_json(md, flags|zmq.SNDMORE) return socket.send(A, flags, copy=copy, track=track) def test_inmoov_gym(): while True: k = input() try: # time.sleep(0.5) action = np.zeros(shape=(joints_num,)) signal = k.split() joint, move = int(signal[0]), float(signal[1]) action[joint] = move robot.step(action) except: continue # robot.step() if __name__ == "__main__": socket = server_connection() robot = InmoovGymEnv(debug_mode=True, positional_control=True) init_pose = robot._inmoov.get_joints_pos() joints_num = len(init_pose) while True: msg = socket.recv_json() command = msg["command"] if command == "position": data = robot.server_step(msg[command]) joint_state, reward, done, infos, px, end_position = data send_array(socket, joint_state, flags=0, copy=True, track=False) send_array(socket, np.array(reward), flags=0, copy=True, track=False) send_array(socket, np.array(done), flags=0, copy=True, track=False) send_array(socket, px, flags=0, copy=True, track=False) send_array(socket, end_position, flags=0, copy=True, track=False) print("message sent") elif command == "action": print(1) elif command == "done": print(2) elif command == "reset": print(3)
[ "numpy.zeros", "environments.inmoov.inmoov_p2p_client_ready.InmoovGymEnv", "numpy.array" ]
[((972, 1026), 'environments.inmoov.inmoov_p2p_client_ready.InmoovGymEnv', 'InmoovGymEnv', ([], {'debug_mode': '(True)', 'positional_control': '(True)'}), '(debug_mode=True, positional_control=True)\n', (984, 1026), False, 'from environments.inmoov.inmoov_p2p_client_ready import InmoovGymEnv\n'), ((655, 684), 'numpy.zeros', 'np.zeros', ([], {'shape': '(joints_num,)'}), '(shape=(joints_num,))\n', (663, 684), True, 'import numpy as np\n'), ((1452, 1468), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (1460, 1468), True, 'import numpy as np\n'), ((1534, 1548), 'numpy.array', 'np.array', (['done'], {}), '(done)\n', (1542, 1548), True, 'import numpy as np\n')]
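# Illustrative counterpart to send_array in the ZMQ bridge above (not part of the original
# script): the server sends a JSON metadata frame followed by the raw buffer, so a client
# needs the matching two-step receive. The socket is assumed to be a connected pyzmq socket;
# socket type and address are handled by server_connection/client_connection, which are not shown.
import numpy as np

def recv_array(socket, flags=0, copy=True, track=False):
    """Receive a numpy array sent by send_array: JSON metadata first, then the raw bytes."""
    md = socket.recv_json(flags=flags)
    msg = socket.recv(flags=flags, copy=copy, track=track)
    arr = np.frombuffer(memoryview(msg), dtype=md["dtype"])
    return arr.reshape(md["shape"])

# One client step mirroring the server's "position" branch (hypothetical usage):
#   socket.send_json({"command": "position", "position": target_position})
#   joint_state, reward, done, px, end_position = [recv_array(socket) for _ in range(5)]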
""" Automatic 2D class selection tool. MIT License Copyright (c) 2019 <NAME> Institute of Molecular Physiology Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from os import path, listdir import h5py from PIL import Image # install it via pip install pillow import numpy as np import mrcfile """ The format of the .hf file is the following: ['MDF']['images']['i']['image'] where i is a number representing the i-th images hence to get the images number 5: ['MDF']['images']['5']['image'][()] """ def create_circular_mask(h, w, center=None, radius=None): if center is None: # use the middle of the image center = (int(w / 2), int(h / 2)) if radius is None: # use the smallest distance between the center and image walls radius = min(center[0], center[1], w - center[0], h - center[1]) Y, X = np.ogrid[:h, :w] dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2) mask = dist_from_center <= radius return mask def checkfiles(path_to_files): """ checks if the hdf files are in the correct path and returns True if all of them exists :param path_to_files: list of paths :return: """ if isinstance(path_to_files, (list, tuple)): for p in path_to_files: if not path.isfile(p): return False elif isinstance(path_to_files, str): return path.isfile(path_to_files) return True def calc_2d_spectra(img): from scipy import fftpack import numpy as np F1 = fftpack.fft2(img) F2 = fftpack.fftshift(F1) psd2D = np.abs(F2) ** 2 return psd2D def getList_files(paths): """ Returns the list of the valid hdf files in the given paths. It is called recursively :param paths: path or list of paths :return: """ if isinstance(paths, str): paths = [paths] list_new_paths = list() iterate = False for p in paths: if path.isdir(p): iterate = True list_new_paths += [path.join(p, f) for f in listdir(p)] elif path.isfile(p): list_new_paths.append(p) else: print( "WARNING: The given path '" + str(p) + "' is not a folder or a file and it will be ignored" ) if iterate is True: return getList_files(list_new_paths) return list_new_paths def getList_relevant_files(path_to_files): """ Check if the given files are hdf/mrcs/st with a valid format. 
Return The list of valid hdf :param path_to_files: list of all the files present in the folder (and subfolder)given from the user :return: list of valid hdf """ return [ path_to_file for path_to_file in path_to_files if path_to_file.endswith("mrcs") or path_to_file.endswith("mrc") or path_to_file.endswith("st") or h5py.is_hdf5(path_to_file) ] """ FUNCTION TO READ THE HDF""" def get_key_list_images(path): """ Returns the list of the keys representing the images in the hdf/mrcs/st file. It will be converted in list of integer :param path: :return: """ print("Try to list images on", path) import os filename_ext = os.path.basename(path).split(".")[-1] result_list = None try: if filename_ext in {"mrcs", "st"}: with mrcfile.mmap(path, permissive=True, mode="r") as mrc: list_candidate = [i for i in range(mrc.header.nz)] if len(list_candidate) > 0: result_list = list_candidate if filename_ext == "mrc": with mrcfile.mmap(path, permissive=True, mode="r") as mrc: result_list = list(range(1)) except Exception as e: print(e) print( "WARNING in get_list_images: the file '" + path + " is not an valid mrc file. It will be ignored" ) if filename_ext == "hdf": try: with h5py.File(path, "r") as f: list_candidate = [int(v) for v in list(f["MDF"]["images"])] except: print( "WARNING in get_list_images: the file '" + path + " is not an HDF file with the following format:\n\t['MDF']['images']. It will be ignored" ) if len(list_candidate) > 0: result_list = list_candidate return result_list def getImages_fromList_key(file_index_tubles): """ Returns the images in the hdf file (path_to_file) listed in (list_images) :param path_to_file: path to hdf file :param list_images: list of keys of the DB. It is the output( or part of its) given from 'get_list_images' :return: Returns a list of numpy arrays """ # driver="core" result_data = list() for path_to_file, list_images in file_index_tubles: data = list() if path.isfile(path_to_file): if path.basename(path_to_file).split(".")[-1] == "hdf": try: with h5py.File(path_to_file, 'r') as f: if isinstance(list_images, list) or isinstance( list_images, tuple ): data = [ np.nan_to_num(f["MDF"]["images"][str(i)]["image"][()]) for i in list_images ] # [()] is used instead of .value elif isinstance(list_images, int): data = np.nan_to_num(f["MDF"]["images"][str(list_images)]["image"][()]) else: print( "\nERROR in getImages_fromList_key: invalid list_images, it should be a string or a list/tuple of strings:", type(list_images), ) print("you try to get the following images") print(list_images) exit() except Exception as e: print(e) print( "\nERROR in getImages_fromList_key: the file '" + path_to_file + " is not an HDF file with the following format:\n\t['MDF']['images']['0']['image']" ) print("you try to get the following images") print(list_images) print("there are " + str(len(f["MDF"]["images"]))) exit() elif path.basename(path_to_file).split(".")[-1] in ["mrc", "mrcs", "st"]: data = [] with mrcfile.mmap(path_to_file, permissive=True, mode="r") as mrc: if isinstance(list_images, int): list_images = [list_images] if isinstance(list_images, list) or isinstance(list_images, tuple): if mrc.header.nz > 1: if len(list_images)==1: data = np.nan_to_num(mrc.data[list_images[0]]) else: data = [np.nan_to_num(mrc.data[i]) for i in list_images] elif len(list_images) == 1: data = np.nan_to_num(mrc.data) result_data.append(data) return result_data def getImages_fromList_key_old(path_to_file, list_images): """ Returns the images in the hdf file (path_to_file) listed in (list_images) :param 
path_to_file: path to hdf file :param list_images: list of keys of the DB. It is the output( or part of its) given from 'get_list_images' :return: Returns a list of numpy arrays """ data = list() if path.isfile(path_to_file): if path.basename(path_to_file).split(".")[-1] == "hdf": try: with h5py.File(path_to_file, driver="core") as f: if isinstance(list_images, list) or isinstance(list_images, tuple): data = [ f["MDF"]["images"][str(i)]["image"][()] for i in list_images ] # [()] is used instead of .value elif isinstance(list_images, int): data = f["MDF"]["images"][str(list_images)]["image"][()] else: print( "\nERROR in getImages_fromList_key: invalid list_images, it should be a string or a list/tuple of strings:", type(list_images), ) print("you try to get the following images") print(list_images) exit() except Exception as e: print(e) print( "\nERROR in getImages_fromList_key: the file '" + path_to_file + " is not an HDF file with the following format:\n\t['MDF']['images']['0']['image']" ) print("you try to get the following images") print(list_images) print("there are " + str(len(f["MDF"]["images"]))) exit() elif path.basename(path_to_file).split(".")[-1] in ["mrc", "mrcs", "st"]: data = [] with mrcfile.mmap(path_to_file, permissive=True, mode="r") as mrc: if isinstance(list_images, int): list_images = [list_images] if isinstance(list_images, list) or isinstance(list_images, tuple): if mrc.header.nz > 1: data = [mrc.data[i] for i in list_images] elif len(list_images) == 1: data = [mrc.data] return data """ FUNCTION TO MANIPULATE THE IMAGES""" def apply_mask(img, mask): mean = np.mean(img) img[mask==False]=mean return img def resize_img(img, resize=(76, 76)): """ Resize the given image into the given size :param img: as numpy array :param resize: resize size :return: return the resized img """ im = Image.fromarray(img) return np.array(im.resize(resize, resample=Image.BILINEAR)) def normalize_img(img): """ normalize the images in base of its mean and variance :param img: :return: """ import numpy as np # img = img.astype(np.float64, copy=False) mean = np.mean(img) std = np.std(img) img = (img - mean) / (std+0.00001) # img = img.astype(np.float32, copy=False) return img def flip_img(img, t=None): """ It flip the image in function of the given typ :param img: :param t: type of the flip 1 --> flip over the row. Flipped array in up-down direction.(X) 2 --> flip over the column Flipped array in right-left direction(Y) 3 --> flip over the column and the row (X and Y) otherwise --> no flip :return: """ if t == 1: return np.flipud(img) elif t == 2: return np.fliplr(img) elif t == 3: return np.flipud(np.fliplr(img)) return img
[ "h5py.File", "numpy.abs", "numpy.nan_to_num", "os.path.basename", "numpy.std", "scipy.fftpack.fftshift", "os.path.isdir", "numpy.flipud", "numpy.fliplr", "os.path.isfile", "numpy.mean", "scipy.fftpack.fft2", "PIL.Image.fromarray", "h5py.is_hdf5", "os.path.join", "os.listdir", "mrcfile.mmap", "numpy.sqrt" ]
[((1861, 1913), 'numpy.sqrt', 'np.sqrt', (['((X - center[0]) ** 2 + (Y - center[1]) ** 2)'], {}), '((X - center[0]) ** 2 + (Y - center[1]) ** 2)\n', (1868, 1913), True, 'import numpy as np\n'), ((2499, 2516), 'scipy.fftpack.fft2', 'fftpack.fft2', (['img'], {}), '(img)\n', (2511, 2516), False, 'from scipy import fftpack\n'), ((2526, 2546), 'scipy.fftpack.fftshift', 'fftpack.fftshift', (['F1'], {}), '(F1)\n', (2542, 2546), False, 'from scipy import fftpack\n'), ((8828, 8853), 'os.path.isfile', 'path.isfile', (['path_to_file'], {}), '(path_to_file)\n', (8839, 8853), False, 'from os import path, listdir\n'), ((10977, 10989), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (10984, 10989), True, 'import numpy as np\n'), ((11241, 11261), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (11256, 11261), False, 'from PIL import Image\n'), ((11537, 11549), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (11544, 11549), True, 'import numpy as np\n'), ((11560, 11571), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (11566, 11571), True, 'import numpy as np\n'), ((2559, 2569), 'numpy.abs', 'np.abs', (['F2'], {}), '(F2)\n', (2565, 2569), True, 'import numpy as np\n'), ((2913, 2926), 'os.path.isdir', 'path.isdir', (['p'], {}), '(p)\n', (2923, 2926), False, 'from os import path, listdir\n'), ((5883, 5908), 'os.path.isfile', 'path.isfile', (['path_to_file'], {}), '(path_to_file)\n', (5894, 5908), False, 'from os import path, listdir\n'), ((12126, 12140), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (12135, 12140), True, 'import numpy as np\n'), ((2365, 2391), 'os.path.isfile', 'path.isfile', (['path_to_files'], {}), '(path_to_files)\n', (2376, 2391), False, 'from os import path, listdir\n'), ((3036, 3050), 'os.path.isfile', 'path.isfile', (['p'], {}), '(p)\n', (3047, 3050), False, 'from os import path, listdir\n'), ((12173, 12187), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (12182, 12187), True, 'import numpy as np\n'), ((2264, 2278), 'os.path.isfile', 'path.isfile', (['p'], {}), '(p)\n', (2275, 2278), False, 'from os import path, listdir\n'), ((2986, 3001), 'os.path.join', 'path.join', (['p', 'f'], {}), '(p, f)\n', (2995, 3001), False, 'from os import path, listdir\n'), ((3871, 3897), 'h5py.is_hdf5', 'h5py.is_hdf5', (['path_to_file'], {}), '(path_to_file)\n', (3883, 3897), False, 'import h5py\n'), ((4212, 4234), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4228, 4234), False, 'import os\n'), ((4342, 4387), 'mrcfile.mmap', 'mrcfile.mmap', (['path'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path, permissive=True, mode='r')\n", (4354, 4387), False, 'import mrcfile\n'), ((4607, 4652), 'mrcfile.mmap', 'mrcfile.mmap', (['path'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path, permissive=True, mode='r')\n", (4619, 4652), False, 'import mrcfile\n'), ((4970, 4990), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (4979, 4990), False, 'import h5py\n'), ((3011, 3021), 'os.listdir', 'listdir', (['p'], {}), '(p)\n', (3018, 3021), False, 'from os import path, listdir\n'), ((8957, 8995), 'h5py.File', 'h5py.File', (['path_to_file'], {'driver': '"""core"""'}), "(path_to_file, driver='core')\n", (8966, 8995), False, 'import h5py\n'), ((10435, 10488), 'mrcfile.mmap', 'mrcfile.mmap', (['path_to_file'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path_to_file, permissive=True, mode='r')\n", (10447, 10488), False, 'import mrcfile\n'), ((12230, 12244), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (12239, 
12244), True, 'import numpy as np\n'), ((6024, 6052), 'h5py.File', 'h5py.File', (['path_to_file', '"""r"""'], {}), "(path_to_file, 'r')\n", (6033, 6052), False, 'import h5py\n'), ((7720, 7773), 'mrcfile.mmap', 'mrcfile.mmap', (['path_to_file'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path_to_file, permissive=True, mode='r')\n", (7732, 7773), False, 'import mrcfile\n'), ((8866, 8893), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (8879, 8893), False, 'from os import path, listdir\n'), ((5925, 5952), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (5938, 5952), False, 'from os import path, listdir\n'), ((10327, 10354), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (10340, 10354), False, 'from os import path, listdir\n'), ((7604, 7631), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (7617, 7631), False, 'from os import path, listdir\n'), ((8114, 8153), 'numpy.nan_to_num', 'np.nan_to_num', (['mrc.data[list_images[0]]'], {}), '(mrc.data[list_images[0]])\n', (8127, 8153), True, 'import numpy as np\n'), ((8364, 8387), 'numpy.nan_to_num', 'np.nan_to_num', (['mrc.data'], {}), '(mrc.data)\n', (8377, 8387), True, 'import numpy as np\n'), ((8228, 8254), 'numpy.nan_to_num', 'np.nan_to_num', (['mrc.data[i]'], {}), '(mrc.data[i])\n', (8241, 8254), True, 'import numpy as np\n')]
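# Illustrative usage of the class-selection I/O helpers defined in the last script above
# (not part of the original file). It assumes those helpers are importable or defined in
# the same module; the folder name is a placeholder.
import numpy as np

candidate_files = getList_relevant_files(getList_files("class_averages/"))
for p in candidate_files:
    keys = get_key_list_images(p)          # integer image indices, or None if unreadable
    if not keys:
        continue
    # fetch up to four images from this file as numpy arrays
    (images,) = getImages_fromList_key([(p, keys[:4])])
    # resize to 76x76 and normalize to zero mean / unit variance, as the script does elsewhere
    images = [normalize_img(resize_img(np.squeeze(im))) for im in images]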