repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
ewanlee/mackrl | [
"6dd505aa09830f16c35a022f67e255db935c807e"
] | [
"src/envs/starcraft2/starcraft2_2pairs.py"
] | [
"from ..multiagentenv import MultiAgentEnv\nfrom .map_params import get_map_params, map_present\nfrom utils.dict2namedtuple import convert\nfrom operator import attrgetter\nfrom copy import deepcopy\nfrom absl import flags\nimport numpy as np\nimport pygame\nimport sys\nimport os\nimport math\n\nfrom pysc2 import maps\nfrom pysc2 import run_configs\nfrom pysc2.lib import protocol\n\nfrom s2clientprotocol import common_pb2 as sc_common\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\nfrom s2clientprotocol import raw_pb2 as r_pb\nfrom s2clientprotocol import debug_pb2 as d_pb\n\nFLAGS = flags.FLAGS\nFLAGS(['main.py'])\n\n_possible_results = {\n sc_pb.Victory: 1,\n sc_pb.Defeat: -1,\n sc_pb.Tie: 0,\n sc_pb.Undecided: 0,\n}\n\nraces = {\n \"R\": sc_common.Random,\n \"P\": sc_common.Protoss,\n \"T\": sc_common.Terran,\n \"Z\": sc_common.Zerg,\n}\n\ndifficulties = {\n \"1\": sc_pb.VeryEasy,\n \"2\": sc_pb.Easy,\n \"3\": sc_pb.Medium,\n \"4\": sc_pb.MediumHard,\n \"5\": sc_pb.Hard,\n \"6\": sc_pb.Harder,\n \"7\": sc_pb.VeryHard,\n \"8\": sc_pb.CheatVision,\n \"9\": sc_pb.CheatMoney,\n \"A\": sc_pb.CheatInsane,\n}\n\naction_move_id = 16 # target: PointOrUnit\naction_attack_id = 23 # target: PointOrUnit\naction_stop_id = 4 # target: None\naction_heal_id = 386 # target: Unit\n\n'''\nStarCraft II\n'''\n\nclass SC2(MultiAgentEnv):\n\n def __init__(self, **kwargs):\n self.glob_args = kwargs[\"args\"]\n\n args = kwargs[\"env_args\"]\n if isinstance(args, dict):\n args = convert(args)\n # Read arguments\n self.map_name = args.map_name\n assert map_present(self.map_name), \\\n \"map {} not in map registry! please add.\".format(self.map_name)\n map_params = convert(get_map_params(self.map_name))\n self.n_agents = map_params.n_agents\n self.n_enemies = map_params.n_enemies\n self.episode_limit = map_params.limit\n self._move_amount = args.move_amount\n self._step_mul = args.step_mul\n self.difficulty = args.difficulty\n # Observations and state\n self.obs_own_health = args.obs_own_health\n self.obs_all_health = args.obs_all_health\n self.obs_instead_of_state = args.obs_instead_of_state\n self.obs_last_action = args.obs_last_action\n self.obs_pathing_grid = args.obs_pathing_grid\n self.obs_terrain_height = args.obs_terrain_height\n self.state_last_action = args.state_last_action\n if self.obs_all_health:\n self.obs_own_health = True\n self.n_obs_pathing = 8\n self.n_obs_height = 9\n # Rewards args\n self.reward_sparse = args.reward_sparse\n self.reward_only_positive = args.reward_only_positive\n self.reward_negative_scale = args.reward_negative_scale\n self.reward_death_value = args.reward_death_value\n self.reward_win = args.reward_win\n self.reward_defeat = args.reward_defeat\n self.reward_scale = args.reward_scale\n self.reward_scale_rate = args.reward_scale_rate\n # Other\n self.continuing_episode = args.continuing_episode\n self.seed = args.seed\n self.heuristic = args.heuristic\n self.window_size = (1920, 1200)\n self.save_replay_prefix = args.save_replay_prefix\n self.restrict_actions = args.restrict_actions\n\n # For sanity check\n self.debug_inputs = False # DEBUG\n self.debug_rewards = False\n\n # Actions\n self.n_actions_no_attack = 6\n self.n_actions_move = 4\n self.n_actions = self.n_actions_no_attack + self.n_enemies\n\n # Map info\n self._agent_race = map_params.a_race\n self._bot_race = map_params.b_race\n self.shield_bits_ally = 1 if self._agent_race == \"P\" else 0\n self.shield_bits_enemy = 1 if self._bot_race == \"P\" else 0\n self.unit_type_bits = map_params.unit_type_bits\n 
self.map_type = map_params.map_type\n\n # MACKRL arguments\n self.mackrl_delegate_if_zero_ck = getattr(kwargs[\"args\"], \"mackrl_delegate_if_zero_ck\", False)\n self.relax_pairwise_aa = args.relax_pairwise_aa if hasattr(args, \"relax_pairwise_aa\") else False\n\n if sys.platform == 'linux':\n os.environ['SC2PATH'] = os.path.join(os.getcwd(), \"3rdparty\", 'StarCraftII')\n self.game_version = args.game_version\n else:\n # Can be derived automatically\n self.game_version = None\n\n # Launch the game\n self._launch()\n\n self.max_reward = self.n_enemies * self.reward_death_value + self.reward_win\n self._game_info = self.controller.game_info()\n self._map_info = self._game_info.start_raw\n self.map_x = self._map_info.map_size.x\n self.map_y = self._map_info.map_size.y\n self.map_play_area_min = self._map_info.playable_area.p0\n self.map_play_area_max = self._map_info.playable_area.p1\n self.max_distance_x = self.map_play_area_max.x - self.map_play_area_min.x\n self.max_distance_y = self.map_play_area_max.y - self.map_play_area_min.y\n self.terrain_height = np.flip(np.transpose(np.array(list(self._map_info.terrain_height.data)).reshape(self.map_x, self.map_y)), 1)\n self.pathing_grid = np.flip(np.transpose(np.array(list(self._map_info.pathing_grid.data)).reshape(self.map_x, self.map_y)), 1)\n\n self._episode_count = 0\n self._total_steps = 0\n\n self.battles_won = 0\n self.battles_game = 0\n self.timeouts = 0\n self.force_restarts = 0\n\n self.last_stats = None\n\n self.obs_noise = getattr(self.glob_args, \"obs_noise\", False)\n self.obs_noise_std = getattr(self.glob_args, \"obs_noise_std\", False)\n\n self._min_unit_type = 0\n self.marine_id = self.marauder_id = self.medivac_id = 0\n self.hydralisk_id = self.zergling_id = self.baneling_id = 0\n\n self.stalker_id = self.colossus_id = self.zealot_id = 0\n\n def init_ally_unit_types(self, min_unit_type):\n # This should be called once from the init_units function\n\n self.stalker_id = self.sentry_id = self.zealot_id = self.colossus_id = 0\n self.marine_id = self.marauder_id = self.medivac_id = 0\n self.hydralisk_id = 0\n\n self._min_unit_type = min_unit_type\n if self.map_type == \"marines\":\n self.marine_id = min_unit_type\n elif self.map_type == \"stalkers_and_zealots\":\n self.stalker_id = min_unit_type\n self.zealot_id = min_unit_type + 1\n elif self.map_type == \"colossi_and_zealots\":\n self.colossus_id = min_unit_type\n self.zealot_id = min_unit_type + 1\n elif self.map_type == \"hydralisks_and_zerglings\":\n self.hydralisk_id = min_unit_type\n self.zergling_id = min_unit_type + 1\n elif self.map_type == \"stalkers\":\n self.stalker_id = min_unit_type\n elif self.map_type == \"MMM\":\n self.marauder_id = min_unit_type\n self.marine_id = min_unit_type + 1\n self.medivac_id = min_unit_type + 2\n elif self.map_type == \"zealots\":\n self.zealot_id = min_unit_type\n elif self.map_type == \"marauders\":\n self.marauder_id = min_unit_type\n elif self.map_type == \"hydralisks\":\n self.hydralisk_id = min_unit_type\n elif self.map_type == \"colossus\":\n self.colossus_id = min_unit_type\n elif self.map_type == \"bane\":\n self.baneling_id = min_unit_type\n self.zergling_id = min_unit_type + 1\n\n # if self.map_type == 'sz' or self.map_type == 's_v_z' or self.map_type == \"stalkers_and_zealots\":\n # self.stalker_id = min_unit_type\n # self.zealot_id = 
min_unit_type + 1\n # elif self.map_type == 'MMM':\n # self.marauder_id = min_unit_type\n # self.marine_id = min_unit_type + 1\n # self.medivac_id = min_unit_type + 2\n # elif self.map_type == 'zealots':\n # self.zealot_id = min_unit_type\n # elif self.map_type == 'focus_fire':\n # self.hydralisk_id = min_unit_type\n # elif self.map_type == 'retarget':\n # self.stalker_id = min_unit_type\n # elif self.map_type == 'colossus':\n # self.colossus_id = min_unit_type\n\n def _launch(self):\n\n self._run_config = run_configs.get()\n self._map = maps.get(self.map_name)\n\n # Setting up the interface\n self.interface = sc_pb.InterfaceOptions(\n raw = True, # raw, feature-level data\n score = True)\n\n self._sc2_proc = self._run_config.start(game_version=self.game_version, window_size=self.window_size)\n self.controller = self._sc2_proc.controller\n\n # Create the game.\n create = sc_pb.RequestCreateGame(realtime = False,\n random_seed = self.seed,\n local_map=sc_pb.LocalMap(map_path=self._map.path, map_data=self._run_config.map_data(self._map.path)))\n create.player_setup.add(type=sc_pb.Participant)\n create.player_setup.add(type=sc_pb.Computer, race=races[self._bot_race],\n difficulty=difficulties[self.difficulty])\n self.controller.create_game(create)\n\n join = sc_pb.RequestJoinGame(race=races[self._agent_race], options=self.interface)\n self.controller.join_game(join)\n\n def save_replay(self):\n prefix = self.save_replay_prefix or self.map_name\n replay_path = self._run_config.save_replay(self.controller.save_replay(), replay_dir='', prefix=prefix)\n print(\"Replay saved at: %s\" % replay_path)\n\n def reset(self, obs_noise_std=None):\n \"\"\"Start a new episode.\"\"\"\n\n if self.debug_inputs or self.debug_rewards:\n print('------------>> RESET <<------------')\n\n self._episode_steps = 0\n if self._episode_count > 0:\n # No need to restart for the first episode.\n self._restart()\n\n self._episode_count += 1\n\n if self.heuristic:\n self.heuristic_targets = [0] * self.n_agents\n\n # Information kept for counting the reward\n self.death_tracker_ally = np.zeros(self.n_agents)\n self.death_tracker_enemy = np.zeros(self.n_enemies)\n self.previous_agent_units = None\n self.previous_enemy_units = None\n\n self.last_action = np.zeros((self.n_agents, self.n_actions))\n\n try:\n self._obs = self.controller.observe()\n self.init_units()\n except protocol.ProtocolError:\n self.full_restart()\n except protocol.ConnectionError:\n self.full_restart()\n\n #print(self.controller.query(q_pb.RequestQuery(abilities=[q_pb.RequestQueryAvailableAbilities(unit_tag=self.agents[0].tag)])))\n #print(self.controller.data_raw())\n\n if self.obs_noise:\n self._noise_obs(obs_noise_std=obs_noise_std if obs_noise_std is not None else 0.0)\n\n return self.get_obs(), self.get_state()\n\n def _restart(self):\n\n # Kill and restore all units\n try:\n self.kill_all_units()\n self.controller.step(2)\n except protocol.ProtocolError:\n self.full_restart()\n except protocol.ConnectionError:\n self.full_restart()\n\n def full_restart(self):\n # End episode and restart a new one\n self._sc2_proc.close()\n self._launch()\n self.force_restarts += 1\n\n def one_hot(self, data, nb_classes):\n \"\"\"Convert an iterable of indices to one-hot encoded labels.\"\"\"\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]\n\n def step(self, actions, obs_noise_std=None):\n\n actions = [int(a) for a in actions]\n\n self.last_action = self.one_hot(actions, self.n_actions)\n\n # Collect individual actions\n sc_actions = []\n 
for a_id, action in enumerate(actions):\n if not self.heuristic:\n agent_action = self.get_agent_action(a_id, action)\n else:\n agent_action = self.get_agent_action_heuristic(a_id, action)\n if agent_action:\n sc_actions.append(agent_action)\n # Send action request\n req_actions = sc_pb.RequestAction(actions=sc_actions)\n\n try:\n res_actions = self.controller.actions(req_actions)\n # Make step in SC2, i.e. apply actions\n self.controller.step(self._step_mul)\n # Observe here so that we know if the episode is over.\n self._obs = self.controller.observe()\n except protocol.ProtocolError:\n self.full_restart()\n return 0, True, {}\n except protocol.ConnectionError:\n self.full_restart()\n return 0, True, {}\n\n self._total_steps += 1\n self._episode_steps += 1\n\n # Update what we know about units\n end_game = self.update_units()\n\n terminated = False\n reward = self.reward_battle()\n info = {\"battle_won\": False}\n\n if end_game is not None:\n # Battle is over\n terminated = True\n self.battles_game += 1\n if end_game == 1:\n self.battles_won += 1\n info[\"battle_won\"] = True\n if not self.reward_sparse:\n reward += self.reward_win\n else:\n reward = 1\n\n elif end_game == -1:\n if not self.reward_sparse:\n reward += self.reward_defeat\n else:\n reward = -1\n\n elif self.episode_limit > 0 and self._episode_steps >= self.episode_limit:\n # Episode limit reached\n terminated = True\n if self.continuing_episode:\n info[\"episode_limit\"] = True\n self.battles_game += 1\n self.timeouts += 1\n\n if self.debug_inputs or self.debug_rewards:\n print(\"Total Reward = %.f \\n ---------------------\" % reward)\n\n if self.reward_scale:\n reward /= (self.max_reward / self.reward_scale_rate)\n\n if self.obs_noise:\n self._noise_obs(obs_noise_std=obs_noise_std if obs_noise_std is not None else 0.0) # create noised observations data\n\n return reward, terminated, info\n\n def _noise_obs(self, obs_noise_std):\n class Obj: pass\n self.agents_noisy = {}\n for ak, _ in self.agents.items():\n self.agents_noisy[ak] = {}\n for ak2, agent in self.agents.items():\n self.agents_noisy[ak][ak2] = Obj()\n self.agents_noisy[ak][ak2].pos = Obj()\n self.agents_noisy[ak][ak2].pos.x = agent.pos.x + np.random.normal(0.0, obs_noise_std)\n self.agents_noisy[ak][ak2].pos.y = agent.pos.y + np.random.normal(0.0, obs_noise_std)\n self.agents_noisy[ak][ak2].health = agent.health\n self.agents_noisy[ak][ak2].health_max = agent.health_max\n self.agents_noisy[ak][ak2].unit_type = agent.unit_type\n self.agents_noisy[ak][ak2].shield = agent.shield\n\n self.enemies_noisy = {}\n for ak, _ in self.agents.items():\n self.enemies_noisy[ak] = {}\n for ek, enemy in self.enemies.items():\n self.enemies_noisy[ak][ek] = Obj()\n self.enemies_noisy[ak][ek].pos = Obj()\n self.enemies_noisy[ak][ek].pos.x = enemy.pos.x + np.random.normal(0.0, obs_noise_std)\n self.enemies_noisy[ak][ek].pos.y = enemy.pos.y + np.random.normal(0.0, obs_noise_std)\n self.enemies_noisy[ak][ek].health = enemy.health\n self.enemies_noisy[ak][ek].health_max = enemy.health_max\n self.enemies_noisy[ak][ek].unit_type = enemy.unit_type\n self.enemies_noisy[ak][ek].shield = enemy.shield\n pass\n\n def get_agent_action(self, a_id, action):\n\n unit = self.get_unit_by_id(a_id)\n tag = unit.tag\n x = unit.pos.x\n y = unit.pos.y\n\n true_avail_actions = self.get_avail_agent_actions(a_id)\n if true_avail_actions[action] == 0:\n action = 1\n\n if action == 0:\n # no-op (valid only when dead)\n try:\n assert unit.health == 0, \"No-op chosen but the agent's unit is not dead\"\n except 
Exception as e:\n pass\n if self.debug_inputs:\n print(\"Agent %d: Dead\"% a_id)\n return None\n elif action == 1:\n # stop\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_stop_id,\n unit_tags = [tag],\n queue_command = False)\n if self.debug_inputs:\n print(\"Agent %d: Stop\"% a_id)\n\n elif action == 2:\n # north\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_move_id,\n target_world_space_pos = sc_common.Point2D(x = x, y = y + self._move_amount),\n unit_tags = [tag],\n queue_command = False)\n if self.debug_inputs:\n print(\"Agent %d: North\"% a_id)\n\n elif action == 3:\n # south\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_move_id,\n target_world_space_pos = sc_common.Point2D(x = x, y = y - self._move_amount),\n unit_tags = [tag],\n queue_command = False)\n if self.debug_inputs:\n print(\"Agent %d: South\"% a_id)\n\n elif action == 4:\n # east\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_move_id,\n target_world_space_pos = sc_common.Point2D(x = x + self._move_amount, y = y),\n unit_tags = [tag],\n queue_command = False)\n if self.debug_inputs:\n print(\"Agent %d: East\"% a_id)\n\n elif action == 5:\n # west\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_move_id,\n target_world_space_pos = sc_common.Point2D(x = x - self._move_amount, y = y),\n unit_tags = [tag],\n queue_command = False)\n if self.debug_inputs:\n print(\"Agent %d: West\"% a_id)\n else:\n # attack/heal units that are in range\n target_id = action - self.n_actions_no_attack\n if self.map_type == 'MMM' and unit.unit_type == self.medivac_id:\n target_unit = self.agents[target_id]\n action_id = action_heal_id\n else:\n target_unit = self.enemies[target_id]\n action_id = action_attack_id\n target_tag = target_unit.tag\n\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_id,\n target_unit_tag = target_tag,\n unit_tags = [tag],\n queue_command = False)\n\n if self.debug_inputs:\n print(\"Agent %d attacks enemy # %d\" % (a_id, target_id))\n\n sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))\n return sc_action\n\n def get_agent_action_heuristic(self, a_id, action):\n\n unit = self.get_unit_by_id(a_id)\n tag = unit.tag\n\n target_tag = self.enemies[self.heuristic_targets[a_id]].tag\n action_id = action_attack_id\n\n cmd = r_pb.ActionRawUnitCommand(ability_id = action_id,\n target_unit_tag = target_tag,\n unit_tags = [tag],\n queue_command = False)\n\n sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))\n return sc_action\n\n def reward_battle(self):\n\n if self.reward_sparse:\n return 0\n\n # delta health - delta enemies + delta deaths where value:\n # if enemy unit dies, add reward_death_value per dead unit\n # if own unit dies, subtract reward_death_value per dead unit\n\n reward = 0\n delta_deaths = 0\n delta_ally = 0\n delta_enemy = 0\n\n neg_scale = self.reward_negative_scale\n\n if self.debug_rewards:\n for al_id in range(self.n_agents):\n print(\"Agent %d: diff HP = %.f, diff shield = %.f\" % (al_id, self.previous_agent_units[al_id].health - self.agents[al_id].health, self.previous_agent_units[al_id].shield - self.agents[al_id].shield))\n print('---------------------')\n for al_id in range(self.n_enemies):\n print(\"Enemy %d: diff HP = %.f, diff shield = %.f\" % (al_id, self.previous_enemy_units[al_id].health - self.enemies[al_id].health, self.previous_enemy_units[al_id].shield - self.enemies[al_id].shield))\n\n # update deaths\n for al_id, al_unit in self.agents.items():\n if not self.death_tracker_ally[al_id]:\n # did not die so far\n 
prev_health = self.previous_agent_units[al_id].health + self.previous_agent_units[al_id].shield\n if al_unit.health == 0:\n # just died\n self.death_tracker_ally[al_id] = 1\n if not self.reward_only_positive:\n delta_deaths -= self.reward_death_value * neg_scale\n delta_ally += prev_health * neg_scale\n else:\n # still alive\n delta_ally += (prev_health - al_unit.health - al_unit.shield) * neg_scale\n\n for e_id, e_unit in self.enemies.items():\n if not self.death_tracker_enemy[e_id]:\n prev_health = self.previous_enemy_units[e_id].health + self.previous_enemy_units[e_id].shield\n if e_unit.health == 0:\n self.death_tracker_enemy[e_id] = 1\n delta_deaths += self.reward_death_value\n delta_enemy += prev_health\n else:\n delta_enemy += prev_health - e_unit.health - e_unit.shield\n\n if self.reward_only_positive:\n\n if self.debug_rewards:\n print(\"--------------------------\")\n print(\"Delta enemy: \", delta_enemy)\n print(\"Delta deaths: \", delta_deaths)\n print(\"Reward: \", delta_enemy + delta_deaths)\n print(\"--------------------------\")\n\n reward = delta_enemy + delta_deaths\n reward = abs(reward) # shield regeneration\n else:\n if self.debug_rewards:\n print(\"--------------------------\")\n print(\"Delta enemy: \", delta_enemy)\n print(\"Delta deaths: \", delta_deaths)\n print(\"Delta ally: \", - delta_ally)\n print(\"Reward: \", delta_enemy + delta_deaths)\n print(\"--------------------------\")\n\n reward = delta_enemy + delta_deaths - delta_ally\n\n return reward\n\n def get_total_actions(self):\n return self.n_actions\n\n def distance(self, x1, y1, x2, y2):\n return math.hypot(x2 - x1, y2 - y1)\n\n def unit_shoot_range(self, agent_id):\n return 6\n\n def unit_sight_range(self, agent_id):\n return 9\n\n def unit_max_cooldown(self, unit):\n\n switcher = {\n self.marine_id: 15,\n self.marauder_id: 25,\n self.medivac_id: 200, # max energy\n self.stalker_id: 35,\n self.zealot_id: 22,\n self.colossus_id: 24,\n self.hydralisk_id: 10,\n self.zergling_id: 11,\n self.baneling_id: 1\n }\n return switcher.get(unit.unit_type, 15)\n\n # if self.map_type in ['marines', 'm_z']:\n # return 15\n #\n # unit = self.get_unit_by_id(agent_id)\n # if unit.unit_type == self.marine_id:\n # return 15\n # if unit.unit_type == self.marauder_id:\n # return 25\n # if unit.unit_type == self.medivac_id:\n # return 200\n #\n # if unit.unit_type == self.stalker_id:\n # return 35\n # if unit.unit_type == self.zealot_id:\n # return 22\n # if unit.unit_type == self.colossus_id:\n # return 24\n # if unit.unit_type == self.sentry_id:\n # return 13\n # if unit.unit_type == self.hydralisk_id:\n # return 10\n\n def unit_max_shield(self, unit):\n\n if unit.unit_type == 74 or unit.unit_type == self.stalker_id: # Protoss's Stalker\n return 80\n if unit.unit_type == 73 or unit.unit_type == self.zealot_id: # Protoss's Zealot\n return 50\n if unit.unit_type == 77 or unit.unit_type == self.sentry_id: # Protoss's Sentry\n return 40\n if unit.unit_type == 4 or unit.unit_type == self.colossus_id: # Protoss's Colossus\n return 150\n else:\n return -1\n\n def can_move(self, unit, direction):\n\n m = self._move_amount / 2\n\n if direction == 0: # north\n x, y = int(unit.pos.x), int(unit.pos.y + m)\n elif direction == 1: # south\n x, y = int(unit.pos.x), int(unit.pos.y - m)\n elif direction == 2: # east\n x, y = int(unit.pos.x + m), int(unit.pos.y)\n else: # west\n x, y = int(unit.pos.x - m), int(unit.pos.y)\n\n try:\n if self.pathing_grid[x, y] == 0:\n return True\n except Exception as e:\n return False\n\n return False\n\n def 
circle_grid_coords(self, unit, radius):\n # Generates coordinates in grid that lie within a circle\n\n r = radius\n x_floor = int(unit.pos.x)\n y_floor = int(unit.pos.y)\n\n points = []\n for x in range(-r, r + 1):\n Y = int(math.sqrt(abs(r*r-x*x))) # bound for y given x\n for y in range(- Y, + Y + 1):\n points.append((x_floor + x, y_floor + y))\n return points\n\n def get_surrounding_points(self, unit, include_self=False):\n\n x = int(unit.pos.x)\n y = int(unit.pos.y)\n\n ma = self._move_amount\n\n points = [\n (x, y + 2 * ma), (x, y - 2 * ma),\n (x + 2 * ma, y), (x - 2 * ma, y),\n (x + ma, y + ma), (x - ma, y - ma),\n (x + ma, y - ma), (x - ma, y + ma)\n ]\n\n if include_self:\n points.append((x, y))\n\n return points\n\n def check_bounds(self, x, y):\n return x >= 0 and y >=0 and x < self.map_x and y < self.map_y\n\n def get_surrounding_pathing(self, unit):\n\n points = self.get_surrounding_points(unit, include_self=False)\n vals = [self.pathing_grid[x, y] / 255 if self.check_bounds(x, y) else 1 for x, y in points]\n return vals\n\n def get_surrounding_height(self, unit):\n\n points = self.get_surrounding_points(unit, include_self=True)\n vals = [self.terrain_height[x, y] / 255 if self.check_bounds(x, y) else 1 for x, y in points]\n return vals\n\n def get_obs_agent(self, agent_id, obs_noise=False):\n # unit: replace with noisy unit\n\n unit = self.get_unit_by_id(agent_id)\n\n nf_al = 4 + self.unit_type_bits\n nf_en = 4 + self.unit_type_bits\n\n if self.obs_all_health:\n nf_al += 1 + self.shield_bits_ally\n nf_en += 1 + self.shield_bits_enemy\n\n if self.obs_last_action:\n nf_al += self.n_actions\n\n nf_own = self.unit_type_bits\n if self.obs_own_health:\n nf_own += 1 + self.shield_bits_ally\n\n move_feats_len = self.n_actions_move\n if self.obs_pathing_grid:\n move_feats_len += self.n_obs_pathing\n if self.obs_terrain_height:\n move_feats_len += self.n_obs_height\n\n move_feats = np.zeros(move_feats_len, dtype=np.float32) # exclude no-op & stop\n enemy_feats = np.zeros((self.n_enemies, nf_en), dtype=np.float32)\n ally_feats = np.zeros((self.n_agents - 1, nf_al), dtype=np.float32)\n own_feats = np.zeros(nf_own, dtype=np.float32)\n\n if unit.health > 0: # otherwise dead, return all zeros\n x = unit.pos.x\n y = unit.pos.y\n sight_range = self.unit_sight_range(agent_id)\n\n # Movement features\n avail_actions = self.get_avail_agent_actions(agent_id)\n for m in range(self.n_actions_move):\n move_feats[m] = avail_actions[m + 2]\n\n ind = self.n_actions_move\n\n if self.obs_pathing_grid:\n move_feats[ind: ind + self.n_obs_pathing] = self.get_surrounding_pathing(unit)\n ind += self.n_obs_pathing\n\n if self.obs_terrain_height:\n move_feats[ind:] = self.get_surrounding_height(unit)\n\n # Enemy features\n if obs_noise:\n enemies = self.enemies_noisy[agent_id]\n else:\n enemies = self.enemies\n\n for e_id, e_unit in enemies.items():\n e_x = e_unit.pos.x\n e_y = e_unit.pos.y\n dist = self.distance(x, y, e_x, e_y)\n\n if dist < sight_range and e_unit.health > 0: # visible and alive\n # Sight range > shoot range\n enemy_feats[e_id, 0] = avail_actions[self.n_actions_no_attack + e_id] # available\n enemy_feats[e_id, 1] = dist / sight_range # distance\n enemy_feats[e_id, 2] = (e_x - x) / sight_range # relative X\n enemy_feats[e_id, 3] = (e_y - y) / sight_range # relative Y\n\n ind = 4\n if self.obs_all_health:\n enemy_feats[e_id, ind] = e_unit.health / e_unit.health_max # health\n ind += 1\n if self.shield_bits_enemy > 0:\n max_shield = self.unit_max_shield(e_unit)\n if max_shield != -1:\n 
enemy_feats[e_id, ind] = e_unit.shield / max_shield # shield\n ind += 1\n\n if self.unit_type_bits > 0:\n type_id = self.get_unit_type_id(e_unit, False)\n enemy_feats[e_id, ind + type_id] = 1 # unit type\n\n # Ally features\n al_ids = [al_id for al_id in range(self.n_agents) if al_id != agent_id]\n for i, al_id in enumerate(al_ids):\n\n if obs_noise:\n al_unit = self.get_unit_by_id_noisy(obs_id=agent_id, get_id=al_id)\n else:\n al_unit = self.get_unit_by_id(al_id)\n al_x = al_unit.pos.x\n al_y = al_unit.pos.y\n dist = self.distance(x, y, al_x, al_y)\n\n if dist < sight_range and al_unit.health > 0: # visible and alive\n ally_feats[i, 0] = 1 # visible\n ally_feats[i, 1] = dist / sight_range # distance\n ally_feats[i, 2] = (al_x - x) / sight_range # relative X\n ally_feats[i, 3] = (al_y - y) / sight_range # relative Y\n\n ind = 4\n if self.obs_all_health:\n ally_feats[i, ind] = al_unit.health / al_unit.health_max # health\n ind += 1\n if self.shield_bits_ally > 0:\n max_shield = self.unit_max_shield(al_unit)\n if max_shield != -1:\n ally_feats[i, ind] = al_unit.shield / max_shield # shield\n ind += 1\n\n if self.unit_type_bits > 0:\n type_id = self.get_unit_type_id(al_unit, True)\n try:\n ally_feats[i, ind + type_id] = 1\n except Exception as e:\n pass\n ind += self.unit_type_bits\n\n if self.obs_last_action:\n ally_feats[i, ind:] = self.last_action[al_id]\n\n # Own features\n ind = 0\n if self.obs_own_health:\n own_feats[ind] = unit.health / unit.health_max\n ind += 1\n if self.shield_bits_ally > 0:\n max_shield = self.unit_max_shield(unit)\n own_feats[ind] = unit.shield / max_shield\n ind += 1\n\n if self.unit_type_bits > 0:\n type_id = self.get_unit_type_id(unit, True)\n own_feats[ind + type_id] = 1\n\n agent_obs = np.concatenate((move_feats.flatten(),\n enemy_feats.flatten(),\n ally_feats.flatten(),\n own_feats.flatten()))\n\n if self.debug_inputs:\n print(\"***************************************\")\n print(\"Agent: \", agent_id)\n print(\"Available Actions\\n\", self.get_avail_agent_actions(agent_id))\n print(\"Move feats\\n\", move_feats)\n print(\"Enemy feats\\n\", enemy_feats)\n print(\"Ally feats\\n\", ally_feats)\n print(\"Own feats\\n\", own_feats)\n print(\"***************************************\")\n\n return agent_obs\n\n def get_obs(self, obs_noise=False):\n agents_obs = [self.get_obs_agent(i, obs_noise=obs_noise) for i in range(self.n_agents)]\n return agents_obs\n\n def get_state(self):\n\n if self.obs_instead_of_state:\n obs_concat = np.concatenate(self.get_obs(), axis=0).astype(np.float32)\n return obs_concat\n\n nf_al = 4 + self.shield_bits_ally + self.unit_type_bits\n nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits\n\n ally_state = np.zeros((self.n_agents, nf_al))\n enemy_state = np.zeros((self.n_enemies, nf_en))\n\n center_x = self.map_x / 2\n center_y = self.map_y / 2\n\n for al_id, al_unit in self.agents.items():\n if al_unit.health > 0:\n x = al_unit.pos.x\n y = al_unit.pos.y\n max_cd = self.unit_max_cooldown(al_unit)\n\n ally_state[al_id, 0] = al_unit.health / al_unit.health_max # health\n if self.map_type == 'MMM' and al_unit.unit_type == self.medivac_id:\n ally_state[al_id, 1] = al_unit.energy / max_cd # energy\n else:\n ally_state[al_id, 1] = al_unit.weapon_cooldown / max_cd # cooldown\n ally_state[al_id, 2] = (x - center_x) / self.max_distance_x # relative X\n ally_state[al_id, 3] = (y - center_y) / self.max_distance_y # relative Y\n\n ind = 4\n if self.shield_bits_ally > 0:\n max_shield = self.unit_max_shield(al_unit)\n ally_state[al_id, ind] = 
al_unit.shield / max_shield # shield\n ind += 1\n\n if self.unit_type_bits > 0:\n type_id = self.get_unit_type_id(al_unit, True)\n ally_state[al_id, ind + type_id] = 1\n\n for e_id, e_unit in self.enemies.items():\n if e_unit.health > 0:\n x = e_unit.pos.x\n y = e_unit.pos.y\n\n enemy_state[e_id, 0] = e_unit.health / e_unit.health_max # health\n enemy_state[e_id, 1] = (x - center_x) / self.max_distance_x # relative X\n enemy_state[e_id, 2] = (y - center_y) / self.max_distance_y # relative Y\n\n ind = 3\n if self.shield_bits_enemy > 0:\n max_shield = self.unit_max_shield(e_unit)\n enemy_state[e_id, ind] = e_unit.shield / max_shield # shield\n ind += 1\n\n if self.unit_type_bits > 0:\n type_id = self.get_unit_type_id(e_unit, False)\n enemy_state[e_id, ind + type_id] = 1\n\n state = np.append(ally_state.flatten(), enemy_state.flatten())\n if self.state_last_action:\n state = np.append(state, self.last_action.flatten())\n state = state.astype(dtype=np.float32)\n\n if self.debug_inputs:\n print(\"------------ STATE ---------------\")\n print(\"Ally state\\n\", ally_state)\n print(\"Enemy state\\n\", enemy_state)\n print(\"Last action\\n\", self.last_action)\n print(\"----------------------------------\")\n\n return state\n\n def get_unit_type_id(self, unit, ally):\n\n \"\"\"Returns the ID of unit type in the given scenario.\"\"\"\n\n if ally: # use new SC2 unit types\n if self.map_type == \"colossi_and_zealots\":\n if unit.unit_type == 1927:\n type_id = 0\n else:\n type_id = 1\n else:\n type_id = unit.unit_type - self._min_unit_type\n\n else: # use default SC2 unit types\n if self.map_type == \"stalkers_and_zealots\":\n # id(Stalker) = 74, id(Zealot) = 73\n type_id = unit.unit_type - 73\n if self.map_type == \"marines_and_zealots\":\n if unit.unit_type == 73:\n type_id = 0\n else:\n type_id = 1\n if self.map_type == \"marines_and_zealots_and_stalkers\":\n if unit.unit_type == 73:\n type_id = 0\n if unit.unit_type == 74:\n type_id = 1\n else:\n type_id = 2\n if self.map_type == \"colossi_and_zealots\":\n if unit.unit_type == 73:\n type_id = 0\n else:\n type_id = 1\n if self.map_type == \"hydralisks_and_zerglings\":\n if unit.unit_type == 73:\n type_id = 0\n else:\n type_id = 1\n if self.map_type == \"bane\":\n if unit.unit_type == 9:\n type_id = 0\n else:\n type_id = 1\n elif self.map_type == \"MMM\":\n if unit.unit_type == 51:\n type_id = 0\n elif unit.unit_type == 48:\n type_id = 1\n else:\n type_id = 2\n\n return type_id\n # if ally: # we use new SC2 unit types\n #\n # if self.map_type == 'sz':\n # type_id = unit.unit_type - self.stalker_id\n # elif self.map_type == 'MMM':\n # if unit.unit_type == self.marauder_id:\n # type_id = 0\n # elif unit.unit_type == self.marine_id:\n # type_id = 1\n # else:\n # type_id = 2\n #\n # else: # 'We use default SC2 unit types'\n #\n # if self.map_type == 'sz':\n # # id(Stalker) = 74, id(Zealot) = 73\n # type_id = unit.unit_type - 73\n # elif self.map_type == 'MMM':\n # if unit.unit_type == 51:\n # type_id = 0\n # elif unit.unit_type == 48:\n # type_id = 1\n # else:\n # type_id = 2\n\n #return type_id\n\n def get_state_size(self):\n\n if self.obs_instead_of_state:\n return self.get_obs_size() * self.n_agents\n\n nf_al = 4 + self.shield_bits_ally + self.unit_type_bits\n nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits\n\n enemy_state = self.n_enemies * nf_en\n ally_state = self.n_agents * nf_al\n\n size = enemy_state + ally_state\n\n if self.state_last_action:\n size += self.n_agents * self.n_actions\n\n return size\n\n def 
get_avail_agent_actions(self, agent_id):\n unit = self.get_unit_by_id(agent_id)\n if unit.health > 0:\n # cannot do no-op while alive\n avail_actions = [0] * self.n_actions\n\n # stop should be allowed\n avail_actions[1] = 1\n\n # see if we can move\n if self.can_move(unit, 0):\n avail_actions[2] = 1\n if self.can_move(unit, 1):\n avail_actions[3] = 1\n if self.can_move(unit, 2):\n avail_actions[4] = 1\n if self.can_move(unit, 3):\n avail_actions[5] = 1\n\n # can only attack targets that are alive\n # and in the shooting range\n shoot_range = self.unit_shoot_range(agent_id)\n\n target_items = self.enemies.items()\n if self.map_type == 'MMM' and unit.unit_type == self.medivac_id:\n # Medivacs cannot heal themselves and other flying units\n target_items = [(t_id, t_unit) for (t_id, t_unit) in self.agents.items() if t_unit.unit_type != self.medivac_id]\n\n for t_id, t_unit in target_items:\n if t_unit.health > 0:\n dist = self.distance(unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y)\n if dist <= shoot_range:\n avail_actions[t_id + self.n_actions_no_attack] = 1\n\n return avail_actions\n\n else:\n # only no-op allowed\n return [1] + [0] * (self.n_actions - 1)\n\n def get_unrestricted_actions(self, agent_id):\n unit = self.get_unit_by_id(agent_id)\n if unit.health > 0:\n # cannot do no-op while alive\n avail_actions = [1] * self.n_actions\n avail_actions[0] = 0\n else:\n avail_actions = [0] * self.n_actions\n avail_actions[0] = 1\n return avail_actions\n\n def get_avail_actions(self):\n\n avail_actions = []\n for agent_id in range(self.n_agents):\n if self.restrict_actions:\n avail_agent = self.get_avail_agent_actions(agent_id)\n avail_actions.append(avail_agent)\n else:\n avail_agent = self.get_unrestricted_actions(agent_id)\n avail_actions.append(avail_agent)\n\n return avail_actions\n\n def get_obs_size(self):\n\n nf_al = 4 + self.unit_type_bits\n nf_en = 4 + self.unit_type_bits\n\n if self.obs_all_health:\n nf_al += 1 + self.shield_bits_ally\n nf_en += 1 + self.shield_bits_enemy\n\n own_feats = self.unit_type_bits\n if self.obs_own_health:\n own_feats += 1 + self.shield_bits_ally\n\n if self.obs_last_action:\n nf_al += self.n_actions\n\n move_feats = self.n_actions_move\n if self.obs_pathing_grid:\n move_feats += self.n_obs_pathing\n if self.obs_terrain_height:\n move_feats += self.n_obs_height\n\n enemy_feats = self.n_enemies * nf_en\n ally_feats = (self.n_agents - 1) * nf_al\n\n return move_feats + enemy_feats + ally_feats + own_feats\n\n def close(self):\n self._sc2_proc.close()\n\n def render(self):\n pass\n\n def kill_all_units(self):\n\n units_alive = [unit.tag for unit in self.agents.values() if unit.health > 0] + [unit.tag for unit in self.enemies.values() if unit.health > 0]\n debug_command = [d_pb.DebugCommand(kill_unit = d_pb.DebugKillUnit(tag = units_alive))]\n self.controller.debug(debug_command)\n\n def init_units(self):\n\n # In case controller step fails\n while True:\n\n self.agents = {}\n self.enemies = {}\n\n ally_units = [unit for unit in self._obs.observation.raw_data.units if unit.owner == 1]\n ally_units_sorted = sorted(ally_units, key=attrgetter('unit_type', 'pos.x', 'pos.y'), reverse=False)\n\n for i in range(len(ally_units_sorted)):\n self.agents[i] = ally_units_sorted[i]\n if self.debug_inputs:\n print(\"Unit %d is %d, x = %.1f, y = %1.f\" % (len(self.agents), self.agents[i].unit_type, self.agents[i].pos.x, self.agents[i].pos.y))\n\n for unit in self._obs.observation.raw_data.units:\n if unit.owner == 2:\n self.enemies[len(self.enemies)] = unit\n if self._episode_count 
== 1:\n self.max_reward += unit.health_max + unit.shield_max\n\n if self._episode_count == 1:\n min_unit_type = min(unit.unit_type for unit in self.agents.values())\n self.init_ally_unit_types(min_unit_type)\n\n if len(self.agents) == self.n_agents and len(self.enemies) == self.n_enemies:\n # All good\n return\n\n try:\n self.controller.step(1)\n self._obs = self.controller.observe()\n except protocol.ProtocolError:\n self.full_restart()\n self.reset()\n except protocol.ConnectionError:\n self.full_restart()\n self.reset()\n\n def update_units(self):\n\n # This function assumes that self._obs is up-to-date\n n_ally_alive = 0\n n_enemy_alive = 0\n\n # Store previous state\n self.previous_agent_units = deepcopy(self.agents)\n self.previous_enemy_units = deepcopy(self.enemies)\n\n for al_id, al_unit in self.agents.items():\n updated = False\n for unit in self._obs.observation.raw_data.units:\n if al_unit.tag == unit.tag:\n self.agents[al_id] = unit\n updated = True\n n_ally_alive += 1\n break\n\n if not updated: # means dead\n al_unit.health = 0\n\n for e_id, e_unit in self.enemies.items():\n updated = False\n for unit in self._obs.observation.raw_data.units:\n if e_unit.tag == unit.tag:\n self.enemies[e_id] = unit\n updated = True\n n_enemy_alive += 1\n break\n\n if not updated: # means dead\n e_unit.health = 0\n\n if self.heuristic:\n for al_id, al_unit in self.agents.items():\n current_target = self.heuristic_targets[al_id]\n if current_target == 0 or self.enemies[current_target].health == 0:\n x = al_unit.pos.x\n y = al_unit.pos.y\n min_dist = 32\n min_id = -1\n for e_id, e_unit in self.enemies.items():\n if e_unit.health > 0:\n dist = self.distance(x, y, e_unit.pos.x, e_unit.pos.y)\n if dist < min_dist:\n min_dist = dist\n min_id = e_id\n self.heuristic_targets[al_id] = min_id\n\n if (n_ally_alive == 0 and n_enemy_alive > 0) or self.only_medivac_left(ally=True):\n return -1 # loss\n if (n_ally_alive > 0 and n_enemy_alive == 0) or self.only_medivac_left(ally=False):\n return 1 # win\n if n_ally_alive == 0 and n_enemy_alive == 0:\n return 0 # tie, not sure if this is possible\n\n return None\n\n def only_medivac_left(self, ally):\n if self.map_type != 'MMM':\n return False\n\n if ally:\n units_alive = [a for a in self.agents.values() if (a.health > 0 and a.unit_type != self.medivac_id)]\n if len(units_alive) == 0:\n return True\n return False\n else:\n units_alive = [a for a in self.enemies.values() if (a.health > 0 and a.unit_type != self.medivac_id)]\n if len(units_alive) == 1 and units_alive[0].unit_type == 54:\n return True\n return False\n\n def get_unit_by_id(self, a_id):\n return self.agents[a_id]\n\n def get_unit_by_id_noisy(self, obs_id, get_id):\n return self.agents_noisy[obs_id][get_id]\n\n def get_stats(self):\n stats = {}\n stats[\"battles_won\"] = self.battles_won\n stats[\"battles_game\"] = self.battles_game\n stats[\"battles_draw\"] = self.timeouts\n stats[\"win_rate\"] = self.battles_won / self.battles_game\n stats[\"timeouts\"] = self.timeouts\n stats[\"restarts\"] = self.force_restarts\n return stats\n\n def get_agg_stats(self, stats):\n\n current_stats = {}\n for stat in stats:\n for _k, _v in stat.items():\n if not (_k in current_stats):\n current_stats[_k] = []\n if _k in [\"win_rate\"]:\n continue\n current_stats[_k].append(_v)\n\n # average over stats\n aggregate_stats = {}\n for _k, _v in current_stats.items():\n if _k in [\"win_rate\"]:\n aggregate_stats[_k] = np.mean([ (_a - _b)/(_c - _d) for _a, _b, _c, _d in zip(current_stats[\"battles_won\"],\n 
[0]*len(current_stats[\"battles_won\"]) if self.last_stats is None else self.last_stats[\"battles_won\"],\n current_stats[\"battles_game\"],\n [0]*len(current_stats[\"battles_game\"]) if self.last_stats is None else\n self.last_stats[\"battles_game\"])\n if (_c - _d) != 0.0])\n else:\n aggregate_stats[_k] = np.mean([_a-_b for _a, _b in zip(_v, [0]*len(_v) if self.last_stats is None else self.last_stats[_k])])\n\n self.last_stats = current_stats\n return aggregate_stats\n\n # TODO Mika - not sure if we need these anymore and whether they are correct after my changes\n def get_intersect(self, coordinates, e_unit, sight_range ):\n e_x = e_unit.pos.x\n e_y = e_unit.pos.y\n distances = np.sum((coordinates - np.array([e_x, e_y] ))**2, 1)**0.5\n if max( distances ) > sight_range:\n return False\n else:\n return True\n\n def get_obs_intersection_noisy(self, agent_ids):\n # return array of all noisy obs intersections, by observing agent id (agent_ids)\n return [self.get_obs_intersection(agent_ids, obs_id=_a, obs_noise=True) for _a in agent_ids]\n\n def get_obs_intersection(self, agent_ids, obs_id=None, obs_noise=False):\n\n \"\"\" Returns the intersection of all of agent_ids agents' observations. \"\"\"\n # Create grid\n nf_al = 4\n nf_en = 5\n\n if self.map_name in ['2s_3z', '3s_4z', '4s_5z', '3s_5z']:\n # unit types (in onehot)\n nf_al += 2\n nf_en += 2\n\n # move_feats = np.zeros(self.n_actions_no_attack - 2, dtype=np.float32) # exclude no-op & stop\n enemy_feats = -1*np.ones((len(agent_ids), self.n_enemies, nf_en), dtype=np.float32)\n ally_feats = -1*np.ones((len(agent_ids), self.n_agents, nf_al), dtype=np.float32)\n #state = np.concatenate((enemy_feats.flatten(),\n # ally_feats.flatten()))\n # state = state.astype(dtype=np.float32)\n state = np.concatenate((enemy_feats.flatten(),\n ally_feats.flatten()))\n state = state.astype(dtype=np.float32)\n #Todo: Check that the dimensions are consistent.\n aa1 = np.array(self.get_avail_agent_actions(agent_ids[0]))\n aa2 = np.array(self.get_avail_agent_actions(agent_ids[1]))\n if self.relax_pairwise_aa: # OR mode\n aa1 = [ 1 if ((aa1[_i] == 1) or (aa2[_i] == 1)) else 0 for _i in range(aa1.shape[0]) ]\n aa2 = aa1.copy()\n a_a1 = np.reshape( aa1, [-1,1])\n a_a2 = np.reshape( aa2, [1,-1])\n avail_actions = a_a1.dot(a_a2)\n # Debug!! 
TODO: FIX THE IF TRUE BELOW\n if self.mackrl_delegate_if_zero_ck:\n avail_all = avail_actions.copy() * 0\n else:\n avail_all = avail_actions.copy() * 0 + 1\n\n coordinates = np.zeros([len(agent_ids), 2])\n if obs_noise:\n agents = self.agents_noisy[obs_id]\n else:\n agents = self.agents\n for i, a_id in enumerate(agent_ids):\n if not (agents[a_id].health > 0):\n return state, avail_all * 0.0 # FORCE TO DELEGATE\n else:\n coordinates[i] = [agents[a_id].pos.x, agents[a_id].pos.y]\n\n # Calculate pairwise distances\n distances = ((coordinates[:, 0:1] - coordinates[:, 0:1].T)**2 + (coordinates[:, 1:2] - coordinates[:, 1:2].T)**2)**0.5\n sight_range = self.unit_sight_range(agent_ids[0])\n\n # Check that max pairwise distance is less than sight_range.\n if np.max(distances) > sight_range:\n return state, avail_all\n\n x = np.mean(coordinates, 0)[0]\n y = np.mean(coordinates, 0)[1]\n\n if obs_noise:\n enemies = self.enemies_noisy[obs_id]\n else:\n enemies = self.enemies\n for e_id, e_unit in enemies.items():\n e_x = e_unit.pos.x\n e_y = e_unit.pos.y\n # dist = self.distance(x, y, e_x, e_y)\n\n if self.get_intersect(coordinates, e_unit, sight_range) and e_unit.health > 0: # visible and alive\n # Sight range > shoot range\n for i, a_id in enumerate(agent_ids):\n dist = self.distance(self.agents[a_id].pos.x, self.agents[a_id].pos.y, e_x, e_y)\n enemy_feats[i, e_id, 0] = a_a1[self.n_actions_no_attack + e_id, 0] # available\n enemy_feats[i, e_id, 1] = dist / sight_range # distance\n enemy_feats[i, e_id, 2] = (e_x - self.agents[a_id].pos.x) / sight_range # relative X\n enemy_feats[i, e_id, 3] = (e_y - self.agents[a_id].pos.y) / sight_range # relative Y\n enemy_feats[i, e_id, 4] = a_a2[0, self.n_actions_no_attack + e_id] # available\n\n if self.map_name in ['2s_3z', '3s_4z', '4s_5z', '3s_5z']:\n type_id = e_unit.unit_type - 73 # id(Stalker) = 74, id(Zealot) = 73\n enemy_feats[i, e_id, 4 + type_id] = 1\n\n else:\n avail_actions[self.n_actions_no_attack + e_id, :] = 0\n avail_actions[:, self.n_actions_no_attack + e_id] = 0\n\n # place the features of the agent himself always at the first place\n al_ids = list(agent_ids)\n for al in range(self.n_agents):\n if al not in agent_ids:\n al_ids.append(al)\n for i, al_id in enumerate(al_ids):\n if obs_noise:\n al_unit = self.get_unit_by_id_noisy(obs_id=obs_id, get_id=al_id)\n else:\n al_unit = self.get_unit_by_id(al_id)\n al_x = al_unit.pos.x\n al_y = al_unit.pos.y\n #dist = self.distance(x, y, al_x, al_y)\n if self.get_intersect(coordinates, al_unit, sight_range) and al_unit.health > 0: # visible and alive\n for j, a_id in enumerate(agent_ids):\n dist = self.distance(agents[a_id].pos.x, agents[a_id].pos.y, al_x, al_y)\n ally_feats[j, i, 0] = 1 # visible\n ally_feats[j, i, 1] = dist / sight_range # distance\n ally_feats[j, i, 2] = (al_x - agents[a_id].pos.x) / sight_range # relative X\n ally_feats[j, i, 3] = (al_y - agents[a_id].pos.y) / sight_range # relative Y\n\n if self.map_name in ['2s_3z', '3s_4z', '4s_5z', '3s_5z']:\n type_id = al_unit.unit_type - self.stalker_id # id(Stalker) = self.stalker_id, id(Zealot) = self.zealot_id\n ally_feats[j, i, 4 + type_id] = 1\n\n state = np.concatenate((enemy_feats.flatten(),\n ally_feats.flatten()))\n\n state = state.astype(dtype=np.float32)\n\n if self.debug_inputs:\n print(\"***************************************\")\n print(\"Agent_intersections: \", agent_ids)\n print(\"Enemy feats\\n\", enemy_feats)\n print(\"Ally feats\\n\", ally_feats)\n print(\"***************************************\")\n return state, 
avail_actions\n\n def get_obs_intersect_pair_size(self):\n return self.get_obs_intersect_size()*2\n\n def get_obs_intersect_all_size(self):\n return self.get_obs_intersect_size()*self.n_agents\n\n def get_obs_intersect_size(self):\n\n nf_al = 4\n nf_en = 5\n\n if self.map_name in ['2s_3z', '3s_4z', '4s_5z', '3s_5z']:\n nf_al += 2\n nf_en += 2\n\n enemy_feats = self.n_enemies * nf_en\n ally_feats = (self.n_agents) * nf_al\n\n return enemy_feats + ally_feats\n\nfrom components.transforms import _seq_mean\n\nclass StatsAggregator():\n\n def __init__(self):\n self.last_stats = None\n self.stats = []\n pass\n\n def aggregate(self, stats, add_stat_fn):\n\n current_stats = {}\n for stat in stats:\n for _k, _v in stat.items():\n if not (_k in current_stats):\n current_stats[_k] = []\n if _k in [\"win_rate\"]:\n continue\n current_stats[_k].append(_v)\n\n # average over stats\n aggregate_stats = {}\n for _k, _v in current_stats.items():\n if _k in [\"win_rate\"]:\n aggregate_stats[_k] = np.mean([ (_a - _b)/(_c - _d) for _a, _b, _c, _d in zip(current_stats[\"battles_won\"],\n [0]*len(current_stats[\"battles_won\"]) if self.last_stats is None else self.last_stats[\"battles_won\"],\n current_stats[\"battles_game\"],\n [0]*len(current_stats[\"battles_game\"]) if self.last_stats is None else\n self.last_stats[\"battles_game\"])\n if (_c - _d) != 0.0])\n else:\n aggregate_stats[_k] = np.mean([_a-_b for _a, _b in zip(_v, [0]*len(_v) if self.last_stats is None else self.last_stats[_k])])\n\n # add stats that have just been produced to tensorboard / sacred\n for _k, _v in aggregate_stats.items():\n add_stat_fn(_k, _v)\n\n # collect stats for logging horizon\n self.stats.append(aggregate_stats)\n # update last stats\n self.last_stats = current_stats\n pass\n\n def log(self, log_directly=False):\n assert not log_directly, \"log_directly not supported.\"\n logging_str = \" Win rate: {}\".format(_seq_mean([ stat[\"win_rate\"] for stat in self.stats ]))\\\n + \" Timeouts: {}\".format(_seq_mean([ stat[\"timeouts\"] for stat in self.stats ]))\\\n + \" Restarts: {}\".format(_seq_mean([ stat[\"restarts\"] for stat in self.stats ]))\n\n # flush stats\n self.stats = []\n return logging_str\n"
] | [
[
"numpy.max",
"numpy.random.normal",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"numpy.mean",
"numpy.eye"
]
] |
DeLaVlag/tvb-hpc | [
"6559707dfee8ec712d9624aaf441f9901919fe63"
] | [
"examples/hcp100.py"
] | [
"\n\"\"\"\nParallel simulation of HCP 100 subjects.\n\n\"\"\"\n\nimport numpy as np\nimport loopy as lp\nfrom scipy import sparse\nlp.set_caching_enabled(False)\nfrom tvb_hpc import model, coupling, network, utils, compiler, scheme\n\nLOG = utils.getLogger('hcp100')\n\n# load data\nnpz = np.load('data/hcp-100.npz')\nW = npz['weights'].astype(np.float32)\nL = npz['lengths'].astype(np.float32)\nnsubj, nnode, _ = W.shape\nLOG.info('nsubj %d nnode %d', nsubj, nnode)\n\n# enforce sparsity across subject weights (all: 15% sparse, any: 48%)\nW_nz: np.ndarray = ~(W == 0)\nW_nzmask = W_nz.all(axis=0)\n\nW_ = np.array([((w + 1e-3) * W_nzmask) for w in W])\nL_ = np.array([((l + 1e-3) * W_nzmask) for l in L])\n\nW0_nz: np.ndarray = ~(W_[0] == 0)\nW_nnz = W0_nz.sum()\nLOG.info('W nnz %.3f', W_nnz * 100 / (nsubj * nnode * nnode))\n\n# build sparse W & L\nsW_data = np.zeros((nsubj, W_nnz), np.float32)\nsL_data = np.zeros((nsubj, W_nnz), np.float32)\nsW_col = np.zeros((W_nnz, ), np.uintc)\nsW_row = np.zeros((nnode + 1, ), np.uintc)\nfor i, (w, l) in enumerate(zip(W_, L_)):\n sw = sparse.csr_matrix(w)\n sl = sparse.csr_matrix(l)\n np.testing.assert_allclose(sw.nnz, W_nnz)\n np.testing.assert_allclose(sl.nnz, W_nnz)\n sL_data[i, :] = sl.data\n sW_data[i, :] = sl.data\n if i == 0:\n sW_col[:] = sw.indices\n sW_row[:] = sw.indptr\n else:\n np.testing.assert_allclose(sw.indices, sW_col)\n np.testing.assert_allclose(sw.indptr, sW_row)\n np.testing.assert_allclose(sl.indices, sW_col)\n np.testing.assert_allclose(sl.indptr, sW_row)\nsD_data = (sL_data / 0.1).astype('i')\nDmax = sD_data.max()\n\n\n# build other data arrays\nnext, state, drift = np.zeros((3, nsubj, nnode, 2), np.float32)\ninput, param, diffs = np.zeros((3, nsubj, nnode, 2), np.float32)\nobsrv = np.zeros((Dmax + 3, nsubj, nnode, 2), np.float32)\nLOG.info('obsrv %r %.3f MB', obsrv.shape, obsrv.nbytes / 2**20)\n\n\n# build kernels\ntarget = compiler.OpenMPCTarget()\ntarget.iname_pragma_map['i_subj'] = 'omp parallel for'\n\ndef comp():\n comp = compiler.Compiler(cc='/usr/local/bin/gcc-6')\n comp.cflags += '-fopenmp -march=native -ffast-math'.split()\n comp.ldflags += '-fopenmp'.split()\n return comp\n\n\ndef batch_knl(knl):\n varying = 'weights delays state input obsrv drift diffs next'.split()\n # wait for bug fix\n #varying.remove('delays')\n return lp.to_batched(knl, 'nsubj', varying, 'i_subj',\n sequential=False)\n\nosc = model.G2DO()\nosc.dt = 0.1\nosc_knl = osc.kernel(target)\nosc_knl = batch_knl(osc_knl)\nosc_fn = compiler.CompiledKernel(osc_knl, comp())\n\ncfun = coupling.Linear(osc)\nnet = network.Network(osc, cfun)\nnet_knl = net.kernel(target)\nnet_knl = batch_knl(net_knl)\nnet_fn = compiler.CompiledKernel(net_knl, comp())\n\nscm = scheme.EulerStep(osc.dt)\nscm_knl = scm.kernel(target)\nscm_knl = batch_knl(scm_knl)\nscm_knl = lp.prioritize_loops(scm_knl, ['i_subj', 'i', 'j'])\nscm_knl = lp.fix_parameters(scm_knl, nsvar=2)\nscm_knl = lp.tag_inames(scm_knl, [('j', 'ilp')])\nscm_fn = compiler.CompiledKernel(scm_knl, comp())\n\n\n# step function\ndef step(n_step=1):\n for _ in range(n_step):\n t = Dmax + 1\n net_fn(nsubj=nsubj, t=t, ntime=Dmax + 3, nnode=nnode, nnz=W_nnz,\n row=sW_row, col=sW_col,\n delays=sD_data, weights=sW_data,\n input=input, obsrv=obsrv\n )\n osc_fn(nsubj=nsubj, nnode=nnode,\n state=state, input=input, param=param,\n drift=drift, diffs=diffs, obsrv=obsrv[t])\n scm_fn(nsubj=nsubj, nnode=nnode, nsvar=2, next=next, state=state,\n drift=drift)\n # TODO\n # obsrv[:Dmax] = obsrv[-Dmax:]\n\n# warm up\nstep(20)\n\n# time it\nwith 
utils.timer('1000 time steps'):\n step(1000)\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros",
"numpy.load",
"scipy.sparse.csr_matrix"
]
] |
urasakikeisuke/rigidmask | [
"4bb781102218dfd11efa767e2d0ba987d9949fd1"
] | [
"submission.py"
] | [
"from __future__ import print_function\nimport os\nimport sys\nimport cv2\nimport pdb\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport time\nfrom utils.io import mkdir_p\nfrom utils.util_flow import save_pfm, write_flow\nfrom utils.flowlib import write_flo, point_vec\nfrom dataloader.exploader import disparity_loader\nfrom utils import dydepth as ddlib\ncudnn.benchmark = False\n\nparser = argparse.ArgumentParser(description='RigidMask')\nparser.add_argument('--dataset', default='2015',\n help='{2015, 2015val, sintelval, seq-XXX}')\nparser.add_argument('--datapath', default='/ssd/kitti_scene/training/',\n help='dataset path')\nparser.add_argument('--loadmodel', default=None,\n help='model path')\nparser.add_argument('--outdir', default='output',\n help='output dir')\nparser.add_argument('--testres', type=float, default=1,\n help='resolution')\nparser.add_argument('--maxdisp', type=int ,default=256,\n help='maxium disparity. Only affect the coarsest cost volume size')\nparser.add_argument('--fac', type=float ,default=1,\n help='controls the shape of search grid. Only affect the coarse cost volume size')\nparser.add_argument('--disp_path', default='',\n help='disparity input (only used for stereo)')\nparser.add_argument('--mask_path', default='',\n help='mask input')\nparser.add_argument('--refine', dest='refine', action='store_true',\n help='refine scene flow by rigid body motion')\nparser.add_argument('--sensor', default='mono',\n help='{mono} or stereo, will affect rigid motion parameterization')\nargs = parser.parse_args()\n\n\n# dataloader\nif args.dataset == '2015':\n from dataloader import kitti15list as DA\n maxw,maxh = [int(args.testres*1280), int(args.testres*384)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == '2015val':\n from dataloader import kitti15list_val as DA\n maxw,maxh = [int(args.testres*1280), int(args.testres*384)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == '2015vallidar':\n from dataloader import kitti15list_val_lidar as DA\n maxw,maxh = [int(args.testres*1280), int(args.testres*384)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == '2015test':\n from dataloader import kitti15list as DA\n maxw,maxh = [int(args.testres*1280), int(args.testres*384)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif 'seq' in args.dataset:\n from dataloader import seqlist as DA\n maxw,maxh = [int(args.testres*1280), int(args.testres*384)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'sinteltemple':\n from dataloader import sintel_temple as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'sinteltest':\n from dataloader import sintellist as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'sintel':\n from dataloader import sintel_mrflow_val as DA\n #from dataloader import sintellist as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'sinteldepth':\n from dataloader import 
sintel_rtn_val as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'sintelval':\n from dataloader import sintellist_val as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'mosegsintel':\n from dataloader import moseg_sintellist_val as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'mb':\n from dataloader import mblist as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'hd1k':\n from dataloader import hd1klist_val as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'viper':\n from dataloader import viperlist_val as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'viper_test':\n from dataloader import viperlist_test as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \nelif args.dataset == 'tum':\n from dataloader import tumlist as DA\n maxw,maxh = [int(args.testres*1024), int(args.testres*448)]\n test_left_img, test_right_img ,_= DA.dataloader(args.datapath) \n\nmax_h = int(maxh // 64 * 64)\nmax_w = int(maxw // 64 * 64)\nif max_h < maxh: max_h += 64\nif max_w < maxw: max_w += 64\nmaxh = max_h\nmaxw = max_w\n\n\nmean_L = [[0.33,0.33,0.33]]\nmean_R = [[0.33,0.33,0.33]]\n\n# construct model, VCN-expansion\nfrom models.VCNplus import VCN\nmodel = VCN([1, maxw, maxh], md=[int(4*(args.maxdisp/256)),4,4,4,4], fac=args.fac,exp_unc=not ('kitti' in args.loadmodel))\nmodel = nn.DataParallel(model, device_ids=[0])\nmodel.cuda()\n\nif args.loadmodel is not None:\n pretrained_dict = torch.load(args.loadmodel,map_location='cpu')\n mean_L=pretrained_dict['mean_L']\n mean_R=pretrained_dict['mean_R']\n pretrained_dict['state_dict'] = {k:v for k,v in pretrained_dict['state_dict'].items()}\n model.load_state_dict(pretrained_dict['state_dict'],strict=False)\nelse:\n print('dry run')\nprint('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n\n# load intrinsics calib\nif 'seq' in args.dataset: \n calib_path = '%s-calib.txt'%(args.datapath.rsplit('/',1)[0])\n if os.path.exists(calib_path):\n seqcalib = np.loadtxt(calib_path)\n else:\n exit()\n\nmkdir_p('%s/%s/'% (args.outdir, args.dataset))\ndef main():\n model.eval()\n ttime_all = []\n for inx in range(len(test_left_img)):\n idxname = test_left_img[inx].split('/')[-1].split('.')[0]\n print(test_left_img[inx])\n imgL_o = cv2.imread(test_left_img[inx])[:,:,::-1]\n imgR_o = cv2.imread(test_right_img[inx])[:,:,::-1]\n\n # for gray input images\n if len(imgL_o.shape) == 2:\n imgL_o = np.tile(imgL_o[:,:,np.newaxis],(1,1,3))\n imgR_o = np.tile(imgR_o[:,:,np.newaxis],(1,1,3))\n\n # resize\n maxh = imgL_o.shape[0]*args.testres\n maxw = imgL_o.shape[1]*args.testres\n max_h = int(maxh // 64 * 64)\n max_w = int(maxw // 64 * 64)\n if max_h < maxh: max_h += 64\n if max_w < maxw: max_w += 64\n\n input_size = imgL_o.shape\n imgL = cv2.resize(imgL_o,(max_w, max_h))\n imgR = cv2.resize(imgR_o,(max_w, max_h))\n imgL_noaug = 
torch.Tensor(imgL/255.)[np.newaxis].float().cuda()\n\n # flip channel, subtract mean\n imgL = imgL[:,:,::-1].copy() / 255. - np.asarray(mean_L).mean(0)[np.newaxis,np.newaxis,:]\n imgR = imgR[:,:,::-1].copy() / 255. - np.asarray(mean_R).mean(0)[np.newaxis,np.newaxis,:]\n imgL = np.transpose(imgL, [2,0,1])[np.newaxis]\n imgR = np.transpose(imgR, [2,0,1])[np.newaxis]\n\n # modify module according to inputs\n from models.VCNplus import WarpModule, flow_reg\n for i in range(len(model.module.reg_modules)):\n model.module.reg_modules[i] = flow_reg([1,max_w//(2**(6-i)), max_h//(2**(6-i))], \n ent=getattr(model.module, 'flow_reg%d'%2**(6-i)).ent,\\\n maxdisp=getattr(model.module, 'flow_reg%d'%2**(6-i)).md,\\\n fac=getattr(model.module, 'flow_reg%d'%2**(6-i)).fac).cuda()\n for i in range(len(model.module.warp_modules)):\n model.module.warp_modules[i] = WarpModule([1,max_w//(2**(6-i)), max_h//(2**(6-i))]).cuda()\n\n # get intrinsics\n if '2015' in args.dataset:\n from utils.util_flow import load_calib_cam_to_cam\n ints = load_calib_cam_to_cam(test_left_img[inx].replace('image_2','calib_cam_to_cam')[:-7]+'.txt')\n K0 = ints['K_cam2']\n K1 = K0\n fl = K0[0,0]\n cx = K0[0,2]\n cy = K0[1,2]\n bl = ints['b20']-ints['b30']\n fl_next = fl\n intr_list = [torch.Tensor(inxx).cuda() for inxx in [[fl],[cx],[cy],[bl],[1],[0],[0],[1],[0],[0]]]\n elif 'sintel' in args.dataset and not 'test' in test_left_img[inx]:\n from utils.sintel_io import cam_read\n passname = test_left_img[inx].split('/')[-1].split('_')[-4]\n seqname1 = test_left_img[inx].split('/')[-1].split('_')[-3]\n seqname2 = test_left_img[inx].split('/')[-1].split('_')[-2]\n framename = int(test_left_img[inx].split('/')[-1].split('_')[-1].split('.')[0])\n #TODO add second camera\n K0,_ = cam_read('/data/gengshay/tf_depth/sintel-data/training/camdata_left/%s_%s/frame_%04d.cam'%(seqname1, seqname2, framename+1))\n K1,_ = cam_read('/data/gengshay/tf_depth/sintel-data/training/camdata_left/%s_%s/frame_%04d.cam'%(seqname1, seqname2, framename+2))\n fl = K0[0,0]\n cx = K0[0,2]\n cy = K0[1,2]\n fl_next = K1[0,0]\n bl = 0.1\n intr_list = [torch.Tensor(inxx).cuda() for inxx in [[fl],[cx],[cy],[bl],[1],[0],[0],[1],[0],[0]]]\n elif 'seq' in args.dataset:\n fl,cx,cy = seqcalib[inx]\n bl = 1\n fl_next = fl\n K0 = np.eye(3)\n K0[0,0] = fl\n K0[1,1] = fl\n K0[0,2] = cx\n K0[1,2] = cy\n K1 = K0\n intr_list = [torch.Tensor(inxx).cuda() for inxx in [[fl],[cx],[cy],[bl],[1],[0],[0],[1],[0],[0]]]\n else:\n print('NOT using given intrinsics')\n fl = min(input_size[0], input_size[1]) *2\n fl_next = fl\n cx = input_size[1]/2.\n cy = input_size[0]/2.\n bl = 1\n K0 = np.eye(3)\n K0[0,0] = fl\n K0[1,1] = fl\n K0[0,2] = cx\n K0[1,2] = cy\n K1 = K0\n intr_list = [torch.Tensor(inxx).cuda() for inxx in [[fl],[cx],[cy],[bl],[1],[0],[0],[1],[0],[0]]]\n intr_list.append(torch.Tensor([input_size[1] / max_w]).cuda()) # delta fx\n intr_list.append(torch.Tensor([input_size[0] / max_h]).cuda()) # delta fy\n intr_list.append(torch.Tensor([fl_next]).cuda())\n \n disc_aux = [None,None,None,intr_list,imgL_noaug,None]\n \n if args.disp_path=='': disp_input=None\n else:\n try:\n disp_input = disparity_loader('%s/%s_disp.pfm'%(args.disp_path,idxname)) \n except:\n disp_input = disparity_loader('%s/%s.png'%(args.disp_path,idxname))\n disp_input = torch.Tensor(disp_input.copy())[np.newaxis,np.newaxis].cuda()\n\n # forward\n imgL = Variable(torch.FloatTensor(imgL).cuda())\n imgR = Variable(torch.FloatTensor(imgR).cuda())\n with torch.no_grad():\n imgLR = torch.cat([imgL,imgR],0)\n model.eval()\n 
torch.cuda.synchronize()\n start_time = time.time()\n rts = model(imgLR, disc_aux, disp_input)\n torch.cuda.synchronize()\n ttime = (time.time() - start_time); print('time = %.2f' % (ttime*1000) )\n ttime_all.append(ttime)\n flow, occ, logmid, logexp, fgmask, heatmap, polarmask, disp = rts\n bbox = polarmask['bbox']\n polarmask = polarmask['mask']\n polarcontour = polarmask[:polarmask.shape[0]//2] \n polarmask = polarmask[polarmask.shape[0]//2:]\n\n # upsampling\n occ = cv2.resize(occ.data.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)\n logexp = cv2.resize(logexp.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)\n logmid = cv2.resize(logmid.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)\n fgmask = cv2.resize(fgmask.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)\n heatmap= cv2.resize(heatmap.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)\n polarcontour= cv2.resize(polarcontour, (input_size[1],input_size[0]),interpolation=cv2.INTER_NEAREST)\n polarmask= cv2.resize(polarmask, (input_size[1],input_size[0]),interpolation=cv2.INTER_NEAREST).astype(int)\n polarmask[np.logical_and(fgmask>0,polarmask==0)]=-1\n if args.disp_path=='':\n disp= cv2.resize(disp.cpu().numpy(), (input_size[1],input_size[0]),interpolation=cv2.INTER_LINEAR)\n else:\n disp = np.asarray(disp_input.cpu())[0,0]\n flow = torch.squeeze(flow).data.cpu().numpy()\n flow = np.concatenate( [cv2.resize(flow[0],(input_size[1],input_size[0]))[:,:,np.newaxis],\n cv2.resize(flow[1],(input_size[1],input_size[0]))[:,:,np.newaxis]],-1)\n flow[:,:,0] *= imgL_o.shape[1] / max_w\n flow[:,:,1] *= imgL_o.shape[0] / max_h\n flow = np.concatenate( (flow, np.ones([flow.shape[0],flow.shape[1],1])),-1)\n bbox[:,0] *= imgL_o.shape[1] / max_w\n bbox[:,2] *= imgL_o.shape[1] / max_w\n bbox[:,1] *= imgL_o.shape[0] / max_h\n bbox[:,3] *= imgL_o.shape[0] / max_h\n \n # draw instance center and motion in 2D\n ins_center_vis = np.zeros(flow.shape[:2])\n for k in range(bbox.shape[0]):\n from utils.detlib import draw_umich_gaussian\n draw_umich_gaussian(ins_center_vis, bbox[k,:4].reshape(2,2).mean(0), 15)\n ins_center_vis = 256*np.stack([ins_center_vis, np.zeros(ins_center_vis.shape), np.zeros(ins_center_vis.shape)],-1)\n if args.refine:\n ## depth and scene flow estimation\n # save initial disp and flow\n init_disp = disp.copy()\n init_flow = flow.copy()\n init_logmid = logmid.copy()\n\n if args.mask_path == '':\n mask_input = polarmask\n else:\n mask_input = cv2.imread('%s/%s.png'%(args.mask_path,idxname),0)\n if mask_input is None:\n mask_input = cv2.imread('%s/%s.png'%(args.mask_path,idxname.split('_')[0]),0)\n \n bgmask = (mask_input == 0) \n scene_type, T01_c, R01,RTs = ddlib.rb_fitting(bgmask,mask_input,disp,flow,occ,K0,K1,bl,parallax_th=4,mono=(args.sensor=='mono'), sintel='Sintel' in idxname)\n print('camera trans: '); print(T01_c)\n disp,flow,disp1 = ddlib.mod_flow(bgmask,mask_input,disp,disp/np.exp(logmid),flow,occ,bl,K0,K1,scene_type, T01_c,R01, RTs, fgmask,mono=(args.sensor=='mono'), sintel='Sintel' in idxname)\n logmid = np.clip(np.log(disp / disp1),-1,1)\n\n # draw ego vehicle\n ct = [4*input_size[0]//5,input_size[1]//2][::-1] \n cv2.circle(ins_center_vis, tuple(ct), radius=10,color=(0,255,255),thickness=10)\n obj_3d = K0[0,0]*bl/np.median(disp[bgmask]) * np.linalg.inv(K0).dot(np.hstack([ct,np.ones(1)]))\n obj_3d2 = obj_3d + (-R01.T.dot(T01_c))\n ed = K0.dot(obj_3d2)\n ed = (ed[:2]/ed[-1]).astype(int)\n 
if args.sensor=='mono':\n direct = (ed - ct)\n direct = 50*direct/(1e-9+np.linalg.norm(direct))\n else:\n direct = (ed - ct)\n ed = (ct+direct).astype(int)\n if np.linalg.norm(direct)>1:\n ins_center_vis = cv2.arrowedLine(ins_center_vis, tuple(ct), tuple(ed), (0,255,255),6,tipLength=float(30./np.linalg.norm(direct)))\n\n # draw each object\n for k in range(mask_input.max()):\n try:\n obj_mask = mask_input==k+1\n if obj_mask.sum()==0:continue\n ct = np.asarray(np.nonzero(obj_mask)).mean(1).astype(int)[::-1] # Nx2\n cv2.circle(ins_center_vis, tuple(ct), radius=5,color=(255,0,0),thickness=5)\n if RTs[k] is not None:\n #ins_center_vis[mask_input==k+1] = imgL_o[mask_input==k+1]\n obj_3d = K0[0,0]*bl/np.median(disp[mask_input==k+1]) * np.linalg.inv(K0).dot(np.hstack([ct,np.ones(1)]))\n obj_3d2 = obj_3d + (-RTs[k][0].T.dot(RTs[k][1]) )\n ed = K0.dot(obj_3d2)\n ed = (ed[:2]/ed[-1]).astype(int)\n if args.sensor=='mono':\n direct = (ed - ct)\n direct = 50*direct/(np.linalg.norm(direct)+1e-9)\n else:\n direct = (ed - ct)\n ed = (ct+direct).astype(int)\n if np.linalg.norm(direct)>1:\n ins_center_vis = cv2.arrowedLine(ins_center_vis, tuple(ct), tuple(ed), (255,0,0),3,tipLength=float(30./np.linalg.norm(direct))) \n except:pdb.set_trace()\n cv2.imwrite('%s/%s/mvis-%s.jpg'% (args.outdir, args.dataset,idxname), ins_center_vis[:,:,::-1])\n\n # save predictions\n with open('%s/%s/flo-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,flow[::-1].astype(np.float32))\n flowvis = point_vec(imgL_o, flow)\n cv2.imwrite('%s/%s/visflo-%s.jpg'% (args.outdir, args.dataset,idxname),flowvis)\n imwarped = ddlib.warp_flow(imgR_o, flow[:,:,:2])\n cv2.imwrite('%s/%s/warp-%s.jpg'% (args.outdir, args.dataset,idxname),imwarped[:,:,::-1])\n cv2.imwrite('%s/%s/warpt-%s.jpg'% (args.outdir, args.dataset,idxname),imgL_o[:,:,::-1])\n cv2.imwrite('%s/%s/warps-%s.jpg'% (args.outdir, args.dataset,idxname),imgR_o[:,:,::-1])\n with open('%s/%s/occ-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,occ[::-1].astype(np.float32))\n with open('%s/%s/exp-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,logexp[::-1].astype(np.float32))\n with open('%s/%s/mid-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,logmid[::-1].astype(np.float32))\n with open('%s/%s/fg-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,fgmask[::-1].astype(np.float32))\n with open('%s/%s/hm-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,heatmap[::-1].astype(np.float32))\n with open('%s/%s/pm-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,polarmask[::-1].astype(np.float32))\n ddlib.write_calib(K0,bl,polarmask.shape, K0[0,0]*bl / (np.median(disp)/5),\n '%s/%s/calib-%s.txt'% (args.outdir, args.dataset,idxname))\n \n # submit to KITTI benchmark\n if 'test' in args.dataset:\n outdir = 'benchmark_output'\n # kitti scene flow\n import skimage.io\n skimage.io.imsave('%s/disp_0/%s.png'% (outdir,idxname),(disp*256).astype('uint16'))\n skimage.io.imsave('%s/disp_1/%s.png'% (outdir,idxname),(disp1*256).astype('uint16'))\n flow[:,:,2]=1.\n write_flow( '%s/flow/%s.png'% (outdir,idxname.split('.')[0]),flow)\n\n # save visualizations\n with open('%s/%s/disp-%s.pfm'% (args.outdir, args.dataset,idxname),'w') as f:\n save_pfm(f,disp[::-1].astype(np.float32))\n\n try:\n # point clouds\n from utils.fusion import pcwrite\n hp2d0 = np.concatenate( [np.tile(np.arange(0, input_size[1]).reshape(1,-1),(input_size[0],1)).astype(float)[None], # 1,2,H,W\n 
np.tile(np.arange(0, input_size[0]).reshape(-1,1),(1,input_size[1])).astype(float)[None],\n np.ones(input_size[:2])[None]], 0).reshape(3,-1)\n hp2d1 = hp2d0.copy()\n hp2d1[:2] += np.transpose(flow,[2,0,1])[:2].reshape(2,-1)\n p3d0 = (K0[0,0]*bl/disp.flatten()) * np.linalg.inv(K0).dot(hp2d0)\n p3d1 = (K0[0,0]*bl/disp1.flatten()) * np.linalg.inv(K1).dot(hp2d1)\n def write_pcs(points3d, imgL_o,mask_input,path):\n # remove some points\n points3d = points3d.T.reshape(input_size[:2]+(3,))\n points3d[points3d[:,:,-1]>np.median(points3d[:,:,-1])*5]=0\n #points3d[:2*input_size[0]//5] = 0. # KITTI\n points3d = np.concatenate([points3d, imgL_o],-1)\n validid = np.linalg.norm(points3d[:,:,:3],2,-1) >0\n bgidx = np.logical_and(validid, mask_input==0)\n fgidx = np.logical_and(validid, mask_input>0)\n pcwrite(path.replace('/pc', '/fgpc'), points3d[fgidx])\n pcwrite(path.replace('/pc', '/bgpc'), points3d[bgidx])\n pcwrite(path, points3d[validid])\n if inx==0:\n write_pcs(p3d0,imgL_o,mask_input,path='%s/%s/pc0-%s.ply'% (args.outdir, args.dataset,idxname))\n write_pcs(p3d1,imgL_o,mask_input,path='%s/%s/pc1-%s.ply'% (args.outdir, args.dataset,idxname))\n except:pass\n torch.cuda.empty_cache()\n print(np.mean(ttime_all))\n \n \n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.cat",
"numpy.median",
"numpy.tile",
"numpy.exp",
"numpy.mean",
"torch.squeeze",
"torch.load",
"torch.nn.DataParallel",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.log",
"torch.FloatTensor",
"numpy.logical_and",
"numpy.eye",
"numpy.nonzero",
"numpy.transpose",
"numpy.arange",
"torch.Tensor",
"numpy.linalg.inv",
"numpy.zeros",
"torch.cuda.empty_cache",
"numpy.loadtxt",
"torch.cuda.synchronize",
"numpy.asarray",
"torch.no_grad",
"numpy.ones"
]
] |
qermezkon/TextBaseEmotionDetectionWithEnsembleMethod | [
"8de1119a306c980f0b83948dd3858ad702986241"
] | [
"TfIdfFeatureExtraction.py"
] | [
"import numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\nfrom nltk.tokenize import RegexpTokenizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\n\n\ndef encode_label(label):\n le = LabelEncoder()\n label_encoded = le.fit(label).transform(label)\n return label_encoded\n\n\ndef readdata(train_set_path):\n x = []\n y = []\n stop_words = set(stopwords.words('english'))\n with open(train_set_path, encoding=\"utf8\") as infile:\n for line in infile:\n data = []\n data = line.split(\",\")\n stemmer = PorterStemmer()\n if data[1] != \"tweet_id\":\n content = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", data[3].lower())\n toker = RegexpTokenizer(r'((?<=[^\\w\\s])\\w(?=[^\\w\\s])|(\\W))+', gaps=True)\n word_tokens = toker.tokenize(content)\n filtered_sentence = [stemmer.stem(w) for w in word_tokens if not w in stop_words and w.isalpha()]\n x.append(' '.join(filtered_sentence))\n y.append(data[1])\n\n x, y = np.array(x), np.array(y)\n return x, y\n\n\nif __name__ == '__main__':\n print(\"Begin Extract Features ....\")\n dataset_csv = 'D:\\\\My Source Codes\\\\Projects-Python' \\\n '\\\\TextBaseEmotionDetectionWithEnsembleMethod\\\\Dataset\\\\' \\\n 'text_emotion_6class.csv'\n feature_csv = 'D:\\\\My Source Codes\\\\Projects-Python' \\\n '\\\\TextBaseEmotionDetectionWithEnsembleMethod\\\\Dataset\\\\' \\\n 'tfidffeature6cl.csv'\n x, y = readdata(dataset_csv)\n y = encode_label(y)\n features_vectors = pd.DataFrame()\n\n vectorizer = TfidfVectorizer()\n vectorizer.fit(x)\n x_tfidf = vectorizer.transform(x)\n features_vectors = pd.DataFrame(x_tfidf.toarray())\n features_vectors['label'] = y\n features_vectors.to_csv(feature_csv, mode='a', header=False, index=False)"
] | [
[
"pandas.DataFrame",
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
MrBoriska/PathPlanning | [
"8b10dd1262ba82d9906dd96978ec5fe71427dbe4"
] | [
"Sampling_based_Planning/rrt_2D/batch_informed_trees.py"
] | [
"\"\"\"\nBatch Informed Trees (BIT*)\n@author: huiming zhou\n\"\"\"\n\nimport os\nimport sys\nimport math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom scipy.spatial.transform import Rotation as Rot\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) +\n \"/../../\")\n\nfrom Sampling_based_Planning.rrt_2D import env, plotting, utils\n\n\nclass Node:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.parent = None\n\n\nclass Tree:\n def __init__(self, x_start, x_goal):\n self.x_start = x_start\n self.goal = x_goal\n\n self.r = 4.0\n self.V = set()\n self.E = set()\n self.QE = set()\n self.QV = set()\n\n self.V_old = set()\n\n\nclass BITStar:\n def __init__(self, x_start, x_goal, eta, iter_max):\n self.x_start = Node(x_start[0], x_start[1])\n self.x_goal = Node(x_goal[0], x_goal[1])\n self.eta = eta\n self.iter_max = iter_max\n\n self.env = env.Env()\n self.plotting = plotting.Plotting(x_start, x_goal)\n self.utils = utils.Utils()\n\n self.fig, self.ax = plt.subplots()\n\n self.delta = self.utils.delta\n self.x_range = self.env.x_range\n self.y_range = self.env.y_range\n\n self.obs_circle = self.env.obs_circle\n self.obs_rectangle = self.env.obs_rectangle\n self.obs_boundary = self.env.obs_boundary\n\n self.Tree = Tree(self.x_start, self.x_goal)\n self.X_sample = set()\n self.g_T = dict()\n\n def init(self):\n self.Tree.V.add(self.x_start)\n self.X_sample.add(self.x_goal)\n\n self.g_T[self.x_start] = 0.0\n self.g_T[self.x_goal] = np.inf\n\n cMin, theta = self.calc_dist_and_angle(self.x_start, self.x_goal)\n C = self.RotationToWorldFrame(self.x_start, self.x_goal, cMin)\n xCenter = np.array([[(self.x_start.x + self.x_goal.x) / 2.0],\n [(self.x_start.y + self.x_goal.y) / 2.0], [0.0]])\n\n return theta, cMin, xCenter, C\n\n def planning(self):\n theta, cMin, xCenter, C = self.init()\n\n for k in range(500):\n if not self.Tree.QE and not self.Tree.QV:\n if k == 0:\n m = 350\n else:\n m = 200\n\n if self.x_goal.parent is not None:\n path_x, path_y = self.ExtractPath()\n plt.plot(path_x, path_y, linewidth=2, color='r')\n plt.pause(0.5)\n\n self.Prune(self.g_T[self.x_goal])\n self.X_sample.update(self.Sample(m, self.g_T[self.x_goal], cMin, xCenter, C))\n self.Tree.V_old = {v for v in self.Tree.V}\n self.Tree.QV = {v for v in self.Tree.V}\n # self.Tree.r = self.radius(len(self.Tree.V) + len(self.X_sample))\n\n while self.BestVertexQueueValue() <= self.BestEdgeQueueValue():\n self.ExpandVertex(self.BestInVertexQueue())\n\n vm, xm = self.BestInEdgeQueue()\n self.Tree.QE.remove((vm, xm))\n\n if self.g_T[vm] + self.calc_dist(vm, xm) + self.h_estimated(xm) < self.g_T[self.x_goal]:\n actual_cost = self.cost(vm, xm)\n if self.g_estimated(vm) + actual_cost + self.h_estimated(xm) < self.g_T[self.x_goal]:\n if self.g_T[vm] + actual_cost < self.g_T[xm]:\n if xm in self.Tree.V:\n # remove edges\n edge_delete = set()\n for v, x in self.Tree.E:\n if x == xm:\n edge_delete.add((v, x))\n\n for edge in edge_delete:\n self.Tree.E.remove(edge)\n else:\n self.X_sample.remove(xm)\n self.Tree.V.add(xm)\n self.Tree.QV.add(xm)\n\n self.g_T[xm] = self.g_T[vm] + actual_cost\n self.Tree.E.add((vm, xm))\n xm.parent = vm\n\n set_delete = set()\n for v, x in self.Tree.QE:\n if x == xm and self.g_T[v] + self.calc_dist(v, xm) >= self.g_T[xm]:\n set_delete.add((v, x))\n\n for edge in set_delete:\n self.Tree.QE.remove(edge)\n else:\n self.Tree.QE = set()\n self.Tree.QV = set()\n\n if k % 5 == 0:\n self.animation(xCenter, self.g_T[self.x_goal], 
cMin, theta)\n\n path_x, path_y = self.ExtractPath()\n plt.plot(path_x, path_y, linewidth=2, color='r')\n plt.pause(0.01)\n plt.show()\n\n def ExtractPath(self):\n node = self.x_goal\n path_x, path_y = [node.x], [node.y]\n\n while node.parent:\n node = node.parent\n path_x.append(node.x)\n path_y.append(node.y)\n\n return path_x, path_y\n\n def Prune(self, cBest):\n self.X_sample = {x for x in self.X_sample if self.f_estimated(x) < cBest}\n self.Tree.V = {v for v in self.Tree.V if self.f_estimated(v) <= cBest}\n self.Tree.E = {(v, w) for v, w in self.Tree.E\n if self.f_estimated(v) <= cBest and self.f_estimated(w) <= cBest}\n self.X_sample.update({v for v in self.Tree.V if self.g_T[v] == np.inf})\n self.Tree.V = {v for v in self.Tree.V if self.g_T[v] < np.inf}\n\n def cost(self, start, end):\n if self.utils.is_collision(start, end):\n return np.inf\n\n return self.calc_dist(start, end)\n\n def f_estimated(self, node):\n return self.g_estimated(node) + self.h_estimated(node)\n\n def g_estimated(self, node):\n return self.calc_dist(self.x_start, node)\n\n def h_estimated(self, node):\n return self.calc_dist(node, self.x_goal)\n\n def Sample(self, m, cMax, cMin, xCenter, C):\n if cMax < np.inf:\n return self.SampleEllipsoid(m, cMax, cMin, xCenter, C)\n else:\n return self.SampleFreeSpace(m)\n\n def SampleEllipsoid(self, m, cMax, cMin, xCenter, C):\n r = [cMax / 2.0,\n math.sqrt(cMax ** 2 - cMin ** 2) / 2.0,\n math.sqrt(cMax ** 2 - cMin ** 2) / 2.0]\n L = np.diag(r)\n\n ind = 0\n delta = self.delta\n Sample = set()\n\n while ind < m:\n xBall = self.SampleUnitNBall()\n x_rand = np.dot(np.dot(C, L), xBall) + xCenter\n node = Node(x_rand[(0, 0)], x_rand[(1, 0)])\n in_obs = self.utils.is_inside_obs(node)\n in_x_range = self.x_range[0] + delta <= node.x <= self.x_range[1] - delta\n in_y_range = self.y_range[0] + delta <= node.y <= self.y_range[1] - delta\n\n if not in_obs and in_x_range and in_y_range:\n Sample.add(node)\n ind += 1\n\n return Sample\n\n def SampleFreeSpace(self, m):\n delta = self.utils.delta\n Sample = set()\n\n ind = 0\n while ind < m:\n node = Node(random.uniform(self.x_range[0] + delta, self.x_range[1] - delta),\n random.uniform(self.y_range[0] + delta, self.y_range[1] - delta))\n if self.utils.is_inside_obs(node):\n continue\n else:\n Sample.add(node)\n ind += 1\n\n return Sample\n\n def radius(self, q):\n cBest = self.g_T[self.x_goal]\n lambda_X = len([1 for v in self.Tree.V if self.f_estimated(v) <= cBest])\n radius = 2 * self.eta * (1.5 * lambda_X / math.pi * math.log(q) / q) ** 0.5\n\n return radius\n\n def ExpandVertex(self, v):\n self.Tree.QV.remove(v)\n X_near = {x for x in self.X_sample if self.calc_dist(x, v) <= self.Tree.r}\n\n for x in X_near:\n if self.g_estimated(v) + self.calc_dist(v, x) + self.h_estimated(x) < self.g_T[self.x_goal]:\n self.g_T[x] = np.inf\n self.Tree.QE.add((v, x))\n\n if v not in self.Tree.V_old:\n V_near = {w for w in self.Tree.V if self.calc_dist(w, v) <= self.Tree.r}\n\n for w in V_near:\n if (v, w) not in self.Tree.E and \\\n self.g_estimated(v) + self.calc_dist(v, w) + self.h_estimated(w) < self.g_T[self.x_goal] and \\\n self.g_T[v] + self.calc_dist(v, w) < self.g_T[w]:\n self.Tree.QE.add((v, w))\n if w not in self.g_T:\n self.g_T[w] = np.inf\n\n def BestVertexQueueValue(self):\n if not self.Tree.QV:\n return np.inf\n\n return min(self.g_T[v] + self.h_estimated(v) for v in self.Tree.QV)\n\n def BestEdgeQueueValue(self):\n if not self.Tree.QE:\n return np.inf\n\n return min(self.g_T[v] + self.calc_dist(v, x) + self.h_estimated(x)\n for v, x 
in self.Tree.QE)\n\n def BestInVertexQueue(self):\n if not self.Tree.QV:\n print(\"QV is Empty!\")\n return None\n\n v_value = {v: self.g_T[v] + self.h_estimated(v) for v in self.Tree.QV}\n\n return min(v_value, key=v_value.get)\n\n def BestInEdgeQueue(self):\n if not self.Tree.QE:\n print(\"QE is Empty!\")\n return None\n\n e_value = {(v, x): self.g_T[v] + self.calc_dist(v, x) + self.h_estimated(x)\n for v, x in self.Tree.QE}\n\n return min(e_value, key=e_value.get)\n\n @staticmethod\n def SampleUnitNBall():\n while True:\n x, y = random.uniform(-1, 1), random.uniform(-1, 1)\n if x ** 2 + y ** 2 < 1:\n return np.array([[x], [y], [0.0]])\n\n @staticmethod\n def RotationToWorldFrame(x_start, x_goal, L):\n a1 = np.array([[(x_goal.x - x_start.x) / L],\n [(x_goal.y - x_start.y) / L], [0.0]])\n e1 = np.array([[1.0], [0.0], [0.0]])\n M = a1 @ e1.T\n U, _, V_T = np.linalg.svd(M, True, True)\n C = U @ np.diag([1.0, 1.0, np.linalg.det(U) * np.linalg.det(V_T.T)]) @ V_T\n\n return C\n\n @staticmethod\n def calc_dist(start, end):\n return math.hypot(start.x - end.x, start.y - end.y)\n\n @staticmethod\n def calc_dist_and_angle(node_start, node_end):\n dx = node_end.x - node_start.x\n dy = node_end.y - node_start.y\n return math.hypot(dx, dy), math.atan2(dy, dx)\n\n def animation(self, xCenter, cMax, cMin, theta):\n plt.cla()\n self.plot_grid(\"Batch Informed Trees (BIT*)\")\n\n plt.gcf().canvas.mpl_connect(\n 'key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\n\n for v in self.X_sample:\n plt.plot(v.x, v.y, marker='.', color='lightgrey', markersize='2')\n\n if cMax < np.inf:\n self.draw_ellipse(xCenter, cMax, cMin, theta)\n\n for v, w in self.Tree.E:\n plt.plot([v.x, w.x], [v.y, w.y], '-g')\n\n plt.pause(0.001)\n\n def plot_grid(self, name):\n for (ox, oy, w, h) in self.obs_boundary:\n self.ax.add_patch(\n patches.Rectangle(\n (ox, oy), w, h,\n edgecolor='black',\n facecolor='black',\n fill=True\n )\n )\n\n for (ox, oy, w, h) in self.obs_rectangle:\n self.ax.add_patch(\n patches.Rectangle(\n (ox, oy), w, h,\n edgecolor='black',\n facecolor='gray',\n fill=True\n )\n )\n\n for (ox, oy, r) in self.obs_circle:\n self.ax.add_patch(\n patches.Circle(\n (ox, oy), r,\n edgecolor='black',\n facecolor='gray',\n fill=True\n )\n )\n\n plt.plot(self.x_start.x, self.x_start.y, \"bs\", linewidth=3)\n plt.plot(self.x_goal.x, self.x_goal.y, \"rs\", linewidth=3)\n\n plt.title(name)\n plt.axis(\"equal\")\n\n @staticmethod\n def draw_ellipse(x_center, c_best, dist, theta):\n a = math.sqrt(c_best ** 2 - dist ** 2) / 2.0\n b = c_best / 2.0\n angle = math.pi / 2.0 - theta\n cx = x_center[0]\n cy = x_center[1]\n t = np.arange(0, 2 * math.pi + 0.1, 0.2)\n x = [a * math.cos(it) for it in t]\n y = [b * math.sin(it) for it in t]\n rot = Rot.from_euler('z', -angle).as_dcm()[0:2, 0:2]\n fx = rot @ np.array([x, y])\n px = np.array(fx[0, :] + cx).flatten()\n py = np.array(fx[1, :] + cy).flatten()\n plt.plot(cx, cy, marker='.', color='darkorange')\n plt.plot(px, py, linestyle='--', color='darkorange', linewidth=2)\n\n\ndef main():\n x_start = (18, 8) # Starting node\n x_goal = (37, 18) # Goal node\n eta = 2\n iter_max = 200\n print(\"start!!!\")\n bit = BITStar(x_start, x_goal, eta, iter_max)\n # bit.animation(\"Batch Informed Trees (BIT*)\")\n bit.planning()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"numpy.linalg.det",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.cla",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.arange",
"matplotlib.pyplot.pause",
"numpy.linalg.svd",
"matplotlib.patches.Circle",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"numpy.diag",
"matplotlib.pyplot.axis"
]
] |
botev/kernel-gof | [
"56efe2db76d0c2398d896f2c7c7ebde301698b92"
] | [
"kgof/plot.py"
] | [
"\"\"\"Module containing convenient functions for plotting\"\"\"\n\n__author__ = 'wittawat'\n\nimport kgof.glo as glo\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport autograd.numpy as np\n\n\ndef get_func_tuples():\n \"\"\"\n Return a list of tuples where each tuple is of the form\n (func_name used in the experiments, label name, plot line style)\n \"\"\"\n func_tuples = [\n ('job_fssdJ1q_med', 'FSSD-rand J1', 'r--^'),\n ('job_fssdJ5q_med', 'FSSD-rand', 'r--^'),\n ('job_fssdq_med', 'FSSD-rand', 'r--^'),\n\n ('job_fssdJ1q_opt', 'FSSD-opt J1', 'r-s'),\n ('job_fssdq_opt', 'FSSD-opt', 'r-s'),\n ('job_fssdJ5q_opt', 'FSSD-opt', 'r-s'),\n ('job_fssdJ5q_imq_optv', 'FSSD-IMQv', 'k-h'),\n ('job_fssdJ5q_imqb1_optv', 'FSSD-IMQ-1', 'k--s'),\n ('job_fssdJ5q_imqb2_optv', 'FSSD-IMQ-2', 'k-->'),\n ('job_fssdJ5q_imqb3_optv', 'FSSD-IMQ-3', 'k-.*'),\n ('job_fssdJ5q_imq_opt', 'FSSD-IMQ', 'y-x'),\n ('job_fssdJ5q_imq_optbv', 'FSSD-IMQ-bv', 'y--d'),\n ('job_fssdJ10q_opt', 'FSSD-opt', 'k-s'),\n\n ('job_fssdJ5p_opt', 'FSSD-opt J5', 'm-s'),\n ('job_fssdp_opt', 'FSSDp-opt', 'm-s'),\n ('job_fssdJ10p_opt', 'FSSDp-opt J10', 'k-s'),\n\n ('job_fssdJ1q_opt2', 'FSSD-opt2 J1', 'b-^'),\n ('job_fssdJ5q_opt2', 'FSSD-opt2 J5', 'r-^'),\n ('job_me_opt', 'ME-opt', 'b-d'),\n\n ('job_kstein_med', 'KSD', 'g-o'),\n ('job_kstein_imq', 'KSD-IMQ', 'c-*'),\n ('job_lin_kstein_med', 'LKS', 'g-.h'),\n ('job_mmd_med', 'MMD', 'm--^'),\n ('job_mmd_opt', 'MMD-opt', 'm-<'),\n ('job_mmd_dgauss_opt', 'MMD-dopt', 'y-<'),\n ]\n return func_tuples\n\ndef set_default_matplotlib_options():\n # font options\n font = {\n # 'family' : 'normal',\n #'weight' : 'bold',\n 'size' : 30\n }\n matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\n\n\n # matplotlib.use('cairo')\n matplotlib.rc('text', usetex=True)\n matplotlib.rcParams['text.usetex'] = True\n plt.rc('font', **font)\n plt.rc('lines', linewidth=3, markersize=10)\n # matplotlib.rcParams['ps.useafm'] = True\n # matplotlib.rcParams['pdf.use14corefonts'] = True\n\n matplotlib.rcParams['pdf.fonttype'] = 42\n matplotlib.rcParams['ps.fonttype'] = 42\n\ndef get_func2label_map():\n # map: job_func_name |-> plot label\n func_tuples = get_func_tuples()\n #M = {k:v for (k,v) in zip(func_names, labels)}\n M = {k:v for (k,v,_) in func_tuples}\n return M\n\n\ndef func_plot_fmt_map():\n \"\"\"\n Return a map from job function names to matplotlib plot styles \n \"\"\"\n # line_styles = ['o-', 'x-', '*-', '-_', 'D-', 'h-', '+-', 's-', 'v-', \n # ',-', '1-']\n func_tuples = get_func_tuples()\n M = {k:v for (k, _, v) in func_tuples}\n return M\n\n\ndef plot_2d_data(pdata):\n \"\"\"\n pdata: an instance of PairedData\n Return a figure handle\n \"\"\"\n X, Y = pdata.xy()\n n, d = X.shape \n if d != 2:\n raise ValueError('d must be 2 to plot.') \n # plot\n fig = plt.figure()\n plt.plot(X, Y, 'ob')\n plt.title(pdata.label)\n return fig\n\nclass PlotValues(object):\n \"\"\"\n An object encapsulating values of a plot where there are many curves, \n each corresponding to one method, with common x-axis values.\n \"\"\"\n def __init__(self, xvalues, methods, plot_matrix):\n \"\"\"\n xvalues: 1d numpy array of x-axis values\n methods: a list of method names\n plot_matrix: len(methods) x len(xvalues) 2d numpy array containing \n values that can be used to plot\n \"\"\"\n self.xvalues = xvalues\n self.methods = methods\n self.plot_matrix = plot_matrix\n\n def ascii_table(self, tablefmt=\"pipe\"):\n \"\"\"\n Return an ASCII string representation of the table.\n\n tablefmt: \"plain\", \"fancy_grid\", 
\"grid\", \"simple\" might be useful.\n \"\"\"\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)\n\n# end of class PlotValues\n\ndef plot_prob_reject(ex, fname, func_xvalues, xlabel, func_title=None, \n return_plot_values=False):\n \"\"\"\n plot the empirical probability that the statistic is above the threshold.\n This can be interpreted as type-1 error (when H0 is true) or test power \n (when H1 is true). The plot is against the specified x-axis.\n\n - ex: experiment number \n - fname: file name of the aggregated result\n - func_xvalues: function taking aggregated results dictionary and return the values \n to be used for the x-axis values. \n - xlabel: label of the x-axis. \n - func_title: a function: results dictionary -> title of the plot\n - return_plot_values: if true, also return a PlotValues as the second\n output value.\n\n Return loaded results\n \"\"\"\n #from IPython.core.debugger import Tracer \n #Tracer()()\n\n results = glo.ex_load_result(ex, fname)\n\n def rej_accessor(jr):\n rej = jr['test_result']['h0_rejected']\n # When used with vectorize(), making the value float will make the resulting \n # numpy array to be of float. nan values can be stored.\n return float(rej)\n\n #value_accessor = lambda job_results: job_results['test_result']['h0_rejected']\n vf_pval = np.vectorize(rej_accessor)\n # results['job_results'] is a dictionary: \n # {'test_result': (dict from running perform_test(te) '...':..., }\n rejs = vf_pval(results['job_results'])\n repeats, _, n_methods = results['job_results'].shape\n\n # yvalues (corresponding to xvalues) x #methods\n mean_rejs = np.mean(rejs, axis=0)\n #print mean_rejs\n #std_pvals = np.std(rejs, axis=0)\n #std_pvals = np.sqrt(mean_rejs*(1.0-mean_rejs))\n\n xvalues = func_xvalues(results)\n\n #ns = np.array(results[xkey])\n #te_proportion = 1.0 - results['tr_proportion']\n #test_sizes = ns*te_proportion\n line_styles = func_plot_fmt_map()\n method_labels = get_func2label_map()\n \n func_names = [f.__name__ for f in results['method_job_funcs'] ]\n plotted_methods = []\n for i in range(n_methods): \n te_proportion = 1.0 - results['tr_proportion']\n fmt = line_styles[func_names[i]]\n #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])\n method_label = method_labels[func_names[i]]\n plotted_methods.append(method_label)\n plt.plot(xvalues, mean_rejs[:, i], fmt, label=method_label)\n '''\n else:\n # h0 is true \n z = stats.norm.isf( (1-confidence)/2.0)\n for i in range(n_methods):\n phat = mean_rejs[:, i]\n conf_iv = z*(phat*(1-phat)/repeats)**0.5\n #plt.errorbar(test_sizes, phat, conf_iv, fmt=line_styles[i], label=method_labels[i])\n plt.plot(test_sizes, mean_rejs[:, i], line_styles[i], label=method_labels[i])\n '''\n \n ylabel = 'Rejection rate'\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.xticks(np.hstack((xvalues) ))\n \n alpha = results['alpha']\n plt.legend(loc='best')\n title = '%s. %d trials. 
$\\\\alpha$ = %.2g.'%( results['prob_label'],\n repeats, alpha) if func_title is None else func_title(results)\n plt.title(title)\n plt.grid()\n if return_plot_values:\n return results, PlotValues(xvalues=xvalues, methods=plotted_methods,\n plot_matrix=mean_rejs.T)\n else:\n return results\n \n\ndef plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):\n results = glo.ex_load_result(ex, fname)\n value_accessor = lambda job_results: job_results['time_secs']\n vf_pval = np.vectorize(value_accessor)\n # results['job_results'] is a dictionary: \n # {'test_result': (dict from running perform_test(te) '...':..., }\n times = vf_pval(results['job_results'])\n repeats, _, n_methods = results['job_results'].shape\n time_avg = np.mean(times, axis=0)\n time_std = np.std(times, axis=0)\n\n xvalues = func_xvalues(results)\n\n #ns = np.array(results[xkey])\n #te_proportion = 1.0 - results['tr_proportion']\n #test_sizes = ns*te_proportion\n line_styles = func_plot_fmt_map()\n method_labels = get_func2label_map()\n \n func_names = [f.__name__ for f in results['method_job_funcs'] ]\n for i in range(n_methods): \n te_proportion = 1.0 - results['tr_proportion']\n fmt = line_styles[func_names[i]]\n #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])\n method_label = method_labels[func_names[i]]\n plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,\n label=method_label)\n \n ylabel = 'Time (s)'\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.xlim([np.min(xvalues), np.max(xvalues)])\n plt.xticks( xvalues, xvalues )\n plt.legend(loc='best')\n plt.gca().set_yscale('log')\n title = '%s. %d trials. '%( results['prob_label'],\n repeats ) if func_title is None else func_title(results)\n plt.title(title)\n #plt.grid()\n return results\n\n\ndef box_meshgrid(func, xbound, ybound, nx=50, ny=50):\n \"\"\"\n Form a meshed grid (to be used with a contour plot) on a box\n specified by xbound, ybound. Evaluate the grid with [func]: (n x 2) -> n.\n \n - xbound: a tuple (xmin, xmax)\n - ybound: a tuple (ymin, ymax)\n - nx: number of points to evluate in the x direction\n \n return XX, YY, ZZ where XX is a 2D nd-array of size nx x ny\n \"\"\"\n \n # form a test location grid to try \n minx, maxx = xbound\n miny, maxy = ybound\n loc0_cands = np.linspace(minx, maxx, nx)\n loc1_cands = np.linspace(miny, maxy, ny)\n lloc0, lloc1 = np.meshgrid(loc0_cands, loc1_cands)\n # nd1 x nd0 x 2\n loc3d = np.dstack((lloc0, lloc1))\n # #candidates x 2\n all_loc2s = np.reshape(loc3d, (-1, 2) )\n # evaluate the function\n func_grid = func(all_loc2s)\n func_grid = np.reshape(func_grid, (ny, nx))\n \n assert lloc0.shape[0] == ny\n assert lloc0.shape[1] == nx\n assert np.all(lloc0.shape == lloc1.shape)\n \n return lloc0, lloc1, func_grid\n\ndef get_density_cmap():\n \"\"\"\n Return a colormap for plotting the model density p.\n Red = high density \n white = very low density.\n Varying from white (low) to red (high).\n \"\"\"\n # Add completely white color to Reds colormap in Matplotlib\n list_colors = plt.cm.datad['Reds']\n list_colors = list(list_colors)\n list_colors.insert(0, (1, 1, 1))\n list_colors.insert(0, (1, 1, 1))\n lscm = matplotlib.colors.LinearSegmentedColormap.from_list(\"my_Reds\", list_colors)\n return lscm\n"
] | [
[
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xticks",
"matplotlib.colors.LinearSegmentedColormap.from_list"
]
] |
EMK-Lab/rec_to_nwb | [
"d3550b4c1df9629a3687fa5a9208d142c9c7c7fb"
] | [
"rec_to_nwb/processing/nwb/components/position/pos_timestamp_manager.py"
] | [
"import logging.config\nimport os\n\nimport pandas as pd\nfrom rec_to_binaries.read_binaries import readTrodesExtractedDataFile\n\nfrom rec_to_nwb.processing.nwb.common.timestamps_manager import TimestampManager\n\npath = os.path.dirname(os.path.abspath(__file__))\n\nlogging.config.fileConfig(fname=str(path) + '/../../../../logging.conf', disable_existing_loggers=False)\nlogger = logging.getLogger(__name__)\n\n\nclass PosTimestampManager(TimestampManager):\n def __init__(self, directories, continuous_time_directories):\n TimestampManager.__init__(self, directories, continuous_time_directories)\n\n # override\n def _get_timestamps(self, dataset_id):\n pos_online = readTrodesExtractedDataFile(self.directories[dataset_id][0])\n position = pd.DataFrame(pos_online['data'])\n return position.time.to_numpy(dtype='int64')\n"
] | [
[
"pandas.DataFrame"
]
] |
ndaidong/tf-ssd-mobilenet | [
"3d6082178b018d9e02c6044d562ef05ca2021899"
] | [
"make_tfrecord.py"
] | [
"#!/usr/bin/env python3\n\nimport glob\nimport argparse\nimport sys\nimport hashlib\nimport io\n\nfrom os import path, mkdir, remove\nfrom shutil import rmtree\nfrom random import shuffle\nfrom lxml import etree\n\nfrom funcy import compose\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport tensorflow as tf\n\nfrom tflib.object_detection.utils import dataset_util\nfrom tflib.object_detection.utils import label_map_util\n\n\ndef get_default_label_map():\n return 'configs/label_map.pbtxt'\n\n\ndef get_default_data_dir():\n return 'temp/data'\n\n\ndef get_default_extract_count():\n return 100\n\n\ndef get_default_split_ratio():\n return 0.1\n\n\ndef create_example(entry, label_map_dict):\n img_path = entry[0]\n label_path = entry[1]\n try:\n with tf.gfile.GFile(label_path, 'r') as fid:\n xml_str = bytes(bytearray(fid.read(), encoding='utf-8'))\n xml = etree.fromstring(xml_str)\n data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']\n\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width, height = image.size\n width = int(width)\n height = int(height)\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n if 'object' in data:\n for obj in data['object']:\n difficult_obj.append(int(0))\n\n _xmin = max(float(obj['bndbox']['xmin']), 0)\n _ymin = max(float(obj['bndbox']['ymin']), 0)\n _xmax = min(float(obj['bndbox']['xmax']), width)\n _ymax = min(float(obj['bndbox']['ymax']), height)\n\n xmin.append(_xmin / width)\n ymin.append(_ymin / height)\n xmax.append(_xmax / width)\n ymax.append(_ymax / height)\n\n class_name = obj['name']\n classes_text.append(class_name.encode('utf8'))\n classes.append(label_map_dict[class_name])\n truncated.append(int(0))\n poses.append('Unspecified'.encode('utf8'))\n\n return tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')\n ),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')\n ),\n 'image/key/sha256': dataset_util.bytes_feature(\n key.encode('utf8')\n ),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature(\n 'jpeg'.encode('utf8')\n ),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(\n xmin\n ),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(\n xmax\n ),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(\n ymin\n ),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(\n ymax\n ),\n 'image/object/class/text': dataset_util.bytes_list_feature(\n classes_text\n ),\n 'image/object/class/label': dataset_util.int64_list_feature(\n classes\n ),\n 'image/object/difficult': dataset_util.int64_list_feature(\n difficult_obj\n ),\n 'image/object/truncated': dataset_util.int64_list_feature(\n truncated\n ),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n except ValueError as err:\n print(img_path)\n print(label_path)\n print(err)\n return None\n\n\ndef select(count):\n def get_subset(arr):\n shuffle(arr)\n max_size = min(count, len(arr))\n return arr[:max_size]\n return get_subset\n\n\ndef handle(files):\n arr = []\n 
for i in range(len(files)):\n imagesrc = str(files[i])\n xml_file = imagesrc.replace('images/', 'labels/')\n xml_file = xml_file.replace('.jpg', '.xml')\n if path.isfile(xml_file):\n arr.append([imagesrc, xml_file])\n return arr\n\n\ndef check(d):\n files = []\n if path.isdir(d):\n files = glob.glob(d + '/images/*.jpg')\n return files\n\n\ndef load(d, count):\n return compose(select(count), handle, check)(d)\n\n\ndef process(entries, output_dir, label_map, split_ratio):\n rat = float(split_ratio)\n if rat >= 1 or rat < 0:\n rat = get_default_split_ratio()\n total = len(entries)\n test_size = round(rat * total)\n training_size = total - test_size\n print('test/train/total {}/{}/{}'.format(test_size, training_size, total))\n\n test_set = entries[:test_size]\n training_set = entries[test_size:]\n\n label_map_dict = label_map_util.get_label_map_dict(label_map)\n print(label_map_dict)\n\n tfwriter = tf.python_io.TFRecordWriter\n\n print('Handling training set ({})'.format(training_size))\n train_writer = tfwriter(output_dir + '/train.record')\n for entry in tqdm(training_set):\n try:\n exp = create_example(entry, label_map_dict)\n if exp is not None:\n train_writer.write(exp.SerializeToString())\n except ValueError as err:\n print(err)\n continue\n train_writer.close()\n\n print('Handling test set ({})'.format(test_size))\n test_writer = tfwriter(output_dir + '/eval.record')\n for entry in tqdm(test_set):\n try:\n exp = create_example(entry, label_map_dict)\n if exp is not None:\n test_writer.write(exp.SerializeToString())\n except ValueError as err:\n print(err)\n continue\n test_writer.close()\n\n\ndef preload(input_dir, extracting_count, output_dir, label_map, split_ratio):\n if path.exists(output_dir):\n rmtree(output_dir)\n mkdir(output_dir)\n files = load(input_dir, int(extracting_count))\n total = len(files)\n\n if total > 0:\n print('Selected {} entries to process'.format(total))\n return process(files, output_dir, label_map, split_ratio)\n else:\n print('No input label & image. Stopped!')\n\n\ndef start():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-d',\n '--dir',\n help='Path to dataset. Default \"../vgg-faces-utils/output\"'\n )\n parser.add_argument(\n '-l',\n '--labelmap',\n help='Path to label map. Default \"configs/label_map.pbtxt\"'\n )\n parser.add_argument(\n '-e',\n '--extract',\n help='How many items? Default 100'\n )\n parser.add_argument(\n '-o',\n '--output',\n help='Path to output dir. Default \"temp/data\"'\n )\n parser.add_argument(\n '-r',\n '--ratio',\n help='Ratio of Training/Test set. Default 0.1 (9 train/1 eval)'\n )\n args = parser.parse_args()\n if not args.dir:\n print('Please specify path to source dir')\n else:\n label_map = args.labelmap\n if label_map is None:\n label_map = get_default_label_map()\n\n count = args.extract\n if count is None:\n count = get_default_extract_count()\n\n odir = args.output\n if odir is None:\n odir = get_default_data_dir()\n\n ratio = args.ratio\n if ratio is None:\n ratio = get_default_split_ratio()\n\n entries = preload(\n path.normpath(args.dir),\n count,\n odir,\n label_map,\n ratio\n )\n\n\nif __name__ == '__main__':\n start()\n\n"
] | [
[
"tensorflow.gfile.GFile"
]
] |
Pratyay360/Mask-Detector | [
"81027c6e8735e9154f4f53b41110e08c45e1c3dc"
] | [
"train_mask_detector.py"
] | [
"# import the necessary packages\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications import MobileNetV2\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing.image import load_img\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n# initialize the initial learning rate, number of epochs to train for,\n# and batch size\nINIT_LR = 1e-4\nEPOCHS = 20\nBS = 32\n## Add your dataset folder path here.\nDIRECTORY = r\"/media/pratyay/Local Disk/coding/python/opencv/Face-Mask-Detection-master/dataset\"\nCATEGORIES = [\"with_mask\", \"without_mask\"]\n\n# grab the list of images in our dataset directory, then initialize\n# the list of data (i.e., images) and class images\nprint(\"[INFO] loading images...\")\n\ndata = []\nlabels = []\n\nfor category in CATEGORIES:\n path = os.path.join(DIRECTORY, category)\n for img in os.listdir(path):\n \timg_path = os.path.join(path, img)\n \timage = load_img(img_path, target_size=(224, 224))\n \timage = img_to_array(image)\n \timage = preprocess_input(image)\n\n \tdata.append(image)\n \tlabels.append(category)\n\n# perform one-hot encoding on the labels\nlb = LabelBinarizer()\nlabels = lb.fit_transform(labels)\nlabels = to_categorical(labels)\n\ndata = np.array(data, dtype=\"float32\")\nlabels = np.array(labels)\n\n(trainX, testX, trainY, testY) = train_test_split(data, labels,\n\ttest_size=0.20, stratify=labels, random_state=42)\n\n# construct the training image generator for data augmentation\naug = ImageDataGenerator(\n\trotation_range=20,\n\tzoom_range=0.15,\n\twidth_shift_range=0.2,\n\theight_shift_range=0.2,\n\tshear_range=0.15,\n\thorizontal_flip=True,\n\tfill_mode=\"nearest\")\n\n# load the MobileNetV2 network, ensuring the head FC layer sets are\n# left off\nbaseModel = MobileNetV2(weights=\"imagenet\", include_top=False,\n\tinput_tensor=Input(shape=(224, 224, 3)))\n\n# construct the head of the model that will be placed on top of the\n# the base model\nheadModel = baseModel.output\nheadModel = AveragePooling2D(pool_size=(7, 7))(headModel)\nheadModel = Flatten(name=\"flatten\")(headModel)\nheadModel = Dense(128, activation=\"relu\")(headModel)\nheadModel = Dropout(0.5)(headModel)\nheadModel = Dense(2, activation=\"softmax\")(headModel)\n\n# place the head FC model on top of the base model (this will become\n# the actual model we will train)\nmodel = Model(inputs=baseModel.input, outputs=headModel)\n\n# loop over all layers in the base model and freeze them so they will\n# *not* be updated during the first training process\nfor layer in baseModel.layers:\n\tlayer.trainable = False\n\n# compile our model\nprint(\"[INFO] compiling model...\")\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,\n\tmetrics=[\"accuracy\"])\n\n# train the head of the network\nprint(\"[INFO] training 
head...\")\nH = model.fit(\n\taug.flow(trainX, trainY, batch_size=BS),\n\tsteps_per_epoch=len(trainX) // BS,\n\tvalidation_data=(testX, testY),\n\tvalidation_steps=len(testX) // BS,\n\tepochs=EPOCHS)\n\n# make predictions on the testing set\nprint(\"[INFO] evaluating network...\")\npredIdxs = model.predict(testX, batch_size=BS)\n\n# for each image in the testing set we need to find the index of the\n# label with corresponding largest predicted probability\npredIdxs = np.argmax(predIdxs, axis=1)\n\n# show a nicely formatted classification report\nprint(classification_report(testY.argmax(axis=1), predIdxs,\n\ttarget_names=lb.classes_))\n\n# serialize the model to disk\nprint(\"[INFO] saving mask detector model...\")\nmodel.save(\"mask_detector.model\", save_format=\"h5\")\n\n# plot the training loss and accuracy\nN = EPOCHS\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, N), H.history[\"accuracy\"], label=\"train_acc\")\nplt.plot(np.arange(0, N), H.history[\"val_accuracy\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend(loc=\"lower left\")\nplt.savefig(\"plot.png\")\n"
] | [
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"numpy.arange",
"tensorflow.keras.optimizers.Adam",
"sklearn.preprocessing.LabelBinarizer",
"numpy.array",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.preprocessing.image.img_to_array",
"matplotlib.pyplot.style.use",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel"
]
] |
DeepRank/DeepRank_VariantPred | [
"42cd85c7a521463ab2b644ef8da87c107a2b5bce"
] | [
"deeprank/features/PSSM/reformat_pssm.py"
] | [
"import sys\n\nimport numpy as np\n\n\ndef write_newfile(names_oldfile, name_newfile):\n\n chainID = {0: 'A', 1: 'B'}\n resconv = {\n 'A': 'ALA',\n 'R': 'ARG',\n 'N'\t: 'ASN',\n 'D': 'ASP',\n 'C': 'CYS',\n 'E': 'GLU',\n 'Q': 'GLN',\n 'G': 'GLY',\n 'H': 'HIS',\n 'I': 'ILE',\n 'L': 'LEU',\n 'K': 'LYS',\n 'M': 'MET',\n 'F': 'PHE',\n 'P': 'PRO',\n 'S': 'SER',\n 'T': 'THR',\n 'W': 'TRP',\n 'Y': 'TYR',\n 'V': 'VAL'\n }\n\n # write the new file\n new_file = open(name_newfile, 'w')\n\n for ifile, f in enumerate(names_oldfile):\n\n # read the file\n f = open(f, 'r')\n data = f.readlines()[4:-6]\n f.close()\n\n # write the new file\n for l in data:\n l = l.split()\n if len(l) > 0:\n\n chain = chainID[ifile]\n feat = '{:>4}'.format(chain)\n\n resNum = l[0]\n feat += '{:>10}'.format(resNum)\n\n resName1 = l[2]\n resName3 = resconv[resName1]\n feat += '{:>10}'.format(resName3)\n\n feat += '\\t'\n values = map(int, l[3:23])\n feat += '\\t'.join(map(\"{: 3d}\".format, values))\n\n feat += '\\n'\n new_file.write(feat)\n\n new_file.close()\n\n\noldfile_dir = '../PSSM/'\n#oldfiles = sp.check_output('ls %s/*PSSM' %(oldfile_dir),shell=True).decode('utf-8').split()\noldfiles = list(filter(lambda x: '.PSSM' in x, os.listdir(oldfile_dir)))\noldfiles = [oldfile_dir + f for f in oldfiles]\nnfile = len(oldfiles)\noldfiles = np.array(oldfiles).reshape(int(nfile / 2), 2).tolist()\n\n\nfor filenames in oldfiles:\n\n print('process files\\n\\t%s\\n\\t%s' % (filenames[0], filenames[1]))\n cplx_name = []\n cplx_name.append(filenames[0].split('/')[-1])\n cplx_name.append(filenames[1].split('/')[-1])\n cplx_name = list(set([cplx_name[0][:4], cplx_name[1][:4]]))\n print(cplx_name)\n if len(cplx_name) > 1:\n print('error' + cplx_name)\n sys.exit()\n\n name_newfile = './' + cplx_name[0] + '.PSSM'\n print('\\nexport to \\t%s\\n' % (name_newfile))\n write_newfile(filenames, name_newfile)\n"
] | [
[
"numpy.array"
]
] |
rombl4/BioPAL | [
"1a990cff76f718463078eae05c5b5279e794d8d3"
] | [
"lib_tdx.py"
] | [
"import sys\nsys.path.append('./dep_gedi_tdx/demgen')\n\nimport gdal\nimport numpy as np\nimport logging\nimport xml.etree.ElementTree as ElementTree\nimport matplotlib.pyplot as plt\nfrom scipy import constants,interpolate,ndimage,special,stats,signal,optimize\nfrom lib import demgen\nfrom pathlib import Path\nfrom lib import geolib\nfrom lib.newtonbackgeo import NewtonBackgeocoder\nimport xml.etree.cElementTree as et\nfrom matplotlib import cm\nfrom numba import jit\nfrom lib.demgen import tandemhandler\nimport os\nimport matplotlib\nimport psutil\nimport lib_profile\nimport lib_plots\nimport struct\nimport lib_filter\n\nimport socket\nif socket.gethostname() == 'hr-slx012':\n matplotlib.use('Agg')\n\n\n#Definition of constants for invalid values\nINVALID_WATER = -1000\nINVALID_SETTLEMENTS = -1001\nINVALID_NON_FOREST = -1002\nINVALID_KZ = -1003\n\n\nclass GEDI_Exception(Exception):\n pass\n\n\n\ndef read_cos(path_image,parameters):\n \"\"\"Read the .cos files from TSX/TDX\n\n The calibration factor is not applied np.sqrt(calFactor)\n\n Parameters\n ----------\n path_image : str\n Complete path of the image to read (without including the folder IMAGEDATA in the path)\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n\n Returns\n -------\n im : 2D numpy array\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n #logging info\n log = logging.getLogger('read_cos')\n\n #generate file name\n file_name = path_image+'/IMAGEDATA/'+'IMAGE_'+parameters['polLayer']+'_'+parameters['antennaReceiveConfiguration']+'_'+parameters['elevationBeamConfiguration']+'.cos'\n\n log.info('Reading '+file_name)\n\n #get the image with gdal as a complex64 (by default using gdal)\n im_gdal = gdal.Open(file_name)\n im = im_gdal.GetRasterBand(1).ReadAsArray()\n\n #Transform the real and imaginary parts as int16 because\n # each pixel is 2 bytes (int16) real + 2 bytes (int16) imaginry\n im_real = np.asarray(im.real,np.int16)\n im_imag = np.asarray(im.imag,np.int16)\n\n #Re-intrept each pxiel as a IEEE 754 half-precision binary floating point\n im_real = np.frombuffer(im_real, np.float16)\n im_imag = np.frombuffer(im_imag, np.float16)\n\n #Reshape to the dimensions of the image\n im_real = im_real.reshape(im.shape)\n im_imag = im_imag.reshape(im.shape)\n\n #generate the complex image\n im = im_real + 1j*im_imag\n\n #to apply the calibration factor (get the value from the xml file 'calfactor') then Multiply im by sqrt(parameters['calFactor'])\n return im\n\ndef get_params_from_xml(path_acquisition):\n \"\"\"Extract information from the xml files of the TDX/TSX images\n\n Parameters\n ----------\n path_acquisition : srt\n String with the complete path of the TDX image\n\n Returns\n -------\n parameters : dict\n Dictionary with parameters of the master image\n parameters_slave : dict\n Dictionary with parameters of the slave image\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('get_params_from_xml')\n log.info('Getting parameters from xml ...')\n\n #define a dictionary output to save the parameters as a dictionary\n parameters = {}\n\n # --- Get paramaters from the global xml ----\n\n #Get xml name from the folder path name\n xml_file = path_acquisition.rsplit('/')[-2]+'.xml'\n #parse the xml\n tree = ElementTree.parse(path_acquisition+xml_file)\n root = tree.getroot()\n\n #### Get the paths of each image\n\n #go until the level of 
each name\n productComponents = root.find(\"productComponents\")\n for component in productComponents.findall(\"component\"):\n if component.attrib['componentClass'] == 'imageData':\n #get the names inside the imagedata component\n for i_name in component.findall(\".//name\"):\n #check if the name corresponds to the folder name (it stats with T)\n if i_name.text[0] == 'T':\n parameters[i_name.text[0:3]+'_name'] = i_name.text\n\n\n ## get active satelite\n parameters['active_sat'] = root.find('.//satelliteID' + root.find('.//inSARmasterID').text.lower()).text[0:3]\n\n #### get the polarization\n parameters['polLayer'] = root.find(\".//polLayer\").text\n\n ### get the antenna receive configuration\n parameters['antennaReceiveConfiguration'] = root.find(\".//antennaReceiveConfiguration\").text\n\n ## get the elevation beam configuration\n parameters['elevationBeamConfiguration'] = root.find(\".//elevationBeamConfiguration\").text\n\n ## Tap Take id: acquisitionItemID appliedToID processingPriority operationsTypeSat1 operationsTypeSat2\n parameters['tap_take_id'] = root.find(\".//TAPtakeID\").text\n \n \n\n # --- Get paramateres from the xml of the active image ----\n\n ### get orbit from the active sensor\n\n #Get xml name from the folder path name\n xml_file = path_acquisition+parameters[parameters['active_sat']+'_name']+'/'+parameters[parameters['active_sat']+'_name']+'.xml'\n #parse the xml\n tree = ElementTree.parse(xml_file)\n root = tree.getroot()\n\n time_gps_start = np.double(root.find(\"productInfo\").find(\"sceneInfo\").find(\"start\").find(\".//timeGPS\").text)\n time_gps_start_fraction = np.double(root.find(\"productInfo\").find(\"sceneInfo\").find(\"start\").find(\".//timeGPSFraction\").text)\n time_ref = time_gps_start + time_gps_start_fraction\n\n time_orbit = []\n posX = []\n posY = []\n posZ = []\n VelX = []\n VelY = []\n VelZ = []\n\n for ElementStateVec in root.find(\"platform\").find(\"orbit\").findall(\"stateVec\"):\n time_orbit.append(np.double(ElementStateVec.find(\".//timeGPS\").text)+np.double(ElementStateVec.find(\".//timeGPSFraction\").text))\n posX.append(np.double(ElementStateVec.find(\".//posX\").text))\n posY.append(np.double(ElementStateVec.find(\".//posY\").text))\n posZ.append(np.double(ElementStateVec.find(\".//posZ\").text))\n VelX.append(np.double(ElementStateVec.find(\".//velX\").text))\n VelY.append(np.double(ElementStateVec.find(\".//velY\").text))\n VelZ.append(np.double(ElementStateVec.find(\".//velZ\").text))\n\n orbit = np.zeros((np.size(time_orbit),7))\n orbit[:,0] = np.asarray(time_orbit) - time_ref\n orbit[:,1] = np.asarray(posX)\n orbit[:,2] = np.asarray(posY)\n orbit[:,3] = np.asarray(posZ)\n orbit[:,4] = np.asarray(VelX)\n orbit[:,5] = np.asarray(VelY)\n orbit[:,6] = np.asarray(VelZ)\n\n parameters['orbit_no_interp'] = orbit\n\n # get range sampling frequency\n parameters['orbit_direction'] = root.find('productInfo').find('missionInfo').find(\".//orbitDirection\").text.lower()\n\n # get range sampling frequency\n parameters['rg_sampling_freq'] = np.double(root.find(\".//commonRSF\").text)\n\n #get range delay of the image\n parameters['rg_delay'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(\".//firstPixel\").text)\n\n #get the sampling of the focused image 'commonPRF'\n parameters['commonPRF'] = np.double(root.find(\".//commonPRF\").text)\n\n #number of rows (azimuth)\n parameters['n_az'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(\".//numberOfRows\").text)\n #number of 
    # get orbit direction
    parameters['orbit_direction'] = root.find('productInfo').find('missionInfo').find(".//orbitDirection").text.lower()

    # get range sampling frequency
    parameters['rg_sampling_freq'] = np.double(root.find(".//commonRSF").text)

    # get range delay of the image
    parameters['rg_delay'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(".//firstPixel").text)

    # get the sampling of the focused image 'commonPRF'
    parameters['commonPRF'] = np.double(root.find(".//commonPRF").text)

    # number of rows (azimuth)
    parameters['n_az'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//numberOfRows").text)
    # number of cols (range)
    parameters['n_rg'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//numberOfColumns").text)

    # row spacing
    parameters['spacing_rg'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//rowSpacing").text)*constants.c/2.0

    # column spacing
    parameters['spacing_az'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//columnSpacing").text)*constants.c/2.0

    # get the ground range spacing
    parameters['groundNear'] = np.double(root.find("productSpecific").find(".//groundNear").text)
    parameters['groundFar'] = np.double(root.find("productSpecific").find(".//groundFar").text)

    # get the azimuth spacing
    parameters['projectedSpacingAzimuth'] = np.double(root.find("productSpecific").find(".//projectedSpacingAzimuth").text)

    # effective velocity at mid range (v0 of TAXI)
    parameters['effective_vel_mid_rg'] = np.sqrt(np.double(root.find('processing').find('geometry').find('velocityParameter').find('velocityParameterPolynomial').find(".//coefficient").text))

    # get range first pixel
    parameters['rg_first_pixel'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(".//firstPixel").text) #* constants.c / 2.0

    # range vector
    parameters['range_vec'] = np.linspace(start=parameters['rg_first_pixel'] * constants.c / 2,
                                          stop=(parameters['n_rg']-1)*parameters['spacing_rg']+(parameters['rg_first_pixel'] * constants.c / 2),
                                          num=int(parameters['n_rg']))

    ## get center of the image
    for i_corner, ElementCorner in enumerate(root.find("productInfo").find("sceneInfo").findall("sceneCenterCoord")):
        parameters['sceneCenter_lat'] = np.double(ElementCorner.find(".//lat").text)
        parameters['sceneCenter_lon'] = np.double(ElementCorner.find(".//lon").text)
        parameters['sceneCenter_incidenceAngle'] = np.double(ElementCorner.find(".//incidenceAngle").text)

    # choose the center of the active satellite
    parameters['center_coord'] = np.asarray([parameters['sceneCenter_lat'], parameters['sceneCenter_lon']])

    # get the look direction
    parameters['look_dir'] = root.find('productInfo').find('acquisitionInfo').find(".//lookDirection").text.lower()

    # center frequency
    parameters['f0'] = np.double(root.find('instrument').find('radarParameters').find(".//centerFrequency").text)

    # range look bandwidth
    parameters['cdw'] = np.double(root.find('processing').find('processingParameter').find(".//rangeLookBandwidth").text)

    # calibration factor
    parameters['calFactor'] = np.double(root.find('calibration').find('calibrationConstant').find(".//calFactor").text)

    # slant range resolution
    parameters['rg_resolution'] = np.double(root.find('productSpecific').find('complexImageInfo').find(".//slantRangeResolution").text)

    # number of noise records
    parameters['n_noise_records'] = np.double(root.find('noise').find(".//numberOfNoiseRecords").text)

    # Get noise parameters
    parameters['noise_utc'] = []
    parameters['validity_range_min'] = []
    parameters['validity_range_max'] = []
    parameters['reference_point'] = []
    for i_element, ElementImageNoise in enumerate(root.find("noise").findall("imageNoise")):
        parameters['noise_utc'].append(ElementImageNoise.find(".//timeUTC").text)
        parameters['validity_range_min'].append(np.double(ElementImageNoise.find(".//validityRangeMin").text))
        parameters['validity_range_max'].append(np.double(ElementImageNoise.find(".//validityRangeMax").text))
        parameters['reference_point'].append(np.double(ElementImageNoise.find(".//referencePoint").text))
        parameters['noise_polynomial_degree'] = np.double(ElementImageNoise.find(".//polynomialDegree").text)
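    # NOTE (added comment): each <imageNoise> record holds a noise-power polynomial
    # that appears to be valid only between validity_range_min and validity_range_max
    # (range-time limits) and expanded around reference_point; the coefficients
    # themselves are collected into the 'noise_coef' matrix below.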
    # get coefficients
    parameters['noise_coef'] = np.zeros((int(parameters['n_noise_records']), int(parameters['noise_polynomial_degree'])+1))
    for i_element, ElementImageNoise in enumerate(root.find("noise").findall("imageNoise")):
        for j_element, ElementNoiseEstimate in enumerate(ElementImageNoise.find('noiseEstimate').findall(".//coefficient")):
            parameters['noise_coef'][i_element, j_element] = np.double(ElementNoiseEstimate.text)

    # start UTC
    parameters['start_utc'] = root.find('productInfo').find('sceneInfo').find('start').find(".//timeUTC").text
    # stop UTC
    parameters['stop_utc'] = root.find('productInfo').find('sceneInfo').find('stop').find(".//timeUTC").text

    parameters['rg_last_pixel'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(".//lastPixel").text)

    #### --- For the four corner coordinates of the master we need to get the info from TDX and TSX -------

    #### TDX #######
    # Get xml name from the folder path name
    xml_file = path_acquisition+parameters['TDX_name']+'/'+parameters['TDX_name']+'.xml'
    # parse the xml
    tree = ElementTree.parse(xml_file)
    root = tree.getroot()
    ## get corners for the image
    for i_corner, ElementCorner in enumerate(root.find("productInfo").find("sceneInfo").findall("sceneCornerCoord")):
        parameters['sceneCorner_lat_'+str(i_corner+1)+'_TDX'] = np.double(ElementCorner.find(".//lat").text)
        parameters['sceneCorner_lon_'+str(i_corner+1)+'_TDX'] = np.double(ElementCorner.find(".//lon").text)

    #### TSX #######
    # Get xml name from the folder path name
    xml_file = path_acquisition+parameters['TSX_name']+'/'+parameters['TSX_name']+'.xml'
    # parse the xml
    tree = ElementTree.parse(xml_file)
    root = tree.getroot()
    ## get corners for the image
    for i_corner, ElementCorner in enumerate(root.find("productInfo").find("sceneInfo").findall("sceneCornerCoord")):
        parameters['sceneCorner_lat_'+str(i_corner+1)+'_TSX'] = np.double(ElementCorner.find(".//lat").text)
        parameters['sceneCorner_lon_'+str(i_corner+1)+'_TSX'] = np.double(ElementCorner.find(".//lon").text)

    ## get corners following the TAXI procedure
    if parameters['sceneCorner_lat_1_'+parameters['active_sat']] > parameters['sceneCorner_lat_3_'+parameters['active_sat']]:
        parameters['sceneCorner_lat_1'] = np.max([parameters['sceneCorner_lat_1_TDX'], parameters['sceneCorner_lat_1_TSX']])
        parameters['sceneCorner_lat_2'] = np.max([parameters['sceneCorner_lat_2_TDX'], parameters['sceneCorner_lat_2_TSX']])
        parameters['sceneCorner_lat_3'] = np.min([parameters['sceneCorner_lat_3_TDX'], parameters['sceneCorner_lat_3_TSX']])
        parameters['sceneCorner_lat_4'] = np.min([parameters['sceneCorner_lat_4_TDX'], parameters['sceneCorner_lat_4_TSX']])
    else:
        parameters['sceneCorner_lat_1'] = np.min([parameters['sceneCorner_lat_1_TDX'], parameters['sceneCorner_lat_1_TSX']])
        parameters['sceneCorner_lat_2'] = np.min([parameters['sceneCorner_lat_2_TDX'], parameters['sceneCorner_lat_2_TSX']])
        parameters['sceneCorner_lat_3'] = np.max([parameters['sceneCorner_lat_3_TDX'], parameters['sceneCorner_lat_3_TSX']])
        parameters['sceneCorner_lat_4'] = np.max([parameters['sceneCorner_lat_4_TDX'], parameters['sceneCorner_lat_4_TSX']])
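    # NOTE (added comment): the branch above keeps, for each corner, the outermost
    # latitude of the TDX/TSX pair, so the merged corners form an envelope that
    # encloses both acquisitions; comparing corner 1 against corner 3 tells whether
    # corners 1/2 are the northern or the southern edge of the scene, which decides
    # where np.max() and np.min() have to be applied.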
parameters['sceneCorner_lon_1'] = parameters['sceneCorner_lon_1_'+parameters['active_sat']]\n parameters['sceneCorner_lon_2'] = parameters['sceneCorner_lon_2_'+parameters['active_sat']]\n parameters['sceneCorner_lon_3'] = parameters['sceneCorner_lon_3_'+parameters['active_sat']]\n parameters['sceneCorner_lon_4'] = parameters['sceneCorner_lon_4_'+parameters['active_sat']]\n\n #Position of the corners: upper right, upper left, lower right ,lower left\n corners = np.zeros((4,2),'double')\n corners[0,0] = parameters['sceneCorner_lat_1']\n corners[1,0] = parameters['sceneCorner_lat_2']\n corners[2,0] = parameters['sceneCorner_lat_3']\n corners[3,0] = parameters['sceneCorner_lat_4']\n corners[0,1] = parameters['sceneCorner_lon_1']\n corners[1,1] = parameters['sceneCorner_lon_2']\n corners[2,1] = parameters['sceneCorner_lon_3']\n corners[3,1] = parameters['sceneCorner_lon_4']\n\n parameters['corners'] = np.copy(corners)\n \n ###### get also parameters of the slave image ##########################################\n \n #define a dictionary output to save the parameters as a dictionary\n parameters_slave = {}\n \n #Get xml name from the folder path name\n slave_name = 'TDX'\n if parameters['active_sat'] == 'TDX': slave_name = 'TSX'\n \n xml_file = path_acquisition+parameters[slave_name+'_name']+'/'+parameters[slave_name+'_name']+'.xml'\n #parse the xml\n tree = ElementTree.parse(xml_file)\n root = tree.getroot()\n \n ## get corners for the image\n for i_corner,ElementCorner in enumerate(root.find(\"productInfo\").find(\"sceneInfo\").findall(\"sceneCornerCoord\")):\n parameters_slave['sceneCorner_lat_'+str(i_corner+1)] = np.double(ElementCorner.find(\".//lat\").text)\n parameters_slave['sceneCorner_lon_'+str(i_corner+1)] = np.double(ElementCorner.find(\".//lon\").text)\n\n\n #use same order as with the master\n corners = np.zeros((4,2),'double')\n corners[0,0] = parameters_slave['sceneCorner_lat_1']\n corners[1,0] = parameters_slave['sceneCorner_lat_2']\n corners[2,0] = parameters_slave['sceneCorner_lat_3']\n corners[3,0] = parameters_slave['sceneCorner_lat_4']\n corners[0,1] = parameters_slave['sceneCorner_lon_1']\n corners[1,1] = parameters_slave['sceneCorner_lon_2']\n corners[2,1] = parameters_slave['sceneCorner_lon_3']\n corners[3,1] = parameters_slave['sceneCorner_lon_4']\n\n parameters_slave['corners'] = np.copy(corners)\n\n\n # get orbit slave\n \n time_gps_start = np.double(root.find(\"productInfo\").find(\"sceneInfo\").find(\"start\").find(\".//timeGPS\").text)\n time_gps_start_fraction = np.double(root.find(\"productInfo\").find(\"sceneInfo\").find(\"start\").find(\".//timeGPSFraction\").text)\n time_ref = time_gps_start + time_gps_start_fraction\n \n time_orbit = []\n posX = []\n posY = []\n posZ = []\n VelX = []\n VelY = []\n VelZ = []\n\n for ElementStateVec in root.find(\"platform\").find(\"orbit\").findall(\"stateVec\"):\n time_orbit.append(np.double(ElementStateVec.find(\".//timeGPS\").text)+np.double(ElementStateVec.find(\".//timeGPSFraction\").text))\n posX.append(np.double(ElementStateVec.find(\".//posX\").text))\n posY.append(np.double(ElementStateVec.find(\".//posY\").text))\n posZ.append(np.double(ElementStateVec.find(\".//posZ\").text))\n VelX.append(np.double(ElementStateVec.find(\".//velX\").text))\n VelY.append(np.double(ElementStateVec.find(\".//velY\").text))\n VelZ.append(np.double(ElementStateVec.find(\".//velZ\").text))\n\n orbit = np.zeros((np.size(time_orbit),7))\n orbit[:,0] = np.asarray(time_orbit,'double') - time_ref\n orbit[:,1] = np.asarray(posX)\n orbit[:,2] = 
np.asarray(posY)
    orbit[:,3] = np.asarray(posZ)
    orbit[:,4] = np.asarray(VelX)
    orbit[:,5] = np.asarray(VelY)
    orbit[:,6] = np.asarray(VelZ)

    parameters_slave['orbit_no_interp'] = orbit

    # get range delay of the image
    parameters_slave['rg_delay'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(".//firstPixel").text)

    # row spacing
    parameters_slave['spacing_rg'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//rowSpacing").text)*constants.c/2.0

    # get the azimuth spacing
    parameters_slave['projectedSpacingAzimuth'] = np.double(root.find("productSpecific").find(".//projectedSpacingAzimuth").text)

    parameters_slave['groundNear'] = np.double(root.find("productSpecific").find(".//groundNear").text)
    parameters_slave['groundFar'] = np.double(root.find("productSpecific").find(".//groundFar").text)

    # get the sampling of the focused image 'commonPRF'
    parameters_slave['commonPRF'] = np.double(root.find(".//commonPRF").text)

    # calibration factor
    parameters_slave['calFactor'] = np.double(root.find('calibration').find('calibrationConstant').find(".//calFactor").text)

    # number of rows (azimuth)
    parameters_slave['n_az'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//numberOfRows").text)
    # number of cols (range)
    parameters_slave['n_rg'] = np.double(root.find('productInfo').find('imageDataInfo').find('imageRaster').find(".//numberOfColumns").text)

    # effective velocity at mid range (v0 of TAXI)
    parameters_slave['effective_vel_mid_rg'] = np.sqrt(np.double(root.find('processing').find('geometry').find('velocityParameter').find('velocityParameterPolynomial').find(".//coefficient").text))

    # get range sampling frequency
    parameters_slave['rg_sampling_freq'] = np.double(root.find(".//commonRSF").text)

    # get the look direction
    parameters_slave['look_dir'] = root.find('productInfo').find('acquisitionInfo').find(".//lookDirection").text.lower()

    # center frequency
    parameters_slave['f0'] = np.double(root.find('instrument').find('radarParameters').find(".//centerFrequency").text)

    # number of noise records
    parameters_slave['n_noise_records'] = np.double(root.find('noise').find(".//numberOfNoiseRecords").text)

    # Get noise parameters
    parameters_slave['noise_utc'] = []
    parameters_slave['validity_range_min'] = []
    parameters_slave['validity_range_max'] = []
    parameters_slave['reference_point'] = []
    for i_element, ElementImageNoise in enumerate(root.find("noise").findall("imageNoise")):
        parameters_slave['noise_utc'].append(ElementImageNoise.find(".//timeUTC").text)
        parameters_slave['validity_range_min'].append(np.double(ElementImageNoise.find(".//validityRangeMin").text))
        parameters_slave['validity_range_max'].append(np.double(ElementImageNoise.find(".//validityRangeMax").text))
        parameters_slave['reference_point'].append(np.double(ElementImageNoise.find(".//referencePoint").text))
        parameters_slave['noise_polynomial_degree'] = np.double(ElementImageNoise.find(".//polynomialDegree").text)

    # get coefficients
    parameters_slave['noise_coef'] = np.zeros((int(parameters_slave['n_noise_records']), int(parameters_slave['noise_polynomial_degree'])+1))
    for i_element, ElementImageNoise in enumerate(root.find("noise").findall("imageNoise")):
        for j_element, ElementNoiseEstimate in enumerate(ElementImageNoise.find('noiseEstimate').findall(".//coefficient")):
            parameters_slave['noise_coef'][i_element, j_element] = np.double(ElementNoiseEstimate.text)
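    # NOTE (added sketch, kept as comments so nothing extra runs inside this
    # function): assuming the XML stores the noise coefficients in ascending order
    # of power and expanded around reference_point, the noise profile of record i
    # could be evaluated over its validity interval with numpy, e.g.:
    #   tau = np.linspace(parameters_slave['validity_range_min'][i],
    #                     parameters_slave['validity_range_max'][i], 512)
    #   noise_i = np.polynomial.polynomial.polyval(
    #       tau - parameters_slave['reference_point'][i],
    #       parameters_slave['noise_coef'][i, :])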
    # start UTC
    parameters_slave['start_utc'] = root.find('productInfo').find('sceneInfo').find('start').find(".//timeUTC").text
    # stop UTC
    parameters_slave['stop_utc'] = root.find('productInfo').find('sceneInfo').find('stop').find(".//timeUTC").text

    parameters_slave['rg_last_pixel'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(".//lastPixel").text)

    # get range first pixel
    parameters_slave['rg_first_pixel'] = np.double(root.find('productInfo').find('sceneInfo').find('rangeTime').find(".//firstPixel").text) #* constants.c / 2.0

    # range vector
    parameters_slave['range_vec'] = np.linspace(start=parameters_slave['rg_first_pixel'] * constants.c / 2,
                                                stop=(parameters_slave['n_rg']-1)*parameters_slave['spacing_rg']+(parameters_slave['rg_first_pixel'] * constants.c / 2),
                                                num=int(parameters_slave['n_rg']))

    return parameters, parameters_slave

def compute_interferogram(resolution, parameters, mst, slv, flat, kz, dem, multilook=None, resolution_slcs=1):
    """Computation of the interferogram and coherence

    Parameters
    ----------
    resolution : float
        Desired resolution, to be used in the averaging of the SLC's
    parameters : dict
        Dictionary with parameters of the master image
        Output of function get_params_from_xml()
    mst : 2D numpy array
        SLC master image
        Output from read_cos()
    slv : 2D numpy array
        SLC slave image
        Output from read_cos()
    flat : 2D numpy array
        Output from compute_slant_phase_flat() with the SLC dimensions
    kz : 2D numpy array
        Vertical wavenumber
        Output from get_baselines() with the SLC dimensions
    dem : 2D numpy array
        dem in range/azimuth coordinates with the same size as SLC
    multilook : list with the pixel size to use for the multilook in azimuth and range
        if it is None we take the size based on the input resolution
    resolution_slcs : int
        resolution at which we smooth the input images before the coherence computation,
        if it is 1 we do nothing

    Returns
    -------
    interferogram : 2D numpy array
    coherence : 2D numpy array

    Notes
    -------
    Author : Victor Cazcarra-Bes ([email protected])
    Date : May 2020

    """

    log = logging.getLogger('compute_interferogram')
    log.info('Computation of the interferogram and coherence ...')

    # check if the dem has nans
    # NOTE: For the full processing, the dem has no nans as it is checked in the function get_dem()
    if np.sum(np.isnan(dem)) > 0:
        log.info('Interpolation nan values of the dem')
        points_nan = np.where(np.isnan(dem))
        size_block = 2
        for i_nan_row, i_nan_col in zip(points_nan[0], points_nan[1]):
            value_inter = np.nan
            while np.isnan(value_inter):
                # get the points of the block to interpolate and check that it is inside the image
                size_block = size_block + 2
                ini_row = np.clip(i_nan_row - size_block, 0, dem.shape[0])
                fin_row = np.clip(i_nan_row + size_block, 0, dem.shape[0])
                if ini_row == fin_row: fin_row = fin_row + 2
                ini_col = np.clip(i_nan_col - size_block, 0, dem.shape[1])
                fin_col = np.clip(i_nan_col + size_block, 0, dem.shape[1])
                if ini_col == fin_col: fin_col = fin_col + 2
                # we use the mean over the neighbors as interpolator
                value_inter = np.nanmean(dem[ini_row:fin_row, ini_col:fin_col])

            # change the nan value by the interpolated one
            dem[i_nan_row, i_nan_col] = value_inter
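    # NOTE (added comment): the multilook windows computed below are derived from
    # the target resolution and the product pixel spacings; e.g. for a hypothetical
    # resolution = 25 m, a mean ground-range spacing of ~2.5 m and an azimuth
    # spacing of ~3.3 m this gives ml_rg = round(25 / 2.5) = 10 and
    # ml_az = round(25 / 3.3) = 8 looks.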
    if resolution_slcs > 1:
        log.info('Reduce resolution of SLCs to avoid bias in the coherence')
        ml_rg_slc = int(np.round(resolution_slcs / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))
        ml_az_slc = int(np.round(resolution_slcs / parameters['projectedSpacingAzimuth']))
        # smooth images
        mst = ndimage.uniform_filter(mst.real, (ml_az_slc, ml_rg_slc)) + 1j * ndimage.uniform_filter(mst.imag, (ml_az_slc, ml_rg_slc))
        slv = ndimage.uniform_filter(slv.real, (ml_az_slc, ml_rg_slc)) + 1j * ndimage.uniform_filter(slv.imag, (ml_az_slc, ml_rg_slc))

    # get multi-look size in radar coordinates. For the range we take the mean value between near and far
    if multilook is None:
        ml_rg = int(np.round(resolution/((parameters['groundNear']+parameters['groundFar'])/2.)))
        ml_az = int(np.round(resolution/parameters['projectedSpacingAzimuth']))
    else:
        ml_az = multilook[0]
        ml_rg = multilook[1]

    log.info('Multilook in range ='+str(ml_rg))
    log.info('Multilook in azimuth ='+str(ml_az))

    #### make fast version using multiply and the output keyword
    # interferogram * flat-earth phase * topographic phase
    log.info(' Generate interferogram step 1 of 2 ...')
    interferogram = np.multiply(mst, np.conj(slv))
    #interferogram = numexpr.evaluate("mst*conj(slv)")
    # NOTE: Check if we need (or not) to remove the meanheight from the dem before the topographic phase compensation
    # np.multiply(np.multiply(interferogram, np.exp(-flat * 1j), out=interferogram), np.exp(np.multiply(-(dem-meanheight), kz * 1j)), out=interferogram)
    np.multiply(np.multiply(interferogram, np.exp(-flat*1j), out=interferogram), np.exp(np.multiply(-dem, kz*1j)), out=interferogram)
    #interferogram = numexpr.evaluate("interferogram*exp(-flat*1j)*exp(-dem*kz*1j)")

    log.info(' Generate interferogram step 2 of 2 ...')
    ndimage.uniform_filter(interferogram.real, (ml_az, ml_rg), output=interferogram.real) + 1j*ndimage.uniform_filter(interferogram.imag, (ml_az, ml_rg), output=interferogram.imag)

    # coherence
    log.info(' Generate coherence step 1 of 3 ...')
    mst_multilook = np.multiply(mst, np.conj(mst))
    #mst_multilook = numexpr.evaluate("mst*conj(mst)")
    ndimage.uniform_filter(mst_multilook.real, (ml_az, ml_rg), output=mst_multilook.real) + 1j*ndimage.uniform_filter(mst_multilook.imag, (ml_az, ml_rg), output=mst_multilook.imag)

    log.info(' Generate coherence step 2 of 3 ...')
    slv_multilook = np.multiply(slv, np.conj(slv))
    #slv_multilook = numexpr.evaluate("slv*conj(slv)")
    ndimage.uniform_filter(slv_multilook.real, (ml_az, ml_rg), output=slv_multilook.real) + 1j * ndimage.uniform_filter(slv_multilook.imag, (ml_az, ml_rg), output=slv_multilook.imag)

    log.info(' Generate coherence step 3 of 3 ...')
    coherence = np.divide(interferogram, np.sqrt(np.multiply(mst_multilook, slv_multilook)))
    #coherence = numexpr.evaluate("(interferogram)/(sqrt(mst_multilook*slv_multilook))")

    log.info('Computation of the interferogram and coherence ok!')

    return interferogram, coherence
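# Added sketch (not part of the original TAXI port): the in-place uniform_filter
# calls above implement a boxcar multilook of a complex image; the same operation
# can be factored into a small helper. '_complex_multilook' is a hypothetical name
# introduced here only for illustration.
def _complex_multilook(img, ml_az, ml_rg):
    """Boxcar multilook of a complex 2D array with an (ml_az, ml_rg) window."""
    from scipy import ndimage
    return (ndimage.uniform_filter(img.real, (ml_az, ml_rg))
            + 1j * ndimage.uniform_filter(img.imag, (ml_az, ml_rg)))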
def get_2dlut_kz_coh_height_from_master_profile(master_profile, height_vector, kz_min=0, kz_max=0.5, n_elements_lut=100000):
    """Generate a LUT that relates the values of kz and height with coherence

    Parameters
    ----------
    master_profile : 1D numpy array
        Common profile generated from GEDI data
    height_vector : 1D numpy array
        height vector used for the LUT
    kz_min : float, optional
        Minimum value of kz for the LUT
    kz_max : float, optional
        Maximum value of kz for the LUT
    n_elements_lut : int, optional
        Size of the LUT, i.e. the number of kz samples used in the processing

    Returns
    -------
    lut_2d_kz_coh : 2D numpy array
        With dimensions (n_elements_lut, size_common_profile)
    kz_lut_axes : 1D numpy array
        Defines the rows of lut_2d_kz_coh
        kz_lut_axes = np.linspace(kz_min, kz_max, n_elements_lut)

    Notes
    -------
    Author : Victor Cazcarra-Bes ([email protected])
    Date : May 2020

    """

    log = logging.getLogger('get_2dlut_kz_coh_height_from_master_profile')
    log.info('Computing 2D LUT for kz and height <-> coherence')

    kz_lut_axes = np.linspace(kz_min, kz_max, n_elements_lut)

    # generation of a 2D LUT for a range of kz values
    if len(height_vector) != len(master_profile):
        master_profile = ndimage.zoom(master_profile, float(len(height_vector))/len(master_profile))

    size_common_profile = len(master_profile)
    arrray_from_0_to_1 = np.linspace(0, 1, num=size_common_profile)
    aux_mat = np.matmul(np.reshape(arrray_from_0_to_1, (size_common_profile, 1)), np.reshape(height_vector, (1, len(height_vector))))
    constant_sum_master_profile = np.abs(np.sum(master_profile))

    lut_2d_kz_coh = np.zeros((len(kz_lut_axes), size_common_profile))
    for i_kz, kz_value in enumerate(kz_lut_axes):
        exp_kz_mat = np.exp(0 + 1j * kz_value * aux_mat)
        aux_mat2 = np.squeeze(np.abs(np.matmul(exp_kz_mat, np.reshape(master_profile, (size_common_profile, 1)))))
        lut_2d_kz_coh[i_kz, :] = aux_mat2 / constant_sum_master_profile

    return lut_2d_kz_coh, kz_lut_axes

@jit(nopython=True)
def one_pixel_forest_height(kz_cor_pixel, kz_lut_axes, lut_kz_coh_heights, coh_cor_pixel, height_vector):
    """Compute the forest height for one pixel

    Parameters
    ----------
    kz_cor_pixel : float
        Corrected kz for one pixel
    kz_lut_axes : 1D numpy array
        All possible kz values in the LUT
    lut_kz_coh_heights : 2D numpy array
        LUT that relates kz and coherence with height
    coh_cor_pixel : float
        Corrected coherence for one pixel
    height_vector : 1D numpy array
        Vector of heights

    Returns
    -------
    height_vector[pos_lut] : float, value of height

    Notes
    -------
    Author : Victor Cazcarra-Bes ([email protected])
    Date : May 2020

    """
    # Use a pre-generated LUT for a certain range of kzs
    pos_kz = np.argmin(np.abs(kz_cor_pixel - kz_lut_axes))
    lut_coh_heights = lut_kz_coh_heights[pos_kz, :]

    # find the position of the lut which is closest to the coherence input
    pos_lut = np.argmin(np.abs(coh_cor_pixel - lut_coh_heights))

    return height_vector[pos_lut]


def forest_height_inversion(inputs, kz_cor, coh_cor, parameters, lut_kz_coh_heights=None, kz_lut_axes=None, master_profile=None, use_input_size_kz_coh=False):
    """Make the forest height inversion based on a LUT.

    There are two options to compute the forest height:

    1- Use a predefined LUT that relates kz, coherence and height
       - Input parameter 'lut_kz_coh_heights' that comes from the function get_2dlut_kz_coh_height_from_master_profile
       - Input parameter 'kz_lut_axes': Axes of the lut, it is an input parameter of the function get_2dlut_kz_coh_height_from_master_profile
    2- Compute the LUT that relates coherence and height for each kz (much slower than option 1)
       - Input parameter 'master_profile'. It is the master profile used to compute the lut

    NOTE: Option 1 has higher priority, as it is faster than option 2:
       - It means that, if the user provides as inputs 'lut_kz_coh_heights' and 'master_profile',
         the function will use the lut_kz_coh_heights to get the heights and ignore the master profile.
       - If the lut generated with 'get_2dlut_kz_coh_height_from_master_profile' has enough samples (i.
e.\n the 'kz_lut_axes') the results of option 1 and option 2 are (almost) the same.\n\n Parameters\n ----------\n inputs: module\n Module from the inputs file used in the GEDI/TDX procesinng\n Before calling the function make import inputs\n kz_cor : 2D numpy array\n Kz corrected by the den\n Output from processing_tdx_until_coherence()\n coh_cor : 2D numpy array\n Coherence corrected by the dem\n Output from processing_tdx_until_coherence()\n parameters : dict\n Inforamtion realted to the master image\n Output from processing_tdx_until_coherence()\n lut_2d_kz_coh : 2D numpy array\n With dimensions (n_elements_lut,size_common_profile)\n Output from get_2dlut_kz_coh_height_from_master_profile()\n kz_lut_axes : 1D numpy array\n Defines the rows of lut_2d_kz_coh\n kz_lut_axes = np.linspace(kz_min, kz_max, n_elements_lut)\n Output from get_2dlut_kz_coh_height_from_master_profile()\n master_profile : 1D numpy array\n Common profile generated from GEDI data\n use_input_size_kz_coh : Bool\n Flag to not change the size of the input arrays coh and kz\n\n Returns\n -------\n forest_height : 2D numpy array\n Forest heights\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n\n log = logging.getLogger('forest_height_inversion')\n log.info('Computation of the forest height inversion')\n\n height_vector = np.linspace(inputs.min_height_vector, inputs.max_height_vector, num=inputs.n_elements_height_vector)\n\n # INCREASE COHERENCE TO REMOVE BIAS IN THE LOWER PART\n #coh_cor = np.clip(coh_cor / inputs.decorrelation_coherence_before_inversion, 0, 1)\n\n if use_input_size_kz_coh==False:\n # reduce the coh_cor and kz_cor\n pixel_spacing = inputs.pixel_spacing_out\n\n log.info('Original pixels range = ' + str(coh_cor.shape[1]))\n log.info('Original pixels azimuth = ' + str(coh_cor.shape[0]))\n\n factor_rescale_rg = 1.0 / (pixel_spacing / ((parameters['groundNear'] + parameters['groundFar']) / 2.))\n factor_rescale_az = 1.0 / (pixel_spacing / parameters['projectedSpacingAzimuth'])\n coh_cor = ndimage.zoom(coh_cor, (factor_rescale_az, factor_rescale_rg),order=1)\n kz_cor = ndimage.zoom(kz_cor, (factor_rescale_az, factor_rescale_rg), order=1)\n\n # new_shape_az = int(kz_cor.shape[0] * factor_rescale_az)+1\n # new_shape_rg = int(kz_cor.shape[1] * factor_rescale_rg)+1\n # new_shape = [new_shape_az,new_shape_rg]\n # kz_cor = rebin_arbitrary_dims(kz_cor, new_shape, method='mean')\n # coh_cor = rebin_arbitrary_dims(coh_cor, new_shape, method='mean')\n\n log.info('Final pixels to invert in range = ' + str(coh_cor.shape[1]))\n log.info('Final pixels to invert in azimuth = ' + str(coh_cor.shape[0]))\n\n # #quantization error correction (coherence error of quantization is normally around 3 %)\n # coh_cor = coh_cor/inputs.quantization_error\n # coh_cor[(coh_cor < 0) | (coh_cor > 1)] = np.nan\n\n\n n_rows, n_cols = coh_cor.shape\n\n\n #Generate output matrix\n forest_height = np.zeros(coh_cor.shape)\n\n #check if we have a predefined lut\n if lut_kz_coh_heights is not None:\n log.info('Using pre-computed lut for a range of kzs')\n #in that case we fuse option 1, a predefined lut\n flag_compute_lut = False\n elif master_profile is not None:\n log.info('The luts will be computed for each kz')\n #in that case we compute the lut for each kz\n flag_compute_lut = True\n else:\n log.error('Please provide a Master profile to generate the lut for each kz or a 2D LUT that relates kz with coherence and height')\n\n ##check if we have to compute the lut for each kz\n if 
flag_compute_lut:\n #matrix and constat that we need to compute inside the loops the lut for each kz\n size_common_profile = len(master_profile)\n arrray_from_0_to_1 = np.linspace(0, 1, num=size_common_profile)\n aux_mat = np.matmul(np.reshape(arrray_from_0_to_1, (size_common_profile, 1)),np.reshape(height_vector, (1, len(height_vector))))\n constant_sum_master_profile = np.abs(np.sum(master_profile))\n\n ##check if we have to compute the lut for each kz or we have a predefined lut\n if flag_compute_lut:\n for i_row in range(n_rows):\n for i_col in range(n_cols):\n exp_kz_mat = np.exp(0 + 1j * kz_cor[i_row, i_col] * aux_mat)\n aux_mat2 = np.squeeze(np.abs(np.matmul(exp_kz_mat, np.reshape(master_profile, (size_common_profile, 1)))))\n lut_coh_heights = aux_mat2 / constant_sum_master_profile\n\n # find the position of the lut which is more similar to the coherence input\n pos_lut = np.argmin(np.abs(coh_cor[i_row, i_col] - lut_coh_heights))\n forest_height[i_row, i_col] = height_vector[pos_lut]\n\n else:\n for i_row in range(n_rows):\n for i_col in range(n_cols):\n #Use a function to called with numba (faster than using the code without the function)\n forest_height[i_row, i_col] = one_pixel_forest_height(kz_cor[i_row, i_col], kz_lut_axes, lut_kz_coh_heights,coh_cor[i_row, i_col],height_vector)\n\n # if mask_kz_points[i_row,i_col] == 0:\n # # Use a pre-generated LUT for certain range of kzs\n # pos_kz = np.argmin(np.abs(kz_cor[i_row, i_col] - kz_lut_axes))\n # lut_coh_heights = lut_kz_coh_heights[pos_kz,:]\n #\n # #find the position of the lut which is more similar to the coherence input\n # pos_lut = np.argmin(np.abs(coh_cor[i_row, i_col] - lut_coh_heights))\n # forest_height[i_row, i_col] = height_vector[pos_lut]\n\n\n return forest_height\n\n\ndef interpol_orbits(parameters,margin=-2,polDegree=7,pointsBefore=14,pointsAfter=16,reqTime=None,same_coefficients=True,parameters_slave=None):\n \"\"\"Interpolation of the orbits\n\n Based on interpolorb.pro and InterpolOrbCalc.pro) from TAXI\n It includes only the Chebyshev approximation calculating the values using the same coefficients.\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n margin : float,optional\n Time margin in seconds for the interpolation\n polDegree : int, optional\n Degree of polynom to use\n pointsBefore : int, optiontal\n To compute the minimum required array index to keep amount of points (default 14)\n pointsAfter : int, optional\n To compute the maximun required index to keep amount of points (default 16)\n reqTime : 1D array, optional\n It is the array fiven in parameters_slave['orbit'][:,0]\n If ReqTime is an input is becasue we are computing the active_orbit\n same_coefficients : bool, optional\n If True all values shall be calculated with the same coefficients, so use the mid of all requested times (by default true)\n Set to False to compute the active orbit\n parameters_slave : dict, optional\n Dictionary with parameters of the slave image.\n Output of function get_params_from_xml()\n Use this argument only if you also use the reqTime\n\n Returns\n -------\n orbit : 2D numpy array of dimensions (n,7)\n Contain the orbit information in the form of:\n orbit[:,0]: required time vector used for the interpolation\n orbit[:,1]: interpolated x position vector\n orbit[:,2]: interpolated y position vector\n orbit[:,3]: interpolated z position vector\n orbit[:,4]: interpolated x velocity vector\n orbit[:,5]: interpolated y velocity 
vector\n orbit[:,6]: interpolated z velocity vector\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('interpol_orbits')\n log.info('Interpolation of orbits ...')\n\n\n # - Interpolation of orbits (translation of interpolorb.pro and InterpolOrbCalc.pro) from TAXI\n #- Using Chebyshev approximation\n\n time = parameters['orbit_no_interp'][:,0]\n posX = parameters['orbit_no_interp'][:,1]\n posY = parameters['orbit_no_interp'][:,2]\n posZ = parameters['orbit_no_interp'][:,3]\n velX = parameters['orbit_no_interp'][:,4]\n velY = parameters['orbit_no_interp'][:,5]\n velZ = parameters['orbit_no_interp'][:,6]\n\n # Compute required time in case we do not have it as an input.\n # If ReqTime is an input is becasue we are computing the active_orbit and we use:\n # the reqTime (gave it as input) and time from the slave (we take it fromparameters_slave)\n if reqTime is None:\n\n ### interpolation of orbits\n ext = np.round(np.abs(margin) * 2 / (1.0 / parameters['commonPRF']))\n # get the required time\n reqTime = np.linspace(start=margin,\n stop=((ext + parameters['n_az']-1) * (1.0 / parameters['commonPRF']))+ margin,\n num=np.int(ext + parameters['n_az']))\n\n else:\n time = parameters_slave['orbit_no_interp'][:,0]\n\n\n Size_reqTime = len(reqTime)\n\n if Size_reqTime <= 0: log.error('The time vector is incorrect.')\n\n ## get the lowest index\n #cut time, position and velocity array\n minIdx=np.argmin(np.abs(time-np.min(reqTime)))\n\n if time[minIdx] > np.min(reqTime): minIdx = minIdx-1\n\n #; minimum required array index keep amount of points\n minIdxNPt=len(time)-1-(pointsAfter+pointsBefore)\n #; find min Idx to use\n minIdx=np.max([np.min([minIdx-pointsBefore,minIdxNPt]),0])\n\n #get the highest index\n #cut time, position and velocity array\n maxIdx=np.argmin(np.abs(time-np.max(reqTime)))\n\n if time[maxIdx] > np.max(reqTime): maxIdx = maxIdx - 1\n\n #; maximun required index to keep amount of points\n maxIdxNPt=minIdx+(pointsAfter+pointsBefore)\n #; find min Idx to use\n maxIdx=np.min([np.max([maxIdx+pointsAfter,maxIdxNPt]),len(time)])\n\n ## from here part of InterpolOrbCalc.pro\n\n # cut the inputs to the used time\n time = time[minIdx:maxIdx]\n posX = posX[minIdx:maxIdx]\n posY = posY[minIdx:maxIdx]\n posZ = posZ[minIdx:maxIdx]\n velX = velX[minIdx:maxIdx]\n velY = velY[minIdx:maxIdx]\n velZ = velZ[minIdx:maxIdx]\n\n #get size of the 'cut' array\n size_time = len(time)\n\n # make output variable with the interpolated orbit\n orbit = np.zeros((Size_reqTime, 7))\n\n i_time = 0\n while i_time < Size_reqTime:\n #; all values shall be calculated with the same coefficients, so use the mid of all requested times\n if same_coefficients:\n coeffCenterTime = (np.max(reqTime) - np.min(reqTime)) / 2.0 + np.min(reqTime)\n else:\n coeffCenterTime = reqTime[i_time]\n \n \n aux = np.abs(time-coeffCenterTime)\n idx = np.argmin(aux)\n \n #; for TSX approximation a duration of 5 Minutes is recommended (30 samples * 10 second interval)\n #; wanted minimum array index (default 14)\n minIdxReq=idx-14\n #; wanted maximum array index (default 15)\n maxIdxReq=idx+15\n #; minimum required array index to maintain requested pol degree\n minIdxDegree=size_time-1 - polDegree\n #; find min Idx to use\n minIdx=np.max([np.min([minIdxReq,minIdxDegree]),0])\n #; maximun required index to maintain requested pol degree\n maxIdxDegree=minIdx + polDegree\n #; find max Idx to use\n maxIdx=np.min([np.max([maxIdxReq,maxIdxDegree]), size_time-1])\n \n 
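        # NOTE (added comment): the two lines below map the fit window and the
        # requested times from [min(useTime), max(useTime)] onto the canonical
        # Chebyshev domain [-1, 1]; np.polynomial.chebyshev.chebfit/chebval are
        # well conditioned on that interval, which is why the transform is applied
        # before fitting.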
useTime=time[minIdx:maxIdx+1]\n #; transform to new interval\n reqTimeInt=2.0*(reqTime-np.min(useTime)) / (np.max(useTime)-np.min(useTime)) -1\n #; transform to new interval\n timeInt=2.0*(useTime-np.min(useTime)) / (np.max(useTime)-np.min(useTime)) -1\n \n # check if enough points are available for more or less correct approximation\n if maxIdx-minIdx <= 3:\n log.error('Not enough points to approximation correct! Point available:'+str(maxIdx-minIdx+1)+ '. Required at least 5, but more would improve results.')\n \n if same_coefficients:\n # ; all values will be calculated using the same coefficients\n j_time = Size_reqTime\n else:\n #Find the reqtime indices which can be calculated with the same input array indices and\n # rememember them for calculation with these coeficients\n finished = 0\n j_time = 1\n while finished == 0 and (i_time+j_time) < Size_reqTime:\n checkindx = np.argmin(np.abs(time - reqTime[i_time+j_time]))\n if checkindx == idx:\n j_time = j_time + 1\n else:\n finished = 1\n\n use_reqTimeInt = np.copy(reqTimeInt[i_time:i_time+j_time])\n \n #; calculate chebyshev coefficients\n degree = np.min([maxIdx-minIdx,polDegree+1])\n \n #Interpolate each of the components\n \n #for posX\n coef = np.polynomial.chebyshev.chebfit(timeInt, posX[minIdx:maxIdx+1],degree)\n orbit[i_time:i_time+j_time,1] = np.polynomial.chebyshev.chebval(use_reqTimeInt,coef)\n \n #for posY\n coef = np.polynomial.chebyshev.chebfit(timeInt, posY[minIdx:maxIdx+1], degree)\n orbit[i_time:i_time+j_time,2] = np.polynomial.chebyshev.chebval(use_reqTimeInt, coef)\n \n #for posZ\n coef = np.polynomial.chebyshev.chebfit(timeInt, posZ[minIdx:maxIdx+1], degree)\n orbit[i_time:i_time+j_time,3] = np.polynomial.chebyshev.chebval(use_reqTimeInt, coef)\n \n #for velX\n coef = np.polynomial.chebyshev.chebfit(timeInt, velX[minIdx:maxIdx+1],degree)\n orbit[i_time:i_time+j_time,4] = np.polynomial.chebyshev.chebval(use_reqTimeInt,coef)\n \n #for velY\n coef = np.polynomial.chebyshev.chebfit(timeInt, velY[minIdx:maxIdx+1], degree)\n orbit[i_time:i_time+j_time,5] = np.polynomial.chebyshev.chebval(use_reqTimeInt, coef)\n \n #for velZ\n coef = np.polynomial.chebyshev.chebfit(timeInt, velZ[minIdx:maxIdx+1], degree)\n orbit[i_time:i_time+j_time,6] = np.polynomial.chebyshev.chebval(use_reqTimeInt, coef)\n\n i_time = i_time + j_time\n\n #add time\n orbit[:,0] = np.asarray(reqTime)\n\n return orbit\n\n\ndef get_dem(path_dem,type_dem,parameters,margin_degrees=0.5,NumThreads=5):\n \"\"\"Get the dem for the processing\n\n It accepts TanDEM DEM 90 m or SRTM DEM.\n\n If the SRTM is used, and the data is not available it downlads the data from:\n - dds.cr.usgs.gov (/srtm/version2_1/SRTM3)\n\n TanDEM DEM 90 m available on:\n - https://download.geoservice.dlr.de/TDM90/\n\n Parameters\n ----------\n path_dem : str\n Complete path where the dem\n type_dem : {'TANDEM','SRTM'}\n Select one of the DEMs\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n margin_degrees : float, optional\n Margin in degrees for the dem respect to the image size\n NumThreads : int, optional\n Number of threads to use in the parallel processing steps.\n\n Returns\n -------\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension representes:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n dem_xyz : 3D numpy array\n DEM in cartesian coordiantes, the last dimension represents X,Y,Z. 
respectively.\n dem_posting: float\n dem_limits : dict\n Limits of the DEM.\n It contains the following keys: {'minlon': ,'maxlon': ,'minlat': ,'maxlat': 0.}\n dang : float\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('get_dem')\n log.info('Get DEM from '+ type_dem)\n\n if type_dem == 'SRTM':\n #if the teil is not in the path, the SRTMHandler will download the file from internet\n handler = demgen.SRTMHandler(Path(path_dem), download=True)\n elif type_dem == 'TANDEM':\n handler = tandemhandler.tandem_handler(path_dem,type_map='DEM')\n\n # corners for SRTMHandler in the following order: low left, low right, up left, up right\n # corners from parameters['corners']: upper right, upper left, lower right ,lower left\n corners = np.empty([1, 4, 2], np.float64)\n lons = np.array([parameters['corners'][3][1], parameters['corners'][2][1], parameters['corners'][1][1],\n parameters['corners'][0][1]])\n lats = np.array([parameters['corners'][3][0], parameters['corners'][2][0], parameters['corners'][1][0],\n parameters['corners'][0][0]])\n\n # add margin following TAXI code\n maxlat = np.max(lats)\n minlat = np.min(lats)\n maxlon = np.max(lons)\n minlon = np.min(lons)\n\n maxlat += margin_degrees\n minlat -= margin_degrees\n maxlon += margin_degrees\n minlon -= margin_degrees\n\n # # adding ~10km margin\n meanlat = (minlat + maxlat) * 0.5\n Rearth = 6371000.0 # earth radius\n onedeglon = np.abs(Rearth * np.cos(meanlat / (180 / np.pi)) / (180 / np.pi)) # approx. 1º posting\n onedeglat = np.abs(Rearth / (180 / np.pi)) # approx. 1º posting\n marginlon = 10000 / onedeglon\n marginlat = 10000 / onedeglat\n # (to avoid large matrices in longitude)\n if meanlat > 60: marginlon = marginlat\n maxlat += marginlat\n minlat -= marginlat\n maxlon += marginlon\n minlon -= marginlon\n\n #add a bit more margin to round the values to avoid shifts in dem generation by tandem handler\n #maxlat = np.float32(np.ceil(maxlat*4.0))/4.0\n #maxlon = np.float32(np.ceil(maxlon*4.0))/4.0\n #minlat = np.float32(np.floor(minlat*4.0))/4.0\n #minlon = np.float32(np.floor(minlon*4.0))/4.0\n maxlat = np.float32(np.ceil(maxlat))\n maxlon = np.float32(np.ceil(maxlon))\n minlat = np.float32(np.floor(minlat))\n minlon = np.float32(np.floor(minlon))\n\n # Corners for the polygon to get the dem \n corners[0, :, 0] = np.asarray([minlon, maxlon, minlon, maxlon])\n corners[0, :, 1] = np.asarray([minlat, minlat, maxlat, maxlat])\n\n handler.read_blocks(corners)\n dem = handler.build_block()\n\n # Interpolate nans\n log.info('Interpolate nans in the dem ...')\n height_dem = np.copy(np.clip(dem[:, :, 2],0,np.nanmax(dem[:, :, 2])))\n pos_nans = np.isnan(height_dem)\n if np.sum(pos_nans>0):\n\n #interpolate in the nan positions\n positions_finite = np.where(pos_nans == False)\n positions_nan = np.where(pos_nans==True)\n values_inter = interpolate.griddata(positions_finite,height_dem[pos_nans==False],positions_nan,method='linear')\n #asing interpolate values to nan positions\n height_dem[pos_nans==True] = values_inter\n\n #clip between 0 and max to avoid negative values due to interpolation\n dem[:,:,2] = np.copy(np.clip(height_dem,0,np.nanmax(height_dem)))\n height_dem = None\n ##check in case there are more nans\n dem[np.isnan(dem)] = 0\n log.info('Interpolate nans in the dem ok')\n\n #dem[np.isnan(dem)] = 0\n\n #get limits of the dem\n #lon_min, lon_max, lat_min, lat_max = handler.blockLimits[0]\n \n #get dem posting\n lon_pos,lat_pos = 
np.mgrid[minlon/np.double(180./np.pi):maxlon/np.double(180./np.pi):dem.shape[0]*1j,minlat/np.double(180./np.pi):maxlat/np.double(180./np.pi):dem.shape[1]*1j]\n\n lon_pos = lon_pos.ravel()\n lat_pos = lat_pos.ravel()\n latderiv = np.abs(lat_pos-np.roll(lat_pos,1))\n dlat = np.min(latderiv[latderiv!=0])\n londeriv = np.abs(lon_pos-np.roll(lon_pos,1))\n dlon = np.min(londeriv[londeriv!=0])\n dang = np.min([dlat,dlon])\n dem_posting = np.round(dang*(180.0/np.pi)*onedeglon*0.9,0)\n\n\n dem_limits = {}\n dem_limits['minlon'] = minlon\n dem_limits['maxlon'] = maxlon\n dem_limits['minlat'] = minlat\n dem_limits['maxlat'] = maxlat\n \n ### transform dem from lon,lat,height to cartesian (x,y,z) coordinates\n lon_pos, lat_pos = np.mgrid[dem_limits['minlon']:dem_limits['maxlon']:dem.shape[0] * 1j,dem_limits['minlat']:dem_limits['maxlat']:dem.shape[1] * 1j]\n \n demllh = np.empty_like(dem)\n demllh[:, :, 0] = np.copy(lon_pos) # lon\n demllh[:, :, 1] = np.copy(lat_pos) # lat\n demllh[:, :, 2] = np.copy(dem[:, :, 2]) # height\n \n dem_xyz = geolib.ellip2cart(demllh, num_threads=NumThreads)\n\n return dem,dem_xyz,dem_posting,dem_limits,dang\n\ndef get_offnadir_lookangle(parameters,dem,NumThreads=5):\n \"\"\"Get off nadir and look angles\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension representes:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n Numhreads : int, optional\n Number of threads to use in the parallel processing steps.\n\n Returns\n -------\n offnadir : float\n lookangle : float\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('get_offnadir_lookangle')\n log.info('Computing offnadir and look angles...')\n\n # get mean (but using median as in TAXI) height of the dem\n meanheight = np.median(dem[:, :, 2])\n\n ##get lat lons of the image\n lons = np.array([parameters['corners'][3][1], parameters['corners'][2][1], parameters['corners'][1][1],\n parameters['corners'][0][1]])\n lats = np.array([parameters['corners'][3][0], parameters['corners'][2][0], parameters['corners'][1][0],\n parameters['corners'][0][0]])\n\n # check longitude vaues\n pos = lons > 180\n if np.sum(pos) > 0: lons[pos] = lons[pos] - 360\n\n # get position of the middel\n posmid = parameters['orbit'][int(parameters['orbit'].shape[0] / 2), 1:4]\n scenemid = np.asarray([np.mean(lons), np.mean(lats), meanheight])\n # transform to cart\n scenemid_aux = np.zeros((1, 1, 3), 'float64')\n scenemid_aux[0, 0, :] = scenemid\n scenemid_cart = geolib.ellip2cart(scenemid_aux,num_threads=NumThreads)\n\n # compute look angle\n v1 = scenemid_cart - posmid\n v2 = -posmid\n lookangle = np.arccos(np.sum(v1 * v2) / (np.sqrt(np.sum(np.square(v1))) * np.sqrt(np.sum(np.square(v2)))))\n\n # approx. 
offnadir angle\n v1 = posmid - scenemid_cart\n v2 = scenemid_cart\n offnadir = np.arccos(np.sum(v1 * v2) / (np.sqrt(np.sum(np.square(v1))) * np.sqrt(np.sum(np.square(v2)))))\n\n return offnadir, lookangle\n\ndef get_params_back_geocoding_dem(parameters, posting,offnadir):\n \"\"\"Compute auxiliary parameteres for the back-geocoding od the DEM\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n posting : float\n output from get_dem()\n offnadir: float\n output from get_offnadir_lookangle()\n\n Returns\n -------\n deltat_dem : float\n Sampling in backgeocoded DEM\n rd_dem : float\n Range delay of back-geocoded DEM\n rs_dem : float\n Range sampling of back-geocoded DEM\n t0_dem : float\n Azimuth start time of DEM with margin\n nrg_dem : float\n Range dimensions in pixels\n naz_dem : float\n Azimuth dimensions in pixels\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n\n log = logging.getLogger('get_params_back_geocoding_dem')\n log.info('Computing auxiliary parameters for backgeocoding DEM ...')\n \n #Mean satellite velocity vs of TAXI\n mean_sat_vel = np.mean(np.sqrt(np.square(parameters['orbit'][:, 4]) + np.square(parameters['orbit'][:, 5]) + np.square(parameters['orbit'][:, 6])))\n # Grpund velocity at mid rag vg of TAXI\n ground_vel_mid_rg = np.square(parameters['effective_vel_mid_rg']) / mean_sat_vel\n # t0_start from TAXI, which is set to 0\n ts_min = 0\n # squint angle is 0, i use the variables instead of 0 to leave the code for future squint adcquisitions\n squint = 0\n azMargin = 0\n\n \n offnadireff = 0.5 * (2 * np.sin(offnadir))\n\n ### Range parameters\n dr = constants.c / 2.0 / parameters['rg_sampling_freq'] # slant-range sampling in image\n gdr = dr / np.sin(offnadireff) # ground-range sampling (aprox)\n ground = parameters['n_rg'] * gdr # ground range scene extension\n rs_dem = posting * np.sin(offnadireff) # range sampling of back-geocoded DEM\n margin = 10000.0\n nrg_dem = np.round((ground + margin * 2) / posting * 0.5, 0) * 2 # give a margin of 10km\n incte = margin * np.sin(offnadireff) * 2.0 / constants.c # 5 km margin at near range\n rd_dem = np.max([0, parameters['rg_delay'] - incte]) # range delay of back-geocoded DEM\n\n ##azimth parameters\n squintmargin = np.round(np.abs((parameters['rg_delay'] * constants.c / 2.0 + parameters['n_rg'] * dr) * np.tan(squint)) * 0.5, 0)\n dx = ground_vel_mid_rg * (1.0 / parameters['commonPRF']) # aximuth sampling in focused image (aprox.) 
((1.0 / parameters['commonPRF']) is deltat from TAXI)\n az = parameters['n_az'] * dx # azimuth extension of image (approx.)\n deltat_dem = posting / ground_vel_mid_rg # sampling in backgeocoded deM\n naz_dem = np.round((az + 10000 + 2 * azMargin * ground_vel_mid_rg + 2 * squintmargin) / posting * 0.5, 0) * 2.0 # azimuth dimension with margin of 10 km\n t0_dem = ts_min - 5000.0 / ground_vel_mid_rg - squintmargin / ground_vel_mid_rg - azMargin # azimuth start time of DEM with 5 km margin\n\n\n return deltat_dem, rd_dem, rs_dem, t0_dem, nrg_dem, naz_dem\n\ndef xyz2rgaz(parameters,dem_xyz,dem_limits,deltat_dem,rd_dem,rs_dem,t0_dem,nrg_dem, naz_dem,is_mono=True,is_bistatic=False,rs_dem_master=0,orbit_master=0,NumThreads=5):\n \"\"\"Transform from cartesian coordinates xyz to rg,az\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n dem_xyz : 3D numpy array\n DEM in cartesian coordiantes, the last dimension represents X,Y,Z. respectively.\n output from get_dem()\n dem_limits : dict\n Limits of the DEM.\n It contains the following keys: {'minlon': ,'maxlon': ,'minlat': ,'maxlat': 0.}\n output from get_dem()\n deltat_dem : float\n Sampling in backgeocoded DEM\n Output from get_params_back_geocoding_dem()\n rd_dem : float\n Range delay of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n rs_dem : float\n Range sampling of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n t0_dem : float\n Azimuth start time of DEM with margin\n Output from get_params_back_geocoding_dem()\n nrg_dem : float\n Range dimensions in pixels\n Output from get_params_back_geocoding_dem()\n naz_dem : float\n Azimuth dimensions in pixels\n Output from get_params_back_geocoding_dem()\n is_mono : bool, optional\n is_bistatic : bool, optional\n rs_dem_master : float\n Range sampling of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n orbit_master : 2D numpy array of dimensions (n,7), optional\n Output from interpol_orbits() saved in parameters['orbit_active']\n NumThreads : int, optional\n Number of threads to use in the parallel processing steps.\n\n Returns\n -------\n rgm: 2D numpy array\n range positions\n azm : 2D numpy array\n Azimuth positions\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('xyz2rgaz')\n log.info('Get range-azimuth matrices ...')\n\n\n #### this is the part of the code that in TAXI is outside the xyz2rgz function\n r_near = parameters['rg_delay'] * constants.c / 2.0\n #r_dem_near = rd_dem * constants.c / 2.0\n \n \n ## check if it is bistatic\n if is_bistatic == False:\n \n rs_local = parameters['spacing_rg']\n rs_dem_local = rs_dem\n else:\n \n r_near = r_near*2.0\n rs_local = 2.0*parameters['spacing_rg']\n #r_dem_near = r_dem_near*2.0\n rs_dem_local = 2.0*rs_dem_master\n\n ###-------------\n\n ### ----- From here the function xyz_rgaz_thread of TAXI ------ \n if is_mono ==True:\n r_near = r_near * 2\n rs_local = rs_local * 2\n\n \n \n dimx = parameters['orbit'].shape[0]\n #prf = parameters['commonPRF']\n t0_start = 0\n orbit_margin = (t0_start - parameters['orbit'][0, 0]) / (1 / parameters['commonPRF'])\n \n\n # extent orbit\n eastwin = np.round(deltat_dem / (1 / parameters['commonPRF']) * 2, 0)\n if eastwin < 90.0: eastwin = 90.0\n if eastwin < 500:\n fill = 500.0\n else:\n fill = eastwin\n\n t1 = np.arange(dimx)\n t2 = np.arange(dimx + fill * 2) - fill\n # interpolate time and 
positions\n func = interpolate.interp1d(t1, np.copy(parameters['orbit'][:, 0]), fill_value=\"extrapolate\")\n ta = func(t2)\n func = interpolate.interp1d(t1, np.copy(parameters['orbit'][:, 1]), fill_value=\"extrapolate\")\n ox = func(t2)\n func = interpolate.interp1d(t1, np.copy(parameters['orbit'][:, 2]), fill_value=\"extrapolate\")\n oy = func(t2)\n func = interpolate.interp1d(t1, np.copy(parameters['orbit'][:, 3]), fill_value=\"extrapolate\")\n oz = func(t2)\n\n if is_mono==True:\n ox_active = np.copy(ox)\n oy_active = np.copy(oy)\n oz_active = np.copy(oz)\n else:\n # if it is the slave, we use the orbit of the master (active)\n func = interpolate.interp1d(t1, orbit_master[:, 1], fill_value=\"extrapolate\")\n ox_active = func(t2)\n func = interpolate.interp1d(t1, orbit_master[:, 2], fill_value=\"extrapolate\")\n oy_active = func(t2)\n func = interpolate.interp1d(t1, orbit_master[:, 3], fill_value=\"extrapolate\")\n oz_active = func(t2)\n\n\n # side vector (for left and right discrimination)\n nadir = -parameters['orbit'][int(dimx * 0.5), 1:4]\n velvec = parameters['orbit'][int(dimx * 0.5), 4:7]\n sidev = np.cross(nadir, velvec)\n sidev = sidev / np.sqrt(np.sum(np.square(sidev)))\n if parameters['look_dir'] == 'left': sidev = -sidev\n\n # preparation inputs for cart2radar\n Na = int(len(ta) / 2) * 2\n ta = ta[0:Na]\n p_tx = np.zeros((len(ta), 3), dtype='double')\n p_tx[:, 0] = ox[0:Na]\n p_tx[:, 1] = oy[0:Na]\n p_tx[:, 2] = oz[0:Na]\n\n if is_mono == True:\n p_rx = np.copy(p_tx)\n else:\n p_rx = np.zeros((len(ta), 3), dtype='double')\n p_rx[:, 0] = ox_active[0:Na]\n p_rx[:, 1] = oy_active[0:Na]\n p_rx[:, 2] = oz_active[0:Na]\n \n \n # get range and azimuth matrices\n r0, indx = geolib.cart2radar(dem_xyz.copy(), ta.copy(), p_tx.copy(),p_rx=np.copy(p_rx),bistatic=1, return_time=False,num_threads=NumThreads)\n\n # create a invalid mask\n invalMask = np.empty_like(r0, dtype='byte')\n invalMask[::] = 1\n invalMask[~np.isnan(r0)] = 0\n invalMask[~np.isnan(indx)] = 0\n\n # convert to image samples\n azm = indx - (orbit_margin + fill)\n rgm = (r0 - r_near) / rs_local\n\n midorbit = np.asarray([ox[int(dimx * 0.5)], oy[int(dimx * 0.5)], oz[int(dimx * 0.5)]])\n\n # checking for antenna direction (make indices on the wrong side negative)\n dotP = (dem_xyz[:, :, 0] - midorbit[0]) * sidev[0] + (dem_xyz[:, :, 1] - midorbit[1]) * sidev[1] + (\n dem_xyz[:, :, 2] - midorbit[2]) * sidev[2]\n sideMask = dotP > 0\n\n # apply masks\n rgm[invalMask == 1] = np.nan\n azm[invalMask == 1] = np.nan\n sideMask[invalMask == 1] = False\n\n rgm[sideMask == False] = np.nan\n azm[sideMask == False] = np.nan\n\n return rgm,azm\n\ndef get_dem_height_from_rg_az(rgm,azm,parameters,dem,deltat_dem,rd_dem,rs_dem,t0_dem,nrg_dem, naz_dem):\n \"\"\"Get the dem height in range/azimuth coordiantes\n\n Warning: This is the Dem sampling not the SLC sampling\n\n Parameters\n ----------\n rgm: 2D numpy array\n range positions\n Output from xyz2rgaz()\n azm : 2D numpy array\n Azimuth positions\n Output from xyz2rgaz()\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension represents:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n Output of get_dem()\n deltat_dem : float\n Sampling in backgeocoded DEM\n Output from get_params_back_geocoding_dem()\n rd_dem : float\n Range delay of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n rs_dem : float\n 
Range sampling of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n t0_dem : float\n Azimuth start time of DEM with margin\n Output from get_params_back_geocoding_dem()\n nrg_dem : float\n Range dimensions in pixels\n Output from get_params_back_geocoding_dem()\n naz_dem : float\n Azimuth dimensions in pixels\n Output from get_params_back_geocoding_dem()\n\n Returns\n -------\n dem_out : 2D numpy array\n dem height in range-azimuth coordinates\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n\n \"\"\"\n\n log = logging.getLogger('get_dem_height_from_rg_az')\n log.info('Get dem in range-azimuth coordinates ...')\n\n\n #incrd = parameters['rg_delay'] - rd_dem\n #rd_dem = parameters['rg_delay'] - incrd\n ts_min = 0 # t0_start from TAXI, which is set to 0\n\n #convert azm/rgm matrices to dem positions\n deltat_img = (1.0/parameters['commonPRF'])\n azmdem = (azm*deltat_img+ts_min-t0_dem)/deltat_dem\n\n rs_img = parameters['spacing_rg']\n rn_img = parameters['rg_delay']*constants.c/2.0\n rn_dem = rd_dem*constants.c/2.0\n rgmdem = (rgm*rs_img+rn_img-rn_dem)/rs_dem\n dem_height = np.copy(dem[:,:,2])\n\n ##make transpose of azmdem/rgdem to be equal as IDL\n azmdem = np.transpose(azmdem)\n rgmdem = np.transpose(rgmdem)\n dem_height = np.transpose(dem_height)\n\n # Use grid data to interpolate to the desired positions\n grid_az_out, grid_rg_out = np.mgrid[0:naz_dem:naz_dem * 1j, 0:nrg_dem:nrg_dem * 1j]\n valid_pos = np.isfinite(azmdem)\n azmdem_ravel = azmdem[valid_pos]\n rgmdem_ravel = rgmdem[valid_pos]\n values = dem_height[valid_pos]\n points = np.zeros((len(azmdem_ravel), 2))\n points[:, 0] = azmdem_ravel\n points[:, 1] = rgmdem_ravel\n dem_out = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n \n return dem_out\n\n\ndef from_dem_dims_to_slc_dims(parameters,dem_radar_coord,nrg_dem,naz_dem,rd_dem,rs_dem,deltat_dem,t0_dem,function_interp='griddata'):\n \"\"\"Transform image in range/azimuth sampling simensions to the input SLC dimensions\n\n Convert and image from the sampling used in the DEM to the SLC sampling\n Warning!: Not be confused by the naming of the varaibles. 
It can be whatever image in radar coordinates with the DEM sampling\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image\n Output of function get_params_from_xml()\n dem_radar_coord : 2D numpy array\n Image in range-azimuth coordinates with the sampling of the DEM used in the processing\n nrg_dem : float\n Range dimensions in pixels\n Output from get_params_back_geocoding_dem()\n naz_dem : float\n Azimuth dimensions in pixels\n Output from get_params_back_geocoding_dem()\n rd_dem : float\n Range delay of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n rs_dem : float\n Range sampling of back-geocoded DEM\n Output from get_params_back_geocoding_dem()\n deltat_dem : float\n Sampling in backgeocoded DEM\n Output from get_params_back_geocoding_dem()\n t0_dem : float\n Azimuth start time of DEM with margin\n Output from get_params_back_geocoding_dem()\n function_interp : str\n type of function to interpolate\n - RectBivariateSpline\n - RegularGridInterpolator\n - griddata\n\n Returns\n -------\n dem_interp_slc : 2D numpy array\n Image in radar coordinates with the same dimensions as the SLC\n\n \"\"\"\n\n log = logging.getLogger('from_dem_dims_to_slc_dims')\n log.info('Interpolate image in DEM dims to SLC image dimensions ...')\n\n #incrd = parameters['rg_delay'] - rd_dem\n #rd_dem = parameters['rg_delay'] - incrd\n \n t0_img = 0\n deltat_img = (1.0 / parameters['commonPRF'])\n\n #get rg positions of the slc\n rgvec_dem = np.linspace(start=rd_dem * constants.c / 2.0,\n stop=(nrg_dem - 1) * rs_dem + (rd_dem * constants.c / 2.0),\n num=int(nrg_dem))\n rgvec_img = np.linspace(start=parameters['rg_delay'] * constants.c / 2.0,\n stop=(parameters['n_rg'] - 1) * parameters['spacing_rg'] + (parameters['rg_delay'] * constants.c / 2.0),\n num=int(parameters['n_rg']))\n f_rgpos = interpolate.interp1d(rgvec_dem, np.arange(nrg_dem))\n rgpos = f_rgpos(rgvec_img)\n\n #get azimuth positions of the slc\n azvec_dem = np.linspace(start=t0_dem,\n stop=(naz_dem - 1) * deltat_dem + t0_dem,\n num=int(naz_dem))\n azvec_img = np.linspace(start=t0_img,\n stop=(parameters['n_az'] - 1) * deltat_img + t0_img,\n num=int(parameters['n_az']))\n\n f_azpos = interpolate.interp1d(azvec_dem, np.arange(naz_dem))\n azpos = f_azpos(azvec_img)\n\n #interpolation to the desired positoins\n if function_interp == 'RectBivariateSpline':\n f_img = interpolate.RectBivariateSpline(np.arange(naz_dem), np.arange(nrg_dem), dem_radar_coord, kx=1, ky=1)\n dem_interp_slc = f_img(azpos, rgpos)\n elif function_interp == 'RegularGridInterpolator':\n f_img = interpolate.RegularGridInterpolator((np.arange(naz_dem), np.arange(nrg_dem)), dem_radar_coord, method='linear', bounds_error=False)\n points_out = np.meshgrid(azpos, rgpos, indexing='ij')\n points_out_list = np.reshape(points_out, (2, -1), order='C').T\n dem_interp_slc = f_img(points_out_list)\n dem_interp_slc = np.reshape(dem_interp_slc, (len(azpos),len(rgpos)))\n else:\n #in other cases we use griddata\n grid_az, grid_rg = np.mgrid[0:naz_dem:naz_dem * 1j, 0:nrg_dem:nrg_dem * 1j]\n grid_az_out_interp, grid_rg_out_interp = np.meshgrid(azpos, rgpos)\n values = dem_radar_coord.ravel()\n points = np.zeros((len(values), 2))\n points[:, 0] = grid_az.ravel()\n points[:, 1] = grid_rg.ravel()\n dem_interp_slc = interpolate.griddata(points, values, (grid_az_out_interp, grid_rg_out_interp), method='linear')\n #tranpose to have azimth and range\n dem_interp_slc = np.transpose(dem_interp_slc)\n\n return dem_interp_slc\n\ndef 
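_example_from_dem_dims_interp():\n    \"\"\"Illustrative sketch only, never called by the processing chain: it checks that\n    the interpolation back-ends offered by from_dem_dims_to_slc_dims()\n    (RectBivariateSpline and RegularGridInterpolator) agree for linear\n    interpolation on a regular grid. All names and values here are toy examples.\n    \"\"\"\n    data = np.arange(12, dtype='double').reshape(3, 4)\n    az, rg = np.linspace(0, 2, 5), np.linspace(0, 3, 7)\n    # bilinear spline (kx=ky=1) evaluated on the finer grid\n    res_spline = interpolate.RectBivariateSpline(np.arange(3), np.arange(4), data, kx=1, ky=1)(az, rg)\n    # same positions through RegularGridInterpolator, as in from_dem_dims_to_slc_dims()\n    f_rgi = interpolate.RegularGridInterpolator((np.arange(3), np.arange(4)), data, method='linear', bounds_error=False)\n    points_out = np.reshape(np.meshgrid(az, rg, indexing='ij'), (2, -1), order='C').T\n    res_rgi = np.reshape(f_rgi(points_out), (len(az), len(rg)))\n    assert np.allclose(res_spline, res_rgi)\n    return res_spline\n\n\ndef 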
get_dem_xyz_flat_earth(dem,dem_limits,dang,NumThreads=5):\n \"\"\" Obtain the flat earth dem in cartesian coordinates\n \n Parameters\n ----------\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension represents:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n dem_limits : dict\n Limits of the DEM.\n It contains the following keys: {'minlon': ,'maxlon': ,'minlat': ,'maxlat': 0.}\n output from get_dem()\n dang : float\n Output from get_dem()\n NumThreads : int, optional\n Number of threads to use in the parallel processing steps.\n\n Returns\n -------\n dem_xyz_flat : 3D numpy array:\n - (rows, cols,0): X\n - (rows, cols,1): Y\n - (rows, cols,2): Z\n \n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n\n log = logging.getLogger('get_dem_xyz_flat_earth')\n log.info('Get flat earth DEM ...')\n\n meanheight = np.median(dem[:, :, 2])\n Ndec = np.double(1.0)\n #posting_flat = dem_posting * Ndec\n nlon_ = dem.shape[0]\n nlat_ = dem.shape[1]\n\n lon_axis = np.linspace(start=dem_limits['minlon']* (np.pi / 180.0),\n stop=(nlon_ - 1) * dang * Ndec + (dem_limits['minlon'] * (np.pi / 180.0)),\n num=nlon_)\n\n lat_axis = np.linspace(start=dem_limits['minlat']* (np.pi / 180.0),\n stop=(nlat_ - 1) * dang * Ndec + (dem_limits['minlat'] * (np.pi / 180.0)),\n num=nlat_)\n \n #lon_axis2 = np.arange(nlon_)*dang*Ndec + (dem_limits['minlon']* (np.pi / 180.0))\n\n # transform to degree\n lon_axis = lon_axis * (180.0/np.pi)\n lat_axis = lat_axis * (180.0/np.pi)\n\n dem_xyz_flat = np.empty_like(dem)\n dem_xyz_flat[::] = np.nan\n aux_axis_height = np.copy(np.repeat(meanheight, nlon_))\n for i_lat, value_i_lat in enumerate(lat_axis):\n demllh = np.zeros((nlon_, 1, 3))\n demllh[:, 0, 0] = np.copy(lon_axis) # lon\n demllh[:, 0, 1] = np.copy(np.repeat(value_i_lat, nlon_)) # lat\n demllh[:, 0, 2] = aux_axis_height # height\n dem_xyz_flat_aux = geolib.ellip2cart(demllh.copy(),num_threads=NumThreads)\n dem_xyz_flat[:, i_lat, 0] = dem_xyz_flat_aux[:, 0, 0]\n dem_xyz_flat[:, i_lat, 1] = dem_xyz_flat_aux[:, 0, 1]\n dem_xyz_flat[:, i_lat, 2] = dem_xyz_flat_aux[:, 0, 2]\n\n return dem_xyz_flat\n\n\ndef compute_slant_phase_flat(parameters,parameters_slave,rgm_flat_master,azm_flat_master,rgm_flat_slave,\n nrg_dem,naz_dem,rd_dem,rs_dem,deltat_dem,t0_dem):\n \"\"\"Compute phase of phase flat in slant range geometry\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n parameters_slave : dict\n Dictionary with parameters of the slave image.\n Output of function get_params_from_xml()\n rgm_flat_master : 2D numpy array\n output from xyz2rgaz\n azm_flat_master : 2D numpy array\n output from xyz2rgaz\n rgm_flat_slave : 2D numpy array\n output from xyz2rgaz\n nrg_dem : float\n Range dimensions in pixels\n Output from get_params_back_geocoding_dem()\n naz_dem : float\n Azimuth dimensions in pixels\n Output from get_params_back_geocoding_dem()\n rd_dem : float\n Range delay of back-geocoded DEM\n output from get_params_back_geocoding_dem()\n rs_dem : float\n Range sampling of back-geocoded DEM\n output from get_params_back_geocoding_dem()\n deltat_dem : float\n Sampling in backgeocoded DEM\n output from get_params_back_geocoding_dem()\n t0_dem: float\n Azimuth start time of DEM with margin\n output from get_params_back_geocoding_dem()\n\n Returns\n -------\n slantphaseflat : 2D numpy array\n Phase flat in slant range geometry\n\n Notes\n 
-------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('compute_slant_phase_flat')\n log.info('Compute slant phase flat ...')\n\n\n ts_min = 0 # t0_start from TAXI, which is set to 0\n\n # convert azm/rgm matrices to dem positions\n deltat_img = (1.0 / parameters['commonPRF'])\n azmdem = (azm_flat_master * deltat_img + ts_min - t0_dem) / deltat_dem\n\n rs_img_master = parameters['spacing_rg']\n rn_img_master = parameters['rg_delay'] * constants.c / 2.0\n rn_dem = rd_dem * constants.c / 2.0\n rgmdem = (rgm_flat_master * rs_img_master + rn_img_master - rn_dem) / rs_dem\n\n rn_img_slave = parameters_slave['rg_delay'] * constants.c / 2.0\n\n # ##make transpose of azmdem/rgdem to be equal as IDL ????\n azmdem = np.transpose(azmdem)\n rgmdem = np.transpose(rgmdem)\n rgm_flat_slave = np.transpose(rgm_flat_slave)\n rgm_flat_master = np.transpose(rgm_flat_master)\n\n\n if parameters['spacing_rg'] == parameters_slave['spacing_rg']:\n values_matrix = rgm_flat_master - rgm_flat_slave\n # Use grid data to interpolate to the desired positions\n grid_az_out, grid_rg_out = np.mgrid[0:naz_dem:naz_dem * 1j, 0:nrg_dem:nrg_dem * 1j]\n valid_pos = np.isfinite(azmdem)\n azmdem_ravel = azmdem[valid_pos]\n rgmdem_ravel = rgmdem[valid_pos]\n\n values = values_matrix[valid_pos]\n points = np.zeros((len(azmdem_ravel), 2))\n points[:, 0] = azmdem_ravel\n points[:, 1] = rgmdem_ravel\n data = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n slantphaseflat = -4 * np.pi / (constants.c / parameters['f0']) * (rn_img_master - rn_img_slave + data * rs_img_master)\n else:\n # In case the row spacing is different for master and slave\n\n # Use grid data to interpolate to the desired positions\n grid_az_out, grid_rg_out = np.mgrid[0:naz_dem:naz_dem * 1j, 0:nrg_dem:nrg_dem * 1j]\n valid_pos = np.isfinite(azmdem)\n azmdem_ravel = azmdem[valid_pos]\n rgmdem_ravel = rgmdem[valid_pos]\n\n ## For master\n values_master = rgm_flat_master[valid_pos]\n points = np.zeros((len(azmdem_ravel), 2))\n points[:, 0] = azmdem_ravel\n points[:, 1] = rgmdem_ravel\n datam = interpolate.griddata(points, values_master, (grid_az_out, grid_rg_out), method='linear')\n\n ## For slave\n values_master = rgm_flat_slave[valid_pos]\n points = np.zeros((len(azmdem_ravel), 2))\n points[:, 0] = azmdem_ravel\n points[:, 1] = rgmdem_ravel\n datas = interpolate.griddata(points, values_master, (grid_az_out, grid_rg_out), method='linear')\n\n slantphaseflat = -4 * np.pi / (constants.c / parameters['f0']) * (rn_img_master - rn_img_slave + datam*rs_img_master-datas*parameters_slave['spacing_rg'])\n\n return slantphaseflat\n\n\ndef rgz2xyz(parameters,rgm,azm,dem_xyz,deltat_dem,rd_dem,rs_dem,t0_dem,nrg_dem, naz_dem):\n \"\"\"Transform from slant/range to cartesian coordinate\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n rgm : 2D numpy array\n range positions\n Output from xyz2rgaz()\n azm : 2D numpy array\n Azimuth positions\n Output from xyz2rgaz()\n dem_xyz : 3D numpy array\n dem in cartesian coordaintes\n ouput from get_dem()\n deltat_dem : float\n Sampling in backgeocoded DEM\n output from get_params_back_geocoding_dem()\n rd_dem : float\n Range delay of back-geocoded DEM\n output from get_params_back_geocoding_dem()\n rs_dem : float\n Range sampling of back-geocoded DEM\n output from get_params_back_geocoding_dem()\n t0_dem: float\n Azimuth start time of DEM with margin\n 
Output from get_params_back_geocoding_dem()\n    nrg_dem : float\n        Range dimensions in pixels\n        Output from get_params_back_geocoding_dem()\n    naz_dem : float\n        Azimuth dimensions in pixels\n        Output from get_params_back_geocoding_dem()\n\n    Returns\n    -------\n    dem_xyz_slr : 3D numpy array\n        - (rows, cols,0): X\n        - (rows, cols,1): Y\n        - (rows, cols,2): Z\n\n    Notes\n    -------\n    Author : Victor Cazcarra-Bes ([email protected])\n    Date : May 2020\n\n    \"\"\"\n\n    log = logging.getLogger('rgz2xyz')\n    log.info('Get XYZ coordinates of DEM in slant-range geometry ...')\n\n\n    ts_min = 0  # t0_start from TAXI, which is set to 0\n\n    # convert azm/rgm matrices to DEM positions\n    deltat_img = (1.0/parameters['commonPRF'])\n    azmdem = (azm*deltat_img+ts_min-t0_dem)/deltat_dem\n\n    rs_img = parameters['spacing_rg']\n    rn_img = parameters['rg_delay']*constants.c/2.0\n    rn_dem = rd_dem*constants.c/2.0\n    rgmdem = (rgm*rs_img+rn_img-rn_dem)/rs_dem\n\n    ## transpose azmdem/rgmdem to match the IDL implementation\n    azmdem = np.transpose(azmdem)\n    rgmdem = np.transpose(rgmdem)\n\n    grid_az_out, grid_rg_out = np.mgrid[0:naz_dem:naz_dem * 1j, 0:nrg_dem:nrg_dem * 1j]\n    valid_pos = np.isfinite(azmdem)\n    azmdem_ravel = azmdem[valid_pos]\n    rgmdem_ravel = rgmdem[valid_pos]\n    points = np.zeros((len(azmdem_ravel), 2))\n    points[:, 0] = azmdem_ravel\n    points[:, 1] = rgmdem_ravel\n\n    # output matrix\n    dem_xyz_slr = np.zeros((int(naz_dem), int(nrg_dem), 3))\n\n    # interpolate x, y, z separately\n    for i_position in range(3):\n        dem_xyz_aux = np.transpose(np.copy(dem_xyz[:,:,i_position]))\n        values = dem_xyz_aux[valid_pos]\n        dem_xyz_slr[:,:,i_position] = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n\n    return dem_xyz_slr\n\n\ndef get_offsets(rgm,azm,rgs,azs,parameters,parameters_slave,dem,deltat_dem,rd_dem,rs_dem,t0_dem,nrg_dem, naz_dem):\n    \"\"\"Computation of the azimuth and range offsets and of the synthetic phase\n\n    Parameters\n    ----------\n    rgm : 2D numpy array\n        Range positions of the master\n        Output from xyz2rgaz()\n    azm : 2D numpy array\n        Azimuth positions of the master\n        Output from xyz2rgaz()\n    rgs : 2D numpy array\n        Range positions of the slave\n        Output from xyz2rgaz()\n    azs : 2D numpy array\n        Azimuth positions of the slave\n        Output from xyz2rgaz()\n    parameters : dict\n        Dictionary with parameters of the master image.\n        Output of function get_params_from_xml()\n    parameters_slave : dict\n        Dictionary with parameters of the slave image.\n        Output of function get_params_from_xml()\n    dem : 3D numpy array\n        DEM in the form of a 3D array, where the last dimension represents:\n        - (rows, cols,0): Longitude\n        - (rows, cols,1): Latitude\n        - (rows, cols,2): Height\n    deltat_dem : float\n        Sampling in backgeocoded DEM\n        Output from get_params_back_geocoding_dem()\n    rd_dem : float\n        Range delay of back-geocoded DEM\n        Output from get_params_back_geocoding_dem()\n    rs_dem : float\n        Range sampling of back-geocoded DEM\n        Output from get_params_back_geocoding_dem()\n    t0_dem : float\n        Azimuth start time of DEM with margin\n        Output from get_params_back_geocoding_dem()\n    nrg_dem : float\n        Range dimensions in pixels\n        Output from get_params_back_geocoding_dem()\n    naz_dem : float\n        Azimuth dimensions in pixels\n        Output from get_params_back_geocoding_dem()\n\n    Returns\n    -------\n    az_offset : 2D numpy array\n        Azimuth offsets\n    rg_offset : 2D numpy array\n        Range offsets\n    synth_phase : 2D numpy array\n        Synthetic phase in slant range geometry\n\n    Notes\n    -------\n    Author : Victor Cazcarra-Bes ([email protected])\n    Date : May 2020\n           January 2021 - Added azimuth offsets and synthetic phase\n\n    \"\"\"\n\n    log = logging.getLogger('get_offsets')\n    log.info('Get azimuth offsets 
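...')\n\n    # In short: the offsets are master-minus-slave radar coordinates of the same\n    # DEM point, interpolated onto the regular back-geocoded DEM grid:\n    #   az_offset = az_master - az_slave   [azimuth pixels]\n    #   rg_offset = rg_master - rg_slave   [range pixels]\n    # and the synthetic phase is the corresponding range difference scaled by\n    # -4*pi/lambda\n    log.info('computing offsets as master minus slave in radar coordinates 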
...')\n\n\n    # incrd = parameters['rg_delay'] - rd_dem\n    # rd_dem = parameters['rg_delay'] - incrd\n    ts_min = 0  # t0_start from TAXI, which is set to 0\n\n    # convert azm/rgm matrices to DEM positions\n    deltat_img = (1.0 / parameters['commonPRF'])\n    azmdem = (azm * deltat_img + ts_min - t0_dem) / deltat_dem\n\n    rs_img = parameters['spacing_rg']\n    rs_img_slave = parameters_slave['spacing_rg']\n    rn_img = parameters['rg_delay'] * constants.c / 2.0\n    rn_img_slave = parameters_slave['rg_delay'] * constants.c / 2.0\n    rn_dem = rd_dem * constants.c / 2.0\n    rgmdem = (rgm * rs_img + rn_img - rn_dem) / rs_dem\n\n    ## transpose azmdem/rgmdem to match the IDL implementation\n    azmdem = np.transpose(azmdem)\n    rgmdem = np.transpose(rgmdem)\n    azm = np.transpose(azm)\n    azs = np.transpose(azs)\n\n    azdif = np.copy(azm-azs)\n    valid_pos = np.isfinite(azm) * np.isfinite(azs)\n\n    ## 1- get offset in azimuth\n\n    # Use griddata to interpolate to the desired positions\n    grid_az_out, grid_rg_out = np.mgrid[0:naz_dem:naz_dem * 1j, 0:nrg_dem:nrg_dem * 1j]\n    azmdem_ravel = azmdem[valid_pos]\n    rgmdem_ravel = rgmdem[valid_pos]\n    values = azdif[valid_pos]\n    points = np.zeros((len(azmdem_ravel), 2))\n    points[:, 0] = azmdem_ravel\n    points[:, 1] = rgmdem_ravel\n    az_offset = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n\n    ## 2- get offset in range and the synthetic phase in the slant-range plane\n    rgm = np.transpose(rgm)\n    rgs = np.transpose(rgs)\n    if parameters['spacing_rg'] == parameters_slave['spacing_rg']:\n        rgdif = np.copy(rgm-rgs)\n        valid_pos = np.isfinite(rgm) * np.isfinite(rgs)\n        values = rgdif[valid_pos]\n        rg_offset = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n        synth_phase = (-4*np.pi/(constants.c / parameters['f0']))*(rn_img-rn_img_slave+rg_offset*rs_img)\n    else:\n        # In case the range spacing is different for master and slave\n        valid_pos = np.isfinite(rgm) * np.isfinite(rgs)\n        values = rgm[valid_pos]\n        rgm_offset = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n        values = rgs[valid_pos]\n        rgs_offset = interpolate.griddata(points, values, (grid_az_out, grid_rg_out), method='linear')\n\n        rg_offset = rgm_offset-rgs_offset\n        synth_phase = (-4 * np.pi / (constants.c / parameters['f0'])) * (rn_img - rn_img_slave + rgm_offset*rs_img - rgs_offset*rs_img_slave)\n\n    return az_offset,rg_offset,synth_phase\n\n\ndef get_baselines(parameters,parameters_slave,dem_xyz_slr,az_offset,deltat_dem,t0_dem):\n    \"\"\"Get baseline parameters\n\n    Parameters\n    ----------\n    parameters : dict\n        Dictionary with parameters of the master image.\n        Output of function get_params_from_xml()\n    parameters_slave : dict\n        Dictionary with parameters of the slave image.\n        Output of function get_params_from_xml()\n    dem_xyz_slr : 3D numpy array\n        Cartesian coordinates of the dem in slant range geometry\n        Output of rgz2xyz()\n    az_offset : 2D numpy array\n        Azimuth offsets\n        Output from get_offsets()\n    deltat_dem : float\n        Sampling in backgeocoded DEM\n        Output from get_params_back_geocoding_dem()\n    t0_dem : float\n        Azimuth start time of DEM with margin\n        Output from get_params_back_geocoding_dem()\n\n    Returns\n    -------\n    baseline : 2D numpy array\n        Baseline\n    bpar : 2D numpy array\n        Parallel baseline\n    bperp : 2D numpy array\n        Perpendicular baseline\n    kz : 2D numpy array\n        Vertical wavenumber\n    thetainc : 2D numpy array\n        Incidence angle\n\n    Notes\n    -------\n    Author : Victor Cazcarra-Bes ([email protected])\n    Date : May 2020\n\n    \"\"\"\n\n\n    log = logging.getLogger('get_baselines')\n    log.info('Get products: kz, perpendicular 
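baselines, incidence angle ...')\n\n    # Geometry sketch of the per-azimuth-line computation below (standard InSAR\n    # definitions):\n    #   B     = S2 - S1                      baseline vector between the two orbits\n    #   bpar  = component of B along the line of sight (parallel baseline)\n    #   bperp = component of B orthogonal to the line of sight, taken in the\n    #           plane perpendicular to the velocity vector\n    #   kz    = -4*pi/lambda * bperp / (r * sin(thetainc))\n    log.info('computing per azimuth line: kz, perpendicular/parallel 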
baselines,incidence angle ...')\n\n # inputs\n orbitm = parameters['orbit']\n orbits = 0.5 * (parameters_slave['orbit'] + parameters['orbit_active'])\n x = dem_xyz_slr[:, :, 0]\n y = dem_xyz_slr[:, :, 1]\n z = dem_xyz_slr[:, :, 2]\n azoffm = 0\n azm = 0\n azs = 0\n deltat = np.copy(deltat_dem)\n deltatm = 1.0 / parameters['commonPRF']\n # t0_dem\n t0_img = 0\n t0_orbit = parameters['orbit'][0, 0]\n offset_img = np.round(-parameters_slave['orbit'][0, 0] / (1.0 / parameters_slave['commonPRF']), 0)\n azoff = np.copy(az_offset)\n\n nrows = x.shape[0]\n ncols = x.shape[1]\n\n # outputs\n kz = np.zeros((nrows, ncols))\n thetainc = np.zeros((nrows, ncols))\n bperp = np.zeros((nrows, ncols))\n bpar = np.zeros((nrows, ncols))\n baseline = np.zeros((nrows, ncols))\n\n offset_img_master = round((t0_img - t0_orbit) / deltatm, 0)\n\n for i_row in range(nrows):\n # azimuth shift pixels\n Daz = azoff[i_row, :]\n mpos = (i_row * deltat - (t0_img - t0_dem)) / deltatm # master position (at full PRF)\n az = np.clip(mpos - Daz + offset_img, 0, orbits.shape[0] - 2)\n azm = np.clip(mpos + offset_img_master, 0, orbitm.shape[0] - 2)\n\n # linear interpolation between adjancent positions\n az_int = np.copy(np.asarray(az, np.int64))\n S2a = orbits[az_int, 1:4]\n S2b = orbits[az_int + 1, 1:4]\n az_aux2 = np.reshape(np.repeat(az - az_int, 3), (ncols, 3))\n S2 = S2a + az_aux2 * (S2a - S2b)\n\n azm = np.zeros(ncols) + azm\n azm_int = np.asarray(azm, np.int64)\n S1a = orbitm[azm_int, 1:4]\n S1b = orbitm[azm_int + 1, 1:4]\n az_aux1 = np.reshape(np.repeat(azm - azm_int, 3), (ncols, 3))\n S1 = S1a + az_aux1 * (S1a - S1b)\n\n B = S2 - S1\n\n # baseline plane\n\n # velocity vector (along\n v = orbitm[azm_int, 4:7]\n v_norm = np.reshape(np.repeat(np.squeeze(np.linalg.norm(v, axis=1, keepdims=True)), 3), (ncols, 3))\n v = v / v_norm\n\n # Orthogonal to the c laying in the plane of V and B\n Bmod2 = np.reshape(np.repeat(np.sum(np.square(B), 1), 3), (ncols, 3)) # square of baselines\n dotvB2 = np.reshape(np.repeat(np.square(np.sum(v * B, 1)), 3), (ncols, 3)) # dot product between v and B\n aux1 = np.reshape(np.repeat(np.sum(v * B, 1), 3), (ncols, 3))\n a = (B - aux1 * v) / np.sqrt(Bmod2 - dotvB2)\n\n # orthogonal to the other two\n aperp = np.cross(a, v)\n\n # target orbit vector\n P = np.zeros((ncols, 3), 'double')\n P[:, 0] = np.copy(x[i_row, :] - S1[:, 0])\n P[:, 1] = np.copy(y[i_row, :] - S1[:, 1])\n P[:, 2] = np.copy(z[i_row, :] - S1[:, 2])\n\n # vector on baseline plane\n cross_P_aperp = np.cross(P, aperp)\n norm_aperp = np.reshape(np.repeat(np.squeeze(np.linalg.norm(aperp, axis=1, keepdims=True)), 3), (ncols, 3))\n cross_P_aperp_norm = cross_P_aperp / norm_aperp\n Pav = np.cross(aperp, cross_P_aperp_norm) / norm_aperp\n\n # vector perpendicular to baseline plane\n Ppv = np.reshape(np.repeat(np.sum(P * aperp, 1), 3), (ncols, 3)) * aperp / np.reshape(\n np.repeat(np.sum(np.square(aperp), 1), 3), (ncols, 3))\n\n # pararllel and perpendicular baselines\n Pa = np.sum(Pav * a, 1)\n Pp = np.sum(Ppv * aperp, 1)\n\n # vector\n BparallelV = (np.reshape(np.repeat(Pa, 3), (ncols, 3)) * a) + (np.reshape(np.repeat(Pp, 3), (ncols, 3)) * aperp)\n # Normalization\n BparallelV = BparallelV / np.reshape(\n np.repeat(np.squeeze(np.linalg.norm(BparallelV, axis=1, keepdims=True)), 3), (ncols, 3))\n\n Ba = np.sum(B * a, 1)\n Bparallel = Ba * np.sum(a * BparallelV, 1)\n bperpv = np.cross(BparallelV, v)\n Bperpend = Ba * np.sum(a * bperpv, 1)\n\n # assign to putput matrices\n baseline[i_row, :] = np.sqrt(np.sum(np.square(B), 1))\n bpar[i_row, :] 
= Bparallel\n bperp[i_row, :] = Bperpend\n\n # compute Kz and incidence angle\n\n # orbit-target vector\n Vec1 = np.zeros((ncols, 3), 'double')\n Vec1[:, 0] = np.copy(S1[:, 0] - x[i_row, :])\n Vec1[:, 1] = np.copy(S1[:, 1] - y[i_row, :])\n Vec1[:, 2] = np.copy(S1[:, 2] - z[i_row, :])\n\n # Target position vector\n Vec2 = np.zeros((ncols, 3), 'double')\n Vec2[:, 0] = np.copy(x[i_row, :])\n Vec2[:, 1] = np.copy(y[i_row, :])\n Vec2[:, 2] = np.copy(z[i_row, :])\n\n # incidence angle\n thetainc[i_row, :] = np.arccos(\n np.sum(Vec1 * Vec2, 1) / (np.sqrt(np.sum(np.square(Vec1), 1)) * np.sqrt(np.sum(np.square(Vec2), 1))))\n\n # kz\n rm = np.sqrt(np.sum(np.square(Vec1), 1))\n kz[i_row, :] = -4 * np.pi / (constants.c / parameters['f0']) * bperp[i_row, :] / (rm * np.sin(thetainc[i_row, :]))\n\n # remove invalid positions\n pos_invalid = np.isnan(x)\n\n kz[pos_invalid] = np.nan\n thetainc[pos_invalid] = np.nan\n bperp[pos_invalid] = np.nan\n bpar[pos_invalid] = np.nan\n baseline[pos_invalid] = np.nan\n\n return baseline,bpar,bperp,kz,thetainc \n\n\ndef add_same_offset_images(parameters,parameters_slave,rd_dem,rd_dem_slave,t0_dem):\n \"\"\"Get offsets of the dem for master and slave\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n parameters_slave : dict\n Dictionary with parameters of the slave image.\n Output of function get_params_from_xml()\n rd_dem : float\n Range sampling of back-geocoded DEM for the master\n Output from get_params_back_geocoding_dem()\n rd_dem_slave: float\n Range sampling of back-geocoded DEM for the slave\n Output from get_params_back_geocoding_dem()\n t0_dem\n Azimuth start time of DEM with margin\n Output from get_params_back_geocoding_dem()\n\n Returns\n -------\n rd_dem : float\n rd_dem_slave : float\n t0_dem : float\n t0_dem_slave : float\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('add_same_offset_images')\n log.info('Add same offset to the images ...')\n\n \n incrd = parameters['rg_delay'] - rd_dem\n rd_dem = parameters['rg_delay'] - incrd\n rd_dem_slave = parameters_slave['rg_delay'] - incrd\n ts_min = 0 # t0_start from TAXI, which is set to 0\n inct = ts_min - t0_dem\n t0_dem = ts_min - inct\n t0_dem_slave = ts_min - inct\n\n return rd_dem,rd_dem_slave,t0_dem,t0_dem_slave\n\n\ndef blocks_griddata_2d(points, values, xi, method='linear', fill_value=np.nan,rescale=False, cuts='auto', extra_space=0.25):\n \"\"\"Griddata by bloks\n\n It computes the griddata from scipy by blocks to avoid memory problems when the image is too big\n\n Parameters\n ----------\n points\n values\n xi\n method\n fill_value\n rescale\n cuts\n extra_space\n\n Returns\n -------\n out\n Interpolated image\n\n Notes\n -------\n Author : Alberto Alonso-Gonzalez\n\n \"\"\"\n\n log = logging.getLogger('blocks_griddata_2d')\n log.info('interpolate.griddata by blocks ...')\n\n if isinstance(points, tuple):\n log.info(\"Input values as tuple, converting to numpy array\")\n assert (len(points) == 2)\n points = np.asarray([points[0], points[1]]).T\n\n if isinstance(cuts, str):\n if cuts.capitalize() == 'Auto':\n # Try to separate into blocks of approx 2 million points\n nc1 = np.max([np.int(np.round(np.sqrt(points.shape[0] / 2e6))), 1])\n nc2 = np.max([np.int(np.round(points.shape[0] / nc1 / 2e6)), 1])\n cuts = [nc1, nc2]\n else:\n raise ValueError(\"invalid argument string for cuts param: '{}'\".format(cuts))\n\n extra_space = 
np.float(extra_space)\n assert (extra_space >= 0.0)\n ndims = points.shape[1]\n assert (ndims == 2)\n log.info(\"Points shape: {}\".format(points.shape))\n log.info(\"Using {} blocks.\".format(cuts))\n mins = np.asarray([np.min(xi[0].ravel()), np.min(xi[1].ravel())])\n maxs = np.asarray([np.max(xi[0].ravel()), np.max(xi[1].ravel())])\n out_shape = xi[0].shape\n out = np.empty(out_shape, dtype=values.dtype)\n out.fill(fill_value)\n widths = maxs - mins\n for i in range(cuts[0]):\n min1 = mins[0] + (np.float(i) - extra_space) * widths[0] / cuts[0]\n max1 = mins[0] + (i + 1.0 + extra_space) * widths[0] / cuts[0]\n minpo1 = mins[0] + np.float(i) * widths[0] / cuts[0]\n maxpo1 = mins[0] + (i + 1.0) * widths[0] / cuts[0]\n for j in range(cuts[1]):\n min2 = mins[1] + (j - extra_space) * widths[1] / cuts[1]\n max2 = mins[1] + (j + 1.0 + extra_space) * widths[1] / cuts[1]\n minpo2 = mins[1] + np.float(j) * widths[1] / cuts[1]\n maxpo2 = mins[1] + (j + 1.0) * widths[1] / cuts[1]\n #log.info(\"Values outer: ({}-{}, {}-{})\".format(min1, max1, min2, max2))\n #log.info(\"Values inner: ({}-{}, {}-{})\".format(minpo1, maxpo1, minpo2, maxpo2))\n vpoints = np.where(\n (points[:, 0] <= max1) & (points[:, 0] >= min1) & (points[:, 1] <= max2) & (points[:, 1] >= min2))\n if (vpoints[0].size > 2):\n vxi = np.where((xi[0] <= maxpo1) & (xi[0] >= minpo1) & (xi[1] <= maxpo2) & (xi[1] >= minpo2))\n out[vxi] = interpolate.griddata(points[vpoints[0], :], values[vpoints[0]], (xi[0][vxi], xi[1][vxi]), method=method,\n fill_value=fill_value, rescale=rescale)\n return out\n\n\ndef geocoding_radar_image(image_radar_coord,parameters,dem,NumThreads=5,margin=0.05,pixels_spacing=10,pixels_border_to_remove=50):\n \"\"\"Geocoding radar image\n\n It uses the newtonbackgeo lib\n\n Parameters\n ----------\n image_radar_coord: 2D numpy array\n Image in radar coordaintes with SLC dimensions or a re-scaled version of it.\n Warning: A crop of the image is not accepted.\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n WARNING!: It must include the interpolated orbits generated in interpol_orbits()\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension represents:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n NumThreads : int, optional\n Number of threads to use in the parallel processing steps.\n margin_degrees : float, optional\n Margin in degrees or meters (repect to the limits of the radr image) for the resulting image\n pixels_spacing : float\n Desired pixels spacing for the resulting image\n pixels_border_to_remove : int\n Number of pixels to remove in each border after geocoding\n This is done to remove the wrong pixels in the borders due to errors\n in the interpolation procedure during the geocoding\n\n\n Returns\n -------\n image_geo_coord: 2D numpy array\n Image in lat lot coordinates\n row_axis_coord 1D numpy array\n Latitude values for the columns of image_geo_coord\n col_axis_coord: 1D numpy array\n Longitude values for the rows of image_geo_coord\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('geocoding_radar_image')\n log.info('Geocoding a radar image ...')\n\n #dem for the geocoding\n dem_llh = np.copy(dem)\n dem = None\n dem_xyz = geolib.ellip2cart(dem_llh, num_threads=NumThreads)\n\n newton = NewtonBackgeocoder(dem_llh, dem_xyz, num_threads=NumThreads)\n dem_llh = None\n dem_xyz = None\n\n #satellite 
positoins\n p_sat = np.zeros((int(parameters['n_az']), 3), dtype='double')\n p_sat[:, 0] = np.copy(parameters['orbit_slc'][:, 1])\n p_sat[:, 1] = np.copy(parameters['orbit_slc'][:, 2])\n p_sat[:, 2] = np.copy(parameters['orbit_slc'][:, 3])\n\n #satellite velocity\n v_sat = np.zeros((int(parameters['n_az']), 3), dtype='double')\n v_sat[:, 0] = np.copy(parameters['orbit_slc'][:, 4])\n v_sat[:, 1] = np.copy(parameters['orbit_slc'][:, 5])\n v_sat[:, 2] = np.copy(parameters['orbit_slc'][:, 6])\n\n #range to each pixel\n r0 = np.copy(parameters['range_vec'])\n\n #Make the backgeocoding\n log.info('Get irregular grid lon/lat/height <--> range/azimuth ...')\n\n ##reduce image\n n_az, n_rg = image_radar_coord.shape\n\n # Check if the image is re-scales respect to the original slc dimensions\n factor_rescale_rg = n_rg / parameters['n_rg']\n factor_rescale_az = n_az / parameters['n_az']\n\n #Reduce orbit parameters by the correspongin size\n p_sat = ndimage.zoom(p_sat, (factor_rescale_az, 1), order=0, mode='nearest')\n v_sat = ndimage.zoom(v_sat, (factor_rescale_az, 1), order=0, mode='nearest')\n r0 = ndimage.zoom(r0, (factor_rescale_rg), order=0, mode='nearest')\n\n #output matrices\n grid_xyz = np.zeros((n_az,n_rg, 3), dtype='double')\n err = np.zeros((n_az,n_rg), dtype='double')\n #number of azimuth to be computed at each call to the backgeocoded method. By default i add 500,\n # if it is too big then it take much more time than doing it in smaller blocks\n n_azs_for_geocod = 1000\n for i_az in range(0,n_az,n_azs_for_geocod):\n #get the pixels of the block\n if i_az == n_az - np.mod(n_az,n_azs_for_geocod):\n pixel_end = n_az\n else:\n pixel_end = i_az+n_azs_for_geocod\n #make the geocoding of the corresponding block\n grid_xyz_aux, err_aux = newton.backgeocode(p_sat[i_az:pixel_end,:], v_sat[i_az:pixel_end,:], r0)\n grid_xyz[i_az:pixel_end, :, :] = np.copy(grid_xyz_aux)\n err[i_az:pixel_end, :] = np.copy(err_aux)\n\n p_sat = None\n v_sat = None\n r0 = None\n err = None\n\n log.info('Get regular grid for image in lat/lon coordiantes ...')\n\n # transform to lon lat\n grid_llh = geolib.cart2ellip(grid_xyz.copy(), num_threads=NumThreads)\n #\n grid_xyz = None\n\n # check size\n assert (grid_llh.shape[0] == n_az)\n assert (grid_llh.shape[1] == n_rg)\n\n # get coorners and add some margin\n min_lon = np.min(grid_llh[:, :, 0]) - margin\n max_lon = np.max(grid_llh[:, :, 0]) + margin\n min_lat = np.min(grid_llh[:, :, 1]) - margin\n max_lat = np.max(grid_llh[:, :, 1]) + margin\n\n # Seelct the number of lats and lons depending on the pixel spacing provided as an input\n\n ##to get distance between latlon points using pyproj\n # geod = pyproj.Geod(ellps='WGS84')\n # ang1,ang2,dist_cols = geod.inv(min_lon,min_lat,max_lon,min_lat)\n # ang1, ang2, dist_rows = geod.inv(min_lon, min_lat, min_lon, max_lat)\n\n # get distance for each dimension using ECEF coordinates\n corners_llh = np.zeros((2, 2, 3))\n corners_llh[0, 0, 0] = min_lon # lon\n corners_llh[0, 0, 1] = min_lat # lat\n corners_llh[0, 1, 0] = min_lon\n corners_llh[0, 1, 1] = max_lat\n corners_llh[1, 0, 0] = max_lon\n corners_llh[1, 0, 1] = min_lat\n corners_llh[1, 1, 0] = max_lon\n corners_llh[1, 1, 1] = max_lat\n corners_xyz = geolib.ellip2cart(corners_llh.copy(), num_threads=NumThreads)\n dist_rows = np.sqrt((np.square(corners_xyz[0, 0, 0] - corners_xyz[0, 1, 0])) +\n (np.square(corners_xyz[0, 0, 1] - corners_xyz[0, 1, 1])) +\n (np.square(corners_xyz[0, 0, 2] - corners_xyz[0, 1, 2])))\n dist_cols = np.sqrt((np.square(corners_xyz[0, 0, 0] - 
corners_xyz[1, 0, 0])) +\n (np.square(corners_xyz[0, 0, 1] - corners_xyz[1, 0, 1])) +\n (np.square(corners_xyz[0, 0, 2] - corners_xyz[1, 0, 2])))\n\n # define the numer of lons (rows) and lats (cols) depending in the pixels_spacing\n n_rows = int(np.round(dist_rows / pixels_spacing, 0))\n n_cols = int(np.round(dist_cols / pixels_spacing, 0))\n\n\n # ##interpolation to a regular grid\n\n #grid output image\n grid_x, grid_y = np.mgrid[min_lon:max_lon:n_cols * 1j, min_lat:max_lat:n_rows * 1j]\n # get valules and points for grid data\n valid_pos = np.isfinite(grid_llh[:, :, 0]) * np.isfinite(grid_llh[:, :, 1])\n values = image_radar_coord[valid_pos]\n points = np.zeros((len(values), 2))\n points[:, 0] = grid_llh[valid_pos,0]\n points[:, 1] = grid_llh[valid_pos,1]\n grid_llh = None\n\n #genreate axis of the image in geografical coordinates\n col_axis_coord = np.linspace(min_lon,max_lon,n_cols)\n row_axis_coord = np.linspace(min_lat,max_lat,n_rows)\n\n\n\n #check the number of input poitns of grid data\n if len(values) < 20e6:\n #then we use normal griddata, asuming that we can do it\n try:\n image_geo_coord = interpolate.griddata(points, values, (grid_x, grid_y), method='linear')\n\n except:\n log.error('Error in grid data to much points for interpolation')\n\n else:\n #use grid data by blocks as the number of points is too big\n image_geo_coord = blocks_griddata_2d(points, values, (grid_x, grid_y), method='linear')\n\n\n # Remove borders of the image, due to potential errors in the interpolation doen in the processing of each individual image\n # 1- Get a mask of the finite points\n pos_finite_numbers = np.isfinite(image_geo_coord)\n # using a uniform filter we decrease the area of valid pixels rom the border\n mask_borders = ndimage.uniform_filter(np.asarray(pos_finite_numbers, 'float'), pixels_border_to_remove)\n mask_borders[mask_borders < 0.95] = 0\n # where the mask is 0 we consider as invalid\n image_geo_coord[mask_borders == 0] = np.nan\n\n\n\n return image_geo_coord,row_axis_coord,col_axis_coord\n\n\ndef compute_noise_decorrelation(resolution,parameters,slc,thetainc,rg_slope):\n \"\"\"Compute noise decorrelation\n\n Parameters\n ----------\n resolution : float\n Desired resolution.\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n slc : 2d numpy array\n SLC image\n thetainc : 2D numpy array\n Inicidence angle\n Output from get_baselines()\n rg_slope: 2D numpy array\n Slope im range direction\n Output from compute_corrected_kz_and_slope_dem()\n\n Returns\n -------\n gama_SNR : 2D numpy array\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('compute_noise_decorrelation')\n log.info('Computing noise decorrelation step 1 of 3 ...')\n\n\n ml_rg = np.int(np.round(resolution / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))\n ml_az = np.int(np.round(resolution / parameters['projectedSpacingAzimuth']))\n\n delta_tau = parameters['validity_range_max'][0]-parameters['validity_range_min'][0]\n dtau = delta_tau / parameters['n_rg']\n tau = np.linspace(start=parameters['validity_range_min'][0],stop=int(parameters['n_rg']-1)*dtau+parameters['validity_range_min'][0], num=int(parameters['n_rg']))\n\n noise_polynomials = np.zeros((int(parameters['n_rg']),int(parameters['n_noise_records'])))\n\n\n for i_noise in range(int(parameters['n_noise_records'])):\n for i_order_poly in range(int(parameters['noise_polynomial_degree'])+1):\n 
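# accumulate the noise polynomial: sum over k of noise_coef[i_noise, k] * (tau - reference_point[i_noise])**k\n            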
noise_polynomials[:,i_noise] = noise_polynomials[:,i_noise] + parameters['noise_coef'][i_noise,i_order_poly] * (np.power(tau-parameters['reference_point'][i_noise],i_order_poly))\n\n\n noise_eq_beta_nought = np.multiply(noise_polynomials,parameters['calFactor'])\n\n #enlarge noise polynomial to full azimuth size\n #noise_eq_beta_nought = ndimage.zoom(noise_eq_beta_nought,(1,parameters['n_az']/noise_eq_beta_nought.shape[1]),order=1)\n #noise_eq_beta_nought = np.multiply(noise_eq_beta_nought.transpose(),np.sin(thetainc))\n\n noise_eq_beta_nought = ndimage.zoom(noise_eq_beta_nought,(1,parameters['n_az']/noise_eq_beta_nought.shape[1]),order=1)\n noise_eq_beta_nought = np.transpose(noise_eq_beta_nought)\n np.multiply(noise_eq_beta_nought,np.sin(thetainc),out=noise_eq_beta_nought)\n\n\n log.info('Computing noise decorrelation step 2 of 3 ...')\n\n #get amplitud od the slc, include cal factor!\n amp_slc = ndimage.uniform_filter(np.abs(slc*np.sqrt(parameters['calFactor'])),(ml_az,ml_rg))\n # get radar cross section\n # RCS = np.multiply(np.square(amp_slc),np.sin(thetainc-rg_slope))\n # amp_slc = None\n # power = RCS - noise_eq_beta_nought\n # RCS = None\n # SNR = power / noise_eq_beta_nought\n # power = None\n # noise_eq_beta_nought = None\n np.multiply(np.square(amp_slc),np.sin(thetainc-rg_slope),out=amp_slc)\n amp_slc = amp_slc - noise_eq_beta_nought\n amp_slc = amp_slc / noise_eq_beta_nought\n\n #amp_slc = numexpr.evaluate(\"(amp_slc**2)*sin(thetainc-rg_slope)\")\n #amp_slc = numexpr.evaluate(\"amp_slc - noise_eq_beta_nought\")\n #amp_slc = numexpr.evaluate(\"amp_slc / noise_eq_beta_nought\")\n\n\n log.info('Computing noise decorrelation step 3 of 3 ...')\n\n #calculate SNR decorrelation\n # gama_SNR = SNR/(SNR+1)\n # SNR = None\n\n amp_slc = amp_slc/(amp_slc+1)\n #amp_slc = numexpr.evaluate(\"amp_slc/(amp_slc+1)\")\n\n # filter very low gana_SNR\n #gama_SNR[gama_SNR<0] = 0.01\n amp_slc[amp_slc < 0] = 0.01\n\n #return gama_SNR\n return amp_slc\n\n\ndef compute_corrected_kz_and_slope_dem(parameters,thetainc,dem_slc_dims,bperp):\n \"\"\"Compute the kz corrected by the dem, the slope of the dem and range decorrelation\n\n Parameters\n ----------\n parameters : dict\n Dictionary with parameters of the master image.\n Output of function get_params_from_xml()\n thetainc : 2D numpy array\n Inicidence angle\n Ouputput from get_baselines()\n dem_slc_dims : 2D numpy array\n dem in SLC dimensions\n bperp : 2D numpy array\n Perpendicular baseline\n Ouput from get_baselines()\n\n Returns\n -------\n kz_dem : 2D numpy array\n Vertical wavenumber corrected by the dem\n deco_rg : 2D numpy array\n Range decorrelation\n rg_slope : 2D numpy array\n Slope of the dem ni the range direction\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n\n\n log = logging.getLogger('compute_coorected_kz_and_slope_dem')\n log.info('Computing corrrected kz and slope of the DEM ...')\n\n\n # get solpe of the dem\n # xd1, xd2 = np.gradient(dem_slc_dims)\n # rg_slope = np.arctan2(xd2, parameters['spacing_rg'] / np.sin(thetainc))\n # az_slope = np.arctan2(xd1,parameters['spacing_az'])\n rg_slope = np.gradient(dem_slc_dims,axis=1)\n\n np.arctan2(rg_slope, parameters['spacing_rg'] / np.sin(thetainc),out=rg_slope)\n\n\n ### get range decorrelation and corrected kz\n matrix_range_vectors = np.transpose(np.reshape(np.repeat(parameters['range_vec'], int(parameters['n_az'])),(int(parameters['n_rg']), int(parameters['n_az']))))\n # kz_1d = (4*np.pi/(constants.c/parameters['f0'])) * 
(bperp/(matrix_range_vectors* np.sin(thetainc)))\n # B_crit = (constants.c / parameters['f0']) * (parameters['cdw'] / constants.c) * matrix_range_vectors * np.tan(thetainc - rg_slope)\n # deco_rg = 1 - np.abs(bperp / B_crit)\n deco_rg = 1 - np.abs(bperp / ((constants.c / parameters['f0']) * (parameters['cdw'] / constants.c) * matrix_range_vectors * np.tan(thetainc - rg_slope)))\n kz_dem = (4 * np.pi / (constants.c / parameters['f0'])) * (bperp / (matrix_range_vectors * np.sin(thetainc - rg_slope)))\n\n kz_dem = np.abs(kz_dem)\n\n\n return kz_dem,deco_rg,rg_slope\n\n\n\n\ndef interpolate_forest_height_no_valids_kz(forest_height_radar_coord_kz_invalids,mask_kz_points):\n \"\"\"Interpolate invalid points due to wrong values of kz\n\n Parameters\n ----------\n forest_height_radar_coord_kz_invalids : 2D numpy array\n Forest heights\n output from forest_height_inversion()\n mask_kz_points : 2D numpy array\n mask to indicate which pixels (value not equal 0) have been not included in the kz\n Output from processing_tdx_until_coherence()\n\n Returns\n -------\n forest_height_radar_coord : 2D numpy array\n Forest height with no invalid pixels\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('interpolate_forest_height_no_valids_kz')\n log.info('Interpolate forest in the non-valids kz points ...')\n\n try:\n\n if np.sum(mask_kz_points) > 0:\n\n # grid output image\n grid_x_out, grid_y_out = np.mgrid[0:mask_kz_points.shape[0]:mask_kz_points.shape[0] * 1j,\n 0:mask_kz_points.shape[1]:mask_kz_points.shape[1] * 1j]\n # get valules and points for grid data\n valid_pos = mask_kz_points == 0\n values = forest_height_radar_coord_kz_invalids[valid_pos]\n points = np.zeros((len(values), 2))\n points[:, 0] = grid_x_out[valid_pos]\n points[:, 1] = grid_y_out[valid_pos]\n forest_height_radar_coord = interpolate.griddata(points, values, (grid_x_out, grid_y_out), method='linear')\n\n else:\n #if mask points is all 0, it means that we do not have invalid points\n forest_height_radar_coord = forest_height_radar_coord_kz_invalids\n\n except:\n log.error('Too many invalid point to interpolate')\n\n return forest_height_radar_coord\n\n\ndef rebin_arbitrary_dims(arr, new_shape,method='mean'):\n \"\"\" function to make the rebind of an array using a given method\n\n If the new size in not a multiple number then we increase the size until it is multiple\n\n Advice: If it is not strictly necessary, i would use ndimage.zoom (faster and easier)\n\n Parameters\n ----------\n arr : 2D array\n new_shape : list\n contain the new shape in a list\n method : str\n Method to be used to go from the the size of the original array to the new shape\n 'mean','median','max','min'\n\n Returns\n -------\n new_arr: array with the new dimenions\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : March 2021\n\n\n \"\"\"\n\n\n #==== OPTION 1 using array split to have exactlly the same dimensions==============\n #reshape array with array split, this allows to split into non-multiple size\n arr_reshape = [np.array_split(i.transpose(), new_shape[1]) for i in np.array_split(arr, new_shape[0])]\n #generate new array with the desired dimension\n\n new_arr = np.zeros((new_shape[0], new_shape[1]),dtype=arr.dtype)\n for num_row, i in enumerate(arr_reshape):\n for num_col, j in enumerate(i):\n\n if method == 'mean':\n new_arr[num_row, num_col] = np.nanmean(j)\n if method == 'median':\n new_arr[num_row, num_col] = np.nanmedian(j)\n if method == 
'max':\n new_arr[num_row, num_col] = np.nanmax(j)\n if method == 'min':\n new_arr[num_row, num_col] = np.nanmin(j)\n #==============================================================================================\n\n # #====== OPTION 2 make reshape of the multiple part and then add the non multiple part\n # nrows, ncols = arr.shape\n # nrows_new = new_shape[0]\n # ncols_new = new_shape[1]\n #\n # ## Get the rest of pixels in each dimension\n # end_row = nrows % nrows_new\n # end_col = ncols % ncols_new\n # # if it is 0 it means that the dimension is multiple and we take until the last pixel, if not we take until the last pixels to be multiple\n # if end_row == 0:\n # end_row = nrows\n # else:\n # end_row = end_row * -1\n # if end_col == 0:\n # end_col = ncols\n # else:\n # end_col = end_col * -1\n #\n # ## Divide the input array in one with multiple dimensions and the rest\n # arr1 = arr[0:end_row, 0:end_col]\n # arr2_rows = arr[end_row:nrows, ::]\n # arr2_cols = arr[::, end_col:ncols]\n #\n # #Rebin 2D array arr to shape new_shape by averaging.\"\"\"\n # shape = (new_shape[0], arr1.shape[0] // new_shape[0], new_shape[1], arr1.shape[1] // new_shape[1])\n #\n # new_arr = arr1.reshape(shape)\n # if method == 'mean':\n # new_arr = np.nanmean(np.nanmean(new_arr, -1), 1)\n #\n # ##now we include the last pixels correspondiong to the non multiple rows or columns\n # if len(arr2_rows) > 0:\n #\n # if method == 'mean':\n # arr2_rows = np.nanmean(arr2_rows, 0)\n # # make the mean of the array using split array\n # new_arr_rows = [np.nanmean(i) for i in np.array_split(arr2_rows, new_shape[1])]\n #\n # # get the last row of the array concatenated by the previous onw\n # last_row = np.vstack((new_arr[-1, :], new_arr_rows))\n #\n # if method == 'mean':\n # # make the mean\n # last_row = np.nanmean(last_row, 0)\n #\n # # change this new row (that includes the rows of the rest with the previous one\n # new_arr[-1, :] = last_row\n #\n # ##now we include the last pixels correspondiong to the non multiple cols or columns\n # if len(arr2_cols) > 0:\n # if method == 'mean':\n # arr2_cols = np.nanmean(arr2_cols, 1)\n # # make the mean of the array with the new shape\n # new_arr_cols = [np.nanmean(i) for i in np.array_split(arr2_cols, new_shape[0])]\n #\n # # get the last row of the array concatenated by the previous onw\n # last_col = np.vstack((new_arr[:, -1], new_arr_cols))\n #\n # if method == 'mean':\n # # make the mean\n # last_col = np.nanmean(last_col, 0)\n #\n # # change this new col (that includes the cols of the rest with the previous one\n # new_arr[:, -1] = last_col\n #\n # ##==============================================================================================\n\n # #========OPTION 3 to increase the new size until you have a multiple==============\n # nrows,ncols = arr.shape\n # nrows_new = new_shape[0]\n # ncols_new = new_shape[1]\n #\n # # if the dimensions with the new shape are not multiple we take the first multiple value for the new shape\n # while(nrows % nrows_new != 0):\n # nrows_new = nrows_new + 1\n #\n # while(ncols % ncols_new != 0):\n # ncols_new = ncols_new + 1\n #\n # #update to the new shape that it is multiple\n # new_shape[0] = nrows_new\n # new_shape[1] = ncols_new\n #\n #\n # \"\"\"Rebin 2D array arr to shape new_shape by averaging.\"\"\"\n # shape = (new_shape[0], arr.shape[0] // new_shape[0],\n # new_shape[1], arr.shape[1] // new_shape[1])\n #\n #\n # new_arr = arr.reshape(shape)\n # if method == 'mean':\n # new_arr = np.nanmean(np.nanmean(new_arr,-1),1)\n # if 
method == 'median':\n # new_arr = np.nanmedian(np.nanmedian(new_arr,-1),1)\n # if method == 'max':\n # new_arr = np.nanmax(np.nanmax(new_arr, -1), 1)\n # if method == 'min':\n # new_arr = np.nanmin(np.nanmin(new_arr, -1), 1)\n # #==============================================================================================\n\n return new_arr\n\n\n\n\ndef processing_tdx_until_coherence(inputs,path_image_acquisition):\n \"\"\"Processing of TDX image from cos until coherecen\n\n Parameters\n ----------\n inputs: module\n Module from the inputs file used in the GEDI/TDX procesinng\n Before calling the function make import inputs\n path_image_acquisition : str\n Complete path of the folder that contains the TDX image to process\n\n Returns\n -------\n parameters : dict\n Information related to the master image\n coh_cor : 2D numpy array\n Coherence corrected by the dem\n kz_cor : 2D numpy array\n Kz corrected by the dem\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension represents:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n output from get_dem()\n dem_limits : dict\n Limits of the DEM.\n It contains the following keys: {'minlon': ,'maxlon': ,'minlat': ,'maxlat': 0.}\n output from get_dem()\n deco_rg : 2D numpy array\n Range decorrelation\n phase : 2D numpy array\n Interferogram phase\n uw_phase : 2D numpy array\n Interferogram unwrapped phase\n master_image : 2D numpy array\n slc master image\n If the inputs.save_master_slave is False then master_image=None\n slave_image : 2D numpy array\n slc slave image\n If the inputs.save_master_slave is False then slave_image=None\n kz : 2D numpy array\n Original kz (not corrected by the dem)\n this is not the kz used for the forest heigth!\n coherence : 2D numpy array\n Absolute value of the coherence (not corrected by the dem)\n this is not the kz used for the forest heigth!\n dem_slc_dims : 2D numpy array\n DEM used for the processing of the interferogram in the SLC dimensions\n im_filtered_std : 2D numpy array\n Standard deviation of the third highest pixel in a the window omf inputs.resolution\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('processing_tdx_until_coherece')\n log.info('Pre- Processing of tdx/tsx images until coherence ...')\n\n\n # =============== READING TDX PARAMETERS FROM XML =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n parameters, parameters_slave = get_params_from_xml(path_image_acquisition)\n ####################################################################\n\n # =============== READING EXTERNAL DEM =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n dem, dem_xyz, dem_posting, dem_limits, dang = get_dem(inputs.path_dem,inputs.type_dem,parameters, margin_degrees=0.5,NumThreads=inputs.num_threads)\n ####################################################################\n\n # =============== INTERPOLATION OF ORBITS =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. 
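** 30) , 2)) + ' Gb')\n    # Note: the orbits are interpolated several times on purpose: 'orbit_slc' lies\n    # exactly on the SLC azimuth grid (margin=0), 'orbit' includes an azimuth margin\n    # used for the back-geocoded DEM, and 'orbit_active' resamples the master orbit\n    # onto the slave orbit time axis (reqTime) for the bistatic pair.\n    log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. 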
** 30) , 2)) + ' Gb')\n parameters['orbit_slc'] = interpol_orbits(parameters, margin=0)\n parameters['orbit'] = interpol_orbits(parameters)\n parameters_slave['orbit_slc'] = interpol_orbits(parameters_slave, margin=0)\n parameters_slave['orbit'] = interpol_orbits(parameters_slave)\n parameters['orbit_active'] = interpol_orbits(parameters,reqTime=parameters_slave['orbit'][:,0],same_coefficients=False,parameters_slave=parameters_slave)\n ####################################################################\n\n # =============== PARAMETERS FOR BACK-GEOCODED DEM =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n ## master\n #### get angles\n offnadir, lookangle = get_offnadir_lookangle(parameters, dem,NumThreads=inputs.num_threads)\n # get more parameters back geocoding dem\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n deltat_dem, rd_dem, rs_dem, t0_dem, nrg_dem, naz_dem = get_params_back_geocoding_dem(parameters,dem_posting, offnadir)\n ### slave\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30), 2)) + ' Gb')\n offnadir_slave, lookangle_slave = get_offnadir_lookangle(parameters_slave, dem,NumThreads=inputs.num_threads)\n # get more parameters back geocoding dem\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n deltat_dem_slave, rd_dem_slave, rs_dem_slave, t0_dem_slave, nrg_dem_slave, naz_dem_slave = get_params_back_geocoding_dem(parameters_slave, dem_posting, offnadir_slave)\n # adding same offset to all images\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n rd_dem, rd_dem_slave, t0_dem, t0_dem_slave = add_same_offset_images(parameters, parameters_slave, rd_dem,rd_dem_slave, t0_dem)\n ####################################################################\n\n # =============== CHANGE DEM TO SLANT-RANGE COORDINATES =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n rgm, azm = xyz2rgaz(parameters, dem_xyz, dem_limits, deltat_dem, rd_dem, rs_dem, t0_dem, nrg_dem, naz_dem,NumThreads=inputs.num_threads)\n dem_radar_coord = get_dem_height_from_rg_az(rgm, azm, parameters, dem, deltat_dem, rd_dem, rs_dem, t0_dem,nrg_dem, naz_dem)\n # Iterpolation DEM to get the same size as the SLC\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n dem_slc_dims = from_dem_dims_to_slc_dims(parameters, dem_radar_coord, nrg_dem, naz_dem, rd_dem, rs_dem,deltat_dem, t0_dem)\n dem_slc_dims = dem_slc_dims.astype('float32')\n ####################################################################\n\n # =============== COMPUTATION OF PHASE FLAT IN SLANT-RANGE GEONMETRY =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n ### compute dem flat earth -----\n dem_xyz_flat_master = get_dem_xyz_flat_earth(dem, dem_limits, dang,NumThreads=inputs.num_threads)\n ## get master rg az matrices for flat earth\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. 
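** 30) , 2)) + ' Gb')\n    # Flat-earth reference: the same lon/lat grid at a constant height (the median\n    # DEM height), projected into the master and slave geometries; the difference\n    # of the two range histories gives the flat-earth phase that\n    # compute_interferogram() later compensates.\n    log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. 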
** 30) , 2)) + ' Gb')\n rgm_flat_master, azm_flat_master = xyz2rgaz(parameters, dem_xyz_flat_master, dem_limits, deltat_dem, rd_dem,rs_dem, t0_dem, nrg_dem, naz_dem,NumThreads=inputs.num_threads)\n ## get slave rg az matrix for flat earth\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n rgm_flat_slave, azm_flat_slave = xyz2rgaz(parameters_slave, dem_xyz_flat_master, dem_limits,\n deltat_dem_slave, rd_dem_slave, rs_dem_slave, t0_dem_slave,\n nrg_dem_slave, naz_dem_slave, False, True, rs_dem,\n parameters['orbit_active'],NumThreads=inputs.num_threads)\n ## from here following code of get_offsets.pro from line 482 of TAXI with keyword /flatearth\n # convert master matrices to dem positions\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n slantphaseflat = compute_slant_phase_flat(parameters, parameters_slave, rgm_flat_master, azm_flat_master,\n rgm_flat_slave, nrg_dem, naz_dem, rd_dem, rs_dem, deltat_dem,t0_dem)\n # interpolation to the same size as the SLC\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n slantphaseflat = from_dem_dims_to_slc_dims(parameters, slantphaseflat, nrg_dem, naz_dem, rd_dem, rs_dem,deltat_dem, t0_dem)\n slantphaseflat = slantphaseflat.astype('float32')\n ####################################################################\n\n\n\n # =============== COMPUTATION OF BASELINES =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n # 1 - get the cartesian coordinates of the range azimuth matrix (i.e dem in cartersians in slant-range geometry)\n dem_xyz_slr = rgz2xyz(parameters, rgm, azm, dem_xyz, deltat_dem, rd_dem, rs_dem, t0_dem, nrg_dem, naz_dem)\n # 2 - get az offset\n # get range azimuth matrices for the slave\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n rgs, azs = xyz2rgaz(parameters_slave, dem_xyz, dem_limits, deltat_dem_slave, rd_dem_slave, rs_dem_slave,\n t0_dem_slave, nrg_dem_slave, naz_dem_slave, False, True, rs_dem, parameters['orbit'],NumThreads=inputs.num_threads)\n # get offsets in azimuth\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n az_offset,rg_offset,synth_phase = get_offsets(rgm, azm, rgs, azs, parameters,parameters_slave, dem, deltat_dem, rd_dem, rs_dem, t0_dem, nrg_dem,naz_dem)\n # 3. Get parameters related to baselines (bperp, kz, thetainc) (get_baselines.pro -> get_baselines_dub.pro)\n baseline, bpar, bperp, kz, thetainc = get_baselines(parameters, parameters_slave, dem_xyz_slr, az_offset,deltat_dem, t0_dem)\n\n # interpolate to the same size as slc\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n bperp = from_dem_dims_to_slc_dims(parameters, bperp, nrg_dem, naz_dem, rd_dem, rs_dem, deltat_dem, t0_dem)\n bperp = bperp.astype('float32')\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30), 2)) + ' Gb')\n kz = from_dem_dims_to_slc_dims(parameters, kz, nrg_dem, naz_dem, rd_dem, rs_dem, deltat_dem, t0_dem)\n kz = kz.astype('float32')\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. 
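** 30) , 2)) + ' Gb')\n    # thetainc is interpolated to the SLC dimensions like bperp and kz above; all\n    # three are kept as float32 to halve the memory footprint\n    log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. 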
** 30) , 2)) + ' Gb')\n thetainc = from_dem_dims_to_slc_dims(parameters, thetainc, nrg_dem, naz_dem, rd_dem, rs_dem, deltat_dem,t0_dem)\n thetainc = thetainc.astype('float32')\n ####################################################################\n\n\n\n # =============== READING TDX/TSX COS FILES =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n master_image = read_master_slave(path_image_acquisition,parameters,'master')\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n slave_image = read_master_slave(path_image_acquisition, parameters,'slave')\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n # apply calFactor\n #master_image = master_image * np.sqrt(parameters['calFactor'])\n #slave_image = slave_image * np.sqrt(parameters_slave['calFactor'])\n\n ## save outputs for LEA\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/master_slc_radar_coord.npy', master_image)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/slave_slc_radar_coord.npy', slave_image)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/flat_earth_slc_radar_coord.npy', slantphaseflat)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/kz_radar_coord.npy', kz)\n\n\n # =============== COMPUTATION OF INTERFEROGRAM =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n #meanheight = np.median(dem[:, :, 2])\n interferogram, coherence = compute_interferogram(inputs.resolution, parameters, master_image, slave_image,slantphaseflat, kz, dem_slc_dims,None,inputs.resolution_slcs)\n #remove residual phase in case we want to save it\n if inputs.save_phase:\n interferogram = remove_residual_flat_earth(interferogram, coherence)\n\n\n ## ------- PHASE UNWRAPPING\n if inputs.make_phase_unwrapping:\n try:\n # remove nans from coherence\n coherence[np.isnan(coherence)] = 0\n # WARNING: Based on choi IDL code, we have to test it!\n #Reduce the image to make the phase unwrapping to reduce the computational cost.\n # We take the applyed multilloks\n ml_rg = np.int(np.round(inputs.resolution/((parameters['groundNear']+parameters['groundFar'])/2.)))\n ml_az = np.int(np.round(inputs.resolution/parameters['projectedSpacingAzimuth']))\n # we reduce the orginal multilooks to have a bit of margin respect to the original size\n ml_rg = np.float(ml_rg-2)\n ml_az = np.float(ml_az-2)\n phase = ndimage.zoom(np.angle(coherence), (1/ml_az, 1/ml_rg),order=1)\n abs_coh = ndimage.zoom(np.abs(coherence), (1/ml_az, 1/ml_rg),order=1)\n ## phase unwrapping\n uw_phase = phase_unwrapping(abs_coh, phase, inputs.path_snaphu, inputs.output_path+path_image_acquisition.split('/')[-2]+'/')\n # get again SLC dimensions\n uw_phase = ndimage.zoom(uw_phase, (np.float(coherence.shape[0]/np.float(uw_phase.shape[0])), np.float(coherence.shape[1]/np.float(uw_phase.shape[1]))),order=1)\n ## ------- BASELINE CORRETION\n ## compute plane for the baseline correction\n plane = baseline_correction_using_plane(np.abs(coherence), uw_phase, kz)\n ## remove this plane to the interferogram\n np.multiply(interferogram,np.exp(-plane*kz*1j), out = interferogram)\n # remove this plant to the unwrtapped pahse\n uw_phase = uw_phase - plane*kz\n # get the phase of the interferogram\n phase = 
np.angle(interferogram)\n except:\n log.error('Unexpected error in the phase unwrapping')\n log.exception('Error:')\n log.info('Generate a fake unwrapped phase to continue the processing')\n uw_phase = np.zeros(interferogram.shape)\n else:\n # get the phase of the interferogram\n phase = np.angle(interferogram)\n uw_phase = None\n ####################################################################\n\n\n # =========================================================================\n # Changhyun approach based on the standard deviation of third highest pixel\n if inputs.make_map_std_for_bias_height:\n im_filtered_std = map_std_for_bias_height(master_image, slave_image, slantphaseflat,kz,rg_slope,parameters,inputs,path_image_acquisition)\n else:\n im_filtered_std = None\n # ==================================================\n\n #save momory\n if inputs.save_kz == False:\n kz = None\n interferogram = None\n\n # =============== COMPUTATION OF VOLUMETRIC COHERENCE CORRECTION =============== #\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n ## 1 - Calculation of DEM corrected kz\n kz_cor, deco_rg, rg_slope = compute_corrected_kz_and_slope_dem(parameters, thetainc,dem_slc_dims, bperp)\n\n kz_cor = np.clip(kz_cor,inputs.hard_lower_limit_kz,inputs.hard_upper_limit_kz)\n bperp = None\n #dem_slc_dims = None\n\n\n # 2- compute noise decorrelation\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n SNR_master = compute_noise_decorrelation(inputs.resolution, parameters, np.copy(master_image), thetainc,rg_slope)\n log.debug('Memory usage ' + str(np.round((psutil.Process(os.getpid()).memory_info()[0] / 2. ** 30) , 2)) + ' Gb')\n SNR_slave = compute_noise_decorrelation(inputs.resolution, parameters_slave, np.copy(slave_image), thetainc,rg_slope)\n rg_slope = None\n thetainc = None\n\n # 3-# correct coherence\n coh_cor = np.clip(np.abs(coherence) / deco_rg / np.sqrt(SNR_master * SNR_slave), 0, 1)\n\n ## save outputs for LEA\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/SNR_master_coord.npy', SNR_master)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/SNR_slave_coord.npy', SNR_slave)\n\n SNR_master = None\n SNR_slave = None\n if inputs.save_coh:\n coherence = np.abs(coherence)\n else:\n coherence = None\n ####################################################################\n\n\n #check if we want to save the master/slave images\n if inputs.save_master_slave == False:\n master_image = None\n slave_image = None\n\n #Added kz and coherence mean to the parameters file\n parameters['kz_cor_mean'] = np.nanmean(kz_cor)\n parameters['coh_cor_mean'] = np.nanmean(coh_cor)\n\n ## save outputs for LEA\n # np.save(inputs.output_path+path_image_acquisition.split('/')[-2]+'/dem_latlon_coord.npy',dem)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/dem_radar_coord.npy', dem_slc_dims)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/range_decorrelation_radar_coord.npy', deco_rg)\n # np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/kz_corrected_by_dem_radar_coord.npy', kz_cor)\n\n #### ADITIONAL DECORRELATION FACTORS ##################\n # Increase of coherence to reduce bias in lower heights\n coh_cor = np.clip(coh_cor/inputs.decorrelation_coherence_before_inversion,0,1)\n #quantization error correction (coherence error of quantization is normally around 3 %)\n 
coh_cor = coh_cor/inputs.quantization_error\n #remove values that are lower than 0 or heihger than one\n #coh_cor[(coh_cor < 0) | (coh_cor > 1)] = np.nan\n coh_cor = np.clip(coh_cor,0,1)\n #####################################################################################\n\n\n\n return parameters,coh_cor,kz_cor,dem,dem_limits,deco_rg,phase,uw_phase,master_image,slave_image,kz,coherence,dem_slc_dims,im_filtered_std\n\n\n\ndef map_std_for_bias_height(master_image, slave_image, slantphaseflat, kz, rg_slope, parameters, inputs, path_image_acquisition):\n \"\"\"Code suggested from changhyun to remove the bias on the height based on the standadrd deviation of the heights\n\n\n Parameters\n ----------\n master_image : 2D numpy array\n master image in SLC coordinates\n slave_image : 2D numpy array\n Slave image in SLC coordinates\n slantphaseflat : : 2D numpy array\n Output from compute_slant_phase_flat() with the SLC dimensions\n kz : 2D numpy array\n Vertical wavenumber in SLC coordinates\n dem : 2D numpy array\n DEM in SLC coordinates\n parameters : dict\n Information related to the master image\n inputs: module\n Module from the inputs file used in the GEDI/TDX procesinng\n Before calling the function make import inputs\n\n Returns\n -------\n im_filtered_std : 2D numpy array\n Standard deviation of the third highest pixel in a the window omf inputs.resolution\n\n Notes\n -------\n Author : victor Cazcarra-Bes / Changhyun Choi\n Date : June 2021\n\n\n \"\"\"\n log = logging.getLogger('map_std_for_bias_height')\n log.info('Generation a map to deal with the bias in the height...')\n\n\n dem = 0.\n # Step 0: compute the interferomgram\n interferogram, coherence = compute_interferogram(inputs.map_std_resolution_coherence, parameters, master_image, slave_image, slantphaseflat, kz, dem, None, 1)\n master_image = None\n slave_image = None\n slantphaseflat = None\n dem = None\n # smooth images\n ml_rg_tdx = np.int(np.round(inputs.low_resolution_filter_height_tdx / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))\n ml_az_tdx = np.int(np.round(inputs.low_resolution_filter_height_tdx / parameters['projectedSpacingAzimuth']))\n interferogram2 = ndimage.uniform_filter(interferogram.real, (ml_az_tdx, ml_rg_tdx)) + (-1) * 1j * ndimage.uniform_filter(interferogram.imag,(ml_az_tdx, ml_rg_tdx))\n\n ## Step 1 get the phase of the interferogram and correct using plane aprroahch\n log.info('Get the phase of the interferogram ...')\n # plane correction and topography compensation\n interferogram = np.multiply(interferogram, interferogram2)\n interferogram2 = None\n interferogram[abs(coherence) < 0.4] = 0\n \n #### save interferogram\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/inf_radar_coord.npy', interferogram)\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/inf2_radar_coord.npy', interferogram)\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/coh_radar_coord.npy', coherence)\n\n coherence = None\n mask_slope = np.zeros(interferogram.shape) + 1\n mask_slope[abs(rg_slope) > np.pi/9.0] = np.nan\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/rg_slope_radar_coord.npy', rg_slope)\n rg_slope = None\n interferogram = interferogram*mask_slope\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/mask_slope_radar_coord.npy', mask_slope)\n mask_slope = None\n\n\n ## Step 2 convert phase to height\n log.info('Convert to height ...')\n height_tdx = np.angle(interferogram) / kz\n 
#np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/residual_height_radar_coord.npy', height_tdx)\n interferogram = None\n kz = None\n\n #Step 3 filter previous result at 120 m and make the difference of the non filtered and the filtered at 120 m\n log.info('Apply filter at low resolution ...')\n #ml_rg_tdx = np.int(np.round(inputs.low_resolution_filter_height_tdx / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))\n #ml_az_tdx = np.int(np.round(inputs.low_resolution_filter_height_tdx / parameters['projectedSpacingAzimuth']))\n #height_tdx_dem_filter = ndimage.uniform_filter(height_tdx_dem,(ml_az_tdx,ml_rg_tdx))\n #height_tdx = height_tdx_dem - height_tdx_dem_filter\n #### save height\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/hgt_av120_radar_coord.npy', height_tdx)\n height_tdx[height_tdx > 25] = 0\n\n try:\n # first we try a fast method, in case we have problems with the memory we will do it using generic_filter in parallel\n # Step 5. Get the height of the third highest peak at 25 meters window\n log.info('Get the height of the third highest peak ...')\n ml_rg = np.int(np.round(inputs.resolution_filter_height_peak / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))\n ml_az = np.int(np.round(inputs.resolution_filter_height_peak / parameters['projectedSpacingAzimuth']))\n #make the filter to get the third highest pixel with a \"jumping window\" (i.e not sliding window) to reduce the number of pixels\n jump_row = int(ml_az/2.5)\n jump_col = int(ml_rg/2.5)\n sel_num = -1*int(ml_rg*ml_az/20.0)\n height_peak = lib_filter.filter_get_highest_pixel(height_tdx,sel_num,ml_az,ml_rg,jump_row,jump_col)\n log.info('N of tallest phase: ' + str(sel_num))\n # # Step 6. Make a std filter of the previous result\n log.info('Make the std filter ...')\n #get the size of the window taking into account that we reduce the number pixels before\n ml_rg = np.int(np.round(inputs.resolution_filter_std / ((parameters['groundNear'] + parameters['groundFar']) / 2.))/jump_col)\n ml_az = np.int(np.round(inputs.resolution_filter_std / parameters['projectedSpacingAzimuth'])/jump_row)\n im_filtered_std = lib_filter.filter_std(height_peak, ml_az, ml_rg, 1, 1)\n #np.save(inputs.output_path + path_image_acquisition.split('/')[-2] + '/im_filtered_std_radar_coord.npy', im_filtered_std)\n ## get the original dimensions\n im_filtered_std = ndimage.zoom(im_filtered_std,(np.float(height_tdx.shape[0])/np.float(im_filtered_std.shape[0]),np.float(height_tdx.shape[1])/np.float(im_filtered_std.shape[1])),order=1)\n\n except:\n\n log.info('Get the height of the third highest peak unsing generic filter ...')\n ml_rg = np.int(np.round(inputs.resolution_filter_height_peak / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))\n ml_az = np.int(np.round(inputs.resolution_filter_height_peak / parameters['projectedSpacingAzimuth']))\n height_peak = lib_filter.generic_filter_parallel(height_tdx, inputs.num_threads_generic_filter, lib_filter.get_third_highest_pixel, (ml_az,ml_rg))\n\n log.info('Make the std filter using generic filter ...')\n ml_rg = np.int(np.round(inputs.resolution_filter_std / ((parameters['groundNear'] + parameters['groundFar']) / 2.)))\n ml_az = np.int(np.round(inputs.resolution_filter_std / parameters['projectedSpacingAzimuth']))\n im_filtered_std = lib_filter.generic_filter_parallel(height_peak, inputs.num_threads_generic_filter, np.std, (ml_az, ml_rg))\n\n\n log.info('Generation a map to deal with the bias in the height end')\n\n return 
im_filtered_std\n\n\n\ndef remove_residual_flat_earth(interferogram, coherence):\n \"\"\" Remove residual flat earth component for the image by fitting the sine and cosine to real and imaginary parts of interferogram\n\n Parameters\n ----------\n interferogram : complex image\n interferogram with residual flat earth component\n\n coherence : complex image\n coherence\n\n Returns\n -------\n interferogram_cor : complex image\n corrected interferogram with removed flat earth\n\n\n Notes\n -------\n Author : Roman Guliaev ([email protected])\n Date : March 2021\n\n \"\"\"\n log = logging.getLogger('remove_residual_flat_earth')\n log.info('Remove residual flat earth ...')\n\n\n try:\n\n interferogram1 = np.copy(interferogram) / np.abs(interferogram)\n\n def test_func_cos(x, a, b, c):\n return c * np.cos(b * x + a)\n\n def test_func_sin(x, a, b, c):\n return c * np.sin(b * x + a)\n\n x_data = np.linspace(0, 2 * np.pi, interferogram1.shape[1])\n\n\n # calculating mean imag and real interferogram along azimuth for each range\n interferogram2_imag = np.nanmean(interferogram1.imag, axis=0)\n interferogram2_real = np.nanmean(interferogram1.real, axis=0)\n\n # check for nan values\n x_data_imag = x_data[np.isfinite(interferogram2_imag)]\n interferogram2_imag = interferogram2_imag[np.isfinite(interferogram2_imag)]\n\n x_data_real = x_data[np.isfinite(interferogram2_real)]\n interferogram2_real = interferogram2_real[np.isfinite(interferogram2_real)]\n\n # find fit\n params_imag, para2 = optimize.curve_fit(test_func_sin, x_data_imag, interferogram2_imag, p0=[1, 1, .8])\n param_imag = params_imag[1]\n\n params_real, para2 = optimize.curve_fit(test_func_cos, x_data_real, interferogram2_real, p0=[1, 1, .8])\n param_real = params_real[1]\n\n sign_phase0 = 1\n if (np.abs(params_imag[0] - params_real[0]) > np.pi / 2 and np.abs(params_imag[0] - params_real[0]) < 3 * np.pi / 2): sign_phase0 = -1\n\n # taking mean of fit parameter\n flat_frequency = np.mean([np.abs(param_imag), np.abs(param_real)])\n\n # find the sign of complex exponent\n sign_for_rotation = sign_phase0 * np.sign(params_real[1]) * np.sign(params_real[2]) * np.sign(params_imag[1]) * np.sign(params_imag[2])\n\n # vector along range\n plane = np.exp(- sign_for_rotation * 1j * x_data * flat_frequency)\n\n # repeat the same vector for each azimuth\n plane2 = np.repeat([plane], interferogram1.shape[0], axis=0)\n\n # rotate the interferogram (remove the flat earth)\n interferogram3 = interferogram1 * plane2\n\n # absolute ground phase compensation\n interferogram_ground = np.angle(np.nanmean(interferogram3[np.abs(coherence > .93)]))\n interferogram_cor = interferogram3 * np.exp(-1j * interferogram_ground)\n\n\n except:\n log.error('Un-expected error removing residual flat earth')\n return interferogram\n\n return interferogram_cor\n\ndef read_master_slave(path_image_acquisition,parameters,image_to_read):\n \"\"\" Read master or slave image\n\n Parameters\n ----------\n path_image_acquisition : str\n Complete path of the folder that contains the TDX image to process\n parameters : dict\n Inforamtion related to the image\n image_to_read : str\n string to select the images to read 2 options: 'master' or 'slave'\n\n Returns\n -------\n slc_image : 2D numpy array\n Single Look complex image\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : November 2020\n\n \"\"\"\n\n\n log = logging.getLogger('read_master_slave')\n log.info(' Reading '+image_to_read)\n\n\n if image_to_read == 'slave':\n if parameters['active_sat'] == 'TSX':\n 
###TDX\n slc_image = read_cos(path_image_acquisition + parameters['TDX_name'], parameters)\n else:\n ##TSX\n slc_image = read_cos(path_image_acquisition + parameters['TSX_name'], parameters)\n\n elif image_to_read == 'master':\n if parameters['active_sat'] == 'TSX':\n ##TSX\n slc_image = read_cos(path_image_acquisition + parameters['TSX_name'], parameters)\n else:\n ###TDX\n slc_image = read_cos(path_image_acquisition + parameters['TDX_name'], parameters)\n\n else:\n log.error(' Wrong parameter image_to_read: '+image_to_read+'. Must be master or slave')\n\n\n return slc_image\n\n\n\n\n\ndef processing_until_forest_height(parameters, coh_cor, kz_cor, dem, common_profile, inputs,make_geocoding=True,use_input_size=False):\n \"\"\"Get the forest height from the coherences and kz\n\n Parameters\n ----------\n parameters : dict\n Inforamtion realted to the master image\n Output from processing_tdx_until_coherence()\n coh_cor : 2D numpy array\n Coherence corrected by the dem\n Output from processing_tdx_until_coherence()\n kz_cor : 2D numpy array\n Kz corrected by the den\n Output from processing_tdx_until_coherence()\n dem : 3D numpy array\n DEM in the form of a 3D array, where the last dimension representes:\n - (rows, cols,0): Longitude\n - (rows, cols,1): Latitude\n - (rows, cols,2): Height\n output from get_dem() / Output from processing_tdx_until_coherence()\n common_profile : list of 1D numpy array\n Common profiles generated from GEDI data\n inputs: module\n Module from the inputs file used in the GEDI/TDX procesinng\n Before calling the function make import inputs\n make_geocoding : bool\n Flag to make the geocoding of the result.\n - If True is assumed the inputs kz and coherence are in radar coordintes and the results will be given in lat lon\n - If False it returns the result in the same coordiantes as the incputs coh_cor and kz_cor\n use_input_size : bool\n Flag to use the size of the input data to not compute the height for all pixels.\n - If False The dada is reduced by the corresponding desired pixel spacing in output taking into account the original pixel spacing\n\n Returns\n -------\n forest_height_geo_lonlat : 2D numpy array\n Forest height\n col_axis_lat_coord: 1D numpy array\n Latitude values for the columns of forest_height_geo_lonlat\n row_axis_lon_coord: 1D numpy array\n Longitude values for the rows of forest_height_geo_lonlat\n lut_kz_coh_heights : 2D numpy array\n lut to make the forest height inversion that relates coh/kz <-> height\n First dimension kz as indicate in kz_lut_axes\n Second dimension height with dimensions:\n - height_vector = np.linspace(inputs.min_height_vector, inputs.max_height_vector, num=inputs.n_elements_height_vector)\n kz_lut_axes : 1D numpy array\n values of kz corresponding to the first dimension of lut_kz_coh_heights\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('processing_until_forest_height')\n log.info('Pre- Processing of tdx/tsx images until forest height ...')\n\n # =============== GENERATION OF 2D LUT COHERENCE/KZ <-> HEIGHT =============== #\n height_vector = np.linspace(inputs.min_height_vector, inputs.max_height_vector, num=inputs.n_elements_height_vector)\n lut_kz_coh_heights_list = []\n for num_common_profile,i_common_profile in enumerate(common_profile):\n lut_kz_coh_heights_aux, kz_lut_axes = get_2dlut_kz_coh_height_from_master_profile(i_common_profile, height_vector,kz_min=0,kz_max=1.2 * np.nanmax(kz_cor),n_elements_lut=5000)\n 
lut_kz_coh_heights_list.append(lut_kz_coh_heights_aux)\n\n #Check if the profile is adaptative\n if len(common_profile) > 1:\n #interpolate the luts\n lut_kz_coh_heights = lib_profile.interpolate_luts_different_profiles(inputs,lut_kz_coh_heights_list,kz_lut_axes,height_vector)\n else:\n #Tehre is only one lut as there is no adaptative prfdile\n lut_kz_coh_heights = lut_kz_coh_heights_list[0]\n\n lut_kz_coh_heights_list = None\n\n # =============== FOREST HEIGHT INVERSION =============== #\n #Make the forest height inversion\n forest_height_radar_coord = forest_height_inversion(inputs, kz_cor,coh_cor,parameters,lut_kz_coh_heights, kz_lut_axes,use_input_size_kz_coh=use_input_size)\n ##interpolate forest height in the no valid points of kz\n #forest_height_radar_coord = interpolate_forest_height_no_valids_kz(forest_height_radar_coord_kz_invalids,mask_points_processing_height)\n ####################################################################\n\n # =============== GEOCODING FOREST HEIGHT RADAR TO LAT/LON COORDINATES =============== #\n if make_geocoding:\n forest_height_geo_lonlat, col_axis_lat_coord, row_axis_lon_coord = geocoding_radar_image(forest_height_radar_coord,parameters, dem,NumThreads=inputs.num_threads,\n margin=0.05,pixels_spacing=inputs.pixel_spacing_out,pixels_border_to_remove=inputs.pixels_border)\n\n return forest_height_geo_lonlat, col_axis_lat_coord, row_axis_lon_coord, lut_kz_coh_heights, kz_lut_axes\n\n else:\n col_axis_lat_coord = None\n row_axis_lon_coord = None\n\n return forest_height_radar_coord, col_axis_lat_coord, row_axis_lon_coord, lut_kz_coh_heights, kz_lut_axes\n #####################################################################\n\n\n\n\ndef generate_kml_for_forest_height(image,col_axis_lat_coord,row_axis_lon_coord,inputs,output_path='',img_fname = 'forest_height.png',kml_fname='forest_height.kml',title_name='KML for forest height'):\n \"\"\"Generation of a kml and png to be used in google earth\n\n Parameters\n ----------\n image : 2D numpy array\n col_axis_lat_coord : 1D numpy array\n Array with the values of the lat (columns) coordinates with origin on up left corner\n row_axis_lon_coord : 1D numpy array\n Array with the values of the lon (rows) coordinates with origin on up left corner\n inputs : module\n Input file provided in the GEDI/TDX processing.\n output_path: str, optional\n img_fname: str, optional\n Name of the png image. It should be the same as kml_fname\n kml_fname: str, optional\n Name of the kml file. 
It should be the same as img_fname\n title_name: str, optional\n Name on the\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2020\n\n \"\"\"\n\n log = logging.getLogger('generate_kml_for_forest_height')\n log.info('Generate a kml and png files ...')\n\n\n #Transform input image in the correct positions\n image = np.flipud(np.transpose(image))\n\n ## reduce the size of the image, as we only need this for the kml\n image = ndimage.zoom(image,((image.shape[0]/3.0) / image.shape[0],(image.shape[1]/3.0) / image.shape[1]),order=0, mode='nearest')\n\n #mask the nan points for transparency\n mask = np.ones((image.shape))\n mask[np.isnan(image)] = 0\n\n #get invalid points\n water_points = image==INVALID_WATER\n settement_points = image == INVALID_SETTLEMENTS\n\n\n image[water_points] = np.nan\n image[settement_points] = np.nan\n\n\n #transform to a RGBA image (last dimension is the trasnparency)\n height_vector = np.linspace(inputs.min_height_vector, inputs.max_height_vector, num=inputs.n_elements_height_vector)\n image = image / np.nanmax(height_vector)\n image_png = cm.YlGn(image)\n #Add transparency to nan points\n image_png[:, :, 3] = mask\n\n\n # mask settelments (red)\n image_png[settement_points, 0] = 1\n image_png[settement_points, 1] = 0\n image_png[settement_points, 2] = 0\n image_png[settement_points, 3] = 1\n # mask watter (blue)\n image_png[water_points, 0] = 0\n image_png[water_points, 1] = 0\n image_png[water_points, 2] = 1\n image_png[water_points, 3] = 1\n\n #plt.imsave(output_path + img_fname, image_png,vmin=inputs.min_height_vector,vmax=inputs.max_height_vector,cmap=cmap)\n plt.imsave(output_path + img_fname, image_png, vmin=inputs.min_height_vector, vmax=inputs.max_height_vector)\n\n #names for the kml\n kml_fname = output_path + kml_fname\n title = title_name\n img_name = 'Geocoded image'\n\n #Coordinates of the image\n coods = \"{},{} {},{} {},{} {},{}\".format(np.min(row_axis_lon_coord), np.min(col_axis_lat_coord),\n np.max(row_axis_lon_coord),np.min(col_axis_lat_coord),\n np.max(row_axis_lon_coord), np.max(col_axis_lat_coord),\n np.min(row_axis_lon_coord), np.max(col_axis_lat_coord))\n\n # Generate kml directly\n root = et.Element(\"kml\")\n root.set(\"xmlns\", \"http://www.opengis.net/kml/2.2\")\n root.set(\"xmlns:gx\", \"http://www.google.com/kml/ext/2.2\")\n doc = et.SubElement(root, \"Document\")\n et.SubElement(doc, \"name\").text = title\n overlay = et.SubElement(doc, \"GroundOverlay\")\n et.SubElement(overlay, \"name\").text = img_name\n et.SubElement(overlay, \"open\").text = \"1\"\n icon = et.SubElement(overlay, \"Icon\")\n et.SubElement(icon, \"href\").text = img_fname\n llq = et.SubElement(overlay, \"gx:LatLonQuad\")\n et.SubElement(llq, \"coordinates\").text = coods\n tree = et.ElementTree(root)\n tree.write(kml_fname)\n\n\n\ndef compute_error_between_luts(common_profile,inputs,output_path):\n \"\"\"Computes the errors of the LUTs generated by the extreme profiles used to generate the final LUT.\n\n Explanation:\n common_profile contains a list of profiles used for different heights. 
For example 70 profiles from height 0 to height 70.\n From this 70 profiles we have 70 different LUTs where the LUT for height 0 and the LUT for height 70 are the more extrem ones.\n In this function we compute the error between these two extrem LUTs as well as the LUT of the box\n\n\n Parameters\n ----------\n common_profile : list\n list of vectors, where each of them correspond to one profile used to generate the global LUT\n inputs : module\n Input file provided in the GEDI/TDX processing.\n output_path : str\n path where the results will be saved\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : May 2021\n\n \"\"\"\n\n\n log = logging.getLogger('compute_error_between_luts')\n log.info('Compute the erro between luts')\n\n #ignore warnings here\n np.seterr('ignore')\n\n #Define number of elements for the height vector used in the performance curve, we use more than the input height vector to get a better sampling\n n_elements_height_vector = inputs.n_elements_height_vector\n max_height_vector = inputs.max_height_vector\n min_height_vector = inputs.min_height_vector\n height_vector = np.linspace(min_height_vector, max_height_vector, num=n_elements_height_vector)\n\n\n #generat the lut\n lut_kz_coh_heights_list = []\n for num_common_profile,i_common_profile in enumerate(common_profile):\n lut_kz_coh_heights_aux, kz_lut_axes = get_2dlut_kz_coh_height_from_master_profile(i_common_profile, height_vector,kz_min=0,kz_max=0.3,n_elements_lut=200)\n lut_kz_coh_heights_list.append(lut_kz_coh_heights_aux)\n\n #Check if the profile is adaptative\n if len(common_profile) > 1:\n #interpolate the luts\n lut_kz_coh_heights = lib_profile.interpolate_luts_different_profiles(inputs,lut_kz_coh_heights_list,kz_lut_axes,height_vector)\n else:\n #There is only one lut as there is no adaptative prfdile\n lut_kz_coh_heights = lut_kz_coh_heights_list[0]\n\n\n\n # get lut box profile\n box_profile = np.ones(inputs.n_elements_height_vector)\n lut_kz_coh_heights_box, kz_lut_axes = get_2dlut_kz_coh_height_from_master_profile(box_profile, height_vector, kz_min=0, kz_max=0.3, n_elements_lut=200)\n\n\n lut_kz_coh_heights = np.clip(lut_kz_coh_heights,inputs.hard_lower_limit_coh,1)\n lut_kz_coh_heights_low = np.clip(lut_kz_coh_heights_list[0], inputs.hard_lower_limit_coh, 1)\n lut_kz_coh_heights_up = np.clip(lut_kz_coh_heights_list[-1], inputs.hard_lower_limit_coh, 1)\n lut_kz_coh_heights_box = np.clip(lut_kz_coh_heights_box, inputs.hard_lower_limit_coh, 1)\n\n\n error_lut_kz_coh_heights_low = np.zeros((lut_kz_coh_heights.shape[1],lut_kz_coh_heights.shape[0]))\n error_lut_kz_coh_heights_up = np.zeros((lut_kz_coh_heights.shape[1],lut_kz_coh_heights.shape[0]))\n error_lut_kz_coh_heights_box = np.zeros((lut_kz_coh_heights.shape[1],lut_kz_coh_heights.shape[0]))\n \n\n\n for pos_kz in range(lut_kz_coh_heights.shape[0]):\n my_int = interpolate.interp1d(lut_kz_coh_heights[pos_kz, :], height_vector, fill_value=\"extrapolate\")\n ## height for the extrems of the profile\n height_lower_bound = my_int(lut_kz_coh_heights_low[pos_kz, :])\n height_upper_bound = my_int(lut_kz_coh_heights_up[pos_kz, :])\n # heights for the box profile\n height_box = my_int(lut_kz_coh_heights_box[pos_kz, :])\n\n #compute errors\n error_lut_kz_coh_heights_low[:,pos_kz] = np.abs(height_vector - height_lower_bound)\n error_lut_kz_coh_heights_up[:, pos_kz] = np.abs(height_vector - height_upper_bound)\n error_lut_kz_coh_heights_box[:, pos_kz] = np.abs(height_vector - height_box)\n\n\n\n #compute error in %\n for i_kz in 
range(error_lut_kz_coh_heights_low.shape[1]):\n error_lut_kz_coh_heights_low[:, i_kz] = error_lut_kz_coh_heights_low[:, i_kz] / height_vector[::-1] * 100\n error_lut_kz_coh_heights_up[:, i_kz] = error_lut_kz_coh_heights_up[:, i_kz] / height_vector[::-1] * 100\n error_lut_kz_coh_heights_box[:, i_kz] = error_lut_kz_coh_heights_box[:, i_kz] / height_vector[::-1] * 100\n\n\n ## limit the errors to 25 %\n error_lut_kz_coh_heights_low = np.clip(error_lut_kz_coh_heights_low,0,25)\n error_lut_kz_coh_heights_up = np.clip(error_lut_kz_coh_heights_up, 0, 25)\n error_lut_kz_coh_heights_box = np.clip(error_lut_kz_coh_heights_box, 0, 25)\n\n\n\n plt.figure()\n plt.imshow(error_lut_kz_coh_heights_low,aspect='auto',extent=(kz_lut_axes[0],kz_lut_axes[-1],height_vector[-1],height_vector[0]),cmap='jet')\n plt.colorbar()\n plt.title('Error for lower bound')\n plt.xlabel('kz [rad/m]')\n plt.ylabel('Height [m]')\n plt.savefig(output_path + 'error_luts_lower_bound_2d_plot.png', dpi=200, bbox_inches='tight')\n plt.close()\n\n plt.figure()\n plt.imshow(error_lut_kz_coh_heights_up, aspect='auto', extent=(kz_lut_axes[0], kz_lut_axes[-1], height_vector[-1], height_vector[0]), cmap='jet')\n plt.colorbar()\n plt.title('Error for upper bound')\n plt.xlabel('kz [rad/m]')\n plt.ylabel('Height [m]')\n plt.savefig(output_path + 'error_luts_upper_bound_2d_plot.png', dpi=200, bbox_inches='tight')\n plt.close()\n\n plt.figure()\n plt.imshow(error_lut_kz_coh_heights_box, aspect='auto', extent=(kz_lut_axes[0], kz_lut_axes[-1], height_vector[-1], height_vector[0]), cmap='jet')\n plt.colorbar()\n plt.title('Error for box')\n plt.xlabel('kz [rad/m]')\n plt.ylabel('Height [m]')\n plt.savefig(output_path + 'error_luts_box_bound_2d_plot.png', dpi=200, bbox_inches='tight')\n plt.close()\n\n ## make also some 1d plots\n values_kz_plot = [0.05, 0.1, 0.15, 0.2]\n for i_value_kz_plot in values_kz_plot:\n pos_kz = np.argmin(np.abs(i_value_kz_plot - kz_lut_axes))\n my_int = interpolate.interp1d(lut_kz_coh_heights[pos_kz, :], height_vector, fill_value=\"extrapolate\")\n ## height for the extrems of the profile\n height_lower_bound = my_int(lut_kz_coh_heights_low[pos_kz, :])\n height_upper_bound = my_int(lut_kz_coh_heights_up[pos_kz, :])\n # heights for the box profile\n height_box = my_int(lut_kz_coh_heights_box[pos_kz, :])\n\n plt.figure()\n plt.plot(height_vector, height_lower_bound, label='Lower bound profiles')\n plt.plot(height_vector, height_upper_bound, label='Upper bound profiles')\n plt.plot(height_vector, height_box, label='box profiles')\n plt.plot(height_vector, height_vector, c='k', linestyle='--')\n plt.title('Errors between LUTs '+'Kz: ' + str(np.round(kz_lut_axes[pos_kz], 2)))\n plt.xlabel('Height [m]')\n plt.ylabel('Height [m]')\n plt.legend()\n plt.savefig(output_path + 'errors_between_luts_'+'Kz_' + str(np.round(kz_lut_axes[pos_kz], 2))+'.png', dpi=200, bbox_inches='tight')\n plt.close()\n\n return\n\n\ndef get_min_max_valid_heights(inputs,parameters,common_profile,kz_cor,forest_height,output_path,plot_performance_kz_mean=True):\n \"\"\"Compute minimum and maximum valid heights depending on the kz\n\n Parameters\n ----------\n inputs : module\n Input file provided in the GEDI/TDX processing.\n parameters : dict\n Information related to the master image\n Output from processing_tdx_until_coherence()\n \tcommon_profile : list of 1D numpy arrays\n \t\tlist with all common profiles for the generation of the Lut for forest height inversion\n kz_cor : 2D numpy array\n Kz in lat lon coordinates\n forest_height_geo_lonlat : 2D 
numpy array\n Forest height in lat lon coordinates\n output_path : str\n path where some outputs will be saved\n plot_performance_kz_mean : bool\n To generate the performance plot assuming the true profile\n\n\n Returns\n -------\n max_valid_height : 2D numpy array\n Maximum valid height for each pixel in lat lon coordinates\n min_valid_height : 2D numpy array\n Maximum valid height for each pixel in lat lon coordinates\n bias : 2D numpy array\n bias in height respect to the performance plot\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : Nov 2020\n\n \"\"\"\n\n log = logging.getLogger('get_min_max_valid_heights')\n log.info('Compute the minimum and maximum valid range of heights step 1 of 2 ..')\n\n #ignore warnings here\n np.seterr('ignore')\n\n #DEfine the height vector\n n_elements_height_vector = inputs.n_elements_height_vector\n max_height_vector = inputs.max_height_vector\n min_height_vector = inputs.min_height_vector\n height_vector = np.linspace(min_height_vector, max_height_vector, num=n_elements_height_vector)\n\n\n #generat the lut\n lut_kz_coh_heights_list = []\n for num_common_profile,i_common_profile in enumerate(common_profile):\n lut_kz_coh_heights_aux, kz_lut_axes = get_2dlut_kz_coh_height_from_master_profile(i_common_profile, height_vector,kz_min=0,kz_max=1.2 * np.nanmax(kz_cor),n_elements_lut=200)\n lut_kz_coh_heights_list.append(lut_kz_coh_heights_aux)\n\n #Check if the profile is adaptative\n if len(common_profile) > 1:\n #interpolate the luts\n lut_kz_coh_heights = lib_profile.interpolate_luts_different_profiles(inputs,lut_kz_coh_heights_list,kz_lut_axes,height_vector)\n else:\n #There is only one lut as there is no adaptative prfdile\n lut_kz_coh_heights = lut_kz_coh_heights_list[0]\n\n\n\n lut_kz_coh_heights = np.clip(lut_kz_coh_heights,inputs.hard_lower_limit_coh,1)\n\n #Define number of elements for the height vector used in the performance curve, we use more than the input height vector to get a better sampling\n height_vector = ndimage.zoom(height_vector,3)\n lut_kz_coh_heights = ndimage.zoom(lut_kz_coh_heights,(1,3))\n n_elements_height_vector = len(height_vector)\n\n\n plt.figure(1)\n plt.figure(2)\n values_kz_plot = [0.05,0.075, 0.1,0.125,0.15,0.175,0.2]\n for i_value_kz_plot in values_kz_plot:\n pos_kz = np.argmin(np.abs(i_value_kz_plot - kz_lut_axes))\n plt.figure(1)\n plt.plot(height_vector, lut_kz_coh_heights[pos_kz, :], label='Kz: ' + str(np.round(kz_lut_axes[pos_kz], 3)))\n plt.figure(2)\n plt.plot(height_vector, np.gradient(lut_kz_coh_heights[pos_kz, :]), label='Kz: ' + str(np.round(kz_lut_axes[pos_kz], 3)))\n\n\n\n\n plt.figure(1)\n plt.title('Look-up table used for the performance')\n plt.xlabel('Height [m]')\n plt.ylabel('Coherence')\n plt.legend()\n plt.savefig(output_path + 'lut_kz_coh_height_for_performance_plot.png', dpi=200, bbox_inches='tight')\n plt.close()\n\n plt.figure(2)\n if inputs.slope_coh_lut_kz is not None:\n plt.plot([height_vector[0],height_vector[-1]],[inputs.slope_coh_lut_kz,inputs.slope_coh_lut_kz],'r--')\n plt.title('Gradient Look-up table used for the performance')\n plt.xlabel('Height [m]')\n plt.ylabel('Coherence')\n plt.legend()\n plt.savefig(output_path + 'gradient_lut_kz_coh_height_for_performance_plot.png', dpi=200, bbox_inches='tight')\n plt.close()\n\n\n\n # get number of looks\n ml_rg = np.int(np.round(inputs.resolution/((parameters['groundNear']+parameters['groundFar'])/2.)))\n ml_az = np.int(np.round(inputs.resolution/parameters['projectedSpacingAzimuth']))\n n_of_looks = 
np.float(ml_rg*ml_az)\n\n\n\n # For simplicity we use the same number of elements for coh and heights (it can be differet)\n n_elements_coh = n_elements_height_vector\n coherence_vector = np.linspace(0, 1, n_elements_coh)\n n_elements_kz = lut_kz_coh_heights.shape[0]\n\n # Now we inverted the heights and we get the limits for all range of kzs. I. e. We generate a LUT for limits of heights\n # Note: We do this to avoid doing the same procedurre for all kzs of the image.\n maximum_height_lut = np.zeros(n_elements_kz)\n minimum_height_lut = np.zeros(n_elements_kz)\n bias_lut = np.zeros((n_elements_kz,n_elements_coh))\n\n\n ## get positions to p\n list_pos_kz_to_plot = []\n for i_value_kz_plot in values_kz_plot:\n pos_kz = np.argmin(np.abs(i_value_kz_plot - kz_lut_axes))\n list_pos_kz_to_plot.append(pos_kz)\n\n for num_kz in range(n_elements_kz):\n\n inverted_heights = np.zeros(n_elements_coh)\n\n # invert the heights for this lut for coherence from 0 to 1\n for num_coh, i_coh in enumerate(coherence_vector):\n pos = np.where(i_coh >= lut_kz_coh_heights[num_kz, :])\n if len(pos[0]) > 0:\n inverted_heights[num_coh] = height_vector[pos[0][0]] # inverted_heights[num_coh] = lib_tdx.one_pixel_forest_height(kz_cor_pixel, kz_lut_axes, lut_kz_coh_heights, i_coh, height_vector)\n\n # remove not valid inverted heights\n inverted_heights[inverted_heights <= 0] = np.nan\n height_coherence_measured = np.copy(inverted_heights)\n #height_coherence_measured[::-1]\n\n\n #Variance introduces by the Cramer-Rao bound\n for num_coh, i_coh in enumerate(coherence_vector):\n sigma = (1 - np.square(1 - i_coh)) / n_of_looks * 2 * n_elements_height_vector\n gauss = signal.windows.gaussian(n_elements_height_vector * 2, sigma)\n gauss = gauss / np.nansum(gauss)\n gauss = gauss[num_coh:num_coh + n_elements_height_vector]\n height_coherence_measured[num_coh] = np.nansum(inverted_heights * gauss) / np.nansum(gauss + inverted_heights * 0)\n\n\n # we account for constant decorrelation\n height_with_decor = np.zeros(n_elements_coh)\n for num_coh, i_coh in enumerate(lut_kz_coh_heights[num_kz, :]):\n # Note: we use 1 - because the coherencen of the lut goes from 1 to 0\n modified_coherence = 1 - i_coh * inputs.decorrelation_filter_kz\n pos_height = np.argmin(np.abs(coherence_vector - modified_coherence))\n height_with_decor[num_coh] = height_coherence_measured[pos_height]\n\n\n # Compute the bias\n bias = np.clip((np.abs(height_with_decor - height_vector) / height_vector), 0, 1)\n bias_lut[num_kz,:] = bias\n\n # get the position of the minimum height\n # NOTE: We add the 0.01 to avoid errors due to the sampling of the function\n pos_bias_lower_limit = np.where(bias < (inputs.limit_bias_min_height - 0.01))[0]\n if len(pos_bias_lower_limit)>1:\n #we get the position of the minimum for the first time we find th bias in the curve\n pos_bias_lower_limit = pos_bias_lower_limit[0]\n minimum_height_lut[num_kz] = height_vector[pos_bias_lower_limit]\n\n #To get the position of the maximum first we find the first position of the curve where the bias is lower than the limit of maximum\n # then we continue the curve to find the next position which will be the limit.\n # Note: We do it like that, because if the limit of minimum height is higher than the limit of the maximum height, then\n # we will not get the correct maximum height following the curve.\n pos_bias_lower_limit_for_up = np.where(bias < (inputs.limit_bias_max_height - 0.01))[0]\n\n if len(pos_bias_lower_limit_for_up) > 1:\n\n pos_bias_lower_limit_for_up = 
pos_bias_lower_limit_for_up[0]\n pos_bias_upper_limit = np.where(bias[pos_bias_lower_limit_for_up::] > (inputs.limit_bias_max_height + 0.01))[0]\n if len(pos_bias_upper_limit) > 0:\n maximum_height_from_bias = height_vector[pos_bias_upper_limit[0] + pos_bias_lower_limit_for_up]\n else:\n maximum_height_from_bias = max_height_vector\n\n # find minimum peak\n peaks = signal.find_peaks(height_with_decor * -1)\n if len(peaks[0]) > 0:\n height_pos_minimum = height_with_decor[peaks[0][0]]\n # pos_max_height_from_minimum = np.argmin(np.abs(height_with_decor[pos_bias_lower_limit:peaks[0][0]]-height_pos_minimum)) + pos_bias_lower_limit\n pos_max_height_from_minimum = np.where(height_with_decor >= height_pos_minimum)[0][0]\n maximum_height_from_minimum = height_vector[pos_max_height_from_minimum]\n else:\n maximum_height_from_minimum = max_height_vector\n height_pos_minimum = 0\n\n if maximum_height_from_minimum < maximum_height_from_bias:\n maximum_height_lut[num_kz] = maximum_height_from_minimum\n else:\n maximum_height_lut[num_kz] = maximum_height_from_bias\n\n #get the maximum height due to the hard limit of coherence\n for i_height in range(lut_kz_coh_heights.shape[1]):\n aux_error = lut_kz_coh_heights[num_kz, i_height] - inputs.hard_lower_limit_coh\n maximum_height_from_hard_lower_limit_coh = height_vector[i_height]\n if aux_error < 0.01:\n break\n\n #get the maximum due to the slope of the derivated of the LUT\n gradient_lut = np.gradient(lut_kz_coh_heights[num_kz, :])\n\n\n # compute minimum\n peaks = signal.find_peaks(np.abs(gradient_lut))\n\n # check that we have a minimum,\n if (len(peaks[0]) > 0):\n\n if inputs.slope_coh_lut_kz is None:\n #if the slope is none then we take the position of the first minimum as the limit\n maximum_height_from_slope = height_vector[peaks[0][0]]\n\n else:\n\n # from the minimum get the positions where the gradient is higher than the limit (no valid points)\n pos_invalids = np.where(gradient_lut[peaks[0][0]:] > inputs.slope_coh_lut_kz)\n\n # Check that the value of gradient for the minimum peak is already heigher than the limit (inputs.slope_coh_lut_kz), if not it means that the limit is directlly the last position.\n # Example: If the minimum is located at -0.001 and inputs.slope_coh_lut_kz == -0.002 it means that we are always in the upper part of th elimit and we take the last position as limit\n # In other words, if the first position of the invalids is 0, it means that the slope value of the peak of minimum is higher than the limit (inputs.slope_coh_lut_kz) and we take the last position as maximum\n if (len(pos_invalids[0]) > 0) and (pos_invalids[0][0] != 0):\n # we take the first position of the invalids as the limit of height + the position of the minimum\n maximum_height_from_slope = height_vector[peaks[0][0] + pos_invalids[0][0]]\n else:\n maximum_height_from_slope = height_vector[-1]\n else:\n # if there is no minimum, it means that the slope is going always down, we take the last positions as maximum\n maximum_height_from_slope = height_vector[-1]\n\n\n if maximum_height_lut[num_kz] > maximum_height_from_hard_lower_limit_coh:\n maximum_height_lut[num_kz] = maximum_height_from_hard_lower_limit_coh\n\n if maximum_height_lut[num_kz] > maximum_height_from_slope:\n maximum_height_lut[num_kz] = maximum_height_from_slope\n\n #make the plot performance for the mean of the inputs kz\n if plot_performance_kz_mean:\n if num_kz in list_pos_kz_to_plot:\n 
lib_plots.plot_performance_one_kz(height_vector,height_with_decor,minimum_height_lut[num_kz],min_height_vector,\n max_height_vector,maximum_height_from_bias,maximum_height_from_minimum,maximum_height_from_hard_lower_limit_coh,maximum_height_from_slope,\n height_pos_minimum,maximum_height_lut[num_kz],kz_lut_axes[num_kz],bias,inputs,output_path)\n\n\n log.info('Compute the minimum and maximum valid range of heights step 2 of 2 ..')\n\n nrows,ncols = kz_cor.shape\n maximum_height = np.zeros((nrows,ncols))\n minimum_height = np.zeros((nrows, ncols))\n bias = np.zeros((nrows, ncols))\n\n # We make all rows for one column at the same time\n kz_lut_axes_matrix = np.reshape(np.repeat(kz_lut_axes, nrows), (len(kz_lut_axes), nrows))\n height_vector_matrix = np.reshape(np.repeat(height_vector, nrows), (len(height_vector), nrows))\n kz_cor_aux = np.copy(kz_cor)\n kz_cor_aux[np.isnan(kz_cor)] = 0\n forest_height_aux = np.copy(forest_height)\n\n forest_height_aux[forest_height_aux==INVALID_SETTLEMENTS] = 0\n forest_height_aux[forest_height_aux==INVALID_WATER] = 0\n\n for i_col in range(ncols):\n\n #vector of kzs for one column\n kz_col = kz_cor_aux[:,i_col]\n #get the closes position in the axis of kz for all rows in the corresponding colum\n pos_kz_col = np.argmin(np.abs(kz_col-kz_lut_axes_matrix),0)\n #convert the position to minimum height basec on the previous generated LUT that realtes (kz and minumum height\n maximum_height[:,i_col] = maximum_height_lut[pos_kz_col]\n minimum_height[:,i_col] = minimum_height_lut[pos_kz_col]\n\n #compute the bias for all pixels\n forest_col = forest_height[:, i_col]\n pos_forest_height_col = np.argmin(np.abs(forest_col-height_vector_matrix),0)\n bias[:,i_col] = bias_lut[pos_kz_col,pos_forest_height_col]\n\n\n\n #We limit the height to the the input max height/min height used in the processing\n maximum_height = np.clip(maximum_height,inputs.min_height_vector,inputs.max_height_vector)\n minimum_height = np.clip(minimum_height, inputs.min_height_vector, inputs.max_height_vector)\n #add same nanas as kz_cor\n minimum_height[np.isnan(kz_cor)] = np.nan\n maximum_height[np.isnan(kz_cor)] = np.nan\n bias[np.isnan(kz_cor)] = np.nan\n\n bias[forest_height==INVALID_WATER] =INVALID_WATER\n bias[forest_height == INVALID_SETTLEMENTS] = INVALID_SETTLEMENTS\n minimum_height[forest_height==INVALID_WATER] =INVALID_WATER\n minimum_height[forest_height == INVALID_SETTLEMENTS] = INVALID_SETTLEMENTS\n maximum_height[forest_height==INVALID_WATER] =INVALID_WATER\n maximum_height[forest_height == INVALID_SETTLEMENTS] = INVALID_SETTLEMENTS\n\n log.info('Compute the minimum and maximum valid range of heights ok!')\n\n return maximum_height,minimum_height,bias\n\n\n\n\ndef compute_masks(inputs,output_path,common_profile,kz_cor,coh_cor,forest_height_geo_lonlat,parameters):\n \"\"\"Compute the mask of valid/non-calid pixels for kz and coherence\n\n Parameters\n ----------\n inputs : module\n Input file provided in the GEDI/TDX processing.\n output_path : str\n path where some outputs will be saved\n\tcommon_profile : list of 1D numpy arrays\n\t\tlist with all common profiles for the generation of the Lut for forest height inversion\n kz_cor : 2D numpy array\n Kz in lat lon coordinates\n coh_cor : 2D numpy array\n Coherence in lat lon coordinates\n forest_height_geo_lonlat : 2D numpy array\n Forest height in lat lon coordinates\n parameters : dict\n Infomation related to the master image\n Output from processing_tdx_until_coherence()\n\n Returns\n -------\n max_valid_height : 2D numpy array\n 
Maximum valid height for each pixel in lat lon coordinates\n min_valid_height : 2D numpy array\n Maximum valid height for each pixel in lat lon coordinates\n bias : 2D numpy array\n bias in height respect to the performance plot\n mask_kz : 2D numpy array\n Binary matrix with 0 valid and 1 non-valid\n It contains 1 (non-valid) if the forest height estimated is NOT between the limits min_valid_height and max_valid_height\n mask_coh : 2D numpy array\n Binary matrix with 0 valid and 1 non-valid\n It contains 1 (non-valid) if the coherence is lower than inputs.hard_lower_limit_coh\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : Nov 2020\n\n \"\"\"\n\n\n log = logging.getLogger('compute_masks')\n log.info('Compute kz and coherence masks ...')\n\n\n #### FILTERIN OF kz ##########################\n # Get maximum and minimum valid height and the bias respect to the ideal performance\n max_valid_height, min_valid_height, bias = get_min_max_valid_heights(inputs, parameters, common_profile, kz_cor, forest_height_geo_lonlat, output_path, plot_performance_kz_mean=True)\n # generate a mask with the values of max and min valid kz\n mask_kz = np.ones(kz_cor.shape)\n valid_pos = (forest_height_geo_lonlat > min_valid_height) * (forest_height_geo_lonlat < max_valid_height)\n mask_kz[valid_pos] = 0\n mask_kz[valid_pos] = 0\n mask_kz[np.isnan(kz_cor)] = np.nan\n ##################\n\n #### FILTERING OF COHERENCE ##########################\n mask_coh = np.zeros(coh_cor.shape)\n # We remove all coherence below certain height\n mask_coh[coh_cor < inputs.hard_lower_limit_coh] = 1\n mask_coh[np.isnan(coh_cor)] = np.nan\n #####################################################\n\n return max_valid_height, min_valid_height, bias, mask_kz, mask_coh\n\n log.info('Compute kz and coherence masks ok!')\n\n\n\ndef phase_unwrapping(coh,phase,path_snaphu,path_files_snaphu):\n \"\"\"Phase unwrapping unsing snaphu\n\n Warnings:\n - Now it uses a basic file configuration, ther eis more info on how to properlly fill the config file in TAXI (make_conf_file.pro)\n\n Parameters\n ----------\n coh : 2D numpy array\n Absolute value of the coherence\n phase : 2D numpy arary\n Coherence phase\n path_snaphu : str\n paht where snaphu is locate\n path_files_snaphu: str\n path where temporary file will be saved\n\n Returns\n -------\n uw_phase : 2D numpy array\n unwrapped phase\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : February 2021\n\n\n \"\"\"\n\n\n log = logging.getLogger('phase_unwrapping')\n log.info('Compute the phase unwrapping using snaphu ...')\n\n # save inputs in the binary format for the snap\n phase_file = open(path_files_snaphu + 'phase.dat', 'wb')\n phase_file.write(np.asarray(phase, 'float32'))\n phase_file.close()\n\n coh_file = open(path_files_snaphu + 'coh.dat', 'wb')\n coh_file.write(np.asarray(coh, 'float32'))\n coh_file.close()\n\n line_length=coh.shape[1]\n ## generate configuration txt file for the snap\n txt_file = open(path_files_snaphu + 'sanphu_config.txt', 'w+')\n txt_file.write('#############################################\\n')\n txt_file.write('# File input and output and runtime options #\\n')\n txt_file.write('#############################################\\n')\n txt_file.write('#\\n')\n txt_file.write('# Input file name\\n')\n txt_file.write('#\\n')\n txt_file.write('INFILE ' + path_files_snaphu + 'phase.dat\\n')\n txt_file.write('#\\n')\n txt_file.write('# Input file line length\\n')\n txt_file.write('#\\n')\n txt_file.write('LINELENGTH 
'+str(int(line_length))+'\\n')\n txt_file.write('#########################\\n')\n txt_file.write('# Unwrapping parameters #\\n')\n txt_file.write('#########################\\n')\n txt_file.write('STATCOSTMODE TOPO\\n')\n txt_file.write('#VERBOSE TRUE\\n')\n txt_file.write('###############\\n')\n txt_file.write('# Input files #\\n')\n txt_file.write('###############\\n')\n txt_file.write('CORRFILE ' + path_files_snaphu + 'coh.dat\\n')\n txt_file.write('################\\n')\n txt_file.write('# Output files #\\n')\n txt_file.write('################\\n')\n txt_file.write('OUTFILE ' + path_files_snaphu + 'uw_phase.dat\\n')\n txt_file.write('LOGFILE ' + path_files_snaphu + 'snaphu.log\\n')\n txt_file.write('################\\n')\n txt_file.write('# File formats #\\n')\n txt_file.write('################\\n')\n txt_file.write('INFILEFORMAT FLOAT_DATA\\n')\n txt_file.write('CORRFILEFORMAT FLOAT_DATA\\n')\n txt_file.write('OUTFILEFORMAT FLOAT_DATA\\n')\n txt_file.write('################\\n')\n txt_file.close()\n\n # call snaphu\n os.system(path_snaphu+'snaphu -f ' + path_files_snaphu + 'sanphu_config.txt')\n\n ##read unwrapped phase computed with snaphu\n with open(path_files_snaphu + 'uw_phase.dat', mode='rb') as file: # b is important -> binary\n fileContent = file.read()\n uw_phase = struct.unpack(\"f\" * (len(fileContent) // 4), fileContent)\n uw_phase = np.array(uw_phase)\n uw_phase = uw_phase.reshape(phase.shape[0], phase.shape[1])\n\n ##remove tmp files\n os.remove(path_files_snaphu+'sanphu_config.txt')\n os.remove(path_files_snaphu + 'phase.dat')\n os.remove(path_files_snaphu + 'coh.dat')\n os.remove(path_files_snaphu + 'uw_phase.dat')\n\n return uw_phase\n\n\ndef baseline_correction_using_plane(coh_ab,uw_phase,kz):\n \"\"\" Baseline correction based on a plane\n\n WARNINGS:\n - From choi idl code\n - We should really check with TAXI the baseline correction for a better processing\n\n Parameters\n ----------\n coh_ab : 2D numpy array\n absolute value of the cohrece\n uw_phase : 2D numpy array\n unwrapped phase\n kz : 2D numpy array\n vertical wavenumber\n\n Returns\n -------\n plane : 2D numpy array\n Plane with the correction to be applyed to the interferogram\n\n\n Notes\n -------\n Author : Victor Cazcarra-Bes ([email protected])\n Date : February 2021\n\n\n \"\"\"\n\n log = logging.getLogger('baseline_correction_using_plane')\n log.info('Compute the baseline correction using a plane ...')\n\n z_res = uw_phase / kz\n cal_points = np.where(coh_ab > 0.95)\n residual = z_res[cal_points[0], cal_points[1]]\n HH = np.asarray(np.vstack([cal_points[0], cal_points[1], np.ones(len(cal_points[0]))]), 'float64')\n cc1 = np.matmul(HH, np.transpose(HH))\n cc2 = np.linalg.inv(cc1)\n cc3 = np.matmul(cc2, HH)\n coef = np.matmul(cc3, residual)\n rgmesh,azmesh = np.meshgrid(range(coh_ab.shape[1]), range(coh_ab.shape[0]))\n #plane = coef[0] * rgmesh + coef[1] * azmesh + coef[2]\n plane = coef[0] *azmesh + coef[1] * rgmesh + coef[2]\n\n\n\n return plane\n\n\n\n"
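A minimal, self-contained sketch of the plane fit performed by baseline_correction_using_plane above, rewritten with np.linalg.lstsq instead of the hand-built normal equations (mathematically equivalent, usually better conditioned). All arrays below are synthetic stand-ins, not pipeline outputs.

import numpy as np

rng = np.random.default_rng(0)
coh_ab = rng.uniform(0.9, 1.0, (200, 300))        # stand-in |coherence|
kz = np.full((200, 300), 0.1)                     # stand-in vertical wavenumber
uw_phase = 0.002 * np.arange(300)[None, :] * kz   # stand-in unwrapped phase

z_res = uw_phase / kz                             # residual height, as in the function above
rows, cols = np.where(coh_ab > 0.95)              # high-coherence calibration points
A = np.column_stack([rows, cols, np.ones(rows.size)])
coef, *_ = np.linalg.lstsq(A, z_res[rows, cols], rcond=None)
azmesh, rgmesh = np.meshgrid(np.arange(200), np.arange(300), indexing="ij")
plane = coef[0] * azmesh + coef[1] * rgmesh + coef[2]   # correction plane in SLC geometry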
] | [
[
"numpy.double",
"numpy.median",
"numpy.sign",
"numpy.nanmean",
"numpy.empty",
"numpy.conj",
"scipy.signal.windows.gaussian",
"numpy.cross",
"numpy.nansum",
"matplotlib.pyplot.imsave",
"numpy.polynomial.chebyshev.chebfit",
"numpy.polynomial.chebyshev.chebval",
"numpy.tan",
"numpy.mean",
"numpy.multiply",
"numpy.gradient",
"numpy.linalg.norm",
"numpy.angle",
"numpy.seterr",
"numpy.nanmin",
"numpy.arange",
"scipy.signal.find_peaks",
"numpy.nanmedian",
"numpy.square",
"numpy.array",
"numpy.matmul",
"numpy.zeros",
"numpy.round",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"scipy.ndimage.zoom",
"scipy.optimize.curve_fit",
"numpy.ones",
"scipy.ndimage.uniform_filter",
"numpy.repeat",
"numpy.meshgrid",
"numpy.argmin",
"numpy.copy",
"numpy.min",
"numpy.where",
"numpy.cos",
"numpy.frombuffer",
"scipy.interpolate.griddata",
"numpy.max",
"matplotlib.pyplot.colorbar",
"numpy.sin",
"numpy.transpose",
"numpy.nanmax",
"matplotlib.use",
"scipy.interpolate.interp1d",
"numpy.int",
"numpy.roll",
"matplotlib.pyplot.figure",
"matplotlib.cm.YlGn",
"numpy.power",
"numpy.floor",
"numpy.array_split",
"numpy.asarray",
"numpy.sum",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"numpy.abs",
"numpy.linspace",
"numpy.exp",
"numpy.size",
"matplotlib.pyplot.savefig",
"numpy.isfinite",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.mod",
"numpy.empty_like",
"numpy.reshape",
"numpy.float",
"numpy.clip",
"numpy.ceil",
"numpy.isnan",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.imshow"
]
] |
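The forest-height inversion called in the code above (forest_height_inversion) is defined elsewhere in the package and not shown in this file. As a toy illustration only, the snippet below inverts a synthetic coherence/kz look-up table to height with the same np.where pattern that get_min_max_valid_heights uses; the sinc-shaped LUT is an assumed stand-in for get_2dlut_kz_coh_height_from_master_profile, not the library's profile model.

import numpy as np

height_vector = np.linspace(0.0, 40.0, 200)
kz_lut_axes = np.linspace(0.0, 0.3, 50)
# box-profile volume decorrelation |sinc(kz*h/2)| as a stand-in LUT: rows kz, columns height
lut = np.abs(np.sinc(np.outer(kz_lut_axes, height_vector) / (2.0 * np.pi)))

def invert_height(coh_measured, kz_measured):
    pos_kz = np.argmin(np.abs(kz_measured - kz_lut_axes))   # nearest kz row of the LUT
    pos = np.where(coh_measured >= lut[pos_kz, :])          # first height whose LUT coherence falls to the measurement
    return height_vector[pos[0][0]] if len(pos[0]) > 0 else np.nan

print(invert_height(0.8, 0.15))   # roughly 15 m for this synthetic LUT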
fkluger/cuboids_revisited | [
"bf3f27849c7c703245642f110497f271ec659451"
] | [
"util/initialisation.py"
] | [
"from networks.res_net import Network\nfrom bts.pytorch.bts import BtsModel\nfrom util.tee import Tee\nfrom tensorboardX import SummaryWriter\nfrom torch import nn\nimport torch.optim as optim\nfrom collections import namedtuple\nimport torch\nimport os\nimport glob\nimport json\nimport csv\n\n\ndef load_opts_for_eval(opt):\n\n override_options = [\"a_max\", \"a_min\", \"align_depth\", \"bn_on_input\", \"correct_oai\", \"cuboidfitnn\",\n \"cuboidfitnnandadam\", \"cuboids\", \"depth_model\", \"fit_smallest\", \"fitting_iterations\", \"lbfgs\",\n \"min_prob\", \"mss\", \"normalise_depth\", \"num_probs\", \"no_oai_sampling\", \"no_oai_loss\", \"seqransac\",\n \"spare_prob\", \"threshold\", \"unconditional\", \"uniform\"]\n\n if os.path.isdir(opt.load):\n args_file = os.path.join(opt.load, \"commandline_args.txt\")\n\n with open(args_file) as f:\n orig_args = json.load(f)\n\n for orig_key, orig_value in orig_args.items():\n if orig_key in override_options:\n opt.__dict__[orig_key] = orig_value\n\n consac_path = os.path.join(opt.load, 'consac_weights_%06d.net' % opt.epochs)\n depth_path = os.path.join(opt.load, 'depth_weights_%06d.net' % opt.epochs)\n\n if os.path.exists(consac_path):\n opt.load = consac_path\n if os.path.exists(depth_path):\n opt.load_depth = depth_path\n\n return opt\n\n\ndef get_dimensions(opt, dataset=None, image_size=None):\n if dataset is not None:\n image_size = dataset.get_image_size()\n else:\n assert image_size is not None\n\n minimal_set_size = opt.mss\n\n H = image_size[0]\n W = image_size[1]\n Y = H * W\n\n H_ = H // 8\n W_ = W // 8\n Y_ = W_ * H_\n\n M = opt.instances\n P = opt.outerhyps\n S = opt.hyps\n Q = opt.num_probs\n K = opt.samplecount\n\n R = 1\n\n B = opt.batch\n model_dim = 9\n\n data_dim = 2\n dimensions = {\"M\": M, \"P\": P, \"S\": S, \"K\": K, \"Q\": Q, \"R\": R, \"B\": B, \"H\": H, \"W\": W, \"Y\": Y, \"H_\": H_, \"W_\": W_,\n \"Y_\": Y_, \"data\": data_dim, \"mss\": minimal_set_size, \"model\": model_dim}\n\n return H, W, Y, H_, W_, Y_, M, P, S, Q, R, B, K, model_dim, data_dim, minimal_set_size, dimensions\n\n\ndef get_log_and_checkpoint_directory(opt):\n if os.path.isdir(opt.ckpt_dir):\n ckpt_dirs = glob.glob(os.path.join(opt.ckpt_dir, \"session_*\"))\n ckpt_dirs.sort()\n if len(ckpt_dirs) > 0:\n last_ckpt_dir = os.path.split(ckpt_dirs[-1])[1]\n try:\n last_session_id = int(last_ckpt_dir[8:11])\n session_id = last_session_id + 1\n except:\n session_id = 0\n else:\n session_id = 0\n else:\n session_id = 0\n if opt.debugging:\n ckpt_dir = os.path.join(opt.ckpt_dir, \"debug_session\")\n else:\n ckpt_dir = os.path.join(opt.ckpt_dir, \"session_%03d_%s\" % (session_id, opt.depth_model))\n os.makedirs(ckpt_dir, exist_ok=True)\n\n log_file = os.path.join(ckpt_dir, \"output.log\")\n log = Tee(log_file, \"w\", file_only=False)\n\n loss_log_file = os.path.join(ckpt_dir, \"loss.log\")\n loss_log = open(loss_log_file, mode='w')\n loss_log_writer = csv.writer(loss_log, delimiter=',')\n loss_log_writer.writerow(['epoch', 'val_loss', 'train_loss'])\n\n with open(os.path.join(ckpt_dir, 'commandline_args.txt'), 'w') as f:\n json.dump(opt.__dict__, f, indent=2)\n\n tensorboard_directory = ckpt_dir + \"/tensorboard/\"\n if not os.path.exists(tensorboard_directory):\n os.makedirs(tensorboard_directory)\n tensorboard_writer = SummaryWriter(tensorboard_directory)\n\n return ckpt_dir, log, loss_log_writer, loss_log, tensorboard_writer\n\n\ndef get_devices(opt):\n depth_device_ids = [int(x) for x in opt.depth_gpu.split(\",\")]\n if depth_device_ids[0] is None or 
depth_device_ids[0] < 0 or not torch.cuda.is_available():\n depth_device = torch.device('cpu')\n else:\n depth_device = torch.device('cuda', depth_device_ids[0])\n\n if opt.consac_gpu is None or int(opt.consac_gpu) < 0 or not torch.cuda.is_available():\n consac_device = torch.device('cpu')\n else:\n consac_device = torch.device('cuda', int(opt.consac_gpu))\n\n if opt.fitting_gpu is None or int(opt.fitting_gpu) < 0 or not torch.cuda.is_available():\n fitting_device = torch.device('cpu')\n else:\n fitting_device = torch.device('cuda', int(opt.fitting_gpu))\n\n if opt.inlier_gpu is None or int(opt.inlier_gpu) < 0 or not torch.cuda.is_available():\n inlier_device = torch.device('cpu')\n else:\n inlier_device = torch.device('cuda', int(opt.inlier_gpu))\n\n return fitting_device, consac_device, depth_device, inlier_device\n\n\ndef get_depth_model(opt, devices):\n\n depth_device = devices[2]\n\n if opt.depth_model == \"bts\":\n\n depth_device_ids = [int(x) for x in opt.depth_gpu.split(\",\")]\n\n BtsArgs = namedtuple('BtsArgs', ['encoder', 'bts_size', 'max_depth', 'dataset'])\n args = BtsArgs(encoder='densenet161_bts', bts_size=512, max_depth=10, dataset='nyu')\n\n model = BtsModel(params=args, bn_on_final_depth=True)\n\n loaded_dict = torch.load(opt.load_depth, map_location=depth_device)\n\n model = nn.DataParallel(model, device_ids=depth_device_ids)\n\n model.to(depth_device)\n if \"model\" in loaded_dict.keys():\n model.load_state_dict(loaded_dict[\"model\"], strict=False)\n else:\n model.load_state_dict(loaded_dict, strict=False)\n\n feature_optimizer = optim.Adam(model.parameters(), lr=opt.depth_lr, eps=1e-4, weight_decay=1e-4)\n\n return {\"name\": opt.depth_model, \"model\": model,\n \"optimizer\": feature_optimizer, \"height\": 480, \"width\": 640}\n\n elif opt.depth_model == \"gt\":\n return {\"name\": opt.depth_model, \"model\": None,\n \"optimizer\": None, \"height\": 480, \"width\": 640}\n\n else:\n assert False, \"unknown depth model: %s\" % opt.depth_model\n\n\ndef get_consac_model(opt, devices, data_dim=2):\n\n if opt.seqransac:\n return {\"model\": None, \"optimizer\": None}\n\n minimal_set_size = opt.mss\n\n consac_device = devices[1]\n\n consac_model = Network(data_channels=data_dim, instance_norm=True, feature_size=0, bn_on_input=False,\n num_probs=opt.num_probs, separate_probs=1,\n additional_prob=False)\n\n if opt.load is not None:\n # print(\"consac device: \", consac_device)\n consac_model.load_state_dict(torch.load(opt.load, map_location=consac_device), strict=False)\n consac_model = consac_model.to(consac_device)\n\n consac_optimizer = optim.Adam(consac_model.parameters(), lr=opt.consac_lr, eps=1e-4, weight_decay=1e-4)\n\n return {\"model\": consac_model, \"optimizer\": consac_optimizer, \"scale\": 1./8}\n"
] | [
[
"torch.device",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel"
]
] |
DarthLazar/lenstronomy | [
"5973f9b45761bab434bb273a1882ca3b45f5264b"
] | [
"lenstronomy/LensModel/Profiles/gaussian_ellipse_potential.py"
] | [
"__author__ = 'sibirrer'\n#this file contains a class to make a gaussian\n\nimport numpy as np\nfrom lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa\nimport lenstronomy.Util.param_util as param_util\nfrom lenstronomy.LensModel.Profiles.base_profile import LensProfileBase\n\n__all__ = ['GaussianEllipsePotential']\n\n\nclass GaussianEllipsePotential(LensProfileBase):\n \"\"\"\n this class contains functions to evaluate a Gaussian function and calculates its derivative and hessian matrix\n with ellipticity in the convergence\n\n the calculation follows Glenn van de Ven et al. 2009\n\n \"\"\"\n param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y']\n lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100}\n upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100}\n\n def __init__(self):\n self.spherical = GaussianKappa()\n self._diff = 0.000001\n super(GaussianEllipsePotential, self).__init__()\n\n def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n returns Gaussian\n \"\"\"\n\n phi_G, q = param_util.ellipticity2phi_q(e1, e2)\n x_shift = x - center_x\n y_shift = y - center_y\n cos_phi = np.cos(phi_G)\n sin_phi = np.sin(phi_G)\n e = abs(1 - q)\n x_ = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e)\n y_ = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e)\n f_ = self.spherical.function(x_, y_, amp=amp, sigma=sigma)\n return f_\n\n def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n returns df/dx and df/dy of the function\n \"\"\"\n phi_G, q = param_util.ellipticity2phi_q(e1, e2)\n x_shift = x - center_x\n y_shift = y - center_y\n cos_phi = np.cos(phi_G)\n sin_phi = np.sin(phi_G)\n e = abs(1 - q)\n x_ = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e)\n y_ = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e)\n\n f_x_prim, f_y_prim = self.spherical.derivatives(x_, y_, amp=amp, sigma=sigma)\n f_x_prim *= np.sqrt(1 - e)\n f_y_prim *= np.sqrt(1 + e)\n f_x = cos_phi * f_x_prim - sin_phi * f_y_prim\n f_y = sin_phi * f_x_prim + cos_phi * f_y_prim\n return f_x, f_y\n\n def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2\n \"\"\"\n alpha_ra, alpha_dec = self.derivatives(x, y, amp, sigma, e1, e2, center_x, center_y)\n diff = self._diff\n alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, amp, sigma, e1, e2, center_x, center_y)\n alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, amp, sigma, e1, e2, center_x, center_y)\n\n f_xx = (alpha_ra_dx - alpha_ra) / diff\n f_xy = (alpha_ra_dy - alpha_ra) / diff\n f_yx = (alpha_dec_dx - alpha_dec) / diff\n f_yy = (alpha_dec_dy - alpha_dec) / diff\n return f_xx, f_xy, f_yx, f_yy\n\n def density(self, r, amp, sigma, e1, e2):\n \"\"\"\n\n :param r:\n :param amp:\n :param sigma:\n :return:\n \"\"\"\n return self.spherical.density(r, amp, sigma)\n\n def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):\n \"\"\"\n\n :param R:\n :param am:\n :param sigma_x:\n :param sigma_y:\n :return:\n \"\"\"\n return self.spherical.density_2d(x, y, amp, sigma, center_x, center_y)\n\n def mass_2d(self, R, amp, sigma, e1, e2):\n \"\"\"\n\n :param R:\n :param amp:\n :param sigma_x:\n :param sigma_y:\n :return:\n \"\"\"\n return self.spherical.mass_2d(R, amp, sigma)\n\n def mass_3d(self, R, amp, sigma, e1, e2):\n \"\"\"\n\n :param R:\n :param 
amp:\n :param sigma:\n :param e1:\n :param e2:\n :return:\n \"\"\"\n return self.spherical.mass_3d(R, amp, sigma)\n\n def mass_3d_lens(self, R, amp, sigma, e1, e2):\n \"\"\"\n\n :param R:\n :param amp:\n :param sigma:\n :param e1:\n :param e2:\n :return:\n \"\"\"\n return self.spherical.mass_3d_lens(R, amp, sigma)\n\n def mass_2d_lens(self, R, amp, sigma, e1, e2):\n \"\"\"\n\n :param R:\n :param amp:\n :param sigma_x:\n :param sigma_y:\n :return:\n \"\"\"\n return self.spherical.mass_2d_lens(R, amp, sigma)\n"
] | [
[
"numpy.sin",
"numpy.sqrt",
"numpy.cos"
]
] |
Mostafa3zazi/Face_off | [
"04b69ffad46ded5a6fdd4f0cfed07e1ebc704c5b"
] | [
"faceRecognizer.py"
] | [
"from cloudant.client import Cloudant\r\nfrom cloudant.error import CloudantException\r\nfrom cloudant.result import Result, ResultByKey\r\n\r\n#OpenCV module\r\nimport cv2\r\n#os module for reading training data directories and paths\r\nimport os\r\n#numpy to convert python lists to numpy arrays as it is needed by OpenCV face recognizers\r\nimport numpy as np\r\nfrom trainingFaces import prepare_training_data\r\n\r\n\r\ndef draw_text(img, text, x, y):\r\n cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)\r\n\r\n\r\nserviceUsername = \"6fd37c9b-4719-4fe6-9494-509a4f4e2130-bluemix\"\r\nservicePassword = \"349ecad031b57c58c0bd4b3991abd44b41d78a49632fb6456a038dcfd83951fc\"\r\nserviceURL = \"https://6fd37c9b-4719-4fe6-9494-509a4f4e2130-bluemix:349ecad031b57c58c0bd4b3991abd44b41d78a49632fb6456a038dcfd83951fc@6fd37c9b-4719-4fe6-9494-509a4f4e2130-bluemix.cloudant.com\"\r\n\r\n\r\nclient = Cloudant(serviceUsername, servicePassword, url=serviceURL)\r\nclient.connect()\r\nprint (\"Successfully connected to Cloudant\")\r\n\r\n\r\ndatabaseName = \"class\"\r\n##myDatabaseDemo = client.create_database(databaseName)\r\nmyDatabaseDemo = client[databaseName]\r\nif myDatabaseDemo.exists():\r\n print (\"'{0}' successfully created.\\n\".format(databaseName))\r\n\r\n\r\nprint(\"Collecting data\")\r\nfaces,labels=prepare_training_data(\"C:/Users/Unknown/Desktop/faceoff\")\r\nprint(labels)\r\n\r\nprint(\"Total faces: \", len(faces))\r\nprint(\"Total labels: \", len(labels))\r\n\r\nbanyAdmeen = [\"Unknown\",\"Hazem\", \"Azazi\",\"Omar\",\"Adel\"]\r\nflags=[0,0,0,0,0]\r\nface_recognizer = cv2.face.LBPHFaceRecognizer_create()\r\nface_recognizer.train(faces, np.array(labels))\r\n\r\ncam = cv2.VideoCapture(1)\r\ncam.set(3, 640) # set video widht\r\ncam.set(4, 480) # set video height\r\n# Define min window size to be recognized as a face\r\nminW = 0.1 * cam.get(3)\r\nminH = 0.1 * cam.get(4)\r\n\r\nwhile True:\r\n ret, img = cam.read()\r\n img = cv2.flip(img, 1) # Flip vertically\r\n cv2.imshow(\"camera\",img)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n## x=np.sum(gray)\r\n## print(x)\r\n## if x<15*1000000:\r\n## draw_text(img, \"brightness is low\",0,30)\r\n## cv2.imshow(\"camera\",img)\r\n## k = cv2.waitKey(10) & 0xff # Presss 'ESC' for exiting video\r\n## if k == 27:\r\n## break\r\n## continue\r\n face_cascade = cv2.CascadeClassifier('C:\\opencv\\sources\\data\\lbpcascades\\lbpcascade_frontalface.xml')\r\n face = face_cascade.detectMultiScale(gray,scaleFactor=1.2,minNeighbors=5,minSize=(int(minW), int(minH)))\r\n for (x, y, w, h) in face:\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n cv2.imshow(\"camera\",img)\r\n idh, confidence = face_recognizer.predict(gray[y:y + h, x:x + w])\r\n print ('the id: {0} the confidence {1}'.format(idh,confidence))\r\n print (banyAdmeen[idh])\r\n persentage = \" {0}%\".format(round(100 - confidence))\r\n cv2.putText(img, str(persentage), (x + 5, y + h - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1)\r\n if(confidence<40):\r\n draw_text(img, \"{}\".format(banyAdmeen[idh]),0,30)\r\n if flags[idh]==0:\r\n flags[idh]=1\r\n \r\n my_document = myDatabaseDemo[str(idh)]\r\n my_document['attendance']=my_document['attendance']+1\r\n print (\"saved\")\r\n my_document.save()\r\n \r\n cv2.imshow(\"camera\",img)\r\n k = cv2.waitKey(100) & 0xff # Presss 'ESC' for exiting video\r\n if k == 27:\r\n break\r\n\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n"
] | [
[
"numpy.array"
]
] |
vonHousen/lime | [
"72b2f2c0875c653179b420cd557e04888f5d78cb"
] | [
"lime/lime_base_singleclassifier.py"
] | [
"\"\"\"\nCustom modification of lime_base - that uses decision tree as single local surrogate.\n\"\"\"\nimport numpy as np\n\nfrom lime.lime_base_mod import LimeBaseMod\nfrom sklearn.tree import DecisionTreeClassifier\nfrom collections import defaultdict\nfrom lime.tools import convert_binary_output_to_decimal\n\n\nclass LimeBaseSingleDecisionTree(LimeBaseMod):\n \"\"\"\n Class for learning a local surrogate model from perturbed data.\n Custom modification - uses decision tree as local surrogate.\n \"\"\"\n def __init__(self,\n kernel_fn=None,\n verbose=False,\n random_state=None,\n **decision_tree_kwargs):\n \"\"\"Init function\n\n Args:\n kernel_fn: function that transforms an array of distances into an\n array of proximity values (floats).\n verbose: if true, print local prediction values from linear model.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. If None, the random state will be\n initialized using the internal numpy seed.\n decision_tree_kwargs: additional keyword arguments to be passed to DecisionTreeClassifier\n \"\"\"\n super().__init__(\n kernel_fn=kernel_fn,\n verbose=verbose,\n random_state=random_state\n )\n\n if len({\"random_state\", \"max_depth\"} & decision_tree_kwargs.keys()) > 0:\n raise RuntimeError(\"Argument in decision_tree_kwargs not allowed!\")\n self.decision_tree_kwargs = decision_tree_kwargs\n\n def explain_instance_with_data(self,\n neighborhood_data,\n neighborhood_labels,\n distances,\n label_indices_to_explain,\n num_features,\n feature_selection='none',\n model_regressor=None):\n \"\"\"Takes perturbed data, labels and distances, returns explanation.\n\n Args:\n neighborhood_data: perturbed data, 2d array. first element is\n assumed to be the original data point.\n neighborhood_labels: corresponding perturbed labels. should have as\n many columns as the number of possible labels.\n distances: distances to original data point.\n label: label for which we want an explanation\n num_features: maximum number of features in explanation\n feature_selection: deprecated - it cedes responsibility to the Tree, not feature_selection.\n model_regressor: deprecated - DecisionTreeClassifier is always selected\n\n Returns:\n (intercept, exp, score, local_pred):\n intercept is a float.\n explanation is a sorted list of tuples, where each tuple (x,y) corresponds\n to the feature id (x) and the local weight (y). 
The list is sorted\n by decreasing absolute value of y.\n score is the R^2 value of the returned explanation\n local_pred is the prediction of the explanation model on the original instance\n \"\"\"\n\n data_to_train_local_surrogate, local_surrogate, used_features, weights =\\\n self._train_local_surrogate(\n distances,\n \"none\",\n label_indices_to_explain,\n DecisionTreeClassifier(\n random_state=self.random_state,\n max_depth=num_features,\n **self.decision_tree_kwargs),\n neighborhood_data,\n neighborhood_labels,\n num_features)\n\n explanation = self._get_explanation(local_surrogate, used_features)\n\n return (None, # deprecated field\n explanation,\n local_surrogate,\n used_features,\n weights)\n\n @staticmethod\n def _get_explanation(local_surrogate, used_features):\n explanation = sorted(\n zip(used_features, local_surrogate.feature_importances_),\n key=lambda x: np.abs(x[1]),\n reverse=True)\n return explanation\n\n def _train_local_surrogate(self,\n distances,\n feature_selection,\n label_indices_to_explain,\n local_surrogate,\n neighborhood_data,\n neighborhood_labels,\n num_features):\n weights = self.kernel_fn(distances)\n\n # predicted labels are the labels with the greatest probability - simple majority is not required\n predicted_labels = np.argmax(neighborhood_labels, axis=1)\n prediction_results = np.zeros_like(neighborhood_labels, dtype=\"int32\")\n prediction_results[np.arange(prediction_results.shape[0]), predicted_labels] = 1\n classification_labels_columns = prediction_results[:, label_indices_to_explain]\n regression_labels_columns = neighborhood_labels[:, label_indices_to_explain]\n\n used_features = self._get_best_features(\n regression_labels_columns, feature_selection, neighborhood_data, num_features, weights)\n data_to_train_local_surrogate = neighborhood_data[:, used_features]\n expected_labels = convert_binary_output_to_decimal(classification_labels_columns)\n local_surrogate.fit(\n data_to_train_local_surrogate,\n expected_labels,\n sample_weight=weights)\n return data_to_train_local_surrogate, local_surrogate, used_features, weights\n\n def _get_best_features(self,\n regression_labels_columns,\n feature_selection,\n neighborhood_data,\n num_features,\n weights):\n \"\"\"\n Single classifier uses data with all labels at once.\n The self.feature_selection() method takes only one label at once, so it is executed in a loop, then - the most\n popular labels will be selected.\n \"\"\"\n if feature_selection == \"none\":\n return np.array(range(neighborhood_data.shape[1]))\n\n counter_for_feature = defaultdict(int)\n for column in regression_labels_columns.T:\n used_features = self.feature_selection(\n neighborhood_data,\n column,\n weights,\n num_features,\n feature_selection)\n for feature in used_features:\n counter_for_feature[feature] += 1\n\n sorted_features = \\\n [feature for feature, _ in sorted(counter_for_feature.items(),\n key=lambda item: item[1],\n reverse=True)]\n best_features = sorted_features[:num_features]\n return best_features\n\n"
] | [
[
"numpy.zeros_like",
"numpy.argmax",
"numpy.arange",
"numpy.abs",
"sklearn.tree.DecisionTreeClassifier"
]
] |
ZhehengJiang/Leicester-Fox2 | [
"2a8958b836bee38fc55ccd2bcd5361732e491b8c"
] | [
"load.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport json\nimport keras\nimport numpy as np\nimport os\nimport random\nimport scipy.io as sio\nSTEP = 256\n# MAX_LEN =71936\nMAX_LEN = 16384\n\ndef data_generator(batch_size, preproc, x, y):\n num_examples = len(x)\n examples = zip(x, y)\n examples = sorted(examples, key = lambda x: x[0].shape[0])\n end = num_examples - batch_size + 1\n batches = [examples[i:i+batch_size]\n for i in range(0, end, batch_size)]\n random.shuffle(batches)\n while True:\n for batch in batches:\n x, y = zip(*batch)\n yield preproc.process(x, y)\n\nclass Preproc:\n\n def __init__(self, ecg, codes):\n self.mean, self.std = compute_mean_std(ecg)\n # self.classes = sorted(set(l for label in labels for l in label))\n self.classes = codes\n self.int_to_class = dict( zip(range(len(self.classes)), self.classes))\n self.class_to_int = {c : i for i, c in self.int_to_class.items()}\n\n def process(self, x, y):\n return self.process_x(x), self.process_y(y)\n\n def process_x(self, x):\n x = pad_x(x)\n x = (x - self.mean) / self.std\n x = x[:, :, :]\n return x\n\n def process_y(self, y):\n # TODO, awni, fix hack pad with noise for cinc\n\n y = pad_y(y, val=self.classes[-1], dtype=np.dtype((str, 100)) )\n multi_labels = [s[0] for s in y]\n multi_labels = [s.strip().split(\",\") for s in multi_labels]\n y_new = []\n n=0\n for labels in multi_labels:\n targets=np.zeros((1,len(self.classes)))\n for i in range(len(labels)):\n l = keras.utils.np_utils.to_categorical(\n self.class_to_int[labels[i]], num_classes=len(self.classes))\n targets = targets + l\n y_new.append(np.repeat(targets,len(y[n]),axis=0))\n n=n+1\n return np.array(y_new)\n\ndef pad_x(x, val=0, dtype=np.float32):\n # max_len = max(i.shape[0] for i in x)\n max_len = MAX_LEN\n padded = np.full((len(x), max_len,x[0].shape[1]), val, dtype=dtype)\n for e, i in enumerate(x):\n padded[e, :len(i),:i.shape[1]] = i\n return padded\n\ndef pad_y(y, val=0, dtype=np.float32):\n # max_len = max(len(i) for i in y)\n max_len = int(MAX_LEN/STEP)\n padded = np.full((len(y), max_len), val, dtype=dtype)\n for e, i in enumerate(y):\n padded[e, :len(i)] = i\n return padded\n\ndef compute_mean_std(x):\n x = np.vstack(x)\n return (np.mean(x,axis=0).astype(np.float32),\n np.std(x,axis=0).astype(np.float32))\n\ndef load_ecg(record):\n if os.path.splitext(record)[1] == \".npy\":\n ecg = np.load(record)\n elif os.path.splitext(record)[1] == \".mat\":\n ecg = sio.loadmat(record)['val'].squeeze().transpose()\n else: # Assumes binary 16 bit integers\n with open(record, 'r') as fid:\n ecg = np.fromfile(fid, dtype=np.int16)\n trunc_samp = STEP * int(ecg.shape[0] / STEP)\n return ecg[:trunc_samp,:]\n\n"
] | [
[
"numpy.array",
"scipy.io.loadmat",
"numpy.load",
"numpy.mean",
"numpy.std",
"numpy.fromfile",
"numpy.dtype",
"numpy.vstack"
]
] |
ChristophRaab/prototorch | [
"c4913ecb33e14252fddb87317eab01a03c3c4e3a"
] | [
"tests/test_components.py"
] | [
"\"\"\"ProtoTorch components test suite.\"\"\"\n\nimport prototorch as pt\nimport torch\n\n\ndef test_labcomps_zeros_init():\n protos = torch.zeros(3, 2)\n c = pt.components.LabeledComponents(\n distribution=[1, 1, 1],\n initializer=pt.components.Zeros(2),\n )\n assert (c.components == protos).any() == True\n\n\ndef test_labcomps_warmstart():\n protos = torch.randn(3, 2)\n plabels = torch.tensor([1, 2, 3])\n c = pt.components.LabeledComponents(\n distribution=[1, 1, 1],\n initializer=None,\n initialized_components=[protos, plabels],\n )\n assert (c.components == protos).any() == True\n assert (c.component_labels == plabels).any() == True\n"
] | [
[
"torch.zeros",
"torch.tensor",
"torch.randn"
]
] |
WillieMaddox/MLND | [
"2276f281ad93a6d1427f10154faffa12f8deed49"
] | [
"src/nuswide.py"
] | [
"import os\nfrom time import time\nimport random\nimport threading\nimport numpy as np\n\nfrom glob import glob\nfrom sklearn.model_selection import train_test_split\n\nfrom keras import backend as K\nfrom keras.preprocessing.image import img_to_array, load_img\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications import vgg16\n\nEPS = np.finfo(float).eps\nsubset_n_classes = {'All': 81, 'Lite': 81, 'Object': 31, 'Scene': 33}\ndata_source_dir = \"/media/Borg_LS/DATA\"\n\n\nclass NUSWIDE(object):\n def __init__(self,\n source_dir=data_source_dir,\n subset_name='Lite',\n split_name='train',\n test_size=50, # in percent\n seed=42,\n llb=1,\n lub=10):\n\n \"\"\"Initialization\n\n :param\n subset_name: {All, Lite, Object, Scene}\n \"\"\"\n\n self.source_dir = os.path.join(source_dir, 'NUS-WIDE')\n self.subset_name = subset_name\n self.split_name = split_name\n self.test_size = test_size\n self.seed = seed\n self.label_lower_bound = llb # do not include any data with number of labels less than label_lower_bound\n self.label_upper_bound = lub # do not include any data with number of labels greater than label_upper_bound.\n\n self.labels = None\n self.image_filename_list = None\n\n self.image_dir = os.path.join(self.source_dir, \"Flickr\")\n self.n_classes = subset_n_classes[subset_name]\n assert (self.n_classes in (31, 33, 81))\n\n index_valid_file = '_'.join([subset_name.lower(), str(seed), str(100 - test_size), 'valid']) + '.npy'\n index_test_file = '_'.join([subset_name.lower(), str(seed), str(test_size), 'test']) + '.npy'\n self.index_files = {\n 'valid': '../data/input/' + index_valid_file,\n 'test': '../data/input/' + index_test_file\n }\n self.create_dataset()\n self.filter_bad_data()\n self.num_samples, self.num_classes = self.labels.shape\n assert self.n_classes == self.num_classes\n\n def create_image_filename_list(self, train_or_test):\n\n if self.subset_name is \"All\":\n # ./ImageList/TestImagelist.txt\n imglist_file = self.source_dir + \"/ImageList/\" + train_or_test + \"Imagelist.txt\"\n elif self.subset_name is \"Lite\":\n # ./Lite/imagelist/Test_imageOutPutFileList.txt\n imglist_file = self.source_dir + \"/Lite/imagelist/\" + train_or_test + \"_imageOutPutFileList.txt\"\n elif self.subset_name is \"Object\":\n # ./OBJECT/imagelist/TestObject_image_name.txt\n imglist_file = self.source_dir + \"/OBJECT/imagelist/\" + train_or_test + \"Object_image_name.txt\"\n elif self.subset_name is \"Scene\":\n # ./SCENE/imagelist/Test_imageOutPutFileList.txt\n imglist_file = self.source_dir + \"/SCENE/imagelist/\" + train_or_test + \"_imageOutPutFileList.txt\"\n else:\n raise ValueError\n\n with open(imglist_file) as ifs:\n image_filename_list = ifs.read().strip().replace(\"\\\\\", \"/\").split('\\n')\n\n return np.array(image_filename_list)\n\n # self.image_filename_list = [os.path.join(self.image_dir, fpath) for fpath in fpaths]\n\n def read_nuswide_labels_from_file(self, train_or_test):\n\n if self.subset_name is \"All\":\n # ./TrainTestLabels/Labels_airport_Test.txt\n template = self.source_dir + \"/TrainTestLabels/*\" + train_or_test + \".txt\"\n elif self.subset_name is \"Lite\":\n # ./Lite/groundtruth/Lite_Labels_airport_Test.txt\n template = self.source_dir + \"/Lite/groundtruth/Lite_Labels*\" + train_or_test + \".txt\"\n elif self.subset_name is \"Object\":\n # ./OBJECT/groundtruth/bearTest.txt\n template = self.source_dir + \"/OBJECT/groundtruth/*\" + train_or_test + \".txt\"\n elif self.subset_name is \"Scene\":\n # 
./SCENE/groundtruth/Test_Labels_airport.txt\n template = self.source_dir + \"/SCENE/groundtruth/\" + train_or_test + \"*.txt\"\n else:\n raise ValueError\n\n return np.vstack([np.loadtxt(f, dtype=np.uint8) for f in sorted(glob(template))]).T\n\n def filter_outliers(self):\n sums = np.sum(self.labels, axis=1)\n lb = np.where(self.label_lower_bound <= sums)\n ub = np.where(sums <= self.label_upper_bound)\n return np.intersect1d(lb, ub)\n\n def remove_images_with_missing_labels(self):\n good_indices = self.filter_outliers()\n self.labels = self.labels[good_indices]\n self.image_filename_list = self.image_filename_list[good_indices]\n\n def filter_bad_data(self):\n self.remove_images_with_missing_labels()\n\n def create_dataset(self):\n\n if self.split_name in (\"train\",):\n labels = self.read_nuswide_labels_from_file(\"Train\")\n image_filename_list = self.create_image_filename_list(\"Train\")\n\n elif self.split_name in (\"valid\", \"test\"):\n y_testval = self.read_nuswide_labels_from_file(\"Test\")\n x_testval = self.create_image_filename_list(\"Test\")\n index_file = self.index_files[self.split_name]\n\n if os.path.exists(index_file):\n index = np.load(index_file)\n labels = y_testval[index]\n image_filename_list = x_testval[index]\n else:\n index_testval = np.arange(len(y_testval))\n index_valid, index_test, y_valid, y_test = train_test_split(\n index_testval,\n y_testval,\n test_size=self.test_size/100.0,\n random_state=self.seed)\n\n np.save(self.index_files['valid'], index_valid)\n np.save(self.index_files['test'], index_test)\n\n if self.split_name == 'valid':\n labels = y_valid\n image_filename_list = x_testval[index_valid]\n else:\n labels = y_test\n image_filename_list = x_testval[index_test]\n\n else:\n raise ValueError\n\n self.labels = labels\n self.image_filename_list = image_filename_list\n\n def create_concepts_names_file(self, concepts_file):\n # Comes with NUS-WIDE dataset. Concepts81 should be in self.source_dir\n raise ValueError(concepts_file, \"should already exist!!!\")\n\n def create_object_names_file(self, concepts_file):\n # converts \"OBJECT/groundtruth/bearTest.txt\" to \"bear\"\n template = self.source_dir + \"/OBJECT/groundtruth/*Test.txt\"\n with open(concepts_file, 'w') as ofs:\n for f in sorted(glob(template)):\n ofs.write(f.rsplit(os.sep)[-1].split('T')[0] + '\\n')\n\n def create_scene_names_file(self, concepts_file):\n # converts \"SCENE/groundtruth/Test_Labels_airport.txt\" to \"airport\"\n template = self.source_dir + \"/SCENE/groundtruth/Test_Labels*.txt\"\n with open(concepts_file, 'w') as ofs:\n for f in sorted(glob(template)):\n ofs.write(f.rsplit(os.sep)[-1].split('.')[0].split('_')[-1] + '\\n')\n\n def load_label_names(self):\n\n label_names_file = os.path.join(self.source_dir, \"Concepts\" + str(self.n_classes) + \".txt\")\n if not os.path.exists(label_names_file):\n if self.n_classes == 81:\n self.create_concepts_names_file(label_names_file)\n elif self.n_classes == 33:\n self.create_scene_names_file(label_names_file)\n elif self.n_classes == 31:\n self.create_object_names_file(label_names_file)\n else:\n raise ValueError(\"Incorrect number of classes. 
Should be one of (31, 33, 81)\")\n\n with open(label_names_file, 'r') as ifs:\n label_names = ifs.read().strip().split('\\n')\n return np.array(label_names)\n\n\nclass NusWideGenerator(object):\n def __init__(self,\n image_data_generator=ImageDataGenerator(),\n subset_name='Lite',\n split_name='train',\n source_dir=data_source_dir,\n store_labels=False,\n batch_size=1,\n group_method='none', # 'none' or 'random'\n shuffle=True,\n seed=None,\n standardize_method='zmuv'\n ):\n\n \"\"\"Initialization\"\"\"\n self.subset_name = subset_name\n self.image_data_generator = image_data_generator\n self._nuswide = NUSWIDE(source_dir, subset_name, split_name)\n\n self._num_samples = None\n self._num_classes = None\n self._steps = None\n self._images = None\n self._labels = None\n self._label_names = None\n\n # self.class_ids = None\n # self.class_id_to_name = {}\n # self.class_id_to_index = {}\n # self.names = None\n # self.name_to_class_id = {}\n # self.name_to_index = {}\n # self.load_metadata()\n\n self.batch_size = int(batch_size)\n self.group_method = group_method\n self.shuffle_groups = shuffle\n\n # self.store_labels = store_labels\n self.stored_labels = np.zeros((self.num_samples, self.num_classes)) if store_labels else None\n\n if seed is None:\n seed = np.uint32((time() % 1) * 1000)\n np.random.seed(seed)\n\n self.standardize_method = standardize_method\n self.groups = []\n self.group_index = 0\n self.lock = threading.Lock()\n\n self.group_images()\n\n # def load_metadata(self):\n # cats = self._nuswide.loadCats(self._nuswide.getCatIds())\n # cats.sort(key=lambda x: x['id'])\n # self.class_ids = tuple([c['id'] for c in cats])\n # self.class_id_to_name = {c['id']: c['name'] for c in cats}\n # self.class_id_to_index = {cid: i for i, cid in enumerate(self.class_ids)}\n # self.names = tuple([c['name'] for c in cats])\n # self.name_to_class_id = {c['name']: c['id'] for c in cats}\n # self.name_to_index = {cname: i for i, cname in enumerate(self.names)}\n\n @property\n def num_samples(self):\n if self._num_samples is None:\n self._num_samples = self._nuswide.num_samples\n return self._num_samples\n\n @property\n def num_classes(self):\n if self._num_classes is None:\n self._num_classes = self._nuswide.num_classes\n return self._num_classes\n\n @property\n def steps(self):\n if self._steps is None:\n self._steps = self.num_samples / self.batch_size\n return self._steps\n\n @property\n def labels(self):\n if self._labels is None:\n self._labels = self._nuswide.labels\n return self._labels\n\n @property\n def label_names(self):\n if self._label_names is None:\n self._label_names = self._nuswide.load_label_names()\n return self._label_names\n\n @property\n def images(self):\n if self._images is None:\n self._images = self.load_image_group(np.arange(self.num_samples))\n return self._images\n\n def group_images(self):\n order = np.arange(self.num_samples)\n if self.group_method == 'random':\n np.random.shuffle(order)\n p = list(range(0, len(order), self.batch_size))[1:]\n self.groups = np.split(order, p)\n\n def load_image(self, image_index, dtype=np.uint8):\n image_filename = self._nuswide.image_filename_list[image_index]\n img_path = os.path.join(self._nuswide.image_dir, image_filename)\n img = load_img(img_path, target_size=(224, 224))\n x = img_to_array(img).astype(dtype)\n return np.expand_dims(x, axis=0)\n\n def load_image_group(self, group, dtype=np.uint8):\n return np.vstack([self.load_image(image_index, dtype=dtype) for image_index in group])\n\n def load_labels(self, image_index):\n return 
self.labels[image_index]\n\n def load_labels_group(self, group, dtype=np.uint8):\n return np.vstack([self.load_labels(image_index) for image_index in group])\n\n def preprocess_group(self, image_group):\n for index, image in enumerate(image_group):\n # image = vgg16.preprocess_input(image, mode='tf')\n if self.standardize_method == 'zmuv':\n image = self.image_data_generator.standardize(image)\n image = self.image_data_generator.random_transform(image)\n image_group[index] = image\n return image_group\n\n def compute_input_output(self, group):\n image_group = self.load_image_group(group, dtype=K.floatx())\n labels_group = self.load_labels_group(group)\n\n if self.standardize_method == 'inet':\n image_group = vgg16.preprocess_input(image_group, mode='tf')\n image_group = self.preprocess_group(image_group)\n\n if self.stored_labels is not None:\n for g, lg in zip(group, labels_group):\n self.stored_labels[g, :] = lg\n\n return image_group, labels_group\n\n def __next__(self):\n return self.next()\n\n def next(self):\n # advance the group index\n with self.lock:\n if self.group_index == 0 and self.shuffle_groups:\n # shuffle groups at start of epoch\n random.shuffle(self.groups)\n group = self.groups[self.group_index]\n self.group_index = (self.group_index + 1) % len(self.groups)\n\n return self.compute_input_output(group)\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n nw_train_gen = NusWideGenerator(subset_name='Object', split_name='train')\n train_sums_0 = np.sum(nw_train_gen.labels, axis=0)\n train_sums_1 = np.sum(nw_train_gen.labels, axis=1)\n idx = np.random.randint(len(nw_train_gen.labels))\n img = nw_train_gen.load_image(idx)[0]\n plt.imshow(img)\n plt.show()\n print(nw_train_gen.labels.shape, idx, img.shape, img.min(), img.max())\n # print(label_names[np.where(nw_train_gen.labels[idx])[0]])\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.sum",
"numpy.load",
"numpy.split",
"numpy.random.shuffle",
"numpy.save",
"numpy.finfo",
"numpy.where",
"numpy.loadtxt",
"numpy.arange",
"numpy.intersect1d",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"numpy.expand_dims",
"matplotlib.pyplot.imshow"
]
] |
volpatto/chemicals | [
"721904ee17604f5e8685b0e5fff12e0bac567f73"
] | [
"tests/test_temperature.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Chemical Engineering Design Library (ChEDL). Utilities for process modeling.\nCopyright (C) 2016, Caleb Bell <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport pytest\nfrom chemicals.temperature import *\nimport numpy as np\nfrom fluids.numerics import assert_close, assert_close1d, linspace\n\n\ndef test_data():\n Ts_sums_calc = [np.array(i).sum() for i in [Ts_68, Ts_48, Ts_76, Ts_27]]\n Ts_sums = [186818.69999999998, 175181.39999999997, 368, 133893.09999999998]\n assert_close1d(Ts_sums_calc, Ts_sums)\n diffs_sums_calc = [abs(np.array(i)).sum() for i in [diffs_68, diffs_48, diffs_76, diffs_27]]\n diffs_sums = [46.304000000000016, 151.31800000000001, 0.038800000000000001, 411.17999999999995]\n assert_close1d(diffs_sums_calc, diffs_sums)\n\n\ndef test_conversion():\n # TODO actually test data points instead of covering everything in a slow test\n \n T2 = T_converter(500, 'ITS-68', 'ITS-48')\n assert_close(T2, 499.9470092992346)\n\n high_scales = ('ITS-90', 'ITS-68', 'ITS-27', 'ITS-48')\n\n for scale1 in high_scales:\n for scale2 in high_scales:\n T = T_converter(1000.0, scale1, scale2)\n assert_close(T_converter(T, scale2, scale1), 1000.0)\n\n mid_scales = ('ITS-90', 'ITS-68', 'ITS-48')\n\n for Ti in linspace(100, 1000, 10):\n for scale1 in mid_scales:\n for scale2 in mid_scales:\n T = T_converter(Ti, scale1, scale2)\n assert_close(T_converter(T, scale2, scale1), Ti, rtol=1e-6)\n\n low_scales = ('ITS-90', 'ITS-68', 'ITS-76')\n \n for Ti in (15, 17, 19, 21, 23, 25):\n for scale1 in low_scales:\n for scale2 in low_scales:\n T = T_converter(Ti, scale1, scale2)\n assert_close(T_converter(T, scale2, scale1), Ti)\n\n with pytest.raises(Exception):\n T_converter(10, 'ITS-27', 'ITS-48')\n\n with pytest.raises(Exception):\n T_converter(10, 'FAIL', 'ITS-48')\n\n with pytest.raises(Exception):\n T_converter(10, 'ITS-76', 'FAIL')\n\n\ndef test_diff_68():\n dTs_calc = [ITS90_68_difference(i) for i in [13.7, 70, 80.5, 298.15, 1000, 1500]]\n\n\n dTs = [0, 0.006818871618271216, 0, -0.006253950277664615,\n 0.01231818956580355, -0.31455]\n assert_close1d(dTs, dTs_calc)"
] | [
[
"numpy.array"
]
] |
Willtor/deepsparse | [
"5a5557f4bf9026545116b22b36dcb7d506e8a070"
] | [
"src/deepsparse/benchmark.py"
] | [
"# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCode related to benchmarking batched inference runs\n\"\"\"\n\nfrom typing import Any, Dict, Iterable, Iterator, List, Union\n\nimport numpy\n\n\n__all__ = [\"BatchBenchmarkResult\", \"BenchmarkResults\"]\n\n\nclass BatchBenchmarkResult(object):\n \"\"\"\n A benchmark result for a batched inference run\n\n :param time_start: The system time when the run for the batch was started\n :param time_end: The system time when the run for the batch ended\n :param batch_size: The size of the batch that was benchmarked\n :param inputs: Optional batch inputs that were given for the run\n :param outputs: Optional batch outputs that were given for the run\n :param extras: Optional batch extras to store any other data for the run\n \"\"\"\n\n def __init__(\n self,\n time_start: float,\n time_end: float,\n batch_size: int,\n inputs: Union[None, List[numpy.ndarray]] = None,\n outputs: Union[None, List[numpy.ndarray], Dict[str, numpy.ndarray]] = None,\n extras: Any = None,\n ):\n self._time_start = time_start\n self._time_end = time_end\n self._batch_size = batch_size\n self._inputs = inputs\n self._outputs = outputs\n self._extras = extras\n\n def __repr__(self):\n props = {\n \"time_start\": self.time_start,\n \"time_end\": self.time_end,\n \"size\": self.batch_size,\n \"batches_per_second\": self.batches_per_second,\n \"items_per_second\": self.items_per_second,\n \"ms_per_batch\": self.ms_per_batch,\n \"ms_per_item\": self.ms_per_item,\n }\n\n return f\"{self.__class__.__name__}({props})\"\n\n def __str__(self):\n return (\n f\"{self.__class__.__name__}(ms_per_batch={self.ms_per_batch}, \"\n f\"items_per_second={self.items_per_second})\"\n )\n\n @property\n def time_start(self) -> float:\n \"\"\"\n :return: The system time when the run for the batch was started\n \"\"\"\n return self._time_start\n\n @property\n def time_end(self) -> float:\n \"\"\"\n :return: The system time when the run for the batch ended\n \"\"\"\n return self._time_end\n\n @property\n def time_elapsed(self) -> float:\n \"\"\"\n :return: The time elapsed for the entire run (end - start)\n \"\"\"\n return self._time_end - self._time_start\n\n @property\n def batch_size(self) -> int:\n \"\"\"\n :return: The size of the batch that was benchmarked\n \"\"\"\n return self._batch_size\n\n @property\n def inputs(self) -> Union[None, List[numpy.ndarray]]:\n \"\"\"\n :return: Batch inputs that were given for the run, if any\n \"\"\"\n return self._inputs\n\n @property\n def outputs(self) -> Union[None, List[numpy.ndarray]]:\n \"\"\"\n :return: Batch outputs that were given for the run, if any\n \"\"\"\n return self._outputs\n\n @property\n def extras(self) -> Any:\n \"\"\"\n :return: Batch extras to store any other data for the run\n \"\"\"\n return self._extras\n\n @property\n def batches_per_second(self) -> float:\n \"\"\"\n :return: The number of batches that could be run in one second\n based on this result\n 
\"\"\"\n return 1.0 / self.time_elapsed\n\n @property\n def items_per_second(self) -> float:\n \"\"\"\n :return: The number of items that could be run in one second\n based on this result\n \"\"\"\n return self._batch_size / self.time_elapsed\n\n @property\n def ms_per_batch(self) -> float:\n \"\"\"\n :return: The number of milliseconds it took to run the batch\n \"\"\"\n return self.time_elapsed * 1000.0\n\n @property\n def ms_per_item(self) -> float:\n \"\"\"\n :return: The averaged number of milliseconds it took to run each item\n in the batch\n \"\"\"\n return self.time_elapsed * 1000.0 / self._batch_size\n\n\nclass BenchmarkResults(Iterable):\n \"\"\"\n The benchmark results for a list of batched inference runs\n \"\"\"\n\n def __init__(self):\n self._results = [] # type: List[BatchBenchmarkResult]\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._properties_dict})\"\n\n def __str__(self):\n \"\"\"\n :return: Human readable form of the benchmark summary\n \"\"\"\n formatted_props = [\n \"\\t{}: {}\".format(key, val) for key, val in self._properties_dict.items()\n ]\n return \"{}:\\n{}\".format(\n self.__class__.__name__,\n \"\\n\".join(formatted_props),\n )\n\n def __len__(self) -> int:\n return len(self._results)\n\n def __getitem__(self, index: int) -> BatchBenchmarkResult:\n return self._results[index]\n\n def __iter__(self) -> Iterator[BatchBenchmarkResult]:\n for result in self._results:\n yield result\n\n @property\n def _properties_dict(self) -> Dict:\n return {\n \"items_per_second\": self.items_per_second,\n \"ms_per_batch\": self.ms_per_batch,\n \"batch_times_mean\": self.batch_times_mean,\n \"batch_times_median\": self.batch_times_median,\n \"batch_times_std\": self.batch_times_std,\n }\n\n @property\n def results(self) -> List[BatchBenchmarkResult]:\n \"\"\"\n :return: the list of recorded batch results\n \"\"\"\n return self._results\n\n @property\n def num_batches(self) -> int:\n \"\"\"\n :return: the number of batches that have been added\n \"\"\"\n return len(self)\n\n @property\n def num_items(self) -> int:\n \"\"\"\n :return: the number of items across all batches that have been added\n \"\"\"\n num_items = sum([res.batch_size for res in self._results])\n\n return num_items\n\n @property\n def batch_times(self) -> List[float]:\n \"\"\"\n :return: the list of all batch run times that have been added\n \"\"\"\n return [res.time_elapsed for res in self._results]\n\n @property\n def batch_sizes(self) -> List[int]:\n \"\"\"\n :return: the list of all batch run sizes that have been added\n \"\"\"\n return [res.batch_size for res in self._results]\n\n @property\n def batch_times_mean(self) -> float:\n \"\"\"\n :return: the mean of all the batch run times that have been added\n \"\"\"\n return numpy.mean(self.batch_times).item()\n\n @property\n def batch_times_median(self) -> float:\n \"\"\"\n :return: the median of all the batch run times that have been added\n \"\"\"\n return numpy.median(self.batch_times).item()\n\n @property\n def batch_times_std(self) -> float:\n \"\"\"\n :return: the standard deviation of all the batch run times that have been added\n \"\"\"\n return numpy.std(self.batch_times).item()\n\n @property\n def batches_per_second(self) -> float:\n \"\"\"\n :return: The number of batches that could be run in one second\n based on this result\n \"\"\"\n return self.num_batches / sum(self.batch_times)\n\n @property\n def items_per_second(self) -> float:\n \"\"\"\n :return: The number of items that could be run in one second\n based on this 
result\n \"\"\"\n return self.num_items / sum(self.batch_times)\n\n @property\n def ms_per_batch(self) -> float:\n \"\"\"\n :return: The number of milliseconds it took to run the batch\n \"\"\"\n return sum(self.batch_times) * 1000.0 / self.num_batches\n\n @property\n def ms_per_item(self) -> float:\n \"\"\"\n :return: The averaged number of milliseconds it took to run each item\n in the batch\n \"\"\"\n return sum(self.batch_times) * 1000.0 / self.num_items\n\n @property\n def inputs(self) -> Union[None, List[numpy.ndarray]]:\n \"\"\"\n :return: Batch inputs that were given for the run, if any\n \"\"\"\n return [res.inputs for res in self._results]\n\n @property\n def outputs(self) -> Union[None, List[numpy.ndarray]]:\n \"\"\"\n :return: Batch outputs that were given for the run, if any\n \"\"\"\n return [res.outputs for res in self._results]\n\n def append_batch(\n self,\n time_start: float,\n time_end: float,\n batch_size: int,\n inputs: Union[None, List[numpy.ndarray]] = None,\n outputs: Union[None, List[numpy.ndarray], Dict[str, numpy.ndarray]] = None,\n extras: Any = None,\n ):\n \"\"\"\n Add a recorded batch to the current results\n\n :param time_start: The system time when the run for the batch was started\n :param time_end: The system time when the run for the batch ended\n :param batch_size: The size of the batch that was benchmarked\n :param inputs: Optional batch inputs that were given for the run\n :param outputs: Optional batch outputs that were given for the run\n :param extras: Optional batch extras to store any other data for the run\n \"\"\"\n self._results.append(\n BatchBenchmarkResult(\n time_start, time_end, batch_size, inputs, outputs, extras\n )\n )\n"
] | [
[
"numpy.median",
"numpy.std",
"numpy.mean"
]
] |
TDMoses/corridor-of-time-auto-map-gen | [
"717413678a16333bb2aa074d11863c4f32a1bc1c"
] | [
"main.py"
] | [
"import warnings\r\nimport math\r\nimport csv\r\nimport numpy as np\r\nfrom hexagon import Hexagon\r\nfrom PIL import Image\r\nfrom matplotlib import pyplot as plt\r\n\r\n_link_connections = dict(\r\n [(1, 4), (2, 5), (3, 6), (4, 1), (5, 2), (6, 3)]\r\n)\r\n\r\n\r\ndef convert_num_to_link_name(link_num):\r\n return 'link' + str(link_num + 1)\r\n\r\n\r\ndef convert_link_name_to_num(link_name):\r\n return int(link_name[-1]) - 1\r\n\r\n\r\ndef connect_hexes(hexagon1, hexagon2, link_id1, link_id2):\r\n hexagon1.set_hexagon_link(hexagon2, link_id1)\r\n hexagon2.set_hexagon_link(hexagon1, link_id2)\r\n\r\n\r\ndef match_hexagons(hexagons, hexagon_missing_links):\r\n for hexagon in hexagons:\r\n hex_missing_links = hexagon.get_available_links()\r\n for missing_link_id in hex_missing_links:\r\n matching_link_id = _link_connections[missing_link_id + 1] - 1 # back to 0 index\r\n hex_link_value = hexagon.links[missing_link_id]\r\n missing_link_name = convert_num_to_link_name(missing_link_id)\r\n matching_link_name = convert_num_to_link_name(matching_link_id)\r\n for search_list_id, possible_hex_id in \\\r\n enumerate(hexagon_missing_links[matching_link_name]):\r\n possible_hex = hexagons[possible_hex_id]\r\n\r\n if possible_hex.is_link_match(link_id=matching_link_id,\r\n link_value=hex_link_value):\r\n connect_hexes(hexagon1=hexagon,\r\n hexagon2=possible_hex,\r\n link_id1=missing_link_id,\r\n link_id2=matching_link_id)\r\n try:\r\n del hexagon_missing_links[matching_link_name][search_list_id]\r\n hexagon_missing_links[missing_link_name].remove(hexagon.id)\r\n except Exception as e:\r\n print(e)\r\n # import ipdb;ipdb.set_trace(context=5)\r\n print()\r\n break\r\n\r\n break\r\n\r\n\r\ndef set_hexagon_neightbor_locations(hexagon, hexagons):\r\n\r\n hex_outer_centers = [[ 1.07156595e-14, -1.75000000e+02],\r\n [ 1.51554446e+02, -8.75000000e+01],\r\n [ 1.51554446e+02, 8.75000000e+01],\r\n [ 1.07156595e-14, 1.75000000e+02],\r\n [-1.51554446e+02, 8.75000000e+01],\r\n [-1.51554446e+02, -8.75000000e+01]]\r\n\r\n link_relative_pos = dict(list(zip(range(6), hex_outer_centers)))\r\n for link_num, neighbor_id in enumerate(hexagon.hexagon_links):\r\n if neighbor_id not in [None, False]:\r\n neighbor = hexagons[neighbor_id]\r\n if not np.isnan(neighbor.map_x):\r\n continue\r\n neighbor.map_x = hexagon.map_x + link_relative_pos[link_num][0]\r\n neighbor.map_y = hexagon.map_y + link_relative_pos[link_num][1]\r\n\r\n\r\ndef build_maps(hexagons):\r\n\r\n maps = []\r\n\r\n for initial_hexagon in hexagons:\r\n if initial_hexagon.visited:\r\n continue\r\n\r\n initial_hexagon.map_x = 0\r\n initial_hexagon.map_y = 0\r\n initial_hexagon.visited = True\r\n\r\n queue = initial_hexagon.get_hexagon_connetions()\r\n set_hexagon_neightbor_locations(initial_hexagon, hexagons)\r\n maps.append([initial_hexagon])\r\n\r\n prev_hex_id = initial_hexagon.id\r\n\r\n while queue:\r\n curr_hex_id = queue.pop()\r\n curr_hex = hexagons[curr_hex_id]\r\n\r\n if not curr_hex.visited:\r\n curr_hex.visited = True\r\n queue.extend(curr_hex.get_hexagon_connetions())\r\n set_hexagon_neightbor_locations(curr_hex, hexagons)\r\n maps[-1].append(curr_hex)\r\n return maps\r\n\r\ndef build_images(maps):\r\n\r\n def get_min_max_coords(map):\r\n min_coords = [np.inf , np.inf]\r\n max_coords = [-np.inf, -np.inf]\r\n for hexagon in map:\r\n if hexagon.map_x < min_coords[0]:\r\n min_coords[0] = hexagon.map_x\r\n if hexagon.map_y < min_coords[1]:\r\n min_coords[1] = hexagon.map_y\r\n if hexagon.map_x > max_coords[0]:\r\n max_coords[0] = hexagon.map_x\r\n if 
hexagon.map_y > max_coords[1]:\r\n max_coords[1] = hexagon.map_y\r\n return min_coords, max_coords\r\n\r\n for map_id, map_ in enumerate(maps):\r\n if len(map_) < 5:\r\n continue\r\n min_coords, max_coords = get_min_max_coords(map_)\r\n centering_image_shift = 100\r\n\r\n image_size =(\r\n math.ceil(max_coords[0] - min_coords[0]) + centering_image_shift*2,\r\n math.ceil(max_coords[1] - min_coords[1]) + centering_image_shift*2\r\n )\r\n\r\n img = Image.new('RGBA', image_size, (255, 255, 255, 255))\r\n for hexagon in map_:\r\n hex_img = hexagon.draw_self()\r\n hex_loc = (int(round(hexagon.map_x - min_coords[0])),\r\n int(round(hexagon.map_y - min_coords[1])))\r\n img.paste(hex_img, hex_loc, mask=hex_img)\r\n\r\n img.convert('RGB').save('maps/map' + str(map_id) + '.jpg', 'JPEG', quality=80)\r\n # img_data = np.array(img)\r\n # plt.imshow(img_data)\r\n # plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n _filepath = \"dawning_sample.csv\"\r\n shift_openings = False\r\n\r\n _hexagons = []\r\n _hexagon_missing_links = dict(\r\n link1=[], link2=[], link3=[], link4=[], link5=[], link6=[]\r\n )\r\n\r\n _dup_hash = dict()\r\n with open(_filepath, 'r') as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n header = next(csvreader)\r\n\r\n _hexagon_id = 0\r\n for row_id, row in enumerate(csvreader):\r\n row = dict(list(zip(header[:12], row[:12])))\r\n # import ipdb;ipdb.set_trace(context=5)\r\n if all(np.array(list(row.values())) == ''):\r\n continue\r\n if row['Center'].lower().strip() in ['duplicate', '100% match', 'none']:\r\n continue\r\n elif row['Link1'].strip() == '':\r\n warnings.warn('missing link row: {}'.format(str(_hexagon_id)))\r\n continue\r\n\r\n row['Openings'] = row['Openings'].replace('.', ',')\r\n if shift_openings and row['Openings'].strip() != '':\r\n try:\r\n _openings = list(map(lambda x: str(int(x) + 1), row['Openings'].strip().split(',')))\r\n except Exception as e:\r\n import ipdb; ipdb.set_trace(context=5)\r\n print(e)\r\n else:\r\n _openings = row['Openings'].strip().split(',')\r\n\r\n _new_hex = Hexagon(\r\n id=_hexagon_id,\r\n spreadsheet_row= row_id + 2,\r\n center=row['Center'].strip(),\r\n openings=_openings,\r\n link1=row['Link1'].strip(),\r\n link2=row['Link2'].strip(),\r\n link3=row['Link3'].strip(),\r\n link4=row['Link4'].strip(),\r\n link5=row['Link5'].strip(),\r\n link6=row['Link6'].strip(),\r\n )\r\n if _dup_hash.get(_new_hex.get_hash(), False):\r\n continue\r\n _dup_hash[_new_hex.get_hash()] = True\r\n\r\n _hexagons.append(_new_hex)\r\n for _missing_link_num in _hexagons[-1].get_available_links():\r\n _link_name = convert_num_to_link_name(_missing_link_num)\r\n _hexagon_missing_links[_link_name].append(_hexagon_id)\r\n\r\n _hexagon_id += 1\r\n match_hexagons(_hexagons, _hexagon_missing_links)\r\n\r\n # import ipdb;ipdb.set_trace(context=5)\r\n maps = build_maps(_hexagons)\r\n build_images(maps)\r\n print(_hexagons)\r\n"
] | [
[
"numpy.isnan"
]
] |
sandcobainer/MUSI6202Synth | [
"6c5d5c53c016ff93019fca87c75b97e7a934eeb2"
] | [
"postprocessing.py"
] | [
"# Post processing scripts: interpolation, dithering, resampling and output\n\nfrom scipy import signal\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport wavio\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Downsampler:\n output_fs: int = 48000\n output_br: int = 32\n\n def write_wav(self, wave_file_path, data, fs=output_fs, bitrate=output_br):\n \"\"\"\n Functions writes result to output wavefile at \n specified bitrate and sampling rate\n \"\"\"\n if bitrate == 8:\n sample_width = 1\n elif bitrate == 16:\n sample_width = 2\n elif bitrate == 24:\n sample_width = 3\n else:\n sample_width = 4\n\n wavio.write(wave_file_path, data, fs, sampwidth=sample_width)\n\n \n def low_pass(self, data, Fs_new, Fs):\n \"\"\"\n Functions removes frequencies above the Shannon-Nyquist frequency\n \"\"\"\n b, a = signal.butter(N=2, Wn=Fs_new / 2, btype='low', analog=False, fs=Fs)\n filtered = signal.filtfilt(b, a, data)\n return filtered.astype(np.int32)\n\n \n def down_sample(self, data, factor, target_fs, source_fs):\n \"\"\"\n Function down samples incoming sample to lower sampling rate\n \"\"\"\n low_filtered = self.low_pass(data, target_fs, source_fs)\n return low_filtered[::factor]\n\n \n def cubic_interpolate(self, data, t, num_samples):\n \"\"\"\n Functions implements cubic interpolation\n \"\"\"\n x = np.linspace(0, t, num=len(data), endpoint=True)\n y = data\n cs = interp1d(x, y, kind='cubic')\n xNew = np.linspace(0, t, num=num_samples, endpoint=True)\n out = cs(xNew).astype(np.int32)\n return out\n\n \n def up_sample(self, data, source_fs, target_fs, t):\n \"\"\"\n Function to upsample original data to a higher sampling rate\n \"\"\"\n new_samples = int(int(len(data) / source_fs) * int(target_fs))\n return self.cubic_interpolate(data, t, new_samples)\n\n def add_triangular_dither(self, original, original_br, new_br):\n \"\"\"\n Implements trinagular dithering\n \"\"\"\n diff = original_br - new_br\n left = (-1) * (2 ** diff)\n mode = 0\n right = (2 ** diff) - 1\n size = original.shape\n noise = np.random.triangular(left, mode, right, size)\n noise = noise.astype(np.int32)\n\n return original + noise\n\n def down_quantization(self, data, original_br, new_br):\n \"\"\"\n Down quantizes input sample with triangular dithering\n \"\"\"\n dithered = self.add_triangular_dither(data, original_br, new_br)\n dithered = dithered.astype(np.int32)\n down_quantized = np.zeros(len(dithered), dtype=np.int32)\n\n for i in range(len(dithered)):\n down_quantized[i] = dithered[i] >> (original_br - new_br)\n return down_quantized\n"
] | [
[
"scipy.interpolate.interp1d",
"scipy.signal.butter",
"numpy.random.triangular",
"scipy.signal.filtfilt",
"numpy.linspace"
]
] |
spacejake/face-alignment | [
"5c3acb5ff649de0ee9820bb595856cf2229c5db4"
] | [
"face_alignment/datasets/AFLW2000.py"
] | [
"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport random\nimport math\nfrom skimage import io\nfrom scipy import io as sio\n\nimport torch\nimport torch.utils.data as data\n\nfrom face_alignment.datasets.common import Split, Target, compute_laplacian\nfrom face_alignment.utils import shuffle_lr, flip, crop, getTransform, transform, draw_gaussian, get_preds_fromhm\nfrom face_alignment.util.imutils import *\n\n\nfrom face_alignment.datasets.W300LP import W300LP\n\nclass AFLW2000(W300LP):\n\n def __init__(self, args, split, demo=False):\n super(AFLW2000, self).__init__(args, split, demo)\n self.is_train = False\n assert self.pointType == '3D', \"AFLW2000 provided only 68 3D points\"\n\n\n def load_extras(self):\n # Don't load extras, will only use this dataset for Validation, for now...\n pass\n\n def _getDataFaces(self, is_train):\n base_dir = self.img_dir\n lines = []\n files = [f for f in os.listdir(base_dir) if f.endswith('.mat')]\n for f in files:\n lines.append(os.path.join(base_dir, f))\n print('=> loaded AFLW2000 set, {} images were found'.format(len(lines)))\n return sorted(lines)\n\n def _load_img(self, index):\n return load_image(self.anno[index][:-4] + '.jpg').float()\n\n def _load_anno(self, index):\n main_pts = sio.loadmat(self.anno[index])\n raw_pts = main_pts['pt3d_68'][0:3, :].transpose()\n raw_pts = torch.from_numpy(raw_pts).float()\n return raw_pts\n\n def generateSampleFace(self, idx):\n sf = self.scale_factor\n rf = self.rot_factor\n\n main_pts = sio.loadmat(self.anno[idx])\n raw_pts = main_pts['pt3d_68'][0:3, :].transpose()\n raw_pts = torch.from_numpy(raw_pts)\n mins_ = torch.min(raw_pts, 0)[0].view(3) # min vals\n maxs_ = torch.max(raw_pts, 0)[0].view(3) # max vals\n c = torch.FloatTensor((maxs_[0]-(maxs_[0]-mins_[0])/2, maxs_[1]-(maxs_[1]-mins_[1])/2))\n c[1] -= ((maxs_[1]-mins_[1]) * 0.12).float()\n s = (maxs_[0]-mins_[0]+maxs_[1]-mins_[1])/195\n\n img = load_image(self.anno[idx][:-4] + '.jpg')\n\n r = 0\n if self.is_train:\n s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]\n r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0\n\n if random.random() <= 0.5:\n img = flip(img).float()\n raw_pts = shuffle_lr(raw_pts, width=img.size(2))\n c[0] = img.size(2) - c[0]\n\n img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)\n img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)\n img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)\n\n inp = im_to_torch(crop(im_to_numpy(img), c, s, 256, rotate=r))\n # Transform Points\n # 256x256 GT Heatmap and Points\n pts = raw_pts.clone()\n heatmap256 = torch.zeros(self.nParts, 256, 256)\n transMat256 = getTransform(c, s, 256, rotate=r)\n for i in range(self.nParts):\n if pts[i, 0] > 0:\n pts[i] = transform(pts[i], transMat256)\n pts[i, :2] = pts[i, :2]-1\n heatmap256[i], self.g256 = draw_gaussian(heatmap256[i], pts[i, 0:2], 2, g=self.g256)\n # heatmap256[i] = draw_labelmap(heatmap256[i], pts[i], sigma=3)\n\n # inp = color_normalize(inp, self.mean, self.std)\n\n # 64x64 Intermediate Heatmap\n tpts = raw_pts.clone()\n heatmap64 = torch.zeros(self.nParts, 64, 64)\n transMat64 = getTransform(c, s, 64, rotate=r)\n for i in range(self.nParts):\n if tpts[i, 0] > 0:\n tpts[i] = transform(tpts[i], transMat64)\n heatmap64[i], self.g64 = draw_gaussian(heatmap64[i], tpts[i, 0:2], 1, g=self.g64)\n # heatmap64[i] = draw_labelmap(heatmap64[i], tpts[i] - 1, sigma=1)\n\n # Compute Target Laplacian vectors\n # lap_pts = compute_laplacian(self.laplcian, pts)\n\n 
#return inp, heatmap64, heatmap256, pts, lap_pts, c, s\n return inp.float(), heatmap64.float(), heatmap256.float(), pts.float(), c.float(), s.float()\n\n\nif __name__==\"__main__\":\n import face_alignment.util.opts as opts\n\n args = opts.argparser()\n args.data = \"../../data/AFLW2000\"\n datasetLoader = AFLW2000\n crop_win = None\n loader = torch.utils.data.DataLoader(\n datasetLoader(args, 'test'),\n batch_size=1,\n #shuffle=True,\n num_workers=1,\n pin_memory=True)\n for i, data in enumerate(loader):\n input, label, meta = data\n target = Target._make(label)\n # show_joints3D(target.pts.squeeze(0))\n # show_joints(input.squeeze(0), target.pts.squeeze(0))\n # show_heatmap(target.heatmap64)\n # show_heatmap(target.heatmap256)\n\n img = im_to_numpy(input.squeeze(0)).astype(np.uint8)\n\n # TEST 256 heatmap extraction\n # test_hmpred, _ = get_preds_fromhm(target.heatmap256, target.center, target.scale)\n # show_joints(input.squeeze(0), test_hmpred.squeeze(0))\n\n # TEST 64 heatmap extraction\n test_hmpred, _ = get_preds_fromhm(target.heatmap64, target.center, target.scale)\n test_hmpred = test_hmpred * 4 # 64->256\n frame = annotate_frame(img, test_hmpred.numpy())\n cv2.imwrite('64-256_output-3dfan.png', frame)\n\n # plt.pause(0.5)\n # plt.draw()\n"
] | [
[
"torch.zeros",
"torch.min",
"torch.max",
"torch.FloatTensor",
"scipy.io.loadmat",
"torch.from_numpy",
"torch.randn"
]
] |
karalleyna/pyprobml | [
"72195e46fdffc4418910e76d02e3d6469f4ce272"
] | [
"scripts/linreg_sgd_pt.py"
] | [
"#https://github.com/fastai/course-v3/blob/master/nbs/dl1/lesson2-sgd.ipynb\n#https://pytorch.org/tutorials/beginner/nn_tutorial.html\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pyprobml_utils as pml\n\n#figdir = os.path.join(os.environ[\"PYPROBML\"], \"figures\")\n#def save_fig(fname): plt.savefig(os.path.join(figdir, fname))\n\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nnp.random.seed(42)\n\nn = 100\nx = torch.ones(n, 2, requires_grad=False) \nx[:,0].uniform_(-1.,1)\n\n\ndef mse(y_hat, y): return ((y_hat-y)**2).mean()\n#def mse(y, y_pred): return (y_pred - y).pow(2).sum()\n\na = torch.as_tensor(np.array([3.0,2.0])).float()\ny = x@a + torch.rand(n)\n\nplt.scatter(x[:,0],y)\n\n\n# must cast parameters to float to match type of x\n#a = torch.as_tensor(np.array([-1.,1])).float()\n#a = nn.Parameter(a);\na = torch.randn(2, requires_grad=True)\nprint(a)\n\n# must prevent backprop passing through y to a\n#y = [email protected]() + torch.rand(n)\n\nlr = 1e-1\n\ndef update():\n y_hat = x@a\n loss = mse(y, y_hat)\n if t % 10 == 0: print(loss)\n loss.backward() \n with torch.no_grad():\n a.sub_(lr * a.grad)\n a.grad.zero_()\n\nfor t in range(100): update()\n\nplt.scatter(x[:,0],y)\nplt.scatter(x[:,0],[email protected]())\npml.savefig('linreg_sgd.pdf')\n"
] | [
[
"torch.rand",
"numpy.array",
"numpy.random.seed",
"torch.no_grad",
"torch.ones",
"matplotlib.pyplot.scatter",
"torch.randn"
]
] |
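The manual SGD loop above, reduced to its core pattern: compute the loss, backpropagate, then step the parameter in place under torch.no_grad() and clear the accumulated gradient:

import torch

torch.manual_seed(0)
x = torch.ones(100, 2)
x[:, 0].uniform_(-1., 1.)
y = x @ torch.tensor([3.0, 2.0]) + torch.rand(100)

a = torch.randn(2, requires_grad=True)
lr = 1e-1
for t in range(100):
    loss = ((x @ a - y) ** 2).mean()   # MSE, as in the script
    loss.backward()                    # accumulates d(loss)/d(a) into a.grad
    with torch.no_grad():              # the update itself must not be traced
        a.sub_(lr * a.grad)
        a.grad.zero_()                 # otherwise gradients accumulate across steps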
pylbm/pylbm_gallery | [
"c0e6f3c69b6938769efb5f62cc1e1a550bf160b2"
] | [
"1D/shallow_water.py"
] | [
"\"\"\"\n Solver D1Q2Q2 for the shallow water system on [0, 1]\n\n d_t(h) + d_x(q) = 0, t > 0, 0 < x < 1,\n d_t(q) + d_x(q^2/h+gh^2/2) = 0, t > 0, 0 < x < 1,\n h(t=0,x) = h0(x), q(t=0,x) = q0(x),\n d_t(h)(t,x=0) = d_t(h)(t,x=1) = 0\n d_t(q)(t,x=0) = d_t(q)(t,x=1) = 0\n\n the initial condition is a picewise constant function\n in order to visualize the simulation of elementary waves\n\n test: True\n\"\"\"\nimport sympy as sp\nimport numpy as np\nimport pylbm\n\nh, q, X, LA, g = sp.symbols('h, q, X, LA, g')\n\ndef Riemann_pb(x, xmin, xmax, uL, uR):\n xm = 0.5*(xmin+xmax)\n u = np.empty(x.shape)\n u[x < xm] = uL\n u[x == xm] = .5*(uL+uR)\n u[x > xm] = uR\n return u\n\ndef run(dx, Tf, generator=\"numpy\", sorder=None, withPlot=True):\n \"\"\"\n Parameters\n ----------\n\n dx: double\n spatial step\n\n Tf: double\n final time\n\n generator: pylbm generator\n\n sorder: list\n storage order\n\n withPlot: boolean\n if True plot the solution otherwise just compute the solution\n\n \"\"\"\n # parameters\n xmin, xmax = 0., 1. # bounds of the domain\n la = 2. # velocity of the scheme\n s = 1.5 # relaxation parameter\n\n hL, hR, qL, qR = 1., .25, 0.10, 0.10\n ymina, ymaxa, yminb, ymaxb = 0., 1., 0., .5\n\n dico = {\n 'box': {\n 'x': [xmin, xmax],\n 'label': 0\n },\n 'space_step': dx,\n 'scheme_velocity': la,\n 'schemes': [\n {\n 'velocities': [1, 2],\n 'conserved_moments': h,\n 'polynomials': [1, LA*X],\n 'relaxation_parameters': [0, s],\n 'equilibrium': [h, q],\n },\n {\n 'velocities': [1, 2],\n 'conserved_moments': q,\n 'polynomials': [1, LA*X],\n 'relaxation_parameters': [0, s],\n 'equilibrium': [q, q**2/h+.5*g*h**2],\n },\n ],\n 'init': {h: (Riemann_pb, (xmin, xmax, hL, hR)),\n q: (Riemann_pb, (xmin, xmax, qL, qR))},\n 'boundary_conditions': {\n 0: {\n 'method': {\n 0: pylbm.bc.Neumann,\n 1: pylbm.bc.Neumann\n }\n },\n },\n 'generator': generator,\n 'parameters': {LA: la, g: 1.},\n }\n\n sol = pylbm.Simulation(dico, sorder=sorder)\n\n if withPlot:\n # create the viewer to plot the solution\n viewer = pylbm.viewer.matplotlib_viewer\n fig = viewer.Fig(2, 1)\n ax1 = fig[0]\n ax1.axis(xmin, xmax, .9*ymina, 1.1*ymaxa)\n ax2 = fig[1]\n ax2.axis(xmin, xmax, .9*yminb, 1.1*ymaxb)\n\n x = sol.domain.x\n l1 = ax1.plot(x, sol.m[h], color='b')[0]\n l2 = ax2.plot(x, sol.m[q], color='r')[0]\n\n def update(iframe):\n if sol.t<Tf:\n sol.one_time_step()\n l1.set_data(x, sol.m[h])\n l2.set_data(x, sol.m[q])\n ax1.title = r'$h$ at $t = {0:f}$'.format(sol.t)\n ax2.title = r'$q$ at $t = {0:f}$'.format(sol.t)\n\n fig.animate(update)\n fig.show()\n else:\n while sol.t < Tf:\n sol.one_time_step()\n\n return sol\n\nif __name__ == '__main__':\n dx = 1./256\n Tf = .25\n run(dx, Tf)\n"
] | [
[
"numpy.empty"
]
] |
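Riemann_pb above in isolation: np.empty plus three boolean masks build the piecewise-constant profile, with the two states averaged exactly at the midpoint (values are the hL = 1., hR = .25 pair used in run):

import numpy as np

x = np.linspace(0., 1., 9)      # includes the midpoint 0.5 exactly
xm = 0.5 * (0. + 1.)
u = np.empty(x.shape)
u[x < xm] = 1.0                 # left state hL
u[x == xm] = 0.625              # .5 * (hL + hR)
u[x > xm] = 0.25                # right state hR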
leeong05/zipline | [
"f41c37a606e40b1c966b7de2ea28b1c3bf049d76"
] | [
"zipline/history/history_container.py"
] | [
"#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom six import itervalues\n\nfrom . history import (\n index_at_dt,\n days_index_at_dt,\n)\n\nfrom zipline.finance import trading\nfrom zipline.utils.data import RollingPanel\n\n# The closing price is referred to be multiple names,\n# allow both for price rollover logic etc.\nCLOSING_PRICE_FIELDS = {'price', 'close_price'}\n\n\ndef create_initial_day_panel(days_needed, fields, sids, dt):\n index = days_index_at_dt(days_needed, dt)\n # Use original index in case of 1 bar.\n if days_needed != 1:\n index = index[:-1]\n window = len(index)\n rp = RollingPanel(window, fields, sids)\n for i, day in enumerate(index):\n rp.index_buf[i] = day\n rp.pos = window\n return rp\n\n\ndef create_current_day_panel(fields, sids, dt):\n # Can't use open_and_close since need to create enough space for a full\n # day, even on a half day.\n # Can now use mkt open and close, since we don't roll\n env = trading.environment\n index = env.market_minutes_for_day(dt)\n return pd.Panel(items=fields, minor_axis=sids, major_axis=index)\n\n\ndef ffill_day_frame(field, day_frame, prior_day_frame):\n # get values which are nan-at the beginning of the day\n # and attempt to fill with the last close\n first_bar = day_frame.ix[0]\n nan_sids = first_bar[np.isnan(first_bar)]\n for sid, _ in nan_sids.iterkv():\n day_frame[sid][0] = prior_day_frame.ix[-1, sid]\n if field != 'volume':\n day_frame = day_frame.ffill()\n return day_frame\n\n\nclass HistoryContainer(object):\n \"\"\"\n Container for all history panels and frames used by an algoscript.\n\n To be used internally by algoproxy, but *not* passed directly to the\n algorithm.\n Entry point for the algoscript is the result of `get_history`.\n \"\"\"\n\n def __init__(self, history_specs, initial_sids, initial_dt):\n # All of the history specs found by the algoscript parsing.\n self.history_specs = history_specs\n\n # The overaching panel needs to be large enough to contain the\n # largest history spec\n self.max_days_needed = max(spec.days_needed for spec\n in itervalues(history_specs))\n\n # The set of fields specified by all history specs\n self.fields = set(spec.field for spec in itervalues(history_specs))\n\n self.prior_day_panel = create_initial_day_panel(\n self.max_days_needed, self.fields, initial_sids, initial_dt)\n\n # This panel contains the minutes for the current day.\n # The value that is used is some sort of aggregation call on the\n # panel, e.g. 
`sum` for volume, `max` for high, etc.\n self.current_day_panel = create_current_day_panel(\n self.fields, initial_sids, initial_dt)\n\n # Helps prop up the prior day panel against having a nan, when\n # the data has been seen.\n self.last_known_prior_values = {field: {} for field in self.fields}\n\n # Populating initial frames here, so that the cost of creating the\n # initial frames does not show up when profiling get_y\n # These frames are cached since mid-stream creation of containing\n # data frames on every bar is expensive.\n self.return_frames = {}\n\n self.create_return_frames(initial_dt)\n\n def create_return_frames(self, algo_dt):\n \"\"\"\n Populates the return frame cache.\n\n Called during init and at universe rollovers.\n \"\"\"\n for history_spec in itervalues(self.history_specs):\n index = index_at_dt(history_spec, algo_dt)\n index = pd.to_datetime(index)\n frame = pd.DataFrame(\n index=index,\n columns=map(int, self.current_day_panel.minor_axis.values),\n dtype=np.float64)\n self.return_frames[history_spec] = frame\n\n def update(self, data, algo_dt):\n \"\"\"\n Takes the bar at @algo_dt's @data and adds to the current day panel.\n \"\"\"\n self.check_and_roll(algo_dt)\n\n fields = self.fields\n field_data = {sid: {field: bar[field] for field in fields}\n for sid, bar in data.iteritems()\n if (bar\n and\n bar['dt'] == algo_dt\n and\n # Only use data which is keyed in the data panel.\n # Prevents crashes due to custom data.\n sid in self.current_day_panel.minor_axis)}\n field_frame = pd.DataFrame(field_data)\n self.current_day_panel.ix[:, algo_dt, :] = field_frame.T\n\n def roll(self, roll_dt):\n env = trading.environment\n # This should work for price, but not others, e.g.\n # open.\n # Get the most recent value.\n rolled = pd.DataFrame(\n index=self.current_day_panel.items,\n columns=self.current_day_panel.minor_axis)\n\n for field in self.fields:\n if field in CLOSING_PRICE_FIELDS:\n # Use the last price.\n prices = self.current_day_panel.ffill().ix[field, -1, :]\n rolled.ix[field] = prices\n elif field == 'open_price':\n # Use the first price.\n opens = self.current_day_panel.ix['open_price', 0, :]\n rolled.ix['open_price'] = opens\n elif field == 'volume':\n # Volume is the sum of the volumes during the\n # course of the day\n volumes = self.current_day_panel.ix['volume'].apply(np.sum)\n rolled.ix['volume'] = volumes\n elif field == 'high':\n # Use the highest high.\n highs = self.current_day_panel.ix['high'].apply(np.max)\n rolled.ix['high'] = highs\n elif field == 'low':\n # Use the lowest low.\n lows = self.current_day_panel.ix['low'].apply(np.min)\n rolled.ix['low'] = lows\n\n for sid, value in rolled.ix[field].iterkv():\n if not np.isnan(value):\n try:\n prior_values = self.last_known_prior_values[field][sid]\n except KeyError:\n prior_values = {}\n self.last_known_prior_values[field][sid] = prior_values\n prior_values['dt'] = roll_dt\n prior_values['value'] = value\n\n self.prior_day_panel.add_frame(roll_dt, rolled)\n\n # Create a new 'current day' collector.\n next_day = env.next_trading_day(roll_dt)\n\n if next_day:\n # Only create the next panel if there is a next day.\n # i.e. 
don't create the next panel on the last day of\n # the backest/current day of live trading.\n self.current_day_panel = create_current_day_panel(\n self.fields,\n # Will break on quarter rollover.\n self.current_day_panel.minor_axis,\n next_day)\n\n def check_and_roll(self, algo_dt):\n \"\"\"\n Check whether the algo_dt is at the end of a day.\n If it is, aggregate the day's minute data and store it in the prior\n day panel.\n \"\"\"\n # Use a while loop to account for illiquid bars.\n while algo_dt > self.current_day_panel.major_axis[-1]:\n roll_dt = self.current_day_panel.major_axis[-1]\n self.roll(roll_dt)\n\n def get_history(self, history_spec, algo_dt):\n \"\"\"\n Main API used by the algoscript is mapped to this function.\n\n Selects from the overarching history panel the values for the\n @history_spec at the given @algo_dt.\n \"\"\"\n field = history_spec.field\n\n index = index_at_dt(history_spec, algo_dt)\n index = pd.to_datetime(index)\n\n frame = self.return_frames[history_spec]\n # Overwrite the index.\n # Not worrying about values here since the values are overwritten\n # in the next step.\n frame.index = index\n\n prior_day_panel = self.prior_day_panel.get_current()\n prior_day_frame = prior_day_panel[field].copy()\n if history_spec.ffill:\n first_bar = prior_day_frame.ix[0]\n nan_sids = first_bar[first_bar.isnull()]\n for sid, _ in nan_sids.iterkv():\n try:\n if (\n # Only use prior value if it is before the index,\n # so that a backfill does not accidentally occur.\n self.last_known_prior_values[field][sid]['dt'] <=\n prior_day_frame.index[0]):\n prior_day_frame[sid][0] =\\\n self.last_known_prior_values[field][sid]['value']\n except KeyError:\n # Allow case where there is no previous value.\n # e.g. with leading nans.\n pass\n prior_day_frame = prior_day_frame.ffill()\n frame.ix[:-1] = prior_day_frame.ix[:]\n\n # Copy the current day frame, since the fill behavior will mutate\n # the values in the panel.\n current_day_frame = self.current_day_panel[field][:algo_dt].copy()\n if history_spec.ffill:\n current_day_frame = ffill_day_frame(field,\n current_day_frame,\n prior_day_frame)\n\n if field == 'volume':\n # This works for the day rollup, i.e. '1d',\n # but '1m' will need to allow for 0 or nan minutes\n frame.ix[algo_dt] = current_day_frame.sum()\n elif field == 'high':\n frame.ix[algo_dt] = current_day_frame.max()\n elif field == 'low':\n frame.ix[algo_dt] = current_day_frame.min()\n elif field == 'open_price':\n frame.ix[algo_dt] = current_day_frame.ix[0]\n else:\n frame.ix[algo_dt] = current_day_frame.ix[algo_dt]\n\n return frame\n"
] | [
[
"pandas.to_datetime",
"pandas.Panel",
"numpy.isnan",
"pandas.DataFrame"
]
] |
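pd.Panel, used throughout this container, was removed from modern pandas, so here is the ffill_day_frame idea restated on plain DataFrames: seed each leading NaN with the sid's last known close, then forward-fill. The sids 24 and 7883 and all prices below are made up for illustration:

import numpy as np
import pandas as pd

minutes = pd.date_range('2014-01-02 09:31', periods=3, freq='min')
day = pd.DataFrame({24: [np.nan, 10.1, np.nan], 7883: [5.0, np.nan, 5.2]}, index=minutes)
prior_close = {24: 9.9, 7883: 4.8}                             # last bar of the prior day
first_bar = day.iloc[0]
for sid in first_bar[first_bar.isnull()].index:
    day.iloc[0, day.columns.get_loc(sid)] = prior_close[sid]   # seed the open
day = day.ffill()                                              # then fill forward as usual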
danlx/JointBERT | [
"5d6d4cd2bfe83b6953898ddc18bb1192418a6a90"
] | [
"data_loader.py"
] | [
"import os\nimport copy\nimport json\nimport logging\n\nimport torch\nfrom torch.utils.data import TensorDataset\n\nfrom utils import get_intent_labels, get_slot_labels\n\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"\n A single training/test example for simple sequence classification.\n\n Args:\n guid: Unique id for the example.\n words: list. The words of the sequence.\n intent_label: (Optional) string. The intent label of the example.\n slot_labels: (Optional) list. The slot labels of the example.\n \"\"\"\n\n def __init__(self, guid, words, intent_label=None, slot_labels=None):\n self.guid = guid\n self.words = words\n self.intent_label = intent_label\n self.slot_labels = slot_labels\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, attention_mask, token_type_ids, intent_label_id, slot_labels_ids):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.intent_label_id = intent_label_id\n self.slot_labels_ids = slot_labels_ids\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass JointProcessor(object):\n \"\"\"Processor for the JointBERT data set \"\"\"\n\n def __init__(self, args):\n self.args = args\n self.intent_labels = get_intent_labels(args)\n self.slot_labels = get_slot_labels(args)\n\n self.input_text_file = 'seq.in'\n self.intent_label_file = 'label'\n self.slot_labels_file = 'seq.out'\n\n @classmethod\n def _read_file(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n lines = []\n for line in f:\n lines.append(line.strip())\n return lines\n\n def _create_examples(self, texts, intents, slots, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for i, (text, intent, slot) in enumerate(zip(texts, intents, slots)):\n guid = \"%s-%s\" % (set_type, i)\n # 1. input_text\n words = text.split() # Some are spaced twice\n # 2. intent\n intent_label = self.intent_labels.index(intent) if intent in self.intent_labels else self.intent_labels.index(\"UNK\")\n # 3. 
slot\n slot_labels = []\n for s in slot.split():\n slot_labels.append(self.slot_labels.index(s) if s in self.slot_labels else self.slot_labels.index(\"UNK\"))\n\n assert len(words) == len(slot_labels)\n examples.append(InputExample(guid=guid, words=words, intent_label=intent_label, slot_labels=slot_labels))\n return examples\n\n def get_examples(self, mode):\n \"\"\"\n Args:\n mode: train, dev, test\n \"\"\"\n data_path = os.path.join(self.args.data_dir, self.args.task, mode)\n logger.info(\"LOOKING AT {}\".format(data_path))\n return self._create_examples(texts=self._read_file(os.path.join(data_path, self.input_text_file)),\n intents=self._read_file(os.path.join(data_path, self.intent_label_file)),\n slots=self._read_file(os.path.join(data_path, self.slot_labels_file)),\n set_type=mode)\n\n\nprocessors = {\n \"atis\": JointProcessor,\n \"snips\": JointProcessor,\n \"kgqa\": JointProcessor\n}\n\n\ndef convert_examples_to_features(examples, max_seq_len, tokenizer,\n pad_token_label_id=-100,\n cls_token_segment_id=0,\n pad_token_segment_id=0,\n sequence_a_segment_id=0,\n mask_padding_with_zero=True):\n # Setting based on the current model type\n cls_token = tokenizer.cls_token\n sep_token = tokenizer.sep_token\n unk_token = tokenizer.unk_token\n pad_token_id = tokenizer.pad_token_id\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 5000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n # Tokenize word by word (for NER)\n tokens = []\n slot_labels_ids = []\n for word, slot_label in zip(example.words, example.slot_labels):\n word_tokens = tokenizer.tokenize(word)\n if not word_tokens:\n word_tokens = [unk_token] # For handling the bad-encoded word\n tokens.extend(word_tokens)\n # Use the real label id for the first token of the word, and padding ids for the remaining tokens\n slot_labels_ids.extend([int(slot_label)] + [pad_token_label_id] * (len(word_tokens) - 1))\n\n # Account for [CLS] and [SEP]\n special_tokens_count = 2\n if len(tokens) > max_seq_len - special_tokens_count:\n tokens = tokens[:(max_seq_len - special_tokens_count)]\n slot_labels_ids = slot_labels_ids[:(max_seq_len - special_tokens_count)]\n\n # Add [SEP] token\n tokens += [sep_token]\n slot_labels_ids += [pad_token_label_id]\n token_type_ids = [sequence_a_segment_id] * len(tokens)\n\n # Add [CLS] token\n tokens = [cls_token] + tokens\n slot_labels_ids = [pad_token_label_id] + slot_labels_ids\n token_type_ids = [cls_token_segment_id] + token_type_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_len - len(input_ids)\n input_ids = input_ids + ([pad_token_id] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n slot_labels_ids = slot_labels_ids + ([pad_token_label_id] * padding_length)\n\n assert len(input_ids) == max_seq_len, \"Error with input length {} vs {}\".format(len(input_ids), max_seq_len)\n assert len(attention_mask) == max_seq_len, \"Error with attention mask length {} vs {}\".format(len(attention_mask), max_seq_len)\n assert len(token_type_ids) == max_seq_len, \"Error with token type length {} vs {}\".format(len(token_type_ids), max_seq_len)\n assert len(slot_labels_ids) == max_seq_len, \"Error with slot labels length {} vs {}\".format(len(slot_labels_ids), max_seq_len)\n\n intent_label_id = int(example.intent_label)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % example.guid)\n logger.info(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"intent_label: %s (id = %d)\" % (example.intent_label, intent_label_id))\n logger.info(\"slot_labels: %s\" % \" \".join([str(x) for x in slot_labels_ids]))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n intent_label_id=intent_label_id,\n slot_labels_ids=slot_labels_ids\n ))\n\n return features\n\n\ndef load_and_cache_examples(args, tokenizer, mode):\n processor = processors[args.task](args)\n\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(\n args.data_dir,\n 'cached_{}_{}_{}_{}'.format(\n mode,\n args.task,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n args.max_seq_len\n )\n )\n\n if os.path.exists(cached_features_file):\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n # Load data features from dataset file\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n if mode == \"train\":\n examples = processor.get_examples(\"train\")\n elif mode == \"dev\":\n examples = processor.get_examples(\"dev\")\n elif mode == \"test\":\n examples = processor.get_examples(\"test\")\n else:\n raise Exception(\"For mode, Only train, dev, test is available\")\n\n # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later\n pad_token_label_id = args.ignore_index\n features = convert_examples_to_features(examples, args.max_seq_len, tokenizer,\n pad_token_label_id=pad_token_label_id)\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_intent_label_ids = 
torch.tensor([f.intent_label_id for f in features], dtype=torch.long)\n all_slot_labels_ids = torch.tensor([f.slot_labels_ids for f in features], dtype=torch.long)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask,\n all_token_type_ids, all_intent_label_ids, all_slot_labels_ids)\n return dataset\n"
] | [
[
"torch.save",
"torch.tensor",
"torch.utils.data.TensorDataset",
"torch.load"
]
] |
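The final packing step of load_and_cache_examples, in isolation: each feature field becomes one long tensor, and TensorDataset zips them row-wise so dataset[i] yields one aligned example. The ids below are toy values, not real tokenizer output:

import torch
from torch.utils.data import TensorDataset

all_input_ids = torch.tensor([[101, 7592, 102], [101, 2088, 102]], dtype=torch.long)
all_attention_mask = torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=torch.long)
all_intent_label_ids = torch.tensor([0, 3], dtype=torch.long)

dataset = TensorDataset(all_input_ids, all_attention_mask, all_intent_label_ids)
print(dataset[1])   # one example: (ids, mask, intent), row-aligned across the tensors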
tristandeleu/rlpyt | [
"22eccb4e2b33d3c52947a27b6d300b575e36a3ea"
] | [
"rlpyt/samplers/async_/gpu_sampler.py"
] | [
"\nimport torch\nimport multiprocessing as mp\nimport ctypes\nimport psutil\nfrom collections import namedtuple\n\nfrom rlpyt.agents.base import AgentInputs\nfrom rlpyt.samplers.async_.base import AsyncParallelSamplerMixin\nfrom rlpyt.samplers.parallel.base import ParallelSamplerBase\nfrom rlpyt.samplers.parallel.gpu.sampler import GpuSamplerBase, build_step_buffer\nfrom rlpyt.samplers.async_.collectors import DbGpuResetCollector\nfrom rlpyt.samplers.parallel.gpu.collectors import GpuEvalCollector\nfrom rlpyt.samplers.async_.action_server import AsyncActionServer\nfrom rlpyt.samplers.parallel.worker import sampling_process\nfrom rlpyt.utils.logging import logger\nfrom rlpyt.utils.seed import make_seed\n\n\nSync = namedtuple('Sync', ['obs_ready', 'act_ready', 'stop_eval', 'db_idx'])\n\nclass AsyncGpuSamplerBase(AsyncParallelSamplerMixin, ParallelSamplerBase):\n \"\"\"Main definitions for asynchronous parallel sampler using GPU(s) for\n action selection. The main sampler process (forked from the overall\n master), forks action-server processes, one per GPU to be used, and\n the action-server process(es) fork their own parallel CPU workers.\n This same sampler object is used in the main sampler process and in\n the action server process(es), but for different methods, labeled by\n comments in the code (easier way to pass arguments along).\n \"\"\"\n\n def __init__(self, *args, CollectorCls=DbGpuResetCollector,\n eval_CollectorCls=GpuEvalCollector, **kwargs):\n super().__init__(*args, CollectorCls=CollectorCls,\n eval_CollectorCls=eval_CollectorCls, **kwargs)\n\n ##########################################\n # In forked sampler runner process.\n ##########################################\n\n def initialize(self, affinity):\n \"\"\"Initialization inside the main sampler process. 
Builds one level\n of parallel synchronization objects, and forks action-server processes,\n one per GPU to be used.\n \"\"\"\n torch.set_num_threads(1) # Needed to avoid MKL hang :( .\n self.world_size = n_server = len(affinity)\n n_envs_lists = self._get_n_envs_lists(affinity)\n n_server = len(n_envs_lists)\n n_worker = sum([len(n_envs_list) for n_envs_list in n_envs_lists])\n self.n_worker = n_worker\n\n if self.eval_n_envs > 0:\n self.eval_n_envs_per = max(1, self.eval_n_envs // n_worker)\n self.eval_n_envs = eval_n_envs = self.eval_n_envs_per * n_worker\n logger.log(f\"Total parallel evaluation envs: {eval_n_envs}.\")\n self.eval_max_T = eval_max_T = int(self.eval_max_steps // eval_n_envs)\n\n self._build_parallel_ctrl(n_server, n_worker)\n\n servers_kwargs = self._assemble_servers_kwargs(affinity, self.seed,\n n_envs_lists)\n servers = [mp.Process(target=self.action_server_process,\n kwargs=s_kwargs)\n for s_kwargs in servers_kwargs]\n for s in servers:\n s.start()\n self.servers = servers\n self.ctrl.barrier_out.wait() # Wait for workers to decorrelate envs.\n\n # obtain_samples() and evaluate_agent() remain the same.\n\n def shutdown(self):\n self.ctrl.quit.value = True\n self.ctrl.barrier_in.wait()\n for s in self.servers:\n s.join()\n\n def _get_n_envs_lists(self, affinity):\n B = self.batch_spec.B\n n_server = len(affinity)\n n_workers = [len(aff[\"workers_cpus\"]) for aff in affinity]\n if B < n_server:\n raise ValueError(f\"Request fewer envs ({B}) than action servers \"\n f\"({n_server}).\")\n server_Bs = [B // n_server] * n_server\n if n_workers.count(n_workers[0]) != len(n_workers):\n logger.log(\"WARNING: affinity requested different number of \"\n \"environment workers per action server, but environments \"\n \"will be assigned equally across action servers anyway.\")\n if B % n_server > 0:\n for s in range(B % n_server):\n server_Bs[s] += 1 # Spread across action servers.\n\n n_envs_lists = list()\n for s_worker, s_B in zip(n_workers, server_Bs):\n n_envs_lists.append(self._get_n_envs_list(n_worker=s_worker, B=s_B))\n\n return n_envs_lists\n\n def _build_parallel_ctrl(self, n_server, n_worker):\n super()._build_parallel_ctrl(n_worker + n_server)\n\n def _assemble_servers_kwargs(self, affinity, seed, n_envs_lists):\n servers_kwargs = list()\n i_env = 0\n i_worker = 0\n for rank, server_affinity in enumerate(affinity):\n n_worker = len(server_affinity[\"workers_cpus\"])\n n_env = sum(n_envs_lists[rank])\n slice_B = slice(i_env, i_env + n_env)\n server_kwargs = dict(\n rank=rank,\n env_ranks=list(range(i_env, i_env + n_env)),\n double_buffer_slice=tuple(buf[:, slice_B]\n for buf in self.double_buffer),\n affinity=server_affinity,\n n_envs_list=n_envs_lists[rank],\n seed=seed + i_worker,\n )\n servers_kwargs.append(server_kwargs)\n i_worker += n_worker\n i_env += n_env\n return servers_kwargs\n\n ############################################\n # In action server processes (forked again).\n ############################################\n\n def action_server_process(self, rank, env_ranks, double_buffer_slice,\n affinity, seed, n_envs_list):\n \"\"\"Target method used for forking action-server process(es) from the\n main sampler process. By inheriting the sampler object from the\n sampler process, can more easily pass args to the environment worker\n processes, which are forked from here.\n\n Assigns hardware affinity, and then forks parallel worker processes\n and moves agent model to device. 
Then enters infinite loop: waits for\n signals from main sampler process to collect training samples or\n perform evaluation, and then serves actions during collection. At\n every loop, calls agent to retrieve new parameter values from the\n training process, which are communicated through shared CPU memory.\n \"\"\"\n self.rank = rank\n p = psutil.Process()\n if affinity.get(\"set_affinity\", True):\n p.cpu_affinity(affinity[\"master_cpus\"])\n # torch.set_num_threads(affinity[\"master_torch_threads\"])\n torch.set_num_threads(1) # Possibly needed to avoid MKL hang.\n self.launch_workers(double_buffer_slice, affinity, seed, n_envs_list)\n self.agent.to_device(cuda_idx=affinity[\"cuda_idx\"])\n self.agent.collector_initialize(global_B=self.batch_spec.B, # Not updated.\n env_ranks=env_ranks) # For vector eps-greedy.\n self.ctrl.barrier_out.wait() # Wait for workers to decorrelate envs.\n \n while True:\n self.sync.stop_eval.value = False # Reset.\n self.ctrl.barrier_in.wait()\n if self.ctrl.quit.value:\n break\n self.agent.recv_shared_memory()\n if self.ctrl.do_eval.value:\n self.agent.eval_mode(self.ctrl.itr.value)\n self.serve_actions_evaluation(self.ctrl.itr.value)\n else:\n self.agent.sample_mode(self.ctrl.itr.value)\n # Only for bootstrap_value:\n self.samples_np = self.double_buffer[self.ctrl.db_idx.value]\n if hasattr(self, \"double_bootstrap_value_pair\"): # Alternating sampler.\n self.bootstrap_value_pair = \\\n self.double_bootstrap_value_pair[self.ctrl.db_idx.value]\n self.serve_actions(self.ctrl.itr.value)\n self.ctrl.barrier_out.wait()\n self.shutdown_workers()\n\n def launch_workers(self, double_buffer_slice, affinity, seed, n_envs_list):\n self.n_worker = n_worker = len(n_envs_list)\n # A little slight-of-hand to make 2-level signal:\n self.ctrl.stop_eval = self.sync.stop_eval\n self.sync = Sync(\n obs_ready=[mp.Semaphore(0) for _ in range(n_worker)],\n act_ready=[mp.Semaphore(0) for _ in range(n_worker)],\n stop_eval=mp.RawValue(ctypes.c_bool, False), # Overwrite.\n # stop_eval=self.ctrl.stop_eval, # No, make 2-level signal.\n db_idx=self.ctrl.db_idx, # Copy into sync which passes to Collector.\n )\n self.step_buffer_pyt, self.step_buffer_np = build_step_buffer(\n self.examples, sum(n_envs_list))\n self.agent_inputs = AgentInputs(self.step_buffer_pyt.observation,\n self.step_buffer_pyt.action, self.step_buffer_pyt.reward)\n\n if self.eval_n_envs > 0:\n eval_n_envs = self.eval_n_envs_per * n_worker\n eval_step_buffer_pyt, eval_step_buffer_np = build_step_buffer(\n self.examples, eval_n_envs)\n self.eval_step_buffer_pyt = eval_step_buffer_pyt\n self.eval_step_buffer_np = eval_step_buffer_np\n self.eval_agent_inputs = AgentInputs(\n self.eval_step_buffer_pyt.observation,\n self.eval_step_buffer_pyt.action,\n self.eval_step_buffer_pyt.reward,\n )\n # eval_max_T already made in earlier initialize.\n\n self.double_buffer = double_buffer_slice # Now only see my part.\n common_kwargs = self._assemble_common_kwargs(affinity)\n common_kwargs[\"agent\"] = None # Remove.\n workers_kwargs = self._assemble_workers_kwargs(affinity, seed,\n n_envs_list)\n\n # Yes, fork again.\n self.workers = [mp.Process(target=sampling_process,\n kwargs=dict(common_kwargs=common_kwargs, worker_kwargs=w_kwargs))\n for w_kwargs in workers_kwargs]\n for w in self.workers:\n w.start()\n\n def shutdown_workers(self):\n for w in self.workers:\n w.join() # Already signaled to quit by central master.\n\n def _assemble_workers_kwargs(self, affinity, seed, n_envs_list):\n workers_kwargs = 
super()._assemble_workers_kwargs(affinity, seed,\n n_envs_list)\n i_env = 0\n for rank, w_kwargs in enumerate(workers_kwargs):\n n_envs = n_envs_list[rank]\n slice_B = slice(i_env, i_env + n_envs)\n w_kwargs[\"sync\"] = Sync(\n obs_ready=self.sync.obs_ready[rank],\n act_ready=self.sync.act_ready[rank],\n stop_eval=self.sync.stop_eval,\n db_idx=self.sync.db_idx,\n )\n w_kwargs[\"step_buffer_np\"] = self.step_buffer_np[slice_B]\n if self.eval_n_envs > 0:\n eval_slice_B = slice(self.eval_n_envs_per * rank,\n self.eval_n_envs_per * (rank + 1))\n w_kwargs[\"eval_step_buffer_np\"] = \\\n self.eval_step_buffer_np[eval_slice_B]\n i_env += n_envs\n return workers_kwargs\n\n\nclass AsyncGpuSampler(AsyncActionServer, AsyncGpuSamplerBase):\n pass\n"
] | [
[
"torch.set_num_threads"
]
] |
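The two-level fork used by the sampler, stripped to its mp.Process pattern: build one kwargs dict per server, start them all, and join on shutdown. The action_server below is a stand-in, not the real action_server_process:

import multiprocessing as mp

def action_server(rank, seed):
    print(f"server {rank} running with seed {seed}")

if __name__ == '__main__':
    servers_kwargs = [dict(rank=r, seed=1000 + r) for r in range(2)]
    servers = [mp.Process(target=action_server, kwargs=kw) for kw in servers_kwargs]
    for s in servers:
        s.start()
    for s in servers:
        s.join()   # mirrors shutdown(): join each server after signalling quit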
Linxius/HPCnet | [
"689057cd3ec206e187fd3ac01fb3ae8ae41110ed"
] | [
"data_utils/dataset.py"
] | [
"import os\nimport numpy as np\nimport torch.utils.data as torch_data\nfrom data_utils import kitti_utils\nimport cv2\nfrom PIL import Image\n\n\nUSE_INTENSITY = False\n\n\nclass KittiDataset(torch_data.Dataset):\n def __init__(self, root_dir, split='train', mode='TRAIN'):\n self.split = split\n self.mode = mode\n self.classes = ['Car']\n is_test = self.split == 'test'\n self.imageset_dir = os.path.join(root_dir, 'KITTI', 'object', 'testing' if is_test else 'training')\n\n split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', split + '.txt')\n self.image_idx_list = [x.strip() for x in open(split_dir).readlines()]\n self.sample_id_list = [int(sample_id) for sample_id in self.image_idx_list]\n self.num_sample = self.image_idx_list.__len__()\n\n self.npoints = 16384\n\n self.image_dir = os.path.join(self.imageset_dir, 'image_2')\n self.lidar_dir = os.path.join(self.imageset_dir, 'velodyne')\n self.calib_dir = os.path.join(self.imageset_dir, 'calib')\n self.label_dir = os.path.join(self.imageset_dir, 'label_2')\n self.plane_dir = os.path.join(self.imageset_dir, 'planes')\n\n def get_image(self, idx):\n img_file = os.path.join(self.image_dir, '%06d.png' % idx)\n assert os.path.exists(img_file)\n return cv2.imread(img_file) # (H, W, 3) BGR mode\n\n def get_image_shape(self, idx):\n img_file = os.path.join(self.image_dir, '%06d.png' % idx)\n assert os.path.exists(img_file)\n im = Image.open(img_file)\n width, height = im.size\n return height, width, 3\n\n def get_lidar(self, idx):\n lidar_file = os.path.join(self.lidar_dir, '%06d.bin' % idx)\n assert os.path.exists(lidar_file)\n return np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)\n\n def get_calib(self, idx):\n calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)\n assert os.path.exists(calib_file)\n return kitti_utils.Calibration(calib_file)\n\n def get_label(self, idx):\n label_file = os.path.join(self.label_dir, '%06d.txt' % idx)\n assert os.path.exists(label_file)\n return kitti_utils.get_objects_from_label(label_file)\n\n @staticmethod\n def get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape):\n val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])\n val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])\n val_flag_merge = np.logical_and(val_flag_1, val_flag_2)\n pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)\n return pts_valid_flag\n\n def filtrate_objects(self, obj_list):\n type_whitelist = self.classes\n if self.mode == 'TRAIN':\n type_whitelist = list(self.classes)\n if 'Car' in self.classes:\n type_whitelist.append('Van')\n\n valid_obj_list = []\n for obj in obj_list:\n if obj.cls_type not in type_whitelist:\n continue\n\n valid_obj_list.append(obj)\n return valid_obj_list\n\n def __len__(self):\n return len(self.sample_id_list)\n\n def __getitem__(self, index):\n sample_id = int(self.sample_id_list[index])\n calib = self.get_calib(sample_id)\n img_shape = self.get_image_shape(sample_id)\n pts_lidar = self.get_lidar(sample_id)\n\n # get valid point (projected points should be in image)\n pts_rect = calib.lidar_to_rect(pts_lidar[:, 0:3])\n pts_intensity = pts_lidar[:, 3]\n\n pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)\n pts_valid_flag = self.get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape)\n\n pts_rect = pts_rect[pts_valid_flag][:, 0:3]\n pts_intensity = pts_intensity[pts_valid_flag]\n\n if self.npoints < len(pts_rect):\n pts_depth = pts_rect[:, 2]\n pts_near_flag = pts_depth < 40.0\n far_idxs_choice = 
np.where(pts_near_flag == 0)[0]\n near_idxs = np.where(pts_near_flag == 1)[0]\n near_idxs_choice = np.random.choice(near_idxs, self.npoints - len(far_idxs_choice), replace=False)\n\n choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \\\n if len(far_idxs_choice) > 0 else near_idxs_choice\n np.random.shuffle(choice)\n else:\n choice = np.arange(0, len(pts_rect), dtype=np.int32)\n if self.npoints > len(pts_rect):\n extra_choice = np.random.choice(choice, self.npoints - len(pts_rect), replace=False)\n choice = np.concatenate((choice, extra_choice), axis=0)\n np.random.shuffle(choice)\n\n ret_pts_rect = pts_rect[choice, :]\n ret_pts_intensity = pts_intensity[choice] - 0.5 # translate intensity to [-0.5, 0.5]\n\n pts_features = [ret_pts_intensity.reshape(-1, 1)]\n ret_pts_features = np.concatenate(pts_features, axis=1) if pts_features.__len__() > 1 else pts_features[0]\n\n sample_info = {'sample_id': sample_id}\n\n if self.mode == 'TEST':\n if USE_INTENSITY:\n pts_input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1) # (N, C)\n else:\n pts_input = ret_pts_rect\n sample_info['pts_input'] = pts_input\n sample_info['pts_rect'] = ret_pts_rect\n sample_info['pts_features'] = ret_pts_features\n return sample_info\n\n gt_obj_list = self.filtrate_objects(self.get_label(sample_id))\n\n gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)\n\n # prepare input\n if USE_INTENSITY:\n pts_input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1) # (N, C)\n else:\n pts_input = ret_pts_rect\n\n # generate training labels\n cls_labels = self.generate_training_labels(ret_pts_rect, gt_boxes3d)\n sample_info['pts_input'] = pts_input\n sample_info['pts_rect'] = ret_pts_rect\n sample_info['cls_labels'] = cls_labels\n return sample_info\n\n @staticmethod\n def generate_training_labels(pts_rect, gt_boxes3d):\n cls_label = np.zeros((pts_rect.shape[0]), dtype=np.int32)\n gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, rotate=True)\n extend_gt_boxes3d = kitti_utils.enlarge_box3d(gt_boxes3d, extra_width=0.2)\n extend_gt_corners = kitti_utils.boxes3d_to_corners3d(extend_gt_boxes3d, rotate=True)\n for k in range(gt_boxes3d.shape[0]):\n box_corners = gt_corners[k]\n fg_pt_flag = kitti_utils.in_hull(pts_rect, box_corners)\n cls_label[fg_pt_flag] = 1\n\n # enlarge the bbox3d, ignore nearby points\n extend_box_corners = extend_gt_corners[k]\n fg_enlarge_flag = kitti_utils.in_hull(pts_rect, extend_box_corners)\n ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)\n cls_label[ignore_flag] = -1\n\n return cls_label\n\n def collate_batch(self, batch):\n batch_size = batch.__len__()\n ans_dict = {}\n\n for key in batch[0].keys():\n if isinstance(batch[0][key], np.ndarray):\n ans_dict[key] = np.concatenate([batch[k][key][np.newaxis, ...] for k in range(batch_size)], axis=0)\n\n else:\n ans_dict[key] = [batch[k][key] for k in range(batch_size)]\n if isinstance(batch[0][key], int):\n ans_dict[key] = np.array(ans_dict[key], dtype=np.int32)\n elif isinstance(batch[0][key], float):\n ans_dict[key] = np.array(ans_dict[key], dtype=np.float32)\n\n return ans_dict\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.logical_xor",
"numpy.zeros",
"numpy.random.shuffle",
"numpy.logical_and",
"numpy.where",
"numpy.fromfile"
]
] |
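The near/far subsampling from __getitem__ above, in isolation: every far point (depth >= 40 m) is kept, and the remaining budget is drawn from the near points without replacement. The depths here are synthetic:

import numpy as np

np.random.seed(0)
pts_depth = np.random.rand(100) * 50.0                  # synthetic depths in metres
npoints = 50
near_idxs = np.where(pts_depth < 40.0)[0]
far_idxs_choice = np.where(pts_depth >= 40.0)[0]        # keep all far points
near_idxs_choice = np.random.choice(near_idxs, npoints - len(far_idxs_choice), replace=False)
choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0)
np.random.shuffle(choice)                               # mix near and far indices
assert len(choice) == npoints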
srnn/sRNN | [
"00618c719553bf54f693b51bef6bec8eb233e51a"
] | [
"utils.py"
] | [
"import numpy as np\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nfrom RNN_Cell import OrthoRNNCell, RNNCell, OrthoRNNCell2\nfrom cells import RNNCell1, RNNCell2, RNNCellLT, EURNNCell\nfrom LSTM import LSTM\nfrom expRNN.exprnn import ExpRNN, ExpRNN2\nimport argparse\nfrom expRNN.initialization import (henaff_init,cayley_init,\n random_orthogonal_init)\ndef rvs(dim=3):\n random_state = np.random\n H = np.eye(dim)\n D = np.ones((dim,))\n for n in range(1, dim):\n x = random_state.normal(size=(dim-n+1,))\n D[n-1] = np.sign(x[0])\n x[0] -= D[n-1]*np.sqrt((x*x).sum())\n # Householder transformation\n Hx = (np.eye(dim-n+1) - 2.*np.outer(x, x)/(x*x).sum())\n mat = np.eye(dim)\n mat[n-1:, n-1:] = Hx\n H = np.dot(H, mat)\n # Fix the last sign such that the determinant is 1\n D[-1] = (-1)**(1-(dim % 2))*D.prod()\n # Equivalent to np.dot(np.diag(D), H) but faster, apparently\n H = (D*H.T).T\n return H\n\ndef c_mm(A_r,A_i,B_r,B_i):\n C_r = torch.mm(A_r,B_r) - torch.mm(A_i,B_i)\n C_i = torch.mm(A_i,B_r) + torch.mm(A_r,B_i)\n return C_r,C_i\ndef star(A_r,A_i):\n return A_r.t(),-A_i.t()\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef select_network(args, inp_size):\n iinit, rinit = get_initializers(args)\n if args.net_type == 'RNN':\n rnn = RNNCell(inp_size,args.nhid,\n args.nonlin,\n bias=True,\n cuda=args.cuda,\n r_initializer=rinit,\n i_initializer=iinit)\n elif args.net_type == 'RNN1':\n rnn = RNNCell1(inp_size,args.nhid,\n args.nonlin,\n bias=True,\n cuda=args.cuda,\n r_initializer=rinit,\n i_initializer=iinit)\n\n elif args.net_type == 'RNN2':\n rnn = RNNCell2(inp_size,args.nhid, args.bptt,\n args.nonlin,\n bias=True,\n cuda=args.cuda,\n r_initializer=rinit,\n i_initializer=iinit)\n\n elif args.net_type == 'RNN-Orth':\n rnn = RNNCellLT(inp_size,args.nhid,\n args.nonlin,\n bias=True,\n cuda=args.cuda,\n r_initializer=rinit,\n i_initializer=iinit)\n elif args.net_type == 'EURNN':\n rnn = EURNNCell(input_size=inp_size,hidden_size=args.nhid, capacity=2)\n\n elif args.net_type == 'nnRNN':\n rnn = OrthoRNNCell(inp_size,args.nhid,args.nonlin,\n bias=False,\n cuda=args.cuda,\n r_initializer=rinit,\n i_initializer=iinit)\n elif args.net_type == 'nnRNN2':\n rnn = OrthoRNNCell2(inp_size,args.nhid, args.bptt, args.nonlin,\n bias=False,\n cuda=args.cuda,\n r_initializer=rinit,\n i_initializer=iinit)\n\n elif args.net_type == 'expRNN':\n rnn = ExpRNN(inp_size,args.nhid, args.nonlin,\n skew_initializer=rinit,\n input_initializer=iinit)\n elif args.net_type == 'expRNN2':\n rnn = ExpRNN2(inp_size,args.nhid, args.bptt, args.nonlin,\n skew_initializer=rinit,\n input_initializer=iinit)\n elif args.net_type == 'LSTM':\n rnn = LSTM(inp_size,\n args.nhid,\n cuda=args.cuda)\n # print(\"rnn: \", rnn.parameters)\n return rnn\n\ndef calc_hidden_size(net_type, n_params, n_in, n_out):\n\n if net_type in ['RNN', 'RNN1', 'RNN2', 'RNN3', 'EURNN', 'RNN-Orth', 'RNNSN'] :\n a = 1\n b = n_in + n_out\n c = - n_params + n_out \n\n elif net_type in ['RORNN2', 'ARORNN2','NRNN2', 'NSRNN2']:\n a = 1\n b = n_in + n_out - 1/2\n c = -n_params + n_out\n\n elif net_type in ['EXPRNN']:\n a = 0.5\n b = n_in + n_out\n c = -n_params + n_out\n elif net_type == 'LSTM':\n a = 4\n b = 4*(n_in + n_out)\n c = -n_params + n_out\n \n return int(np.roots([a,b,c])[1])\n\n\ndef calc_hidden_size_PTB(net_type, n_params, n_chars, n_emb):\n\n if net_type in 
['RNN', 'RNN1', 'RNN2', 'RNN3', 'EURNN', 'RNN-Orth', 'RNNSN']:\n a = 1\n b = n_chars + n_emb \n c = - n_params + n_chars + n_chars*n_emb \n\n elif net_type in ['RORNN2', 'ARORNN2','NRNN2', 'NSRNN2']:\n a = 1\n b = n_emb + n_chars - 1/2\n c = - n_params + n_chars + n_chars*n_emb \n\n elif net_type in ['EXPRNN']:\n a = 0.5\n b = n_emb + n_chars\n c = - n_params + n_chars + n_chars*n_emb \n elif net_type == 'LSTM':\n a = 4\n b = 4*(n_emb + n_chars)\n c = -n_params + n_chars + n_chars*n_emb \n \n return int(np.roots([a,b,c])[1])\n\n\ndef retrieve_weight_matrices(path,test):\n data = torch.load(path)\n\ndef get_initializers(args):\n if args.rinit == \"cayley\":\n rinit = cayley_init\n elif args.rinit == \"henaff\":\n rinit = henaff_init\n elif args.rinit == \"random\":\n rinit = random_orthogonal_init\n elif args.rinit == 'xavier':\n rinit = nn.init.xavier_normal_\n if args.iinit == \"xavier\":\n iinit = nn.init.xavier_normal_\n elif args.iinit == 'kaiming':\n iinit = nn.init.kaiming_normal_\n\n return iinit, rinit\n\ndef select_optimizer(net, args):\n if args.net_type in ['nnRNN', 'nnRNN2']:\n x = [\n {'params': (param for param in net.parameters()\n if param is not net.rnn.log_P\n and param is not net.rnn.P\n and param is not net.rnn.UppT)},\n {'params': net.rnn.UppT, 'weight_decay': args.Tdecay},\n #{'params': net.rnn.gamma}\n ]\n y = [\n {'params': (param for param in net.parameters() if param is net.rnn.log_P)}\n ]\n elif args.net_type in ['expRNN', 'expRNN2']:\n x = [\n {'params': (param for param in net.parameters()\n if param is not net.rnn.log_recurrent_kernel\n and param is not net.rnn.recurrent_kernel)}\n ]\n y = [\n {'params': (param for param in net.parameters()\n if param is net.rnn.log_recurrent_kernel)}\n ]\n else:\n x = [\n {'params': (param for param in net.parameters())}#, 'weight_decay': args.Tdecay}\n ]\n if args.net_type in ['nnRNN', 'nnRNN2', 'expRNN', 'expRNN2']:\n if args.optimizer == 'RMSprop':\n optimizer = optim.RMSprop(x, lr=args.lr, alpha=args.alpha)\n orthog_optimizer = optim.RMSprop(y, lr=args.lr_orth, alpha=args.alpha)\n elif args.optimizer == 'Adam':\n optimizer = optim.Adam(x, lr=args.lr, betas=args.betas)\n orthog_optimizer = optim.Adam(y, lr=args.lr_orth, betas=args.betas)\n else:\n if args.optimizer == 'RMSprop':\n optimizer = optim.RMSprop(x, lr=args.lr, alpha=args.alpha)\n orthog_optimizer = None\n elif args.optimizer == 'Adam':\n optimizer = optim.Adam(x, lr=args.lr, betas=args.betas)\n orthog_optimizer = None\n return optimizer, orthog_optimizer\n\n\n"
] | [
[
"numpy.dot",
"torch.optim.RMSprop",
"numpy.roots",
"numpy.ones",
"torch.optim.Adam",
"numpy.eye",
"torch.mm",
"numpy.sign",
"torch.load",
"numpy.outer"
]
] |
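A quick property check for the Householder construction in rvs() above: the sampled matrix should be orthogonal with determinant +1. This sketch assumes rvs is importable from utils; adjust the import to wherever the file actually lives:

import numpy as np
from utils import rvs   # assumed import path for the function defined above

H = rvs(dim=5)
assert np.allclose(H @ H.T, np.eye(5))      # rows/columns are orthonormal
assert np.isclose(np.linalg.det(H), 1.0)    # the final sign fix forces det = +1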
egrahl/iolite | [
"064e30d9d7ec8c08f60c486cf9d6c48cca6562b5"
] | [
"src/iolite/classification/classify_sigma.py"
] | [
"import os\nimport os.path\nfrom src.iolite.sigma.sigma_values import read_sigma_from_txt\nimport scipy\nimport scipy.stats\nfrom scipy.stats import percentileofscore\n\n\nclass SigmaClassifier:\n \"\"\"\n This class reads out sigma values from dials.integrate.log files, classifies\n and ranks these values, and writes an outputfile containing these labels.\n \n Requires pre-processing with xia2.\n \"\"\"\n def __init__(self,input_directory,output_directory):\n \"\"\"Initialising the classifier with input and output directory.\n \n :param str input_directory: path to input directory (default: cwd)\n :param str output_directory: path to output directory (default: cwd)\n \"\"\"\n self.input_directory=input_directory\n self.output_directory=output_directory\n\n def extract_sigma_values(self, path):\n \"\"\"\n This function extracts the sigma values from the dials.integrate.log \n file that is in the directory.\n\n :param str path: path to the directory containing the log file\n\n :returns: sigma_b and sigma_m\n \"\"\"\n dest_file=os.path.join(path, \"DEFAULT/NATIVE/SWEEP1/integrate/dials.integrate.log\")\n \n #checking if log file was copied and is therefore directly in directory\n entries=os.listdir(path)\n for e in entries:\n if e==\"dials.integrate.log\":\n dest_file=\"dials.integrate.log\"\n \n #read sigma values from log file\n with open(\n dest_file\n ) as f:\n f = f.readlines()\n\n for line in f[:200]:\n\n index_b = line.find(\"sigma b\")\n index_m = line.find(\"sigma m\")\n if index_b > (-1):\n tokens = line.split(\" \")\n sigma_b = float(tokens[3])\n\n if index_m > (-1):\n tokens = line.split(\" \")\n sigma_m = float(tokens[3])\n\n return sigma_b, sigma_m\n\n def classify_sigma(self, sigma, sigma_list):\n \"\"\"This function ranks the sigma value according to the distribution\n in sigma_values.txt and labels the dataset.\n \n :param float sigma: sigma value of dataset\n :param list sigma_list: list containg reference sigma values\n\n :returns: rank and label of dataset\n \"\"\"\n #get rank\n rank = percentileofscore(sigma_list, sigma)\n \n #get label\n if rank<20:\n label=\"low\"\n elif rank<80:\n label=\"medium\"\n else:\n label=\"high\"\n\n return label, rank\n\n def write_output_file(self, label_b,rank_b, label_m, rank_m):\n \"\"\"This function writes an outputfile label_sigma.txt in the\n output directory.\n\n :param str label_b: label for sigma b value\n :param float rank_b: rank of sigma b value\n :param str label_m: label for sigma m value\n :param float rank_m: rank of sigma m value\n \"\"\"\n\n labels = [label_b,rank_b, label_m,rank_m]\n text = [\"Classification of sigma b: \", \"Rank of sigma b: \", \"Classification of sigma m: \", \"Rank of sigma m: \"]\n name_outfile=self.output_directory+\"/label_sigma.txt\"\n with open(name_outfile, \"w\") as outfile:\n for t, l in zip(text, labels):\n outfile.write(\"%s %s\\n\" % (t, l))\n\n \n\n def main(self):\n \"\"\"\n The main program of SigmaClassifier that extracts the sigma\n values from the log file, classifies them and writes an\n output file with the labels.\n\n :returns: list containing the classifications\n \"\"\"\n path=self.input_directory\n sigma_b_list, sigma_m_list = read_sigma_from_txt(\n \"/dls/science/users/gwx73773/iolite/share/sigma_values.txt\"\n )\n \n sigma_b, sigma_m = self.extract_sigma_values(path)\n\n #classify sigma values\n label_b, rank_b = self.classify_sigma(sigma_b, sigma_b_list)\n label_m, rank_m = self.classify_sigma(sigma_m, sigma_m_list)\n \n #write output file\n 
self.write_output_file(label_b,rank_b,label_m,rank_m)\n\n #write classifications to list\n data=[label_b,rank_b,label_m,rank_m]\n\n return data\n\n\ndef run():\n \"\"\"Allows classify_sigma to be called from command line.\"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(description=\"command line argument\")\n\n parser.add_argument(\n \"--input_directory\",\n dest=\"input_directory\",\n type=str,\n help=\"Path to the input directory.\",\n default=os.getcwd(),\n )\n parser.add_argument(\n \"--output_directory\",\n dest=\"output_directory\",\n type=str,\n help=\"Path to the output directory.\",\n default=os.getcwd(),\n )\n\n args = parser.parse_args()\n sigma_classifier = SigmaClassifier(args.input_directory,args.output_directory)\n sigma_classifier.main()\n\n\nif __name__ == \"__main__\":\n run()\n"
] | [
[
"scipy.stats.percentileofscore"
]
] |
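classify_sigma above hinges on scipy.stats.percentileofscore; a tiny worked case with made-up reference sigmas shows the rank-to-label banding:

from scipy.stats import percentileofscore

sigma_b_list = [0.010, 0.020, 0.030, 0.040, 0.050]   # stand-in reference distribution
rank = percentileofscore(sigma_b_list, 0.035)        # 60.0: three of five values lie below
label = "low" if rank < 20 else ("medium" if rank < 80 else "high")
print(label, rank)                                   # medium 60.0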
kkothari2001/Gesture-Mouse-using-OpenCV | [
"a43e67e48ccdaf644ecfc9be1aa937d5e44ff537"
] | [
"final.py"
] | [
"\nimport cv2\nimport numpy as np\nfrom pynput.mouse import Button, Controller\nimport wx\nmouse = Controller()\n\n\n# To get the screen size\napp = wx.App(False)\n(sx, sy) = wx.GetDisplaySize()\n\n\n# To set the HSV values of ranges\nlower_blue = np.array([110, 100, 100])\nupper_blue = np.array([130, 255, 255])\nlower_red = np.array([170, 120, 70])\nupper_red = np.array([180, 255, 255])\n\n# To resize the image according to the screen size\nimw = 1000\nimh = int(imw * (sy / sx))\nmousePress = False\ncap = cv2.VideoCapture(0)\n\n\nwhile True:\n _, img = cap.read()\n img = cv2.resize(img, (imw, imh))\n # Conversion to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n # Detection using HSV segmentation\n red_mask = cv2.inRange(hsv, lower_red, upper_red)\n cv2.imshow(\"red_init\", red_mask)\n\n blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)\n cv2.imshow(\"blue_init\", blue_mask)\n\n # MIX MASK\n mix = cv2.bitwise_or(red_mask, blue_mask)\n cv2.imshow(\"mix_init\", mix)\n\n mix = cv2.GaussianBlur(mix, (7, 7), 0)\n _, mix = cv2.threshold(mix, 50, 255, cv2.THRESH_BINARY)\n\n # Blurs\n red_blur = cv2.medianBlur(red_mask, 11)\n blue_blur = cv2.medianBlur(blue_mask, 11)\n mix_blur = cv2.medianBlur(mix, 11)\n\n cv2.imshow(\"red_blur\", red_blur)\n cv2.imshow(\"blue_blur\", blue_blur)\n cv2.imshow(\"mix_blur\", mix_blur)\n\n # Detect Contours\n mix_cont, heir = cv2.findContours(\n mix_blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n red_cont, heir = cv2.findContours(\n red_blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n blue_cont, heir = cv2.findContours(\n blue_blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n if len(mix_cont) == 2 and len(red_cont) == 1 and len(blue_cont) == 1:\n red_hull = cv2.convexHull(red_cont[0], False)\n img = cv2.drawContours(img, [red_hull], -1, (0, 0, 255), 1)\n\n blue_hull = cv2.convexHull(blue_cont[0], False)\n img = cv2.drawContours(img, [blue_hull], -1, (255, 0, 0), 1)\n\n M_red = cv2.moments(red_hull)\n flag1 = False\n\n if mousePress:\n mouse.release(Button.left)\n mousePress = False\n\n # Detect the centroid\n if M_red[\"m00\"] != 0:\n flag1 = True\n c1X = int(M_red[\"m10\"] / M_red[\"m00\"])\n c1Y = int(M_red[\"m01\"] / M_red[\"m00\"])\n img = cv2.circle(img, (c1X, c1Y), 3, (0, 0, 255), -1)\n\n M_blue = cv2.moments(blue_hull)\n flag2 = False\n\n # Detect the centroid\n if M_blue[\"m00\"] != 0:\n flag2 = True\n c2X = int(M_blue[\"m10\"] / M_blue[\"m00\"])\n c2Y = int(M_blue[\"m01\"] / M_blue[\"m00\"])\n img = cv2.circle(img, (c2X, c2Y), 3, (255, 0, 0), -1)\n\n if flag1 and flag2:\n cX = int((c1X+c2X)/2)\n cY = int((c1Y+c2Y)/2)\n mouseLoc = (sx - (sx * (cX/imw)), sy*(cY/imh))\n mouse.position = mouseLoc\n\n elif len(mix_cont) == 1 and len(red_cont) == 1 and len(blue_cont) == 1:\n red_hull = cv2.convexHull(red_cont[0], False)\n img = cv2.drawContours(img, [red_hull], -1, (0, 0, 255), 1)\n\n blue_hull = cv2.convexHull(blue_cont[0], False)\n img = cv2.drawContours(img, [blue_hull], -1, (255, 0, 0), 1)\n\n if not(mousePress):\n mouse.press(Button.left)\n mousePress = True\n\n M_red = cv2.moments(red_hull)\n flag1 = False\n\n # Detect the centroid\n if M_red[\"m00\"] != 0:\n flag1 = True\n c1X = int(M_red[\"m10\"] / M_red[\"m00\"])\n c1Y = int(M_red[\"m01\"] / M_red[\"m00\"])\n img = cv2.circle(img, (c1X, c1Y), 3, (0, 0, 255), -1)\n\n M_blue = cv2.moments(blue_hull)\n flag2 = False\n\n # Detect the centroid\n if M_blue[\"m00\"] != 0:\n flag2 = True\n c2X = int(M_blue[\"m10\"] / M_blue[\"m00\"])\n c2Y = int(M_blue[\"m01\"] / M_blue[\"m00\"])\n img = cv2.circle(img, 
(c2X, c2Y), 3, (255, 0, 0), -1)\n\n if flag1 and flag2:\n cX = int((c1X+c2X)/2)\n cY = int((c1Y+c2Y)/2)\n mouseLoc = (sx - (sx * (cX/imw)), sy*(cY/imh))\n mouse.position = mouseLoc\n cv2.imshow(\"img\", img)\n # cv2.imshow(\"mix\", mix_blur)\n # cv2.imshow(\"red\", red_blur)\n # cv2.imshow(\"blue\", blue_blur)\n\n key = cv2.waitKey(1)\n\n if key & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.array"
]
] |
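The HSV segmentation step above, isolated: convert BGR to HSV and threshold against the blue band. The 4x4 image is synthetic; a strongly blue BGR pixel has hue 120, inside [110, 130]:

import cv2
import numpy as np

img = np.zeros((4, 4, 3), dtype=np.uint8)
img[..., 0] = 200                                    # BGR, so channel 0 is blue
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([110, 100, 100]),   # same bounds as the script
                   np.array([130, 255, 255]))
assert mask.all()                                    # every pixel passes the test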
abalone1/Image-Slicer | [
"c3f03d432f240f550f5a6b23f465ee44c66ada34"
] | [
"ImageSlicer.py"
] | [
"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n \r\nclass ImageSlicer(object):\r\n \r\n def __init__(self, source, size, strides=[None, None], BATCH = False, PADDING=False):\r\n self.source = source\r\n self.size = size\r\n self.strides = strides\r\n self.BATCH = BATCH\r\n self.PADDING = PADDING\r\n \r\n def __read_images(self):\r\n Images = []\r\n image_names = sorted(os.listdir(self.source))\r\n for im in image_names:\r\n image = plt.imread(os.path.join(dir_path,im))\r\n Images.append(image)\r\n return Images\r\n\r\n def __offset_op(self, input_length, output_length, stride):\r\n offset = (input_length) - (stride*((input_length - output_length)//stride)+output_length)\r\n return offset\r\n \r\n def __padding_op(self, Image):\r\n if self.offset_x > 0:\r\n padding_x = self.strides[0] - self.offset_x\r\n else:\r\n padding_x = 0\r\n if self.offset_y > 0:\r\n padding_y = self.strides[1] - self.offset_y\r\n else:\r\n padding_y = 0\r\n Padded_Image = np.zeros(shape=(Image.shape[0]+padding_x, Image.shape[1]+padding_y, Image.shape[2]),dtype=Image.dtype)\r\n Padded_Image[padding_x//2:(padding_x//2)+(Image.shape[0]),padding_y//2:(padding_y//2)+Image.shape[1],:] = Image \r\n return Padded_Image\r\n\r\n def __convolution_op(self, Image):\r\n start_x = 0\r\n start_y = 0\r\n n_rows = Image.shape[0]//self.strides[0] + 1\r\n n_columns = Image.shape[1]//self.strides[1] + 1\r\n small_images = []\r\n for i in range(n_rows-1):\r\n for j in range(n_columns-1):\r\n new_start_x = start_x+i*self.strides[0]\r\n new_start_y= start_y+j*self.strides[1]\r\n small_images.append(Image[new_start_x:new_start_x+self.size[0],new_start_y:new_start_y+self.size[1],:])\r\n return small_images\r\n\r\n def transform(self):\r\n \r\n if not(os.path.exists(self.source)):\r\n raise Exception(\"Path does not exist!\")\r\n \r\n else:\r\n if self.source and not(self.BATCH):\r\n Image = plt.imread(self.source)\r\n Images = [Image]\r\n else: \r\n Images = self.__read_images()\r\n\r\n im_size = Images[0].shape\r\n num_images = len(Images)\r\n transformed_images = dict()\r\n Images = np.array(Images)\r\n \r\n if self.PADDING:\r\n \r\n padded_images = []\r\n\r\n if self.strides[0]==None and self.strides[1]==None:\r\n self.strides[0] = self.size[0]\r\n self.strides[1] = self.size[1]\r\n self.offset_x = Images.shape[1]%self.size[0]\r\n self.offset_y = Images.shape[2]%self.size[1]\r\n padded_images = list(map(self.__padding_op, Images))\r\n \r\n elif self.strides[0]==None and self.strides[1]!=None: \r\n self.strides[0] = self.size[0]\r\n self.offset_x = Images.shape[1]%self.size[0]\r\n if self.strides[1] <= Images.shape[2]:\r\n self.offset_y = self.__offset_op(Images.shape[2], self.size[1], self.strides[1])\r\n else:\r\n raise Exception(\"stride_y must be between {0} and {1}\".format(1,Images.shape[2]))\r\n padded_images = list(map(self.__padding_op, Images))\r\n\r\n elif self.strides[0]!=None and self.strides[1]==None: \r\n self.strides[1] = self.size[1]\r\n self.offset_y = Images.shape[2]%self.size[1]\r\n if self.strides[0] <=Images.shape[1]:\r\n self.offset_x = self.__offset_op(Images.shape[1], self.size[0], self.strides[0])\r\n else:\r\n raise Exception(\"stride_x must be between {0} and {1}\".format(1,Images.shape[1]))\r\n padded_images = list(map(self.__padding_op, Images))\r\n \r\n else:\r\n if self.strides[0] > Images.shape[1]:\r\n raise Exception(\"stride_x must be between {0} and {1}\".format(1,Images.shape[1]))\r\n \r\n elif self.strides[1] > Images.shape[2]:\r\n raise Exception(\"stride_y must be between {0} 
and {1}\".format(1,Images.shape[2]))\r\n \r\n else:\r\n self.offset_x = self.__offset_op(Images.shape[1], self.size[0], self.strides[0])\r\n self.offset_y = self.__offset_op(Images.shape[2], self.size[1], self.strides[1])\r\n padded_images = list(map(self.__padding_op, Images))\r\n\r\n for i, Image in enumerate(padded_images):\r\n transformed_images[str(i)] = self.__convolution_op(Image)\r\n\r\n else:\r\n if self.strides[0]==None and self.strides[1]==None:\r\n self.strides[0] = self.size[0]\r\n self.strides[1] = self.size[1]\r\n\r\n elif self.strides[0]==None and self.strides[1]!=None:\r\n if self.strides[1] > Images.shape[2]:\r\n raise Exception(\"stride_y must be between {0} and {1}\".format(1,Images.shape[2])) \r\n self.strides[0] = self.size[0]\r\n\r\n elif self.strides[0]!=None and self.strides[1]==None:\r\n if self.strides[0] > Images.shape[1]:\r\n raise Exception(\"stride_x must be between {0} and {1}\".format(1,Images.shape[1])) \r\n self.strides[1] = self.size[1]\r\n else:\r\n if self.strides[0] > Images.shape[1]:\r\n raise Exception(\"stride_x must be between {0} and {1}\".format(1,Images.shape[1])) \r\n elif self.strides[1] > Images.shape[2]:\r\n raise Exception(\"stride_y must be between {0} and {1}\".format(1,Images.shape[2]))\r\n \r\n for i, Image in enumerate(Images):\r\n transformed_images[str(i)] = self.__convolution_op(Image)\r\n\r\n return transformed_images\r\n \r\n def save_images(self,transformed, save_dir):\r\n if not(os.path.exists(save_dir)):\r\n raise Exception(\"Path does not exist!\")\r\n else:\r\n for key, val in transformed.items():\r\n path = os.path.join(save_dir, key)\r\n os.mkdir(path)\r\n for i, j in enumerate(val):\r\n plt.imsave(os.path.join(path, str(i+1)+'.png'), j)\r\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.imread",
"numpy.zeros"
]
] |
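A minimal usage sketch for the ImageSlicer record above. The file name 'input.png' and output directory 'tiles/' are illustrative, not from the source; the call pattern itself follows the class as written (strides default to the patch size, and PADDING=True zero-pads the borders so every patch is full-sized).

# slice one image into non-overlapping 256x256 patches
slicer = ImageSlicer(source='input.png', size=(256, 256), BATCH=False, PADDING=True)
tiles = slicer.transform()           # dict: image index (str) -> list of HxWxC numpy patches
slicer.save_images(tiles, 'tiles/')  # writes tiles/0/1.png, tiles/0/2.png, ...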
hieuhoang/FasterTransformer | [
"440695ccac874574b1d2e1121788e8fa674b4381"
] | [
"examples/pytorch/swin/Swin-Transformer-Quantization/data.py"
] | [
"# --------------------------------------------------------\n# Swin Transformer\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ze Liu\n# --------------------------------------------------------\n# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport torch\nimport numpy as np\nimport torch.distributed as dist\nfrom torchvision import datasets, transforms\nfrom timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom timm.data import Mixup\nfrom timm.data import create_transform\nfrom timm.data.transforms import _pil_interp\n\nfrom SwinTransformer.data.samplers import SubsetRandomSampler\nfrom SwinTransformer.data.build import build_dataset\n\ndef build_val_loader(config):\n config.freeze()\n dataset_val, _ = build_dataset(is_train=False, config=config)\n # print(f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset\")\n\n indices = np.arange(0, len(dataset_val), 1)\n sampler_val = SubsetRandomSampler(indices)\n\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val, sampler=sampler_val,\n batch_size=config.DATA.BATCH_SIZE,\n shuffle=False,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n drop_last=False\n )\n \n return dataset_val, data_loader_val\n\ndef build_loader(config, args):\n config.defrost()\n dataset_train, config.MODEL.NUM_CLASSES = build_dataset(is_train=True, config=config)\n config.freeze()\n print(f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset\")\n dataset_val, _ = build_dataset(is_train=False, config=config)\n print(f\"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset\")\n\n num_tasks = dist.get_world_size()\n global_rank = dist.get_rank()\n if config.DATA.ZIP_MODE and config.DATA.CACHE_MODE == 'part':\n indices = np.arange(dist.get_rank(), len(dataset_train), dist.get_world_size())\n sampler_train = SubsetRandomSampler(indices)\n else:\n sampler_train = torch.utils.data.DistributedSampler(\n dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True\n )\n\n indices = np.arange(dist.get_rank(), len(dataset_val), dist.get_world_size())\n sampler_val = SubsetRandomSampler(indices)\n\n if args.calib:\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train, sampler=sampler_train,\n batch_size=args.calib_batchsz,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n drop_last=True,\n )\n else:\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train, sampler=sampler_train,\n batch_size=config.DATA.BATCH_SIZE,\n num_workers=config.DATA.NUM_WORKERS,\n pin_memory=config.DATA.PIN_MEMORY,\n drop_last=True,\n )\n\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val, sampler=sampler_val,\n batch_size=config.DATA.BATCH_SIZE,\n shuffle=False,\n num_workers=config.DATA.NUM_WORKERS,\n 
pin_memory=config.DATA.PIN_MEMORY,\n drop_last=False\n )\n\n # setup mixup / cutmix\n mixup_fn = None\n mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None\n if mixup_active:\n mixup_fn = Mixup(\n mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,\n prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,\n label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)\n\n return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn"
] | [
[
"torch.utils.data.DistributedSampler",
"torch.distributed.get_world_size",
"torch.distributed.get_rank",
"torch.utils.data.DataLoader"
]
] |
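A hedged sketch of how the loader builders above are typically driven. `get_config` and `args` are assumptions standing in for the Swin-Transformer config plumbing, which is not part of this record; the config must carry the DATA.BATCH_SIZE / NUM_WORKERS / PIN_MEMORY fields the builders read.

from config import get_config      # assumed YACS-style config factory (not in this record)
from data import build_val_loader

config = get_config(args)          # args: an argparse.Namespace (assumption)
dataset_val, data_loader_val = build_val_loader(config)
for images, targets in data_loader_val:
    pass                           # run evaluation on each batch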
nadheesh/probability | [
"5f576230f1e261a823e20a49c442ff38c8f381d3"
] | [
"tensorflow_probability/python/glm/fisher_scoring_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for GLM Fisher Scoring.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow.python.framework import test_util\n\ntfd = tfp.distributions\n\n\nclass FitTestFast(tf.test.TestCase):\n\n dtype = np.float32\n fast = True\n\n def make_dataset(self, n, d, link, scale=1.):\n seed = tfd.SeedStream(\n seed=213356351, salt='tfp.glm.fisher_scoring_test')\n model_coefficients = tfd.Uniform(\n low=np.array(-0.5, self.dtype),\n high=np.array(0.5, self.dtype)).sample(d, seed=seed())\n radius = np.sqrt(2.)\n model_coefficients *= radius / tf.linalg.norm(model_coefficients)\n model_matrix = tfd.Normal(\n loc=np.array(0, self.dtype),\n scale=np.array(1, self.dtype)).sample([n, d], seed=seed())\n scale = tf.convert_to_tensor(scale, self.dtype)\n linear_response = tf.tensordot(\n model_matrix, model_coefficients, axes=[[1], [0]])\n if link == 'linear':\n response = tfd.Normal(loc=linear_response, scale=scale).sample(\n seed=seed())\n elif link == 'probit':\n response = tf.cast(\n tfd.Normal(loc=linear_response, scale=scale).sample(seed=seed()) > 0,\n self.dtype)\n elif link == 'logit':\n response = tfd.Bernoulli(logits=linear_response).sample(seed=seed())\n else:\n raise ValueError('unrecognized true link: {}'.format(link))\n return model_matrix, response, model_coefficients, linear_response\n\n @test_util.run_in_graph_and_eager_modes()\n def testProbitWorksCorrectly(self):\n [\n model_matrix,\n response,\n model_coefficients_true,\n linear_response_true,\n ] = self.make_dataset(n=int(1e4), d=3, link='probit')\n model_coefficients, linear_response, is_converged, num_iter = tfp.glm.fit(\n model_matrix,\n response,\n tfp.glm.BernoulliNormalCDF(),\n fast_unsafe_numerics=self.fast,\n maximum_iterations=10)\n [\n model_coefficients_,\n linear_response_,\n is_converged_,\n num_iter_,\n model_coefficients_true_,\n linear_response_true_,\n response_,\n ] = self.evaluate([\n model_coefficients,\n linear_response,\n is_converged,\n num_iter,\n model_coefficients_true,\n linear_response_true,\n response,\n ])\n prediction = linear_response_ > 0.\n accuracy = np.mean(response_ == prediction)\n # Since both the true data generating process and model are the same, the\n # diff between true and predicted linear responses should be zero, on\n # average.\n avg_response_diff = np.mean(linear_response_ - linear_response_true_)\n\n self.assertTrue(num_iter_ < 10)\n self.assertNear(0., avg_response_diff, err=4e-3)\n self.assertAllClose(0.8, accuracy, atol=0., rtol=0.03)\n self.assertAllClose(model_coefficients_true_, model_coefficients_,\n atol=0.03, rtol=0.15)\n self.assertTrue(is_converged_)\n\n @test_util.run_in_graph_and_eager_modes()\n def 
testLinearWorksCorrectly(self):\n [\n model_matrix,\n response,\n model_coefficients_true,\n linear_response_true,\n ] = self.make_dataset(n=int(1e4), d=3, link='linear')\n model_coefficients, linear_response, is_converged, num_iter = tfp.glm.fit(\n model_matrix,\n response,\n tfp.glm.Normal(),\n fast_unsafe_numerics=self.fast,\n maximum_iterations=10)\n [\n model_coefficients_,\n linear_response_,\n is_converged_,\n num_iter_,\n model_coefficients_true_,\n linear_response_true_,\n ] = self.evaluate([\n model_coefficients,\n linear_response,\n is_converged,\n num_iter,\n model_coefficients_true,\n linear_response_true,\n ])\n # Since both the true data generating process and model are the same, the\n # diff between true and predicted linear responses should be zero, on\n # average.\n avg_response_diff = np.mean(linear_response_ - linear_response_true_)\n self.assertNear(0., avg_response_diff, err=3e-3)\n self.assertAllClose(model_coefficients_true_, model_coefficients_,\n atol=0.03, rtol=0.15)\n self.assertTrue(is_converged_)\n # Since linear regression is a quadratic objective and because\n # we're using a Newton-Raphson solver, we actually expect to obtain the\n # solution in one step. It takes two because the way we structure the while\n # loop means that the procedure can only terminate on the second iteration.\n self.assertTrue(num_iter_ < 3)\n\n @test_util.run_in_graph_and_eager_modes()\n def testBatchedOperationConverges(self):\n model_1 = self.make_dataset(n=10, d=3, link='linear')\n model_2 = self.make_dataset(n=10, d=3, link='probit')\n model_matrices = [model_1[0], model_2[0]]\n responses = [model_1[1], model_2[1]]\n\n _, _, is_converged, _ = self.evaluate(\n tfp.glm.fit(\n model_matrices,\n responses,\n tfp.glm.Normal(),\n fast_unsafe_numerics=self.fast,\n maximum_iterations=10))\n self.assertTrue(is_converged)\n\n\nclass FitTestSlow(FitTestFast):\n\n fast = False\n\n # Only need to run this test once since it compares fast to slow.\n # We use `fast` as a baseline since core TF implements the L2 regularization\n # in this case.\n def _testL2RegularizationWorksCorrectly(self, static_l2):\n n = int(1e3)\n [\n model_matrix,\n response,\n _, # model_coefficients_true\n _, # linear_response_true\n ] = self.make_dataset(n=n, d=3, link='probit')\n l2_regularizer = np.array(0.07 * n, model_matrix.dtype.as_numpy_dtype)\n if not static_l2:\n l2_regularizer = tf.placeholder_with_default(l2_regularizer, shape=[])\n [\n expected_model_coefficients,\n expected_linear_response,\n expected_is_converged,\n expected_num_iter,\n ] = tfp.glm.fit(\n model_matrix,\n response,\n tfp.glm.BernoulliNormalCDF(),\n l2_regularizer=l2_regularizer,\n fast_unsafe_numerics=True,\n maximum_iterations=10)\n [\n actual_model_coefficients,\n actual_linear_response,\n actual_is_converged,\n actual_num_iter,\n ] = tfp.glm.fit(\n model_matrix,\n response,\n tfp.glm.BernoulliNormalCDF(),\n l2_regularizer=l2_regularizer,\n fast_unsafe_numerics=False,\n maximum_iterations=10)\n\n [\n expected_model_coefficients_,\n expected_linear_response_,\n expected_is_converged_,\n expected_num_iter_,\n actual_model_coefficients_,\n actual_linear_response_,\n actual_is_converged_,\n actual_num_iter_,\n ] = self.evaluate([\n expected_model_coefficients,\n expected_linear_response,\n expected_is_converged,\n expected_num_iter,\n actual_model_coefficients,\n actual_linear_response,\n actual_is_converged,\n actual_num_iter,\n ])\n\n self.assertAllClose(\n expected_model_coefficients_, actual_model_coefficients_,\n atol=1e-6, 
rtol=1e-6)\n self.assertAllClose(\n expected_linear_response_, actual_linear_response_,\n atol=1e-5, rtol=1e-5)\n self.assertEqual(expected_is_converged_, actual_is_converged_)\n self.assertEqual(expected_num_iter_, actual_num_iter_)\n\n @test_util.run_in_graph_and_eager_modes()\n def testStaticL2RegularizationWorksCorrectly(self):\n self._testL2RegularizationWorksCorrectly(static_l2=True)\n\n# TODO(jvdillon): Re-enable once matrix_solve_ls correctly casts\n# l2_regularization.\n# @test_util.run_in_graph_and_eager_modes()\n# def testDynamicL2RegularizationWorksCorrectly(self):\n# self._testL2RegularizationWorksCorrectly(static_l2=False)\n\n\n# TODO(b/79377499): Add additional unit-tests, esp, those to cover cases when\n# grad_mean=variance=0 or either isn't finite.\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.convert_to_tensor",
"numpy.array",
"tensorflow.tensordot",
"numpy.mean",
"tensorflow.linalg.norm",
"numpy.sqrt",
"tensorflow.test.main",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.placeholder_with_default"
]
] |
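The fitting API exercised by the tests above can be sketched outside the test harness. This is a minimal eager-mode example of `tfp.glm.fit` with synthetic probit data; shapes and coefficient values are illustrative.

import numpy as np
import tensorflow_probability as tfp

x = np.random.randn(1000, 3).astype(np.float32)        # model matrix
w_true = np.array([0.4, -0.3, 0.2], dtype=np.float32)  # ground-truth coefficients
y = (x.dot(w_true) + 0.5 * np.random.randn(1000) > 0).astype(np.float32)

# returns (coefficients, linear_response, is_converged, num_iterations)
coeffs, linear_response, converged, num_iter = tfp.glm.fit(
    x, y, tfp.glm.BernoulliNormalCDF(), maximum_iterations=10)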
rouseguy/DL_from_scratch | [
"5f6db821732f3766a686f07733879396ea2791ac"
] | [
"ann1.py"
] | [
"# Building neural network from scratch.\n# This version is based on the following blog:\n# https://enlight.nyc/projects/neural-network/\n\n# import the required libraries\nimport numpy as np\n\n\n\n# Create the training dataset - input features and labels\nX = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)\ny = np.array(([92], [86], [89]), dtype=float)\n\n\n# Scale X and y\nX = X/np.amax(X, axis=0)\ny = y/100\n\nprint(X)\nprint(y)\n\n# Creating the class for Neural Network\n\nclass Neural_Network(object):\n def __init__(self):\n #parameters\n self.inputSize = 2\n self.outputSize = 1\n self.hiddenSize = 3\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize)\n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)\n\n def forward(self, X):\n # forward propagation through the Neural Network\n self.z1 = np.dot(X, self.W1)\n self.z2 = self.sigmoid(self.z1)\n self.z3 = np.dot(self.z2, self.W2)\n output = self.sigmoid(self.z3)\n\n return output\n\n def sigmoid(self, s):\n # sigmoid activation function\n return 1/(1+np.exp(-s))\n\n# Create an instance of the class neural network\nNN = Neural_Network()\n\nmodel_output = NN.forward(X)\n\nprint(\"\\n Actual output:\", str(y))\nprint(\"\\n Predicted output:\", str(model_output))\n\n\n \n\n"
] | [
[
"numpy.array",
"numpy.dot",
"numpy.random.randn",
"numpy.exp",
"numpy.amax"
]
] |
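The network in this record stops at the forward pass. A one-step backpropagation sketch consistent with its sigmoid activations and squared-error loss (an addition following the referenced blog, not part of the original file) would be:

def sigmoid_prime(s):
    # derivative of the sigmoid expressed in terms of its output
    return s * (1 - s)

output = NN.forward(X)
output_delta = (y - output) * sigmoid_prime(output)           # error signal at the output layer
z2_delta = output_delta.dot(NN.W2.T) * sigmoid_prime(NN.z2)   # backpropagated to the hidden layer
NN.W1 += X.T.dot(z2_delta)                                    # weight updates as in the referenced blog
NN.W2 += NN.z2.T.dot(output_delta)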
thorstenwagner/keras | [
"670349607fb0dd708ec8598ccea9e6689a3ea46c"
] | [
"keras/engine/saving.py"
] | [
"\"\"\"Model saving utilities.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\nimport os\nimport json\nimport yaml\nimport warnings\nfrom six.moves import zip\n\nfrom .. import backend as K\nfrom .. import optimizers\nfrom ..utils.io_utils import ask_to_proceed_with_overwrite\nfrom ..utils.io_utils import H5Dict\nfrom ..utils import conv_utils\n\ntry:\n import h5py\n HDF5_OBJECT_HEADER_LIMIT = 64512\nexcept ImportError:\n h5py = None\n\n\ndef _serialize_model(model, h5dict, include_optimizer=True):\n \"\"\"Model serialization logic.\n\n This method is used for both writing to HDF5 file/group,\n as well as pickling. This is achieved via a\n `keras.utils.hdf5_utls.H5Dict` object, which can wrap HDF5\n files, groups and dicts with a common API.\n\n # Arguments\n model: Keras model instance to be serialized.\n h5dict: keras.utils.io_utils.HD5Dict instance.\n include_optimizer: If True, serialize optimizer's state together.\n\n \"\"\"\n def get_json_type(obj):\n \"\"\"Serialize any object to a JSON-serializable structure.\n\n # Arguments\n obj: the object to serialize\n\n # Returns\n JSON-serializable structure representing `obj`.\n\n # Raises\n TypeError: if `obj` cannot be serialized.\n \"\"\"\n # if obj is a serializable Keras class instance\n # e.g. optimizer, layer\n if hasattr(obj, 'get_config'):\n return {'class_name': obj.__class__.__name__,\n 'config': obj.get_config()}\n\n # if obj is any numpy type\n if type(obj).__module__ == np.__name__:\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return obj.item()\n\n # misc functions (e.g. loss function)\n if callable(obj):\n return obj.__name__\n\n # if obj is a python 'type'\n if type(obj).__name__ == type.__name__:\n return obj.__name__\n\n raise TypeError('Not JSON Serializable: %s' % (obj,))\n\n from .. 
import __version__ as keras_version\n\n h5dict['keras_version'] = str(keras_version).encode('utf8')\n h5dict['backend'] = K.backend().encode('utf8')\n\n model_config = {}\n model_config['class_name'] = model.__class__.__name__\n model_config['config'] = model.get_config()\n model_config = json.dumps(model_config, default=get_json_type)\n model_config = model_config.encode('utf-8')\n h5dict['model_config'] = model_config\n\n model_weights_group = h5dict['model_weights']\n model_layers = model.layers\n model_weights_group['layer_names'] = [layer.name.encode('utf8')\n for layer in model_layers]\n model_weights_group['backend'] = K.backend().encode('utf8')\n model_weights_group['keras_version'] = str(keras_version).encode('utf8')\n for layer in model_layers:\n layer_group = model_weights_group[layer.name]\n symbolic_weights = layer.weights\n weight_values = K.batch_get_value(symbolic_weights)\n weight_names = []\n for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):\n if hasattr(w, 'name') and w.name:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n if name in weight_names:\n idx = 2\n unique_name = name + '_1'\n while unique_name in weight_names:\n unique_name = name + '_' + str(idx)\n idx += 1\n name = unique_name\n weight_names.append(name.encode('utf8'))\n layer_group['weight_names'] = weight_names\n for name, val in zip(weight_names, weight_values):\n layer_group[name] = val\n if include_optimizer and model.optimizer:\n if isinstance(model.optimizer, optimizers.TFOptimizer):\n warnings.warn(\n 'TensorFlow optimizers do not '\n 'make it possible to access '\n 'optimizer attributes or optimizer state '\n 'after instantiation. '\n 'As a result, we cannot save the optimizer '\n 'as part of the model save file.'\n 'You will have to compile your model again '\n 'after loading it. 
'\n 'Prefer using a Keras optimizer instead '\n '(see keras.io/optimizers).')\n else:\n h5dict['training_config'] = json.dumps({\n 'optimizer_config': {\n 'class_name': model.optimizer.__class__.__name__,\n 'config': model.optimizer.get_config()\n },\n 'loss': model.loss,\n 'metrics': model.metrics,\n 'weighted_metrics': model.weighted_metrics,\n 'sample_weight_mode': model.sample_weight_mode,\n 'loss_weights': model.loss_weights,\n }, default=get_json_type).encode('utf8')\n symbolic_weights = getattr(model.optimizer, 'weights')\n if symbolic_weights:\n optimizer_weights_group = h5dict['optimizer_weights']\n weight_values = K.batch_get_value(symbolic_weights)\n weight_names = []\n for i, (w, val) in enumerate(zip(symbolic_weights,\n weight_values)):\n # Default values of symbolic_weights is /variable\n # for Theano and CNTK\n if K.backend() == 'theano' or K.backend() == 'cntk':\n if hasattr(w, 'name'):\n if w.name.split('/')[-1] == 'variable':\n name = str(w.name) + '_' + str(i)\n else:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n else:\n if hasattr(w, 'name') and w.name:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n if name in weight_names:\n idx = 2\n unique_name = name + '_1'\n while unique_name in weight_names:\n unique_name = name + '_' + str(idx)\n idx += 1\n name = unique_name\n weight_names.append(name.encode('utf8'))\n optimizer_weights_group['weight_names'] = weight_names\n for name, val in zip(weight_names, weight_values):\n optimizer_weights_group[name] = val\n\n\ndef _deserialize_model(h5dict, custom_objects=None, compile=True):\n \"\"\"De-serializes a model serialized via _serialize_model\n\n # Arguments\n h5dict: `keras.utils.hdf5_utils.HFDict` instance.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n compile: Boolean, whether to compile the model\n after loading.\n\n # Returns\n A Keras model instance. If an optimizer was found\n as part of the saved model, the model is already\n compiled. Otherwise, the model is uncompiled and\n a warning will be displayed. 
When `compile` is set\n to False, the compilation is omitted without any\n warning.\n \"\"\"\n if not custom_objects:\n custom_objects = {}\n\n def convert_custom_objects(obj):\n \"\"\"Handles custom object lookup.\n\n # Arguments\n obj: object, dict, or list.\n\n # Returns\n The same structure, where occurrences\n of a custom object name have been replaced\n with the custom object.\n \"\"\"\n if isinstance(obj, list):\n deserialized = []\n for value in obj:\n deserialized.append(convert_custom_objects(value))\n return deserialized\n if isinstance(obj, dict):\n deserialized = {}\n for key, value in obj.items():\n deserialized[key] = convert_custom_objects(value)\n return deserialized\n if obj in custom_objects:\n return custom_objects[obj]\n return obj\n\n model_config = h5dict['model_config']\n if model_config is None:\n raise ValueError('No model found in config.')\n model_config = json.loads(model_config.decode('utf-8'))\n model = model_from_config(model_config, custom_objects=custom_objects)\n model_weights_group = h5dict['model_weights']\n\n if 'keras_version' in model_weights_group:\n original_keras_version = model_weights_group['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in model_weights_group:\n original_backend = model_weights_group['backend'].decode('utf8')\n else:\n original_backend = None\n\n layer_names = model_weights_group['layer_names']\n\n layers = model.layers\n\n filtered_layers = []\n for layer in layers:\n weights = layer.weights\n if weights:\n filtered_layers.append(layer)\n\n filtered_layer_names = []\n for name in layer_names:\n layer_weights = model_weights_group[name]\n weight_names = layer_weights['weight_names']\n if len(weight_names) > 0:\n filtered_layer_names.append(name)\n\n layer_names = filtered_layer_names\n if len(layer_names) != len(filtered_layers):\n raise ValueError('You are trying to load a weight file'\n ' containing {} layers into a model with {} layers'\n .format(len(layer_names), len(filtered_layers))\n )\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n layer_weights = model_weights_group[name]\n weight_names = layer_weights['weight_names']\n weight_values = [layer_weights[weight_name] for weight_name in weight_names]\n layer = filtered_layers[k]\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(layer,\n weight_values,\n original_keras_version,\n original_backend,\n reshape=False)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\" in the current model) was found to '\n 'correspond to layer ' + name +\n ' in the save file. '\n 'However the new layer ' + layer.name +\n ' expects ' + str(len(symbolic_weights)) +\n ' weights, but the saved weights have ' +\n str(len(weight_values)) +\n ' elements.')\n weight_value_tuples += zip(symbolic_weights, weight_values)\n K.batch_set_value(weight_value_tuples)\n\n if compile:\n training_config = h5dict.get('training_config')\n if training_config is None:\n warnings.warn('No training configuration found in save file: '\n 'the model was *not* compiled. 
'\n 'Compile it manually.')\n return model\n training_config = json.loads(training_config.decode('utf-8'))\n optimizer_config = training_config['optimizer_config']\n optimizer = optimizers.deserialize(optimizer_config,\n custom_objects=custom_objects)\n\n # Recover loss functions and metrics.\n loss = convert_custom_objects(training_config['loss'])\n metrics = convert_custom_objects(training_config['metrics'])\n sample_weight_mode = training_config['sample_weight_mode']\n loss_weights = training_config['loss_weights']\n\n # Compile model.\n model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics,\n loss_weights=loss_weights,\n sample_weight_mode=sample_weight_mode)\n\n # Set optimizer weights.\n if 'optimizer_weights' in h5dict:\n # Build train function (to get weight updates).\n model._make_train_function()\n optimizer_weights_group = h5dict['optimizer_weights']\n optimizer_weight_names = [\n n.decode('utf8') for n in\n optimizer_weights_group['weight_names']]\n optimizer_weight_values = [optimizer_weights_group[n] for n in\n optimizer_weight_names]\n try:\n model.optimizer.set_weights(optimizer_weight_values)\n except ValueError:\n warnings.warn('Error in loading the saved optimizer '\n 'state. As a result, your model is '\n 'starting with a freshly initialized '\n 'optimizer.')\n\n return model\n\n\ndef save_model(model, filepath, overwrite=True, include_optimizer=True):\n \"\"\"Save a model to a HDF5 file.\n\n Note: Please also see\n [How can I install HDF5 or h5py to save my models in Keras?](\n /getting-started/faq/\n #how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)\n in the FAQ for instructions on how to install `h5py`.\n\n The saved model contains:\n - the model's configuration (topology)\n - the model's weights\n - the model's optimizer's state (if any)\n\n Thus the saved model can be reinstantiated in\n the exact same state, without any of the code\n used for model definition or training.\n\n # Arguments\n model: Keras model instance to be saved.\n filepath: one of the following:\n - string, path where to save the model, or\n - h5py.File or h5py.Group object where to save the model\n overwrite: Whether we should overwrite any existing\n model at the target location, or instead\n ask the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n # Raises\n ImportError: if h5py is not available.\n \"\"\"\n if h5py is None:\n raise ImportError('`save_model` requires h5py.')\n\n if not isinstance(filepath, h5py.Group):\n # If file exists and should not be overwritten.\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n opened_new_file = True\n else:\n opened_new_file = False\n\n h5dict = H5Dict(filepath, mode='w')\n try:\n _serialize_model(model, h5dict, include_optimizer)\n finally:\n if opened_new_file:\n h5dict.close()\n\n\ndef load_model(filepath, custom_objects=None, compile=True):\n \"\"\"Loads a model saved via `save_model`.\n\n # Arguments\n filepath: one of the following:\n - string, path to the saved model, or\n - h5py.File or h5py.Group object from which to load the model\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n compile: Boolean, whether to compile the model\n after loading.\n\n # Returns\n A Keras model instance. If an optimizer was found\n as part of the saved model, the model is already\n compiled. 
Otherwise, the model is uncompiled and\n a warning will be displayed. When `compile` is set\n to False, the compilation is omitted without any\n warning.\n\n # Raises\n ImportError: if h5py is not available.\n ValueError: In case of an invalid savefile.\n \"\"\"\n if h5py is None:\n raise ImportError('`load_model` requires h5py.')\n model = None\n opened_new_file = not isinstance(filepath, h5py.Group)\n h5dict = H5Dict(filepath, 'r')\n try:\n model = _deserialize_model(h5dict, custom_objects, compile)\n finally:\n if opened_new_file:\n h5dict.close()\n return model\n\n\ndef pickle_model(model):\n d = {}\n h5dict = H5Dict(d)\n _serialize_model(model, h5dict)\n return d\n\n\ndef unpickle_model(state):\n h5dict = H5Dict(state, mode='r')\n return _deserialize_model(h5dict)\n\n\ndef model_from_config(config, custom_objects=None):\n \"\"\"Instantiates a Keras model from its config.\n\n # Arguments\n config: Configuration dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n # Returns\n A Keras model instance (uncompiled).\n\n # Raises\n TypeError: if `config` is not a dictionary.\n \"\"\"\n if isinstance(config, list):\n raise TypeError('`model_from_config` expects a dictionary, '\n 'not a list. Maybe you meant to use '\n '`Sequential.from_config(config)`?')\n from ..layers import deserialize\n return deserialize(config, custom_objects=custom_objects)\n\n\ndef model_from_yaml(yaml_string, custom_objects=None):\n \"\"\"Parses a yaml model configuration file and returns a model instance.\n\n # Arguments\n yaml_string: YAML string encoding a model configuration.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n # Returns\n A Keras model instance (uncompiled).\n \"\"\"\n config = yaml.load(yaml_string)\n from ..layers import deserialize\n return deserialize(config, custom_objects=custom_objects)\n\n\ndef model_from_json(json_string, custom_objects=None):\n \"\"\"Parses a JSON model configuration file and returns a model instance.\n\n # Arguments\n json_string: JSON string encoding a model configuration.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n # Returns\n A Keras model instance (uncompiled).\n \"\"\"\n config = json.loads(json_string)\n from ..layers import deserialize\n return deserialize(config, custom_objects=custom_objects)\n\n\ndef save_attributes_to_hdf5_group(group, name, data):\n \"\"\"Saves attributes (data) of the specified name into the HDF5 group.\n\n This method deals with an inherent problem of HDF5 file which is not\n able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n # Arguments\n group: A pointer to a HDF5 group.\n name: A name of the attributes to save.\n data: Attributes data to store.\n \"\"\"\n # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`\n # because in that case even chunking the array would not make the saving\n # possible.\n bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n\n # Expecting this to never be true.\n if len(bad_attributes) > 0:\n raise RuntimeError('The following attributes cannot be saved to HDF5 '\n 'file because they are larger than %d bytes: %s'\n % (HDF5_OBJECT_HEADER_LIMIT,\n ', '.join([x for x in bad_attributes])))\n\n data_npy = np.asarray(data)\n\n num_chunks = 1\n chunked_data = 
np.array_split(data_npy, num_chunks)\n\n # This will never loop forever thanks to the test above.\n while any(map(lambda x: x.nbytes > HDF5_OBJECT_HEADER_LIMIT, chunked_data)):\n num_chunks += 1\n chunked_data = np.array_split(data_npy, num_chunks)\n\n if num_chunks > 1:\n for chunk_id, chunk_data in enumerate(chunked_data):\n group.attrs['%s%d' % (name, chunk_id)] = chunk_data\n else:\n group.attrs[name] = data\n\n\ndef load_attributes_from_hdf5_group(group, name):\n \"\"\"Loads attributes of the specified name from the HDF5 group.\n\n This method deals with an inherent problem\n of HDF5 file which is not able to store\n data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n # Arguments\n group: A pointer to a HDF5 group.\n name: A name of the attributes to load.\n\n # Returns\n data: Attributes data.\n \"\"\"\n if name in group.attrs:\n data = [n.decode('utf8') for n in group.attrs[name]]\n else:\n data = []\n chunk_id = 0\n while ('%s%d' % (name, chunk_id)) in group.attrs:\n data.extend([n.decode('utf8')\n for n in group.attrs['%s%d' % (name, chunk_id)]])\n chunk_id += 1\n return data\n\n\ndef save_weights_to_hdf5_group(group, layers):\n \"\"\"Saves weights into the HDF5 group.\n\n # Arguments\n group: A pointer to a HDF5 group.\n layers: Layers to load.\n \"\"\"\n from .. import __version__ as keras_version\n\n save_attributes_to_hdf5_group(\n group, 'layer_names', [layer.name.encode('utf8') for layer in layers])\n group.attrs['backend'] = K.backend().encode('utf8')\n group.attrs['keras_version'] = str(keras_version).encode('utf8')\n\n for layer in layers:\n g = group.create_group(layer.name)\n symbolic_weights = layer.weights\n weight_values = K.batch_get_value(symbolic_weights)\n weight_names = []\n for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):\n if hasattr(w, 'name') and w.name:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n weight_names.append(name.encode('utf8'))\n save_attributes_to_hdf5_group(g, 'weight_names', weight_names)\n for name, val in zip(weight_names, weight_values):\n param_dset = g.create_dataset(name, val.shape,\n dtype=val.dtype)\n if not val.shape:\n # scalar\n param_dset[()] = val\n else:\n param_dset[:] = val\n\n\ndef preprocess_weights_for_loading(layer, weights,\n original_keras_version=None,\n original_backend=None,\n reshape=False):\n \"\"\"Converts layers weights from Keras 1 format to Keras 2.\n\n # Arguments\n layer: Layer instance.\n weights: List of weights values (Numpy arrays).\n original_keras_version: Keras version for the weights, as a string.\n original_backend: Keras backend the weights were trained with,\n as a string.\n reshape: Reshape weights to fit the layer when the correct number\n of values are present but the shape does not match.\n\n # Returns\n A list of weights values (Numpy arrays).\n \"\"\"\n def convert_nested_bidirectional(weights):\n \"\"\"Converts layers nested in `Bidirectional` wrapper.\n\n # Arguments\n weights: List of weights values (Numpy arrays).\n # Returns\n A list of weights values (Numpy arrays).\n \"\"\"\n num_weights_per_layer = len(weights) // 2\n forward_weights = preprocess_weights_for_loading(\n layer.forward_layer,\n weights[:num_weights_per_layer],\n original_keras_version,\n original_backend)\n backward_weights = preprocess_weights_for_loading(\n layer.backward_layer,\n weights[num_weights_per_layer:],\n original_keras_version,\n original_backend)\n return forward_weights + backward_weights\n\n def convert_nested_time_distributed(weights):\n \"\"\"Converts layers nested in 
`TimeDistributed` wrapper.\n\n # Arguments\n weights: List of weights values (Numpy arrays).\n # Returns\n A list of weights values (Numpy arrays).\n \"\"\"\n return preprocess_weights_for_loading(\n layer.layer, weights, original_keras_version, original_backend)\n\n def convert_nested_model(weights):\n \"\"\"Converts layers nested in `Model` or `Sequential`.\n\n # Arguments\n weights: List of weights values (Numpy arrays).\n # Returns\n A list of weights values (Numpy arrays).\n \"\"\"\n new_weights = []\n # trainable weights\n for sublayer in layer.layers:\n num_weights = len(sublayer.trainable_weights)\n if num_weights > 0:\n new_weights.extend(preprocess_weights_for_loading(\n layer=sublayer,\n weights=weights[:num_weights],\n original_keras_version=original_keras_version,\n original_backend=original_backend))\n weights = weights[num_weights:]\n\n # non-trainable weights\n for sublayer in layer.layers:\n num_weights = len([l for l in sublayer.weights\n if l not in sublayer.trainable_weights])\n if num_weights > 0:\n new_weights.extend(preprocess_weights_for_loading(\n layer=sublayer,\n weights=weights[:num_weights],\n original_keras_version=original_keras_version,\n original_backend=original_backend))\n weights = weights[num_weights:]\n return new_weights\n\n # Convert layers nested in Bidirectional/TimeDistributed/Model/Sequential.\n # Both transformation should be ran for both Keras 1->2 conversion\n # and for conversion of CuDNN layers.\n if layer.__class__.__name__ == 'Bidirectional':\n weights = convert_nested_bidirectional(weights)\n if layer.__class__.__name__ == 'TimeDistributed':\n weights = convert_nested_time_distributed(weights)\n\n if original_keras_version == '1':\n if layer.__class__.__name__ in ['Model', 'Sequential']:\n weights = convert_nested_model(weights)\n\n if layer.__class__.__name__ == 'TimeDistributed':\n weights = preprocess_weights_for_loading(layer.layer,\n weights,\n original_keras_version,\n original_backend)\n\n if layer.__class__.__name__ == 'Conv1D':\n shape = weights[0].shape\n # Handle Keras 1.1 format\n if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters:\n # Legacy shape:\n # (filters, input_dim, filter_length, 1)\n assert (shape[0] == layer.filters and\n shape[2:] == (layer.kernel_size[0], 1))\n weights[0] = np.transpose(weights[0], (2, 3, 1, 0))\n weights[0] = weights[0][:, 0, :, :]\n\n if layer.__class__.__name__ == 'Conv2D':\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, stack_size, filters)\n weights[0] = np.transpose(weights[0], (2, 3, 1, 0))\n\n if layer.__class__.__name__ == 'Conv2DTranspose':\n if layer.data_format == 'channels_last':\n # old: (kernel_rows, kernel_cols, stack_size, filters)\n # new: (kernel_rows, kernel_cols, filters, stack_size)\n weights[0] = np.transpose(weights[0], (0, 1, 3, 2))\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, filters, stack_size)\n weights[0] = np.transpose(weights[0], (2, 3, 0, 1))\n\n if layer.__class__.__name__ == 'Conv3D':\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, ...)\n # new: (..., stack_size, filters)\n weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))\n\n if layer.__class__.__name__ == 'GRU':\n if len(weights) == 9:\n kernel = np.concatenate([weights[0],\n weights[3],\n weights[6]], axis=-1)\n recurrent_kernel = np.concatenate([weights[1],\n weights[4],\n 
weights[7]], axis=-1)\n bias = np.concatenate([weights[2],\n weights[5],\n weights[8]], axis=-1)\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ == 'LSTM':\n if len(weights) == 12:\n # old: i, c, f, o\n # new: i, f, c, o\n kernel = np.concatenate([weights[0],\n weights[6],\n weights[3],\n weights[9]], axis=-1)\n recurrent_kernel = np.concatenate([weights[1],\n weights[7],\n weights[4],\n weights[10]], axis=-1)\n bias = np.concatenate([weights[2],\n weights[8],\n weights[5],\n weights[11]], axis=-1)\n weights = [kernel, recurrent_kernel, bias]\n\n if layer.__class__.__name__ == 'ConvLSTM2D':\n if len(weights) == 12:\n kernel = np.concatenate([weights[0],\n weights[6],\n weights[3],\n weights[9]], axis=-1)\n recurrent_kernel = np.concatenate([weights[1],\n weights[7],\n weights[4],\n weights[10]], axis=-1)\n bias = np.concatenate([weights[2],\n weights[8],\n weights[5],\n weights[11]], axis=-1)\n if layer.data_format == 'channels_first':\n # old: (filters, stack_size, kernel_rows, kernel_cols)\n # new: (kernel_rows, kernel_cols, stack_size, filters)\n kernel = np.transpose(kernel, (2, 3, 1, 0))\n recurrent_kernel = np.transpose(recurrent_kernel,\n (2, 3, 1, 0))\n weights = [kernel, recurrent_kernel, bias]\n\n conv_layers = ['Conv1D',\n 'Conv2D',\n 'Conv3D',\n 'Conv2DTranspose',\n 'ConvLSTM2D']\n if layer.__class__.__name__ in conv_layers:\n layer_weights_shape = K.int_shape(layer.weights[0])\n if _need_convert_kernel(original_backend):\n weights[0] = conv_utils.convert_kernel(weights[0])\n if layer.__class__.__name__ == 'ConvLSTM2D':\n weights[1] = conv_utils.convert_kernel(weights[1])\n if reshape and layer_weights_shape != weights[0].shape:\n if weights[0].size != np.prod(layer_weights_shape):\n raise ValueError('Weights must be of equal size to ' +\n 'apply a reshape operation. ' +\n 'Layer ' + layer.name +\n '\\'s weights have shape ' +\n str(layer_weights_shape) + ' and size ' +\n str(np.prod(layer_weights_shape)) + '. ' +\n 'The weights for loading have shape ' +\n str(weights[0].shape) + ' and size ' +\n str(weights[0].size) + '. ')\n weights[0] = np.reshape(weights[0], layer_weights_shape)\n elif layer_weights_shape != weights[0].shape:\n weights[0] = np.transpose(weights[0], (3, 2, 0, 1))\n if layer.__class__.__name__ == 'ConvLSTM2D':\n weights[1] = np.transpose(weights[1], (3, 2, 0, 1))\n\n # convert CuDNN layers\n weights = _convert_rnn_weights(layer, weights)\n\n return weights\n\n\ndef _convert_rnn_weights(layer, weights):\n \"\"\"Converts weights for RNN layers between native and CuDNN format.\n\n Input kernels for each gate are transposed and converted between Fortran\n and C layout, recurrent kernels are transposed. For LSTM biases are summed/\n split in half, for GRU biases are reshaped.\n\n Weights can be converted in both directions between `LSTM` and`CuDNNSLTM`\n and between `CuDNNGRU` and `GRU(reset_after=True)`. 
Default `GRU` is not\n compatible with `CuDNNGRU`.\n\n For missing biases in `LSTM`/`GRU` (`use_bias=False`),\n no conversion is made.\n\n # Arguments\n layer: Target layer instance.\n weights: List of source weights values (input kernels, recurrent\n kernels, [biases]) (Numpy arrays).\n\n # Returns\n A list of converted weights values (Numpy arrays).\n\n # Raises\n ValueError: for incompatible GRU layer/weights or incompatible biases\n \"\"\"\n\n def transform_kernels(kernels, func, n_gates):\n \"\"\"Transforms kernel for each gate separately using given function.\n\n # Arguments\n kernels: Stacked array of kernels for individual gates.\n func: Function applied to kernel of each gate.\n n_gates: Number of gates (4 for LSTM, 3 for GRU).\n # Returns\n Stacked array of transformed kernels.\n \"\"\"\n return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])\n\n def transpose_input(from_cudnn):\n \"\"\"Makes a function that transforms input kernels from/to CuDNN format.\n\n It keeps the shape, but changes between the layout (Fortran/C). Eg.:\n\n ```\n Keras CuDNN\n [[0, 1, 2], <---> [[0, 2, 4],\n [3, 4, 5]] [1, 3, 5]]\n ```\n\n It can be passed to `transform_kernels()`.\n\n # Arguments\n from_cudnn: `True` if source weights are in CuDNN format, `False`\n if they're in plain Keras format.\n # Returns\n Function that converts input kernel to the other format.\n \"\"\"\n order = 'F' if from_cudnn else 'C'\n\n def transform(kernel):\n return kernel.T.reshape(kernel.shape, order=order)\n\n return transform\n\n target_class = layer.__class__.__name__\n\n # convert the weights between CuDNNLSTM and LSTM\n if target_class in ['LSTM', 'CuDNNLSTM'] and len(weights) == 3:\n # determine if we're loading a CuDNNLSTM layer\n # from the number of bias weights:\n # CuDNNLSTM has (units * 8) weights; while LSTM has (units * 4)\n # if there's no bias weight in the file, skip this conversion\n units = weights[1].shape[0]\n bias_shape = weights[2].shape\n n_gates = 4\n\n if bias_shape == (2 * units * n_gates,):\n source = 'CuDNNLSTM'\n elif bias_shape == (units * n_gates,):\n source = 'LSTM'\n else:\n raise ValueError('Invalid bias shape: ' + str(bias_shape))\n\n def convert_weights(weights, from_cudnn=True):\n # transpose (and reshape) input and recurrent kernels\n kernels = transform_kernels(weights[0],\n transpose_input(from_cudnn),\n n_gates)\n recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n if from_cudnn:\n # merge input and recurrent biases into a single set\n biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)\n else:\n # Split single set of biases evenly to two sets. 
The way of\n # splitting doesn't matter as long as the two sets sum is kept.\n biases = np.tile(0.5 * weights[2], 2)\n return [kernels, recurrent_kernels, biases]\n\n if source != target_class:\n weights = convert_weights(weights, from_cudnn=source == 'CuDNNLSTM')\n\n # convert the weights between CuDNNGRU and GRU(reset_after=True)\n if target_class in ['GRU', 'CuDNNGRU'] and len(weights) == 3:\n # We can determine the source of the weights from the shape of the bias.\n # If there is no bias we skip the conversion\n # since CuDNNGRU always has biases.\n\n units = weights[1].shape[0]\n bias_shape = weights[2].shape\n n_gates = 3\n\n def convert_weights(weights, from_cudnn=True):\n kernels = transform_kernels(weights[0],\n transpose_input(from_cudnn),\n n_gates)\n recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)\n return [kernels, recurrent_kernels, biases]\n\n if bias_shape == (2 * units * n_gates,):\n source = 'CuDNNGRU'\n elif bias_shape == (2, units * n_gates):\n source = 'GRU(reset_after=True)'\n elif bias_shape == (units * n_gates,):\n source = 'GRU(reset_after=False)'\n else:\n raise ValueError('Invalid bias shape: ' + str(bias_shape))\n\n if target_class == 'CuDNNGRU':\n target = 'CuDNNGRU'\n elif layer.reset_after:\n target = 'GRU(reset_after=True)'\n else:\n target = 'GRU(reset_after=False)'\n\n # only convert between different types\n if source != target:\n types = (source, target)\n if 'GRU(reset_after=False)' in types:\n raise ValueError('%s is not compatible with %s' % types)\n if source == 'CuDNNGRU':\n weights = convert_weights(weights, from_cudnn=True)\n elif source == 'GRU(reset_after=True)':\n weights = convert_weights(weights, from_cudnn=False)\n\n return weights\n\n\ndef _need_convert_kernel(original_backend):\n \"\"\"Checks if conversion on kernel matrices is required during weight loading.\n\n The convolution operation is implemented differently in different backends.\n While TH implements convolution, TF and CNTK implement the correlation operation.\n So the channel axis needs to be flipped when TF weights are loaded on a TH model,\n or vice versa. 
However, there's no conversion required between TF and CNTK.\n\n # Arguments\n original_backend: Keras backend the weights were trained with, as a string.\n\n # Returns\n `True` if conversion on kernel matrices is required, otherwise `False`.\n \"\"\"\n if original_backend is None:\n # backend information not available\n return False\n uses_correlation = {'tensorflow': True,\n 'theano': False,\n 'cntk': True}\n if original_backend not in uses_correlation:\n # By default, do not convert the kernels if the original backend is unknown\n return False\n if K.backend() in uses_correlation:\n current_uses_correlation = uses_correlation[K.backend()]\n else:\n # Assume unknown backends use correlation\n current_uses_correlation = True\n return uses_correlation[original_backend] != current_uses_correlation\n\n\ndef load_weights_from_hdf5_group(f, layers, reshape=False):\n \"\"\"Implements topological (order-based) weight loading.\n\n # Arguments\n f: A pointer to a HDF5 group.\n layers: a list of target layers.\n reshape: Reshape weights to fit the layer when the correct number\n of values are present but the shape does not match.\n\n # Raises\n ValueError: in case of mismatch between provided layers\n and weights file.\n \"\"\"\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n filtered_layers = []\n for layer in layers:\n weights = layer.weights\n if weights:\n filtered_layers.append(layer)\n\n layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n filtered_layer_names = []\n for name in layer_names:\n g = f[name]\n weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n if weight_names:\n filtered_layer_names.append(name)\n layer_names = filtered_layer_names\n if len(layer_names) != len(filtered_layers):\n raise ValueError('You are trying to load a weight file '\n 'containing ' + str(len(layer_names)) +\n ' layers into a model with ' +\n str(len(filtered_layers)) + ' layers.')\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]\n layer = filtered_layers[k]\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(layer,\n weight_values,\n original_keras_version,\n original_backend,\n reshape=reshape)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\" in the current model) was found to '\n 'correspond to layer ' + name +\n ' in the save file. 
'\n 'However the new layer ' + layer.name +\n ' expects ' + str(len(symbolic_weights)) +\n ' weights, but the saved weights have ' +\n str(len(weight_values)) +\n ' elements.')\n weight_value_tuples += zip(symbolic_weights, weight_values)\n K.batch_set_value(weight_value_tuples)\n\n\ndef load_weights_from_hdf5_group_by_name(f, layers, skip_mismatch=False,\n reshape=False):\n \"\"\"Implements name-based weight loading.\n\n (instead of topological weight loading).\n\n Layers that have no matching name are skipped.\n\n # Arguments\n f: A pointer to a HDF5 group.\n layers: A list of target layers.\n skip_mismatch: Boolean, whether to skip loading of layers\n where there is a mismatch in the number of weights,\n or a mismatch in the shape of the weights.\n reshape: Reshape weights to fit the layer when the correct number\n of values are present but the shape does not match.\n\n # Raises\n ValueError: in case of mismatch between provided layers\n and weights file and skip_mismatch=False.\n \"\"\"\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n # New file format.\n layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n\n # Reverse index of layer name to list of layers with name.\n index = {}\n for layer in layers:\n if layer.name:\n index.setdefault(layer.name, []).append(layer)\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]\n\n for layer in index.get(name, []):\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(\n layer,\n weight_values,\n original_keras_version,\n original_backend,\n reshape=reshape)\n if len(weight_values) != len(symbolic_weights):\n if skip_mismatch:\n warnings.warn('Skipping loading of weights for '\n 'layer {}'.format(layer.name) + ' due to mismatch '\n 'in number of weights ({} vs {}).'.format(\n len(symbolic_weights), len(weight_values)))\n continue\n else:\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\") expects ' +\n str(len(symbolic_weights)) +\n ' weight(s), but the saved weights' +\n ' have ' + str(len(weight_values)) +\n ' element(s).')\n # Set values.\n for i in range(len(weight_values)):\n symbolic_shape = K.int_shape(symbolic_weights[i])\n if symbolic_shape != weight_values[i].shape:\n if skip_mismatch:\n warnings.warn('Skipping loading of weights for '\n 'layer {}'.format(layer.name) + ' due to '\n 'mismatch in shape ({} vs {}).'.format(\n symbolic_weights[i].shape,\n weight_values[i].shape))\n continue\n else:\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\"), weight ' +\n str(symbolic_weights[i]) +\n ' has shape {}'.format(symbolic_shape) +\n ', but the saved weight has shape ' +\n str(weight_values[i].shape) + '.')\n else:\n weight_value_tuples.append((symbolic_weights[i],\n weight_values[i]))\n\n K.batch_set_value(weight_value_tuples)\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.reshape",
"numpy.asarray",
"numpy.hsplit",
"numpy.tile",
"numpy.split",
"numpy.prod",
"numpy.transpose",
"numpy.array_split"
]
] |
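The module above implements Keras's public save/load entry points. A round-trip sketch using only documented API (the model architecture here is illustrative):

from keras.models import Sequential, save_model, load_model
from keras.layers import Dense

model = Sequential([Dense(4, input_shape=(8,), activation='relu'), Dense(1)])
model.compile(optimizer='adam', loss='mse')
save_model(model, 'model.h5')        # topology + weights + optimizer state
restored = load_model('model.h5')    # comes back compiled, ready to resume training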
hitfee01/3DDeepBoxRetina2D | [
"40e81a014657ba995d8043fac8feef2cdeb285c2"
] | [
"Archs_2D/build_model.py"
] | [
"import torch\nfrom Archs_2D.configs import config\nfrom nets.Backbone import build_backbone\n\nfrom Archs_2D.RetinaNet import RetinaNet\nimport numpy as np\n\ntest_config = 'MOBIL-V2-RETINA-FPN'\ntest_backbone = 'MOBI-V2'\n\ndef build_model(name):\n backbone = None\n cfg = None\n if name == 'MOBI-V2-RETINA-FPN':\n backbone = build_backbone('MOBI-V2')\n cfg = config.get_model_config('MOBI-V2-RETINA-FPN')\n if name == 'MOBI-V3-RETINA-FPN':\n backbone = build_backbone('MOBI-V3')\n cfg = config.get_model_config('MOBI-V3-RETINA-FPN')\n elif name == 'SHUFFLE-RETINA-FPN':\n backbone = build_backbone('SHUFFLE')\n cfg = config.get_model_config('SHUFFLE-RETINA-FPN')\n else:\n assert backbone is not None\n model = RetinaNet(backbone, cfg)\n return model, backbone, cfg\n\nif __name__ == '__main__':\n model, backbone, cfg = build_model('MOBI-V2-RETINA-FPN')\n input = torch.tensor(np.ones((1, 3, cfg.INTENSOR_SHAPE[0], cfg.INTENSOR_SHAPE[1]), dtype=np.float), dtype=torch.float32)\n logits, bboxes = model(input)\n print(logits)\n print(bboxes)\n\n\n"
] | [
[
"numpy.ones"
]
] |
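A hedged inference sketch for the detector factory above. The import path mirrors the record's file location (Archs_2D/build_model.py) and is an assumption; cfg.INTENSOR_SHAPE comes from the record itself.

import torch
from Archs_2D.build_model import build_model   # assumed module path

model, backbone, cfg = build_model('MOBI-V2-RETINA-FPN')
model.eval()
with torch.no_grad():                          # disable autograd for inference
    dummy = torch.zeros(1, 3, cfg.INTENSOR_SHAPE[0], cfg.INTENSOR_SHAPE[1])
    logits, bboxes = model(dummy)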
kepolol/craftassist | [
"f60a7edd0b4ea72b774cca45ba468d2e275445c2"
] | [
"python/craftassist/craftassist_agent.py"
] | [
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\nimport os\nimport sys\n\n# python/ dir, for agent.so\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\n\nimport faulthandler\nimport itertools\nimport logging\nimport numpy as np\nimport random\nimport re\nimport sentry_sdk\nimport signal\nimport time\nfrom multiprocessing import set_start_method\n\nfrom agent import Agent\nfrom agent_connection import default_agent_name\nfrom voxel_models.subcomponent_classifier import SubComponentClassifier\nfrom voxel_models.geoscorer import Geoscorer\n\nimport memory\nimport perception\nimport shapes\nfrom util import to_block_pos, pos_to_np, TimingWarn, hash_user\n\nimport default_behaviors\nfrom ttad_model_dialogue_manager import TtadModelDialogueManager\n\n\nfaulthandler.register(signal.SIGUSR1)\n\nrandom.seed(0)\nlog_formatter = logging.Formatter(\n \"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s\"\n)\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.getLogger().handlers.clear()\n\nsentry_sdk.init() # enabled if SENTRY_DSN set in env\n\nDEFAULT_BEHAVIOUR_TIMEOUT = 20\nDEFAULT_PORT = 25565\n\n\nclass CraftAssistAgent(Agent):\n def __init__(\n self,\n host=\"localhost\",\n port=DEFAULT_PORT,\n name=None,\n ttad_prev_model_path=None,\n ttad_model_dir=None,\n ttad_bert_data_dir=None,\n ttad_embeddings_path=None,\n ttad_grammar_path=None,\n semseg_model_path=None,\n voxel_model_gpu_id=-1,\n get_perception_interval=20,\n draw_fn=None,\n no_default_behavior=False,\n geoscorer_model_path=None,\n ):\n logging.info(\"CraftAssistAgent.__init__ started\")\n self.name = name or default_agent_name()\n self.no_default_behavior = no_default_behavior\n\n # files needed to set up ttad model\n if ttad_prev_model_path is None:\n ttad_prev_model_path = os.path.join(os.path.dirname(__file__), \"models/ttad/ttad.pth\")\n if ttad_model_dir is None:\n ttad_model_dir = os.path.join(os.path.dirname(__file__), \"models/ttad_bert/model/\")\n if ttad_bert_data_dir is None:\n ttad_bert_data_dir = os.path.join(\n os.path.dirname(__file__), \"models/ttad_bert/annotated_data/\"\n )\n if ttad_embeddings_path is None:\n ttad_embeddings_path = os.path.join(\n os.path.dirname(__file__), \"models/ttad/ttad_ft_embeds.pth\"\n )\n if ttad_grammar_path is None:\n ttad_grammar_path = os.path.join(\n os.path.dirname(__file__), \"models/ttad/dialogue_grammar.json\"\n )\n\n # set up the SubComponentClassifier model\n if semseg_model_path is not None:\n self.subcomponent_classifier = SubComponentClassifier(\n voxel_model_path=semseg_model_path\n )\n else:\n self.subcomponent_classifier = None\n\n # set up the Geoscorer model\n if geoscorer_model_path is not None:\n self.geoscorer = Geoscorer(merger_model_path=geoscorer_model_path)\n else:\n self.geoscorer = None\n\n self.memory = memory.AgentMemory(\n db_file=os.environ.get(\"DB_FILE\", \":memory:\"),\n db_log_path=\"agent_memory.{}.log\".format(self.name),\n )\n logging.info(\"Initialized AgentMemory\")\n\n self.dialogue_manager = TtadModelDialogueManager(\n self,\n ttad_prev_model_path,\n ttad_model_dir,\n ttad_bert_data_dir,\n ttad_embeddings_path,\n ttad_grammar_path,\n )\n logging.info(\"Initialized DialogueManager\")\n\n # Log to file\n fh = logging.FileHandler(\"agent.{}.log\".format(self.name))\n fh.setFormatter(log_formatter)\n fh.setLevel(logging.DEBUG)\n logging.getLogger().addHandler(fh)\n\n # Login to server\n logging.info(\"Attempting to connect to port {}\".format(port))\n super().__init__(host, port, 
self.name)\n logging.info(\"Logged in to server\")\n\n # Wrap C++ agent methods\n self._cpp_send_chat = self.send_chat\n self.send_chat = self._send_chat\n self.last_chat_time = 0\n\n self.get_perception_interval = get_perception_interval\n self.uncaught_error_count = 0\n self.last_task_memid = None\n self.point_targets = []\n\n def start(self):\n logging.info(\"CraftAssistAgent.start() called\")\n # start the subcomponent classification model\n if self.subcomponent_classifier:\n self.subcomponent_classifier.start()\n for self.count in itertools.count(): # count forever\n try:\n if self.count == 0:\n logging.info(\"First top-level step()\")\n self.step()\n\n except Exception as e:\n logging.exception(\n \"Default handler caught exception, db_log_idx={}\".format(\n self.memory.get_db_log_idx()\n )\n )\n self.send_chat(\"Oops! I got confused and wasn't able to complete my last task :(\")\n sentry_sdk.capture_exception(e)\n self.memory.task_stack_clear()\n self.dialogue_manager.dialogue_stack.clear()\n self.uncaught_error_count += 1\n if self.uncaught_error_count >= 100:\n sys.exit(1)\n\n def step(self):\n self.pos = to_block_pos(pos_to_np(self.get_player().pos))\n\n # remove old point targets\n self.point_targets = [pt for pt in self.point_targets if time.time() - pt[1] < 6]\n\n # Update memory with current world state\n # Removed get_perception call due to very slow updates on non-flatworlds\n with TimingWarn(2):\n self.memory.update(self)\n\n # Process incoming chats\n self.dialogue_step()\n\n # Step topmost task on stack\n self.task_step()\n\n def task_step(self, sleep_time=0.25):\n # Clean finished tasks\n while (\n self.memory.task_stack_peek() and self.memory.task_stack_peek().task.check_finished()\n ):\n self.memory.task_stack_pop()\n\n # Maybe add default task\n if not self.no_default_behavior:\n self.maybe_run_slow_defaults()\n\n # If nothing to do, wait a moment\n if self.memory.task_stack_peek() is None:\n time.sleep(sleep_time)\n return\n\n # If something to do, step the topmost task\n task_mem = self.memory.task_stack_peek()\n if task_mem.memid != self.last_task_memid:\n logging.info(\"Starting task {}\".format(task_mem.task))\n self.last_task_memid = task_mem.memid\n task_mem.task.step(self)\n self.memory.task_stack_update_task(task_mem.memid, task_mem.task)\n\n def get_time(self):\n # round to 100th of second, return as\n # n hundreth of seconds since agent init\n return self.memory.get_time()\n\n def get_perception(self, force=False):\n \"\"\"\n Get both block objects and component objects and put them\n in memory\n \"\"\"\n if not force and (\n self.count % self.get_perception_interval != 0\n or self.memory.task_stack_peek() is not None\n ):\n return\n\n block_objs_for_vision = []\n for obj in perception.all_nearby_objects(self.get_blocks, self.pos):\n memory.BlockObjectNode.create(self.memory, obj)\n # If any xyz of obj is has not been labeled\n if any([(not self.memory.get_component_object_ids_by_xyz(xyz)) for xyz, _ in obj]):\n block_objs_for_vision.append(obj)\n\n # TODO formalize this, make a list of all perception calls to make, etc.\n # note this directly adds the memories\n perception.get_all_nearby_holes(self, self.pos, radius=15)\n perception.get_nearby_airtouching_blocks(self, self.pos, radius=15)\n\n if self.subcomponent_classifier is None:\n return\n\n for obj in block_objs_for_vision:\n self.subcomponent_classifier.block_objs_q.put(obj)\n\n # everytime we try to retrieve as many recognition results as possible\n while not 
self.subcomponent_classifier.loc2labels_q.empty():\n loc2labels, obj = self.subcomponent_classifier.loc2labels_q.get()\n loc2ids = dict(obj)\n label2blocks = {}\n\n def contaminated(blocks):\n \"\"\"\n Check if blocks are still consistent with the current world\n \"\"\"\n mx, Mx, my, My, mz, Mz = shapes.get_bounds(blocks)\n yzxb = self.get_blocks(mx, Mx, my, My, mz, Mz)\n for b, _ in blocks:\n x, y, z = b\n if loc2ids[b][0] != yzxb[y - my, z - mz, x - mx, 0]:\n return True\n return False\n\n for loc, labels in loc2labels.items():\n b = (loc, loc2ids[loc])\n for l in labels:\n if l in label2blocks:\n label2blocks[l].append(b)\n else:\n label2blocks[l] = [b]\n for l, blocks in label2blocks.items():\n ## if the blocks are contaminated we just ignore\n if not contaminated(blocks):\n memory.ComponentObjectNode.create(self.memory, blocks, [l])\n\n def maybe_run_slow_defaults(self):\n \"\"\"Pick a default task task to run\n with a low probability\"\"\"\n if self.memory.task_stack_peek() or len(self.dialogue_manager.dialogue_stack) > 0:\n return\n\n # list of (prob, default function) pairs\n visible_defaults = [\n (0.001, default_behaviors.build_random_shape),\n (0.005, default_behaviors.come_to_player),\n ]\n # default behaviors of the agent not visible in the game\n invisible_defaults = []\n\n defaults = (\n visible_defaults + invisible_defaults\n if time.time() - self.last_chat_time > DEFAULT_BEHAVIOUR_TIMEOUT\n else invisible_defaults\n )\n\n defaults = [(p, f) for (p, f) in defaults if f not in self.memory.banned_default_behaviors]\n\n def noop(*args):\n pass\n\n defaults.append((1 - sum(p for p, _ in defaults), noop)) # noop with remaining prob\n\n # weighted random choice of functions\n p, fns = zip(*defaults)\n fn = np.random.choice(fns, p=p)\n if fn != noop:\n logging.info(\"Default behavior: {}\".format(fn))\n fn(self)\n\n def dialogue_step(self):\n \"\"\"Process incoming chats and modify task stack\"\"\"\n raw_incoming_chats = self.get_incoming_chats()\n if raw_incoming_chats:\n # force to get objects\n self.get_perception(force=True)\n # logging.info(\"Incoming chats: {}\".format(raw_incoming_chats))\n\n incoming_chats = []\n for raw_chat in raw_incoming_chats:\n match = re.search(\"^<([^>]+)> (.*)\", raw_chat)\n if match is None:\n logging.info(\"Ignoring chat: {}\".format(raw_chat))\n continue\n\n speaker, chat = match.group(1), match.group(2)\n speaker_hash = hash_user(speaker)\n logging.info(\"Incoming chat: ['{}' -> {}]\".format(speaker_hash, chat))\n if chat.startswith(\"/\"):\n continue\n incoming_chats.append((speaker, chat))\n self.memory.add_chat(self.memory.get_player_by_name(speaker).memid, chat)\n\n if len(incoming_chats) > 0:\n # change this to memory.get_time() format?\n self.last_chat_time = time.time()\n # for now just process the first incoming chat\n self.dialogue_manager.step(incoming_chats[0])\n else:\n self.dialogue_manager.step((None, \"\"))\n\n # TODO reset all blocks in point area to what they\n # were before the point action no matter what\n # so e.g. 
player construction in pointing area during point\n # is reverted\n def safe_get_changed_blocks(self):\n blocks = self.get_changed_blocks()\n safe_blocks = []\n if len(self.point_targets) > 0:\n for point_target in self.point_targets:\n pt = point_target[0]\n for b in blocks:\n x, y, z = b[0]\n xok = x < pt[0] or x > pt[3]\n yok = y < pt[1] or y > pt[4]\n zok = z < pt[2] or z > pt[5]\n if xok and yok and zok:\n safe_blocks.append(b)\n else:\n safe_blocks = blocks\n return safe_blocks\n\n def point_at(self, target, sleep=None):\n \"\"\"Bot pointing.\n\n Args:\n target: list of x1 y1 z1 x2 y2 z2, where:\n x1 <= x2,\n y1 <= y2,\n z1 <= z2.\n \"\"\"\n assert len(target) == 6\n self.send_chat(\"/point {} {} {} {} {} {}\".format(*target))\n self.point_targets.append((target, time.time()))\n # sleep before the bot can take any actions\n # otherwise there might be bugs since the object is flashing\n # deal with this in the task...\n if sleep:\n time.sleep(sleep)\n\n def relative_head_pitch(self, angle):\n # warning: pitch is flipped!\n new_pitch = self.get_player().look.pitch - angle\n self.set_look(self.get_player().look.yaw, new_pitch)\n\n def _send_chat(self, chat: str):\n logging.info(\"Sending chat: {}\".format(chat))\n self.memory.add_chat(self.memory.self_memid, chat)\n return self._cpp_send_chat(chat)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--semseg_model_path\", type=str, help=\"path to semantic segmentation model\"\n )\n parser.add_argument(\"--gpu_id\", type=int, default=-1, help=\"GPU id (-1 for cpu)\")\n parser.add_argument(\"--ttad_prev_model_path\", help=\"path to previous TTAD model\")\n parser.add_argument(\"--ttad_model_dir\", help=\"path to current listener model dir\")\n parser.add_argument(\"--ttad_bert_data_dir\", help=\"path to annotated data\")\n parser.add_argument(\"--geoscorer_model_path\", help=\"path to geoscorer model\")\n parser.add_argument(\"--draw_vis\", action=\"store_true\", help=\"use visdom to draw agent vision\")\n parser.add_argument(\n \"--no_default_behavior\",\n action=\"store_true\",\n help=\"do not perform default behaviors when idle\",\n )\n parser.add_argument(\"--name\", help=\"Agent login name\")\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\", help=\"Debug logging\")\n parser.add_argument(\"--port\", type=int, default=25565)\n opts = parser.parse_args()\n\n # set up stdout logging\n sh = logging.StreamHandler()\n sh.setLevel(logging.DEBUG if opts.verbose else logging.INFO)\n sh.setFormatter(log_formatter)\n logging.getLogger().addHandler(sh)\n logging.info(\"Info logging\")\n logging.debug(\"Debug logging\")\n\n draw_fn = None\n if opts.draw_vis:\n import train_cnn\n\n draw_fn = train_cnn.draw_img\n\n set_start_method(\"spawn\", force=True)\n\n sa = CraftAssistAgent(\n ttad_prev_model_path=opts.ttad_prev_model_path,\n port=opts.port,\n ttad_model_dir=opts.ttad_model_dir,\n ttad_bert_data_dir=opts.ttad_bert_data_dir,\n semseg_model_path=opts.semseg_model_path,\n voxel_model_gpu_id=opts.gpu_id,\n draw_fn=draw_fn,\n no_default_behavior=opts.no_default_behavior,\n name=opts.name,\n geoscorer_model_path=opts.geoscorer_model_path,\n )\n sa.start()\n"
] | [
[
"numpy.random.choice"
]
] |
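Editor's note on the row above: its one extracted API, `numpy.random.choice`, is used in `maybe_run_slow_defaults` to pick a default behavior by weighted random choice over callables. A minimal, self-contained sketch of that pattern; the behavior functions are hypothetical stand-ins for the agent's `default_behaviors` module:

```python
import numpy as np

def build_random_shape(agent):   # hypothetical stand-in for default_behaviors.build_random_shape
    print("building a random shape")

def come_to_player(agent):       # hypothetical stand-in for default_behaviors.come_to_player
    print("walking to the player")

def noop(agent):
    pass

# (prob, fn) pairs; a noop soaks up the remaining probability mass,
# exactly as in maybe_run_slow_defaults above
defaults = [(0.001, build_random_shape), (0.005, come_to_player)]
defaults.append((1 - sum(p for p, _ in defaults), noop))

probs, fns = zip(*defaults)
chosen = np.random.choice(fns, p=probs)  # weighted random choice of a callable
chosen(agent=None)
```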
GauravJain28/ML-Assignments | [
"0de464fe6564a0bad43f7962c92563fe0a988285"
] | [
"A5-Yoga-Pose-Classification/Week 4/train_2019CS10407_2019CS10349.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils, models\n\nfrom skimage import io, transform\n\nimport matplotlib.pyplot as plt # for plotting\nimport numpy as np\nimport pandas as pd\nimport glob\nimport sys\nimport os\nimport PIL\nfrom sklearn.model_selection import KFold\nimport torchvision.models as models\n\nfrom IPython.display import Image\n\nfrom torch.autograd import Variable\nfrom torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout\nfrom torch.optim import Adam, SGD\n\ndevice = (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ntrainfile = sys.argv[1]\nmodelfile = sys.argv[2]\nmodelfile += \"model.pth\"\n\nimg_train_folder=\"\"\n\n\nclass CustomDataset(torch.utils.data.Dataset):\n def __init__(self, csv_path, images_folder, transform = None, train=True):\n self.df = pd.read_csv(csv_path)\n self.is_train = train\n self.images_folder = images_folder\n self.transform = transform\n self.class2index = {\n \"Virabhadrasana\":0,\n \"Vrikshasana\":1,\n \"Utkatasana\":2,\n \"Padahastasana\":3,\n \"Katichakrasana\":4,\n \"TriyakTadasana\":5,\n \"Gorakshasana\":6,\n \"Tadasana\":7,\n \"Natarajasana\":8, \n \"Pranamasana\":9,\n \"ParivrittaTrikonasana\":10,\n \"Tuladandasana\":11,\n \"Santolanasana\":12,\n \"Still\":13,\n \"Natavarasana\":14,\n \"Garudasana\":15,\n \"Naukasana\":16,\n \"Ardhachakrasana\":17,\n \"Trikonasana\":18,\n\n }\n\n def __len__(self):\n return len(self.df)\n def __getitem__(self, index):\n filename = self.df[\"name\"].iloc[index]\n if self.is_train:\n label = self.class2index[self.df[\"category\"].iloc[index]]\n else:\n label = -1\n image = PIL.Image.open(os.path.join(self.images_folder, filename))\n if self.transform is not None:\n image = self.transform(image)\n sample = {\"images\": image, \"labels\": label}\n return sample\n\n\n\nBATCH_SIZE = 80\nNUM_WORKERS = 20\nstats = ((0.4914, 0.4822, 0.5065), (0.2023, 0.1994, 0.2010))\n\n\nimg_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),\n transforms.Resize(size=(299,299)),\n transforms.ToTensor(),\n transforms.Normalize(*stats,inplace=True)])\n\ntrain_data = trainfile \ntrain_dataset = CustomDataset(csv_path = train_data, images_folder = img_train_folder, transform=img_transforms, train=True)\n\n#architecture 2\n\nclass Net_drop_1(Module): \n def __init__(self):\n super(Net_drop_1, self).__init__()\n\n self.cnn_layers = Sequential(\n \n Conv2d(3, 32, kernel_size=3, stride=1,padding=1),\n BatchNorm2d(32),\n ReLU(inplace=True),\n Dropout(p = 0.2),\n \n Conv2d(32, 64, kernel_size=3, stride=1,padding=1),\n BatchNorm2d(64),\n ReLU(inplace=True),\n MaxPool2d(kernel_size=2, stride=2),\n Dropout(p = 0.2),\n \n Conv2d(64, 128, kernel_size=3, stride=1,padding=1),\n BatchNorm2d(128),\n ReLU(inplace=True),\n MaxPool2d(kernel_size=2, stride=2),\n Dropout(p = 0.2),\n \n Conv2d(128, 128, kernel_size=3, stride=1,padding=1),\n BatchNorm2d(128),\n ReLU(inplace=True),\n MaxPool2d(kernel_size=2, stride=2),\n Dropout(p = 0.2),\n \n Conv2d(128, 256, kernel_size=3, stride=1,padding=1),\n BatchNorm2d(256),\n ReLU(inplace=True),\n MaxPool2d(kernel_size=2, stride=2),\n Dropout(p = 0.2),\n \n Conv2d(256, 512, kernel_size=3, stride=1,padding=1),\n ReLU(inplace=True),\n Dropout(p = 0.2),\n )\n\n self.linear_layers = Sequential(\n Linear(512*4*4 , 512),\n ReLU(inplace=True),\n Dropout(p = 0.2),\n 
Linear(512, 64),\n ReLU(inplace=True),\n Dropout(p = 0.2),\n Linear(64 , 19),\n )\n\n def forward(self, x):\n x = self.cnn_layers(x)\n x = x.view(x.size(0), -1)\n x = self.linear_layers(x)\n return x\n\nclass Inception_Model(Module):\n def __init__(self, pretrained=True):\n super(Inception_Model,self).__init__()\n \n self.m = models.inception_v3(pretrained=True)\n self.m.fc = nn.Linear(self.m.fc.in_features, 19)\n\n def forward(self, xb):\n return self.m(xb)\n\ndef train(epoch, x, y, criterion, optimizer, model):\n model.train()\n \n x_train, y_train = Variable(x), Variable(y)\n \n if torch.cuda.is_available():\n x_train = x_train.cuda()\n y_train = y_train.cuda()\n \n optimizer.zero_grad()\n \n output_train = model(x_train).logits\n \n loss_train = criterion(output_train, y_train)\n \n loss_train.backward()\n optimizer.step()\n tr_loss = loss_train.item()\n \n return tr_loss\n\n\ndef reset_weights(m):\n \n for layer in m.children():\n if hasattr(layer, 'reset_parameters'):\n print(f'Reset trainable parameters of layer = {layer}')\n layer.reset_parameters()\n\n\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, \n batch_size=BATCH_SIZE, num_workers = NUM_WORKERS, shuffle=False)\n\n\ntorch.manual_seed(51)\ncnnmodel = Inception_Model()\n\nprint(sum(p.numel() for p in cnnmodel.parameters()))\n\noptimizer = SGD(cnnmodel.parameters(), lr=0.1, momentum=0.9,nesterov=True)\ncriterion = CrossEntropyLoss()\nscheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr = 0.1, epochs = 20, steps_per_epoch = len(train_loader))\n\nif torch.cuda.is_available():\n cnnmodel = cnnmodel.cuda()\n criterion = criterion.cuda()\n\nepochs = 9\n\nfor epoch in range(epochs):\n \n loss_avg = 0\n count = 0\n for batch_idx, sample in enumerate(train_loader):\n images = sample['images']\n labels = sample['labels']\n \n if torch.cuda.is_available():\n images = images.cuda()\n labels = labels.cuda()\n \n loss = train(epoch, images, labels, criterion, optimizer, cnnmodel)\n loss_avg += loss\n count+=1\n scheduler.step()\n \n loss_avg = loss_avg/count\n #lossf.write(str(loss_avg) + '\\n')\n print(\"Training loss -> Epoch\" + str(epoch), loss_avg)\n\n torch.save(cnnmodel.state_dict(), modelfile)\n \n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.autograd.Variable",
"torch.nn.BatchNorm2d",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"torch.nn.CrossEntropyLoss"
]
] |
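Editor's note on the training script above: `OneCycleLR` is constructed with `steps_per_epoch=len(train_loader)`, which means it expects one `scheduler.step()` per batch, but the script calls it once per epoch, so the schedule advances far more slowly than intended. A minimal sketch of the per-batch stepping that `OneCycleLR` is designed for (model, loader, and sizes are placeholders, not the assignment's data):

```python
import torch
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import OneCycleLR

model = nn.Linear(10, 2)                                    # placeholder model
loader = [(torch.randn(4, 10), torch.randint(0, 2, (4,)))   # placeholder batches
          for _ in range(8)]
optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9, nesterov=True)
criterion = nn.CrossEntropyLoss()

epochs = 2
scheduler = OneCycleLR(optimizer, max_lr=0.1, epochs=epochs,
                       steps_per_epoch=len(loader))

for epoch in range(epochs):
    for x, y in loader:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
        scheduler.step()   # stepped once per batch, matching steps_per_epoch
```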
Rakeshpatil01/ReProj | [
"446c9b1876fc014389cb87e68cf221850cd294d2"
] | [
"simple_inventory_mrp.py"
] | [
"from dataclasses import dataclass\nfrom typing import Tuple, Dict, Mapping\nfrom modules.markov_process import MarkovRewardProcess\nfrom modules.markov_process import FiniteMarkovRewardProcess\nfrom modules.markov_process import State, NonTerminal\nfrom scipy.stats import poisson\nfrom modules.distribution import SampledDistribution, Categorical, \\\n FiniteDistribution\nimport numpy as np\n\n\n@dataclass(frozen=True)\nclass InventoryState:\n on_hand: int\n on_order: int\n\n def inventory_position(self) -> int:\n return self.on_hand + self.on_order\n\n\nclass SimpleInventoryMRP(MarkovRewardProcess[InventoryState]):\n\n def __init__(\n self,\n capacity: int,\n poisson_lambda: float,\n holding_cost: float,\n stockout_cost: float\n ):\n self.capacity = capacity\n self.poisson_lambda: float = poisson_lambda\n self.holding_cost: float = holding_cost\n self.stockout_cost: float = stockout_cost\n\n def transition_reward(\n self,\n state: NonTerminal[InventoryState]\n ) -> SampledDistribution[Tuple[State[InventoryState], float]]:\n\n def sample_next_state_reward(state=state) ->\\\n Tuple[State[InventoryState], float]:\n demand_sample: int = np.random.poisson(self.poisson_lambda)\n ip: int = state.state.inventory_position()\n next_state: InventoryState = InventoryState(\n max(ip - demand_sample, 0),\n max(self.capacity - ip, 0)\n )\n reward: float = - self.holding_cost * state.on_hand\\\n - self.stockout_cost * max(demand_sample - ip, 0)\n return NonTerminal(next_state), reward\n\n return SampledDistribution(sample_next_state_reward)\n\n\nclass SimpleInventoryMRPFinite(FiniteMarkovRewardProcess[InventoryState]):\n\n def __init__(\n self,\n capacity: int,\n poisson_lambda: float,\n holding_cost: float,\n stockout_cost: float\n ):\n self.capacity: int = capacity\n self.poisson_lambda: float = poisson_lambda\n self.holding_cost: float = holding_cost\n self.stockout_cost: float = stockout_cost\n\n self.poisson_distr = poisson(poisson_lambda)\n super().__init__(self.get_transition_reward_map())\n\n def get_transition_reward_map(self) -> \\\n Mapping[\n InventoryState,\n FiniteDistribution[Tuple[InventoryState, float]]\n ]:\n d: Dict[InventoryState, Categorical[Tuple[InventoryState, float]]] = {}\n for alpha in range(self.capacity + 1):\n for beta in range(self.capacity + 1 - alpha):\n state = InventoryState(alpha, beta)\n ip = state.inventory_position()\n beta1 = self.capacity - ip\n base_reward = - self.holding_cost * state.on_hand\n sr_probs_map: Dict[Tuple[InventoryState, float], float] =\\\n {(InventoryState(ip - i, beta1), base_reward):\n self.poisson_distr.pmf(i) for i in range(ip)}\n probability = 1 - self.poisson_distr.cdf(ip - 1)\n reward = base_reward - self.stockout_cost *\\\n (probability * (self.poisson_lambda - ip) +\n ip * self.poisson_distr.pmf(ip))\n sr_probs_map[(InventoryState(0, beta1), reward)] = probability\n d[state] = Categorical(sr_probs_map)\n return d\n\n\nif __name__ == '__main__':\n user_capacity = 2\n user_poisson_lambda = 1.0\n user_holding_cost = 1.0\n user_stockout_cost = 10.0\n\n user_gamma = 0.9\n\n si_mrp = SimpleInventoryMRPFinite(\n capacity=user_capacity,\n poisson_lambda=user_poisson_lambda,\n holding_cost=user_holding_cost,\n stockout_cost=user_stockout_cost\n )\n\n from modules.markov_process import FiniteMarkovProcess\n print(\"Transition Map\")\n print(\"--------------\")\n print(FiniteMarkovProcess(\n {s.state: Categorical({s1.state: p for s1, p in v.table().items()})\n for s, v in si_mrp.transition_map.items()}\n ))\n\n print(\"Transition Reward Map\")\n 
print(\"---------------------\")\n print(si_mrp)\n\n print(\"Stationary Distribution\")\n print(\"-----------------------\")\n si_mrp.display_stationary_distribution()\n print()\n\n print(\"Reward Function\")\n print(\"---------------\")\n si_mrp.display_reward_function()\n print()\n\n print(\"Value Function\")\n print(\"--------------\")\n si_mrp.display_value_function(gamma=user_gamma)\n print()\n"
] | [
[
"scipy.stats.poisson",
"numpy.random.poisson"
]
] |
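Editor's note on the row above: the finite-MRP reward folds the expected stockout penalty into a closed form. For Poisson demand D with mean lambda and inventory position ip, the script's expression `(1 - F(ip-1)) * (lambda - ip) + ip * pmf(ip)` (F the Poisson CDF) equals E[max(D - ip, 0)]. A quick numerical check of that identity against the direct truncated sum:

```python
import numpy as np
from scipy.stats import poisson

lam, ip = 1.0, 2
distr = poisson(lam)

# closed form used in get_transition_reward_map above
closed_form = (1 - distr.cdf(ip - 1)) * (lam - ip) + ip * distr.pmf(ip)

# direct sum of (d - ip) * P(D = d) over d > ip, truncated far into the tail
ds = np.arange(ip + 1, 200)
direct = np.sum((ds - ip) * distr.pmf(ds))

print(closed_form, direct)           # ~0.1036 for lam=1, ip=2
assert np.isclose(closed_form, direct)
```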
iory/chainercv | [
"ecb1953f78c526dfd38308d68a4094c9f4df3a8d"
] | [
"examples/faster_rcnn/train.py"
] | [
"from __future__ import division\n\nimport argparse\nimport numpy as np\n\nimport chainer\nfrom chainer.datasets import ConcatenatedDataset\nfrom chainer.datasets import TransformDataset\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer.training.triggers import ManualScheduleTrigger\n\nfrom chainercv.datasets import voc_bbox_label_names\nfrom chainercv.datasets import VOCBboxDataset\nfrom chainercv.extensions import DetectionVOCEvaluator\nfrom chainercv.links import FasterRCNNVGG16\nfrom chainercv.links.model.faster_rcnn import FasterRCNNTrainChain\nfrom chainercv import transforms\n\n\nclass Transform(object):\n\n def __init__(self, faster_rcnn):\n self.faster_rcnn = faster_rcnn\n\n def __call__(self, in_data):\n img, bbox, label = in_data\n _, H, W = img.shape\n img = self.faster_rcnn.prepare(img)\n _, o_H, o_W = img.shape\n scale = o_H / H\n bbox = transforms.resize_bbox(bbox, (H, W), (o_H, o_W))\n\n # horizontally flip\n img, params = transforms.random_flip(\n img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(\n bbox, (o_H, o_W), x_flip=params['x_flip'])\n\n return img, bbox, label, scale\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='ChainerCV training example: Faster R-CNN')\n parser.add_argument('--dataset', choices=('voc07', 'voc0712'),\n help='The dataset to use: VOC07, VOC07+12',\n default='voc07')\n parser.add_argument('--gpu', '-g', type=int, default=-1)\n parser.add_argument('--lr', '-l', type=float, default=1e-3)\n parser.add_argument('--out', '-o', default='result',\n help='Output directory')\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--step_size', '-ss', type=int, default=50000)\n parser.add_argument('--iteration', '-i', type=int, default=70000)\n args = parser.parse_args()\n\n np.random.seed(args.seed)\n\n if args.dataset == 'voc07':\n train_data = VOCBboxDataset(split='trainval', year='2007')\n elif args.dataset == 'voc0712':\n train_data = ConcatenatedDataset(\n VOCBboxDataset(year='2007', split='trainval'),\n VOCBboxDataset(year='2012', split='trainval'))\n test_data = VOCBboxDataset(split='test', year='2007',\n use_difficult=True, return_difficult=True)\n faster_rcnn = FasterRCNNVGG16(n_fg_class=len(voc_bbox_label_names),\n pretrained_model='imagenet')\n faster_rcnn.use_preset('evaluate')\n model = FasterRCNNTrainChain(faster_rcnn)\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))\n\n train_data = TransformDataset(train_data, Transform(faster_rcnn))\n\n train_iter = chainer.iterators.MultiprocessIterator(\n train_data, batch_size=1, n_processes=None, shared_mem=100000000)\n test_iter = chainer.iterators.SerialIterator(\n test_data, batch_size=1, repeat=False, shuffle=False)\n updater = chainer.training.updaters.StandardUpdater(\n train_iter, optimizer, device=args.gpu)\n\n trainer = training.Trainer(\n updater, (args.iteration, 'iteration'), out=args.out)\n\n trainer.extend(\n extensions.snapshot_object(model.faster_rcnn, 'snapshot_model.npz'),\n trigger=(args.iteration, 'iteration'))\n trainer.extend(extensions.ExponentialShift('lr', 0.1),\n trigger=(args.step_size, 'iteration'))\n\n log_interval = 20, 'iteration'\n plot_interval = 3000, 'iteration'\n print_interval = 20, 'iteration'\n\n trainer.extend(chainer.training.extensions.observe_lr(),\n 
trigger=log_interval)\n trainer.extend(extensions.LogReport(trigger=log_interval))\n trainer.extend(extensions.PrintReport(\n ['iteration', 'epoch', 'elapsed_time', 'lr',\n 'main/loss',\n 'main/roi_loc_loss',\n 'main/roi_cls_loss',\n 'main/rpn_loc_loss',\n 'main/rpn_cls_loss',\n 'validation/main/map',\n ]), trigger=print_interval)\n trainer.extend(extensions.ProgressBar(update_interval=10))\n\n if extensions.PlotReport.available():\n trainer.extend(\n extensions.PlotReport(\n ['main/loss'],\n file_name='loss.png', trigger=plot_interval\n ),\n trigger=plot_interval\n )\n\n trainer.extend(\n DetectionVOCEvaluator(\n test_iter, model.faster_rcnn, use_07_metric=True,\n label_names=voc_bbox_label_names),\n trigger=ManualScheduleTrigger(\n [args.step_size, args.iteration], 'iteration'))\n\n trainer.extend(extensions.dump_graph('main/loss'))\n\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.random.seed"
]
] |
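Editor's note on the row above: the `Transform` class resizes the image, rescales the boxes, then mirrors the boxes to match a random horizontal flip. Assuming chainercv's `(y_min, x_min, y_max, x_max)` bounding-box convention, a pure-NumPy sketch of what `transforms.flip_bbox` does for `x_flip=True` (an illustration of the geometry, not chainercv's exact code):

```python
import numpy as np

def flip_bbox_x(bbox, size):
    """Mirror (y_min, x_min, y_max, x_max) boxes across the vertical axis.

    bbox: (R, 4) float array; size: (H, W) of the image the boxes live in.
    """
    H, W = size
    flipped = bbox.copy()
    flipped[:, 1] = W - bbox[:, 3]  # new x_min comes from the old x_max
    flipped[:, 3] = W - bbox[:, 1]  # new x_max comes from the old x_min
    return flipped

bbox = np.array([[10.0, 20.0, 50.0, 60.0]])
print(flip_bbox_x(bbox, (100, 100)))  # [[10. 40. 50. 80.]]
```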
jobovy/flexce | [
"92808d9f93109e698f035359f9a0bb7efa8524dd"
] | [
"flexCE/calc_yields/karakas10_yields.py"
] | [
"\"\"\"Generate finely spaced grid of AGB yields using the Karakas (2010) yields.\n\nKarakas & Lattanzio (2010): M = 1.0, 1.25, 1.5, 1.75, 1.9, 2.0, 2.1, 2.25,\n2.5, 3.0, 3.5, 4.0, 5.0, 6.0, 6.5 (Zsolar only); Z = 0.0001, 0.004, 0.008,\n0.02\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport os\nfrom os.path import join\nimport sys\nimport copy\n\nimport numpy as np\nfrom scipy import interpolate\nimport pandas as pd\n\n\n#---- Set Paths -----\npath_calc_yields = join(os.path.abspath(os.path.dirname(__file__)), '')\npath_flexce = join('/'.join(path_calc_yields.split('/')[:-2]), '')\npath_fileio = join(path_flexce, 'fileio')\npath_data = join(path_flexce, 'data')\npath_yields = join(path_data, 'yields')\npath_yldgen = join(path_yields, 'general')\npath_k10 = join(path_yields, 'karakas10', 'iso_yields')\nsys.path.append(path_fileio)\n#-------------------\n\nfrom pickle_io import pickle_write\n\n#----- Read in Computed Yields -----\n\n# K10 yields: 61 metallicity/mass combinations; 4 metallicities: Z = 1e-4,\n# 4e-3, 8e-3, 2e-2; 17 masses (but not all masses at each metallicity): 1.0,\n# 1.25, 1.5, 1.75, 1.9, 2.0, 2.1, 2.25, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0,\n# 6.5 Msun; 19 elements (74 species)\n\n# K10 yields do not include synthetic thermal pulses (see K10 Section 4),\n# whereas KL07 recommend using the lambda_last = lambda_final yields (see KL07\n# Section 4)that account for extra thermal pulses that likely occur near the\n# tip of the AGB where the code has difficulty converging (but there is still a\n# large envelope remaining).\n\n# K10 computed yields for the 6 Msun, 1e-4 Zsun model assuming two different\n# mass loss prescriptions (VW93 and Reimer's). I chose to use the Reimer's\n# mass loss because that prescription is used for the other intermediate mass\n# models (3.0--5.5 Msun) at that metallicity. See K10 Table 1 for mass loss\n# prescriptions for each model.\n\n# The K10 \"yield\" for a given species refers to actual mass of that species\n# returned to the ISM minus the mass of that species that would have been\n# returned if the wind had the initial composition of the star (i.e., the net\n# yield). 
When I implement mass return in the simulation, I need to calculate\n# how much total mass was lost, multiply this by the initial composition of the\n# star, and then add the net yield (K10 call this the \"net yield\" in their data\n# table), which can be negative for species that are destroyed.\n\ndata = pd.read_csv(join(path_k10, 'tablea2.dat'), delim_whitespace=True,\n usecols=[3, 4], names=['spec', 'at_mass'])\nspecies = np.array(data.spec)[:77]\natomic_mass = np.array(data.at_mass)[:77]\n\ncols = ['mgrid', 'zgrid', 'mrem', 'yield', 'mlost', 'minit']\nkwargs = dict(delim_whitespace=True, usecols=(0, 1, 2, 5, 6, 7), names=cols)\nz22tab = np.array(pd.read_csv(join(path_k10, 'tablea2.dat'), **kwargs)).T\nz83tab = np.array(pd.read_csv(join(path_k10, 'tablea3.dat'), **kwargs)).T\nz43tab = np.array(pd.read_csv(join(path_k10, 'tablea4.dat'), **kwargs)).T\nz14tab = np.array(pd.read_csv(join(path_k10, 'tablea5.dat'), **kwargs)).T\n\ntab = [z14tab, z43tab, z83tab, z22tab]\nzm_grid = [] # metallicities and masses of the models\nm_grid = [] # masses of the models\nz_grid = [] # metallicities of the models\nm_rem = [] # remnant (final) mass\nfor i in range(4):\n for j in range(tab[i].shape[1]):\n ztmp_str = '{:.0e}'.format(tab[i][1, j])[0] + \\\n '{:.0e}'.format(tab[i][1, j])[-1]\n zmtmp = ''.join(('z', ztmp_str, 'm', str(tab[i][0, j])))\n if zmtmp in zm_grid:\n pass\n else:\n zm_grid.append(zmtmp)\n m_grid.append(tab[i][0, j])\n z_grid.append(tab[i][1, j])\n m_rem.append(tab[i][2, j])\n\nzm_grid = np.array(zm_grid)\nm_grid = np.array(m_grid)\nz_grid = np.array(z_grid)\nm_rem = np.array(m_rem)\nm_ejected = m_grid - m_rem # ejected mass\n\n# net yields of each species\nyields_spec = np.concatenate([z14tab[3], z43tab[3], z83tab[3], z22tab[3]])\nyields_spec.resize(62, 77)\n# delete model with Z = 1e-4, M = 6 and VW93 mass loss\nyields_spec = np.delete(yields_spec, np.s_[15], axis=0)\n\n# mass lost of each species\nm_lost_spec = np.concatenate([z14tab[4], z43tab[4], z83tab[4], z22tab[4]])\nm_lost_spec.resize(62, 77)\nm_lost_spec = np.delete(m_lost_spec, np.s_[15], axis=0)\n\n# mass lost of each species if wind had the initial composition of the star\nm_init_wind_spec = np.concatenate([z14tab[5], z43tab[5], z83tab[5], z22tab[5]])\nm_init_wind_spec.resize(62, 77)\nm_init_wind_spec = np.delete(m_init_wind_spec, np.s_[15], axis=0)\n\nk10_m = np.array([1.0, 1.25, 1.5, 1.75, 1.9, 2.0, 2.1, 2.25, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 5.5, 6.0, 6.5])\n\n\n# Included 'al-6' but not 'al*6' [always = 0], which are al26, in the total\n# Al mass ejected. Renamed 'al-6' as 'al26'. Does not keep track of 'g'\n# (elements from ni64 to Bi). 
s34 includes the abundances of species from s34\n# to Mn, and the total S mass ejected includes these other species, although\n# they almost always are sub-dominant to s32 by about a factor of 20.)\n\nk10_iso = np.delete(copy.deepcopy(species[2:]), np.s_[39])\nk10_iso[38] = 'al26'\nn_sym = len(k10_iso)\n\n# net yields for 74 isotopes\nyields = np.delete(copy.deepcopy(yields_spec), np.s_[0, 1, 41], axis=1)\n\n# mass lost for 74 isotopes\nm_lost = np.delete(copy.deepcopy(m_lost_spec), np.s_[0, 1, 41], axis=1)\n\n\n# -----------------------------\n\n\n# ---- Extrapolated Yields -----\n\n# --- Major grid points (filling out computed grid)\n\n# -- (M > 6 Msun) ---\n\n# linearly extrapolate yields up to 8 Msun\n\nextrap_yld_z14 = np.zeros((4, n_sym))\nextrap_yld_z43 = np.zeros((4, n_sym))\nextrap_yld_z83 = np.zeros((4, n_sym))\nextrap_yld_z22 = np.zeros((3, n_sym))\n\nm_extrap = np.arange(6.5, 8.1, 0.5)\nzm_grid_extrap = np.array(['z14m6.5', 'z14m7.0', 'z14m7.5', 'z14m8.0',\n 'z43m6.5', 'z43m7.0', 'z43m7.5', 'z43m8.0',\n 'z83m6.5', 'z83m7.0', 'z83m7.5', 'z83m8.0',\n 'z22m7.0', 'z22m7.5', 'z22m8.0'])\nz_grid_extrap = np.concatenate((np.ones(4) * 1e-4, np.ones(4) * 4e-3,\n np.ones(4) * 8e-3, np.ones(3) * 2e-2))\nm_grid_extrap = np.concatenate((m_extrap, m_extrap, m_extrap, m_extrap[1:]))\nmej_extrap = np.zeros(len(m_grid_extrap))\nind_extrap = [np.arange(4), np.arange(4, 8), np.arange(8, 12),\n np.arange(12, 15)]\nind_grid = [[13, 14], [28, 29], [43, 44], [59, 60]]\nfor i in range(4):\n itmp = interpolate.InterpolatedUnivariateSpline(m_grid[ind_grid[i]],\n m_ejected[ind_grid[i]],\n k=1)\n mej_extrap[ind_extrap[i]] = itmp(m_grid_extrap[ind_extrap[i]])\n\nfor k in range(n_sym):\n itmp = interpolate.InterpolatedUnivariateSpline(m_grid[13:15],\n yields[13:15, k], k=1)\n extrap_yld_z14[:, k] = itmp(m_extrap)\n\nfor k in range(n_sym):\n itmp = interpolate.InterpolatedUnivariateSpline(m_grid[28:30],\n yields[28:30, k], k=1)\n extrap_yld_z43[:, k] = itmp(m_extrap)\n\nfor k in range(n_sym):\n itmp = interpolate.InterpolatedUnivariateSpline(m_grid[43:45],\n yields[43:45, k], k=1)\n extrap_yld_z83[:, k] = itmp(m_extrap)\n\nfor k in range(n_sym):\n itmp = interpolate.InterpolatedUnivariateSpline(m_grid[59:],\n yields[59:, k], k=1)\n extrap_yld_z22[:, k] = itmp(m_extrap[1:])\n\n# -----------------------------\n\n\n# ---- chemical evolution model mass bins\n# IMF\nalpha = 2.35\nGamma = 1. - alpha\nalpha2 = 2. - alpha\nm_min = 0.1\nm_max = 100.\na = alpha2 / (m_max**alpha2 - m_min**alpha2)\nm_cutoff = 8.\n\n# Bins of Stars\n'''Bin lower bounds (bins, bins_low, bins_high). Bin width (dbin_low,\ndbin_high). Number of bins (n_bins, n_bins_low, n_bins_high). Average mass\nper bin (m_ave_high, m_ave_low, m_ave), fraction of total mass (f_mtot).\nFraction of total mass in a stellar generation going into each mass bin (f_m,\nf_m_low, f_m_high). 
'''\n\ndbin_low = 0.1\nbins_low = np.arange(m_min, m_cutoff, dbin_low)\nn_bins_low = len(bins_low)\nm_ave_low = (Gamma / alpha2) * \\\n ((bins_low + dbin_low)**alpha2 - bins_low**alpha2) / \\\n ((bins_low + dbin_low)**Gamma - bins_low**Gamma)\n\ndbin_high = 1.\nbins_high = np.arange(m_cutoff, m_max, dbin_high)\nn_bins_high = len(bins_high)\nm_ave_high = (Gamma / alpha2) * \\\n ((bins_high + dbin_high)**alpha2 - bins_high**alpha2) / \\\n ((bins_high + dbin_high)**Gamma - bins_high**Gamma)\n\nm_ave = np.append(m_ave_low, m_ave_high)\n# -----\n\n\n\n# ---- Minor grid points (mass bins spaced in ~0.1 Msun, but at the original 4\n# ---- metallicity values [Z = 1e-4, 4e-3, 8e-3, 2e-2])\n\n# Interpolate across mass to generate yields at each mass bin of m_ave_low for\n# 4 metallicity values: Z = 1e-4, 4e-3, 8e-3, 2e-2 (solar)\n\n# computed and extrapolated yields in coarse grid\nk10_grid_yld_z14 = np.concatenate((yields[:15], extrap_yld_z14), axis=0)\nk10_grid_yld_z43 = np.concatenate((yields[15:30], extrap_yld_z43), axis=0)\nk10_grid_yld_z83 = np.concatenate((yields[30:45], extrap_yld_z83), axis=0)\nk10_grid_yld_z22 = np.concatenate((yields[45:], extrap_yld_z22), axis=0)\n\nk10_grid_yld_list = [k10_grid_yld_z14, k10_grid_yld_z43, k10_grid_yld_z83,\n k10_grid_yld_z22]\n\nk10_interp_mass = np.zeros((4, n_bins_low, n_sym))\nk10_interp_mej = np.zeros((4, n_bins_low))\nk10_interp_rem = np.zeros((4, n_bins_low))\n\nind1 = [np.arange(15), np.arange(15, 30), np.arange(30, 45), np.arange(45, 61)]\nind2 = [np.arange(4), np.arange(4), np.arange(4), np.arange(1, 4)]\nfor i in range(4):\n m_tmp = np.zeros((n_bins_low, n_sym))\n m_gr_tmp = np.concatenate((m_grid[ind1[i]], m_grid_extrap[ind2[i]]))\n for k in range(n_sym):\n itmp = interpolate.InterpolatedUnivariateSpline(\n m_gr_tmp, k10_grid_yld_list[i][:, k], k=1)\n m_tmp[:, k] = itmp(m_ave_low)\n k10_interp_mass[i] = m_tmp\n # mass ejected\n m_ej_tmp = np.concatenate((m_ejected[ind1[i]], mej_extrap[ind2[i]]))\n itmp = interpolate.InterpolatedUnivariateSpline(m_gr_tmp, m_ej_tmp, k=1)\n k10_interp_mej[i] = itmp(m_ave_low)\n k10_interp_mej[i][np.where(k10_interp_mej[i] < 0.)] = 0.\n # remnant mass\n k10_interp_rem[i] = m_ave_low - k10_interp_mej[i]\n\n\n# ---- Interpolate across metallicity to generate yields at each mass bin of\n# ---- m_ave_low for N = n_metal_bin metallicity values\n\n# use same metallicity grid as Limongi & Chieffi SN yields\nn_metal_bin = 1001\nz_grid2 = np.array([1e-6, 1e-4, 1e-3, 6e-3, 2e-2])\nlogz_grid2 = np.log10(z_grid2)\nz_grid3 = np.array([1e-4, 4e-3, 8e-3, 2e-2])\n\n# evenly sample metallicity (in log Z) between grid points\nlogz_final = np.zeros(n_metal_bin)\ndind = int((n_metal_bin - 1) / (len(z_grid2) - 1))\nfor i in range(len(z_grid2) - 1):\n dlogz = (logz_grid2[i+1] - logz_grid2[i]) / \\\n ((n_metal_bin - 1) / (len(z_grid2) - 1))\n logz_final[i*dind:i*dind+dind+1] = np.arange(logz_grid2[i],\n logz_grid2[i+1]+1e-9, dlogz)\n\n# metallicity of final grid\nz_final = 10.**logz_final\n\n\n# output interpolated yields\nk10_final = np.zeros((n_metal_bin, n_bins_low, n_sym))\nk10_final_mej = np.zeros((n_metal_bin, n_bins_low))\nk10_final_rem = np.zeros((n_metal_bin, n_bins_low))\n# at each mass, interpolate each element for each metallicity\nfor i in range(n_bins_low):\n for j in range(n_sym):\n itmp = interpolate.InterpolatedUnivariateSpline(\n z_grid3, k10_interp_mass[:, i, j], k=1)\n k10_final[:, i, j] = itmp(z_final)\n # mass ejected\n itmp = interpolate.InterpolatedUnivariateSpline(z_grid3,\n k10_interp_mej[:, i], k=1)\n 
k10_final_mej[:, i] = itmp(z_final)\n k10_final_mej[:, i][np.where(k10_final_mej[:, i] < 0.)] = 0.\n # remnant mass\n k10_final_rem[:, i] = (np.ones(n_metal_bin) * m_ave_low[i] -\n k10_final_mej[:, i])\n\n\n# pickle the interpolated yields array and the metallicity grid used\npickle_write(k10_final, join(path_k10, 'interp_yields.pck'))\npickle_write(k10_final_mej, join(path_k10, 'interp_meject.pck'))\npickle_write(k10_final_rem, join(path_k10, 'interp_mremnant.pck'))\n# -----------------------------\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.delete",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.zeros",
"numpy.ones",
"numpy.where",
"numpy.arange",
"numpy.append",
"numpy.log10"
]
] |
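Editor's note on the row above: every extrapolation step (yields above 6.5 Msun, ejected masses, the metallicity refinement) leans on the fact that `InterpolatedUnivariateSpline` with `k=1` is piecewise-linear and, with the default `ext=0`, extends linearly beyond the last grid point. A toy check of that behavior with made-up yield numbers:

```python
import numpy as np
from scipy import interpolate

# two grid masses and their net yields for one species (toy values)
m_grid = np.array([6.0, 6.5])
ylds = np.array([0.010, 0.012])

itmp = interpolate.InterpolatedUnivariateSpline(m_grid, ylds, k=1)

m_extrap = np.arange(6.5, 8.1, 0.5)   # 6.5, 7.0, 7.5, 8.0, as in the script
print(itmp(m_extrap))                  # linear extension: 0.012, 0.014, 0.016, 0.018
```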
SimlaBurcu/Megatron-LM | [
"6b7ae136dc48d99a1e2defd4041ba0db67c99b1b"
] | [
"megatron/model/utils.py"
] | [
"# coding=utf-8\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for models.\"\"\"\n\nimport math\n\nimport torch\n\nfrom megatron import get_args\nfrom megatron.bfp.bfp_ops import BFPLinear\n\ndef init_method_normal(sigma):\n \"\"\"Init method based on N(0, sigma).\"\"\"\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_\n\n\ndef scaled_init_method_normal(sigma, num_layers):\n \"\"\"Init method based on N(0, sigma/sqrt(2*num_layers).\"\"\"\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_\n\n\ndef get_linear_layer(rows, columns, init_method, args):\n \"\"\"Simple linear layer with weight initialization.\"\"\"\n layer = BFPLinear(rows, columns, num_format=args.hbfp_num_format,\n mant_bits=args.hbfp_mant_bits,\n weight_mant_bits=args.hbfp_weight_mant_bits)\n init_method(layer.weight)\n with torch.no_grad():\n layer.bias.zero_()\n return layer\n\[email protected]\ndef gelu_impl(x):\n \"\"\"OpenAI's gelu implementation.\"\"\"\n return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *\n (1.0 + 0.044715 * x * x)))\ndef openai_gelu(x):\n return gelu_impl(x)\n\n#This is actually Python equivalent of torch.nn.functional.gelu(), also with type hints for ONNX exporter\[email protected]\ndef erf_gelu(x):\n return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype)+torch.ones_like(x).to(dtype=x.dtype))\n"
] | [
[
"torch.no_grad",
"torch.erf",
"torch.nn.init.normal_",
"torch.ones_like",
"torch.tanh"
]
] |
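Editor's note on the row above: `gelu_impl` is the OpenAI tanh approximation of GELU, while `erf_gelu` is the exact erf-based form. A quick sketch comparing the two, using the same constants as the source:

```python
import torch

def gelu_tanh(x):
    # tanh approximation, as in gelu_impl above
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
                                       (1.0 + 0.044715 * x * x)))

def gelu_erf(x):
    # erf-based form, as in erf_gelu above (1.41421 ~ sqrt(2))
    return x * 0.5 * (1.0 + torch.erf(x / 1.41421))

x = torch.linspace(-4.0, 4.0, steps=81)
# the two agree closely; the tanh form is only an approximation,
# so the gap is small but nonzero (well under 1e-3 here)
print((gelu_tanh(x) - gelu_erf(x)).abs().max())
```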
FrieAT/MD_CompressedWavelet | [
"82bd10edd611485cd5f0b81da744e07a3b7c98eb"
] | [
"BIQAA.py"
] | [
"\n# Source: https://notebooks.azure.com/salva/projects/Digital-Image-Processing/html/001%20Anisotropic%20Quality%20Index%20(AQI).ipynb\n# Author: Salvador Gabarda\n\nfrom IProcess import IProcess, EDataType\n\nimport numpy as np\n#import matplotlib.pyplot as plt\n#import matplotlib.image as mpimg\nfrom scipy.fftpack import fft, ifft\nimport math\nfrom scipy import signal\n\nclass BIQAA(IProcess):\n\n\tdef oriented_pattern(self,seq_length,angle):\n\t\t\"\"\"\n\t\tThis function originates a pattern that is later used for the orientation of the operational pseudo-Wigner distribution\n\t\tcomputation window, seq_length: sequence length in pixels, angle: orientation in degrees.\n\t\t\"\"\"\n\t\tangle = np.mod(angle,180)\n\t\t# correction to set axes in the image (x: rows, y: columns) to observer cartesian coordinates x,y\n\t\tangle = np.mod((angle+90),180)\n\t\tangle =math.radians(angle)\n\t\tpi = math.pi\n\t\th = int((seq_length/2))\n\t\tvalues = np.r_[float(-h):float(h+1)]\n\t\tnew_positions = np.zeros([2*h+1, 2])\n\t\tfor position in range(seq_length):\n\t\t\tif angle >= 0 and angle < pi/4:\n\t\t\t\tnew_positions[position,0] = values[position]+h\n\t\t\t\tnew_positions[position,1] = values[position]*math.tan(angle)+h\n\t\t\telif angle >= pi/4 and angle < pi/2:\n\t\t\t\tnew_positions[position,0] = values[position]*math.tan(pi/2-angle)+h\n\t\t\t\tnew_positions[position,1] = values[position]+h\n\t\t\telif angle >= pi/2 and angle < 3*pi/4:\n\t\t\t\tnew_positions[position,0] = values[position]*math.tan(pi/2-angle)+h\n\t\t\t\tnew_positions[position,1] = values[position]+h\n\t\t\telif angle >= 3*pi/4 and angle <= pi:\n\t\t\t\tnew_positions[position,0] = 1*values[position]+h\n\t\t\t\tnew_positions[position,1] = values[position]*math.tan(angle)+h\n\t\t\tnew_points = np.round_(new_positions)\n\t\treturn new_points.astype(int)\n\n\tdef image_arrangement (self,test_image,seq_length,angle,indices):\n\t\t\"\"\"\n\t\tarrangement operation for time reduction\n\t\t\"\"\"\n\t\trows = test_image.shape[0]\n\t\tcolumns = test_image.shape[1]\n\t\tlayers = np.zeros(((seq_length,rows,columns)))\n\t\tfor k in range(seq_length):\n\t\t\tmask = np.zeros((seq_length,seq_length))\n\t\t\tmask[indices[k,0],indices[k,1]] = 1\n\t\t\tlayers[k,:,:] = signal.convolve2d(test_image, mask, boundary='symm', mode='same')\n\t\treturn layers\n\n\tdef layer_product_function(self,layers,seq_length):\n\t\t\"\"\"\n\t\tproduct function of the Wigner distribution\n\t\t\"\"\"\n\t\tfaces = layers.shape[0]\n\t\trows =layers.shape[1]\n\t\tcolumns = layers.shape[2]\n\t\tlayer_product = np.ones(((seq_length-1,rows,columns)))\n\t\tlayers[faces-1,:,:]=layers[0,:,:]\n\t\tfor i in range(faces-1):\n\t\t\tlayer_product[i,:,:]= layers[i,:,:]*layers[faces-1-i]\n\t\treturn layer_product\n\n\tdef layer_wigner_distribution(self,test_image,seq_length,angle):\n\t\t\"\"\"\n\t\tWigner distribution of test_image, seq_lengthe: odd number of pixels, e.g.: 9, angle: degrees, e.g.: 45\n\t\t\"\"\"\n\t\tindices = self.oriented_pattern(seq_length,angle)\n\t\tlayers = self.image_arrangement (test_image,seq_length,angle,indices)\n\t\tlayer_product = self.layer_product_function(layers,seq_length)\n\t\tdistribution = fft(layer_product, axis = 0)\n\t\tdistribution = np.real(distribution)\n\t\t# set zero frequency in central position\n\t\tHead = distribution[int(seq_length/2):seq_length,:,:]\n\t\tTail = distribution[0:int(seq_length/2),:,:]\n\t\tdistribution = np.append(Head,Tail, axis = 0)\n\t\treturn distribution\n\n\tdef 
renyi_entropy(self,distribution,order):\n\t\t\"\"\"\n\t\tThis function calculates the Rényi entropy of an image based on its pseudo-Wigner distribution (distribution).\n\t\tThe \"order\" variabe represents the exponential order of the Rényi entropy (3 is the most common value)\n\t\t\"\"\"\n\t\teps = np.finfo(float).eps\n\t\trows = distribution.shape[1]\n\t\tcolumns = distribution.shape[2]\n\t\tlayers = distribution.shape[0]\n\t\tsquared_wl = np.ones([layers,rows,columns])\n\t\tfor layer in range(layers):\n\t\t\t# square distribution local values\n\t\t\tworking_layer = distribution[layer,:,:]\n\t\t\tsquared_wl[layer,:,:] = np.multiply(working_layer,working_layer)\n\t\tsquared_wd = squared_wl\n\t\t# sum squared wigner distribution along coordinate 1\n\t\tsum_sq_wd = np.sum(squared_wd, axis = 0)\n\t\t# normalize squared values\n\t\tnormalized_distribution =np.zeros([layers,rows,columns])\n\t\tfor layer in range(layers):\n\t\t\tnormalized_distribution[layer,:,:] = np.divide(squared_wd[layer,:,:],sum_sq_wd+eps)\n\t\t# raise elements to the power defined by input variable \"order\"\n\t\tpower_nor_dis = np.power(normalized_distribution,order)\n\t\t# sum pixelwise\n\t\tentropy_1 = np.sum(power_nor_dis, axis = 0)+eps\n\t\t# pixelwise entropy\n\t\tentropy_2 =np.log2(entropy_1)\n\t\tentropy =(1/(1-order))*entropy_2\n\t\tsuper_threshold_indices = entropy < 0\n\t\tentropy[super_threshold_indices] = 0\n\t\tentropy = np.nan_to_num(entropy)\n\t\t# normalize entropy\n\t\tentropy = entropy*(1/np.log2(layers))\n\t\treturn entropy\n\n\tdef show_wigner_frequencies(self,distribution):\n\t\t\"\"\"\n\t\tStarting from the pseudo-Wigner distribution (distribution) of the input test image, this function gives a visualization\n\t\tof the frequency components of such distribution and images are saved in pdf's\n\t\t\"\"\"\n\t\trows = distribution.shape[1]\n\t\tcolumns = distribution.shape[2]\n\t\tlayers = distribution.shape[0]\n\t\tfrequencies = np.zeros([layers,rows,columns])\n\t\tfor layer in range(layers):\n\t\t\tfrequency = distribution[layer,:,:]\n\t\t\tmin_val =np.amin(frequency)\n\t\t\tfrequency = frequency - min_val\n\t\t\tmax_val = np.amax(frequency)\n\t\t\tfrequency = (1/max_val)*frequency\n\t\t\tfrequency = np.uint8(255*frequency)\n\t\t\tname = \"wigner_distribution_\" + str(layer) + \".pdf\"\n\t\t\tmsg = \"Wigner distribution, frequency #\" + str(layer)\n\n\t\t\tfrequencies[layer,:,:]= frequency\n\t\treturn frequencies\n\n\tdef layer_image_anisotropy(self,test_image,seq_length,orientations,order):\n\t\t\"\"\"\n\t\tThis function calculates a parameter that behaves as an objective measure of the quality of the image for Gaussian blur\n\t\tand Gaussian noise. 
It is based on the frequency content given by the pseudo-Wigner distribution.\n\t\t\"\"\"\n\t\tentropy_val = np.zeros([orientations])\n\t\tfor orientation in range(orientations):\n\t\t\tangle = (180/orientations)*orientation\n\t\t\t#print( angle, \" degrees distribution\")\n\t\t\tdistribution = self.layer_wigner_distribution(test_image,seq_length,angle)\n\t\t\tentropy_pixelwise = self.renyi_entropy(distribution,order)\n\t\t\tentropy = np.mean(entropy_pixelwise)\n\t\t\t#print(\"entropy is %.4f\" % entropy)\n\t\t\tentropy_val[orientation] = entropy\n\t\tanisotropy = np.var(entropy_val)\n\t\tanisotropy = math.sqrt(anisotropy)\n\n\t\treturn anisotropy\n\n\t\"\"\"\n\tdef input_test_image(self,subfolder,name):\n\t\ttotal_name = subfolder + name\n\t\tinput_image = mpimg.imread(total_name)\n\t\timage_dimension = len(input_image.shape)\n\t\tif image_dimension == 3:\n\t\t\ttest_image = (1/3)*(input_image[:,:,0]+\n\t\t\tinput_image[:,:,1]+input_image[:,:,2])\n\t\telse:\n\t\t\ttest_image = input_image\n\n\t\t# convert image to regular gray levels\n\t\ttest_image =np.uint8(255*test_image)\n\t\treturn test_image\n\t\t\"\"\"\n\n\tdef __init__(self):\n\t\tIProcess.__init__(self)\n\n\t\tself.orientations = 4\n\t\tself.order = 3\n\t\tself.seq_length = 9\n\n\tdef toId(self):\n\t\treturn str(__class__.__name__)\n\n\tdef getType(self):\n\t\treturn EDataType.BIQAA\n\n\tdef do(self, imageData):\n\t\tIProcess.do(self, imageData)\n\n\t\tinputImgData = np.array(self.data[-1])\n\n\t\tself.biqaa_score = self.layer_image_anisotropy(inputImgData,self.seq_length,self.orientations,self.order)\n\n\t\treturn self\n"
] | [
[
"numpy.mean",
"numpy.multiply",
"numpy.finfo",
"scipy.signal.convolve2d",
"numpy.divide",
"numpy.uint8",
"numpy.nan_to_num",
"scipy.fftpack.fft",
"numpy.append",
"numpy.mod",
"numpy.array",
"numpy.zeros",
"numpy.real",
"numpy.amax",
"numpy.power",
"numpy.amin",
"numpy.log2",
"numpy.round_",
"numpy.sum",
"numpy.ones",
"numpy.var"
]
] |
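Editor's note on the row above: the quality score is the standard deviation, over orientations, of a per-orientation mean Rényi entropy, and `renyi_entropy` normalizes each pixelwise entropy so that a uniform frequency distribution scores 1. A minimal sketch of that normalized order-3 Rényi entropy on a plain probability vector:

```python
import numpy as np

def renyi_entropy(p, order=3):
    """Normalized Rényi entropy in [0, 1]; order=3 as in the BIQAA class above."""
    p = np.asarray(p, dtype=float)
    p = p / p.sum()
    h = np.log2(np.sum(p ** order)) / (1.0 - order)
    return h / np.log2(len(p))   # divide by log2(N), as the source does

print(renyi_entropy([0.25, 0.25, 0.25, 0.25]))  # 1.0 for a uniform distribution
print(renyi_entropy([0.97, 0.01, 0.01, 0.01]))  # ~0.03 for a sharply peaked one
```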
dendisuhubdy/kaggle-rsna | [
"4b690b2ce0e5d4b324d757e8a808accd15c951aa"
] | [
"src/utils/logger.py"
] | [
"# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514.\nfrom io import BytesIO # Python 3.x\nfrom io import StringIO\n\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\n\n\nclass Logger(object):\n def __init__(self, log_dir: str):\n \"\"\"Create a summary writer logging to log_dir.\"\"\"\n self.writer = tf.compat.v1.summary.FileWriter(log_dir) # tf.summary.FileWriter(log_dir)\n\n def scalar_summary(self, tag: str, value: float, step: int):\n \"\"\"Log a scalar variable.\"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n # summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value.tolist())]) # .tolist() is a wierd way to convert GPU tensor to float\n self.writer.add_summary(summary, step)\n self.writer.flush()\n\n def image_summary(self, tag: str, images: list, step: int):\n \"\"\"Log a list of images.\"\"\"\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag=\"%s/%d\" % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)\n\n def histo_summary(self, tag: str, values: list, step: int, bins: int=1000):\n \"\"\"Log a histogram of the tensor of values.\"\"\"\n\n # Create a histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill the fields of the histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values ** 2))\n\n # Drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n self.writer.add_summary(summary, step)\n self.writer.flush()\n"
] | [
[
"tensorflow.Summary",
"numpy.histogram",
"tensorflow.compat.v1.summary.FileWriter",
"numpy.max",
"tensorflow.HistogramProto",
"numpy.sum",
"numpy.min",
"numpy.prod",
"tensorflow.Summary.Value"
]
] |
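Editor's note on the row above: `image_summary` relies on `scipy.misc.toimage`, which was deprecated in SciPy 1.0 and removed in 1.2, and its `StringIO`-with-bare-`except` dance is a Python 2 leftover; PNG bytes need `BytesIO` on Python 3. A sketch of the usual Pillow replacement for producing the encoded bytes that feed `tf.Summary.Image`:

```python
from io import BytesIO

import numpy as np
from PIL import Image

img = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)  # toy HxWx3 image

s = BytesIO()
Image.fromarray(img).save(s, format="png")  # stands in for scipy.misc.toimage(img).save(...)
encoded = s.getvalue()                      # bytes for tf.Summary.Image(encoded_image_string=...)
print(len(encoded))
```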
hareshkm999/greenr | [
"24c03ac532e41731a3c78161542cb4f44ae08d43"
] | [
"app.py"
] | [
"#deep learning libraries\nfrom fastai.vision import *\nimport torch\ndefaults.device = torch.device('cpu')\n\n#web frameworks\nfrom starlette.applications import Starlette\nfrom starlette.responses import JSONResponse, HTMLResponse, RedirectResponse\nimport uvicorn\nimport aiohttp\nimport asyncio\n\nimport os\nimport sys\nimport base64 \nfrom PIL import Image\n\nasync def get_bytes(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n return await response.read()\n\napp = Starlette()\npath = Path('')\nlearner = load_learner(path)\n\[email protected](\"/upload\", methods = [\"POST\"])\nasync def upload(request):\n data = await request.form()\n bytes = await (data[\"file\"].read())\n return predict_image_from_bytes(bytes)\n\[email protected](\"/classify-url\", methods = [\"GET\"])\nasync def classify_url(request):\n bytes = await get_bytes(request.query_params[\"url\"])\n return predict_image_from_bytes(bytes)\n\ndef predict_image_from_bytes(bytes):\n #load byte data into a stream\n img_file = io.BytesIO(bytes)\n #encoding the image in base64 to serve in HTML\n img_pil = Image.open(img_file)\n img_pil.save(\"img.jpg\", format=\"JPEG\")\n img_uri = base64.b64encode(open(\"img.jpg\", 'rb').read()).decode('utf-8')\n \n #make inference on image and return an HTML response\n img = open_image(img_file)\n pred_class, pred_idx, outputs = learner.predict(img)\n formatted_outputs = [\"{:.1f}%\".format(value) for value in [x * 100 for x in torch.nn.functional.softmax(outputs, dim = 0)]]\n pred_probs = sorted(zip(learner.data.classes, map(str, formatted_outputs)),\n key = lambda p: p[1],\n reverse = True\n )\n return HTMLResponse(\n \"\"\"\n <html>\n <body>\n <p> Prediction: <b> %s </b> </p>\n <p> Confidence: <b> %s </b> </p>\n </body>\n <figure class = \"figure\">\n <img src=\"data:image/png;base64, %s\" class = \"figure-img\">\n </figure>\n </html>\n \"\"\" %(pred_class, pred_probs, img_uri))\n \[email protected](\"/\")\ndef form(request):\n return HTMLResponse(\n \"\"\"\n <h1> Greenr </h1>\n <p> Is your picture of a dandelion or grass? </p>\n <form action=\"/upload\" method = \"post\" enctype = \"multipart/form-data\">\n <u> Select picture to upload: </u> <br> <p>\n 1. <input type=\"file\" name=\"file\"><br><p>\n 2. <input type=\"submit\" value=\"Upload\">\n </form>\n <br>\n <br>\n <u> Submit picture URL </u>\n <form action = \"/classify-url\" method=\"get\">\n 1. <input type=\"url\" name=\"url\" size=\"60\"><br><p>\n 2. <input type=\"submit\" value=\"Upload\">\n </form>\n \"\"\")\n \[email protected](\"/form\")\ndef redirect_to_homepage(request):\n return RedirectResponse(\"/\")\n \nif __name__ == \"__main__\":\n if \"serve\" in sys.argv:\n port = int(os.environ.get(\"PORT\", 8008)) \n uvicorn.run(app, host = \"0.0.0.0\", port = port)\n"
] | [
[
"torch.device",
"torch.nn.functional.softmax"
]
] |
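Editor's note on the row above: `predict_image_from_bytes` sorts `(class, "xx.x%")` pairs by the *formatted string*, which orders lexicographically, so "9.5%" would sort above "81.8%". A sketch of the same softmax-and-rank step sorting on the numeric probability first (class names and scores are toy values, not the app's model output):

```python
import torch
import torch.nn.functional as F

classes = ["dandelion", "grass"]    # toy stand-ins for learner.data.classes
outputs = torch.tensor([0.5, 2.0])  # toy raw model scores

probs = F.softmax(outputs, dim=0)
# rank on the number, then format for display; sorting the formatted
# strings instead can misorder results lexicographically
ranked = sorted(zip(classes, probs.tolist()), key=lambda p: p[1], reverse=True)
print([(c, "{:.1f}%".format(100 * p)) for c, p in ranked])  # [('grass', '81.8%'), ...]
```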
huyvohcmc/tensorflow | [
"ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5"
] | [
"tensorflow/python/training/moving_averages_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional test for moving_averages.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import moving_averages\nfrom tensorflow.python.training import saver as saver_lib\n\n\nclass MovingAveragesTest(test.TestCase):\n\n @test_util.run_deprecated_v1\n def testAssignMovingAverageWithoutZeroDebias(self):\n with self.cached_session():\n var = variables.Variable([10.0, 11.0])\n val = constant_op.constant([1.0, 2.0], dtypes.float32)\n decay = 0.25\n assign = moving_averages.assign_moving_average(\n var, val, decay, zero_debias=False)\n variables.global_variables_initializer().run()\n self.assertAllClose([10.0, 11.0], self.evaluate(var))\n assign.op.run()\n self.assertAllClose(\n [10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)],\n self.evaluate(var))\n\n @test_util.run_deprecated_v1\n def testAssignMovingAverage(self):\n with self.cached_session():\n var = variables.Variable([0.0, 0.0])\n val = constant_op.constant([1.0, 2.0], dtypes.float32)\n decay = 0.25\n assign = moving_averages.assign_moving_average(var, val, decay)\n variables.global_variables_initializer().run()\n self.assertAllClose([0.0, 0.0], self.evaluate(var))\n assign.op.run()\n self.assertAllClose(\n [1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)],\n self.evaluate(var))\n\n @test_util.run_deprecated_v1\n def testAssignMovingAverageNewNamingMultipleCalls(self):\n with variable_scope.variable_scope(\"scope1\") as vs1:\n with variable_scope.variable_scope(\"scope2\"):\n var = variables.Variable(1.0, name=\"Var\")\n moving_averages.assign_moving_average(var, 0.0, 0.99)\n moving_averages.assign_moving_average(var, 0.0, 0.99)\n expected_names = [\"scope1/scope2/Var:0\",\n \"scope1/scope2/scope1/scope2/Var/biased:0\",\n \"scope1/scope2/scope1/scope2/Var/local_step:0\",\n \"scope1/scope2/scope1/scope2/Var/biased_1:0\",\n \"scope1/scope2/scope1/scope2/Var/local_step_1:0\"]\n actual_names = [v.name for v in vs1.global_variables()]\n self.assertSetEqual(set(expected_names), set(actual_names))\n\n @test_util.run_deprecated_v1\n def testAssignMovingAverageNewNamingMultipleCallsWithReuse(self):\n with variable_scope.variable_scope(\"scope1\") as vs1:\n var = 
variable_scope.get_variable(\"Var\", shape=[])\n moving_averages.assign_moving_average(var, 0.0, 0.99)\n moving_averages.assign_moving_average(var, 0.0, 0.99)\n with variable_scope.variable_scope(vs1, reuse=True):\n var = variable_scope.get_variable(\"Var\", shape=[])\n moving_averages.assign_moving_average(var, 0.0, 0.99)\n moving_averages.assign_moving_average(var, 0.0, 0.99)\n\n @test_util.run_deprecated_v1\n def testWeightedMovingAverage(self):\n with self.cached_session() as sess:\n decay = 0.5\n weight = array_ops.placeholder(dtypes.float32, [])\n val = array_ops.placeholder(dtypes.float32, [])\n\n wma = moving_averages.weighted_moving_average(val, decay, weight)\n variables.global_variables_initializer().run()\n\n # Get the first weighted moving average.\n val_1 = 3.0\n weight_1 = 4.0\n wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})\n numerator_1 = val_1 * weight_1 * (1.0 - decay)\n denominator_1 = weight_1 * (1.0 - decay)\n self.assertAllClose(numerator_1 / denominator_1, wma_array)\n\n # Get the second weighted moving average.\n val_2 = 11.0\n weight_2 = 22.0\n wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})\n numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)\n denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)\n self.assertAllClose(numerator_2 / denominator_2, wma_array)\n\n @test_util.run_deprecated_v1\n def testWeightedMovingAverageBfloat16(self):\n bfloat16 = pywrap_tensorflow.TF_bfloat16_type()\n with self.cached_session() as sess:\n decay = 0.5\n weight = array_ops.placeholder(dtypes.bfloat16, [])\n val = array_ops.placeholder(dtypes.bfloat16, [])\n\n wma = moving_averages.weighted_moving_average(val, decay, weight)\n variables.global_variables_initializer().run()\n\n # Get the first weighted moving average.\n val_1 = 3.0\n weight_1 = 4.0\n wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})\n numerator_1 = val_1 * weight_1 * (1.0 - decay)\n denominator_1 = weight_1 * (1.0 - decay)\n self.assertAllClose(numerator_1 / denominator_1, wma_array)\n\n # Get the second weighted moving average.\n val_2 = 11.0\n weight_2 = 22.0\n wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})\n numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)\n denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)\n self.assertAllClose(bfloat16(numerator_2 / denominator_2), wma_array)\n\n\ndef _Repeat(value, dim):\n if dim == 1:\n return value\n return [value] * dim\n\n\nclass ExponentialMovingAverageTest(test.TestCase):\n\n def _CheckDecay(self, ema, actual_decay, dim):\n\n def _Scale(dk, steps):\n if ema._zero_debias:\n return 1 - dk**steps\n else:\n return 1\n\n tens = _Repeat(10.0, dim)\n thirties = _Repeat(30.0, dim)\n var0 = variables.Variable(tens, name=\"v0\")\n var1 = variables.Variable(thirties, name=\"v1\")\n variables.global_variables_initializer().run()\n # Note that tensor2 is not a Variable but just a plain Tensor resulting\n # from the sum operation.\n tensor2 = var0 + var1\n update = ema.apply([var0, var1, tensor2])\n avg0 = ema.average(var0)\n avg1 = ema.average(var1)\n avg2 = ema.average(tensor2)\n\n self.assertItemsEqual([var0, var1], variables.moving_average_variables())\n\n self.assertFalse(avg0 in variables.trainable_variables())\n self.assertFalse(avg1 in variables.trainable_variables())\n self.assertFalse(avg2 in variables.trainable_variables())\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"v0/ExponentialMovingAverage:0\", 
avg0.name)\n self.assertEqual(\"v1/ExponentialMovingAverage:0\", avg1.name)\n self.assertEqual(\"add/ExponentialMovingAverage:0\", avg2.name)\n\n # Check initial values.\n self.assertAllClose(tens, self.evaluate(var0))\n self.assertAllClose(thirties, self.evaluate(var1))\n self.assertAllClose(_Repeat(10.0 + 30.0, dim), self.evaluate(tensor2))\n\n # Check that averages are initialized correctly.\n self.assertAllClose(tens, self.evaluate(avg0))\n self.assertAllClose(thirties, self.evaluate(avg1))\n # Note that averages of Tensor's initialize to zeros_like since no value\n # of the Tensor is known because the Op has not been run (yet).\n self.assertAllClose(_Repeat(0.0, dim), self.evaluate(avg2))\n\n # Update the averages and check.\n update.run()\n dk = actual_decay\n\n expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)\n self.assertAllClose(expected, self.evaluate(avg0))\n expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)\n self.assertAllClose(expected, self.evaluate(avg1))\n expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk) / _Scale(dk, 1), dim)\n self.assertAllClose(expected, self.evaluate(avg2))\n\n # Again, update the averages and check.\n update.run()\n expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),\n dim)\n self.assertAllClose(expected, self.evaluate(avg0))\n expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),\n dim)\n self.assertAllClose(expected, self.evaluate(avg1))\n expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +\n (10.0 + 30.0) * (1 - dk)) / _Scale(dk, 2), dim)\n self.assertAllClose(expected, self.evaluate(avg2))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNoNumUpdates_Scalar(self):\n with self.cached_session():\n ema = moving_averages.ExponentialMovingAverage(0.25)\n self._CheckDecay(ema, actual_decay=0.25, dim=1)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNoNumUpdates_Scalar_Debias(self):\n with self.cached_session():\n ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)\n self._CheckDecay(ema, actual_decay=0.25, dim=1)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNoNumUpdates_Vector(self):\n with self.cached_session():\n ema = moving_averages.ExponentialMovingAverage(0.25)\n self._CheckDecay(ema, actual_decay=0.25, dim=5)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNoNumUpdates_Vector_Debias(self):\n with self.cached_session():\n ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)\n self._CheckDecay(ema, actual_decay=0.25, dim=5)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNumUpdates_Scalar(self):\n with self.cached_session():\n # With num_updates 1, the decay applied is 0.1818\n ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)\n self._CheckDecay(ema, actual_decay=0.181818, dim=1)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNumUpdates_Scalar_Debias(self):\n with self.cached_session():\n # With num_updates 1, the decay applied is 0.1818\n ema = moving_averages.ExponentialMovingAverage(\n 0.25, num_updates=1, zero_debias=True)\n self._CheckDecay(ema, actual_decay=0.181818, dim=1)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNumUpdates_Vector(self):\n with self.cached_session():\n # With num_updates 1, the decay applied is 0.1818\n ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)\n self._CheckDecay(ema, actual_decay=0.181818, dim=5)\n\n 
@test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNumUpdates_Vector_Debias(self):\n with self.cached_session():\n # With num_updates 1, the decay applied is 0.1818\n ema = moving_averages.ExponentialMovingAverage(\n 0.25, num_updates=1, zero_debias=True)\n self._CheckDecay(ema, actual_decay=0.181818, dim=5)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesWithControlDeps(self):\n with self.cached_session() as sess:\n v0 = variables.Variable(0, name=\"v0\")\n add_to_v0 = v0.assign_add(1)\n v1 = variables.Variable([10.0], name=\"v1\")\n assign_to_v1 = v1.assign([20.0])\n ema = moving_averages.ExponentialMovingAverage(0.25)\n with ops.control_dependencies([add_to_v0]):\n ema_op = ema.apply([v1])\n # the moving average of v1 should not have any control inputs\n v1_avg = ema.average(v1)\n self.assertEqual([], v1_avg.initializer.control_inputs)\n self.assertEqual([], v1_avg.value().op.control_inputs)\n self.assertEqual([], v1_avg.value().op.control_inputs)\n # We should be able to initialize v1_avg before v0.\n self.evaluate(v1_avg.initializer)\n self.evaluate(v0.initializer)\n self.assertEqual([10.0], self.evaluate(v1_avg))\n # running ema_op should add to v0 (in addition to updating v1_avg)\n self.evaluate(assign_to_v1)\n self.evaluate(ema_op)\n self.assertEqual(1, self.evaluate(v0))\n self.assertEqual([17.5], self.evaluate(v1_avg))\n\n @test_util.run_in_graph_and_eager_modes\n @test_util.run_v1_only(\"b/120545219\")\n def testBasicEager(self):\n v0 = variables.Variable(1.0)\n v1 = variables.Variable(2.0)\n\n ema = moving_averages.ExponentialMovingAverage(0.25)\n op = ema.apply([v0, v1])\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(op)\n\n self.evaluate(v0.assign(2.0))\n self.evaluate(v1.assign(4.0))\n\n self.evaluate(ema.apply([v0, v1]))\n\n self.assertAllEqual(self.evaluate(ema.average(v0)), 1.75)\n self.assertAllEqual(self.evaluate(ema.average(v1)), 3.5)\n\n def averageVariablesNamesHelper(self, zero_debias):\n with self.cached_session():\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(30.0, name=\"v1\")\n # Add a non-trainable variable.\n v2 = variables.Variable(20.0, name=\"v2\", trainable=False)\n tensor2 = v0 + v1\n ema = moving_averages.ExponentialMovingAverage(\n 0.25, zero_debias=zero_debias, name=\"foo\")\n self.assertEqual(\"foo\", ema.name)\n self.assertEqual(\"v0/foo\", ema.average_name(v0))\n self.assertEqual(\"v1/foo\", ema.average_name(v1))\n self.assertEqual(\"add/foo\", ema.average_name(tensor2))\n ema.apply([v0, v1, tensor2])\n vars_to_restore = ema.variables_to_restore()\n # vars_to_restore should contain the following:\n # {v0/foo : v0,\n # v1/foo : v1,\n # add/foo : add/foo,\n # v2 : v2}\n expected_names = [\n ema.average_name(v0), ema.average_name(v1), ema.average_name(tensor2),\n v2.op.name\n ]\n if zero_debias:\n # vars_to_restore should also contain the following:\n # {add/foo/biased: add/foo/biased,\n # add/foo/local_step: add/foo/local_step}\n expected_names += [\n ema.average_name(tensor2) + \"/biased\",\n ema.average_name(tensor2) + \"/local_step\"\n ]\n self.assertEqual(sorted(expected_names), sorted(vars_to_restore.keys()))\n self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))\n self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))\n self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNames(self):\n 
self.averageVariablesNamesHelper(zero_debias=True)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNamesNoDebias(self):\n self.averageVariablesNamesHelper(zero_debias=False)\n\n def averageVariablesNamesRespectScopeHelper(self, zero_debias):\n # See discussion on #2740.\n with self.cached_session():\n with variable_scope.variable_scope(\"scope1\"):\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(30.0, name=\"v1\")\n # Add a non-trainable variable.\n v2 = variables.Variable(20.0, name=\"v2\", trainable=False)\n tensor2 = v0 + v1\n with variable_scope.variable_scope(\"scope2\"):\n ema = moving_averages.ExponentialMovingAverage(\n 0.25, zero_debias=zero_debias, name=\"foo\")\n self.assertEqual(\"scope2/scope1/v0/foo\", ema.average_name(v0))\n self.assertEqual(\"scope2/scope1/v1/foo\", ema.average_name(v1))\n self.assertEqual(\"scope2/scope1/add/foo\", ema.average_name(tensor2))\n ema.apply([v0, v1, tensor2])\n vars_to_restore = ema.variables_to_restore()\n # `vars_to_restore` should contain the following:\n # {scope2/scope1/v0/foo : v0,\n # scope2/scope1/v1/foo : v1,\n # scope2/scope1/add/foo : add/foo,\n # scope1/v2 : v2}\n expected_names = [\n ema.average_name(v0), ema.average_name(v1),\n ema.average_name(tensor2), v2.op.name\n ]\n if zero_debias:\n # `vars_to_restore` should also contain the following:\n # {scope2/scope2/scope1/add/foo/biased: add/foo/biased,\n # scope2/scope2/scope1/add/foo/local_step: add/foo/local_step}\n sc = \"scope2/\"\n expected_names += [\n sc + ema.average_name(tensor2) + \"/biased\",\n sc + ema.average_name(tensor2) + \"/local_step\"\n ]\n\n self.assertEqual(sorted(expected_names), sorted(vars_to_restore.keys()))\n self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))\n self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))\n self.assertEqual(\n ema.average(tensor2).op.name, ema.average_name(tensor2))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNamesRespectScope(self):\n self.averageVariablesNamesRespectScopeHelper(zero_debias=True)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesNamesRespectScopeNoDebias(self):\n self.averageVariablesNamesRespectScopeHelper(zero_debias=False)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testSubsetAverageVariablesNames(self):\n with self.cached_session():\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(30.0, name=\"v1\")\n # Add a non-trainable variable.\n v2 = variables.Variable(20.0, name=\"v2\", trainable=False)\n tensor2 = v0 + v1\n ema = moving_averages.ExponentialMovingAverage(0.25, name=\"foo_avg\")\n self.assertEqual(\"v0/foo_avg\", ema.average_name(v0))\n self.assertEqual(\"v1/foo_avg\", ema.average_name(v1))\n self.assertEqual(\"add/foo_avg\", ema.average_name(tensor2))\n vars_to_restore = ema.variables_to_restore([v0, tensor2])\n # vars_to_restore should contain the following:\n # {v0/foo_avg : v0,\n # add/foo_avg : add\n # v1 : v1,\n # v2 : v2}\n self.assertEqual(\n sorted(vars_to_restore.keys()),\n sorted([\n ema.average_name(v0), ema.average_name(tensor2), v1.op.name,\n v2.op.name\n ]))\n ema.apply([v0, v1, tensor2])\n self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))\n self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))\n self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))\n\n @test_util.run_v1_only(\"b/120545219\")\n def testAverageVariablesDeviceAssignment(self):\n with ops.device(\"/job:dev_v0\"):\n v0 = 
variables.Variable(10.0, name=\"v0\")\n with ops.device(\"/job:dev_v1\"):\n v1 = gen_state_ops.variable(\n shape=[1],\n dtype=dtypes.float32,\n name=\"v1\",\n container=\"\",\n shared_name=\"\")\n v1.set_shape([1])\n tensor2 = v0 + v1\n ema = moving_averages.ExponentialMovingAverage(0.25, name=\"foo_avg\")\n with ops.device(\"/job:default\"):\n ema.apply([v0, v1, tensor2])\n self.assertDeviceEqual(\"/job:dev_v0\", ema.average(v0).device)\n self.assertDeviceEqual(\"/job:dev_v1\", ema.average(v1).device)\n # However, the colocation property is maintained.\n self.assertEqual([b\"loc:@v1\"], ema.average(v1).op.colocation_groups())\n self.assertDeviceEqual(\"/job:default\", ema.average(tensor2).device)\n\n def _ExportAndImportGraph(self, graph):\n \"\"\"Export and import graph into a new graph.\"\"\"\n meta_graph = saver_lib.export_meta_graph(\n graph=graph, collection_list=graph.get_all_collection_keys())\n graph_copy = ops.Graph()\n with graph_copy.as_default():\n _ = saver_lib.import_meta_graph(meta_graph)\n return graph_copy\n\n @test_util.run_deprecated_v1\n def testImportedGraphVariablesToRestore(self):\n g = ops.Graph()\n with g.as_default():\n variables.Variable(10.0, name=\"v\")\n # Export and import the graph into a new graph.\n g_copy = self._ExportAndImportGraph(g)\n with g_copy.as_default():\n ema = moving_averages.ExponentialMovingAverage(0.25, name=\"foo_avg\")\n vars_to_restore = ema.variables_to_restore()\n # There should only be one variable in vars_to_restore. This is important\n # to check because when importing from a GraphDef, TF makes duplicate\n # python Variable objects referring to the same underlying variable. We\n # need to be sure that two variables referring to the same variable don't\n # both get added to vars_to_restore.\n self.assertEqual(len(vars_to_restore), 1)\n self.assertTrue(\"v/foo_avg\" in vars_to_restore)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.variables.moving_average_variables",
"tensorflow.python.pywrap_tensorflow.TF_bfloat16_type",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.device",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.python.ops.gen_state_ops.variable",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.training.moving_averages.weighted_moving_average",
"tensorflow.python.training.saver.import_meta_graph",
"tensorflow.python.training.moving_averages.ExponentialMovingAverage",
"tensorflow.python.framework.constant_op.constant"
]
] |
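The tests above pin down the arithmetic of `assign_moving_average` and `ExponentialMovingAverage`, including the zero-debias correction. As a quick cross-check, here is a minimal NumPy sketch (not the TensorFlow implementation) of the recurrence those assertions encode; the decay and values mirror the `_CheckDecay` helper, where the averaged tensor is `var0 + var1 = 40.0`.

```python
# Minimal NumPy sketch of the EMA update rule the tests assert:
# ema <- ema * decay + value * (1 - decay), optionally zero-debiased
# by dividing by (1 - decay**step).
def ema_update(ema, value, decay):
    """One step of the exponential moving average recurrence."""
    return ema * decay + value * (1.0 - decay)

def zero_debiased(biased_ema, decay, step):
    """Correct the startup bias of an EMA initialized at zero."""
    return biased_ema / (1.0 - decay ** step)

decay = 0.25
ema = 0.0                       # Tensor averages start from zeros_like
for step, value in enumerate([40.0, 40.0], start=1):
    ema = ema_update(ema, value, decay)
    # With a constant input, the debiased average is exactly that constant,
    # matching avg2 in _CheckDecay when zero_debias=True.
    print(step, zero_debiased(ema, decay, step))   # 40.0 both steps
```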
chicm/clouds | [
"66baff6527a55767ba39a531edec6f230d5e58e8"
] | [
"5fold/ensemble.py"
] | [
"import os\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, DataLoader,Dataset\nfrom loader import CloudDataset, get_train_val_loaders, get_test_loader\nfrom catalyst.dl.runner import SupervisedRunner\nfrom catalyst.dl.callbacks import DiceCallback, InferCallback, CheckpointCallback\nfrom models import create_model\nfrom tqdm import tqdm\nimport cv2\nfrom utils import post_process, sigmoid, dice, get_validation_augmentation, get_preprocessing, mask2rle\nimport segmentation_models_pytorch as smp\nimport ttach as tta\nimport settings\n\n#w = np.array([1., 0.5, 0.5, 0.5, 0.5, 0.3, 1, 1, 0.5])\n#w = np.array([1.]*5)\n#w = np.array([1.,1.,1.,1.,1.])\n#w /= w.sum()\n#print(w)\n\ndef create_models(args):\n models = []\n for encoder_type, ckp in zip(args.encoder_types.split(','), args.ckps.split(',')):\n model = create_model(encoder_type, ckp, 'sigmoid').cuda()\n model = tta.SegmentationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode='mean')\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.eval()\n models.append(model)\n return models\n\ndef predict_loader(models, loader):\n probs, masks = [], []\n with torch.no_grad():\n for batch in tqdm(loader):\n img, mask = batch[0].cuda(), batch[1]\n masks.append(mask)\n outputs = []\n for i, model in enumerate(models):\n output = model(img).cpu() #** 0.5\n outputs.append(output)\n avg_ouput = torch.stack(outputs).mean(0)\n probs.append(avg_ouput)\n probs = torch.cat(probs, 0).numpy()\n masks = torch.cat(masks, 0).numpy()\n print('probs:', probs.shape)\n print('masks:', masks.shape)\n return probs, masks\n\ndef ensemble(args):\n #class_params = {0: (0.5, 10000), 1: (0.5, 10000), 2: (0.5, 10000), 3: (0.5, 10000)} lb652\n #class_params = {0: (0.35, 15000), 1: (0.65, 15000), 2: (0.3, 15000), 3: (0.4, 10000)} lb656\n #class_params = {0: (0.35, 10000), 1: (0.45, 10000), 2: (0.35, 10000), 3: (0.35, 10000)} \n\n models = create_models(args)\n class_params = find_class_params(args, models)\n #exit(0)\n\n test_loader = get_test_loader(args.encoder_types.split(',')[0], args.batch_size)\n probs, _ = predict_loader(models, test_loader)\n\n encoded_pixels = []\n image_id = 0\n for img_out in tqdm(probs):\n #runner_out = runner.predict_batch({\"features\": test_batch[0].cuda()})['logits']\n #for i, batch in enumerate(runner_out):\n for probability in img_out:\n \n #probability = probability.cpu().detach().numpy()\n if probability.shape != (350, 525):\n probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n predict, num_predict = post_process(probability, class_params[image_id % 4][0], class_params[image_id % 4][1])\n if num_predict == 0:\n encoded_pixels.append('')\n else:\n r = mask2rle(predict)\n encoded_pixels.append(r)\n image_id += 1\n\n sub = pd.read_csv(os.path.join(settings.DATA_DIR, 'sample_submission.csv'))\n\n sub['EncodedPixels'] = encoded_pixels\n sub.to_csv(args.out, columns=['Image_Label', 'EncodedPixels'], index=False)\n\ndef find_class_params(args, models):\n val_loader = get_train_val_loaders(args.encoder_types.split(',')[0], batch_size=args.batch_size, ifold=args.ifold)['valid']\n probs, masks = predict_loader(models, val_loader)\n print(probs.shape, masks.shape)\n\n valid_masks = []\n probabilities = np.zeros((probs.shape[0]*4, 350, 525))\n for i, (img_probs, img_masks) in enumerate(zip(probs, masks)):\n for m in img_masks:\n if m.shape != (350, 525):\n m = cv2.resize(m, dsize=(525, 350), 
interpolation=cv2.INTER_LINEAR)\n valid_masks.append(m)\n\n for j, probability in enumerate(img_probs):\n if probability.shape != (350, 525):\n probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n probabilities[i * 4 + j, :, :] = probability\n\n print(len(valid_masks), len(probabilities), valid_masks[0].shape, probabilities[0].shape)\n class_params = {}\n for class_id in range(4):\n print(class_id)\n attempts = []\n for t in range(30, 90, 5):\n t /= 100\n #for ms in [0, 100, 1200, 5000, 10000]:\n for ms in [100, 1000, 2000, 5000, 10000, 15000, 20000]:\n \n masks = []\n for i in range(class_id, len(probabilities), 4):\n probability = probabilities[i]\n predict, num_predict = post_process(probability, t, ms)\n masks.append(predict)\n\n d = []\n for i, j in zip(masks, valid_masks[class_id::4]):\n if (i.sum() == 0) & (j.sum() == 0):\n d.append(1)\n else:\n d.append(dice(i, j))\n\n attempts.append((t, ms, np.mean(d)))\n\n attempts_df = pd.DataFrame(attempts, columns=['threshold', 'size', 'dice'])\n\n\n attempts_df = attempts_df.sort_values('dice', ascending=False)\n print(attempts_df.head())\n best_threshold = attempts_df['threshold'].values[0]\n best_size = attempts_df['size'].values[0]\n \n class_params[class_id] = (best_threshold, best_size)\n print(class_params)\n return class_params\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Cloud segmentation ensemble')\n parser.add_argument('--encoder_types', type=str, required=True)\n parser.add_argument('--ckps', type=str, required=True)\n parser.add_argument('--out', type=str, default='ensemble.csv')\n parser.add_argument('--batch_size', default=16, type=int, help='batch_size')\n parser.add_argument('--ifold', default=-1, type=int, help='validation fold index')\n \n args = parser.parse_args()\n print(args)\n ensemble(args)\n #find_class_params(args)\n #tmp = torch.load(args.ckp)\n #print(tmp.keys())\n #print(tmp['valid_metrics'])\n"
] | [
[
"torch.cat",
"torch.stack",
"numpy.zeros",
"pandas.DataFrame",
"torch.no_grad",
"numpy.mean",
"torch.cuda.device_count",
"torch.nn.DataParallel"
]
] |
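The heart of `predict_loader` above is the model-averaging step: each TTA-wrapped network emits per-pixel sigmoid probabilities, and the ensemble simply stacks them and takes the mean. Below is a self-contained sketch of just that step, using tiny stand-in convolutional models (hypothetical, not the repo's trained segmentation networks) so it runs on CPU.

```python
import torch
import torch.nn as nn

# Three tiny stand-in "models"; the real code wraps trained segmentation nets
# in tta.SegmentationTTAWrapper and moves them to GPU.
models = [nn.Sequential(nn.Conv2d(3, 4, 3, padding=1), nn.Sigmoid()).eval()
          for _ in range(3)]

img = torch.rand(2, 3, 32, 32)                 # (batch, channels, H, W)
with torch.no_grad():
    outputs = [model(img) for model in models]  # one probability map per model
    avg_output = torch.stack(outputs).mean(0)   # ensemble mean, same shape
print(avg_output.shape)                         # torch.Size([2, 4, 32, 32])
```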
EvanBianco/bruges | [
"344238775961369740d36ee9aea368be006ba7fe"
] | [
"bruges/attribute/complex.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nComplex trace attributes.\n\n:copyright: 2021 Agile Geoscience\n:license: Apache 2.0\n\"\"\"\nimport numpy as np\nfrom scipy.signal import hilbert\n\n\ndef instantaneous_amplitude(traces):\n \"\"\"\n Compute instantaneous amplitude, also known as the envelope or\n reflection strength.\n\n The attribute is computed over the last dimension. That is, time should\n be in the last dimension, so a 100 inline, 100 crossline seismic volume\n with 250 time slices would have shape (100, 100, 250).\n\n Args:\n traces (ndarray): The data array to use for calculating energy.\n Returns:\n ndarray: An array the same dimensions as the input array.\n \"\"\"\n return np.abs(hilbert(traces))\n\n\nenvelope = instantaneous_amplitude\nreflection_strength = instantaneous_amplitude\n\n\ndef quadrature(traces):\n \"\"\"\n Compute the quadrature trace.\n\n See https://wiki.seg.org/wiki/Instantaneous_attributes.\n\n Args:\n traces (ndarray): The data array to use for calculating energy.\n\n Returns:\n ndarray: An array the same dimensions as the input array.\n \"\"\"\n h = hilbert(traces)\n return np.abs(h) * np.sin(np.log(h).imag)\n\n\ndef instantaneous_phase(traces):\n \"\"\"\n Compute the instantaneous phase of the data.\n\n .. math::\n\n \\\\phi(t) = {\\\\rm Im}[\\\\ln h(t)]\n \n \n See https://wiki.seg.org/wiki/Instantaneous_attributes.\n\n Args:\n traces (ndarray): The data array to use for calculating energy.\n\n Returns:\n ndarray: An array the same dimensions as the input array.\n \"\"\"\n return np.angle(hilbert(traces))\n\n\ndef _inst_freq_claerbout(traces, dt):\n \"\"\"\n Compute the instantaneous frequency using Claerbout's (1985) approximation.\n This is also the formulation given in Yilmaz (2001).\n\n Formulation from Barnes, A, 2016, Handbook of Poststack Seismic Attributes,\n SEG Books.\n\n Args:\n traces (ndarray): The data array to use for calculating energy.\n dt (float): The sample interval in seconds, e.g. 0.004 for 4 ms sample\n interval (250 Hz sample frequency).\n Returns:\n ndarray: An array the same dimensions as the input array.\n \"\"\"\n h = hilbert(traces)\n term = (h[1:] - h[:-1]) / (h[1:] + h[:-1])\n return (1 / (np.pi * dt)) * np.imag(term)\n\n\ndef _inst_freq_scheuer_oldenburg(traces, dt):\n \"\"\"Instantaneous frequency after Scheuer & Oldenburg (1988).\n \n Scheuer, TE and DW Oldenburg (1988). Local phase velocity from complex seismic data.\n Geophysics 53 (12), p1503. DOI: http://dx.doi.org/10.1190/1.1442431.\n \n Formulation from Barnes, A, 2016, Handbook of Poststack Seismic Attributes,\n SEG Books:\n \n .. math::\n\n f_i(t) = \\frac{1}{2\\pi} \\ \\mathrm{Im} \\left[\\frac{h'(t)}{h(t)} \\right]\n \\approx \\frac{1}{\\pi T} \\ \\mathrm{Im} \\left[\\frac{h(t+T) - h(t)}{h(t+T) + h(t)} \\right] \n\n Args:\n traces (ndarray): The data array to use for calculating energy.\n dt (float): The sample interval in seconds, e.g. 0.004 for 4 ms sample\n interval (250 Hz sample frequency).\n Returns:\n ndarray: An array the same dimensions as the input array.\n \"\"\"\n y = quadrature(traces)\n expr = (traces[:-1] * y[1:] - traces[1:] * y[:-1]) / (traces[:-1] * traces[1:] + y[1:] * y[:-1])\n return (1 / (2 * np.pi * dt)) * np.arctan(expr)\n\n\ndef instantaneous_frequency(traces, dt, kind='so', percentile_clip=99):\n \"\"\"\n Compute instantaneous frequency with a discrete approximation.\n\n The attribute is computed over the last dimension. 
That is, time should\n be in the last dimension, so a 100 inline, 100 crossline seismic volume\n with 250 time slices would have shape (100, 100, 250).\n\n These attributes can be noisy, so a percentile clip is applied.\n\n Args:\n traces (ndarray): The data array to use for calculating energy.\n dt (float): The sample interval in seconds, e.g. 0.004 for 4 ms sample\n interval (250 Hz sample frequency).\n kind (str): \"scipy\", \"claerbout\" or \"so\" to denote a naive method from\n the SciPy docs, Claerbout's (1985) method or that of Scheuer & Oldenburg\n (1988). Claerbout's approximation is not stable above about half the\n Nyquist frequency (i.e. one quarter of the sampling frequency). The\n SciPy implementation is not recommended for seismic data.\n percentile_clip (float): Percentile at which to clip the data.\n Computed from the absolute values, clipped symmetrically\n at -p and +p, where p is the value at the given percentile.\n Returns:\n ndarray: An array the same dimensions as the input array.\n \"\"\"\n methods = {'claerbout': _inst_freq_claerbout,\n 'so': _inst_freq_scheuer_oldenburg,\n 'scipy': lambda traces, dt: np.diff(instantaneous_phase(traces)) / (2.0 * np.pi * dt),\n }\n func = methods.get(kind)\n if func is None:\n m = f'{kind} is not supported, use \"so\" (Scheuer-Oldenburg, recommended), \"claerbout\" or \"scipy\".'\n raise NotImplementedError(m)\n f = func(traces, dt)\n p = np.percentile(np.abs(f), percentile_clip)\n return np.clip(f, a_min=-p, a_max=p)\n"
] | [
[
"numpy.log",
"numpy.arctan",
"scipy.signal.hilbert",
"numpy.abs",
"numpy.clip",
"numpy.imag"
]
] |
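All of the attributes in `complex.py` derive from the analytic signal `h(t) = hilbert(traces)`. A quick synthetic check with a 25 Hz Gaussian-tapered cosine (made-up data, not from the library): the envelope recovers the amplitude modulation, and a phase-difference frequency estimate sits near the carrier. Note the sketch adds `np.unwrap`, which the module's plain 'scipy' variant omits.

```python
import numpy as np
from scipy.signal import hilbert

dt = 0.004                                   # 4 ms sample interval (250 Hz)
t = np.arange(0, 1, dt)
trace = np.exp(-(t - 0.5) ** 2 / 0.02) * np.cos(2 * np.pi * 25 * t)

h = hilbert(trace)
envelope = np.abs(h)                         # instantaneous_amplitude(trace)
phase = np.angle(h)                          # instantaneous_phase(trace)
# Like the module's 'scipy' method, but unwrapped to avoid 2*pi jumps.
freq = np.diff(np.unwrap(phase)) / (2 * np.pi * dt)

print(round(envelope.max(), 2))              # ~1.0: the Gaussian taper's peak
print(round(float(np.median(freq)), 1))      # ~25.0: the carrier frequency
```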
morriso1/analysing_imaging_data | [
"3bb79c2b328cd6df8f1f244cf799659756de57cf"
] | [
"ratioimage/ratioimage_old.py"
] | [
"import pandas as pd\nimport dask.dataframe as dd\nimport numpy as np\nimport glob\nimport os\nimport re\n\n\"\"\"Module contains functions: read_csv_folder_into_tidy_df, grouped_tidy_data_summary_stats, exp_analysis_name, \ncompile_DF_from_CSVdirectory, extract_gut_names, combining_gut_DFs, analyse_imagej_CSVs, df_to_pzfx, write_pzfx,\ncheck_if_list_of_folders_exists, summarise_and_sort_list_of_DFs and compileCSVs_sortbycondition_apply_method\"\"\"\n\n\ndef read_csv_folder_into_tidy_df(csv_glob, drop_columns=[' '], sample_id_categories=None, regex_exp=\"[a-z]\\dg\\d\\d?\"):\n \"\"\"\n Input\n -----\n Takes glob (str) to csv folder as input. Optional sample_id_categories (e.g. list).\n\n Function\n --------\n Combines into tidy dataframe.\n\n Returns\n -------\n Returns tidy dataframe.\n \"\"\"\n\n df = (\n dd.read_csv(csv_glob, include_path_column=\"sample_gut_id\")\n .compute()\n .drop(columns=drop_columns)\n )\n\n if sample_id_categories is None:\n df = df.assign(\n sample_gut_id=lambda x: x[\"sample_gut_id\"].str.findall(\n regex_exp).str[-1],\n sample_id=lambda x: pd.Categorical(\n x[\"sample_gut_id\"].str.split(\"g\", expand=True)[0],\n ),\n gut_id=lambda x: x[\"sample_gut_id\"].str.split(\"g\", expand=True)[1],\n )\n\n else:\n df = df.assign(\n sample_gut_id=lambda x: x[\"sample_gut_id\"].str.findall(\n regex_exp).str[-1],\n sample_id=lambda x: pd.Categorical(\n x[\"sample_gut_id\"].str.split(\"g\", expand=True)[\n 0], categories=sample_id_categories\n ),\n gut_id=lambda x: x[\"sample_gut_id\"].str.split(\"g\", expand=True)[1],\n )\n\n return df\n\n\ndef grouped_tidy_data_summary_stats(\n tidy_df, group_col=\"image_key\", categories=None, agg_funcs=['mean'], **agg_kwargs\n):\n \"\"\"\n Input\n -----\n Takes tidy DataFrame, group_col (str), categories (e.g. 
list) and aggregation functions.\n\n Returns\n -------\n Tidy DataFrame with 'summary_stats' performed on selected group.\n \"\"\"\n\n tidy_df_grouped = (\n tidy_df.groupby(by=group_col)\n .agg(agg_funcs, **agg_kwargs, axis=\"column\")\n .stack()\n .reset_index()\n .rename(columns={\"level_1\": \"summary_stat\"})\n )\n\n if categories is not None:\n tidy_df_grouped[[\"sample_id\", \"gut_id\"]] = tidy_df_grouped[group_col].str.split(\n \"g\", expand=True\n )\n tidy_df_grouped[\"sample_id\"] = pd.Categorical(\n tidy_df_grouped[\"sample_id\"], categories=categories\n )\n\n return(tidy_df_grouped)\n\n\ndef exp_analysis_name(Exp_Folder=os.getcwd()):\n\n Exp_Folder_List = [items.replace(\" \", \"_\")\n for items in Exp_Folder.split(\"/\")]\n if len(Exp_Folder_List) < 4:\n ExpAnalysisName = 'test'\n return ExpAnalysisName\n elif \"Anterior\" in Exp_Folder_List or \"Posterior\" in Exp_Folder_List:\n ExpAnalysisName = (\n Exp_Folder_List[-5]\n + \"_\"\n + Exp_Folder_List[-4]\n + \"_\"\n + Exp_Folder_List[-2]\n + \"_\"\n + Exp_Folder_List[-1]\n )\n else:\n ExpAnalysisName = (\n Exp_Folder_List[-4] + \"_\" + Exp_Folder_List[-3] +\n \"_\" + Exp_Folder_List[-1]\n )\n\n return ExpAnalysisName\n\n\ndef compile_DF_from_CSVdirectory(Path_Dir, usecolumns=[\"Mean\"]):\n \"\"\"\n Input: Function takes a path to a directory of CSV files and the columns to read.\n \"\"\"\n CSV_FullPath_L = sorted(glob.glob(os.path.join(Path_Dir, \"*.csv\")))\n Compiled_DF = pd.DataFrame()\n for Files in CSV_FullPath_L:\n Temp_DF = pd.read_csv(Files, usecols=usecolumns)\n Temp_DF.columns = [os.path.basename(Files)]\n Compiled_DF = pd.concat([Compiled_DF, Temp_DF], axis=1)\n return Compiled_DF\n\n\ndef extract_gut_names(DF):\n Columns_DF = DF.columns.to_list()\n RE_Match_L = list()\n for Names in Columns_DF:\n RE_Match = re.search(r\"\\w\\dg\\d+\", Names)\n RE_Match_L.append(RE_Match.group(0))\n return RE_Match_L\n\n\ndef combining_gut_DFs(DF):\n DF.columns = DF.columns.str[0:3]\n UniqueConditions = sorted(list(set(DF.columns.tolist())))\n\n Sorted_DF = pd.DataFrame()\n for Condition in UniqueConditions:\n DF_Temp = DF[Condition].T\n Sorted_DF_Temp = (\n pd.DataFrame(DF_Temp.values.flatten()\n ).dropna().reset_index(drop=True)\n )\n Sorted_DF = pd.concat([Sorted_DF, Sorted_DF_Temp], axis=1)\n\n Sorted_DF.columns = UniqueConditions\n\n return Sorted_DF\n\n\ndef analyse_imagej_CSVs(\n Exp_Folder=os.getcwd(), Num_Dir=\"Output_C0\", Denom_Dir=None, usecolumns=[\"Mean\"]\n):\n Path_Num_Dir = os.path.join(Exp_Folder, Num_Dir)\n\n if os.path.isdir(Path_Num_Dir):\n Num_DF = compile_DF_from_CSVdirectory(\n Path_Num_Dir, usecolumns=usecolumns)\n\n else:\n print(\n f\"Num_Dir input '{Num_Dir}' is not a directory located in:\\n{Exp_Folder}\")\n return\n\n if Denom_Dir == None:\n Denom_DF = 1\n pass\n else:\n Path_Denom_Dir = os.path.join(Exp_Folder, Denom_Dir)\n if os.path.isdir(Path_Denom_Dir):\n Denom_DF = compile_DF_from_CSVdirectory(\n Path_Denom_Dir, usecolumns=usecolumns\n )\n else:\n print(\n f\"Denom_Dir input '{Denom_Dir}' is not a directory located in:\\n{Exp_Folder}\"\n )\n return\n\n if Denom_Dir == None:\n Gut_Names_Num = extract_gut_names(Num_DF)\n Num_DF.columns = Gut_Names_Num\n\n else:\n Gut_Names_Num = extract_gut_names(Num_DF)\n Gut_Names_Denom = extract_gut_names(Denom_DF)\n\n if Gut_Names_Denom == Gut_Names_Num:\n Num_DF.columns = Gut_Names_Num\n Denom_DF.columns = Gut_Names_Denom\n else:\n print(f\"{Num_Dir} and {Denom_Dir} do not contain matched CSVs\")\n\n Divided_DF = Num_DF.div(Denom_DF)\n Sorted_Div_DF = combining_gut_DFs(Divided_DF)\n Sorted_Div_DF_Mean = 
combining_gut_DFs(pd.DataFrame(Divided_DF.mean()).T)\n Sorted_Div_DF_Median = combining_gut_DFs(\n pd.DataFrame(Divided_DF.median()).T)\n\n ExpAnalysisName = exp_analysis_name(Exp_Folder)\n return (\n ExpAnalysisName,\n Sorted_Div_DF,\n Sorted_Div_DF_Mean,\n Sorted_Div_DF_Median,\n Divided_DF,\n )\n\n\ndef df_to_pzfx(DF_1, DF_2, DF_3, Index=0):\n \"\"\"Function takes three pd dataframes and combines them into a prism file\n based on template located at Template_Path\"\"\"\n\n Template_Path = [\n \"/Users/morriso1/Documents/MacVersion_Buck + Genentech Work/Buck + Genentech Lab Work/Mito Ca2+/Experiments/Prism files/Template_Prism_Files/Asamples.pzfx\",\n \"/Users/morriso1/Documents/MacVersion_Buck + Genentech Work/Buck + Genentech Lab Work/Mito Ca2+/Experiments/Prism files/Template_Prism_Files/AandBsamples.pzfx\",\n ]\n with open(Template_Path[Index], \"r\") as f:\n Content = f.readlines()\n Indices = [i for i, Elements in enumerate(\n Content) if \"sample\" in Elements]\n # find the location of every sample in the template.to_pzfx\n\n DF_1 = \"<d>\" + DF_1.astype(str) + \"</d>\\n\"\n DF_2 = \"<d>\" + DF_2.astype(str) + \"</d>\\n\"\n DF_3 = \"<d>\" + DF_3.astype(str) + \"</d>\\n\"\n\n Content_Head = Content[: Indices[0]]\n Content_Middle = [\n \"</Subcolumn>\\n\",\n \"</YColumn>\\n\",\n '<YColumn Width=\"224\" Decimals=\"6\" Subcolumns=\"1\">\\n',\n ]\n Content_TB = [\n \"</Table>\\n\",\n '<Table ID=\"Table38\" XFormat=\"none\" TableType=\"OneWay\" EVFormat=\"AsteriskAfterNumber\">\\n',\n \"<Title>TableB</Title>\\n\",\n '<RowTitlesColumn Width=\"1\">\\n',\n \"<Subcolumn></Subcolumn>\\n\",\n \"</RowTitlesColumn>\\n\",\n '<YColumn Width=\"211\" Decimals=\"6\" Subcolumns=\"1\">\\n',\n ]\n Content_TC = [\n \"</Table>\\n\",\n '<Table ID=\"Table41\" XFormat=\"none\" TableType=\"OneWay\" EVFormat=\"AsteriskAfterNumber\">\\n',\n \"<Title>TableC</Title>\\n\",\n '<RowTitlesColumn Width=\"1\">\\n',\n \"<Subcolumn></Subcolumn>\\n\",\n \"</RowTitlesColumn>\\n\",\n '<YColumn Width=\"211\" Decimals=\"6\" Subcolumns=\"1\">\\n',\n ]\n Content_Tail = Content[(Indices[-1] + 3):]\n\n Temp_A = []\n Temp_B = []\n Temp_C = []\n\n for Key, _ in DF_1.iteritems():\n Content_Up = [f\"<Title>{Key}</Title>\\n\", \"<Subcolumn>\\n\"]\n Temp_A = (\n Temp_A\n + Content_Up\n + DF_1[Key][DF_1[Key] != \"<d>nan</d>\\n\"].tolist()\n + Content_Middle\n )\n Temp_B = (\n Temp_B\n + Content_Up\n + DF_2[Key][DF_2[Key] != \"<d>nan</d>\\n\"].tolist()\n + Content_Middle\n )\n Temp_C = (\n Temp_C\n + Content_Up\n + DF_3[Key][DF_3[Key] != \"<d>nan</d>\\n\"].tolist()\n + Content_Middle\n )\n\n del Temp_A[-1]\n del Temp_B[-1]\n del Temp_C[-1]\n # required to get rid of trailing subcolumn formatting\n\n Prism_Output = (\n Content_Head + Temp_A + Content_TB + Temp_B + Content_TC + Temp_C + Content_Tail\n )\n\n return Prism_Output\n\n\ndef write_pzfx(Prism_Output, Save_Dir=os.getcwd(), ExpAnalysisName=None):\n if ExpAnalysisName == None:\n print(\"Please provide ExpAnalysisName\")\n return\n\n with open(os.path.join(Save_Dir, ExpAnalysisName) + \".pzfx\", \"w+\") as f_out:\n for item in Prism_Output:\n f_out.write(\"{}\".format(item))\n\n print(f\"The prism file:\\n'{ExpAnalysisName}' \\nwas saved at\\n'{Save_Dir}'\")\n\n\ndef check_if_list_of_folders_exists(\n Exp_Folder=os.getcwd(), FoldersToCount=[\"Output_C0\", \"Output_C2\"]\n):\n \"\"\"Input: Takes path to experiment folder and list of folders.\n\n Function: Checks if list of folders are in the experiment folder.\n\n Output: Returns boolean if they exist (first output) and list of full 
paths of input folders (second output)\"\"\"\n\n Path_Num_Dir_L = list()\n for Folder in FoldersToCount:\n Path_Num_Dir = os.path.join(Exp_Folder, Folder)\n if os.path.isdir(Path_Num_Dir):\n Path_Num_Dir_L.append(Path_Num_Dir)\n continue\n else:\n print(f\"{Path_Num_Dir} is not a path to a directory.\")\n return False\n return True, Path_Num_Dir_L\n\n\ndef summarise_and_sort_list_of_DFs(\n L_DFs, Method=\"count\", Folders=[\"Output_C0\", \"Output_C2\"]\n):\n \"\"\"Input: Takes as input a list of pd.DataFrames, a method to summarise them (e.g. count, mean or median) and \n the folders from which the list of pd.DataFrames was constructed.\n\n Function: Checks if lists of pd.DataFrames and folders are same length, sorts and concats pd.DataFrames by unique\n condition and places them into single dictionary.\n\n Output: Returns sorted pd.DataFrames in dictionary with folder name as key.\"\"\"\n\n if type(L_DFs) is list and type(Folders) is list and len(L_DFs) == len(Folders):\n\n Sorted_DFs_L = []\n for DFs in L_DFs:\n DFs.columns = DFs.columns.str.extract(\"([a-z][0-9]g[0-9][0-9]?)\")[\n 0\n ].tolist()\n if Method == \"count\":\n DFs_method = DFs.count()\n elif Method == \"mean\":\n DFs_method = DFs.mean()\n elif Method == \"median\":\n DFs_method = DFs.median()\n else:\n print(\"Available methods are 'count', 'mean' and 'median'.\")\n return\n\n Split_Indexes = DFs_method.index.str.split(\"g\")\n Split_Indexes = [x[0] for x in Split_Indexes]\n Unique_Conditions = sorted(set(Split_Indexes))\n\n DF_sorted = pd.DataFrame()\n\n # sorts by number of conditions e.g. 'a1' in unique conditions\n for Condition in Unique_Conditions:\n DF_temp = pd.DataFrame(\n DFs_method[DFs_method.index.str.match(Condition)]\n )\n DF_temp.index = DF_temp.index.str.replace(Condition, \"\")\n DF_temp.index.name = \"Guts\"\n DF_temp.columns = [Condition]\n DF_sorted = pd.concat([DF_sorted, DF_temp], axis=1, sort=False)\n DF_sorted = DF_sorted.astype(\"float\")\n\n Sorted_DFs_L.append(DF_sorted)\n\n return dict(zip(Folders, Sorted_DFs_L))\n\n else:\n print(f\"L_DFs and {Folders} are not lists of the same length\")\n\n\ndef compileCSVs_sortbycondition_apply_method(\n Exp_Folder=os.getcwd(),\n FoldersToApplyMethodTo=[\"Output_C0\", \"Output_C2\"],\n Method=\"count\",\n):\n \"\"\"Input: Takes path to experiment folder, list of folders to apply a summarise method to and the type of\n summarise method to apply (e.g. count, mean or median).\n\n Function: Checks if list of folders exists, reads in and compiles CSVs by folder as list of pd.DataFrames,\n summarises and sorts list of pd.DataFrames.\n\n Returns: Returns sorted pd.DataFrames in dictionary with folder name as key.\n \"\"\"\n\n if check_if_list_of_folders_exists(Exp_Folder, FoldersToApplyMethodTo):\n _, Path_Num_Dir_L = check_if_list_of_folders_exists(\n Exp_Folder, FoldersToApplyMethodTo\n )\n L_DFs = list()\n for Folder in Path_Num_Dir_L:\n DF_Temp = compile_DF_from_CSVdirectory(Folder)\n L_DFs.append(DF_Temp)\n\n Dict_of_DFs = summarise_and_sort_list_of_DFs(\n L_DFs=L_DFs, Folders=FoldersToApplyMethodTo, Method=Method\n )\n return Dict_of_DFs\n\n else:\n return\n"
] | [
[
"pandas.concat",
"pandas.DataFrame",
"pandas.Categorical",
"pandas.read_csv"
]
] |
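The pattern that `read_csv_folder_into_tidy_df` and `grouped_tidy_data_summary_stats` implement, reduced to its core: group measurements by a combined sample/gut key, aggregate, then split the key back out with `sample_id` as a `pd.Categorical`. The toy data below is invented for illustration.

```python
import pandas as pd

df = pd.DataFrame({
    "sample_gut_id": ["a1g1", "a1g1", "a1g2", "b1g1"],
    "Mean": [1.0, 3.0, 2.0, 5.0],
})

summary = (df.groupby("sample_gut_id")["Mean"]
             .agg(["mean", "median"])
             .reset_index())
# Split the combined key back into sample and gut, as the functions above do.
summary[["sample_id", "gut_id"]] = summary["sample_gut_id"].str.split("g", expand=True)
summary["sample_id"] = pd.Categorical(summary["sample_id"], categories=["a1", "b1"])
print(summary)
```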
monokrome/tensorflow | [
"2533ada7dd45b84d60677b8735e013d21044651a"
] | [
"tensorflow/python/estimator/canned/dnn_linear_combined.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow estimators for Linear and DNN joined training models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport six\n\nfrom tensorflow.python.estimator import estimator\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.estimator.canned import optimizers\nfrom tensorflow.python.feature_column import feature_column as feature_column_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.layers import core as core_layers\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import sync_replicas_optimizer\nfrom tensorflow.python.training import training_util\n\n# The default learning rates are a historical artifact of the initial\n# implementation.\n_DNN_LEARNING_RATE = 0.001\n_LINEAR_LEARNING_RATE = 0.005\n\n\ndef _check_no_sync_replicas_optimizer(optimizer):\n if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):\n raise ValueError(\n 'SyncReplicasOptimizer does not support multi optimizers case. '\n 'Therefore, it is not supported in DNNLinearCombined model. '\n 'If you want to use this optimizer, please use either DNN or Linear '\n 'model.')\n\n\ndef _linear_learning_rate(num_linear_feature_columns):\n \"\"\"Returns the default learning rate of the linear model.\n\n The calculation is a historical artifact of this initial implementation, but\n has proven a reasonable choice.\n\n Args:\n num_linear_feature_columns: The number of feature columns of the linear\n model.\n\n Returns:\n A float.\n \"\"\"\n default_learning_rate = 1. 
/ math.sqrt(num_linear_feature_columns)\n return min(_LINEAR_LEARNING_RATE, default_learning_rate)\n\n\ndef _add_layer_summary(value, tag):\n summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))\n summary.histogram('%s/activation' % tag, value)\n\n\ndef _dnn_linear_combined_model_fn(\n features, labels, mode, head,\n linear_feature_columns=None, linear_optimizer='Ftrl',\n dnn_feature_columns=None, dnn_optimizer='Adagrad', dnn_hidden_units=None,\n dnn_activation_fn=nn.relu, dnn_dropout=None,\n input_layer_partitioner=None, config=None):\n \"\"\"Deep Neural Net and Linear combined model_fn.\n\n Args:\n features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype\n `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n head: A `Head` instance.\n linear_feature_columns: An iterable containing all the feature columns used\n by the Linear model.\n linear_optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training the Linear model. Defaults to the Ftrl\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used by\n the DNN model.\n dnn_optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training the DNN model. Defaults to the Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per DNN layer.\n dnn_activation_fn: Activation function applied to each DNN layer. If `None`,\n will use `tf.nn.relu`.\n dnn_dropout: When not `None`, the probability we will drop out a given DNN\n coordinate.\n input_layer_partitioner: Partitioner for input layer.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n `ModelFnOps`\n\n Raises:\n ValueError: If both `linear_feature_columns` and `dnn_features_columns`\n are empty at the same time, or `input_layer_partitioner` is missing.\n \"\"\"\n if not linear_feature_columns and not dnn_feature_columns:\n raise ValueError(\n 'Either linear_feature_columns or dnn_feature_columns must be defined.')\n num_ps_replicas = config.num_ps_replicas if config else 0\n input_layer_partitioner = input_layer_partitioner or (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=64 << 20))\n\n # Build DNN Logits.\n dnn_parent_scope = 'dnn'\n\n if not dnn_feature_columns:\n dnn_logits = None\n else:\n dnn_optimizer = optimizers.get_optimizer_instance(\n dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)\n _check_no_sync_replicas_optimizer(dnn_optimizer)\n if not dnn_hidden_units:\n raise ValueError(\n 'dnn_hidden_units must be defined when dnn_feature_columns is '\n 'specified.')\n dnn_partitioner = (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas))\n with variable_scope.variable_scope(\n dnn_parent_scope,\n values=tuple(six.itervalues(features)),\n partitioner=dnn_partitioner):\n with variable_scope.variable_scope('input',\n partitioner=input_layer_partitioner):\n net = feature_column_lib.input_layer(\n features=features,\n feature_columns=dnn_feature_columns)\n\n for layer_id, num_hidden_units in enumerate(dnn_hidden_units):\n with variable_scope.variable_scope(\n 'hiddenlayer_%d' % layer_id,\n values=(net,)) as dnn_hidden_layer_scope:\n net = core_layers.dense(\n net,\n units=num_hidden_units,\n activation=dnn_activation_fn,\n 
kernel_initializer=init_ops.glorot_uniform_initializer(),\n name=dnn_hidden_layer_scope)\n if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:\n net = core_layers.dropout(net, rate=dnn_dropout, training=True)\n _add_layer_summary(net, dnn_hidden_layer_scope.name)\n\n with variable_scope.variable_scope(\n 'logits',\n values=(net,)) as dnn_logits_scope:\n dnn_logits = core_layers.dense(\n net,\n units=head.logits_dimension,\n activation=None,\n kernel_initializer=init_ops.glorot_uniform_initializer(),\n name=dnn_logits_scope)\n _add_layer_summary(dnn_logits, dnn_logits_scope.name)\n\n linear_parent_scope = 'linear'\n\n if not linear_feature_columns:\n linear_logits = None\n else:\n linear_optimizer = optimizers.get_optimizer_instance(\n linear_optimizer,\n learning_rate=_linear_learning_rate(len(linear_feature_columns)))\n _check_no_sync_replicas_optimizer(linear_optimizer)\n with variable_scope.variable_scope(\n linear_parent_scope,\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner) as scope:\n linear_logits = feature_column_lib.linear_model(\n features=features,\n feature_columns=linear_feature_columns,\n units=head.logits_dimension)\n _add_layer_summary(linear_logits, scope.name)\n\n # Combine logits and build full model.\n if dnn_logits is not None and linear_logits is not None:\n logits = dnn_logits + linear_logits\n elif dnn_logits is not None:\n logits = dnn_logits\n else:\n logits = linear_logits\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n train_ops = []\n global_step = training_util.get_global_step()\n if dnn_logits is not None:\n train_ops.append(\n dnn_optimizer.minimize(\n loss,\n var_list=ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=dnn_parent_scope)))\n if linear_logits is not None:\n train_ops.append(\n linear_optimizer.minimize(\n loss,\n var_list=ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=linear_parent_scope)))\n\n train_op = control_flow_ops.group(*train_ops)\n with ops.control_dependencies([train_op]):\n with ops.colocate_with(global_step):\n return state_ops.assign_add(global_step, 1)\n\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=_train_op_fn,\n logits=logits)\n\n\nclass DNNLinearCombinedClassifier(estimator.Estimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined classification models.\n\n Note: This estimator is also known as wide-n-deep.\n\n Example:\n\n ```python\n numeric_feature = numeric_column(...)\n sparse_column_a = categorical_column_with_hash_bucket(...)\n sparse_column_b = categorical_column_with_hash_bucket(...)\n\n sparse_feature_a_x_sparse_feature_b = crossed_column(...)\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNLinearCombinedClassifier(\n # wide settings\n linear_feature_columns=[sparse_feature_a_x_sparse_feature_b],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[\n sparse_feature_a_emb, sparse_feature_b_emb, numeric_feature],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))\n\n # To apply L1 and L2 regularization, you can set optimizers as follows:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # It is same for FtrlOptimizer.\n\n # Input builders\n def 
input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using softmax cross entropy.\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n input_layer_partitioner=None,\n config=None):\n \"\"\"Initializes a DNNLinearCombinedClassifier instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. Defaults to FTRL optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. Defaults to Adagrad optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n n_classes: Number of label classes. Defaults to 2, namely binary\n classification. Must be > 1.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. 
If it is not given, that means labels are\n already encoded as integer or float within [0, 1] for `n_classes=2` and\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\n Also there will be errors if vocabulary is not provided and labels are\n string.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: RunConfig object to configure the runtime settings.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n linear_feature_columns = linear_feature_columns or []\n dnn_feature_columns = dnn_feature_columns or []\n self._feature_columns = (\n list(linear_feature_columns) + list(dnn_feature_columns))\n if not self._feature_columns:\n raise ValueError('Either linear_feature_columns or dnn_feature_columns '\n 'must be defined.')\n if n_classes == 2:\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access\n weight_column=weight_column,\n label_vocabulary=label_vocabulary)\n else:\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access\n n_classes,\n weight_column=weight_column,\n label_vocabulary=label_vocabulary)\n def _model_fn(features, labels, mode, config):\n return _dnn_linear_combined_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config)\n\n super(DNNLinearCombinedClassifier, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config)\n\n\nclass DNNLinearCombinedRegressor(estimator.Estimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined models for regression.\n\n Note: This estimator is also known as wide-n-deep.\n\n Example:\n\n ```python\n numeric_feature = numeric_column(...)\n sparse_column_a = categorical_column_with_hash_bucket(...)\n sparse_column_b = categorical_column_with_hash_bucket(...)\n\n sparse_feature_a_x_sparse_feature_b = crossed_column(...)\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNLinearCombinedRegressor(\n # wide settings\n linear_feature_columns=[sparse_feature_a_x_sparse_feature_b],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[\n sparse_feature_a_emb, sparse_feature_b_emb, numeric_feature],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))\n\n # To apply L1 and L2 regularization, you can set optimizers as follows:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # It is same for FtrlOptimizer.\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise 
there will be a `KeyError`:\n\n * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using mean squared error.\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n label_dimension=1,\n weight_column=None,\n input_layer_partitioner=None,\n config=None):\n \"\"\"Initializes a DNNLinearCombinedRegressor instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. Defaults to FTRL optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. Defaults to Adagrad optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n label_dimension: Number of regression targets per example. This is the\n size of the last dimension of the labels and logits `Tensor` objects\n (typically, these have shape `[batch_size, label_dimension]`).\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n input_layer_partitioner: Partitioner for input layer. 
Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: RunConfig object to configure the runtime settings.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n linear_feature_columns = linear_feature_columns or []\n dnn_feature_columns = dnn_feature_columns or []\n self._feature_columns = (\n list(linear_feature_columns) + list(dnn_feature_columns))\n if not self._feature_columns:\n raise ValueError('Either linear_feature_columns or dnn_feature_columns '\n 'must be defined.')\n\n def _model_fn(features, labels, mode, config):\n return _dnn_linear_combined_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib. # pylint: disable=protected-access\n _regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension, weight_column=weight_column),\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config)\n\n super(DNNLinearCombinedRegressor, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config)\n"
] | [
[
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.layers.core.dropout",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.python.feature_column.feature_column.linear_model",
"tensorflow.python.feature_column.feature_column.input_layer",
"tensorflow.python.ops.init_ops.glorot_uniform_initializer",
"tensorflow.python.training.training_util.get_global_step",
"tensorflow.python.estimator.canned.optimizers.get_optimizer_instance",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.estimator.canned.head._multi_class_head_with_softmax_cross_entropy_loss",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.estimator.canned.head._regression_head_with_mean_squared_error_loss"
]
] |
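Stripped of estimator plumbing, `_dnn_linear_combined_model_fn` reduces to one line: `logits = dnn_logits + linear_logits`. Here is a NumPy stand-in (illustrative only, with made-up random weights) of that wide-and-deep combination and the binary sigmoid head.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 8))                # batch of 4 examples, 8 features

w_linear = rng.normal(size=(8, 1))
linear_logits = x @ w_linear               # "wide" part: a single affine map

w1, w2 = rng.normal(size=(8, 16)), rng.normal(size=(16, 1))
dnn_logits = np.maximum(x @ w1, 0.0) @ w2  # "deep" part: one-hidden-layer ReLU MLP

logits = dnn_logits + linear_logits        # the combination in the model_fn above
probs = 1.0 / (1.0 + np.exp(-logits))      # binary head applies a sigmoid
print(probs.ravel())
```

In the real model each part also gets its own optimizer (Ftrl for the wide part, Adagrad for the deep part), which is why the sum rather than a learned gate keeps the training logic simple.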
btrspg/temp-scripts | [
"7898b751acb0074340802432447124317582b865"
] | [
"test_merge.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2018/6/12 上午11:25\n# @Author : Chen Yuelong\n# @Mail : [email protected]\n# @File : test_merge.py\n# @Software: PyCharm\n\nfrom __future__ import absolute_import, unicode_literals\n\n__author__ = 'Chen Yuelong'\nimport os, sys\nimport pandas as pd\nimport glob\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef main():\n files =glob.glob('/*/*/*/*/*/*/*/ALIGNMENT/*csv')\n data_list = []\n for file in files:\n # print(file)\n tmp = pd.read_csv(file,usecols=[0,1])\n data_list.append(tmp)\n print(len(data_list))\n merge_data = data_list.pop(0)\n\n for data in data_list:\n merge_data = pd.merge(merge_data,data,)\n\n # print(merge_data.columns)\n # plt.subplot(211)\n # fig,axes = plt.subplots(ncols=2)\n merge_data.to_csv('all.csv')\n data=pd.DataFrame([[merge_data.iloc[i][0],\n merge_data.iloc[i][1:].mean(),\n merge_data.iloc[i][1:].mean()+2*merge_data.iloc[i][1:].std(),\n merge_data.iloc[i][1:].mean()-2*merge_data.iloc[i][1:].std(),\n merge_data.iloc[i][1:].min()] for i in range(1,merge_data.shape[0])],\n columns=['amplicon','mean','mean+2std','mean-2std','min'])\n\n data.sort_values('mean',inplace=True,ascending=True)\n data.to_csv('meanstd.csv')\n data.plot(x='amplicon')\n\n\n\n # plt.subplot(2)\n #\n plt.savefig('test.stat.png', dpi=300)\n # plt.subplot(212)\n subdata =data[data['mean'] < 3000]\n subdata.plot(x='amplicon',title=subdata.shape[0])\n\n plt.plot(y=500)\n plt.savefig('test2.png',dpi=300)\n\n\n # merge_data=merge_data.transpose()\n # print(merge_data.columns)\n # merge_data.plot(kind='box')\n # plt.savefig('test1.stat.png', dpi=300)\n #\n # print(merge_data.shape)\n\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.use",
"pandas.merge",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"pandas.read_csv"
]
] |
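The script above builds its per-amplicon summary by looping over DataFrame rows one at a time; an equivalent vectorized sketch, assuming (as in the script) that column 0 holds the amplicon name and the remaining columns hold per-sample numeric values:

import pandas as pd

def summarize(merge_data: pd.DataFrame) -> pd.DataFrame:
    # Row-wise statistics over the sample columns, computed in one pass.
    values = merge_data.iloc[:, 1:]
    mean, std = values.mean(axis=1), values.std(axis=1)
    summary = pd.DataFrame({
        'amplicon': merge_data.iloc[:, 0],
        'mean': mean,
        'mean+2std': mean + 2 * std,
        'mean-2std': mean - 2 * std,
        'min': values.min(axis=1),
    })
    return summary.sort_values('mean', ascending=True)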
VincentKaras/end2you | [
"2e49bc825ddf98dd49215ec2317247985de9070e"
] | [
"end2you/models/audio/zhao19.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom .base import Base\n\n\nclass Zhao19(nn.Module):\n \n def __init__(self, input_size:int):\n \"\"\" Speech emotion recognition model proposed in:\n \n `Zhao, J., Mao, X. and Chen, L.. \"Speech emotion recognition using \n deep 1D & 2D CNN LSTM networks\". Biomedical Signal Processing and Control, \n 47, (pp.312-323), 2019.`\n \n Args:\n input_size (int): Input size to the model. \n \"\"\"\n \n super(Zhao19, self).__init__()\n self.model, self.num_features = self.build_audio_model(input_size)\n \n def build_audio_model(self, input_size:int):\n \"\"\" Build the audio model: 3 blocks of convolution + max-pooling.\n \n Args:\n input_size (int): Input size of frame.\n \"\"\"\n \n out_channels = [64, 64, 128, 128]\n in_channels = [1]\n in_channels.extend([x for x in out_channels[:-1]])\n kernel_size = [3, 3, 3, 3]\n stride = [1, 1, 1, 1]\n padding = ((np.array(kernel_size)-1)//2).tolist()\n \n num_layers = len(in_channels)\n conv_args = {\n f'layer{i}':\n {\n 'in_channels': in_channels[i],\n 'out_channels': out_channels[i],\n 'kernel_size': kernel_size[i],\n 'stride': stride[i],\n 'padding': padding[i]\n }\n for i in range(num_layers)\n }\n \n kernel_size = [4, 4, 4, 4]\n stride = [4, 4, 4, 4]\n maxpool_args = {f'layer{i}': {\n 'kernel_size': kernel_size[i],\n 'stride': stride[i]\n } for i in range(num_layers)\n }\n \n audio_model = Base(conv_args, maxpool_args, normalize=True, activ_fn=nn.ELU())\n conv_red_size = Base._num_out_features(input_size, conv_args, maxpool_args)\n num_layers = len(in_channels) - 1\n num_out_features = conv_red_size*conv_args[f'layer{num_layers}']['out_channels']\n \n return audio_model, num_out_features\n \n def forward(self, x):\n '''\n Args:\n x (BS x 1 x T)\n '''\n return self.model(x)\n"
] | [
[
"numpy.array",
"torch.nn.ELU"
]
] |
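A quick check of `build_audio_model`'s feature arithmetic: each conv layer (kernel 3, stride 1, padding 1) preserves sequence length, and each max-pool (kernel 4, stride 4) divides it by 4, so the four blocks shrink the input roughly 256-fold before multiplying by the final 128 channels. A standalone sketch of that computation, using the standard output-length formula (`Base._num_out_features` is assumed to apply the same rule):

def out_len(length, kernel, stride, padding=0):
    # Standard 1-D conv/pool output-length formula.
    return (length + 2 * padding - kernel) // stride + 1

def zhao19_num_features(input_size):
    length = input_size
    for _ in range(4):                                           # four conv + pool blocks
        length = out_len(length, kernel=3, stride=1, padding=1)  # conv: length unchanged
        length = out_len(length, kernel=4, stride=4)             # pool: length // 4
    return length * 128                                          # channels of the last block

print(zhao19_num_features(96000))  # e.g. 6 s of 16 kHz audio -> 375 * 128 = 48000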
XL2248/MSCTD | [
"0bcba846855274c56e0c6b1321641ec624644bdf"
] | [
"src_code/thumt-dialog-wo-sp-decoder-w-mask-all-mlp-four/thumt/data/dataset.py"
] | [
"# coding=utf-8\n# Copyright 2017-2019 The THUMT Authors\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math, code,os\nimport operator\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport thumt.utils.distribute as distribute\n\n\ndef batch_examples(example, batch_size, max_length, mantissa_bits,\n shard_multiplier=1, length_multiplier=1, constant=False,\n num_threads=4, drop_long_sequences=True):\n \"\"\" Batch examples\n\n :param example: A dictionary of <feature name, Tensor>.\n :param batch_size: The number of tokens or sentences in a batch\n :param max_length: The maximum length of a example to keep\n :param mantissa_bits: An integer\n :param shard_multiplier: an integer increasing the batch_size to suit\n splitting across data shards.\n :param length_multiplier: an integer multiplier that is used to\n increase the batch sizes and sequence length tolerance.\n :param constant: Whether to use constant batch size\n :param num_threads: Number of threads\n :param drop_long_sequences: Whether to drop long sequences\n\n :returns: A dictionary of batched examples\n \"\"\"\n\n with tf.name_scope(\"batch_examples\"):\n max_length = max_length or batch_size\n min_length = 8\n mantissa_bits = mantissa_bits\n\n # Compute boundaries\n x = min_length\n boundaries = []\n\n while x < max_length:\n boundaries.append(x)\n x += 2 ** max(0, int(math.log(x, 2)) - mantissa_bits)\n\n # Whether the batch size is constant\n if not constant:\n batch_sizes = [max(1, batch_size // length)\n for length in boundaries + [max_length]]\n batch_sizes = [b * shard_multiplier for b in batch_sizes]\n bucket_capacities = [2 * b for b in batch_sizes]\n else:\n batch_sizes = batch_size * shard_multiplier\n bucket_capacities = [2 * n for n in boundaries + [max_length]]\n\n max_length *= length_multiplier\n boundaries = [boundary * length_multiplier for boundary in boundaries]\n max_length = max_length if drop_long_sequences else 10 ** 9\n\n # The queue to bucket on will be chosen based on maximum length\n max_example_length = 0\n for v in example.values():\n if v.shape.ndims > 0:\n seq_length = tf.shape(v)[0]\n max_example_length = tf.maximum(max_example_length, seq_length)\n\n (_, outputs) = tf.contrib.training.bucket_by_sequence_length(\n max_example_length,\n example,\n batch_sizes,\n [b + 1 for b in boundaries],\n num_threads=num_threads,\n capacity=2, # Number of full batches to store, we don't need many.\n bucket_capacities=bucket_capacities,\n dynamic_pad=True,\n keep_input=(max_example_length <= max_length)\n )\n\n return outputs\n\ndef get_turn_position_eos(file1):\n with open(file1, 'r', encoding='utf-8') as fr:\n content = fr.readlines()\n turn_position = []\n mask = []\n for line in content:\n tmp = []\n mask_tmp = []\n index = 0\n flag = 0\n lines = line.strip() + \" <eos>\"\n for i in lines.strip().split(' ')[::-1]:\n #tmp.append(str(index))\n# flag = 0\n if i == '[SEP]':\n index += 1\n flag = 1\n tmp.append(str(index))\n mask_tmp.append(str(flag))\n if len(lines.strip().split(' ')) != len(tmp):\n print(line)\n turn_position.append(tmp)\n mask.append(mask_tmp)\n\n base_path = '/'.join(file1.split('/')[:-1])\n signal = file1.split('/')[-1] #.split('.')[0]\n position_file = base_path + '/' + signal + '.turn_position'\n mask_file = base_path + '/' + signal + '.mask'\n\n# if os.path.exists(position_file) and os.path.exists(mask_file):\n if os.path.exists(position_file) and os.path.exists(mask_file) and not 
os.path.getsize(position_file) and not os.path.getsize(mask_file):\n return position_file, mask_file\n\n with open(position_file, 'w', encoding='utf-8') as fw:\n for line_position in turn_position:\n line_position = sorted(line_position, reverse=True)\n fw.write(' '.join(line_position) + '\\n')\n\n with open(mask_file, 'w', encoding='utf-8') as fw:\n for line_mask in mask:\n line_mask = sorted(line_mask, reverse=True)\n fw.write(' '.join(line_mask) + '\\n')\n #code.interact(local=locals())\n return position_file, mask_file\n\ndef get_turn_position(file1):\n with open(file1, 'r', encoding='utf-8') as fr:\n content = fr.readlines()\n turn_position = []\n for line in content:\n tmp = []\n index = 0\n for i in line.strip().split(' ')[::-1]:\n #tmp.append(str(index))\n if i == '[SEP]':\n index += 1\n tmp.append(str(index))\n# if len(line.strip().split()) != len(tmp):\n if len(line.strip().split(' ')) != len(tmp):\n print(line)\n turn_position.append(tmp)\n base_path = '/'.join(file1.split('/')[:-1])\n signal = file1.split('/')[-1] #.split('.')[0]\n position_file = base_path + '/' + signal + '.turn_position'\n if os.path.exists(position_file):\n return position_file\n with open(position_file, 'w', encoding='utf-8') as fw:\n for line_position in turn_position:\n line_position = sorted(line_position, reverse=True)\n fw.write(' '.join(line_position) + '\\n')\n #code.interact(local=locals())\n return position_file\n\ndef get_turn_position_src(file1, file2):\n with open(file1, 'r', encoding='utf-8') as fr:\n sentence = fr.readlines()\n with open(file2, 'r', encoding='utf-8') as fr:\n content = fr.readlines()\n\n turn_position = []\n mask = []\n for src, ctx in zip(sentence, content):\n line = ctx.replace('\\n', ' ') + src.replace('\\n', ' ') + '<eos> '\n tmp = []\n mask_sent = []\n index = 1\n flag = 0\n for i in line.split()[::-1]:\n if flag == 0:\n mask_sent.append(str(1))\n tmp.append(str(0))\n else:\n #mask_sent.append(str(0))\n tmp.append(str(index))\n if i == '[SEP]':\n index += 1\n\n if i == '<eos>':\n flag = 1\n\n if len(line.split()) != len(tmp):\n print(line)\n turn_position.append(tmp)\n mask.append(mask_sent) \n\n base_path = '/'.join(file1.split('/')[:-1])\n signal = file1.split('/')[-1] #.split('.')[0]\n position_file = base_path + '/' + signal + '.src_ctx_turn_position'\n mask_file = base_path + '/' + signal + '.src_ctx.mask'\n\n with open(position_file, 'w', encoding='utf-8') as fw:\n for line_position in turn_position:\n line_position = sorted(line_position, reverse=True)\n fw.write(' '.join(line_position) + '\\n')\n\n with open(mask_file, 'w', encoding='utf-8') as fw:\n for sub_mask in mask:\n fw.write(' '.join(sub_mask) + '\\n')\n\n #code.interact(local=locals())\n return position_file, mask_file\n\ndef get_sampled_file(file1):\n\n with open(file1, 'r', encoding='utf-8') as fr:\n sentence = fr.readlines()\n random.shuffle(sentence)\n base_path = '/'.join(file1.split('/')[:-1])\n signal = file1.split('/')[-1]\n sample_file = base_path + '/' + signal + '.sample'\n with open(sample_file, 'w', encoding='utf-8') as fw:\n for sa in sentence:\n fw.write(' '.join(sa.strip().split()) + '\\n')\n return sample_file\n\ndef get_training_input_contextual(filenames, params):\n \"\"\" Get input for training stage\n\n :param filenames: A list contains [source_filenames, target_filenames]\n :param params: Hyper-parameters\n\n :returns: A dictionary of pair <Key, Tensor>\n \"\"\"\n\n with tf.device(\"/cpu:0\"):\n src_dataset = tf.data.TextLineDataset(filenames[0])\n tgt_dataset = 
tf.data.TextLineDataset(filenames[1])\n #sample_file = get_sampled_file(filenames[1])\n sample_dataset = tf.data.TextLineDataset(params.sample)\n \n context_source_dataset = tf.data.TextLineDataset(params.context_source)\n ctx_dia_src_dataset = tf.data.TextLineDataset(params.dialog_src_context)\n ctx_dia_tgt_dataset = tf.data.TextLineDataset(params.dialog_tgt_context)\n# ctx_sty_src_dataset = tf.data.TextLineDataset(params.style_src_context)\n# ctx_sty_tgt_dataset = tf.data.TextLineDataset(params.style_tgt_context)\n# ctx_lan_src_dataset = tf.data.TextLineDataset(params.language_src_context)\n# ctx_lan_tgt_dataset = tf.data.TextLineDataset(params.language_tgt_context)\n\n position_file_ctx_src = get_turn_position(params.context_source)\n\n position_file_src_dia = get_turn_position(params.dialog_src_context)\n# position_file_ctx_src, mask_file = get_turn_position_src(filenames[0], params.dialog_src_context)\n position_file_tgt_dia = get_turn_position(params.dialog_tgt_context)\n# position_file_src_sty = get_turn_position(params.style_src_context)\n# position_file_tgt_sty = get_turn_position(params.style_tgt_context)\n# position_file_src_lan = get_turn_position(params.language_src_context)\n# position_file_tgt_lan = get_turn_position(params.language_tgt_context)\n\n position_ctx_src_dataset = tf.data.TextLineDataset(position_file_ctx_src)\n# mask_dataset = tf.data.TextLineDataset(mask_file)\n\n position_src_dia_dataset = tf.data.TextLineDataset(position_file_src_dia)\n position_tgt_dia_dataset = tf.data.TextLineDataset(position_file_tgt_dia)\n# position_src_sty_dataset = tf.data.TextLineDataset(position_file_src_sty)\n# position_tgt_sty_dataset = tf.data.TextLineDataset(position_file_tgt_sty)\n# position_src_lan_dataset = tf.data.TextLineDataset(position_file_src_lan)\n# position_tgt_lan_dataset = tf.data.TextLineDataset(position_file_tgt_lan)\n# code.interact(local=locals())\n# dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset, emo_dataset, ctx_dia_src_dataset, ctx_dia_tgt_dataset, ctx_sty_src_dataset, ctx_sty_tgt_dataset, ctx_lan_src_dataset, ctx_lan_tgt_dataset, position_src_dia_dataset, position_tgt_dia_dataset, position_src_sty_dataset, position_tgt_sty_dataset, position_src_lan_dataset, position_tgt_lan_dataset, sample_dataset))\n dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset, context_source_dataset, position_ctx_src_dataset, ctx_dia_src_dataset, ctx_dia_tgt_dataset, position_src_dia_dataset, position_tgt_dia_dataset, sample_dataset))\n\n if distribute.is_distributed_training_mode():\n dataset = dataset.shard(distribute.size(), distribute.rank())\n\n dataset = dataset.shuffle(params.buffer_size)\n dataset = dataset.repeat()\n\n # Split string\n dataset = dataset.map(\n lambda src, tgt, ctx_src, pos_ctx_src, ctx_dia_src, ctx_dia_tgt, pos_dia_src, pos_dia_tgt, sample: (\n tf.string_split([src]).values,\n tf.string_split([tgt]).values,\n tf.string_split([ctx_src]).values,\n tf.string_split([pos_ctx_src]).values,\n tf.string_split([ctx_dia_src]).values,\n tf.string_split([ctx_dia_tgt]).values,\n tf.string_split([pos_dia_src]).values,\n tf.string_split([pos_dia_tgt]).values,\n tf.string_split([sample]).values,\n ),\n num_parallel_calls=params.num_threads\n )\n\n # Append <eos> symbol\n dataset = dataset.map(\n lambda src, tgt, ctx_src, pos_ctx_src, ctx_dia_src, ctx_dia_tgt, pos_dia_src, pos_dia_tgt, sample: (\n src,\n tf.concat([tgt, [tf.constant(params.eos)]], axis=0),\n ctx_src, #tf.concat([ctx_src, [tf.constant(params.eos)]], axis=0),\n pos_ctx_src,\n 
ctx_dia_src,#tf.concat([src, [tf.constant(params.eos)], ctx_dia_src], axis=0),\n ctx_dia_tgt, \n pos_dia_src, \n pos_dia_tgt, \n sample\n ),\n num_parallel_calls=params.num_threads\n )\n\n # Convert to dictionary\n dataset = dataset.map(\n lambda src, tgt, ctx_src, pos_ctx_src, ctx_dia_src, ctx_dia_tgt, pos_dia_src, pos_dia_tgt, sample: {\n \"source\": src,\n \"target\": tgt,\n \"context_source\": ctx_src,\n \"position_ctx_src\": pos_ctx_src,\n \"context_dia_src\": ctx_dia_src,\n \"context_dia_tgt\": ctx_dia_tgt,\n \"position_dia_src\": pos_dia_src,\n \"position_dia_tgt\": pos_dia_tgt,\n \"sample\": sample,\n \"source_length\": tf.shape(src),\n \"target_length\": tf.shape(tgt),\n \"context_source_length\": tf.shape(ctx_src),\n \"context_dia_src_length\": tf.shape(ctx_dia_src),\n \"context_dia_tgt_length\": tf.shape(ctx_dia_tgt),\n \"sample_length\": tf.shape(sample)\n },\n num_parallel_calls=params.num_threads\n )\n\n # Create iterator\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n # Create lookup table\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n tgt_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"target\"]),\n default_value=params.mapping[\"target\"][params.unk]\n )\n pos_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"position\"]),\n default_value=1\n )\n\n # String to index lookup\n features[\"source\"] = src_table.lookup(features[\"source\"])\n features[\"target\"] = tgt_table.lookup(features[\"target\"])\n features[\"context_source\"] = src_table.lookup(features[\"context_source\"])\n\n features[\"sample\"] = tgt_table.lookup(features[\"sample\"])\n features[\"context_dia_src\"] = src_table.lookup(features[\"context_dia_src\"])\n\n features[\"context_dia_tgt\"] = tgt_table.lookup(features[\"context_dia_tgt\"])\n\n# features[\"emotion\"] = emo_table.lookup(features[\"emotion\"])\n features[\"position_ctx_src\"] = pos_table.lookup(features[\"position_ctx_src\"])\n features[\"position_dia_src\"] = pos_table.lookup(features[\"position_dia_src\"])\n features[\"position_dia_tgt\"] = pos_table.lookup(features[\"position_dia_tgt\"])\n\n # Batching\n features = batch_examples(features, params.batch_size,\n params.max_length, params.mantissa_bits,\n shard_multiplier=len(params.device_list),\n length_multiplier=params.length_multiplier,\n constant=params.constant_batch_size,\n num_threads=params.num_threads)\n\n # Convert to int32\n features[\"source\"] = tf.to_int32(features[\"source\"])\n features[\"context_source\"] = tf.to_int32(features[\"context_source\"])\n features[\"target\"] = tf.to_int32(features[\"target\"])\n features[\"sample\"] = tf.to_int32(features[\"sample\"])\n\n\n features[\"source_length\"] = tf.to_int32(features[\"source_length\"])\n features[\"target_length\"] = tf.to_int32(features[\"target_length\"])\n features[\"sample_length\"] = tf.to_int32(features[\"sample_length\"])\n features[\"context_source_length\"] = tf.to_int32(features[\"context_source_length\"])\n\n features[\"context_source_length\"] = tf.squeeze(features[\"context_source_length\"], 1)\n features[\"source_length\"] = tf.squeeze(features[\"source_length\"], 1)\n features[\"target_length\"] = tf.squeeze(features[\"target_length\"], 1)\n features[\"sample_length\"] = tf.squeeze(features[\"sample_length\"], 1)\n\n features[\"context_dia_src\"] = 
tf.to_int32(features[\"context_dia_src\"])\n features[\"context_dia_tgt\"] = tf.to_int32(features[\"context_dia_tgt\"])\n \n features[\"position_ctx_src\"] = tf.to_int32(features[\"position_ctx_src\"])\n features[\"position_dia_src\"] = tf.to_int32(features[\"position_dia_src\"])\n features[\"position_dia_tgt\"] = tf.to_int32(features[\"position_dia_tgt\"])\n\n features[\"context_dia_src_length\"] = tf.to_int32(features[\"context_dia_src_length\"])\n features[\"context_dia_src_length\"] = tf.squeeze(features[\"context_dia_src_length\"], 1)\n features[\"context_dia_tgt_length\"] = tf.to_int32(features[\"context_dia_tgt_length\"])\n features[\"context_dia_tgt_length\"] = tf.squeeze(features[\"context_dia_tgt_length\"], 1)\n\n return features\n\ndef get_training_input_contextual_emo(filenames, params):\n\n \"\"\" Get input for training stage\n\n :param filenames: A list contains [source_filenames, target_filenames]\n :param params: Hyper-parameters\n\n :returns: A dictionary of pair <Key, Tensor>\n \"\"\"\n #print(\"filenames:\", filenames)\n with tf.device(\"/cpu:0\"):\n\n datasets = []\n #code.interact(local=locals())\n for data in filenames:# bianli 4 ge file\n dataset = tf.data.Dataset.from_tensor_slices(data)\n # Split string\n dataset = dataset.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n # Append <eos>\n dataset = dataset.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n datasets.append(dataset)\n #code.interact(local=locals())\n dataset = tf.data.Dataset.zip(tuple(datasets))\n dataset = dataset.shuffle(params.buffer_size)\n dataset = dataset.repeat()\n #for one_element in tfe.Iterator(dataset):\n # print(one_element)\n # Convert to dictionary\n dataset = dataset.map(\n lambda *x: {\n \"source\": x[0],\n \"target\": x[1],\n \"context\": x[2],\n \"emotion\": x[3],\n \"source_length\": tf.shape(x[0]),\n \"target_length\": tf.shape(x[1]),\n \"context_length\": tf.shape(x[2])\n },\n num_parallel_calls=params.num_threads\n )\n\n # Create iterator\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n # Create lookup table\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n tgt_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"target\"]),\n default_value=params.mapping[\"target\"][params.unk]\n )\n\n # String to index lookup\n features[\"source\"] = src_table.lookup(features[\"source\"])\n features[\"target\"] = tgt_table.lookup(features[\"target\"])\n features[\"context\"] = src_table.lookup(features[\"context\"])\n features[\"emotion\"] = src_table.lookup(features[\"emotion\"])\n\n # Batching\n features = batch_examples(features, params.batch_size,\n params.max_length, params.mantissa_bits,\n shard_multiplier=len(params.device_list),\n length_multiplier=params.length_multiplier,\n constant=params.constant_batch_size,\n num_threads=params.num_threads)\n\n # Convert to int32\n features[\"source\"] = tf.to_int32(features[\"source\"])\n features[\"target\"] = tf.to_int32(features[\"target\"])\n features[\"context\"] = tf.to_int32(features[\"context\"])\n features[\"emotion\"] = tf.to_int32(features[\"emotion\"])\n\n features[\"source_length\"] = tf.to_int32(features[\"source_length\"])\n features[\"target_length\"] = tf.to_int32(features[\"target_length\"])\n features[\"context_length\"] = 
tf.to_int32(features[\"context_length\"])\n features[\"source_length\"] = tf.squeeze(features[\"source_length\"], 1)\n features[\"target_length\"] = tf.squeeze(features[\"target_length\"], 1)\n features[\"context_length\"] = tf.squeeze(features[\"context_length\"], 1)\n\n return features\n\ndef get_training_input(filenames, params):\n \"\"\" Get input for training stage\n\n :param filenames: A list contains [source_filenames, target_filenames]\n :param params: Hyper-parameters\n\n :returns: A dictionary of pair <Key, Tensor>\n \"\"\"\n\n with tf.device(\"/cpu:0\"):\n src_dataset = tf.data.TextLineDataset(filenames[0])\n tgt_dataset = tf.data.TextLineDataset(filenames[1])\n\n dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))\n\n if distribute.is_distributed_training_mode():\n dataset = dataset.shard(distribute.size(), distribute.rank())\n\n dataset = dataset.shuffle(params.buffer_size)\n dataset = dataset.repeat()\n\n # Split string\n dataset = dataset.map(\n lambda src, tgt: (\n tf.string_split([src]).values,\n tf.string_split([tgt]).values\n ),\n num_parallel_calls=params.num_threads\n )\n\n # Append <eos> symbol\n dataset = dataset.map(\n lambda src, tgt: (\n tf.concat([src, [tf.constant(params.eos)]], axis=0),\n tf.concat([tgt, [tf.constant(params.eos)]], axis=0)\n ),\n num_parallel_calls=params.num_threads\n )\n\n # Convert to dictionary\n dataset = dataset.map(\n lambda src, tgt: {\n \"source\": src,\n \"target\": tgt,\n \"source_length\": tf.shape(src),\n \"target_length\": tf.shape(tgt)\n },\n num_parallel_calls=params.num_threads\n )\n\n # Create iterator\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n # Create lookup table\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n tgt_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"target\"]),\n default_value=params.mapping[\"target\"][params.unk]\n )\n\n # String to index lookup\n features[\"source\"] = src_table.lookup(features[\"source\"])\n features[\"target\"] = tgt_table.lookup(features[\"target\"])\n\n # Batching\n features = batch_examples(features, params.batch_size,\n params.max_length, params.mantissa_bits,\n shard_multiplier=len(params.device_list),\n length_multiplier=params.length_multiplier,\n constant=params.constant_batch_size,\n num_threads=params.num_threads)\n\n # Convert to int32\n features[\"source\"] = tf.to_int32(features[\"source\"])\n features[\"target\"] = tf.to_int32(features[\"target\"])\n features[\"source_length\"] = tf.to_int32(features[\"source_length\"])\n features[\"target_length\"] = tf.to_int32(features[\"target_length\"])\n features[\"source_length\"] = tf.squeeze(features[\"source_length\"], 1)\n features[\"target_length\"] = tf.squeeze(features[\"target_length\"], 1)\n\n return features\n\n\ndef sort_input_file(filename, reverse=True):\n # Read file\n with tf.gfile.Open(filename) as fd:\n inputs = [line.strip() for line in fd]\n\n input_lens = [\n (i, len(line.strip().split())) for i, line in enumerate(inputs)\n ]\n\n sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1),\n reverse=reverse)\n sorted_keys = {}\n sorted_inputs = []\n\n for i, (index, _) in enumerate(sorted_input_lens):\n sorted_inputs.append(inputs[index])\n sorted_keys[index] = i\n\n return sorted_keys, sorted_inputs\n\ndef sort_input_file_ctx(filename, f1, f2, f3, f4, f5, f6, reverse=True):\n # Read file\n with 
tf.gfile.Open(filename) as fd:\n inputs = [line.strip() for line in fd]\n\n # with tf.gfile.Open(filename_ctx) as fd:\n # ctxs = [line.strip() for line in fd]\n\n with tf.gfile.Open(f1) as fd:\n ctx1 = [line.strip() for line in fd]\n with tf.gfile.Open(f2) as fd:\n ctx2 = [line.strip() for line in fd]\n with tf.gfile.Open(f3) as fd:\n ctx3 = [line.strip() for line in fd]\n with tf.gfile.Open(f4) as fd:\n ctx4 = [line.strip() for line in fd]\n with tf.gfile.Open(f5) as fd:\n ctx5 = [line.strip() for line in fd]\n with tf.gfile.Open(f6) as fd:\n ctx6 = [line.strip() for line in fd]\n\n# return inputs,ctx1,ctx2,ctx3,ctx4,ctx5,ctx6,ctx7,ctx8,ctx9,ctx10,ctx11,ctx12,ctx13\n input_lens = [\n (i, len(line.strip().split())) for i, line in enumerate(inputs)\n ]\n\n sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1),\n reverse=reverse)\n sorted_keys = {}\n sorted_inputs = []\n sorted_ctxs = []\n dialog_src_context, pos_src_dia, dialog_tgt_context, pos_tgt_dia, style_src_context, pos_src_sty = [], [], [], [],[], []\n\n for i, (index, _) in enumerate(sorted_input_lens):\n sorted_inputs.append(inputs[index])\n# sorted_ctxs.append(ctxs[index])\n\n dialog_src_context.append(ctx1[index])\n pos_src_dia.append(ctx2[index])\n dialog_tgt_context.append(ctx3[index])\n pos_tgt_dia.append(ctx4[index])\n style_src_context.append(ctx5[index])\n pos_src_sty.append(ctx6[index])\n\n sorted_keys[index] = i\n\n return sorted_keys, sorted_inputs, dialog_src_context, pos_src_dia, dialog_tgt_context, pos_tgt_dia, style_src_context, pos_src_sty\n\ndef sort_and_zip_files(names):\n inputs = []\n input_lens = []\n files = [tf.gfile.GFile(name) for name in names]\n\n count = 0\n\n for lines in zip(*files):\n lines = [line.strip() for line in lines]\n input_lens.append((count, len(lines[0].split())))\n inputs.append(lines)\n count += 1\n\n # Close files\n for fd in files:\n fd.close()\n\n sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1),\n reverse=True)\n sorted_inputs = []\n\n for i, (index, _) in enumerate(sorted_input_lens):\n sorted_inputs.append(inputs[index])\n #a = []\n #for x in zip(*sorted_inputs):\n # a.append(list(x))\n #code.interact(local=locals())\n return [list(x) for x in zip(*sorted_inputs)]\n\ndef get_evaluation_input_ctx(inputs, params):\n with tf.device(\"/cpu:0\"):\n # Create datasets\n datasets = []\n print(len(inputs))\n for i, data in enumerate(inputs):\n dataset = tf.data.Dataset.from_tensor_slices(data)\n # Split string\n dataset = dataset.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n # Append <eos>\n if i > 7:\n dataset = dataset.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n else:\n dataset = dataset.map(\n lambda x: x,\n num_parallel_calls=params.num_threads\n )\n\n datasets.append(dataset)\n\n dataset = tf.data.Dataset.zip(tuple(datasets))\n\n # Convert tuple to dictionary\n dataset = dataset.map(\n lambda *x: {\n \"source\": x[0],\n \"source_length\": tf.shape(x[0])[0],\n \"context_dia_src\": x[1],\n \"position_dia_src\": x[2],\n \"context_dia_src_length\": tf.shape(x[1])[0],\n \"context_dia_tgt\": x[3],\n \"position_dia_tgt\": x[4],\n \"context_dia_tgt_length\": tf.shape(x[3])[0],\n \"position_ctx_src\": x[5],\n \"sample\": x[6],\n \"sample_length\": tf.shape(x[6])[0],\n \"context_source\": x[7],\n \"context_source_length\": tf.shape(x[7])[0],\n \"references\": x[8:]\n },\n num_parallel_calls=params.num_threads\n )\n# code.interact(local=locals())\n dataset = 
dataset.padded_batch(\n params.eval_batch_size,\n {\n \"source\": [tf.Dimension(None)],\n \"source_length\": [],\n \"context_dia_src\": [tf.Dimension(None)],\n \"context_dia_tgt\": [tf.Dimension(None)],\n \"context_source\": [tf.Dimension(None)],\n \"context_dia_src_length\": [],\n \"context_dia_tgt_length\": [],\n \"context_source_length\": [],\n \"position_dia_src\": [tf.Dimension(None)],\n \"position_dia_tgt\": [tf.Dimension(None)],\n \"position_ctx_src\": [tf.Dimension(None)],\n \"sample\": [tf.Dimension(None)],\n \"sample_length\": [],\n \"references\": (tf.Dimension(None),) * (len(inputs) - 8)\n },\n {\n \"source\": params.pad,\n \"source_length\": 0,\n \"context_dia_src\": params.pad,\n \"context_dia_tgt\": params.pad,\n \"context_source\": params.pad,\n \"context_dia_src_length\": 0,\n \"context_dia_tgt_length\": 0,\n \"context_source_length\": 0,\n \"position_dia_src\": params.pad,\n \"position_dia_tgt\": params.pad,\n \"position_ctx_src\": params.pad,\n \"sample\": params.pad,\n \"sample_length\": 0,\n \"references\": (params.pad,) * (len(inputs) - 8)\n }\n )\n\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n # Covert source symbols to ids\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n tgt_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"target\"]),\n default_value=params.mapping[\"target\"][params.unk]\n )\n pos_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"position\"]),\n default_value=1\n )\n features[\"source\"] = src_table.lookup(features[\"source\"])\n features[\"sample\"] = tgt_table.lookup(features[\"sample\"])\n\n features[\"context_source\"] = src_table.lookup(features[\"context_source\"])\n\n features[\"context_dia_src\"] = src_table.lookup(features[\"context_dia_src\"])\n features[\"context_dia_tgt\"] = tgt_table.lookup(features[\"context_dia_tgt\"])\n\n\n features[\"position_dia_src\"] = pos_table.lookup(features[\"position_dia_src\"])\n features[\"position_dia_tgt\"] = pos_table.lookup(features[\"position_dia_tgt\"])\n\n features[\"position_ctx_src\"] = pos_table.lookup(features[\"position_ctx_src\"])\n\n return features\n\ndef get_evaluation_input(inputs, params):\n with tf.device(\"/cpu:0\"):\n # Create datasets\n datasets = []\n\n for data in inputs:\n dataset = tf.data.Dataset.from_tensor_slices(data)\n # Split string\n dataset = dataset.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n # Append <eos>\n dataset = dataset.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n datasets.append(dataset)\n\n dataset = tf.data.Dataset.zip(tuple(datasets))\n\n # Convert tuple to dictionary\n dataset = dataset.map(\n lambda *x: {\n \"source\": x[0],\n \"source_length\": tf.shape(x[0])[0],\n \"references\": x[1:]\n },\n num_parallel_calls=params.num_threads\n )\n\n dataset = dataset.padded_batch(\n params.eval_batch_size,\n {\n \"source\": [tf.Dimension(None)],\n \"source_length\": [],\n \"references\": (tf.Dimension(None),) * (len(inputs) - 1)\n },\n {\n \"source\": params.pad,\n \"source_length\": 0,\n \"references\": (params.pad,) * (len(inputs) - 1)\n }\n )\n\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n # Covert source symbols to ids\n src_table = tf.contrib.lookup.index_table_from_tensor(\n 
tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n\n features[\"source\"] = src_table.lookup(features[\"source\"])\n\n return features\n\ndef get_inference_input_bak(inputs, params):\n if params.generate_samples:\n batch_size = params.sample_batch_size\n else:\n batch_size = params.decode_batch_size\n\n with tf.device(\"/cpu:0\"):\n dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant(inputs)\n )\n\n # Split string\n dataset = dataset.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n\n # Append <eos>\n dataset = dataset.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n\n # Convert tuple to dictionary\n dataset = dataset.map(\n lambda x: {\"source\": x, \"source_length\": tf.shape(x)[0]},\n num_parallel_calls=params.num_threads\n )\n\n dataset = dataset.padded_batch(\n batch_size * len(params.device_list),\n {\"source\": [tf.Dimension(None)], \"source_length\": []},\n {\"source\": params.pad, \"source_length\": 0}\n )\n\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n features[\"source\"] = src_table.lookup(features[\"source\"])\n\n return features\n\ndef get_inference_input(inputs, data_type, params):\n\n with tf.device(\"/cpu:0\"):\n dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant(inputs)\n )\n\n # Split string\n dataset = dataset.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n\n # Append <eos>\n if data_type == \"x\":\n print(\"input x:\", data_type)\n dataset = dataset.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n else:\n dataset = dataset.map(\n lambda x: x,\n num_parallel_calls=params.num_threads\n )\n\n # Convert tuple to dictionary\n dataset = dataset.map(\n lambda x: {\"source\": x, \"source_length\": tf.shape(x)[0]},\n num_parallel_calls=params.num_threads\n )\n\n dataset = dataset.padded_batch(\n params.decode_batch_size * len(params.device_list),\n {\"source\": [tf.Dimension(None)], \"source_length\": []},\n {\"source\": params.pad, \"source_length\": 0}\n )\n\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n pos_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"position\"]),\n default_value=10\n )\n if data_type == \"x\":\n features[\"source\"] = src_table.lookup(features[\"source\"])\n if data_type == \"ctx\":\n features[\"source\"] = src_table.lookup(features[\"source\"])\n if data_type == \"pos\":\n features[\"source\"] = pos_table.lookup(features[\"source\"])\n\n return features\n\n\ndef get_relevance_input(inputs, outputs, params):\n # inputs\n dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant(inputs)\n )\n\n # Split string\n dataset = dataset.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n\n # Append <eos>\n dataset = dataset.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n\n # Convert tuple to dictionary\n dataset = dataset.map(\n lambda x: {\"source\": x, 
\"source_length\": tf.shape(x)[0]},\n num_parallel_calls=params.num_threads\n )\n\n dataset = dataset.padded_batch(\n params.decode_batch_size,\n {\"source\": [tf.Dimension(None)], \"source_length\": []},\n {\"source\": params.pad, \"source_length\": 0}\n )\n\n iterator = dataset.make_one_shot_iterator()\n features = iterator.get_next()\n\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"source\"]),\n default_value=params.mapping[\"source\"][params.unk]\n )\n features[\"source\"] = src_table.lookup(features[\"source\"])\n\n # outputs\n dataset_o = tf.data.Dataset.from_tensor_slices(\n tf.constant(outputs)\n )\n\n # Split string\n dataset_o = dataset_o.map(lambda x: tf.string_split([x]).values,\n num_parallel_calls=params.num_threads)\n\n # Append <eos>\n dataset_o = dataset_o.map(\n lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),\n num_parallel_calls=params.num_threads\n )\n\n # Convert tuple to dictionary\n dataset_o = dataset_o.map(\n lambda x: {\"target\": x, \"target_length\": tf.shape(x)[0]},\n num_parallel_calls=params.num_threads\n )\n\n dataset_o = dataset_o.padded_batch(\n params.decode_batch_size,\n {\"target\": [tf.Dimension(None)], \"target_length\": []},\n {\"target\": params.pad, \"target_length\": 0}\n )\n\n iterator = dataset_o.make_one_shot_iterator()\n features_o = iterator.get_next()\n\n src_table = tf.contrib.lookup.index_table_from_tensor(\n tf.constant(params.vocabulary[\"target\"]),\n default_value=params.mapping[\"target\"][params.unk]\n )\n features[\"target\"] = src_table.lookup(features_o[\"target\"])\n features[\"target_length\"] = features_o[\"target_length\"]\n\n return features\n"
] | [
[
"tensorflow.shape",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.contrib.training.bucket_by_sequence_length",
"tensorflow.gfile.Open",
"tensorflow.gfile.GFile",
"tensorflow.Dimension",
"tensorflow.constant",
"tensorflow.data.TextLineDataset",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.device",
"tensorflow.maximum",
"tensorflow.string_split",
"tensorflow.data.Dataset.zip",
"tensorflow.to_int32"
]
] |
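The `get_turn_position*` helpers above share one core idea: walk the token sequence right-to-left and label each token with the number of `[SEP]` markers seen so far, i.e. its dialogue-turn distance from the current utterance. A minimal standalone sketch of that loop (note the originals reorder the string labels with `sorted(..., reverse=True)`, which coincides with simply reversing the list only while turn indices stay single-digit):

def turn_positions(line: str) -> list:
    # Each token is tagged with how many '[SEP]' markers lie to its right;
    # the '[SEP]' token itself takes the incremented index, as in the original.
    positions, index = [], 0
    for tok in reversed(line.strip().split(' ')):
        if tok == '[SEP]':
            index += 1
        positions.append(index)
    positions.reverse()  # restore left-to-right order
    return positions

print(turn_positions('hi there [SEP] how are you'))  # [1, 1, 1, 0, 0, 0]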
H1d3r/getaltname | [
"0aa63c648e0b8613c0a04af4af5aa87349e77394"
] | [
"gsan/output.py"
] | [
"import json\nimport click\nimport pandas as pd\n\n\ndef dump_filename(filename, subdomain_df):\n \"\"\"Output to CSV, JSON or Clipboard.\"\"\"\n filename = filename.lower()\n if filename.endswith(\".json\"):\n click.secho(f\"\\n[+] Contents dumped into JSON file: {filename}\", bold=True)\n with open(filename, \"w+\") as file_object:\n file_object.write(json.dumps(subdomain_df.to_dict(orient=\"list\")))\n elif filename.endswith(\".csv\"):\n click.secho(f\"\\n[+] Contents dumped into CSV file: {filename}\", bold=True)\n subdomain_df.to_csv(filename, index=False)\n elif filename == \"cb\":\n click.secho(f\"\\n[+] Contents dumped into clipboard.\", bold=True)\n subdomain_df.to_clipboard(index=False)\n elif filename.endswith(\".txt\"):\n melted_df = pd.melt(subdomain_df).value.tolist()\n subdomains = [subdomain for subdomain in melted_df if str(subdomain) != \"nan\"]\n with open(filename, \"w\") as file_object:\n for subdomain in subdomains:\n file_object.write(f\"{subdomain}\\n\")\n click.secho(f\"\\n[+] Contents dumped into a text file: {filename}\", bold=True)\n else:\n click.secho(\"\\n[!] Extension not recognized, dumping using CSV format.\", bold=True)\n subdomain_df.to_csv(filename, index=False)\n"
] | [
[
"pandas.melt"
]
] |
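The `.txt` branch above flattens a column-per-host DataFrame into a single subdomain list via `pd.melt`, then filters out the NaN padding. A small usage sketch; the DataFrame contents are hypothetical:

import pandas as pd

# Hypothetical result frame: one column per scanned certificate,
# shorter columns padded with NaN.
subdomain_df = pd.DataFrame({
    'example.com': ['a.example.com', 'b.example.com'],
    'example.org': ['mail.example.org', float('nan')],
})

melted = pd.melt(subdomain_df).value.tolist()
subdomains = [s for s in melted if str(s) != 'nan']
print(subdomains)  # ['a.example.com', 'b.example.com', 'mail.example.org']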
ksg14/DHIC | [
"dd1b226496e5f0a448740cf40f4438269c25f8e4"
] | [
"utils/dataset.py"
] | [
"from pathlib import Path\nfrom typing import Callable, Tuple\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\n\nfrom PIL import Image\n# import cv2\n\nimport os\nimport numpy as np\nimport json\n\nclass HVGDataset (Dataset):\n\tdef __init__ (self, captions_file : Path, word_to_index_file : Path, index_to_word_file : Path, images_path : Path, max_len : int, text_transform : Callable=None, image_transform : Callable=None) -> None:\n\t\twith open (captions_file, 'r') as file_io:\n\t\t\tself.captions = json.load (file_io)\n\t\t\n\t\twith open (word_to_index_file, 'r') as file_io:\n\t\t\tself.word_to_index = json.load (file_io)\n\t\t\n\t\twith open (index_to_word_file, 'r') as file_io:\n\t\t\tself.index_to_word = json.load (file_io)\n\t\t\n\t\tself.max_len = max_len\n\t\tself.images_path = images_path\n\t\tself.text_transform = text_transform\n\t\tself.image_transform = image_transform\n\n\tdef __len__ (self) -> int:\n\t\treturn len (self.captions)\n\t\t\n\tdef __getitem__ (self, idx: str) -> Tuple:\n\t\timage_id = self.captions ['annotations'] [idx] ['image_id']\n\t\tcaption_str = self.captions ['annotations'] [idx] ['caption']\n\n\t\t# Image\n\t\t# image_file = os.path.join (self.images_path, f'{image_id}.jpg')\n\t\timage_file = os.path.join (self.images_path, f'16.jpg')\n\t\timage = Image.open (image_file)\n\t\tif self.image_transform:\n\t\t\timage = self.image_transform (image)\n\n\t\t# Target Caption\n\t\tif self.text_transform:\n\t\t\tcaption = self.text_transform (f\"start {caption_str}\", self.word_to_index)\n\t\t\n\t\tif self.text_transform:\n\t\t\ttarget = self.text_transform (f\"{caption_str} end\", self.word_to_index)\n\n\t\ttarget_seq_len = target.shape [0]\n\n\t\tcaption = F.pad (caption, pad=(0, self.max_len-target_seq_len))\n\t\ttarget = F.pad (target, pad=(0, self.max_len-target_seq_len))\n\t\t\n\t\treturn image, caption, target, target_seq_len\n\n# if __name__ == '__main__':\n#\t train_dataset = HVGDataset ()\n#\t train_dataloader = DataLoader (train_dataset, batch_size=1, shuffle=False)\n\n#\t for _, (image, caption, target, target_seq_len) in enumerate (train_dataloader):\n#\t\t # print (f'caption - {q}')\n#\t\t print (f'image - {image.shape}')\n#\t\t print (f'audio - {audio_file}')\n#\t\t print (f'context - {context_tensor.shape}')\n#\t\t print (f'target - {target.shape}')\n#\t\t print (f'context len - {context_len}')\n#\t\t print (f'target len - {target_len}')\n#\t\t break"
] | [
[
"torch.nn.functional.pad"
]
] |
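`__getitem__` above right-pads every caption and target tensor to `max_len` so batches stack cleanly; `F.pad` with a `(left, right)` tuple pads the last dimension with zeros. A minimal sketch of that step:

import torch
import torch.nn.functional as F

max_len = 8
target = torch.tensor([5, 17, 2])                           # token ids, length 3
padded = F.pad(target, pad=(0, max_len - target.shape[0]))  # zero-pad on the right
print(padded)  # tensor([ 5, 17,  2,  0,  0,  0,  0,  0])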
ines-chami/KnowledgeGraphEmbedding | [
"584ee29a14b82f7c64947f2f6b2c3547213b6d29"
] | [
"codes/run.py"
] | [
"#!/usr/bin/python3\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport datetime\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\n\nfrom models import EKGEModel, HKGEModel, O2MEKGEModel, EUC_MODELS,HYP_MODELS, ONE_2_MANY_E_MODELS\n\nfrom dataloader import TrainDataset\nfrom dataloader import BidirectionalOneShotIterator\nfrom radam import RiemannianAdam\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n description='Training and Testing Knowledge Graph Embedding Models',\n usage='train.py [<args>] [-h | --help]'\n )\n\n parser.add_argument('--cuda', action='store_true', help='use GPU')\n\n parser.add_argument('--do_train', action='store_true')\n parser.add_argument('--do_valid', action='store_true')\n parser.add_argument('--do_test', action='store_true')\n parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')\n\n parser.add_argument('--countries', action='store_true', help='Use Countries S1/S2/S3 datasets')\n parser.add_argument('--regions', type=int, nargs='+', default=None,\n help='Region Id for Countries S1/S2/S3 datasets, DO NOT MANUALLY SET')\n\n parser.add_argument('--data_path', type=str, default=None)\n parser.add_argument('--model', default='TransE', type=str)\n parser.add_argument('--entity_embedding_multiple', type=int, default=1)\n parser.add_argument('--relation_embedding_multiple', type=int, default=1)\n\n parser.add_argument('-n', '--negative_sample_size', default=128, type=int)\n parser.add_argument('-d', '--hidden_dim', default=500, type=int)\n parser.add_argument('-g', '--gamma', default=12.0, type=float)\n parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')\n parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)\n parser.add_argument('-b', '--batch_size', default=1024, type=int)\n parser.add_argument('-r', '--regularization', default=0.0, type=float)\n parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')\n parser.add_argument('--uni_weight', action='store_true',\n help='Otherwise use subsampling weighting like in word2vec')\n\n parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)\n parser.add_argument('-cpu', '--cpu_num', default=10, type=int)\n parser.add_argument('-init', '--init_checkpoint', default=None, type=str)\n parser.add_argument('-save', '--save_path', default=None, type=str)\n parser.add_argument('--max_steps', default=100000, type=int)\n parser.add_argument('--dropout', default=0, type=float)\n parser.add_argument('--warm_up_steps', default=None, type=int)\n parser.add_argument('--p_norm', default=1, type=int, help='p norm for scoring function')\n\n parser.add_argument('--nsib', default=1, type=int, help='number of siblings in one_to_many model')\n parser.add_argument('--rho', default=10, type=float, help='softmin param. 
in one_to_many model')\n\n parser.add_argument('--save_checkpoint_steps', default=10000, type=int)\n parser.add_argument('--valid_steps', default=10000, type=int)\n parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')\n parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')\n\n parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')\n parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')\n\n return parser.parse_args(args)\n\n\ndef override_config(args):\n '''\n Override model and data configuration\n '''\n\n with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:\n argparse_dict = json.load(fjson)\n\n args.countries = argparse_dict['countries']\n if args.data_path is None:\n args.data_path = argparse_dict['data_path']\n args.model = argparse_dict['model']\n args.entity_embedding_multiple = argparse_dict['entity_embedding_multiple']\n args.relation_embedding_multiple = argparse_dict['relation_embedding_multiple']\n args.hidden_dim = argparse_dict['hidden_dim']\n args.test_batch_size = argparse_dict['test_batch_size']\n\n\ndef save_model(model, optimizer, save_variable_list, args):\n '''\n Save the parameters of the model and the optimizer,\n as well as some other variables such as step and learning_rate\n '''\n\n argparse_dict = vars(args)\n with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:\n json.dump(argparse_dict, fjson)\n\n torch.save({\n **save_variable_list,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()},\n os.path.join(args.save_path, 'checkpoint')\n )\n\n entity_embedding = model.entity_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(args.save_path, 'entity_embedding'),\n entity_embedding\n )\n\n relation_embedding = model.relation_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(args.save_path, 'relation_embedding'),\n relation_embedding\n )\n\n\ndef read_triple(file_path, entity2id, relation2id):\n '''\n Read triples and map them into ids.\n '''\n triples = []\n with open(file_path) as fin:\n for line in fin:\n h, r, t = line.strip().split('\\t')\n triples.append((entity2id[h], relation2id[r], entity2id[t]))\n return triples\n\n\ndef set_logger(args):\n '''\n Write logs to checkpoint and console\n '''\n\n if args.do_train:\n log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')\n else:\n log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')\n\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=log_file,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\ndef init_optimizer(model, learning_rate):\n if model.model_name in HYP_MODELS:\n OptimizerClass = RiemannianAdam\n else:\n OptimizerClass = torch.optim.Adam\n return OptimizerClass(\n filter(lambda p: p.requires_grad, model.parameters()),\n lr=learning_rate\n )\n\n\ndef log_metrics(mode, step, metrics):\n '''\n Print the evaluation logs\n '''\n for metric in metrics:\n logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))\n\n\ndef write_metrics(writer, step, metrics, split=None):\n '''\n Writes metrics to tensorboard logs\n '''\n for metric in 
metrics:\n if split is not None:\n metric_name = '{}_{}'.format(split, metric)\n else:\n metric_name = metric\n writer.add_scalar(metric_name, metrics[metric], global_step=step)\n\n\ndef main(args):\n if (not args.do_train) and (not args.do_valid) and (not args.do_test):\n raise ValueError('one of train/val/test mode must be choosed.')\n\n if args.init_checkpoint:\n override_config(args)\n elif args.data_path is None:\n raise ValueError('one of init_checkpoint/data_path must be choosed.')\n\n if args.do_train and args.save_path is None:\n # create default save directory\n dt = datetime.datetime.now()\n args.save_path = os.path.join(os.environ['LOG_DIR'], args.data_path.split('/')[-1], args.model,\n datetime.datetime.now().strftime('%m%d%H%M%S'))\n # raise ValueError('Where do you want to save your trained model?')\n\n if args.save_path and not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n # Write logs to checkpoint and console\n set_logger(args)\n writer = SummaryWriter(log_dir=args.save_path)\n\n with open(os.path.join(args.data_path, 'entities.dict')) as fin:\n entity2id = dict()\n for line in fin:\n eid, entity = line.strip().split('\\t')\n entity2id[entity] = int(eid)\n\n with open(os.path.join(args.data_path, 'relations.dict')) as fin:\n relation2id = dict()\n for line in fin:\n rid, relation = line.strip().split('\\t')\n relation2id[relation] = int(rid)\n\n # Read regions for Countries S* datasets\n if args.countries:\n regions = list()\n with open(os.path.join(args.data_path, 'regions.list')) as fin:\n for line in fin:\n region = line.strip()\n regions.append(entity2id[region])\n args.regions = regions\n\n nentity = len(entity2id)\n nrelation = len(relation2id)\n\n args.nentity = nentity\n args.nrelation = nrelation\n\n logging.info('Model: %s' % args.model)\n logging.info('Data Path: %s' % args.data_path)\n logging.info('Save Path: {}'.format(args.save_path))\n logging.info('#entity: %d' % nentity)\n logging.info('#relation: %d' % nrelation)\n\n train_triples = read_triple(os.path.join(args.data_path, 'train.txt'), entity2id, relation2id)\n logging.info('#train: %d' % len(train_triples))\n valid_triples = read_triple(os.path.join(args.data_path, 'valid.txt'), entity2id, relation2id)\n logging.info('#valid: %d' % len(valid_triples))\n test_triples = read_triple(os.path.join(args.data_path, 'test.txt'), entity2id, relation2id)\n logging.info('#test: %d' % len(test_triples))\n\n # All true triples\n all_true_triples = train_triples + valid_triples + test_triples\n\n if args.model in EUC_MODELS:\n ModelClass = EKGEModel\n elif args.model in HYP_MODELS:\n ModelClass = HKGEModel\n elif args.model in ONE_2_MANY_E_MODELS:\n ModelClass = O2MEKGEModel\n else:\n raise ValueError('model %s not supported' % args.model)\n\n if ModelClass != O2MEKGEModel:\n kge_model = ModelClass(\n model_name=args.model,\n nentity=nentity,\n nrelation=nrelation,\n hidden_dim=args.hidden_dim,\n gamma=args.gamma,\n p_norm=args.p_norm,\n dropout=args.dropout,\n entity_embedding_multiple=args.entity_embedding_multiple,\n relation_embedding_multiple=args.relation_embedding_multiple\n )\n else:\n kge_model = ModelClass(\n model_name=args.model,\n nentity=nentity,\n nrelation=nrelation,\n hidden_dim=args.hidden_dim,\n gamma=args.gamma,\n p_norm=args.p_norm,\n dropout=args.dropout,\n entity_embedding_multiple=args.entity_embedding_multiple,\n relation_embedding_multiple=args.relation_embedding_multiple,\n nsiblings=args.nsib,\n rho=args.rho\n )\n\n\n logging.info('Model Parameter Configuration:')\n 
for name, param in kge_model.named_parameters():\n logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))\n\n if args.cuda:\n kge_model = kge_model.cuda()\n\n if args.do_train:\n # Set training dataloader iterator\n train_dataloader_head = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'head-batch'),\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=max(1, args.cpu_num // 2),\n collate_fn=TrainDataset.collate_fn\n )\n\n train_dataloader_tail = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'tail-batch'),\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=max(1, args.cpu_num // 2),\n collate_fn=TrainDataset.collate_fn\n )\n\n train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)\n\n # Set training configuration\n current_learning_rate = args.learning_rate\n optimizer = init_optimizer(kge_model, current_learning_rate)\n if args.warm_up_steps:\n warm_up_steps = args.warm_up_steps\n else:\n warm_up_steps = args.max_steps // 2\n\n if args.init_checkpoint:\n # Restore model from checkpoint directory\n logging.info('Loading checkpoint %s...' % args.init_checkpoint)\n checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))\n init_step = checkpoint['step']\n kge_model.load_state_dict(checkpoint['model_state_dict'])\n if args.do_train:\n current_learning_rate = checkpoint['current_learning_rate']\n warm_up_steps = checkpoint['warm_up_steps']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n else:\n logging.info('Ramdomly Initializing %s Model...' % args.model)\n init_step = 0\n\n step = init_step\n\n if args.do_train:\n logging.info('Start Training...')\n logging.info('init_step = %d' % init_step)\n logging.info('hidden_dim = %d' % args.hidden_dim)\n logging.info('learning_rate = %d' % current_learning_rate)\n logging.info('batch_size = %d' % args.batch_size)\n logging.info('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling)\n\n logging.info('gamma = %f' % args.gamma)\n logging.info('dropout = %f' % args.dropout)\n if args.negative_adversarial_sampling:\n logging.info('adversarial_temperature = %f' % args.adversarial_temperature)\n\n # Set valid dataloader as it would be evaluated during training\n training_logs = []\n\n # Training Loop\n for step in range(init_step, args.max_steps):\n\n log = kge_model.train_step(kge_model, optimizer, train_iterator, args)\n training_logs.append(log)\n write_metrics(writer, step, log, split='train')\n write_metrics(writer, step, {'current_learning_rate': current_learning_rate})\n\n if step >= warm_up_steps:\n current_learning_rate = current_learning_rate / 10\n logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))\n optimizer = init_optimizer(kge_model, current_learning_rate)\n\n warm_up_steps = warm_up_steps * 3\n\n if step % args.save_checkpoint_steps == 0:\n save_variable_list = {\n 'step': step,\n 'current_learning_rate': current_learning_rate,\n 'warm_up_steps': warm_up_steps\n }\n save_model(kge_model, optimizer, save_variable_list, args)\n\n if step % args.log_steps == 0:\n metrics = {}\n for metric in training_logs[0].keys():\n metrics[metric] = sum([log[metric] for log in training_logs]) / len(training_logs)\n log_metrics('Training average', step, metrics)\n write_metrics(writer, step, metrics, split='train')\n training_logs = []\n\n if args.do_valid and step % args.valid_steps == 
0:\n logging.info('Evaluating on Valid Dataset...')\n metrics = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)\n log_metrics('Valid', step, metrics)\n write_metrics(writer, step, metrics, split='valid')\n\n save_variable_list = {\n 'step': step,\n 'current_learning_rate': current_learning_rate,\n 'warm_up_steps': warm_up_steps\n }\n\n save_model(kge_model, optimizer, save_variable_list, args)\n\n if args.do_valid:\n logging.info('Evaluating on Valid Dataset...')\n metrics = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)\n log_metrics('Valid', step, metrics)\n\n if args.do_test:\n logging.info('Evaluating on Test Dataset...')\n metrics = kge_model.test_step(kge_model, test_triples, all_true_triples, args)\n log_metrics('Test', step, metrics)\n\n if args.evaluate_train:\n logging.info('Evaluating on Training Dataset...')\n metrics = kge_model.test_step(kge_model, train_triples, all_true_triples, args)\n log_metrics('Test', step, metrics)\n\n\nif __name__ == '__main__':\n main(parse_args())\n"
] | [
[
"torch.utils.tensorboard.SummaryWriter"
]
] |
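The training loop above uses a step-based decay: once `step` reaches `warm_up_steps`, the learning rate is divided by 10, the optimizer is rebuilt at the new rate, and the boundary itself is tripled. A minimal sketch of just the schedule, with the optimizer rebuilding omitted:

def lr_schedule(initial_lr, max_steps, warm_up_steps):
    # Divide the rate by 10 each time the warm-up boundary is crossed,
    # then push the boundary out by a factor of 3, as in main().
    lr, boundary = initial_lr, warm_up_steps
    for step in range(max_steps):
        if step >= boundary:
            lr /= 10
            boundary *= 3
        yield step, lr

rates = {lr for _, lr in lr_schedule(1e-4, 100000, 25000)}
print(sorted(rates))  # decays at steps 25000 and 75000 -> three distinct rates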
CAPSTONE-Interpreter/dev_app-data | [
"e95a4550dfaeaa4370b6337253f6b9a1cd37a2d0"
] | [
"tensorflow_examples/lite/model_maker/third_party/efficientdet/hparams_config.py"
] | [
"# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Hparams for model architecture and trainer.\"\"\"\nimport ast\nimport collections\nimport copy\nfrom typing import Any, Dict, Text\nimport six\nimport tensorflow as tf\nimport yaml\n\n\ndef eval_str_fn(val):\n if val in {'true', 'false'}:\n return val == 'true'\n try:\n return ast.literal_eval(val)\n except (ValueError, SyntaxError):\n return val\n\n\n# pylint: disable=protected-access\nclass Config(object):\n \"\"\"A config utility class.\"\"\"\n\n def __init__(self, config_dict=None):\n self.update(config_dict)\n\n def __setattr__(self, k, v):\n self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)\n\n def __getattr__(self, k):\n return self.__dict__[k]\n\n def __getitem__(self, k):\n return self.__dict__[k]\n\n def __repr__(self):\n return repr(self.as_dict())\n\n def __deepcopy__(self, memodict):\n return type(self)(self.as_dict())\n\n def __str__(self):\n try:\n return yaml.dump(self.as_dict(), indent=4)\n except TypeError:\n return str(self.as_dict())\n\n def _update(self, config_dict, allow_new_keys=True):\n \"\"\"Recursively update internal members.\"\"\"\n if not config_dict:\n return\n\n for k, v in six.iteritems(config_dict):\n if k not in self.__dict__:\n if allow_new_keys:\n self.__setattr__(k, v)\n else:\n raise KeyError('Key `{}` does not exist for overriding. 
'.format(k))\n else:\n if isinstance(self.__dict__[k], Config) and isinstance(v, dict):\n self.__dict__[k]._update(v, allow_new_keys)\n elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):\n self.__dict__[k]._update(v.as_dict(), allow_new_keys)\n else:\n self.__setattr__(k, v)\n\n def get(self, k, default_value=None):\n return self.__dict__.get(k, default_value)\n\n def update(self, config_dict):\n \"\"\"Update members while allowing new keys.\"\"\"\n self._update(config_dict, allow_new_keys=True)\n\n def keys(self):\n return self.__dict__.keys()\n\n def override(self, config_dict_or_str, allow_new_keys=False):\n \"\"\"Update members while disallowing new keys.\"\"\"\n if isinstance(config_dict_or_str, str):\n if not config_dict_or_str:\n return\n elif '=' in config_dict_or_str:\n config_dict = self.parse_from_str(config_dict_or_str)\n elif config_dict_or_str.endswith('.yaml'):\n config_dict = self.parse_from_yaml(config_dict_or_str)\n else:\n raise ValueError(\n 'Invalid string {}, must end with .yaml or contains \"=\".'.format(\n config_dict_or_str))\n elif isinstance(config_dict_or_str, dict):\n config_dict = config_dict_or_str\n else:\n raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\n\n self._update(config_dict, allow_new_keys)\n\n def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:\n \"\"\"Parses a yaml file and returns a dictionary.\"\"\"\n with tf.io.gfile.GFile(yaml_file_path, 'r') as f:\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\n return config_dict\n\n def save_to_yaml(self, yaml_file_path):\n \"\"\"Write a dictionary into a yaml file.\"\"\"\n with tf.io.gfile.GFile(yaml_file_path, 'w') as f:\n yaml.dump(self.as_dict(), f, default_flow_style=False)\n\n def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:\n \"\"\"Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}.\"\"\"\n if not config_str:\n return {}\n config_dict = {}\n try:\n for kv_pair in config_str.split(','):\n if not kv_pair: # skip empty string\n continue\n key_str, value_str = kv_pair.split('=')\n key_str = key_str.strip()\n\n def add_kv_recursive(k, v):\n \"\"\"Recursively parse x.y.z=tt to {x: {y: {z: tt}}}.\"\"\"\n if '.' 
not in k:\n if '*' in v:\n # we reserve * to split arrays.\n return {k: [eval_str_fn(vv) for vv in v.split('*')]}\n return {k: eval_str_fn(v)}\n pos = k.index('.')\n return {k[:pos]: add_kv_recursive(k[pos + 1:], v)}\n\n def merge_dict_recursive(target, src):\n \"\"\"Recursively merge two nested dictionaries.\"\"\"\n for k in src.keys():\n if ((k in target and isinstance(target[k], dict) and\n isinstance(src[k], collections.abc.Mapping))):\n merge_dict_recursive(target[k], src[k])\n else:\n target[k] = src[k]\n\n merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))\n return config_dict\n except ValueError:\n raise ValueError('Invalid config_str: {}'.format(config_str))\n\n def as_dict(self):\n \"\"\"Returns a dict representation.\"\"\"\n config_dict = {}\n for k, v in six.iteritems(self.__dict__):\n if isinstance(v, Config):\n config_dict[k] = v.as_dict()\n else:\n config_dict[k] = copy.deepcopy(v)\n return config_dict\n # pylint: enable=protected-access\n\n\ndef default_detection_configs():\n \"\"\"Returns the default detection configs.\"\"\"\n h = Config()\n\n # model name.\n h.name = 'efficientdet-d1'\n\n # activation type: see activation_fn in utils.py.\n h.act_type = 'swish'\n\n # input preprocessing parameters\n h.image_size = 640 # An integer or a string WxH such as 640x320.\n h.target_size = None\n h.input_rand_hflip = True\n h.jitter_min = 0.1\n h.jitter_max = 2.0\n h.autoaugment_policy = None\n h.grid_mask = False\n h.sample_image = None\n h.map_freq = 5 # AP eval frequency in epochs.\n\n # dataset specific parameters\n # TODO(tanmingxing): update this to be 91 for COCO, and 21 for pascal.\n h.num_classes = 90 # 1+ actual classes, 0 is reserved for background.\n h.seg_num_classes = 3 # segmentation classes\n h.heads = ['object_detection'] # 'object_detection', 'segmentation'\n\n h.skip_crowd_during_training = True\n h.label_map = None # a dict or a string of 'coco', 'voc', 'waymo'.\n h.max_instances_per_image = 100 # Default to 100 for COCO.\n h.regenerate_source_id = False\n\n # model architecture\n h.min_level = 3\n h.max_level = 7\n h.num_scales = 3\n # ratio w/h: 2.0 means w=1.4, h=0.7. 
Can be computed with k-mean per dataset.\n h.aspect_ratios = [1.0, 2.0, 0.5] # [[0.7, 1.4], [1.0, 1.0], [1.4, 0.7]]\n h.anchor_scale = 4.0\n # is batchnorm training mode\n h.is_training_bn = True\n # optimization\n h.momentum = 0.9\n h.optimizer = 'sgd' # can be 'adam' or 'sgd'.\n h.learning_rate = 0.08 # 0.008 for adam.\n h.lr_warmup_init = 0.008 # 0.0008 for adam.\n h.lr_warmup_epoch = 1.0\n h.first_lr_drop_epoch = 200.0\n h.second_lr_drop_epoch = 250.0\n h.poly_lr_power = 0.9\n h.clip_gradients_norm = 10.0\n h.num_epochs = 300\n h.data_format = 'channels_last'\n # The default image normalization is identical to Cloud TPU ResNet.\n h.mean_rgb = [0.485 * 255, 0.456 * 255, 0.406 * 255]\n h.stddev_rgb = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n\n # classification loss\n h.label_smoothing = 0.0 # 0.1 is a good default\n # Behold the focal loss parameters\n h.alpha = 0.25\n h.gamma = 1.5\n\n # localization loss\n h.delta = 0.1 # regularization parameter of huber loss.\n # total loss = box_loss * box_loss_weight + iou_loss * iou_loss_weight\n h.box_loss_weight = 50.0\n h.iou_loss_type = None\n h.iou_loss_weight = 1.0\n\n # regularization l2 loss.\n h.weight_decay = 4e-5\n h.strategy = None # 'tpu', 'gpus', None\n h.mixed_precision = False # If False, use float32.\n h.loss_scale = None # set to 2**16 enables dynamic loss scale\n h.model_optimizations = {} # 'prune':{}\n\n # For detection.\n h.box_class_repeats = 3\n h.fpn_cell_repeats = 3\n h.fpn_num_filters = 88\n h.separable_conv = True\n h.apply_bn_for_resampling = True\n h.conv_after_downsample = False\n h.conv_bn_act_pattern = False\n h.drop_remainder = True # drop remainder for the final batch eval.\n\n # For post-processing nms, must be a dict.\n h.nms_configs = {\n 'method': 'gaussian',\n 'iou_thresh': None, # use the default value based on method.\n 'score_thresh': 0.,\n 'sigma': None,\n 'pyfunc': False,\n 'max_nms_inputs': 0,\n 'max_output_size': 100,\n }\n h.tflite_max_detections = 100\n\n # version.\n h.fpn_name = None\n h.fpn_weight_method = None\n h.fpn_config = None\n\n # No stochastic depth in default.\n h.survival_prob = None\n h.img_summary_steps = None\n\n h.lr_decay_method = 'cosine'\n h.moving_average_decay = 0.9998\n h.ckpt_var_scope = None # ckpt variable scope.\n # If true, skip loading pretrained weights if shape mismatches.\n h.skip_mismatch = True\n\n h.backbone_name = 'efficientnet-b1'\n h.backbone_config = None\n h.var_freeze_expr = None\n\n # A temporary flag to switch between legacy and keras models.\n h.use_keras_model = True\n h.dataset_type = None\n h.positives_momentum = None\n h.grad_checkpoint = False\n\n # Parameters for the Checkpoint Callback.\n h.verbose = 1\n h.save_freq = 'epoch'\n\n return h\n\n\nefficientdet_model_param_dict = {\n 'efficientdet-d0':\n dict(\n name='efficientdet-d0',\n backbone_name='efficientnet-b0',\n image_size=512,\n fpn_num_filters=64,\n fpn_cell_repeats=3,\n box_class_repeats=3,\n ),\n 'efficientdet-d1':\n dict(\n name='efficientdet-d1',\n backbone_name='efficientnet-b1',\n image_size=640,\n fpn_num_filters=88,\n fpn_cell_repeats=4,\n box_class_repeats=3,\n ),\n 'efficientdet-d2':\n dict(\n name='efficientdet-d2',\n backbone_name='efficientnet-b2',\n image_size=768,\n fpn_num_filters=112,\n fpn_cell_repeats=5,\n box_class_repeats=3,\n ),\n 'efficientdet-d3':\n dict(\n name='efficientdet-d3',\n backbone_name='efficientnet-b3',\n image_size=896,\n fpn_num_filters=160,\n fpn_cell_repeats=6,\n box_class_repeats=4,\n ),\n 'efficientdet-d4':\n dict(\n name='efficientdet-d4',\n 
backbone_name='efficientnet-b4',\n image_size=1024,\n fpn_num_filters=224,\n fpn_cell_repeats=7,\n box_class_repeats=4,\n ),\n 'efficientdet-d5':\n dict(\n name='efficientdet-d5',\n backbone_name='efficientnet-b5',\n image_size=1280,\n fpn_num_filters=288,\n fpn_cell_repeats=7,\n box_class_repeats=4,\n ),\n 'efficientdet-d6':\n dict(\n name='efficientdet-d6',\n backbone_name='efficientnet-b6',\n image_size=1280,\n fpn_num_filters=384,\n fpn_cell_repeats=8,\n box_class_repeats=5,\n fpn_weight_method='sum', # Use unweighted sum for stability.\n ),\n 'efficientdet-d7':\n dict(\n name='efficientdet-d7',\n backbone_name='efficientnet-b6',\n image_size=1536,\n fpn_num_filters=384,\n fpn_cell_repeats=8,\n box_class_repeats=5,\n anchor_scale=5.0,\n fpn_weight_method='sum', # Use unweighted sum for stability.\n ),\n 'efficientdet-d7x':\n dict(\n name='efficientdet-d7x',\n backbone_name='efficientnet-b7',\n image_size=1536,\n fpn_num_filters=384,\n fpn_cell_repeats=8,\n box_class_repeats=5,\n anchor_scale=4.0,\n max_level=8,\n fpn_weight_method='sum', # Use unweighted sum for stability.\n ),\n}\n\n\nlite_common_param = dict(\n mean_rgb=127.0,\n stddev_rgb=128.0,\n act_type='relu6',\n fpn_weight_method='sum',\n)\n\nefficientdet_lite_param_dict = {\n # lite models are in progress and subject to changes.\n # mean_rgb and stddev_rgb are consistent with EfficientNet-Lite models in\n # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/lite/efficientnet_lite_builder.py#L28\n 'efficientdet-lite0':\n dict(\n name='efficientdet-lite0',\n backbone_name='efficientnet-lite0',\n image_size=320,\n fpn_num_filters=64,\n fpn_cell_repeats=3,\n box_class_repeats=3,\n anchor_scale=3.0,\n **lite_common_param,\n ),\n 'efficientdet-lite1':\n dict(\n name='efficientdet-lite1',\n backbone_name='efficientnet-lite1',\n image_size=384,\n fpn_num_filters=88,\n fpn_cell_repeats=4,\n box_class_repeats=3,\n anchor_scale=3.0,\n **lite_common_param,\n ),\n 'efficientdet-lite2':\n dict(\n name='efficientdet-lite2',\n backbone_name='efficientnet-lite2',\n image_size=448,\n fpn_num_filters=112,\n fpn_cell_repeats=5,\n box_class_repeats=3,\n anchor_scale=3.0,\n **lite_common_param,\n ),\n 'efficientdet-lite3':\n dict(\n name='efficientdet-lite3',\n backbone_name='efficientnet-lite3',\n image_size=512,\n fpn_num_filters=160,\n fpn_cell_repeats=6,\n box_class_repeats=4,\n **lite_common_param,\n ),\n 'efficientdet-lite3x':\n dict(\n name='efficientdet-lite3x',\n backbone_name='efficientnet-lite3',\n image_size=640,\n fpn_num_filters=200,\n fpn_cell_repeats=6,\n box_class_repeats=4,\n anchor_scale=3.0,\n **lite_common_param,\n ),\n 'efficientdet-lite4':\n dict(\n name='efficientdet-lite4',\n backbone_name='efficientnet-lite4',\n image_size=640,\n fpn_num_filters=224,\n fpn_cell_repeats=7,\n box_class_repeats=4,\n **lite_common_param,\n ),\n}\n\n\ndef get_efficientdet_config(model_name='efficientdet-d1'):\n \"\"\"Get the default config for EfficientDet based on model name.\"\"\"\n h = default_detection_configs()\n if model_name in efficientdet_model_param_dict:\n h.override(efficientdet_model_param_dict[model_name])\n elif model_name in efficientdet_lite_param_dict:\n h.override(efficientdet_lite_param_dict[model_name])\n else:\n raise ValueError('Unknown model name: {}'.format(model_name))\n\n return h\n\n\ndef get_detection_config(model_name):\n if model_name.startswith('efficientdet'):\n return get_efficientdet_config(model_name)\n else:\n raise ValueError('model name must start with efficientdet.')\n"
] | [
[
"tensorflow.io.gfile.GFile"
]
] |
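For reference, the override grammar implemented by `parse_from_str` above: commas separate key=value pairs, dotted keys nest, and `*` is reserved to split a value into a list. A usage sketch, assuming the module imports under the file path given for this row:

    from tensorflow_examples.lite.model_maker.third_party.efficientdet import hparams_config

    h = hparams_config.get_efficientdet_config('efficientdet-lite0')
    h.override('num_classes=21,nms_configs.iou_thresh=0.5')  # dotted key updates the nested dict
    h.override('aspect_ratios=1.0*2.0*0.5')                  # '*' splits the value into a list
    print(h.num_classes, h.nms_configs.iou_thresh, h.aspect_ratios)
    # 21 0.5 [1.0, 2.0, 0.5]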
andykee/loupe | [
"8b10781598973aac7c129e190209acad7e5a9559"
] | [
"tests/test_zernike.py"
] | [
"import pytest\nimport numpy as np\nimport loupe\n\n\ndef test_zernike_rho_theta():\n with pytest.raises(ValueError):\n loupe.zernike(mask=1, index=1, normalize=True, rho=1, theta=None)\n\n\ndef test_zernike_positive():\n with pytest.raises(ValueError):\n loupe.zernike(mask=1, index=-1)\n\n\ndef test_zernike_basis():\n basis = loupe.zernike_basis(mask=np.ones((3, 3)), modes=1, vectorize=False)\n assert np.array_equal(basis, np.ones((1, 3, 3)))\n\n\ndef test_zernike_basis_vectorize():\n basis = loupe.zernike_basis(mask=np.ones((3, 3)), modes=1, vectorize=True)\n assert np.array_equal(basis, np.ones((1, 9)))\n\n\ndef test_zernike_fit():\n mask = loupe.circlemask((256, 256), 128)\n coeffs = np.random.rand(4)*100e-9\n phase = loupe.zernike_compose(mask, coeffs)\n fit_coeffs = loupe.zernike_fit(phase, mask, np.arange(1, 5))\n assert np.all(np.isclose(coeffs, fit_coeffs))\n\n\ndef test_zernike_remove():\n mask = loupe.circlemask((256, 256), 128)\n coeffs = np.random.rand(4)*100e-9\n phase = loupe.zernike_compose(mask, coeffs)\n residual = loupe.zernike_remove(phase, mask, np.arange(1, 5))\n assert np.all(np.isclose(residual, np.zeros_like(residual)))\n"
] | [
[
"numpy.zeros_like",
"numpy.isclose",
"numpy.random.rand",
"numpy.ones",
"numpy.arange"
]
] |
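The fit/remove round-trip tested above amounts to a linear least-squares projection of the phase map onto the Zernike basis; a generic NumPy stand-in for the idea (illustrative only, not loupe's actual implementation):

    import numpy as np

    def fit_modes(phase, basis):
        # basis: [n_modes, N, N]; solve min ||A c - phase|| with flattened modes as columns
        A = basis.reshape(len(basis), -1).T
        coeffs, *_ = np.linalg.lstsq(A, phase.ravel(), rcond=None)
        return coeffs

    basis = np.stack([np.ones((4, 4)), np.arange(16.).reshape(4, 4)])
    phase = 2.0 * basis[0] + 0.5 * basis[1]
    print(fit_modes(phase, basis))  # ~[2.0, 0.5]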
OzFlux/data_analysis | [
"d3c3316fbee865a5ecf7dc23c15a223ec5b1fe80"
] | [
"eddy_pro_files/(co)spectras/plot_(co)spectras.py"
] | [
"'''\nThis script identifies all timestamps with a Foken flag 0 and plots the\ncorresponding (co)spectras including lowess fits.\nDaniel Metzen, 23/07/2019\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom glob import glob\nfrom tqdm import tqdm\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\n\n\ndef get_good_files(ep_output_folder):\n \"\"\"\n Function extract all time when the CO2 flux Foken-flag was 0.\n\n Parameters\n ----------\n ep_output_folder: string\n path to folder containing EddyPro output files\n\n Returns:\n --------\n good_files: np.array\n Array with filenames when qc_co2_flux was 0.\n \"\"\"\n # read full_output file\n full_output_file = glob(f'{ep_output_folder}/**full_output*.csv*')[0]\n df = pd.read_csv(full_output_file, skiprows=[0, 2])\n # filter for Foken flag of 0 and return raw input filenames\n df = df.query('qc_co2_flux == 0')\n good_files = df['filename'].values\n return good_files\n\n\ndef merge_good_files(good_files, ep_output_folder):\n \"\"\"\n Function to build single dataframe merging all spectras and cospectras with\n Foken flag 0 in EddyPro output folder.\n\n Parameters\n ----------\n good_files: iterable\n iterable containing raw 10Hz filenames when qc_co2_flux was 0\n ep_output_folder: string\n path to EddyPro output folder\n\n Returns:\n --------\n good_spectras, good_cospectras: tuple\n Dataframes with frequency as index and spectras or cosepctras of each\n file as columns\n \"\"\"\n good_spectras = pd.DataFrame()\n good_cospectras = pd.DataFrame()\n # append data from files as columns with timestamp as name\n for f in tqdm(good_files):\n pattern = f'{f[5:13]}-{f[-8:-4]}'\n # for some reason not all qc = 0 timestamps have a full_spectra file\n try:\n full_sectra_file = glob(\n f'{ep_output_folder}/eddypro_full_cospectra/*{pattern}*.csv')[0]\n except IndexError as ie:\n #print(f'no file for {pattern} found in cospectra folder. 
skipping timestamp.')\n continue\n df = pd.read_csv(full_sectra_file, skiprows=12, index_col=0,\n na_values=-9999)\n df = df.dropna()\n good_spectras[pattern] = df['f_nat*spec(ts)']\n good_cospectras[pattern] = df['f_nat*cospec(w_ts)']\n return good_spectras, good_cospectras\n\n\ndef plot_spectras(df, outfile=None):\n \"\"\"\n Function to plot spectras.\n\n Parameters\n ----------\n df: pd.DataFrame\n dataframe containing spectras\n outfile (optional): string\n filepath for saving plot\n\n Returns:\n --------\n Pyplot figure and optionally saves figure to file\n \"\"\"\n # plot data\n spectra_fig = plt.figure(1)\n plt.plot(df.median(axis=1), 'k.', alpha=.05,\n label='median data with QC flag 0')\n # plot loess smoothed line\n smoothed = lowess(df.median(axis=1).values, df.index, is_sorted=True,\n frac=0.01, it=0)\n plt.plot(smoothed[40:, 0], smoothed[40:, 1], 'b', label='lowess fit')\n # tweak plot\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel('f (Hz)')\n plt.ylabel('spectra (T)')\n plt.legend()\n plt.tight_layout()\n # save plot if desired\n if outfile:\n plt.savefig(outfile, dpi=300, bbox_inches='tight')\n\n\ndef plot_cospectras(df, outfile=None):\n \"\"\"\n Function to plot cospectras.\n\n Parameters\n ----------\n df: pd.DataFrame\n dataframe containing cospectras\n outfile (optional): string\n filepath for saving plot\n\n Returns:\n --------\n Pyplot figure and optionally saves figure to file\n \"\"\"\n # plot data\n cospectra_fig = plt.figure(2)\n plt.plot(df.median(axis=1), 'k.', alpha=.05,\n label='median data with QC flag 0')\n # plot loess smoothed line\n smoothed = lowess(df.median(axis=1).values, df.index, is_sorted=True,\n frac=0.05, it=0)\n plt.plot(smoothed[:, 0], smoothed[:, 1], 'b', label='lowess fit')\n # plot ideal slope\n x = np.linspace(0.2, 5)\n y1 = .006*x**(-4/3)\n plt.plot(x, y1, 'r--', label='-4/3 slope')\n # tweak plot\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel('f (Hz)')\n plt.ylabel('cospectra (w/T)')\n plt.legend()\n plt.tight_layout()\n # save plot if desired\n if outfile:\n plt.savefig(outfile, dpi=300, bbox_inches='tight')\n\n\ndef main():\n good_files = get_good_files(\n r'E:\\flux_data_processing\\10hz_data\\MOFO_understory\\ep_output\\13m_canopy_height')\n good_spectras, good_cospectras = merge_good_files(\n good_files, r'E:\\flux_data_processing\\10hz_data\\MOFO_understory\\ep_output\\13m_canopy_height')\n plot_spectras(good_spectras)\n plot_cospectras(good_cospectras)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.xscale",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.yscale"
]
] |
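`lowess` as called above (from statsmodels) returns a sorted array of `[x, fitted]` pairs; `frac` sets the smoothing window as a fraction of the data and `it=0` disables the robustness iterations. A tiny standalone example on a synthetic -4/3 power law:

    import numpy as np
    from statsmodels.nonparametric.smoothers_lowess import lowess

    x = np.linspace(0.01, 5, 200)
    y = 0.006 * x ** (-4 / 3) + np.random.normal(0, 0.001, x.size)  # noisy -4/3 slope
    smoothed = lowess(y, x, frac=0.05, it=0, is_sorted=True)
    print(smoothed[:3])  # rows of [x, fitted_y]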
Hesse73/Database-Explore | [
"3d79c0a5c8a15f5a6169231525e064c45f74473e"
] | [
"interact/car_database/extract.py"
] | [
"import pandas\nimport numpy as np\nimport json\n\nmax_part = 5\ndf = pandas.read_csv('car_database.csv')\n\nintro = {\"item_num\": len(df),\n \"attrs\": {\"discrete\": [],\n \"continuous\": []\n }}\nfor attr in df.columns:\n if 'Unnamed' in attr:\n continue\n if df[attr].dtypes in [np.object, np.unicode, np.str]:\n intro['attrs']['discrete'].append(attr)\n else:\n intro['attrs']['continuous'].append(attr)\nattr_details = {'discrete': {}, 'continuous': {}}\nfor attr in intro['attrs']['discrete']:\n type_counter = {}\n for item in df[attr]:\n if item not in type_counter.keys():\n type_counter[item] = 1\n else:\n type_counter[item] += 1\n if len(type_counter.keys()) < max_part:\n attr_details['discrete'][attr] = type_counter\n else:\n type_counter = {k: v for k, v in sorted(\n type_counter.items(), key=lambda item: item[1], reverse=True)}\n part_counter = {k: type_counter[k]\n for k in list(type_counter.keys())[:max_part]}\n rest_counter = 0\n for val in list(type_counter.values())[max_part:]:\n rest_counter += val\n part_counter['rest'] = rest_counter\n attr_details['discrete'][attr] = part_counter\n\nfor attr in intro['attrs']['continuous']:\n value = np.asarray(df[attr])\n min_val, max_val = np.min(value), np.max(value)\n range_val = max_val-min_val\n sparse = max_part\n counter = []\n for i in range(sparse):\n counter.append(int(((value >= min_val+range_val*i/sparse) &\n (value <= min_val+range_val*(i+1)/sparse)).sum()))\n attr_details['continuous'][attr] = {'min': int(\n min_val), 'max': int(max_val), 'counter': counter}\nintro['attr_details'] = attr_details\njson.dump(intro, open('car_db.json', 'w'))\n"
] | [
[
"numpy.min",
"pandas.read_csv",
"numpy.max",
"numpy.asarray"
]
] |
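One detail of the continuous-attribute binning above: both interval ends are closed (`>=` and `<=`), so a value landing exactly on an interior edge is counted in two adjacent bins. `np.histogram` with its half-open bins gives the non-overlapping equivalent:

    import numpy as np

    value = np.arange(11)  # interior edges fall exactly on 2, 4, 6, 8
    counter, edges = np.histogram(value, bins=5)  # bins half-open, last bin closed
    print(counter.tolist(), edges.tolist())
    # [2, 2, 2, 2, 3] -- each value counted once, unlike the closed-interval loop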
ntraut/baracus | [
"84e3c72ab088b869d0664fd9bc07b1d34db2affb"
] | [
"scripts/run_brain_age_bids.py"
] | [
"#! /usr/bin/env python\n\nimport argparse\nimport os\n\nimport pandas as pd\nfrom bids.grabbids import BIDSLayout\nfrom pkg_resources import resource_filename, Requirement\n\nfrom baracus import models_list, __version__\nfrom baracus.predict import predict_brain_age_single_subject\nfrom baracus.prepare import run_prepare_all\nfrom baracus.utils import run, get_subjects_session\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='BARACUS: Brain-Age Regression Analysis and Computation Utility Software. BIDS mode. '\n 'You specify a BIDS-formatted freesurfer folder as input. All data '\n 'is extracted automatiacally from that folder. ')\n parser.add_argument('bids_dir', help='The directory with the input dataset '\n 'formatted according to the BIDS standard.')\n parser.add_argument('out_dir', help='Results are put into {out_dir}/baracus.')\n parser.add_argument('analysis_level', help='Level of the analysis that will be performed. '\n '\"participant\": predicts single subject brain age, '\n '\"group\": collects single subject predictions.',\n choices=['participant', 'group'])\n\n parser.add_argument('--participant_label', help='The label of the participant that should be analyzed. The label '\n 'corresponds to sub-<participant_label> from the BIDS spec '\n '(so it does not include \"sub-\"). If this parameter is not '\n 'provided all subjects should be analyzed. Multiple '\n 'participants can be specified with a space separated list.',\n nargs=\"+\")\n parser.add_argument('--freesurfer_dir', help=\"Folder with FreeSurfer subjects formatted according \"\n \"to BIDS standard. If subject's recon-all folder \"\n \"cannot be found, recon-all will be run. \"\n \"If not specified freesurfer data will be saved to {\"\n \"out_dir}/freesurfer\")\n parser.add_argument('--models', choices=models_list, default=[\"Liem2016__OCI_norm\"], help='',\n nargs=\"+\")\n parser.add_argument('--license_key',\n help='FreeSurfer license key - letters and numbers after \"*\" in the email you '\n 'received after registration. 
To register (for free) visit '\n 'https://surfer.nmr.mgh.harvard.edu/registration.html',\n required=True)\n parser.add_argument('--n_cpus', help='Number of CPUs/cores available to use.', default=1, type=int)\n parser.add_argument('-v', '--version', action='version',\n version='BARACUS version {}'.format(__version__))\n args = parser.parse_args()\n\n # set up output dirs\n if args.freesurfer_dir:\n freesurfer_dir = args.freesurfer_dir\n else:\n freesurfer_dir = os.path.join(args.out_dir, \"freesurfer\")\n out_dir = os.path.join(args.out_dir, \"baracus\")\n if not os.path.isdir(freesurfer_dir):\n os.makedirs(freesurfer_dir)\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n model_dir = resource_filename(Requirement.parse(\"baracus\"), 'models')\n\n run(\"bids-validator \" + args.bids_dir)\n layout = BIDSLayout(args.bids_dir)\n\n truly_longitudinal_study = True if len(layout.get_sessions()) > 1 else False\n subjects_to_analyze, sessions_to_analyze, freesurfer_subjects_to_analyze = get_subjects_session(layout,\n args.participant_label,\n truly_longitudinal_study)\n\n if args.analysis_level == \"participant\":\n\n data_files = run_prepare_all(args.bids_dir, freesurfer_dir, out_dir, subjects_to_analyze,\n sessions_to_analyze, args.n_cpus, args.license_key)\n\n for subject, d in data_files.items():\n d[\"out_dir\"] = out_dir\n d[\"model_dir\"] = model_dir\n d[\"models\"] = args.models\n d[\"subject_label\"] = subject\n predict_brain_age_single_subject(**d)\n\n elif args.analysis_level == \"group\":\n print(\"Creating group table...\")\n df = pd.DataFrame([])\n for subject in freesurfer_subjects_to_analyze:\n in_file = os.path.join(out_dir, subject, subject + \"_predicted_age.tsv\")\n df = df.append(pd.read_csv(in_file, sep=\"\\t\"))\n\n group_out_dir = os.path.join(out_dir, \"00_group\")\n if not os.path.isdir(group_out_dir):\n os.makedirs(group_out_dir)\n out_file = os.path.join(group_out_dir, \"group_predicted_age.tsv\")\n df.to_csv(out_file, sep=\"\\t\", index=False)\n print(\"Finished. Group table created for %s\" % \" \".join(subjects_to_analyze))\n"
] | [
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
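`DataFrame.append` in the group branch above was deprecated in pandas 1.4 and removed in 2.0; on modern pandas the same group table can be collected with `pd.concat`. A sketch (the function name and arguments are ours, mirroring the variables used in the script):

    import os
    import pandas as pd

    def collect_group_table(out_dir, subjects):
        # One frame per subject's predicted-age TSV, concatenated once at the end
        frames = [pd.read_csv(os.path.join(out_dir, s, s + "_predicted_age.tsv"), sep="\t")
                  for s in subjects]
        return pd.concat(frames, ignore_index=True)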
ran404/bbgbridge | [
"7f84788e47bf3d34465b02a04168591583683b96"
] | [
"tests/test_bloomberg_result.py"
] | [
"import datetime\nimport unittest\n\nimport numpy as np\nimport numpy.testing as npt\nimport pandas as pd\nfrom collections import OrderedDict\n\nfrom bbgbridge.converters import price_to_frame\nfrom bbgbridge.result import BloombergRequestResult\nfrom bbgbridge.util import to_timestamp\n\n\ndef assert_dict_in_series(testcase, under_test, expected_dict):\n \"\"\" NB: numpy arrays area asserted with assert_almost_equal \"\"\"\n for expected_key, expected_value in expected_dict.items():\n actual_value = under_test[expected_key]\n if isinstance(actual_value, np.ndarray):\n npt.assert_almost_equal(actual_value, expected_value)\n elif isinstance(actual_value, float):\n if np.isnan(actual_value):\n testcase.assertTrue(np.isnan(expected_value), msg='Error: ' + expected_key)\n else:\n testcase.assertAlmostEqual(expected_value, actual_value, msg='Error: ' + expected_key)\n elif actual_value is pd.NaT:\n testcase.assertIs(actual_value, expected_value)\n else:\n testcase.assertEqual(expected_value, actual_value, 'Error for ' + expected_key)\n\n\nclass BloombergRequestResultTest(unittest.TestCase):\n def setUp(self):\n result = [{'securityData': OrderedDict(\n [('security', 'SPY US Equity'), ('eidData', []), ('sequenceNumber', 0), ('fieldExceptions', []), (\n 'fieldData', [{'fieldData': OrderedDict([('date', datetime.date(2015, 8, 25)), ('PX_LOW', 186.92)])},\n {'fieldData': OrderedDict([('date', datetime.date(2015, 8, 26)), ('PX_LOW', 188.37)])},\n {'fieldData': OrderedDict([('date', datetime.date(2015, 8, 27)), ('PX_LOW', 195.21)])},\n {'fieldData': OrderedDict([('date', datetime.date(2015, 8, 28)), ('PX_LOW', 197.92)])}])])}]\n self.bbg_result = BloombergRequestResult(result, {'test_request': 'test_request_params'},\n meta=OrderedDict([('type', 'HistoricalDataRequest'), ('subtype', 'futures')]))\n\n def test_to_dataframe_no_converter(self):\n new_bbg_result = self.bbg_result.with_df_converter(None)\n self.assertRaisesRegex(ValueError, \"I need to know the converter in order to convert to DataFrame\", new_bbg_result.to_dataframe)\n\n def test_to_dataframe_with_converter(self):\n new_bbg_result = self.bbg_result.with_df_converter(price_to_frame)\n df = new_bbg_result.to_dataframe()\n self.assertEqual(len(df), 4)\n assert_dict_in_series(self, df.loc[0], {\n 'symbol': 'SPY US Equity',\n 'date': to_timestamp('2015-08-25').date(),\n 'PX_LOW': 186.92\n })\n assert_dict_in_series(self, df.loc[3], {\n 'symbol': 'SPY US Equity',\n 'date': to_timestamp('2015-08-28').date(),\n 'PX_LOW': 197.92\n })\n\n def test_to_dict(self):\n to_dict = self.bbg_result.to_dict()\n self.assertDictEqual({'type': 'HistoricalDataRequest', 'subtype': 'futures'}, to_dict['meta'])\n\n def test_serialize_to_json(self):\n expected_json = '''{\n \"meta\": {\n \"type\": \"HistoricalDataRequest\",\n \"subtype\": \"futures\"\n },\n \"converter\": null,\n \"request\": {\n \"test_request\": \"test_request_params\"\n },\n \"result\": [\n {\n \"securityData\": {\n \"security\": \"SPY US Equity\",\n \"eidData\": [],\n \"sequenceNumber\": 0,\n \"fieldExceptions\": [],\n \"fieldData\": [\n {\n \"fieldData\": {\n \"date\": \"2015-08-25\",\n \"PX_LOW\": 186.92\n }\n },\n {\n \"fieldData\": {\n \"date\": \"2015-08-26\",\n \"PX_LOW\": 188.37\n }\n },\n {\n \"fieldData\": {\n \"date\": \"2015-08-27\",\n \"PX_LOW\": 195.21\n }\n },\n {\n \"fieldData\": {\n \"date\": \"2015-08-28\",\n \"PX_LOW\": 197.92\n }\n }\n ]\n }\n }\n ]\n}'''\n actual_json = self.bbg_result.to_json()\n # print('******* expected *******\\n{}\\n******* expected 
*******'.format(expected_json), file=sys.stderr)\n # print('******* actual *******\\n{}\\n******* actual *******'.format(actual_json), file=sys.stderr)\n self.assertEqual(expected_json, actual_json)\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.isnan"
]
] |
anurendra/Web_IE | [
"4ba95320fd46d3c6fc090f3f095c7c7de78453bb"
] | [
"train.py"
] | [
"import numpy as np\nimport time\nimport torch\n\nfrom utils import print_and_log, print_confusion_matrix\n\n\ndef train_model(model, train_loader, optimizer, criterion, n_epochs, device, eval_loader, eval_interval=3, log_file='log.txt', ckpt_path='ckpt.pth'):\n \"\"\"\n Train the `model` (nn.Module) on data loaded by `train_loader` (torch.utils.data.DataLoader) for `n_epochs`.\n evaluate performance on `eval_loader` dataset every `eval_interval` epochs and check for early stopping criteria!\n \"\"\"\n print('Training Model for %d epochs...' % (n_epochs))\n model.train()\n\n best_eval_acc = 0.0\n patience = 5 # number of VAL Acc values observed after best value to stop training\n min_delta = 1e-5 # min improvement in eval_acc value to be considered a valid improvement\n for epoch in range(1, n_epochs+1):\n start = time.time()\n epoch_loss, epoch_correct_preds, n_bboxes = 0.0, 0.0, 0.0\n for i, (images, bboxes, context_indices, labels) in enumerate(train_loader):\n images = images.to(device) # [batch_size, 3, img_H, img_W]\n bboxes = bboxes.to(device) # [total_n_bboxes_in_batch, 5]\n context_indices = context_indices.to(device) # [total_n_bboxes_in_batch, 2 * context_size]\n labels = labels.to(device) # [total_n_bboxes_in_batch]\n n_bboxes += labels.size()[0]\n \n optimizer.zero_grad()\n\n output = model(images, bboxes, context_indices) # [total_n_bboxes_in_batch, n_classes]\n predictions = output.argmax(dim=1)\n epoch_correct_preds += (predictions == labels).sum().item()\n \n loss = criterion(output, labels)\n epoch_loss += loss.item()\n \n loss.backward()\n optimizer.step()\n\n print_and_log('[TRAIN]\\t Epoch: %2d\\t Loss: %.4f\\t Accuracy: %.2f%% (%.2fs)' % (epoch, epoch_loss/n_bboxes, 100*epoch_correct_preds/n_bboxes, time.time()-start), log_file)\n \n if epoch == 1 or epoch % eval_interval == 0 or epoch == n_epochs:\n per_class_accuracy = evaluate_model(model, eval_loader, criterion, device, 'VAL', log_file)\n eval_acc = per_class_accuracy[1:].mean()\n model.train()\n\n if eval_acc - best_eval_acc > min_delta: # best so far so save checkpoint to restore later\n best_eval_acc = eval_acc\n patience_count = 0\n torch.save(model.state_dict(), ckpt_path)\n else:\n patience_count += 1\n if patience_count >= patience:\n print('Early Stopping!')\n break\n \n print('Model Trained! 
Restoring model to best Eval performance checkpoint...')\n model.load_state_dict(torch.load(ckpt_path))\n\n\ndef evaluate_model(model, eval_loader, criterion, device, split_name='VAL', log_file='log.txt'):\n \"\"\"\n Evaluate model (nn.Module) on data loaded by eval_loader (torch.utils.data.DataLoader)\n eval_loader.batch_size SHOULD BE 1\n \n Returns: per_class_accuracy np.array of shape [n_classes,]\n \"\"\"\n assert eval_loader.batch_size == 1\n \n model.eval()\n start = time.time()\n epoch_loss, epoch_correct_preds, n_bboxes = 0.0, 0.0, 0.0\n n_classes = model.n_classes\n class_names = model.class_names\n confusion_matrix = np.zeros([n_classes, n_classes], dtype=np.int32) # to get per class metrics\n with torch.no_grad():\n for i, (images, bboxes, context_indices, labels) in enumerate(eval_loader):\n images = images.to(device) # [batch_size, 3, img_H, img_W]\n bboxes = bboxes.to(device) # [total_n_bboxes_in_batch, 5]\n context_indices = context_indices.to(device) # [total_n_bboxes_in_batch, 2 * context_size]\n labels = labels.to(device) # [total_n_bboxes_in_batch]\n n_bboxes += labels.size()[0]\n output = model(images, bboxes, context_indices) # [total_n_bboxes_in_batch, n_classes]\n \n price_bb = output[:, 1].argmax()\n image_bb = output[:, 2].argmax()\n title_bb = output[:, 3].argmax()\n labels_flattened = labels.view(-1)\n for j, l in enumerate(labels_flattened):\n l = l.item()\n if l > 0:\n if j == price_bb:\n confusion_matrix[l, 1] += 1\n elif j == image_bb:\n confusion_matrix[l, 2] += 1\n elif j == title_bb:\n confusion_matrix[l, 3] += 1\n else:\n confusion_matrix[l, 0] += 1\n \n if labels_flattened[price_bb].item() == 0:\n confusion_matrix[0, 1] += 1\n if labels_flattened[image_bb].item() == 0:\n confusion_matrix[0, 2] += 1\n if labels_flattened[title_bb].item() == 0:\n confusion_matrix[0, 3] += 1\n \n loss = criterion(output, labels)\n epoch_loss += loss.item()\n \n per_class_accuracy = confusion_matrix.diagonal()/confusion_matrix.sum(1)\n avg_accuracy = per_class_accuracy[1:].mean() # accuracy of classes other than BG\n print_and_log('[%s]\\t Loss: %.4f\\t Avg_class_Accuracy: %.2f%% (%.2fs)' % (split_name, epoch_loss/n_bboxes, 100*avg_accuracy, time.time()-start), log_file)\n # print_confusion_matrix(confusion_matrix, class_names)\n for c in range(1, n_classes):\n print_and_log('%-5s Acc: %.2f%%' % (class_names[c], 100*per_class_accuracy[c]), log_file)\n print_and_log('', log_file)\n \n return per_class_accuracy\n"
] | [
[
"torch.no_grad",
"numpy.zeros",
"torch.load"
]
] |
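The early-stopping bookkeeping in `train_model` above reduces to this pattern; `patience_count` must start at 0 before the first non-improving eval can be counted (accuracy values below are hypothetical):

    best, patience_count = 0.0, 0
    patience, min_delta = 5, 1e-5
    for eval_acc in [0.40, 0.55, 0.54, 0.55, 0.53, 0.52, 0.54]:
        if eval_acc - best > min_delta:   # improvement: save checkpoint, reset counter
            best, patience_count = eval_acc, 0
        else:                             # no improvement (ties count as no improvement)
            patience_count += 1
            if patience_count >= patience:
                print('Early Stopping!')
                break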
burstable-ai/modin | [
"ee2440c53a1e3bd47736776e7c643f05c4a0db70"
] | [
"modin/pandas/test/test_series.py"
] | [
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pytest\nimport numpy as np\nimport json\nimport pandas\nimport matplotlib\nimport modin.pandas as pd\nfrom numpy.testing import assert_array_equal\nfrom pandas.core.base import SpecificationError\nfrom modin.utils import get_current_execution\nfrom modin.test.test_utils import warns_that_defaulting_to_pandas\nimport sys\n\nfrom modin.utils import to_pandas\nfrom .utils import (\n random_state,\n RAND_LOW,\n RAND_HIGH,\n df_equals,\n arg_keys,\n name_contains,\n test_data,\n test_data_values,\n test_data_keys,\n test_data_with_duplicates_values,\n test_data_with_duplicates_keys,\n test_string_data_values,\n test_string_data_keys,\n test_string_list_data_values,\n test_string_list_data_keys,\n string_sep_values,\n string_sep_keys,\n string_na_rep_values,\n string_na_rep_keys,\n numeric_dfs,\n no_numeric_dfs,\n agg_func_keys,\n agg_func_values,\n agg_func_except_keys,\n agg_func_except_values,\n numeric_agg_funcs,\n quantiles_keys,\n quantiles_values,\n axis_keys,\n axis_values,\n bool_arg_keys,\n bool_arg_values,\n int_arg_keys,\n int_arg_values,\n encoding_types,\n categories_equals,\n eval_general,\n test_data_small_values,\n test_data_small_keys,\n test_data_categorical_values,\n test_data_categorical_keys,\n generate_multiindex,\n test_data_diff_dtype,\n df_equals_with_non_stable_indices,\n test_data_large_categorical_series_keys,\n test_data_large_categorical_series_values,\n default_to_pandas_ignore_string,\n)\nfrom modin.config import NPartitions\n\n# Our configuration in pytest.ini requires that we explicitly catch all\n# instances of defaulting to pandas, but some test modules, like this one,\n# have too many such instances.\n# TODO(https://github.com/modin-project/modin/issues/3655): catch all instances\n# of defaulting to pandas.\npytestmark = pytest.mark.filterwarnings(default_to_pandas_ignore_string)\n\nNPartitions.put(4)\n\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use(\"Agg\")\n\n\ndef get_rop(op):\n if op.startswith(\"__\") and op.endswith(\"__\"):\n return \"__r\" + op[2:]\n else:\n return None\n\n\ndef inter_df_math_helper(modin_series, pandas_series, op):\n inter_df_math_helper_one_side(modin_series, pandas_series, op)\n rop = get_rop(op)\n if rop:\n inter_df_math_helper_one_side(modin_series, pandas_series, rop)\n\n\ndef inter_df_math_helper_one_side(modin_series, pandas_series, op):\n try:\n pandas_attr = getattr(pandas_series, op)\n except Exception as e:\n with pytest.raises(type(e)):\n _ = getattr(modin_series, op)\n return\n modin_attr = getattr(modin_series, op)\n\n try:\n pandas_result = pandas_attr(4)\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_attr(4)) # repr to force materialization\n else:\n modin_result = modin_attr(4)\n 
df_equals(modin_result, pandas_result)\n\n try:\n pandas_result = pandas_attr(4.0)\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_attr(4.0)) # repr to force materialization\n else:\n modin_result = modin_attr(4.0)\n df_equals(modin_result, pandas_result)\n\n # These operations don't support non-scalar `other` or have a strange behavior in\n # the testing environment\n if op in [\n \"__divmod__\",\n \"divmod\",\n \"rdivmod\",\n \"floordiv\",\n \"__floordiv__\",\n \"rfloordiv\",\n \"__rfloordiv__\",\n \"mod\",\n \"__mod__\",\n \"rmod\",\n \"__rmod__\",\n ]:\n return\n\n try:\n pandas_result = pandas_attr(pandas_series)\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_attr(modin_series)) # repr to force materialization\n else:\n modin_result = modin_attr(modin_series)\n df_equals(modin_result, pandas_result)\n\n list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_series.shape[0]))\n try:\n pandas_result = pandas_attr(list_test)\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_attr(list_test)) # repr to force materialization\n else:\n modin_result = modin_attr(list_test)\n df_equals(modin_result, pandas_result)\n\n series_test_modin = pd.Series(list_test, index=modin_series.index)\n series_test_pandas = pandas.Series(list_test, index=pandas_series.index)\n try:\n pandas_result = pandas_attr(series_test_pandas)\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_attr(series_test_modin)) # repr to force materialization\n else:\n modin_result = modin_attr(series_test_modin)\n df_equals(modin_result, pandas_result)\n\n # Level test\n new_idx = pandas.MultiIndex.from_tuples(\n [(i // 4, i // 2, i) for i in modin_series.index]\n )\n modin_df_multi_level = modin_series.copy()\n modin_df_multi_level.index = new_idx\n\n try:\n # Defaults to pandas\n with warns_that_defaulting_to_pandas():\n # Operation against self for sanity check\n getattr(modin_df_multi_level, op)(modin_df_multi_level, level=1)\n except TypeError:\n # Some operations don't support multilevel `level` parameter\n pass\n\n\ndef create_test_series(vals, sort=False, **kwargs):\n if isinstance(vals, dict):\n modin_series = pd.Series(vals[next(iter(vals.keys()))], **kwargs)\n pandas_series = pandas.Series(vals[next(iter(vals.keys()))], **kwargs)\n else:\n modin_series = pd.Series(vals, **kwargs)\n pandas_series = pandas.Series(vals, **kwargs)\n if sort:\n modin_series = modin_series.sort_values().reset_index(drop=True)\n pandas_series = pandas_series.sort_values().reset_index(drop=True)\n return modin_series, pandas_series\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_to_frame(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.to_frame(name=\"miao\"), pandas_series.to_frame(name=\"miao\"))\n\n\ndef test_accessing_index_element_as_property():\n s = pd.Series([10, 20, 30], index=[\"a\", \"b\", \"c\"])\n assert s.b == 20\n with pytest.raises(Exception):\n _ = s.d\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_callable_key_in_getitem(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(\n modin_series[lambda s: s.index % 2 == 0],\n pandas_series[lambda s: s.index % 2 == 0],\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_T(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.T, pandas_series.T)\n\n\[email protected](\"data\", test_data_values, 
ids=test_data_keys)\ndef test___abs__(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.__abs__(), pandas_series.__abs__())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___add__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__add__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___and__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__and__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___array__(data):\n modin_series, pandas_series = create_test_series(data)\n modin_result = modin_series.__array__()\n assert_array_equal(modin_result, pandas_series.__array__())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___bool__(data):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.__bool__()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.__bool__()\n else:\n modin_result = modin_series.__bool__()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___contains__(request, data):\n modin_series, pandas_series = create_test_series(data)\n\n result = False\n key = \"Not Exist\"\n assert result == modin_series.__contains__(key)\n assert result == (key in modin_series)\n\n if \"empty_data\" not in request.node.name:\n result = True\n key = pandas_series.keys()[0]\n assert result == modin_series.__contains__(key)\n assert result == (key in modin_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___copy__(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.copy(), modin_series)\n df_equals(modin_series.copy(), pandas_series.copy())\n df_equals(modin_series.copy(), pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___deepcopy__(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.__deepcopy__(), modin_series)\n df_equals(modin_series.__deepcopy__(), pandas_series.__deepcopy__())\n df_equals(modin_series.__deepcopy__(), pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___delitem__(data):\n modin_series, pandas_series = create_test_series(data)\n del modin_series[modin_series.index[0]]\n del pandas_series[pandas_series.index[0]]\n df_equals(modin_series, pandas_series)\n\n del modin_series[modin_series.index[-1]]\n del pandas_series[pandas_series.index[-1]]\n df_equals(modin_series, pandas_series)\n\n del modin_series[modin_series.index[0]]\n del pandas_series[pandas_series.index[0]]\n df_equals(modin_series, pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_divmod(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"divmod\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_rdivmod(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"rdivmod\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___eq__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, 
\"__eq__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___floordiv__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__floordiv__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___ge__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__ge__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___getitem__(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series[0], pandas_series[0])\n df_equals(\n modin_series[modin_series.index[-1]], pandas_series[pandas_series.index[-1]]\n )\n modin_series = pd.Series(list(range(1000)))\n pandas_series = pandas.Series(list(range(1000)))\n df_equals(modin_series[:30], pandas_series[:30])\n df_equals(modin_series[modin_series > 500], pandas_series[pandas_series > 500])\n df_equals(modin_series[::2], pandas_series[::2])\n\n # Test empty series\n df_equals(pd.Series([])[:30], pandas.Series([])[:30])\n\n\ndef test___getitem__1383():\n # see #1383 for more details\n data = [\"\", \"a\", \"b\", \"c\", \"a\"]\n modin_series = pd.Series(data)\n pandas_series = pandas.Series(data)\n df_equals(modin_series[3:7], pandas_series[3:7])\n\n\[email protected](\"start\", [-7, -5, -3, 0, None, 3, 5, 7])\[email protected](\"stop\", [-7, -5, -3, 0, None, 3, 5, 7])\ndef test___getitem_edge_cases(start, stop):\n data = [\"\", \"a\", \"b\", \"c\", \"a\"]\n modin_series = pd.Series(data)\n pandas_series = pandas.Series(data)\n df_equals(modin_series[start:stop], pandas_series[start:stop])\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___gt__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__gt__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___int__(data):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = int(pandas_series[0])\n except Exception as e:\n with pytest.raises(type(e)):\n int(modin_series[0])\n else:\n assert int(modin_series[0]) == pandas_result\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___invert__(data):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.__invert__()\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_series.__invert__())\n else:\n df_equals(modin_series.__invert__(), pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___iter__(data):\n modin_series, pandas_series = create_test_series(data)\n for m, p in zip(modin_series.__iter__(), pandas_series.__iter__()):\n np.testing.assert_equal(m, p)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___le__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__le__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___len__(data):\n modin_series, pandas_series = create_test_series(data)\n assert len(modin_series) == len(pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___long__(data):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series[0].__long__()\n except Exception as e:\n with pytest.raises(type(e)):\n 
modin_series[0].__long__()\n else:\n assert modin_series[0].__long__() == pandas_result\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___lt__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__lt__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___mod__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__mod__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___mul__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__mul__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___ne__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__ne__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___neg__(request, data):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.__neg__()\n except Exception as e:\n with pytest.raises(type(e)):\n repr(modin_series.__neg__())\n else:\n df_equals(modin_series.__neg__(), pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___or__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__or__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___pow__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__pow__\")\n\n\[email protected](\"name\", [\"Dates\", None])\[email protected](\n \"dt_index\", [True, False], ids=[\"dt_index_true\", \"dt_index_false\"]\n)\[email protected](\n \"data\",\n [*test_data_values, \"empty\"],\n ids=[*test_data_keys, \"empty\"],\n)\ndef test___repr__(name, dt_index, data):\n if data == \"empty\":\n modin_series, pandas_series = pd.Series(), pandas.Series()\n else:\n modin_series, pandas_series = create_test_series(data)\n pandas_series.name = modin_series.name = name\n if dt_index:\n index = pandas.date_range(\n \"1/1/2000\", periods=len(pandas_series.index), freq=\"T\"\n )\n pandas_series.index = modin_series.index = index\n\n if get_current_execution() == \"BaseOnPython\" and data == \"empty\":\n # TODO: Remove this when default `dtype` of empty Series will be `object` in pandas (see #3142).\n assert modin_series.dtype == np.object\n assert pandas_series.dtype == np.float64\n df_equals(modin_series.index, pandas_series.index)\n else:\n assert repr(modin_series) == repr(pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___round__(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(round(modin_series), round(pandas_series))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___setitem__(data):\n modin_series, pandas_series = create_test_series(data)\n for key in modin_series.keys():\n modin_series[key] = 0\n pandas_series[key] = 0\n df_equals(modin_series, pandas_series)\n\n\[email protected](\n \"key\",\n [\n pytest.param(lambda idx: slice(1, 3), id=\"location_based_slice\"),\n pytest.param(lambda idx: slice(idx[1], idx[-1]), id=\"index_based_slice\"),\n pytest.param(lambda idx: [idx[0], idx[2], idx[-1]], id=\"list_of_labels\"),\n pytest.param(\n lambda idx: [True if i 
% 2 else False for i in range(len(idx))],\n id=\"boolean_mask\",\n ),\n ],\n)\[email protected](\n \"index\",\n [\n pytest.param(\n lambda idx_len: [chr(x) for x in range(ord(\"a\"), ord(\"a\") + idx_len)],\n id=\"str_index\",\n ),\n pytest.param(lambda idx_len: list(range(1, idx_len + 1)), id=\"int_index\"),\n ],\n)\ndef test___setitem___non_hashable(key, index):\n data = np.arange(5)\n index = index(len(data))\n key = key(index)\n md_sr, pd_sr = create_test_series(data, index=index)\n\n md_sr[key] = 10\n pd_sr[key] = 10\n df_equals(md_sr, pd_sr)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___sizeof__(data):\n modin_series, pandas_series = create_test_series(data)\n with warns_that_defaulting_to_pandas():\n modin_series.__sizeof__()\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___str__(data):\n modin_series, pandas_series = create_test_series(data)\n assert str(modin_series) == str(pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___sub__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__sub__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___truediv__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__truediv__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___xor__(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"__xor__\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_abs(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.abs(), pandas_series.abs())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_add(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"add\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_add_prefix(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(\n modin_series.add_prefix(\"PREFIX_ADD_\"), pandas_series.add_prefix(\"PREFIX_ADD_\")\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_add_suffix(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(\n modin_series.add_suffix(\"SUFFIX_ADD_\"), pandas_series.add_suffix(\"SUFFIX_ADD_\")\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_values, ids=agg_func_keys)\ndef test_agg(data, func):\n eval_general(\n *create_test_series(data),\n lambda df: df.agg(func),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_except_values, ids=agg_func_except_keys)\ndef test_agg_except(data, func):\n # SpecificationError is arisen because we treat a Series as a DataFrame.\n # See details in pandas issue 36036.\n with pytest.raises(SpecificationError):\n eval_general(\n *create_test_series(data),\n lambda df: df.agg(func),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_values, ids=agg_func_keys)\ndef test_agg_numeric(request, data, func):\n if name_contains(request.node.name, numeric_agg_funcs) and name_contains(\n request.node.name, numeric_dfs\n ):\n axis = 0\n eval_general(\n 
*create_test_series(data),\n lambda df: df.agg(func, axis),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_except_values, ids=agg_func_except_keys)\ndef test_agg_numeric_except(request, data, func):\n if name_contains(request.node.name, numeric_agg_funcs) and name_contains(\n request.node.name, numeric_dfs\n ):\n axis = 0\n # SpecificationError is arisen because we treat a Series as a DataFrame.\n # See details in pandas issue 36036.\n with pytest.raises(SpecificationError):\n eval_general(\n *create_test_series(data),\n lambda df: df.agg(func, axis),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_values, ids=agg_func_keys)\ndef test_aggregate(data, func):\n axis = 0\n eval_general(\n *create_test_series(data),\n lambda df: df.aggregate(func, axis),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_except_values, ids=agg_func_except_keys)\ndef test_aggregate_except(data, func):\n axis = 0\n # SpecificationError is arisen because we treat a Series as a DataFrame.\n # See details in pandas issues 36036.\n with pytest.raises(SpecificationError):\n eval_general(\n *create_test_series(data),\n lambda df: df.aggregate(func, axis),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_values, ids=agg_func_keys)\ndef test_aggregate_numeric(request, data, func):\n if name_contains(request.node.name, numeric_agg_funcs) and name_contains(\n request.node.name, numeric_dfs\n ):\n axis = 0\n eval_general(\n *create_test_series(data),\n lambda df: df.agg(func, axis),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"func\", agg_func_except_values, ids=agg_func_except_keys)\ndef test_aggregate_numeric_except(request, data, func):\n if name_contains(request.node.name, numeric_agg_funcs) and name_contains(\n request.node.name, numeric_dfs\n ):\n axis = 0\n # SpecificationError is arisen because we treat a Series as a DataFrame.\n # See details in pandas issues 36036.\n with pytest.raises(SpecificationError):\n eval_general(\n *create_test_series(data),\n lambda df: df.agg(func, axis),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_aggregate_error_checking(data):\n modin_series, pandas_series = create_test_series(data)\n\n assert pandas_series.aggregate(\"ndim\") == 1\n assert modin_series.aggregate(\"ndim\") == 1\n\n def user_warning_checker(series, fn):\n if isinstance(series, pd.Series):\n with warns_that_defaulting_to_pandas():\n return fn(series)\n return fn(series)\n\n eval_general(\n modin_series,\n pandas_series,\n lambda series: user_warning_checker(\n series, fn=lambda series: series.aggregate(\"cumproduct\")\n ),\n )\n eval_general(\n modin_series, pandas_series, lambda series: series.aggregate(\"NOT_EXISTS\")\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_align(data):\n modin_series, _ = create_test_series(data) # noqa: F841\n with warns_that_defaulting_to_pandas():\n modin_series.align(modin_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_all(data, skipna):\n eval_general(*create_test_series(data), lambda df: df.all(skipna=skipna))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email 


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_any(data, skipna):
    eval_general(*create_test_series(data), lambda df: df.any(skipna=skipna))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(data):
    modin_series, pandas_series = create_test_series(data)

    data_to_append = {"append_a": 2, "append_b": 1000}

    ignore_idx_values = [True, False]

    for ignore in ignore_idx_values:
        try:
            pandas_result = pandas_series.append(data_to_append, ignore_index=ignore)
        except Exception as e:
            with pytest.raises(type(e)):
                modin_series.append(data_to_append, ignore_index=ignore)
        else:
            modin_result = modin_series.append(data_to_append, ignore_index=ignore)
            df_equals(modin_result, pandas_result)

    try:
        pandas_result = pandas_series.append(pandas_series.iloc[-1])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.append(modin_series.iloc[-1])
    else:
        modin_result = modin_series.append(modin_series.iloc[-1])
        df_equals(modin_result, pandas_result)

    try:
        pandas_result = pandas_series.append([pandas_series.iloc[-1]])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.append([modin_series.iloc[-1]])
    else:
        modin_result = modin_series.append([modin_series.iloc[-1]])
        df_equals(modin_result, pandas_result)

    verify_integrity_values = [True, False]

    for verify_integrity in verify_integrity_values:
        try:
            pandas_result = pandas_series.append(
                [pandas_series, pandas_series], verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_series.append(
                    [modin_series, modin_series], verify_integrity=verify_integrity
                )
        else:
            modin_result = modin_series.append(
                [modin_series, modin_series], verify_integrity=verify_integrity
            )
            df_equals(modin_result, pandas_result)

        try:
            pandas_result = pandas_series.append(
                pandas_series, verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_series.append(modin_series, verify_integrity=verify_integrity)
        else:
            modin_result = modin_series.append(
                modin_series, verify_integrity=verify_integrity
            )
            df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(data, func):
    eval_general(
        *create_test_series(data),
        lambda df: df.apply(func),
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_apply_except(data, func):
    # SpecificationError is raised because we treat a Series as a DataFrame.
    # See details in pandas issue 36036.
    with pytest.raises(SpecificationError):
        eval_general(
            *create_test_series(data),
            lambda df: df.apply(func),
        )


def test_apply_external_lib():
    json_string = """
    {
        "researcher": {
            "name": "Ford Prefect",
            "species": "Betelgeusian",
            "relatives": [
                {
                    "name": "Zaphod Beeblebrox",
                    "species": "Betelgeusian"
                }
            ]
        }
    }
    """
    modin_result = pd.DataFrame.from_dict({"a": [json_string]}).a.apply(json.loads)
    pandas_result = pandas.DataFrame.from_dict({"a": [json_string]}).a.apply(json.loads)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply_numeric(request, data, func):
    if name_contains(request.node.name, numeric_dfs):
        eval_general(
            *create_test_series(data),
            lambda df: df.apply(func),
        )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_apply_numeric_except(request, data, func):
    if name_contains(request.node.name, numeric_dfs):
        # SpecificationError is raised because we treat a Series as a DataFrame.
        # See details in pandas issue 36036.
        with pytest.raises(SpecificationError):
            eval_general(
                *create_test_series(data),
                lambda df: df.apply(func),
            )


@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("level", [None, -1, 0, 1])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", ["count", "all", "kurt", "array", "searchsorted"])
def test_apply_text_func(level, data, func, axis):
    func_kwargs = {}
    if level:
        func_kwargs.update({"level": level})
    if axis:
        func_kwargs.update({"axis": axis})
    rows_number = len(next(iter(data.values())))  # length of the first data column
    level_0 = np.random.choice([0, 1, 2], rows_number)
    level_1 = np.random.choice([3, 4, 5], rows_number)
    index = pd.MultiIndex.from_arrays([level_0, level_1])

    modin_series, pandas_series = create_test_series(data)
    modin_series.index = index
    pandas_series.index = index

    eval_general(modin_series, pandas_series, lambda df: df.apply(func), **func_kwargs)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmax(data, skipna):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.argmax(skipna=skipna), pandas_series.argmax(skipna=skipna))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmin(data, skipna):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.argmin(skipna=skipna), pandas_series.argmin(skipna=skipna))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_argsort(data):
    modin_series, pandas_series = create_test_series(data)
    with warns_that_defaulting_to_pandas():
        modin_result = modin_series.argsort()
    df_equals(modin_result, pandas_series.argsort())


def test_asfreq():
    index = pd.date_range("1/1/2000", periods=4, freq="T")
    series = pd.Series([0.0, None, 2.0, 3.0], index=index)
    with warns_that_defaulting_to_pandas():
        # We are only testing that this defaults to pandas, so we will just check for
        # the warning
        series.asfreq(freq="30S")


@pytest.mark.parametrize(
    "where",
    [
        20,
        30,
        [10, 40],
        [20, 30],
        [20],
        25,
        [25, 45],
        [25, 30],
        pandas.Index([20, 30]),
        pandas.Index([10]),
    ],
)
def test_asof(where):
    # With NaN:
    values = [1, 2, np.nan, 4]
    index = [10, 20, 30, 40]
    modin_series, pandas_series = (
        pd.Series(values, index=index),
        pandas.Series(values, index=index),
    )
    df_equals(modin_series.asof(where), pandas_series.asof(where))

    # No NaN:
    values = [1, 2, 7, 4]
    modin_series, pandas_series = (
        pd.Series(values, index=index),
        pandas.Series(values, index=index),
    )
    df_equals(modin_series.asof(where), pandas_series.asof(where))
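

# Editor's sketch (not part of the original suite): asof() returns the last
# non-NaN value whose index label is at or before each `where` label, which is
# why the NaN at label 30 above is skipped. Plain-pandas illustration:
def test_asof_semantics_sketch():
    s = pandas.Series([1.0, 2.0, np.nan, 4.0], index=[10, 20, 30, 40])
    assert s.asof(30) == 2.0  # value at 30 is NaN, so the value at 20 wins
    assert s.asof(25) == 2.0  # 25 falls between labels 20 and 30
    assert np.isnan(s.asof(5))  # nothing at or before label 5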


@pytest.mark.parametrize(
    "where",
    [20, 30, [10.5, 40.5], [10], pandas.Index([20, 30]), pandas.Index([10.5])],
)
def test_asof_large(where):
    values = test_data["float_nan_data"]["col1"]
    index = list(range(len(values)))
    modin_series, pandas_series = (
        pd.Series(values, index=index),
        pandas.Series(values, index=index),
    )
    df_equals(modin_series.asof(where), pandas_series.asof(where))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_astype(data):
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.astype(str)
    except Exception as e:
        with pytest.raises(type(e)):
            repr(modin_series.astype(str))  # repr to force materialization
    else:
        df_equals(modin_series.astype(str), pandas_result)

    try:
        pandas_result = pandas_series.astype(np.int64)
    except Exception as e:
        with pytest.raises(type(e)):
            repr(modin_series.astype(np.int64))  # repr to force materialization
    else:
        df_equals(modin_series.astype(np.int64), pandas_result)

    try:
        pandas_result = pandas_series.astype(np.float64)
    except Exception as e:
        with pytest.raises(type(e)):
            repr(modin_series.astype(np.float64))  # repr to force materialization
    else:
        df_equals(modin_series.astype(np.float64), pandas_result)


def test_astype_categorical():
    modin_df = pd.Series(["A", "A", "B", "B", "A"])
    pandas_df = pandas.Series(["A", "A", "B", "B", "A"])

    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtype == pandas_result.dtype

    modin_df = pd.Series([1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2])
    pandas_df = pandas.Series([1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2])
    # Recompute the results for the integer data; the original comparison
    # re-used the stale string-series results.
    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtype == pandas_result.dtype


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(
        modin_series.at[modin_series.index[0]], pandas_series.at[pandas_series.index[0]]
    )
    df_equals(
        modin_series.at[modin_series.index[-1]],
        pandas_series.at[pandas_series.index[-1]],
    )


def test_at_time():
    i = pd.date_range("2008-01-01", periods=1000, freq="12H")
    modin_series = pd.Series(list(range(1000)), index=i)
    pandas_series = pandas.Series(list(range(1000)), index=i)
    df_equals(modin_series.at_time("12:00"), pandas_series.at_time("12:00"))
    df_equals(modin_series.at_time("3:00"), pandas_series.at_time("3:00"))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("lag", [1, 2, 3])
def test_autocorr(data, lag):
    modin_series, pandas_series = create_test_series(data)
    modin_result = modin_series.autocorr(lag=lag)
    pandas_result = pandas_series.autocorr(lag=lag)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.axes[0].equals(pandas_series.axes[0])
    assert len(modin_series.axes) == len(pandas_series.axes)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_attrs(data):
    modin_series, pandas_series = create_test_series(data)
    eval_general(modin_series, pandas_series, lambda df: df.attrs)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_array(data):
    modin_series, pandas_series = create_test_series(data)
    eval_general(modin_series, pandas_series, lambda df: df.array)
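

# Editor's note: the try/except/else blocks in test_astype above follow the
# parity pattern used throughout this file. A condensed sketch of that
# pattern (the helper name is illustrative, not a suite utility): when pandas
# raises, Modin must raise the same exception type; when pandas succeeds, the
# Modin result must match elementwise.
def _parity_check_sketch(modin_obj, pandas_obj, op):
    try:
        pandas_result = op(pandas_obj)
    except Exception as e:
        with pytest.raises(type(e)):
            op(modin_obj)
    else:
        df_equals(op(modin_obj), pandas_result)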


@pytest.mark.skip(reason="Using pandas Series.")
def test_between():
    modin_series = create_test_series()

    with pytest.raises(NotImplementedError):
        modin_series.between(None, None)


def test_between_time():
    i = pd.date_range("2008-01-01", periods=1000, freq="12H")
    modin_series = pd.Series(list(range(1000)), index=i)
    pandas_series = pandas.Series(list(range(1000)), index=i)
    df_equals(
        modin_series.between_time("12:00", "17:00"),
        pandas_series.between_time("12:00", "17:00"),
    )
    df_equals(
        modin_series.between_time("3:00", "8:00"),
        pandas_series.between_time("3:00", "8:00"),
    )
    df_equals(
        modin_series.between_time("3:00", "8:00", False),
        pandas_series.between_time("3:00", "8:00", False),
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bfill(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.bfill(), pandas_series.bfill())
    # inplace
    modin_series_cp = modin_series.copy()
    pandas_series_cp = pandas_series.copy()
    modin_series_cp.bfill(inplace=True)
    pandas_series_cp.bfill(inplace=True)
    df_equals(modin_series_cp, pandas_series_cp)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(data):
    modin_series, pandas_series = create_test_series(data)

    with pytest.raises(ValueError):
        modin_series.bool()
    with pytest.raises(ValueError):
        modin_series.__bool__()


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_clip(request, data):
    modin_series, pandas_series = create_test_series(data)

    if name_contains(request.node.name, numeric_dfs):
        # set bounds
        lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))

        # test only upper scalar bound
        modin_result = modin_series.clip(None, upper)
        pandas_result = pandas_series.clip(None, upper)
        df_equals(modin_result, pandas_result)

        # test lower and upper scalar bound
        modin_result = modin_series.clip(lower, upper)
        pandas_result = pandas_series.clip(lower, upper)
        df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine(data):
    modin_series, _ = create_test_series(data)  # noqa: F841
    modin_series2 = modin_series % (max(modin_series) // 2)
    modin_series.combine(modin_series2, lambda s1, s2: s1 if s1 < s2 else s2)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine_first(data):
    modin_series, pandas_series = create_test_series(data)
    modin_series2 = modin_series % (max(modin_series) // 2)
    pandas_series2 = pandas_series % (max(pandas_series) // 2)
    modin_result = modin_series.combine_first(modin_series2)
    pandas_result = pandas_series.combine_first(pandas_series2)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_compress(data):
    modin_series, pandas_series = create_test_series(data)  # noqa: F841
    try:
        pandas_series.compress(pandas_series > 30)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.compress(modin_series > 30)
    else:
        modin_series.compress(modin_series > 30)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_constructor(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series, pandas_series)
    df_equals(pd.Series(modin_series), pandas.Series(pandas_series))
name=\"health\")\n df_equals(modin_series, pandas_series)\n df_equals(pd.Series(modin_series), pandas.Series(pandas_series))\n df_equals(\n pd.Series(modin_series, name=\"max_speed\"),\n pandas.Series(pandas_series, name=\"max_speed\"),\n )\n df_equals(\n pd.Series(modin_series, index=[1, 2]),\n pandas.Series(pandas_series, index=[1, 2]),\n )\n with pytest.raises(NotImplementedError):\n pd.Series(modin_series, index=[1, 2, 99999])\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_copy(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series, modin_series.copy())\n df_equals(modin_series.copy(), pandas_series)\n df_equals(modin_series.copy(), pandas_series.copy())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_corr(data):\n modin_series, pandas_series = create_test_series(data)\n modin_result = modin_series.corr(modin_series)\n pandas_result = pandas_series.corr(pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\",\n test_data_values + test_data_large_categorical_series_values,\n ids=test_data_keys + test_data_large_categorical_series_keys,\n)\ndef test_count(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.count(), pandas_series.count())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_cov(data):\n modin_series, pandas_series = create_test_series(data)\n modin_result = modin_series.cov(modin_series)\n pandas_result = pandas_series.cov(pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_cummax(data, skipna):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.cummax(skipna=skipna)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.cummax(skipna=skipna)\n else:\n df_equals(modin_series.cummax(skipna=skipna), pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_cummin(data, skipna):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.cummin(skipna=skipna)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.cummin(skipna=skipna)\n else:\n df_equals(modin_series.cummin(skipna=skipna), pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_cumprod(data, skipna):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.cumprod(skipna=skipna)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.cumprod(skipna=skipna)\n else:\n df_equals(modin_series.cumprod(skipna=skipna), pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_cumsum(data, skipna):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.cumsum(skipna=skipna)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.cumsum(skipna=skipna)\n else:\n df_equals(modin_series.cumsum(skipna=skipna), pandas_result)\n\n\[email 
protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_describe(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.describe(), pandas_series.describe())\n percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]\n df_equals(\n modin_series.describe(percentiles=percentiles),\n pandas_series.describe(percentiles=percentiles),\n )\n\n try:\n pandas_result = pandas_series.describe(exclude=[np.float64])\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.describe(exclude=[np.float64])\n else:\n modin_result = modin_series.describe(exclude=[np.float64])\n df_equals(modin_result, pandas_result)\n\n try:\n pandas_result = pandas_series.describe(exclude=np.float64)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.describe(exclude=np.float64)\n else:\n modin_result = modin_series.describe(exclude=np.float64)\n df_equals(modin_result, pandas_result)\n\n try:\n pandas_result = pandas_series.describe(\n include=[np.timedelta64, np.datetime64, np.object, np.bool]\n )\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.describe(\n include=[np.timedelta64, np.datetime64, np.object, np.bool]\n )\n else:\n modin_result = modin_series.describe(\n include=[np.timedelta64, np.datetime64, np.object, np.bool]\n )\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_series.describe(include=str(modin_series.dtypes))\n pandas_result = pandas_series.describe(include=str(pandas_series.dtypes))\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_series.describe(include=[np.number])\n pandas_result = pandas_series.describe(include=[np.number])\n df_equals(modin_result, pandas_result)\n\n df_equals(\n modin_series.describe(include=\"all\"), pandas_series.describe(include=\"all\")\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"periods\", int_arg_values, ids=arg_keys(\"periods\", int_arg_keys)\n)\ndef test_diff(data, periods):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.diff(periods=periods)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.diff(periods=periods)\n else:\n modin_result = modin_series.diff(periods=periods)\n df_equals(modin_result, pandas_result)\n\n try:\n pandas_result = pandas_series.T.diff(periods=periods)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.T.diff(periods=periods)\n else:\n modin_result = modin_series.T.diff(periods=periods)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_div(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"div\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_divide(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"divide\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_dot(data):\n modin_series, pandas_series = create_test_series(data)\n ind_len = len(modin_series)\n\n # Test 1D array input\n arr = np.arange(ind_len)\n modin_result = modin_series.dot(arr)\n pandas_result = pandas_series.dot(arr)\n df_equals(modin_result, pandas_result)\n\n # Test 2D array input\n arr = np.arange(ind_len * 2).reshape(ind_len, 2)\n modin_result = modin_series.dot(arr)\n pandas_result = pandas_series.dot(arr)\n assert_array_equal(modin_result, 


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(data):
    modin_series, pandas_series = create_test_series(data)
    ind_len = len(modin_series)

    # Test 1D array input
    arr = np.arange(ind_len)
    modin_result = modin_series.dot(arr)
    pandas_result = pandas_series.dot(arr)
    df_equals(modin_result, pandas_result)

    # Test 2D array input
    arr = np.arange(ind_len * 2).reshape(ind_len, 2)
    modin_result = modin_series.dot(arr)
    pandas_result = pandas_series.dot(arr)
    assert_array_equal(modin_result, pandas_result)

    # Test bad dimensions
    with pytest.raises(ValueError):
        modin_result = modin_series.dot(np.arange(ind_len + 10))

    # Test dataframe input
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    modin_result = modin_series.dot(modin_df)
    pandas_result = pandas_series.dot(pandas_df)
    df_equals(modin_result, pandas_result)

    # Test series input
    modin_series_2 = pd.Series(np.arange(ind_len), index=modin_series.index)
    pandas_series_2 = pandas.Series(np.arange(ind_len), index=pandas_series.index)
    modin_result = modin_series.dot(modin_series_2)
    pandas_result = pandas_series.dot(pandas_series_2)
    df_equals(modin_result, pandas_result)

    # Test when input series index doesn't line up with columns
    with pytest.raises(ValueError):
        modin_result = modin_series.dot(
            pd.Series(
                np.arange(ind_len), index=["a" for _ in range(len(modin_series.index))]
            )
        )

    # Test case when left series has size (1 x 1)
    # and right dataframe has size (1 x n)
    modin_result = pd.Series([1]).dot(pd.DataFrame(modin_series).T)
    pandas_result = pandas.Series([1]).dot(pandas.DataFrame(pandas_series).T)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_matmul(data):
    modin_series, pandas_series = create_test_series(data)  # noqa: F841
    ind_len = len(modin_series)

    # Test 1D array input
    arr = np.arange(ind_len)
    modin_result = modin_series @ arr
    pandas_result = pandas_series @ arr
    df_equals(modin_result, pandas_result)

    # Test 2D array input
    arr = np.arange(ind_len * 2).reshape(ind_len, 2)
    modin_result = modin_series @ arr
    pandas_result = pandas_series @ arr
    assert_array_equal(modin_result, pandas_result)

    # Test bad dimensions
    with pytest.raises(ValueError):
        modin_result = modin_series @ np.arange(ind_len + 10)

    # Test dataframe input
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    modin_result = modin_series @ modin_df
    pandas_result = pandas_series @ pandas_df
    df_equals(modin_result, pandas_result)

    # Test series input
    modin_series_2 = pd.Series(np.arange(ind_len), index=modin_series.index)
    pandas_series_2 = pandas.Series(np.arange(ind_len), index=pandas_series.index)
    modin_result = modin_series @ modin_series_2
    pandas_result = pandas_series @ pandas_series_2
    df_equals(modin_result, pandas_result)

    # Test when input series index doesn't line up with columns
    with pytest.raises(ValueError):
        modin_result = modin_series @ pd.Series(
            np.arange(ind_len), index=["a" for _ in range(len(modin_series.index))]
        )


@pytest.mark.skip(reason="Using pandas Series.")
def test_drop():
    modin_series = create_test_series()

    with pytest.raises(NotImplementedError):
        modin_series.drop(None, None, None, None)


@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize("inplace", [True, False], ids=["True", "False"])
def test_drop_duplicates(data, keep, inplace):
    modin_series, pandas_series = create_test_series(data)
    df_equals(
        modin_series.drop_duplicates(keep=keep, inplace=inplace),
        pandas_series.drop_duplicates(keep=keep, inplace=inplace),
    )
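

# Editor's sketch (not part of the original suite): the dimension rules that
# test_dot/test_matmul above rely on, shown with plain pandas. A 1D operand
# of matching length yields a scalar, a 2D operand yields one value per
# column, and a length mismatch raises ValueError.
def test_dot_shape_rules_sketch():
    s = pandas.Series([1.0, 2.0, 3.0])
    assert s.dot(np.ones(3)) == 6.0  # 1D input -> scalar
    assert s.dot(np.ones((3, 2))).shape == (2,)  # 2D input -> per-column values
    with pytest.raises(ValueError):
        s.dot(np.ones(4))  # mismatched length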


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(data, how):
    modin_series, pandas_series = create_test_series(data)

    with pytest.raises(TypeError):
        modin_series.dropna(how=None, thresh=None)

    modin_result = modin_series.dropna(how=how)
    pandas_result = pandas_series.dropna(how=how)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(data):
    modin_series, pandas_series = create_test_series(data)
    pandas_result = pandas_series.dropna()
    modin_series.dropna(inplace=True)
    df_equals(modin_series, pandas_result)

    modin_series, pandas_series = create_test_series(data)
    with pytest.raises(TypeError):
        modin_series.dropna(thresh=2, inplace=True)

    modin_series, pandas_series = create_test_series(data)
    pandas_series.dropna(how="any", inplace=True)
    modin_series.dropna(how="any", inplace=True)
    df_equals(modin_series, pandas_series)


def test_dtype_empty():
    modin_series, pandas_series = pd.Series(), pandas.Series()
    if get_current_execution() == "BaseOnPython":
        # TODO: Remove this when the default `dtype` of an empty Series is
        # `object` in pandas (see #3142).
        assert modin_series.dtype == np.object
        assert pandas_series.dtype == np.float64
    else:
        assert modin_series.dtype == pandas_series.dtype


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtype(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.dtype, modin_series.dtypes)
    df_equals(modin_series.dtype, pandas_series.dtype)
    df_equals(modin_series.dtype, pandas_series.dtypes)


def test_dt():
    data = pd.date_range("2016-12-31", periods=128, freq="D", tz="Europe/Berlin")
    modin_series = pd.Series(data)
    pandas_series = pandas.Series(data)

    df_equals(modin_series.dt.date, pandas_series.dt.date)
    df_equals(modin_series.dt.time, pandas_series.dt.time)
    df_equals(modin_series.dt.timetz, pandas_series.dt.timetz)
    df_equals(modin_series.dt.year, pandas_series.dt.year)
    df_equals(modin_series.dt.month, pandas_series.dt.month)
    df_equals(modin_series.dt.day, pandas_series.dt.day)
    df_equals(modin_series.dt.hour, pandas_series.dt.hour)
    df_equals(modin_series.dt.minute, pandas_series.dt.minute)
    df_equals(modin_series.dt.second, pandas_series.dt.second)
    df_equals(modin_series.dt.microsecond, pandas_series.dt.microsecond)
    df_equals(modin_series.dt.nanosecond, pandas_series.dt.nanosecond)
    df_equals(modin_series.dt.week, pandas_series.dt.week)
    df_equals(modin_series.dt.weekofyear, pandas_series.dt.weekofyear)
    df_equals(modin_series.dt.dayofweek, pandas_series.dt.dayofweek)
    df_equals(modin_series.dt.weekday, pandas_series.dt.weekday)
    df_equals(modin_series.dt.dayofyear, pandas_series.dt.dayofyear)
    df_equals(modin_series.dt.quarter, pandas_series.dt.quarter)
    df_equals(modin_series.dt.is_month_start, pandas_series.dt.is_month_start)
    df_equals(modin_series.dt.is_month_end, pandas_series.dt.is_month_end)
    df_equals(modin_series.dt.is_quarter_start, pandas_series.dt.is_quarter_start)
    df_equals(modin_series.dt.is_quarter_end, pandas_series.dt.is_quarter_end)
    df_equals(modin_series.dt.is_year_start, pandas_series.dt.is_year_start)
    df_equals(modin_series.dt.is_year_end, pandas_series.dt.is_year_end)
    df_equals(modin_series.dt.is_leap_year, pandas_series.dt.is_leap_year)
    df_equals(modin_series.dt.daysinmonth, pandas_series.dt.daysinmonth)
    df_equals(modin_series.dt.days_in_month, pandas_series.dt.days_in_month)
    assert modin_series.dt.tz == pandas_series.dt.tz
    assert modin_series.dt.freq == pandas_series.dt.freq
    df_equals(modin_series.dt.to_period("W"), pandas_series.dt.to_period("W"))
    assert_array_equal(
        modin_series.dt.to_pydatetime(), pandas_series.dt.to_pydatetime()
    )
    df_equals(
        modin_series.dt.tz_localize(None),
        pandas_series.dt.tz_localize(None),
    )
    df_equals(
        modin_series.dt.tz_convert(tz="Europe/Berlin"),
        pandas_series.dt.tz_convert(tz="Europe/Berlin"),
    )

    df_equals(modin_series.dt.normalize(), pandas_series.dt.normalize())
    df_equals(
        modin_series.dt.strftime("%B %d, %Y, %r"),
        pandas_series.dt.strftime("%B %d, %Y, %r"),
    )
    df_equals(modin_series.dt.round("H"), pandas_series.dt.round("H"))
    df_equals(modin_series.dt.floor("H"), pandas_series.dt.floor("H"))
    df_equals(modin_series.dt.ceil("H"), pandas_series.dt.ceil("H"))
    df_equals(modin_series.dt.month_name(), pandas_series.dt.month_name())
    df_equals(modin_series.dt.day_name(), pandas_series.dt.day_name())

    modin_series = pd.Series(pd.to_timedelta(np.arange(128), unit="d"))
    pandas_series = pandas.Series(pandas.to_timedelta(np.arange(128), unit="d"))

    assert_array_equal(
        modin_series.dt.to_pytimedelta(), pandas_series.dt.to_pytimedelta()
    )
    df_equals(modin_series.dt.total_seconds(), pandas_series.dt.total_seconds())
    df_equals(modin_series.dt.days, pandas_series.dt.days)
    df_equals(modin_series.dt.seconds, pandas_series.dt.seconds)
    df_equals(modin_series.dt.microseconds, pandas_series.dt.microseconds)
    df_equals(modin_series.dt.nanoseconds, pandas_series.dt.nanoseconds)
    df_equals(modin_series.dt.components, pandas_series.dt.components)

    data_per = pd.date_range("1/1/2012", periods=128, freq="M")
    pandas_series = pandas.Series(data_per, index=data_per).dt.to_period()
    modin_series = pd.Series(data_per, index=data_per).dt.to_period()

    df_equals(modin_series.dt.qyear, pandas_series.dt.qyear)
    df_equals(modin_series.dt.start_time, pandas_series.dt.start_time)
    df_equals(modin_series.dt.end_time, pandas_series.dt.end_time)
    df_equals(modin_series.dt.to_timestamp(), pandas_series.dt.to_timestamp())


@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(data, keep):
    modin_series, pandas_series = create_test_series(data)
    modin_result = modin_series.duplicated(keep=keep)
    df_equals(modin_result, pandas_series.duplicated(keep=keep))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_empty(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.empty == pandas_series.empty


def test_empty_series():
    modin_series = pd.Series()
    assert modin_series.empty


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "eq")


def test_equals():
    series_data = [2.9, 3, 3, 3]
    modin_df1 = pd.Series(series_data)
    modin_df2 = pd.Series(series_data)

    assert modin_df1.equals(modin_df2)
    assert modin_df1.equals(pd.Series(modin_df1))
    df_equals(modin_df1, modin_df2)
    df_equals(modin_df1, pd.Series(modin_df1))

    series_data = [2, 3, 5, 1]
    modin_df3 = pd.Series(series_data, index=list("abcd"))

    assert not modin_df1.equals(modin_df3)

    with pytest.raises(AssertionError):
        df_equals(modin_df3, modin_df1)

    with pytest.raises(AssertionError):
        df_equals(modin_df3, modin_df2)
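

# Editor's sketch (not part of the original suite): unlike elementwise `==`,
# Series.equals treats NaNs in matching positions as equal, which is the
# contract test_equals above checks through df_equals.
def test_equals_nan_sketch():
    a = pandas.Series([1.0, np.nan])
    b = pandas.Series([1.0, np.nan])
    assert a.equals(b)  # NaN == NaN positionally for equals()
    assert not (a == b).all()  # elementwise comparison says NaN != NaN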


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ewm(data):
    modin_series, _ = create_test_series(data)  # noqa: F841
    with warns_that_defaulting_to_pandas():
        modin_series.ewm(halflife=6)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_expanding(data):
    modin_series, _ = create_test_series(data)  # noqa: F841
    with warns_that_defaulting_to_pandas():
        modin_series.expanding()


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_factorize(data):
    modin_series, _ = create_test_series(data)  # noqa: F841
    with warns_that_defaulting_to_pandas():
        modin_series.factorize()


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ffill(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.ffill(), pandas_series.ffill())
    # inplace
    modin_series_cp = modin_series.copy()
    pandas_series_cp = pandas_series.copy()
    modin_series_cp.ffill(inplace=True)
    pandas_series_cp.ffill(inplace=True)
    df_equals(modin_series_cp, pandas_series_cp)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("reindex", [None, 2, -2])
@pytest.mark.parametrize("limit", [None, 1, 2, 0.5, -1, -2, 1.5])
def test_fillna(data, reindex, limit):
    modin_series, pandas_series = create_test_series(data)
    index = pandas_series.index
    pandas_replace_series = index.to_series().sample(frac=1)
    modin_replace_series = pd.Series(pandas_replace_series)
    replace_dict = pandas_replace_series.to_dict()

    if reindex is not None:
        if reindex > 0:
            pandas_series = pandas_series[:reindex].reindex(index)
            modin_series = pd.Series(pandas_series)
        else:
            pandas_series = pandas_series[reindex:].reindex(index)
            # Because of bug #3178 modin Series has to be created from pandas
            # Series instead of performing the same slice and reindex operations.
            modin_series = pd.Series(pandas_series)

    if isinstance(limit, float):
        limit = int(len(modin_series) * limit)
    if limit is not None and limit < 0:
        limit = len(modin_series) + limit

    df_equals(modin_series.fillna(0, limit=limit), pandas_series.fillna(0, limit=limit))
    df_equals(
        modin_series.fillna(method="bfill", limit=limit),
        pandas_series.fillna(method="bfill", limit=limit),
    )
    df_equals(
        modin_series.fillna(method="ffill", limit=limit),
        pandas_series.fillna(method="ffill", limit=limit),
    )
    df_equals(
        modin_series.fillna(modin_replace_series, limit=limit),
        pandas_series.fillna(pandas_replace_series, limit=limit),
    )
    df_equals(
        modin_series.fillna(replace_dict, limit=limit),
        pandas_series.fillna(replace_dict, limit=limit),
    )


@pytest.mark.skip(reason="Using pandas Series.")
def test_filter():
    modin_series = create_test_series()

    with pytest.raises(NotImplementedError):
        modin_series.filter(None, None, None)


def test_first():
    i = pd.date_range("2010-04-09", periods=400, freq="2D")
    modin_series = pd.Series(list(range(400)), index=i)
    pandas_series = pandas.Series(list(range(400)), index=i)
    df_equals(modin_series.first("3D"), pandas_series.first("3D"))
    df_equals(modin_series.first("20D"), pandas_series.first("20D"))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.first_valid_index(), pandas_series.first_valid_index())
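

# Editor's note (not part of the original suite): a worked example of the
# `limit` normalization in test_fillna above. A float limit is interpreted as
# a fraction of the Series length and a negative limit counts back from the
# end, so on 10 rows limit=0.5 becomes 5 and limit=-2 becomes 8. fillna then
# fills at most that many NaNs:
def test_fillna_limit_arith_sketch():
    n = 10
    assert int(n * 0.5) == 5
    assert n + (-2) == 8
    s = pandas.Series([np.nan] * 4)
    assert s.fillna(0, limit=2).isna().sum() == 2  # only 2 of 4 NaNs filled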


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "floordiv")


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "ge")


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get(data):
    modin_series, pandas_series = create_test_series(data)
    for key in modin_series.keys():
        df_equals(modin_series.get(key), pandas_series.get(key))
    df_equals(
        modin_series.get("NO_EXIST", "DEFAULT"),
        pandas_series.get("NO_EXIST", "DEFAULT"),
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "gt")


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_hasnans(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.hasnans == pandas_series.hasnans


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
    modin_series, pandas_series = create_test_series(data)

    df_equals(modin_series.head(n), pandas_series.head(n))
    df_equals(
        modin_series.head(len(modin_series)), pandas_series.head(len(pandas_series))
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_hist(data):
    modin_series, _ = create_test_series(data)  # noqa: F841
    with warns_that_defaulting_to_pandas():
        modin_series.hist(None)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.iat[0], pandas_series.iat[0])


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(data, skipna):
    modin_series, pandas_series = create_test_series(data)
    pandas_result = pandas_series.idxmax(skipna=skipna)
    modin_result = modin_series.idxmax(skipna=skipna)
    df_equals(modin_result, pandas_result)

    pandas_result = pandas_series.T.idxmax(skipna=skipna)
    modin_result = modin_series.T.idxmax(skipna=skipna)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(data, skipna):
    modin_series, pandas_series = create_test_series(data)
    pandas_result = pandas_series.idxmin(skipna=skipna)
    modin_result = modin_series.idxmin(skipna=skipna)
    df_equals(modin_result, pandas_result)

    pandas_result = pandas_series.T.idxmin(skipna=skipna)
    modin_result = modin_series.T.idxmin(skipna=skipna)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
    modin_series, pandas_series = create_test_series(data)

    if not name_contains(request.node.name, ["empty_data"]):
        # Scalar
        np.testing.assert_equal(modin_series.iloc[0], pandas_series.iloc[0])

        # Series
        df_equals(modin_series.iloc[1:], pandas_series.iloc[1:])
        df_equals(modin_series.iloc[1:2], pandas_series.iloc[1:2])
        df_equals(modin_series.iloc[[1, 2]], pandas_series.iloc[[1, 2]])

        # Write Item
        modin_series.iloc[[1, 2]] = 42
        pandas_series.iloc[[1, 2]] = 42
        df_equals(modin_series, pandas_series)

        with pytest.raises(IndexError):
            modin_series.iloc[1:, 1]
    else:
        with pytest.raises(IndexError):
            modin_series.iloc[0]


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.index, pandas_series.index)
    with pytest.raises(ValueError):
        modin_series.index = list(modin_series.index) + [999]

    modin_series.index = modin_series.index.map(str)
    pandas_series.index = pandas_series.index.map(str)
    df_equals(modin_series.index, pandas_series.index)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_interpolate(data):
    modin_series, _ = create_test_series(data)  # noqa: F841
    with warns_that_defaulting_to_pandas():
        modin_series.interpolate()


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_monotonic(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.is_monotonic == pandas_series.is_monotonic


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_monotonic_decreasing(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.is_monotonic_decreasing == pandas_series.is_monotonic_decreasing


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_monotonic_increasing(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.is_monotonic_increasing == pandas_series.is_monotonic_increasing


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_unique(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.is_unique == pandas_series.is_unique


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isin(data):
    modin_series, pandas_series = create_test_series(data)
    val = [1, 2, 3, 4]
    pandas_result = pandas_series.isin(val)
    modin_result = modin_series.isin(val)
    df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.isnull(), pandas_series.isnull())


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(data):
    modin_series, pandas_series = create_test_series(data)

    modin_items = modin_series.items()
    pandas_items = pandas_series.items()
    for modin_item, pandas_item in zip(modin_items, pandas_items):
        modin_index, modin_scalar = modin_item
        pandas_index, pandas_scalar = pandas_item
        df_equals(modin_scalar, pandas_scalar)
        assert pandas_index == modin_index


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(data):
    modin_series, pandas_series = create_test_series(data)

    modin_items = modin_series.iteritems()
    pandas_items = pandas_series.iteritems()
    for modin_item, pandas_item in zip(modin_items, pandas_items):
        modin_index, modin_scalar = modin_item
        pandas_index, pandas_scalar = pandas_item
        df_equals(modin_scalar, pandas_scalar)
        assert pandas_index == modin_index


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.keys(), pandas_series.keys())
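

# Editor's sketch (not part of the original suite): items()/iteritems(),
# compared pairwise above, both yield (index label, value) tuples in order.
def test_items_pairs_sketch():
    s = pandas.Series([10, 20], index=["a", "b"])
    assert list(s.items()) == [("a", 10), ("b", 20)]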


def test_kurtosis_alias():
    # This is an optimization check: Series.kurt should simply alias
    # Series.kurtosis. If it fails, Series.kurt should be tested explicitly
    # in `test_kurt_kurtosis` and `test_kurt_kurtosis_level`.
    assert pd.Series.kurt == pd.Series.kurtosis


@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("skipna", bool_arg_values, ids=bool_arg_keys)
def test_kurtosis(axis, skipna):
    eval_general(
        *create_test_series(test_data["float_nan_data"]),
        lambda df: df.kurtosis(axis=axis, skipna=skipna),
    )


@pytest.mark.parametrize("axis", ["rows", "columns"])
@pytest.mark.parametrize("numeric_only", [True, False, None])
def test_kurtosis_numeric_only(axis, numeric_only):
    eval_general(
        *create_test_series(test_data_diff_dtype),
        lambda df: df.kurtosis(axis=axis, numeric_only=numeric_only),
    )


@pytest.mark.parametrize("level", [-1, 0, 1])
def test_kurtosis_level(level):
    data = test_data["int_data"]
    modin_s, pandas_s = create_test_series(data)

    index = generate_multiindex(len(data.keys()))
    modin_s.columns = index
    pandas_s.columns = index

    eval_general(
        modin_s,
        pandas_s,
        lambda s: s.kurtosis(axis=1, level=level),
    )


def test_last():
    modin_index = pd.date_range("2010-04-09", periods=400, freq="2D")
    pandas_index = pandas.date_range("2010-04-09", periods=400, freq="2D")
    modin_series = pd.Series(list(range(400)), index=modin_index)
    pandas_series = pandas.Series(list(range(400)), index=pandas_index)
    df_equals(modin_series.last("3D"), pandas_series.last("3D"))
    df_equals(modin_series.last("20D"), pandas_series.last("20D"))


@pytest.mark.parametrize("func", ["all", "any", "mad", "count"])
def test_index_order(func):
    # see #1708 and #1869 for details
    s_modin, s_pandas = create_test_series(test_data["float_nan_data"])
    rows_number = len(s_modin.index)
    level_0 = np.random.choice([x for x in range(10)], rows_number)
    level_1 = np.random.choice([x for x in range(10)], rows_number)
    index = pandas.MultiIndex.from_arrays([level_0, level_1])

    s_modin.index = index
    s_pandas.index = index

    df_equals(
        getattr(s_modin, func)(level=0).index,
        getattr(s_pandas, func)(level=0).index,
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(data):
    modin_series, pandas_series = create_test_series(data)
    assert modin_series.last_valid_index() == (pandas_series.last_valid_index())


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "le")


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(data):
    modin_series, pandas_series = create_test_series(data)
    for v in modin_series.index:
        df_equals(modin_series.loc[v], pandas_series.loc[v])
        df_equals(modin_series.loc[v:], pandas_series.loc[v:])

    indices = [True if i % 3 == 0 else False for i in range(len(modin_series.index))]
    modin_result = modin_series.loc[indices]
    pandas_result = pandas_series.loc[indices]
    df_equals(modin_result, pandas_result)

    # From issue #1988
    index = pd.MultiIndex.from_product([np.arange(10), np.arange(10)], names=["f", "s"])
    data = np.arange(100)
    modin_series = pd.Series(data, index=index).sort_index()
    pandas_series = pandas.Series(data, index=index).sort_index()
    modin_result = modin_series.loc[
        (slice(None), 1),
    ]
    pandas_result = pandas_series.loc[
        (slice(None), 1),
    ]
    df_equals(modin_result, pandas_result)
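

# Editor's note on the `(slice(None), 1)` lookup in test_loc above:
# slice(None) selects every value of the first index level while 1 pins the
# second level, so the result keeps one row per first-level value. A minimal
# plain-pandas sketch (not part of the original suite):
def test_loc_multiindex_sketch():
    idx = pandas.MultiIndex.from_product([[0, 1], [0, 1]], names=["f", "s"])
    s = pandas.Series(range(4), index=idx)
    result = s.loc[
        (slice(None), 1),
    ]
    assert list(result) == [1, 3]  # the rows where level "s" equals 1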


# This tests the bug from https://github.com/modin-project/modin/issues/3736
def test_loc_setting_categorical_series():
    modin_series = pd.Series(["a", "b", "c"], dtype="category")
    pandas_series = pandas.Series(["a", "b", "c"], dtype="category")
    modin_series.loc[1:3] = "a"
    pandas_series.loc[1:3] = "a"
    df_equals(modin_series, pandas_series)


# This tests the bug from https://github.com/modin-project/modin/issues/3736
def test_iloc_assigning_scalar_none_to_string_series():
    data = ["A"]
    modin_series, pandas_series = create_test_series(data, dtype="string")
    modin_series.iloc[0] = None
    pandas_series.iloc[0] = None
    df_equals(modin_series, pandas_series)


def test_set_ordered_categorical_column():
    data = {"a": [1, 2, 3], "b": [4, 5, 6]}
    mdf = pd.DataFrame(data)
    pdf = pandas.DataFrame(data)
    mdf["a"] = pd.Categorical(mdf["a"], ordered=True)
    pdf["a"] = pandas.Categorical(pdf["a"], ordered=True)
    df_equals(mdf, pdf)

    modin_categories = mdf["a"].dtype
    pandas_categories = pdf["a"].dtype
    assert modin_categories == pandas_categories


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "lt")


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("skipna", [None, True, False])
@pytest.mark.parametrize("level", [0, -1, None])
def test_mad(level, data, axis, skipna):
    eval_general(
        *create_test_series(data),
        lambda df: df.mad(axis=axis, skipna=skipna, level=level),
    )


@pytest.mark.parametrize("na_values", ["ignore", None], ids=["na_ignore", "na_none"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_map(data, na_values):
    modin_series, pandas_series = create_test_series(data)
    df_equals(
        modin_series.map(str, na_action=na_values),
        pandas_series.map(str, na_action=na_values),
    )
    mapper = {i: str(i) for i in range(100)}
    df_equals(
        modin_series.map(mapper, na_action=na_values),
        pandas_series.map(mapper, na_action=na_values),
    )

    # Return list objects
    modin_series_lists = modin_series.map(lambda s: [s, s, s])
    pandas_series_lists = pandas_series.map(lambda s: [s, s, s])
    df_equals(modin_series_lists, pandas_series_lists)

    # Index into list objects
    df_equals(
        modin_series_lists.map(lambda l: l[0]), pandas_series_lists.map(lambda l: l[0])
    )


def test_mask():
    modin_series = pd.Series(np.arange(10))
    m = modin_series % 3 == 0
    with warns_that_defaulting_to_pandas():
        try:
            modin_series.mask(~m, -modin_series)
        except ValueError:
            pass


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_max(data, skipna):
    eval_general(*create_test_series(data), lambda df: df.max(skipna=skipna))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_mean(data, skipna):
    eval_general(*create_test_series(data), lambda df: df.mean(skipna=skipna))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_median(data, skipna):
    eval_general(*create_test_series(data), lambda df: df.median(skipna=skipna))
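

# Editor's sketch (not part of the original suite): the level-based
# reductions exercised by the test that follows. With a MultiIndex,
# `sum(level=0)` reduces within each outer-level group (this keyword is the
# pre-groupby spelling these tests target):
def test_sum_level_sketch():
    arrays = [["1", "1", "2"], ["a", "b", "c"]]
    s = pandas.Series([3, 3, 3], index=arrays)
    result = s.sum(level=0)
    assert list(result) == [6, 3]  # group "1" has two rows, group "2" has one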
\"method\", [\"median\", \"skew\", \"std\", \"sum\", \"var\", \"prod\", \"sem\"]\n)\ndef test_median_skew_std_sum_var_prod_sem_1953(method):\n # See #1953 for details\n data = [3, 3, 3, 3, 3, 3, 3, 3, 3]\n arrays = [\n [\"1\", \"1\", \"1\", \"2\", \"2\", \"2\", \"3\", \"3\", \"3\"],\n [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"],\n ]\n modin_s = pd.Series(data, index=arrays)\n pandas_s = pandas.Series(data, index=arrays)\n eval_general(modin_s, pandas_s, lambda s: getattr(s, method)(level=0))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"index\", [True, False], ids=[\"True\", \"False\"])\ndef test_memory_usage(data, index):\n modin_series, pandas_series = create_test_series(data)\n df_equals(\n modin_series.memory_usage(index=index), pandas_series.memory_usage(index=index)\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_min(data, skipna):\n eval_general(*create_test_series(data), lambda df: df.min(skipna=skipna))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_mod(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"mod\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_mode(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.mode(), pandas_series.mode())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_mul(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"mul\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_multiply(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"multiply\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_name(data):\n modin_series, pandas_series = create_test_series(data)\n assert modin_series.name == pandas_series.name\n modin_series.name = pandas_series.name = \"New_name\"\n assert modin_series.name == pandas_series.name\n assert modin_series._query_compiler.columns == [\"New_name\"]\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_nbytes(data):\n modin_series, pandas_series = create_test_series(data)\n assert modin_series.nbytes == pandas_series.nbytes\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_ndim(data):\n modin_series, _ = create_test_series(data) # noqa: F841\n assert modin_series.ndim == 1\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_ne(data):\n modin_series, pandas_series = create_test_series(data)\n inter_df_math_helper(modin_series, pandas_series, \"ne\")\n\n\[email protected](reason=\"Using pandas Series.\")\ndef test_nlargest():\n modin_series = create_test_series()\n\n with pytest.raises(NotImplementedError):\n modin_series.nlargest(None)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_notnull(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.notnull(), pandas_series.notnull())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_nsmallest(data):\n modin_series, pandas_series = create_test_series(data)\n df_equals(\n modin_series.nsmallest(n=5, keep=\"first\"),\n 


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_nsmallest(data):
    modin_series, pandas_series = create_test_series(data)
    df_equals(
        modin_series.nsmallest(n=5, keep="first"),
        pandas_series.nsmallest(n=5, keep="first"),
    )
    df_equals(
        modin_series.nsmallest(n=10, keep="first"),
        pandas_series.nsmallest(n=10, keep="first"),
    )
    df_equals(
        modin_series.nsmallest(n=10, keep="last"),
        pandas_series.nsmallest(n=10, keep="last"),
    )
    df_equals(modin_series.nsmallest(keep="all"), pandas_series.nsmallest(keep="all"))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("dropna", [True, False], ids=["True", "False"])
def test_nunique(data, dropna):
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.nunique(dropna=dropna), pandas_series.nunique(dropna=dropna))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pct_change(data):
    modin_series, pandas_series = create_test_series(data)
    with warns_that_defaulting_to_pandas():
        modin_series.pct_change()


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(data):
    modin_series, pandas_series = create_test_series(data)
    n = len(modin_series.index)
    a, b, c = 2 % n, 0, 3 % n

    def h(x):
        return x.dropna()

    def g(x, arg1=0):
        for _ in range(arg1):
            x = x.append(x)
        return x

    def f(x, arg2=0, arg3=0):
        return x.drop(x.index[[arg2, arg3]])

    df_equals(
        f(g(h(modin_series), arg1=a), arg2=b, arg3=c),
        (modin_series.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
    )
    df_equals(
        (modin_series.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
        (pandas_series.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(request, data):
    modin_series, pandas_series = create_test_series(data)

    if name_contains(request.node.name, numeric_dfs):
        # We have to test this way because equality in plots means same object.
        zipped_plot_lines = zip(modin_series.plot().lines, pandas_series.plot().lines)
        for left, right in zipped_plot_lines:
            if isinstance(left.get_xdata(), np.ma.core.MaskedArray) and isinstance(
                right.get_xdata(), np.ma.core.MaskedArray
            ):
                assert all((left.get_xdata() == right.get_xdata()).data)
            else:
                assert np.array_equal(left.get_xdata(), right.get_xdata())
            if isinstance(left.get_ydata(), np.ma.core.MaskedArray) and isinstance(
                right.get_ydata(), np.ma.core.MaskedArray
            ):
                assert all((left.get_ydata() == right.get_ydata()).data)
            else:
                # Compare the y data here; the original branch re-compared x.
                assert np.array_equal(left.get_ydata(), right.get_ydata())


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(data):
    modin_series, pandas_series = create_test_series(data)

    for key in modin_series.keys():
        df_equals(modin_series.pop(key), pandas_series.pop(key))
        df_equals(modin_series, pandas_series)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "pow")


def test_product_alias():
    assert pd.Series.prod == pd.Series.product


@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_prod(axis, skipna):
    eval_general(
        *create_test_series(test_data["float_nan_data"]),
        lambda s: s.prod(axis=axis, skipna=skipna),
    )
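

# Editor's note: the nested-call comparison in test_pipe above relies on
# `s.pipe(f, *args, **kwargs)` being exactly `f(s, *args, **kwargs)`. A
# minimal sketch of that equivalence (not part of the original suite):
def test_pipe_equivalence_sketch():
    s = pandas.Series([1, 2, 3])

    def add(x, k):
        return x + k

    df_equals(s.pipe(add, k=2), add(s, k=2))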


@pytest.mark.parametrize(
    "numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
    "min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod_specific(min_count, numeric_only):
    eval_general(
        *create_test_series(test_data_diff_dtype),
        lambda df: df.prod(min_count=min_count, numeric_only=numeric_only),
    )


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(request, data, q):
    modin_series, pandas_series = create_test_series(data)
    if not name_contains(request.node.name, no_numeric_dfs):
        df_equals(modin_series.quantile(q), pandas_series.quantile(q))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "radd")


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(data, na_option):
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.rank(na_option=na_option)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.rank(na_option=na_option)
    else:
        modin_result = modin_series.rank(na_option=na_option)
        df_equals(modin_result, pandas_result)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("order", [None, "C", "F", "A", "K"])
def test_ravel(data, order):
    modin_series, pandas_series = create_test_series(data)
    np.testing.assert_equal(
        modin_series.ravel(order=order), pandas_series.ravel(order=order)
    )


@pytest.mark.parametrize(
    "data",
    [
        pandas.Categorical(np.arange(1000), ordered=True),
        pandas.Categorical(np.arange(1000), ordered=False),
        pandas.Categorical(np.arange(1000), categories=np.arange(500), ordered=True),
        pandas.Categorical(np.arange(1000), categories=np.arange(500), ordered=False),
    ],
)
@pytest.mark.parametrize("order", [None, "C", "F", "A", "K"])
def test_ravel_category(data, order):
    modin_series, pandas_series = create_test_series(data)
    categories_equals(modin_series.ravel(order=order), pandas_series.ravel(order=order))


@pytest.mark.parametrize(
    "data",
    [
        pandas.Categorical(np.arange(10), ordered=True),
        pandas.Categorical(np.arange(10), ordered=False),
        pandas.Categorical(np.arange(10), categories=np.arange(5), ordered=True),
        pandas.Categorical(np.arange(10), categories=np.arange(5), ordered=False),
    ],
)
@pytest.mark.parametrize("order", [None, "C", "F", "A", "K"])
def test_ravel_simple_category(data, order):
    modin_series, pandas_series = create_test_series(data)
    categories_equals(modin_series.ravel(order=order), pandas_series.ravel(order=order))


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(data):
    modin_series, pandas_series = create_test_series(data)
    inter_df_math_helper(modin_series, pandas_series, "rdiv")
pd.DataFrame(frame_data)\n\n    for col in pandas_df.columns:\n        modin_series = modin_df[col]\n        pandas_series = pandas_df[col]\n        df_equals(\n            modin_series.reindex([0, 3, 2, 1]), pandas_series.reindex([0, 3, 2, 1])\n        )\n        df_equals(modin_series.reindex([0, 6, 2]), pandas_series.reindex([0, 6, 2]))\n        df_equals(\n            modin_series.reindex(index=[0, 1, 5]),\n            pandas_series.reindex(index=[0, 1, 5]),\n        )\n\n    # MultiIndex\n    modin_series, pandas_series = create_test_series(data)\n    modin_series.index, pandas_series.index = [\n        generate_multiindex(len(pandas_series))\n    ] * 2\n    pandas_result = pandas_series.reindex(list(reversed(pandas_series.index)))\n    modin_result = modin_series.reindex(list(reversed(modin_series.index)))\n    df_equals(pandas_result, modin_result)\n\n\ndef test_reindex_like():\n    df1 = pd.DataFrame(\n        [\n            [24.3, 75.7, \"high\"],\n            [31, 87.8, \"high\"],\n            [22, 71.6, \"medium\"],\n            [35, 95, \"medium\"],\n        ],\n        columns=[\"temp_celsius\", \"temp_fahrenheit\", \"windspeed\"],\n        index=pd.date_range(start=\"2014-02-12\", end=\"2014-02-15\", freq=\"D\"),\n    )\n    df2 = pd.DataFrame(\n        [[28, \"low\"], [30, \"low\"], [35.1, \"medium\"]],\n        columns=[\"temp_celsius\", \"windspeed\"],\n        index=pd.DatetimeIndex([\"2014-02-12\", \"2014-02-13\", \"2014-02-15\"]),\n    )\n\n    series1 = df1[\"windspeed\"]\n    series2 = df2[\"windspeed\"]\n    with warns_that_defaulting_to_pandas():\n        series2.reindex_like(series1)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rename(data):\n    modin_series, pandas_series = create_test_series(data)\n    new_name = \"NEW_NAME\"\n    df_equals(modin_series.rename(new_name), pandas_series.rename(new_name))\n\n    modin_series_cp = modin_series.copy()\n    pandas_series_cp = pandas_series.copy()\n    modin_series_cp.rename(new_name, inplace=True)\n    pandas_series_cp.rename(new_name, inplace=True)\n    df_equals(modin_series_cp, pandas_series_cp)\n\n    modin_result = modin_series.rename(\"{}__\".format)\n    pandas_result = pandas_series.rename(\"{}__\".format)\n    df_equals(modin_result, pandas_result)\n\n\ndef test_reorder_levels():\n    data = np.random.randint(1, 100, 12)\n    modin_series = pd.Series(\n        data,\n        index=pd.MultiIndex.from_tuples(\n            [\n                (num, letter, color)\n                for num in range(1, 3)\n                for letter in [\"a\", \"b\", \"c\"]\n                for color in [\"Red\", \"Green\"]\n            ],\n            names=[\"Number\", \"Letter\", \"Color\"],\n        ),\n    )\n    pandas_series = pandas.Series(\n        data,\n        index=pandas.MultiIndex.from_tuples(\n            [\n                (num, letter, color)\n                for num in range(1, 3)\n                for letter in [\"a\", \"b\", \"c\"]\n                for color in [\"Red\", \"Green\"]\n            ],\n            names=[\"Number\", \"Letter\", \"Color\"],\n        ),\n    )\n    modin_result = modin_series.reorder_levels([\"Letter\", \"Color\", \"Number\"])\n    pandas_result = pandas_series.reorder_levels([\"Letter\", \"Color\", \"Number\"])\n    df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n    \"repeats\", [0, 2, 3, 4], ids=[\"repeats_{}\".format(i) for i in [0, 2, 3, 4]]\n)\ndef test_repeat(data, repeats):\n    eval_general(pd.Series(data), pandas.Series(data), lambda df: df.repeat(repeats))\n\n\n@pytest.mark.parametrize(\"data\", [np.arange(256)])\n@pytest.mark.parametrize(\n    \"repeats\",\n    [\n        [0],\n        [2],\n        [3],\n        [4],\n        np.arange(256),\n        [0] * 64 + [2] * 64 + [3] * 32 + [4] * 32 + [5] * 64,\n        # the two length-mismatched lists below should raise in both Modin and pandas\n        [2] * 257,\n        [2] * 128,\n    ],\n)\ndef test_repeat_lists(data, repeats):\n    eval_general(\n        pd.Series(data),\n        pandas.Series(data),\n        lambda df: df.repeat(repeats),\n    )\n\n\n
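# Hedged sketch added for illustration; it is not part of the original suite,\n# and it assumes `create_test_series` and `eval_general` behave as used above.\n# `Series.replace` also accepts a mapping of old -> new values; as elsewhere,\n# only Modin/pandas parity is asserted.\ndef test_replace_dict_sketch():\n    eval_general(\n        *create_test_series([0, 1, 2, 3, 4]),\n        lambda series: series.replace({0: 5, 1: 6}),\n    )\n\n\n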
def test_replace():\n    modin_series = pd.Series([0, 1, 2, 3, 4])\n    pandas_series = pandas.Series([0, 1, 2, 3, 4])\n    modin_result = modin_series.replace(0, 5)\n    pandas_result = pandas_series.replace(0, 5)\n    df_equals(modin_result, pandas_result)\n\n    modin_result = modin_series.replace([1, 2], method=\"bfill\")\n    pandas_result = pandas_series.replace([1, 2], method=\"bfill\")\n    df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"closed\", [\"left\", \"right\"])\n@pytest.mark.parametrize(\"label\", [\"right\", \"left\"])\n@pytest.mark.parametrize(\"level\", [None, 1])\ndef test_resample(closed, label, level):\n    rule = \"5T\"\n    freq = \"H\"\n    base = 2\n\n    index = pandas.date_range(\"1/1/2000\", periods=12, freq=freq)\n    pandas_series = pandas.Series(range(12), index=index)\n    modin_series = pd.Series(range(12), index=index)\n\n    if level is not None:\n        index = pandas.MultiIndex.from_product(\n            [[\"a\", \"b\", \"c\"], pandas.date_range(\"31/12/2000\", periods=4, freq=freq)]\n        )\n        pandas_series.index = index\n        modin_series.index = index\n    pandas_resampler = pandas_series.resample(\n        rule, closed=closed, label=label, base=base, level=level\n    )\n    modin_resampler = modin_series.resample(\n        rule, closed=closed, label=label, base=base, level=level\n    )\n\n    df_equals(modin_resampler.count(), pandas_resampler.count())\n    df_equals(modin_resampler.var(0), pandas_resampler.var(0))\n    df_equals(modin_resampler.sum(), pandas_resampler.sum())\n    df_equals(modin_resampler.std(), pandas_resampler.std())\n    df_equals(modin_resampler.sem(), pandas_resampler.sem())\n    df_equals(modin_resampler.size(), pandas_resampler.size())\n    df_equals(modin_resampler.prod(), pandas_resampler.prod())\n    df_equals(modin_resampler.ohlc(), pandas_resampler.ohlc())\n    df_equals(modin_resampler.min(), pandas_resampler.min())\n    df_equals(modin_resampler.median(), pandas_resampler.median())\n    df_equals(modin_resampler.mean(), pandas_resampler.mean())\n    df_equals(modin_resampler.max(), pandas_resampler.max())\n    df_equals(modin_resampler.last(), pandas_resampler.last())\n    df_equals(modin_resampler.first(), pandas_resampler.first())\n    df_equals(modin_resampler.nunique(), pandas_resampler.nunique())\n    df_equals(\n        modin_resampler.pipe(lambda x: x.max() - x.min()),\n        pandas_resampler.pipe(lambda x: x.max() - x.min()),\n    )\n    df_equals(\n        modin_resampler.transform(lambda x: (x - x.mean()) / x.std()),\n        pandas_resampler.transform(lambda x: (x - x.mean()) / x.std()),\n    )\n    df_equals(\n        modin_resampler.aggregate(\"max\"),\n        pandas_resampler.aggregate(\"max\"),\n    )\n    df_equals(\n        modin_resampler.apply(\"sum\"),\n        pandas_resampler.apply(\"sum\"),\n    )\n    df_equals(\n        modin_resampler.get_group(name=list(modin_resampler.groups)[0]),\n        pandas_resampler.get_group(name=list(pandas_resampler.groups)[0]),\n    )\n    assert pandas_resampler.indices == modin_resampler.indices\n    assert pandas_resampler.groups == modin_resampler.groups\n    df_equals(modin_resampler.quantile(), pandas_resampler.quantile())\n    # Upsampling from level= or on= selection is not supported\n    if level is None:\n        df_equals(\n            modin_resampler.interpolate(),\n            pandas_resampler.interpolate(),\n        )\n        df_equals(modin_resampler.asfreq(), pandas_resampler.asfreq())\n        df_equals(\n            modin_resampler.fillna(method=\"nearest\"),\n            pandas_resampler.fillna(method=\"nearest\"),\n        )\n        df_equals(modin_resampler.pad(), pandas_resampler.pad())\n        df_equals(modin_resampler.nearest(), pandas_resampler.nearest())\n        df_equals(modin_resampler.bfill(), pandas_resampler.bfill())\n        df_equals(modin_resampler.backfill(), pandas_resampler.backfill())\n        df_equals(modin_resampler.ffill(), pandas_resampler.ffill())\n    df_equals(\n        modin_resampler.apply([\"sum\", \"mean\", \"max\"]),\n        pandas_resampler.apply([\"sum\", \"mean\", \"max\"]),\n    )\n    df_equals(\n        modin_resampler.aggregate([\"sum\", \"mean\", \"max\"]),\n        pandas_resampler.aggregate([\"sum\", \"mean\", \"max\"]),\n    )\n\n\n
@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\"drop\", [True, False], ids=[\"True\", \"False\"])\n@pytest.mark.parametrize(\"name\", [None, \"Custom name\"])\n@pytest.mark.parametrize(\"inplace\", [True, False])\ndef test_reset_index(data, drop, name, inplace):\n    eval_general(\n        *create_test_series(data),\n        lambda df, *args, **kwargs: df.reset_index(*args, **kwargs),\n        drop=drop,\n        name=name,\n        inplace=inplace,\n        __inplace__=inplace,\n    )\n\n\n@pytest.mark.xfail(reason=\"Using pandas Series.\")\ndef test_reshape():\n    modin_series, _ = create_test_series()\n\n    with pytest.raises(NotImplementedError):\n        modin_series.reshape(None)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rfloordiv(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"rfloordiv\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rmod(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"rmod\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rmul(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"rmul\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_round(data):\n    modin_series, pandas_series = create_test_series(data)\n    df_equals(modin_series.round(), pandas_series.round())\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rpow(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"rpow\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rsub(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"rsub\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_rtruediv(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"rtruediv\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_sample(data):\n    modin_series, pandas_series = create_test_series(data)\n    try:\n        pandas_result = pandas_series.sample(frac=0.5, random_state=21019)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.sample(frac=0.5, random_state=21019)\n    else:\n        modin_result = modin_series.sample(frac=0.5, random_state=21019)\n        df_equals(pandas_result, modin_result)\n\n    try:\n        pandas_result = pandas_series.sample(n=12, random_state=21019)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.sample(n=12, random_state=21019)\n    else:\n        modin_result = modin_series.sample(n=12, random_state=21019)\n        df_equals(pandas_result, modin_result)\n\n    with warns_that_defaulting_to_pandas():\n        df_equals(\n            modin_series.sample(n=0, random_state=21019),\n            pandas_series.sample(n=0, random_state=21019),\n        )\n    with pytest.raises(ValueError):\n        modin_series.sample(n=-3)\n\n\n
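# Hedged sketch added for illustration; it is not part of the original suite,\n# and it assumes `create_test_series` accepts `vals`/`sort` as used in the\n# parametrized test below. It shows the basic `searchsorted` contract on a\n# pre-sorted series; only Modin/pandas parity is asserted.\ndef test_searchsorted_simple_sketch():\n    modin_series, pandas_series = create_test_series(vals=[1, 2, 3, 4, 5], sort=True)\n    assert modin_series.searchsorted(value=3, side=\"left\") == pandas_series.searchsorted(\n        value=3, side=\"left\"\n    )\n\n\n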
@pytest.mark.parametrize(\"single_value_data\", [True, False])\n@pytest.mark.parametrize(\"use_multiindex\", [True, False])\n@pytest.mark.parametrize(\"sorter\", [True, None])\n@pytest.mark.parametrize(\"values_number\", [1, 2, 5])\n@pytest.mark.parametrize(\"side\", [\"left\", \"right\"])\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_searchsorted(\n    data, side, values_number, sorter, use_multiindex, single_value_data\n):\n    data = data if not single_value_data else data[next(iter(data.keys()))][0]\n    if not sorter:\n        modin_series, pandas_series = create_test_series(vals=data, sort=True)\n    else:\n        modin_series, pandas_series = create_test_series(vals=data)\n        sorter = np.argsort(list(modin_series))\n\n    if use_multiindex:\n        rows_number = len(modin_series.index)\n        level_0_series = random_state.choice([0, 1], rows_number)\n        level_1_series = random_state.choice([2, 3], rows_number)\n        index_series = pd.MultiIndex.from_arrays(\n            [level_0_series, level_1_series], names=[\"first\", \"second\"]\n        )\n        modin_series.index = index_series\n        pandas_series.index = index_series\n\n    min_sample = modin_series.min(skipna=True)\n    max_sample = modin_series.max(skipna=True)\n\n    if single_value_data:\n        values = [data]\n    else:\n        values = []\n        values.append(pandas_series.sample(n=values_number, random_state=random_state))\n        values.append(\n            random_state.uniform(low=min_sample, high=max_sample, size=values_number)\n        )\n        values.append(\n            random_state.uniform(\n                low=max_sample, high=2 * max_sample, size=values_number\n            )\n        )\n        values.append(\n            random_state.uniform(\n                low=min_sample - max_sample, high=min_sample, size=values_number\n            )\n        )\n        pure_float = random_state.uniform(float(min_sample), float(max_sample))\n        pure_int = int(pure_float)\n        values.append(pure_float)\n        values.append(pure_int)\n\n    test_cases = [\n        modin_series.searchsorted(value=value, side=side, sorter=sorter)\n        == pandas_series.searchsorted(value=value, side=side, sorter=sorter)\n        for value in values\n    ]\n    test_cases = [\n        case.all() if not isinstance(case, bool) else case for case in test_cases\n    ]\n\n    for case in test_cases:\n        assert case\n\n\n@pytest.mark.parametrize(\n    \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\ndef test_sem_float_nan_only(skipna, ddof):\n    eval_general(\n        *create_test_series(test_data[\"float_nan_data\"]),\n        lambda df: df.sem(skipna=skipna, ddof=ddof),\n    )\n\n\n@pytest.mark.parametrize(\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\ndef test_sem_int_only(ddof):\n    eval_general(\n        *create_test_series(test_data[\"int_data\"]),\n        lambda df: df.sem(ddof=ddof),\n    )\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_set_axis(data):\n    modin_series, _ = create_test_series(data)  # noqa: F841\n    modin_series.set_axis(labels=[\"{}_{}\".format(i, i + 1) for i in modin_series.index])\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_shape(data):\n    modin_series, pandas_series = create_test_series(data)\n    assert modin_series.shape == pandas_series.shape\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_size(data):\n    modin_series, pandas_series = create_test_series(data)\n    assert modin_series.size == pandas_series.size\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n    \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\ndef test_skew(data, skipna):\n    eval_general(*create_test_series(data), lambda df: df.skew(skipna=skipna))\n\n\n
\"ndarray\", \"has_duplicates\"])\[email protected](\"periods\", [0, 1, -1, 10, -10, 1000000000, -1000000000])\ndef test_shift_slice_shift(data, index, periods):\n modin_series, pandas_series = create_test_series(data)\n if index == \"ndarray\":\n data_column_length = len(data[next(iter(data))])\n modin_series.index = pandas_series.index = np.arange(2, data_column_length + 2)\n elif index == \"has_duplicates\":\n modin_series.index = pandas_series.index = list(modin_series.index[:-3]) + [\n 0,\n 1,\n 2,\n ]\n\n df_equals(\n modin_series.shift(periods=periods),\n pandas_series.shift(periods=periods),\n )\n df_equals(\n modin_series.shift(periods=periods, fill_value=777),\n pandas_series.shift(periods=periods, fill_value=777),\n )\n eval_general(modin_series, pandas_series, lambda df: df.shift(axis=1))\n df_equals(\n modin_series.slice_shift(periods=periods),\n pandas_series.slice_shift(periods=periods),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"ascending\", bool_arg_values, ids=arg_keys(\"ascending\", bool_arg_keys)\n)\[email protected](\n \"sort_remaining\", bool_arg_values, ids=arg_keys(\"sort_remaining\", bool_arg_keys)\n)\[email protected](\"na_position\", [\"first\", \"last\"], ids=[\"first\", \"last\"])\ndef test_sort_index(data, ascending, sort_remaining, na_position):\n modin_series, pandas_series = create_test_series(data)\n eval_general(\n modin_series,\n pandas_series,\n lambda df: df.sort_index(\n ascending=ascending,\n sort_remaining=sort_remaining,\n na_position=na_position,\n ),\n )\n\n eval_general(\n modin_series.copy(),\n pandas_series.copy(),\n lambda df: df.sort_index(\n ascending=ascending,\n sort_remaining=sort_remaining,\n na_position=na_position,\n inplace=True,\n ),\n __inplace__=True,\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"ascending\", [True, False], ids=[\"True\", \"False\"])\[email protected](\"na_position\", [\"first\", \"last\"], ids=[\"first\", \"last\"])\ndef test_sort_values(data, ascending, na_position):\n modin_series, pandas_series = create_test_series(data)\n modin_result = modin_series.sort_values(\n ascending=ascending, na_position=na_position\n )\n pandas_result = pandas_series.sort_values(\n ascending=ascending, na_position=na_position\n )\n # Note: For `ascending=False` only\n # For some reason, the indexing of Series and DataFrame differ in the underlying\n # algorithm. The order of values is the same, but the index values are shuffled.\n # Since we use `DataFrame.sort_values` even for Series, the index can be different\n # between `pandas.Series.sort_values`. 
    if ascending:\n        df_equals(modin_result, pandas_result)\n    else:\n        np.testing.assert_equal(modin_result.values, pandas_result.values)\n\n    modin_series_cp = modin_series.copy()\n    pandas_series_cp = pandas_series.copy()\n    modin_series_cp.sort_values(\n        ascending=ascending, na_position=na_position, inplace=True\n    )\n    pandas_series_cp.sort_values(\n        ascending=ascending, na_position=na_position, inplace=True\n    )\n    # See above about `ascending=False`\n    if ascending:\n        df_equals(modin_series_cp, pandas_series_cp)\n    else:\n        np.testing.assert_equal(modin_series_cp.values, pandas_series_cp.values)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_squeeze(data):\n    modin_series, pandas_series = create_test_series(data)\n    df_equals(modin_series.squeeze(None), pandas_series.squeeze(None))\n    df_equals(modin_series.squeeze(0), pandas_series.squeeze(0))\n    with pytest.raises(ValueError):\n        modin_series.squeeze(1)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n    \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\ndef test_std(request, data, skipna, ddof):\n    modin_series, pandas_series = create_test_series(data)\n    try:\n        pandas_result = pandas_series.std(skipna=skipna, ddof=ddof)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.std(skipna=skipna, ddof=ddof)\n    else:\n        modin_result = modin_series.std(skipna=skipna, ddof=ddof)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_sub(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"sub\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_subtract(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"subtract\")\n\n\n@pytest.mark.parametrize(\n    \"data\",\n    test_data_values + test_data_small_values,\n    ids=test_data_keys + test_data_small_keys,\n)\n@pytest.mark.parametrize(\"axis\", axis_values, ids=axis_keys)\n@pytest.mark.parametrize(\n    \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\n    \"numeric_only\", bool_arg_values, ids=arg_keys(\"numeric_only\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\n    \"min_count\", int_arg_values, ids=arg_keys(\"min_count\", int_arg_keys)\n)\ndef test_sum(data, axis, skipna, numeric_only, min_count):\n    eval_general(\n        *create_test_series(data),\n        lambda df, *args, **kwargs: df.sum(*args, **kwargs),\n        axis=axis,\n        skipna=skipna,\n        numeric_only=numeric_only,\n        min_count=min_count,\n    )\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\"axis1\", [0, 1, \"columns\", \"index\"])\n@pytest.mark.parametrize(\"axis2\", [0, 1, \"columns\", \"index\"])\ndef test_swapaxes(data, axis1, axis2):\n    modin_series, pandas_series = create_test_series(data)\n    try:\n        pandas_result = pandas_series.swapaxes(axis1, axis2)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.swapaxes(axis1, axis2)\n    else:\n        modin_result = modin_series.swapaxes(axis1, axis2)\n        df_equals(modin_result, pandas_result)\n\n\ndef test_swaplevel():\n    data = np.random.randint(1, 100, 12)\n    modin_s = pd.Series(\n        data,\n        index=pd.MultiIndex.from_tuples(\n            [\n                (num, letter, color)\n                for num in range(1, 3)\n                for letter in [\"a\", \"b\", \"c\"]\n                for color in [\"Red\", \"Green\"]\n            ],\n            names=[\"Number\", \"Letter\", \"Color\"],\n        ),\n    )\n
[\"a\", \"b\", \"c\"]\n for color in [\"Red\", \"Green\"]\n ],\n names=[\"Number\", \"Letter\", \"Color\"],\n ),\n )\n pandas_s = pandas.Series(\n data,\n index=pandas.MultiIndex.from_tuples(\n [\n (num, letter, color)\n for num in range(1, 3)\n for letter in [\"a\", \"b\", \"c\"]\n for color in [\"Red\", \"Green\"]\n ],\n names=[\"Number\", \"Letter\", \"Color\"],\n ),\n )\n df_equals(\n modin_s.swaplevel(\"Number\", \"Color\"), pandas_s.swaplevel(\"Number\", \"Color\")\n )\n df_equals(modin_s.swaplevel(), pandas_s.swaplevel())\n df_equals(modin_s.swaplevel(1, 0), pandas_s.swaplevel(1, 0))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"n\", int_arg_values, ids=arg_keys(\"n\", int_arg_keys))\ndef test_tail(data, n):\n modin_series, pandas_series = create_test_series(data)\n df_equals(modin_series.tail(n), pandas_series.tail(n))\n df_equals(\n modin_series.tail(len(modin_series)), pandas_series.tail(len(pandas_series))\n )\n\n\ndef test_take():\n modin_s = pd.Series([\"falcon\", \"parrot\", \"lion\", \"cat\"], index=[0, 2, 3, 1])\n pandas_s = pandas.Series([\"falcon\", \"parrot\", \"lion\", \"cat\"], index=[0, 2, 3, 1])\n a = modin_s.take([0, 3])\n df_equals(a, pandas_s.take([0, 3]))\n try:\n pandas_s.take([2], axis=1)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_s.take([2], axis=1)\n\n\[email protected](\n \"ignore_index\", bool_arg_values, ids=arg_keys(\"ignore_index\", bool_arg_keys)\n)\ndef test_explode(ignore_index):\n # Some items in this test data are lists that explode() should expand.\n data = [[1, 2, 3], \"foo\", [], [3, 4]]\n modin_series, pandas_series = create_test_series(data)\n df_equals(\n modin_series.explode(ignore_index=ignore_index),\n pandas_series.explode(ignore_index=ignore_index),\n )\n\n\ndef test_to_period():\n idx = pd.date_range(\"1/1/2012\", periods=5, freq=\"M\")\n series = pd.Series(np.random.randint(0, 100, size=(len(idx))), index=idx)\n with warns_that_defaulting_to_pandas():\n series.to_period()\n\n\[email protected](\n \"data\",\n test_data_values + test_data_large_categorical_series_values,\n ids=test_data_keys + test_data_large_categorical_series_keys,\n)\ndef test_to_numpy(data):\n modin_series, pandas_series = create_test_series(data)\n assert_array_equal(modin_series.to_numpy(), pandas_series.to_numpy())\n\n\[email protected](\n \"data\",\n test_data_values + test_data_large_categorical_series_values,\n ids=test_data_keys + test_data_large_categorical_series_keys,\n)\ndef test_series_values(data):\n modin_series, pandas_series = create_test_series(data)\n assert_array_equal(modin_series.values, pandas_series.values)\n\n\ndef test_series_empty_values():\n modin_series, pandas_series = pd.Series(), pandas.Series()\n assert_array_equal(modin_series.values, pandas_series.values)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_to_string(request, data):\n eval_general(\n *create_test_series(data),\n lambda df: df.to_string(),\n )\n\n\ndef test_to_timestamp():\n idx = pd.date_range(\"1/1/2012\", periods=5, freq=\"M\")\n series = pd.Series(np.random.randint(0, 100, size=(len(idx))), index=idx)\n with warns_that_defaulting_to_pandas():\n series.to_period().to_timestamp()\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_to_xarray(data):\n modin_series, _ = create_test_series(data) # noqa: F841\n with warns_that_defaulting_to_pandas():\n modin_series.to_xarray()\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef 
@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_tolist(data):\n    modin_series, _ = create_test_series(data)  # noqa: F841\n    with warns_that_defaulting_to_pandas():\n        modin_series.tolist()\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\"func\", agg_func_values, ids=agg_func_keys)\ndef test_transform(data, func):\n    eval_general(\n        *create_test_series(data),\n        lambda df: df.transform(func),\n    )\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\"func\", agg_func_except_values, ids=agg_func_except_keys)\ndef test_transform_except(data, func):\n    eval_general(\n        *create_test_series(data),\n        lambda df: df.transform(func),\n    )\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_transpose(data):\n    modin_series, pandas_series = create_test_series(data)\n    df_equals(modin_series.transpose(), modin_series)\n    df_equals(modin_series.transpose(), pandas_series.transpose())\n    df_equals(modin_series.transpose(), pandas_series)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_truediv(data):\n    modin_series, pandas_series = create_test_series(data)\n    inter_df_math_helper(modin_series, pandas_series, \"truediv\")\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_truncate(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    before = 1\n    after = len(modin_series) - 3\n    df_equals(\n        modin_series.truncate(before, after), pandas_series.truncate(before, after)\n    )\n\n    before = 1\n    after = 3\n    df_equals(\n        modin_series.truncate(before, after), pandas_series.truncate(before, after)\n    )\n\n    before = None\n    after = None\n    df_equals(\n        modin_series.truncate(before, after), pandas_series.truncate(before, after)\n    )\n\n\ndef test_tshift():\n    idx = pd.date_range(\"1/1/2012\", periods=5, freq=\"M\")\n    data = np.random.randint(0, 100, size=len(idx))\n    modin_series = pd.Series(data, index=idx)\n    pandas_series = pandas.Series(data, index=idx)\n    df_equals(modin_series.tshift(4), pandas_series.tshift(4))\n\n\ndef test_tz_convert():\n    modin_idx = pd.date_range(\n        \"1/1/2012\", periods=400, freq=\"2D\", tz=\"America/Los_Angeles\"\n    )\n    pandas_idx = pandas.date_range(\n        \"1/1/2012\", periods=400, freq=\"2D\", tz=\"America/Los_Angeles\"\n    )\n    data = np.random.randint(0, 100, size=len(modin_idx))\n    modin_series = pd.Series(data, index=modin_idx)\n    pandas_series = pandas.Series(data, index=pandas_idx)\n    modin_result = modin_series.tz_convert(\"UTC\", axis=0)\n    pandas_result = pandas_series.tz_convert(\"UTC\", axis=0)\n    df_equals(modin_result, pandas_result)\n\n    modin_multi = pd.MultiIndex.from_arrays([modin_idx, range(len(modin_idx))])\n    pandas_multi = pandas.MultiIndex.from_arrays([pandas_idx, range(len(modin_idx))])\n    modin_series = pd.Series(data, index=modin_multi)\n    pandas_series = pandas.Series(data, index=pandas_multi)\n    df_equals(\n        modin_series.tz_convert(\"UTC\", axis=0, level=0),\n        pandas_series.tz_convert(\"UTC\", axis=0, level=0),\n    )\n\n\ndef test_tz_localize():\n    idx = pd.date_range(\"1/1/2012\", periods=400, freq=\"2D\")\n    data = np.random.randint(0, 100, size=len(idx))\n    modin_series = pd.Series(data, index=idx)\n    pandas_series = pandas.Series(data, index=idx)\n    df_equals(\n        modin_series.tz_localize(\"America/Los_Angeles\"),\n        pandas_series.tz_localize(\"America/Los_Angeles\"),\n    )\n    df_equals(\n        modin_series.tz_localize(\"UTC\"),\n        pandas_series.tz_localize(\"UTC\"),\n    )\n\n\n
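# Hedged sketch added for illustration; it is not part of the original suite,\n# and it only relies on the same `pd`/`pandas`/`df_equals` usage as the tests\n# above. Localizing a naive index and then converting it exercises the two\n# methods above together; as usual, only Modin/pandas parity is asserted.\ndef test_tz_localize_convert_roundtrip_sketch():\n    idx = pd.date_range(\"1/1/2012\", periods=10, freq=\"2D\")\n    data = np.random.randint(0, 100, size=len(idx))\n    modin_series = pd.Series(data, index=idx)\n    pandas_series = pandas.Series(data, index=idx)\n    df_equals(\n        modin_series.tz_localize(\"UTC\").tz_convert(\"America/Los_Angeles\"),\n        pandas_series.tz_localize(\"UTC\").tz_convert(\"America/Los_Angeles\"),\n    )\n\n\n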
@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_unique(data):\n    modin_series, pandas_series = create_test_series(data)\n    modin_result = modin_series.unique()\n    pandas_result = pandas_series.unique()\n    assert_array_equal(modin_result, pandas_result)\n    assert modin_result.shape == pandas_result.shape\n\n    modin_result = pd.Series([2, 1, 3, 3], name=\"A\").unique()\n    pandas_result = pandas.Series([2, 1, 3, 3], name=\"A\").unique()\n    assert_array_equal(modin_result, pandas_result)\n    assert modin_result.shape == pandas_result.shape\n\n    modin_result = pd.Series([pd.Timestamp(\"2016-01-01\") for _ in range(3)]).unique()\n    pandas_result = pandas.Series(\n        [pd.Timestamp(\"2016-01-01\") for _ in range(3)]\n    ).unique()\n    assert_array_equal(modin_result, pandas_result)\n    assert modin_result.shape == pandas_result.shape\n\n    modin_result = pd.Series(\n        [pd.Timestamp(\"2016-01-01\", tz=\"US/Eastern\") for _ in range(3)]\n    ).unique()\n    pandas_result = pandas.Series(\n        [pd.Timestamp(\"2016-01-01\", tz=\"US/Eastern\") for _ in range(3)]\n    ).unique()\n    assert_array_equal(modin_result, pandas_result)\n    assert modin_result.shape == pandas_result.shape\n\n    modin_result = pd.Series(pd.Categorical(list(\"baabc\"))).unique()\n    pandas_result = pandas.Series(pd.Categorical(list(\"baabc\"))).unique()\n    assert_array_equal(modin_result, pandas_result)\n    assert modin_result.shape == pandas_result.shape\n\n    modin_result = pd.Series(\n        pd.Categorical(list(\"baabc\"), categories=list(\"abc\"), ordered=True)\n    ).unique()\n    pandas_result = pandas.Series(\n        pd.Categorical(list(\"baabc\"), categories=list(\"abc\"), ordered=True)\n    ).unique()\n    assert_array_equal(modin_result, pandas_result)\n    assert modin_result.shape == pandas_result.shape\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_unstack(data):\n    modin_series, pandas_series = create_test_series(data)\n    index = generate_multiindex(len(pandas_series), nlevels=4, is_tree_like=True)\n\n    modin_series = pd.Series(data[next(iter(data.keys()))], index=index)\n    pandas_series = pandas.Series(data[next(iter(data.keys()))], index=index)\n\n    df_equals(modin_series.unstack(), pandas_series.unstack())\n    df_equals(modin_series.unstack(level=0), pandas_series.unstack(level=0))\n    df_equals(modin_series.unstack(level=[0, 1]), pandas_series.unstack(level=[0, 1]))\n    df_equals(\n        modin_series.unstack(level=[0, 1, 2]), pandas_series.unstack(level=[0, 1, 2])\n    )\n\n\n@pytest.mark.parametrize(\n    \"data, other_data\",\n    [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7, 8]), ([1, 2, 3], [4, np.nan, 6])],\n)\ndef test_update(data, other_data):\n    modin_series, pandas_series = pd.Series(data), pandas.Series(data)\n    modin_series.update(pd.Series(other_data))\n    pandas_series.update(pandas.Series(other_data))\n    df_equals(modin_series, pandas_series)\n\n\n
@pytest.mark.parametrize(\"sort\", bool_arg_values, ids=bool_arg_keys)\n@pytest.mark.parametrize(\"normalize\", bool_arg_values, ids=bool_arg_keys)\n@pytest.mark.parametrize(\"bins\", [3, None])\n@pytest.mark.parametrize(\"dropna\", bool_arg_values, ids=bool_arg_keys)\n@pytest.mark.parametrize(\"ascending\", bool_arg_values, ids=bool_arg_keys)\ndef test_value_counts(sort, normalize, bins, dropna, ascending):\n    def sort_sensitive_comparator(df1, df2):\n        # We sort indices for Modin and pandas result because of issue #1650\n        return (\n            df_equals_with_non_stable_indices(df1, df2)\n            if sort\n            else df_equals(df1.sort_index(), df2.sort_index())\n        )\n\n    eval_general(\n        *create_test_series(test_data_values[0]),\n        lambda df: df.value_counts(\n            sort=sort,\n            bins=bins,\n            normalize=normalize,\n            dropna=dropna,\n            ascending=ascending,\n        ),\n        comparator=sort_sensitive_comparator,\n        # Modin's `sort_values` does not validate the `ascending` type and so does\n        # not raise an exception when it is not a bool, whereas pandas does; see\n        # modin issue #3388 for more info.\n        check_exception_type=None if sort and ascending is None else True,\n    )\n\n    # from issue #2365\n    arr = np.random.rand(2**6)\n    arr[::10] = np.nan\n    eval_general(\n        *create_test_series(arr),\n        lambda df: df.value_counts(\n            sort=sort,\n            bins=bins,\n            normalize=normalize,\n            dropna=dropna,\n            ascending=ascending,\n        ),\n        comparator=sort_sensitive_comparator,\n        # Modin's `sort_values` does not validate the `ascending` type and so does\n        # not raise an exception when it is not a bool, whereas pandas does; see\n        # modin issue #3388 for more info.\n        check_exception_type=None if sort and ascending is None else True,\n    )\n\n\ndef test_value_counts_categorical():\n    # from issue #3571\n    data = np.array([\"a\"] * 50000 + [\"b\"] * 10000 + [\"c\"] * 1000)\n    random_state = np.random.RandomState(seed=42)\n    random_state.shuffle(data)\n\n    eval_general(\n        *create_test_series(data, dtype=\"category\"),\n        lambda df: df.value_counts(),\n    )\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\ndef test_values(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    np.testing.assert_equal(modin_series.values, pandas_series.values)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n    \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\ndef test_var(data, skipna, ddof):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.var(skipna=skipna, ddof=ddof)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.var(skipna=skipna, ddof=ddof)\n    else:\n        modin_result = modin_series.var(skipna=skipna, ddof=ddof)\n        df_equals(modin_result, pandas_result)\n\n\ndef test_view():\n    modin_series = pd.Series([-2, -1, 0, 1, 2], dtype=\"int8\")\n    pandas_series = pandas.Series([-2, -1, 0, 1, 2], dtype=\"int8\")\n    modin_result = modin_series.view(dtype=\"uint8\")\n    pandas_result = pandas_series.view(dtype=\"uint8\")\n    df_equals(modin_result, pandas_result)\n\n    modin_series = pd.Series([-20, -10, 0, 10, 20], dtype=\"int32\")\n    pandas_series = pandas.Series([-20, -10, 0, 10, 20], dtype=\"int32\")\n    modin_result = modin_series.view(dtype=\"float32\")\n    pandas_result = pandas_series.view(dtype=\"float32\")\n    df_equals(modin_result, pandas_result)\n\n    modin_series = pd.Series([-200, -100, 0, 100, 200], dtype=\"int64\")\n    pandas_series = pandas.Series([-200, -100, 0, 100, 200], dtype=\"int64\")\n    modin_result = modin_series.view(dtype=\"float64\")\n    pandas_result = pandas_series.view(dtype=\"float64\")\n    df_equals(modin_result, pandas_result)\n\n\n
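# Hedged sketch added for illustration; it is not part of the original suite.\n# `view` reinterprets the underlying buffer, so the source and target dtypes\n# must share an itemsize; a round trip should therefore restore the original,\n# and only Modin/pandas parity is asserted here.\ndef test_view_roundtrip_sketch():\n    modin_series = pd.Series([-2, -1, 0, 1, 2], dtype=\"int8\")\n    pandas_series = pandas.Series([-2, -1, 0, 1, 2], dtype=\"int8\")\n    df_equals(\n        modin_series.view(dtype=\"uint8\").view(dtype=\"int8\"),\n        pandas_series.view(dtype=\"uint8\").view(dtype=\"int8\"),\n    )\n\n\n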
def test_where():\n    frame_data = random_state.randn(100)\n    pandas_series = pandas.Series(frame_data)\n    modin_series = pd.Series(frame_data)\n    pandas_cond_series = pandas_series % 5 < 2\n    modin_cond_series = modin_series % 5 < 2\n\n    pandas_result = pandas_series.where(pandas_cond_series, -pandas_series)\n    modin_result = modin_series.where(modin_cond_series, -modin_series)\n    assert all((to_pandas(modin_result) == pandas_result))\n\n    other = pandas.Series(random_state.randn(100))\n    pandas_result = pandas_series.where(pandas_cond_series, other, axis=0)\n    modin_result = modin_series.where(modin_cond_series, other, axis=0)\n    assert all(to_pandas(modin_result) == pandas_result)\n\n    pandas_result = pandas_series.where(pandas_series < 2, True)\n    modin_result = modin_series.where(modin_series < 2, True)\n    assert all(to_pandas(modin_result) == pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\n    \"key\",\n    [0, slice(0, len(test_string_data_values) // 2)],\n    ids=[\"single_key\", \"slice_key\"],\n)\ndef test_str___getitem__(data, key):\n    modin_series, pandas_series = create_test_series(data)\n    modin_result = modin_series.str[key]\n    pandas_result = pandas_series.str[key]\n    df_equals(modin_result, pandas_result)\n\n\n# Test str operations\ndef test_str_cat():\n    data = [\"abC|DeF,Hik\", \"gSaf,qWer|Gre\", \"asd3,4sad|\", np.NaN]\n    modin_series, pandas_series = create_test_series(data)\n    others = data\n\n    with warns_that_defaulting_to_pandas():\n        # We are only testing that this defaults to pandas, so we will just check for\n        # the warning\n        modin_series.str.cat(others)\n\n    with warns_that_defaulting_to_pandas():\n        # We are only testing that this defaults to pandas, so we will just check for\n        # the warning\n        modin_series.str.cat(None)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"n\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"expand\", bool_arg_values, ids=bool_arg_keys)\ndef test_str_split(data, pat, n, expand):\n    # Empty pattern is not supported on Python 3.7+\n    if sys.version_info[0] == 3 and sys.version_info[1] >= 7 and pat == \"\":\n        return\n\n    modin_series, pandas_series = create_test_series(data)\n\n    if n >= -1:\n        if expand and pat:\n            with warns_that_defaulting_to_pandas():\n                # We are only testing that this defaults to pandas, so we will just check for\n                # the warning\n                modin_series.str.split(pat, n=n, expand=expand)\n        elif not expand:\n            try:\n                pandas_result = pandas_series.str.split(pat, n=n, expand=expand)\n            except Exception as e:\n                with pytest.raises(type(e)):\n                    modin_series.str.split(pat, n=n, expand=expand)\n            else:\n                modin_result = modin_series.str.split(pat, n=n, expand=expand)\n                df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"n\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"expand\", bool_arg_values, ids=bool_arg_keys)\ndef test_str_rsplit(data, pat, n, expand):\n    modin_series, pandas_series = create_test_series(data)\n\n    if n >= -1:\n        if expand and pat:\n            with warns_that_defaulting_to_pandas():\n                # We are only testing that this defaults to pandas, so we will just check for\n                # the warning\n                modin_series.str.rsplit(pat, n=n, expand=expand)\n        elif not expand:\n            try:\n                pandas_result = pandas_series.str.rsplit(pat, n=n, expand=expand)\n            except Exception as e:\n                with pytest.raises(type(e)):\n                    modin_series.str.rsplit(pat, n=n, expand=expand)\n            else:\n                modin_result = modin_series.str.rsplit(pat, n=n, expand=expand)\n                df_equals(modin_result, pandas_result)\n\n\n
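# Hedged sketch added for illustration; it is not part of the original suite.\n# The try/except/else blocks repeated throughout the str tests below implement\n# the same contract that this module's `eval_general` helper automates: run\n# pandas first, and if it raises, require Modin to raise the same exception\n# type; otherwise compare the results.\ndef test_str_get_via_eval_general_sketch():\n    eval_general(\n        *create_test_series(test_string_data_values[0]),\n        lambda series: series.str.get(0),\n    )\n\n\n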
@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"i\", int_arg_values, ids=int_arg_keys)\ndef test_str_get(data, i):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.get(i)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.get(i)\n    else:\n        modin_result = modin_series.str.get(i)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\n    \"data\", test_string_list_data_values, ids=test_string_list_data_keys\n)\n@pytest.mark.parametrize(\"sep\", string_sep_values, ids=string_sep_keys)\ndef test_str_join(data, sep):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.join(sep)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.join(sep)\n    else:\n        modin_result = modin_series.str.join(sep)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\n    \"data\", test_string_list_data_values, ids=test_string_list_data_keys\n)\n@pytest.mark.parametrize(\"sep\", string_sep_values, ids=string_sep_keys)\ndef test_str_get_dummies(data, sep):\n    modin_series, pandas_series = create_test_series(data)\n\n    if sep:\n        with warns_that_defaulting_to_pandas():\n            # We are only testing that this defaults to pandas, so we will just check for\n            # the warning\n            modin_series.str.get_dummies(sep)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"case\", bool_arg_values, ids=bool_arg_keys)\n@pytest.mark.parametrize(\"na\", string_na_rep_values, ids=string_na_rep_keys)\ndef test_str_contains(data, pat, case, na):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.contains(pat, case=case, na=na, regex=False)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.contains(pat, case=case, na=na, regex=False)\n    else:\n        modin_result = modin_series.str.contains(pat, case=case, na=na, regex=False)\n        df_equals(modin_result, pandas_result)\n\n    # Test regex\n    pat = \",|b\"\n    try:\n        pandas_result = pandas_series.str.contains(pat, case=case, na=na, regex=True)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.contains(pat, case=case, na=na, regex=True)\n    else:\n        modin_result = modin_series.str.contains(pat, case=case, na=na, regex=True)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"repl\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"n\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"case\", bool_arg_values, ids=bool_arg_keys)\ndef test_str_replace(data, pat, repl, n, case):\n    eval_general(\n        *create_test_series(data),\n        lambda series: series.str.replace(pat, repl, n=n, case=case, regex=False),\n    )\n    # Test regex\n    eval_general(\n        *create_test_series(data),\n        lambda series: series.str.replace(\n            pat=\",|b\", repl=repl, n=n, case=case, regex=True\n        ),\n    )\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"repeats\", int_arg_values, ids=int_arg_keys)\ndef test_str_repeat(data, repeats):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.repeat(repeats)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.repeat(repeats)\n    else:\n        modin_result = modin_series.str.repeat(repeats)\n        df_equals(modin_result, pandas_result)\n\n\n
@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"width\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\n    \"side\", [\"left\", \"right\", \"both\"], ids=[\"left\", \"right\", \"both\"]\n)\n@pytest.mark.parametrize(\"fillchar\", string_sep_values, ids=string_sep_keys)\ndef test_str_pad(data, width, side, fillchar):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.pad(width, side=side, fillchar=fillchar)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.pad(width, side=side, fillchar=fillchar)\n    else:\n        modin_result = modin_series.str.pad(width, side=side, fillchar=fillchar)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"width\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"fillchar\", string_sep_values, ids=string_sep_keys)\ndef test_str_center(data, width, fillchar):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.center(width, fillchar=fillchar)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.center(width, fillchar=fillchar)\n    else:\n        modin_result = modin_series.str.center(width, fillchar=fillchar)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"width\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"fillchar\", string_sep_values, ids=string_sep_keys)\ndef test_str_ljust(data, width, fillchar):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.ljust(width, fillchar=fillchar)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.ljust(width, fillchar=fillchar)\n    else:\n        modin_result = modin_series.str.ljust(width, fillchar=fillchar)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"width\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"fillchar\", string_sep_values, ids=string_sep_keys)\ndef test_str_rjust(data, width, fillchar):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.rjust(width, fillchar=fillchar)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.rjust(width, fillchar=fillchar)\n    else:\n        modin_result = modin_series.str.rjust(width, fillchar=fillchar)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"width\", int_arg_values, ids=int_arg_keys)\ndef test_str_zfill(data, width):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.zfill(width)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.zfill(width)\n    else:\n        modin_result = modin_series.str.zfill(width)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"width\", int_arg_values, ids=int_arg_keys)\ndef test_str_wrap(data, width):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.wrap(width)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.wrap(width)\n    else:\n        modin_result = modin_series.str.wrap(width)\n        df_equals(modin_result, pandas_result)\n\n\n
@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"start\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"stop\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"step\", int_arg_values, ids=int_arg_keys)\ndef test_str_slice(data, start, stop, step):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.slice(start=start, stop=stop, step=step)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.slice(start=start, stop=stop, step=step)\n    else:\n        modin_result = modin_series.str.slice(start=start, stop=stop, step=step)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"start\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"stop\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"repl\", string_sep_values, ids=string_sep_keys)\ndef test_str_slice_replace(data, start, stop, repl):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.slice_replace(\n            start=start, stop=stop, repl=repl\n        )\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.slice_replace(start=start, stop=stop, repl=repl)\n    else:\n        modin_result = modin_series.str.slice_replace(start=start, stop=stop, repl=repl)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\ndef test_str_count(data, pat):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.count(pat)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.count(pat)\n    else:\n        modin_result = modin_series.str.count(pat)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"na\", string_na_rep_values, ids=string_na_rep_keys)\ndef test_str_startswith(data, pat, na):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.startswith(pat, na=na)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.startswith(pat, na=na)\n    else:\n        modin_result = modin_series.str.startswith(pat, na=na)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"na\", string_na_rep_values, ids=string_na_rep_keys)\ndef test_str_endswith(data, pat, na):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.endswith(pat, na=na)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.endswith(pat, na=na)\n    else:\n        modin_result = modin_series.str.endswith(pat, na=na)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\ndef test_str_findall(data, pat):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.findall(pat)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.findall(pat)\n    else:\n        modin_result = modin_series.str.findall(pat)\n        df_equals(modin_result, pandas_result)\n\n\n
@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"case\", bool_arg_values, ids=bool_arg_keys)\n@pytest.mark.parametrize(\"na\", string_na_rep_values, ids=string_na_rep_keys)\ndef test_str_match(data, pat, case, na):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.match(pat, case=case, na=na)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.match(pat, case=case, na=na)\n    else:\n        modin_result = modin_series.str.match(pat, case=case, na=na)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"expand\", bool_arg_values, ids=bool_arg_keys)\ndef test_str_extract(data, expand):\n    modin_series, pandas_series = create_test_series(data)\n\n    if expand is not None:\n        with warns_that_defaulting_to_pandas():\n            # We are only testing that this defaults to pandas, so we will just check for\n            # the warning\n            modin_series.str.extract(r\"([ab])(\\d)\", expand=expand)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_extractall(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    with warns_that_defaulting_to_pandas():\n        # We are only testing that this defaults to pandas, so we will just check for\n        # the warning\n        modin_series.str.extractall(r\"([ab])(\\d)\")\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_len(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.len()\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.len()\n    else:\n        modin_result = modin_series.str.len()\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"to_strip\", string_sep_values, ids=string_sep_keys)\ndef test_str_strip(data, to_strip):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.strip(to_strip=to_strip)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.strip(to_strip=to_strip)\n    else:\n        modin_result = modin_series.str.strip(to_strip=to_strip)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"to_strip\", string_sep_values, ids=string_sep_keys)\ndef test_str_rstrip(data, to_strip):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.rstrip(to_strip=to_strip)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.rstrip(to_strip=to_strip)\n    else:\n        modin_result = modin_series.str.rstrip(to_strip=to_strip)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"to_strip\", string_sep_values, ids=string_sep_keys)\ndef test_str_lstrip(data, to_strip):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.lstrip(to_strip=to_strip)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.lstrip(to_strip=to_strip)\n    else:\n        modin_result = modin_series.str.lstrip(to_strip=to_strip)\n        df_equals(modin_result, pandas_result)\n\n\n
@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"sep\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"expand\", bool_arg_values, ids=bool_arg_keys)\ndef test_str_partition(data, sep, expand):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.partition(sep, expand=expand)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.partition(sep, expand=expand)\n    else:\n        modin_result = modin_series.str.partition(sep, expand=expand)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"sep\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"expand\", bool_arg_values, ids=bool_arg_keys)\ndef test_str_rpartition(data, sep, expand):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.rpartition(sep, expand=expand)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.rpartition(sep, expand=expand)\n    else:\n        modin_result = modin_series.str.rpartition(sep, expand=expand)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_lower(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.lower()\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.lower()\n    else:\n        modin_result = modin_series.str.lower()\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_upper(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.upper()\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.upper()\n    else:\n        modin_result = modin_series.str.upper()\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_title(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.title()\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.title()\n    else:\n        modin_result = modin_series.str.title()\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"sub\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"start\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"end\", int_arg_values, ids=int_arg_keys)\ndef test_str_find(data, sub, start, end):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.find(sub, start=start, end=end)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.find(sub, start=start, end=end)\n    else:\n        modin_result = modin_series.str.find(sub, start=start, end=end)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"sub\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"start\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"end\", int_arg_values, ids=int_arg_keys)\ndef test_str_rfind(data, sub, start, end):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.rfind(sub, start=start, end=end)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.rfind(sub, start=start, end=end)\n    else:\n        modin_result = modin_series.str.rfind(sub, start=start, end=end)\n        df_equals(modin_result, pandas_result)\n\n\n
@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"sub\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"start\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"end\", int_arg_values, ids=int_arg_keys)\ndef test_str_index(data, sub, start, end):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.index(sub, start=start, end=end)\n    except ValueError:\n        # pytest does not get the RayGetErrors\n        assert True\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.index(sub, start=start, end=end)\n    else:\n        modin_result = modin_series.str.index(sub, start=start, end=end)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"sub\", string_sep_values, ids=string_sep_keys)\n@pytest.mark.parametrize(\"start\", int_arg_values, ids=int_arg_keys)\n@pytest.mark.parametrize(\"end\", int_arg_values, ids=int_arg_keys)\ndef test_str_rindex(data, sub, start, end):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.rindex(sub, start=start, end=end)\n    except ValueError:\n        # pytest does not get the RayGetErrors\n        assert True\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.rindex(sub, start=start, end=end)\n    else:\n        modin_result = modin_series.str.rindex(sub, start=start, end=end)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_capitalize(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.capitalize()\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.capitalize()\n    else:\n        modin_result = modin_series.str.capitalize()\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_swapcase(data):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.swapcase()\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.swapcase()\n    else:\n        modin_result = modin_series.str.swapcase()\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\n    \"form\", [\"NFC\", \"NFKC\", \"NFD\", \"NFKD\"], ids=[\"NFC\", \"NFKC\", \"NFD\", \"NFKD\"]\n)\ndef test_str_normalize(data, form):\n    modin_series, pandas_series = create_test_series(data)\n\n    try:\n        pandas_result = pandas_series.str.normalize(form)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.normalize(form)\n    else:\n        modin_result = modin_series.str.normalize(form)\n        df_equals(modin_result, pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)\n@pytest.mark.parametrize(\"pat\", string_sep_values, ids=string_sep_keys)\ndef test_str_translate(data, pat):\n    modin_series, pandas_series = create_test_series(data)\n\n    # Test None table\n    try:\n        pandas_result = pandas_series.str.translate(None)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.translate(None)\n    else:\n        modin_result = modin_series.str.translate(None)\n        df_equals(modin_result, pandas_result)\n\n    # Translation dictionary\n    table = {pat: \"DDD\"}\n    try:\n        pandas_result = pandas_series.str.translate(table)\n    except Exception as e:\n        with pytest.raises(type(e)):\n            modin_series.str.translate(table)\n    else:\n        modin_result = modin_series.str.translate(table)\n        df_equals(modin_result, pandas_result)\n\n
df_equals(modin_result, pandas_result)\n\n # Translation table with maketrans (python3 only)\n if pat is not None:\n table = str.maketrans(pat, \"d\" * len(pat))\n try:\n pandas_result = pandas_series.str.translate(table)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.translate(table)\n else:\n modin_result = modin_series.str.translate(table)\n df_equals(modin_result, pandas_result)\n\n # Test delete chars\n deletechars = \"|\"\n try:\n pandas_result = pandas_series.str.translate(table, deletechars)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.translate(table, deletechars)\n else:\n modin_result = modin_series.str.translate(table, deletechars)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isalnum(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isalnum()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isalnum()\n else:\n modin_result = modin_series.str.isalnum()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isalpha(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isalpha()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isalpha()\n else:\n modin_result = modin_series.str.isalpha()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isdigit(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isdigit()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isdigit()\n else:\n modin_result = modin_series.str.isdigit()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isspace(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isspace()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isspace()\n else:\n modin_result = modin_series.str.isspace()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_islower(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.islower()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.islower()\n else:\n modin_result = modin_series.str.islower()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isupper(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isupper()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isupper()\n else:\n modin_result = modin_series.str.isupper()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_istitle(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.istitle()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.istitle()\n else:\n modin_result = modin_series.str.istitle()\n 
df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isnumeric(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isnumeric()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isnumeric()\n else:\n modin_result = modin_series.str.isnumeric()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_str_isdecimal(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.isdecimal()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.isdecimal()\n else:\n modin_result = modin_series.str.isdecimal()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_casefold(data):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.casefold()\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.casefold()\n else:\n modin_result = modin_series.str.casefold()\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"encoding_type\", encoding_types)\[email protected](\"data\", test_string_data_values, ids=test_string_data_keys)\ndef test_encode(data, encoding_type):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.str.encode(encoding=encoding_type)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.str.encode(encoding=encoding_type)\n else:\n modin_result = modin_series.str.encode(encoding=encoding_type)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"is_sparse_data\", [True, False], ids=[\"is_sparse\", \"is_not_sparse\"]\n)\ndef test_hasattr_sparse(is_sparse_data):\n modin_df, pandas_df = (\n create_test_series(\n pandas.arrays.SparseArray(test_data[\"float_nan_data\"].values())\n )\n if is_sparse_data\n else create_test_series(test_data[\"float_nan_data\"])\n )\n eval_general(modin_df, pandas_df, lambda df: hasattr(df, \"sparse\"))\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\ndef test_cat_categories(data):\n modin_series, pandas_series = create_test_series(data.copy())\n df_equals(modin_series.cat.categories, pandas_series.cat.categories)\n pandas_series.cat.categories = list(\"qwert\")\n modin_series.cat.categories = list(\"qwert\")\n df_equals(modin_series, pandas_series)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\ndef test_cat_ordered(data):\n modin_series, pandas_series = create_test_series(data.copy())\n assert modin_series.cat.ordered == pandas_series.cat.ordered\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\ndef test_cat_codes(data):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.codes\n modin_result = modin_series.cat.codes\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"inplace\", [True, False])\ndef test_cat_rename_categories(data, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.rename_categories(list(\"qwert\"), inplace=inplace)\n modin_result = 
modin_series.cat.rename_categories(list(\"qwert\"), inplace=inplace)\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"ordered\", bool_arg_values, ids=bool_arg_keys)\[email protected](\"inplace\", [True, False])\ndef test_cat_reorder_categories(data, ordered, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.reorder_categories(\n list(\"tades\"), ordered=ordered, inplace=inplace\n )\n modin_result = modin_series.cat.reorder_categories(\n list(\"tades\"), ordered=ordered, inplace=inplace\n )\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"inplace\", [True, False])\ndef test_cat_add_categories(data, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.add_categories(list(\"qw\"), inplace=inplace)\n modin_result = modin_series.cat.add_categories(list(\"qw\"), inplace=inplace)\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"inplace\", [True, False])\ndef test_cat_remove_categories(data, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.remove_categories(list(\"at\"), inplace=inplace)\n modin_result = modin_series.cat.remove_categories(list(\"at\"), inplace=inplace)\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"inplace\", [True, False])\ndef test_cat_remove_unused_categories(data, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_series[1] = np.nan\n pandas_result = pandas_series.cat.remove_unused_categories(inplace=inplace)\n modin_series[1] = np.nan\n modin_result = modin_series.cat.remove_unused_categories(inplace=inplace)\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"ordered\", bool_arg_values, ids=bool_arg_keys)\[email protected](\"rename\", [True, False])\[email protected](\"inplace\", [True, False])\ndef test_cat_set_categories(data, ordered, rename, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.set_categories(\n list(\"qwert\"), ordered=ordered, rename=rename, inplace=inplace\n )\n modin_result = modin_series.cat.set_categories(\n list(\"qwert\"), ordered=ordered, rename=rename, inplace=inplace\n )\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"inplace\", [True, False])\ndef test_cat_as_ordered(data, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.as_ordered(inplace=inplace)\n modin_result = modin_series.cat.as_ordered(inplace=inplace)\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\n \"data\", 
test_data_categorical_values, ids=test_data_categorical_keys\n)\[email protected](\"inplace\", [True, False])\ndef test_cat_as_unordered(data, inplace):\n modin_series, pandas_series = create_test_series(data.copy())\n pandas_result = pandas_series.cat.as_unordered(inplace=inplace)\n modin_result = modin_series.cat.as_unordered(inplace=inplace)\n df_equals(modin_series, pandas_series)\n df_equals(modin_result, pandas_result)\n\n\ndef test_peculiar_callback():\n def func(val):\n if not isinstance(val, tuple):\n raise BaseException(\"Urgh...\")\n return val\n\n pandas_df = pandas.DataFrame({\"col\": [(0, 1)]})\n pandas_series = pandas_df[\"col\"].apply(func)\n\n modin_df = pd.DataFrame({\"col\": [(0, 1)]})\n modin_series = modin_df[\"col\"].apply(func)\n\n df_equals(modin_series, pandas_series)\n"
] | [
[
"matplotlib.use",
"numpy.array",
"pandas.Index",
"numpy.random.choice",
"numpy.random.rand",
"numpy.random.RandomState",
"numpy.testing.assert_equal",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"pandas.date_range",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame.from_dict",
"pandas.MultiIndex.from_arrays",
"pandas.Categorical",
"numpy.arange",
"numpy.random.randint",
"pandas.Series"
]
] |
jcapriot/simpeg | [
"e88e653673c6b818592b6c075f76ee9215fe82b7"
] | [
"SimPEG/electromagnetics/viscous_remanent_magnetization/waveforms.py"
] | [
"import numpy as np\nimport scipy.special as spec\nimport properties\n\n###################################################\n# STEP OFF WAVEFORM\n###################################################\n\n\nclass StepOff(properties.HasProperties):\n\n \"\"\"\n\n \"\"\"\n\n t0 = properties.Float(\"Start of off-time\", default=0.0)\n\n def getCharDecay(self, fieldType, times):\n\n \"\"\"\n Characteristic decay function for step-off waveform. This function\n describes the decay of the VRM response for the linear problem type.\n Note that the current will be normalized by its maximum value. The\n maximum current in the transmitter is specified in the source object.\n\n REQUIRED ARGUMENTS:\n\n fieldType -- must be 'dhdt' or 'dbdt'. Characteristic decay for 'h'\n or 'b' CANNOT be computed for step-off\n\n times -- Observation times. These times MUST be during the off-time.\n\n OUTPUTS:\n\n eta -- characteristic decay function evaluated at all specified times.\n\n \"\"\"\n\n if fieldType not in [\"dhdt\", \"dbdt\"]:\n raise NameError('For step-off, fieldType must be one of \"dhdt\" or \"dbdt\"')\n\n if self.t0 >= np.min(times):\n raise ValueError(\n \"Earliest time channel must be after beginning of off-time (t0 = %.2e s)\"\n % self.t0\n )\n\n t0 = self.t0\n\n if fieldType == \"dbdt\":\n mu0 = 4 * np.pi * 1e-7\n eta = -mu0 / (times - t0)\n elif fieldType == \"dhdt\":\n eta = -1 / (times - t0)\n\n return eta\n\n def getLogUniformDecay(self, fieldType, times, chi0, dchi, tau1, tau2):\n\n \"\"\"\n Decay function for a step-off waveform for log-uniform distribution of\n time-relaxation constants. The output of this function is the\n magnetization at each time for each cell, normalized by the inducing\n field.\n\n REQUIRED ARGUMENTS:\n\n fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.\n\n times -- Observation times\n\n chi0 -- DC (zero-frequency) magnetic susceptibility for all cells\n\n dchi -- DC (zero-frequency) magnetic susceptibility attributed to VRM\n for all cells\n\n tau1 -- Lower-bound for log-uniform distribution of time-relaxation\n constants for all cells\n\n tau2 -- Upper-bound for log-uniform distribution of time-relaxation\n constants for all cells\n\n OUTPUTS:\n\n eta -- characteristic decay function evaluated at all specified times.\n\n \"\"\"\n\n if fieldType not in [\"dhdt\", \"dbdt\"]:\n raise NameError(\n 'For step-off, fieldType must be one of \"dhdt\" or \"dbdt\". 
Cannot be \"h\" or \"dbdt\".'\n )\n\n nT = len(times)\n nC = len(dchi)\n t0 = self.t0\n\n times = np.kron(np.ones((nC, 1)), times)\n chi0 = np.kron(np.reshape(chi0, newshape=(nC, 1)), np.ones((1, nT)))\n dchi = np.kron(np.reshape(dchi, newshape=(nC, 1)), np.ones((1, nT)))\n tau1 = np.kron(np.reshape(tau1, newshape=(nC, 1)), np.ones((1, nT)))\n tau2 = np.kron(np.reshape(tau2, newshape=(nC, 1)), np.ones((1, nT)))\n\n if fieldType == \"h\":\n eta = 0.5 * (1 - np.sign(times - t0)) * chi0 + 0.5 * (\n 1 + np.sign(times - t0)\n ) * (dchi / np.log(tau2 / tau1)) * (\n spec.expi(-(times - t0) / tau2) - spec.expi(-(times - t0) / tau1)\n )\n elif fieldType == \"b\":\n mu0 = 4 * np.pi * 1e-7\n eta = 0.5 * (1 - np.sign(times - t0)) * chi0 + 0.5 * (\n 1 + np.sign(times - t0)\n ) * (dchi / np.log(tau2 / tau1)) * (\n spec.expi(-(times - t0) / tau2) - spec.expi(-(times - t0) / tau1)\n )\n eta = mu0 * eta\n elif fieldType == \"dhdt\":\n eta = 0.0 + 0.5 * (1 + np.sign(times - t0)) * (\n dchi / np.log(tau2 / tau1)\n ) * (np.exp(-(times - t0) / tau1) - np.exp(-(times - t0) / tau2)) / (\n times - t0\n )\n elif fieldType == \"dbdt\":\n mu0 = 4 * np.pi * 1e-7\n eta = 0.0 + 0.5 * (1 + np.sign(times - t0)) * (\n dchi / np.log(tau2 / tau1)\n ) * (np.exp(-(times - t0) / tau1) - np.exp(-(times - t0) / tau2)) / (\n times - t0\n )\n eta = mu0 * eta\n\n return eta\n\n\n###################################################\n# SQUARE PULSE WAVEFORM\n###################################################\n\n\nclass SquarePulse(properties.HasProperties):\n\n \"\"\"\n\n \"\"\"\n\n t0 = properties.Float(\"Start of off-time\", default=0.0)\n delt = properties.Float(\"Pulse width\")\n\n def getCharDecay(self, fieldType, times):\n\n \"\"\"\n Characteristic decay function for a square-pulse waveform. This\n function describes the decay of the VRM response for the linear\n problem type. Note that the current will be normalized by its maximum\n value. The maximum current in the transmitter is specified in the\n source object.\n\n REQUIRED ARGUMENTS:\n\n fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.\n\n times -- Observation times. These times MUST be during the off-time.\n\n OUTPUTS:\n\n eta -- characteristic decay function evaluated at all specified times.\n\n \"\"\"\n\n if self.delt is None:\n raise AssertionError(\"Pulse width property delt must be set.\")\n\n if fieldType not in [\"h\", \"b\", \"dhdt\", \"dbdt\"]:\n raise NameError(\n 'For square pulse, fieldType must be one of \"h\", \"b\", \"dhdt\" or \"dbdt\".'\n )\n\n if self.t0 >= np.min(times):\n raise ValueError(\n \"Earliest time channel must be after beginning of off-time (t0 = %.2e s)\"\n % self.t0\n )\n\n t0 = self.t0\n delt = self.delt\n mu0 = 4 * np.pi * 1e-7\n\n if fieldType == \"h\":\n eta = np.log(1 + delt / (times - t0))\n elif fieldType == \"b\":\n eta = mu0 * np.log(1 + delt / (times - t0))\n elif fieldType == \"dhdt\":\n eta = -(1 / (times - t0) - 1 / (times - t0 + delt))\n elif fieldType == \"dbdt\":\n eta = -mu0 * (1 / (times - t0) - 1 / (times - t0 + delt))\n\n return eta\n\n def getLogUniformDecay(self, fieldType, times, chi0, dchi, tau1, tau2):\n\n \"\"\"\n Decay function for a square-pulse waveform for log-uniform distribution\n of time-relaxation constants. 
The output of this function is the\n magnetization at each time for each cell, normalized by the inducing\n field.\n\n REQUIRED ARGUMENTS:\n\n fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.\n\n times -- Observation times.\n\n chi0 -- DC (zero-frequency) magnetic susceptibility for all cells\n\n dchi -- DC (zero-frequency) magnetic susceptibility attributed to VRM\n for all cells\n\n tau1 -- Lower-bound for log-uniform distribution of time-relaxation\n constants for all cells\n\n tau2 -- Upper-bound for log-uniform distribution of time-relaxation\n constants for all cells\n\n OUTPUTS:\n\n eta -- characteristic decay function evaluated at all specified times.\n\n \"\"\"\n\n if self.delt is None:\n raise AssertionError(\"Pulse width property delt must be set.\")\n\n if fieldType not in [\"h\", \"b\", \"dhdt\", \"dbdt\"]:\n raise NameError(\n 'For square pulse, fieldType must be one of \"h\", \"b\", \"dhdt\" or \"dbdt\".'\n )\n\n nT = len(times)\n nC = len(dchi)\n t0 = self.t0\n delt = self.delt\n\n times = np.kron(np.ones((nC, 1)), times)\n chi0 = np.kron(np.reshape(chi0, newshape=(nC, 1)), np.ones((1, nT)))\n dchi = np.kron(np.reshape(dchi, newshape=(nC, 1)), np.ones((1, nT)))\n tau1 = np.kron(np.reshape(tau1, newshape=(nC, 1)), np.ones((1, nT)))\n tau2 = np.kron(np.reshape(tau2, newshape=(nC, 1)), np.ones((1, nT)))\n\n if fieldType == \"h\":\n eta = (np.sign(times - t0 + delt) - np.sign(times - t0)) * (\n chi0 - dchi\n ) - 0.5 * (1 + np.sign(times - t0)) * (dchi / np.log(tau2 / tau1)) * (\n spec.expi(-(times - t0) / tau2)\n - spec.expi(-(times - t0) / tau1)\n - spec.expi(-(times - t0 + delt) / tau2)\n + spec.expi(-(times - t0 + delt) / tau1)\n )\n elif fieldType == \"b\":\n mu0 = 4 * np.pi * 1e-7\n eta = (np.sign(times - t0 + delt) - np.sign(times - t0)) * (\n chi0 - dchi\n ) - 0.5 * (1 + np.sign(times - t0)) * (dchi / np.log(tau2 / tau1)) * (\n spec.expi(-(times - t0) / tau2)\n - spec.expi(-(times - t0) / tau1)\n - spec.expi(-(times - t0 + delt) / tau2)\n + spec.expi(-(times - t0 + delt) / tau1)\n )\n eta = mu0 * eta\n elif fieldType == \"dhdt\":\n eta = (\n 0.0\n + 0.5\n * (1 + np.sign(times - t0))\n * (dchi / np.log(tau2 / tau1))\n * (np.exp(-(times - t0) / tau1) - np.exp(-(times - t0) / tau2))\n / (times - t0)\n - 0.5\n * (1 + np.sign(times - t0 + delt))\n * (dchi / np.log(tau2 / tau1))\n * (\n np.exp(-(times - t0 + delt) / tau1)\n - np.exp(-(times - t0 + delt) / tau2)\n )\n / (times - t0 + delt)\n )\n elif fieldType == \"dbdt\":\n mu0 = 4 * np.pi * 1e-7\n eta = (\n 0.0\n + 0.5\n * (1 + np.sign(times - t0))\n * (dchi / np.log(tau2 / tau1))\n * (np.exp(-(times - t0) / tau1) - np.exp(-(times - t0) / tau2))\n / (times - t0)\n - 0.5\n * (1 + np.sign(times - t0 + delt))\n * (dchi / np.log(tau2 / tau1))\n * (\n np.exp(-(times - t0 + delt) / tau1)\n - np.exp(-(times - t0 + delt) / tau2)\n )\n / (times - t0 + delt)\n )\n eta = mu0 * eta\n\n return eta\n\n\n###################################################\n# ARBITRARY WAVEFORM UNIFORM DISCRETIZATION\n###################################################\n\n\nclass ArbitraryDiscrete(properties.HasProperties):\n\n \"\"\"\n\n \"\"\"\n\n t_wave = properties.Array(\"Waveform times\", dtype=float)\n I_wave = properties.Array(\"Waveform current\", dtype=float)\n\n @properties.validator(\"t_wave\")\n def _t_wave_validator(self, change):\n\n if len(change[\"value\"]) < 3:\n ValueError(\"Waveform must be defined by at least 3 points.\")\n\n if self.I_wave is not None:\n if len(change[\"value\"]) != len(self.I_wave):\n print(\n \"Length of time vector 
no longer matches length of current vector\"\n )\n\n @properties.validator(\"I_wave\")\n def _I_wave_validator(self, change):\n\n if len(change[\"value\"]) < 3:\n raise ValueError(\"Waveform must be defined by at least 3 points.\")\n\n if (np.abs(change[\"value\"][0]) > 1e-10) | (np.abs(change[\"value\"][-1]) > 1e-10):\n raise ValueError(\n \"Current waveform should begin and end with amplitude of 0. Right now I_1 = {0:.2e} and I_end = {1:.2e}\".format(\n change[\"value\"][0], change[\"value\"][-1]\n )\n )\n\n if self.t_wave is not None:\n if len(change[\"value\"]) != len(self.t_wave):\n print(\n \"Length of time vector no longer matches length of current vector\"\n )\n\n def getCharDecay(self, fieldType, times):\n\n \"\"\"\n Characteristic decay function for arbitrary waveform. This function\n describes the decay of the VRM response for the Linear problem type.\n Note that the current will be normalized by its maximum value. The\n maximum current in the transmitter is specified in the source object.\n\n REQUIRED ARGUMENTS:\n\n fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.\n\n times -- Observation times. These times MUST be during the off-time.\n\n OUTPUTS:\n\n eta -- characteristic decay function evaluated at all specified times.\n\n \"\"\"\n\n if self.t_wave is None:\n raise AssertionError(\"Waveform times (Property: t_wave) are not set.\")\n\n if self.I_wave is None:\n raise AssertionError(\"Waveform current (Property: I_wave) is not set.\")\n\n if fieldType not in [\"h\", \"b\", \"dhdt\", \"dbdt\"]:\n raise NameError(\n 'For arbitrary waveform, fieldType must be one of \"h\", \"b\", \"dhdt\" or \"dbdt\".'\n )\n\n if len(self.t_wave) != len(self.I_wave):\n raise ValueError(\n \"Length of t_wave and I_wave properties must be the same. Currently len(t_wave) = {0:d} and len(I_wave) = {1:d}\".format(\n len(self.t_wave), len(self.I_wave)\n )\n )\n\n k = np.where(self.I_wave > 1e-10)\n j = k[0][0] - 1\n k = k[0][-1] + 1\n\n twave = self.t_wave[j : k + 1]\n Iwave = self.I_wave[j : k + 1] / np.max(np.abs(self.I_wave[j : k + 1]))\n\n n_pts = int(np.ceil(25 * (np.max(twave) - np.min(twave)) / np.min(times)))\n\n if n_pts > 25000:\n n_pts = 25000\n\n dt = (np.max(twave) - np.min(twave)) / np.float64(n_pts)\n tvec = np.linspace(np.min(twave), np.max(twave), n_pts + 1)\n\n g = np.r_[Iwave[0], np.interp(tvec[1:-1], twave, Iwave), Iwave[-1]]\n tvec = tvec[1:]\n\n eta = np.zeros(len(times))\n\n if fieldType in [\"h\", \"b\"]:\n for tt in range(0, len(eta)):\n eta[tt] = np.sum(\n (g[1:] + (g[1:] - g[0:-1]) * (times[tt] - tvec) / dt)\n * np.log(1 + dt / (times[tt] - tvec))\n - g[1:]\n + g[0:-1]\n )\n elif fieldType in [\"dhdt\", \"dbdt\"]:\n for tt in range(0, len(eta)):\n eta[tt] = np.sum(\n ((g[1:] - g[0:-1]) / dt) * np.log(1 + dt / (times[tt] - tvec))\n - (g[1:] + (g[1:] - g[0:-1]) * (times[tt] - tvec) / dt)\n * (1 / (times[tt] - tvec + dt) - 1 / (times[tt] - tvec))\n )\n\n if fieldType in [\"b\", \"dbdt\"]:\n mu0 = 4 * np.pi * 1e-7\n eta = mu0 * eta\n\n return eta\n\n\n###################################################\n# ARBITRARY WAVEFORM PIECEWISE DISCRETIZATION\n###################################################\n\n\nclass ArbitraryPiecewise(properties.HasProperties):\n\n \"\"\"\n\n \"\"\"\n\n t_wave = properties.Array(\"Waveform times\", dtype=float)\n I_wave = properties.Array(\"Waveform current\", dtype=float)\n\n @properties.validator(\"t_wave\")\n def _t_wave_validator(self, change):\n if len(change[\"value\"]) < 3:\n raise ValueError(\"Waveform must be defined by at least 3 points.\")\n\n 
@properties.observer(\"t_wave\")\n def _t_wave_observer(self, change):\n if self.I_wave is not None:\n if len(change[\"value\"]) != len(self.I_wave):\n print(\n \"Length of time vector no longer matches length of current vector\"\n )\n\n @properties.validator(\"I_wave\")\n def _I_wave_validator(self, change):\n if len(change[\"value\"]) < 3:\n raise ValueError(\"Waveform must be defined by at least 3 points.\")\n\n if (np.abs(change[\"value\"][0]) > 1e-10) | (np.abs(change[\"value\"][-1]) > 1e-10):\n raise ValueError(\n \"Current waveform should begin and end with amplitude of 0. Right now I_1 = {0:.2e} and I_end = {1:.2e}\".format(\n change[\"value\"][0], change[\"value\"][-1]\n )\n )\n\n @properties.observer(\"I_wave\")\n def _I_wave_observer(self, change):\n if self.t_wave is not None:\n if len(change[\"value\"]) != len(self.t_wave):\n print(\n \"Length of time vector no longer matches length of current vector\"\n )\n\n def getCharDecay(self, fieldType, times):\n\n \"\"\"\n Characteristic decay function for arbitrary waveform. This function\n describes the decay of the VRM response for the Linear problem type.\n Note that the current will be normalized by its maximum value. The\n maximum current in the transmitter is specified in the source object.\n\n INPUTS:\n\n fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.\n\n times -- Observation times. These times must be during the off-time.\n\n OUTPUTS:\n\n eta -- characteristic decay function evaluated at all specified times.\n\n \"\"\"\n\n if self.t_wave is None:\n raise AssertionError(\"Waveform times (Property: t_wave) are not set.\")\n\n if self.I_wave is None:\n raise AssertionError(\"Waveform current (Property: I_wave) is not set.\")\n\n if fieldType not in [\"h\", \"b\", \"dhdt\", \"dbdt\"]:\n raise NameError(\n 'For arbitrary waveform, fieldType must be one of \"h\", \"b\", \"dhdt\" or \"dbdt\".'\n )\n\n if np.max(self.t_wave) >= np.min(times):\n raise ValueError(\n \"Earliest time channel must be after beginning of off-time (t0 = %.2e s)\"\n % np.max(self.t_wave)\n )\n\n k = np.where(self.I_wave > 1e-10)\n j = k[0][0] - 1\n k = k[0][-1] + 1\n\n tvec = self.t_wave[j : k + 1]\n dt = tvec[1:] - tvec[0:-1]\n g = self.I_wave[j : k + 1] / np.max(np.abs(self.I_wave[j : k + 1]))\n tvec = tvec[1:]\n\n eta = np.zeros(len(times))\n\n if fieldType in [\"h\", \"b\"]:\n for tt in range(0, len(eta)):\n eta[tt] = np.sum(\n (g[1:] + (g[1:] - g[0:-1]) * (times[tt] - tvec) / dt)\n * np.log(1 + dt / (times[tt] - tvec))\n - g[1:]\n + g[0:-1]\n )\n elif fieldType in [\"dhdt\", \"dbdt\"]:\n for tt in range(0, len(eta)):\n eta[tt] = np.sum(\n ((g[1:] - g[0:-1]) / dt) * np.log(1 + dt / (times[tt] - tvec))\n - (g[1:] + (g[1:] - g[0:-1]) * (times[tt] - tvec) / dt)\n * (1 / (times[tt] - tvec + dt) - 1 / (times[tt] - tvec))\n )\n\n if fieldType in [\"b\", \"dbdt\"]:\n mu0 = 4 * np.pi * 1e-7\n eta = mu0 * eta\n\n return eta\n\n\n###################################################\n# CUSTOM DECAY\n###################################################\n\n\nclass Custom(properties.HasProperties):\n\n \"\"\"\n\n \"\"\"\n\n times = properties.Array(\n \"Times at which characteristic decay function is evaluated\", dtype=float\n )\n eta = properties.Array(\n \"Characteristic decay function at evaluation times\", dtype=float\n )\n\n @properties.observer(\"times\")\n def _times_observer(self, change):\n if self.eta is not None:\n if len(change[\"value\"]) != len(self.eta):\n print(\"Length of time vector no longer matches length of eta vector\")\n\n @properties.observer(\"eta\")\n 
def _eta_observer(self, change):\n if self.times is not None:\n if len(change[\"value\"]) != len(self.times):\n print(\"Length of eta vector no longer matches length of time vector\")\n\n def getCharDecay(self):\n \"\"\"Returns characteristic decay function at specified times\"\"\"\n\n if self.eta is None:\n raise AssertionError(\"Characteristic decay (Property: eta) must be set.\")\n\n return self.eta\n"
] | [
[
"numpy.max",
"scipy.special.expi",
"numpy.reshape",
"numpy.log",
"numpy.ones",
"numpy.min",
"numpy.interp",
"numpy.exp",
"numpy.float64",
"numpy.where",
"numpy.sign",
"numpy.abs"
]
] |
Apidwalin/tensorflow1-models-master | [
"30717bc3358f26da8be89c96e641f95604402b7d"
] | [
"official/core/actions.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides TFM orbit actions and associated helper functions/classes.\"\"\"\n\nimport os\nfrom typing import List\n\nimport gin\nimport orbit\nimport tensorflow as tf\n\nfrom official.core import base_trainer\nfrom official.core import config_definitions\nfrom official.modeling import optimization\n\n\nclass EMACheckpointing:\n \"\"\"Eval action to save checkpoint with average weights when EMA is used.\n\n This action swaps the weights of the model with the average weights, then it\n saves the checkpoint under export_dir/ema_checkpoints. Checkpointing is\n expensive for large models, so doing this action in eval is more efficient\n than training.\n \"\"\"\n\n def __init__(self, export_dir: str, optimizer: tf.keras.optimizers.Optimizer,\n checkpoint: tf.train.Checkpoint, max_to_keep: int = 1):\n \"\"\"Initializes the instance.\n\n Args:\n export_dir: `str` for the export directory of the EMA average weights.\n optimizer: `tf.keras.optimizers.Optimizer` optimizer instance used for\n training. This will be used to swap the model weights with the average\n weigths.\n checkpoint: `tf.train.Checkpoint` instance.\n max_to_keep: `int` for max checkpoints to keep in ema_checkpoints subdir.\n \"\"\"\n if not isinstance(optimizer, optimization.ExponentialMovingAverage):\n raise ValueError('Optimizer has to be instance of'\n 'optimization.ExponentialMovingAverage for'\n 'EMACheckpointing action')\n\n export_dir = os.path.join(export_dir, 'ema_checkpoints')\n tf.io.gfile.makedirs(\n os.path.dirname(export_dir))\n self._optimizer = optimizer\n self._checkpoint = checkpoint\n self._checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=export_dir,\n max_to_keep=max_to_keep,\n checkpoint_name='average_weights')\n\n def __call__(self, output: orbit.runner.Output):\n \"\"\"Swaps model weights, and saves the checkpoint.\n\n Args:\n output: The train or eval output to test.\n \"\"\"\n self._optimizer.swap_weights()\n self._checkpoint_manager.save(checkpoint_number=self._optimizer.iterations)\n self._optimizer.swap_weights()\n\n\[email protected]\ndef get_eval_actions(\n params: config_definitions.ExperimentConfig,\n trainer: base_trainer.Trainer,\n model_dir: str) -> List[orbit.Action]:\n \"\"\"Gets eval actions for TFM trainer.\"\"\"\n eval_actions = []\n # Adds ema checkpointing action to save the average weights under\n # ema_checkpoints subdir.\n if isinstance(trainer.optimizer, optimization.ExponentialMovingAverage):\n eval_actions.append(\n EMACheckpointing(\n export_dir=model_dir,\n optimizer=trainer.optimizer,\n checkpoint=trainer.checkpoint,\n max_to_keep=params.trainer.max_to_keep))\n\n return eval_actions\n"
] | [
[
"tensorflow.train.CheckpointManager"
]
] |
LuoXukun/HanTokenization | [
"7041547d0a9c1772abcdbd490e4b74ef91806f88"
] | [
"notebooks/view-data.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n# In[1]:\n\n\nimport numpy as np\nimport torch\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport json\nfrom collections import Counter\n\n\n# In[2]:\n\n\nDATAROOT = '../datasets'\nVOCAB_FILE = os.path.join(DATAROOT, 'training_vocab.txt')\nTRAIN_FILE = os.path.join(DATAROOT, 'training.txt')\nTEST_FILE = os.path.join(DATAROOT, 'test.txt')\n\nvocab = set()\ntrain_set = []\ntest_set = []\n\nwith open(VOCAB_FILE, 'r', encoding='utf-8') as f:\n vocab = set(map(str.strip, f.readlines()))\n\nwith open(TRAIN_FILE, 'r', encoding='utf-8') as f:\n train_set = list(map(str.strip, f.readlines()))\n\nwith open(TEST_FILE, 'r', encoding='utf-8') as f:\n test_set = list(map(str.strip, f.readlines()))\n\n\n# In[3]:\n\n\nprint('vocab size ', len(vocab))\nprint('training set size: ', len(train_set))\nprint('test set size: ', len(test_set))\nprint(\"ten item of training set\")\nprint(train_set[:5])\nprint(\"ten item of test set\")\nprint(test_set[:5])\n\n\n# In[4]:\n\n\ntrain_set_split = [line.split(' ') for line in train_set]\n\n\n# In[5]:\n\n\nlen(train_set) == len(train_set_split)\n\n\n# In[6]:\n\n\ntrain_set_split[:5]\n\n\n# In[7]:\n\n\ncnt = Counter()\nfor line in train_set_split:\n cnt.update(line)\n\n\n# In[8]:\n\n\nlen(cnt) < len(vocab)\n\n\n# In[9]:\n\n\ncnt0 = Counter(train_set_split[0])\nprint(len(train_set_split[0]))\nprint(len(cnt0))\nprint(cnt0)\n\n\n# In[10]:\n\n\ncnt.most_common()\n\n\n# In[11]:\n\n# In[12]:\n\n\n# from wordcloud import WordCloud\n\n\n# In[13]:\n\n\nnpcnt = np.array(cnt.most_common())\n\n\n# In[14]:\n\n\nimport pandas as pd\n\n\n# In[15]:\n\n\ncnt_series = pd.Series(cnt.most_common())\n\n\n# In[27]:\n\n\nprint(\"每个词平均出现次数:\", sum(cnt.values()) / len(cnt))\n\n\n# In[29]:\n\n\nwords, times = [], []\nfor word, time in cnt.most_common():\n words.append(word), times.append(time)\n\n\n# In[38]:\n\ndef get_plot():\n import matplotlib\n matplotlib.rcParams['font.sans-serif']=['SimHei'] # 用黑体显示中文\n matplotlib.rcParams['axes.unicode_minus']=False # 正常显示负号\n\n from nltk.probability import FreqDist\n\n frqdist = FreqDist(cnt)\n\n plt.figure(figsize=(16, 8))\n plt.grid(False)\n frqdist.plot(80)\n plt.show()\n\n plt.figure(figsize=(16, 8))\n plt.grid(False)\n frqdist.plot(80, cumulative=True)\n\n# In[ ]:\n\ndef get_word_length(cnt):\n len_cnt = {}\n for word, time in cnt.most_common():\n if len(word) in len_cnt:\n len_cnt[len(word)] += time\n else:\n len_cnt[len(word)] = time\n return len_cnt\n\nlen_cnt = get_word_length(cnt)\nprint(len_cnt)\n\ndef avg_length():\n res = 0\n tot_word = 0\n for k in len_cnt:\n res += k * len_cnt[k]\n tot_word += len_cnt[k]\n return res / tot_word\n\navg_len = avg_length()\n# 1.645548579259047\nprint(avg_len)\n\ndef plot_length_cnt():\n import matplotlib\n matplotlib.rcParams['font.sans-serif']=['SimHei'] # 用黑体显示中文\n matplotlib.rcParams['axes.unicode_minus']=False # 正常显示负号\n from nltk.probability import FreqDist\n\n frqdist = FreqDist(len_cnt)\n\n plt.figure(figsize=(16, 8))\n frqdist.plot(len(len_cnt))\n plt.savefig('训练集词长度频率分布.png')\n plt.show()\n\n plt.figure(figsize=(16, 8))\n frqdist.plot(len(len_cnt), cumulative=True)\n plt.savefig('训练集词长度频率分布-累加.png')\n plt.show()\n\nplot_length_cnt()\n"
] | [
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
lukeshingles/atlasserver | [
"87c8e437891a1516ac1fadb84d1d9b796dc5a367"
] | [
"taskrunner/atlas_gettaskfirstimage.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nThis script is to be run on sc01. A job datafile is used to get the first images in JPEG\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom pathlib import Path\nimport sys\n\n\ndef main():\n if len(sys.argv) != 3:\n print(\"ERROR: exactly two argument must be specified: [DATAFILE] ['red' or 'diff']\")\n sys.exit(1)\n return\n\n datafile = sys.argv[1]\n reduced = (sys.argv[2] == 'red')\n\n if not os.path.exists(datafile):\n return\n\n df = pd.read_csv(datafile, delim_whitespace=True, escapechar='#')\n\n if df.empty:\n return\n\n row = df.iloc[0]\n obs = row['Obs'] # looks like '01a59309o0235c'\n imgfolder = 'red' if reduced else 'diff' # difference or reduced image\n fitsext = 'fits' if reduced else 'diff'\n fitsinput = f'/atlas/{imgfolder}/{obs[:3]}/{obs[3:8]}/{obs}.{fitsext}.fz'\n fitsoutpath = Path(datafile).with_suffix('.fits')\n os.system(\n \"/atlas/vendor/monsta/bin/monsta /atlas/lib/monsta/subarray.pro \"\n f\"{fitsinput} {fitsoutpath} \"\n f\"$(/atlas/bin/pix2sky -sky2pix {fitsinput} {row['RA']} {row['Dec']}) 100\"\n \"\\n\"\n )\n\n # delete the .fits (but keep the .jpeg)\n if fitsoutpath.exists():\n fitsoutpath.unlink()\n\n # jobxxxxx.fits.jpg to jobxxxx.first.jpg\n fitsoutpath.with_suffix('.fits.jpg').rename(Path(datafile).with_suffix('.jpg'))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv"
]
] |
teffland/ner-expected-entity-ratio | [
"557bbba8fac9ea6f34f1394356fd98a17474b4e9"
] | [
"ml/dataset_readers/partial_jsonl_reader.py"
] | [
"from typing import Iterator, List, Dict, Any, Union, Tuple\nimport torch\nimport torch.optim as optim\nimport numpy as np\nfrom allennlp.data import Instance\nfrom allennlp.data.fields import (\n TextField,\n SequenceLabelField,\n ListField,\n LabelField,\n MetadataField,\n ArrayField,\n)\nfrom allennlp.data.dataset_readers import DatasetReader\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.data.token_indexers import (\n TokenIndexer,\n SingleIdTokenIndexer,\n PretrainedTransformerIndexer,\n)\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.vocabulary import Vocabulary\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nimport h5py\nimport numpy as np\nimport json\nfrom collections import defaultdict\n\nfrom ml.dataset_readers.transformers_converter import TransformersConverter\n\n\[email protected](\"partial-jsonl\", exist_ok=True)\nclass PartialJsonlReader(DatasetReader):\n \"\"\"\n DatasetReader for jsonl data in standardized format:\n\n datum = {\n 'uid': str, # unique id for datum in the corpus (must be unique across train/dev/test)\n 'tokens': List[str], # lexical tokenization (not the same as BPE used by pretrained LMs)\n 'is_complete': bool, # whether to assume unannotated tokens are \"O\" or \"-\"\n 'gold_annotations': List[Dict[str,Any]] with format:\n {\n 'kind': str, # in ('pos', 'chunk', 'entity')\n 'type': str, # the actual annotation tag/class e.g., PER\n 'start': int, # start token index\n 'end': int, # end token index\n 'mention': str, # mention == ' '.join(tokens[start:end])\n }\n }\n\n This dataset reader will further tokenize the data into BPEs given by the provided language model\n and will extend the annotation types to a BILOU encoding over the BPEs.\n\n e.g.,\n given an input data like:\n tokens = [\"Barack\", \"Obama\", \"was\", \"president\"]\n gold_annotations = [ { ... 'start':0, 'end':2', 'type':'PER' ...} ]\n is_complete = False\n\n might be tokenized as:\n \"Bar\", \"ack\", \"_Obama\", \"_was\", \"_president\"\n\n and the resulting tags will be:\n \"B-PER\", \"I-PER\", \"L-PER\", \"-\", \"-\"\n\n with token mapping:\n t2b = {\n 0: [0,1], # \"Barack\" -> \"Bar\", \"ack\"\n 1: [2],\n 2: [3],\n 3: [4]\n }\n\n\n **NOTE**: Currently this reader only handles Bert and Roberta language models from transformers\n\n \"\"\"\n\n def __init__(\n self,\n token_namespace: str = \"tokens\",\n token_indexers: Dict[str, TokenIndexer] = None,\n model_name: str = None,\n assume_complete: Union[str, bool] = False,\n latent_tag: str = \"_\",\n O_tag: str = \"O\",\n kind: str = \"entity\",\n label_encoding: str = \"BIOUL\",\n drop_unannotated_sentences: bool = False,\n drop_unannotated_docs: bool = False,\n debug: bool = False,\n lazy: bool = False,\n limit: int = None,\n ) -> None:\n \"\"\"\n Args:\n token_namespace: key for tokens field\n token_indexers: set of indexers for tokens\n model_name: pretrained lm to use for subword tokenization\n assume_complete: whether to assume data are complete if they are missing `is_complete` field.\n latent_tag: special tag to use as \"latent variable\" signal to model.\n O_tag: special tag to use as \"filler\" in between annotations.\n kind: subtype of annotations to extract (for our experiments it's always \"entity\")\n label_encoding: Should use BIOUL\n drop_unannotated_sentences: drop all unannotated sentences after the last annotated sentence in a doc. This\n is the \"shortest\" preprocessing setting in the paper.\n drop_unannotated_doxs: drop docs with no annotations. 
This is the \"short\" preprocessing setting in the paper.\n debug: whether to show debug outputs\n lazy: whether to load in all data at once.\n limit: limit number of instances for debugging\n \"\"\"\n super().__init__(lazy=lazy)\n self.limit = limit\n self.token_namespace = token_namespace\n self.subword_converter = TransformersConverter(model_name) if model_name else None\n self.maxlen = self.subword_converter.tokenizer.model_max_length if model_name else None\n\n if not token_indexers:\n if model_name:\n token_indexers = {\n \"tokens\": PretrainedTransformerIndexer(\n model_name,\n namespace=token_namespace,\n tokenizer_kwargs=dict(use_fast=True),\n )\n }\n else:\n token_indexers = {\"tokens\": SingleIdTokenIndexer(token_namespace)}\n self.token_indexers = token_indexers\n\n if isinstance(assume_complete, str):\n assume_complete = assume_complete.lower() == \"true\"\n self.assume_complete = assume_complete\n self.latent_tag = \"_\"\n self.O = O_tag\n self.label_encoding = label_encoding\n self.kind = kind\n\n assert not (drop_unannotated_docs and drop_unannotated_sentences)\n self.drop_unannotated_sentences = drop_unannotated_sentences\n self.drop_unannotated_docs = drop_unannotated_docs\n\n if debug:\n logger.setLevel(logging.DEBUG)\n\n def text_to_instances(self, tokens: List[str], annotations: List[Dict[str, Any]] = [], **metadata) -> Instance:\n metadata[\"og_tokens\"] = tokens\n if self.subword_converter is not None:\n tokens, tokidx2bpeidxs = self.subword_converter(tokens)\n else:\n tokidx2bpeidxs = {i: [i] for i in range(len(tokens))}\n metadata[\"tokidx2bpeidxs\"] = tokidx2bpeidxs\n tags = self.get_tags(tokens, annotations, metadata)\n # print(\"go;d tags\", tags)\n\n for tokens, tags, metadata in self.as_maximal_subdocs(tokens, tags, metadata):\n metadata[\"bpe_tokens\"] = tokens\n tokens = [Token(t) for t in tokens]\n tokens_field = TextField(tokens, self.token_indexers)\n tag_namespace = \"labels\"\n fields = dict(\n tokens=tokens_field,\n tags=SequenceLabelField(tags, tokens_field, label_namespace=tag_namespace),\n metadata=MetadataField(metadata),\n )\n\n yield Instance(fields)\n\n def _read(self, file_path: str) -> Iterator[Instance]:\n og_n, actual_n = 0, 0\n with open(file_path) as f:\n for i, line in enumerate(f):\n if self.limit and i >= self.limit:\n return\n datum = json.loads(line)\n tokens = datum.pop(\"tokens\")\n annotations = datum.pop(\"gold_annotations\")\n\n if self.drop_unannotated_sentences:\n n = len(tokens)\n og_n += n\n tokens = self._get_annotated_subdoc(tokens, annotations, datum)\n actual_n += len(tokens)\n print(f\"{i} Dropped from {n} tokens to {len(tokens)} annotated ones\", flush=True)\n elif self.drop_unannotated_docs:\n og_n += 1\n if not annotations:\n tokens = []\n print(f\"{i} Dropped unannotated doc\", flush=True)\n else:\n actual_n += 1\n\n if tokens:\n for instance in self.text_to_instances(tokens=tokens, annotations=annotations, **datum):\n yield instance\n\n if self.drop_unannotated_sentences:\n print(f\"Cut down total tokens from {og_n} to {actual_n} = {100.*actual_n/og_n} %\", flush=True)\n elif self.drop_unannotated_docs:\n print(f\"Cut down docs from {og_n} to {actual_n} = {100.*actual_n/og_n} %\", flush=True)\n\n def get_tags(\n self,\n tokens: List[str],\n annotations: Dict[str, Dict[str, Any]],\n metadata: Dict[str, Any],\n ) -> List[str]:\n \"\"\"Create tag sequence from annotations and possible subword mapping.\"\"\"\n # Filter down annotations only to the specified kind\n annotations = [a for a in annotations if a[\"kind\"] == 
self.kind]\n\n        tokidx2bpeidxs = metadata[\"tokidx2bpeidxs\"]\n        n = len(tokens)\n        # print(\"tokens\", tokens)\n        # Default tags are either O or latent\n        if self.assume_complete:\n            tags = [self.O] * n\n        else:\n            tags = [self.latent_tag] * n\n        # Fill in any partial annotations\n        # Map start,ends for the observed annotations onto tokens\n        # print(\"---\")\n        # print(metadata)\n        # print(n, tokens)\n        # print(annotations)\n        # print(tokidx2bpeidxs)\n        for ann in annotations:\n            s, e, t = (\n                tokidx2bpeidxs[ann[\"start\"]][0],\n                tokidx2bpeidxs[ann[\"end\"] - 1][-1],\n                ann[\"type\"],\n            )\n            # print(s, e, t, ann)\n            if t == self.O:\n                for k in range(s, e + 1):\n                    tags[k] = self.O\n            else:\n                if self.label_encoding == \"BIOUL\":\n                    if e - s > 0:\n                        tags[s] = f\"B-{t}\"\n                        for k in range(s + 1, e):\n                            tags[k] = f\"I-{t}\"\n                        tags[e] = f\"L-{t}\"\n                    else:\n                        tags[s] = f\"U-{t}\"\n                elif self.label_encoding == \"BIO\":\n                    tags[s] = f\"B-{t}\"\n                    if e - s > 0:\n                        for k in range(s + 1, e + 1):\n                            tags[k] = f\"I-{t}\"\n                else:\n                    raise ValueError(self.label_encoding)\n\n        # If we are using transformers, force first and last tokens (special tokens) to be O tag\n        if self.subword_converter is not None:\n            tags[0] = self.O\n            tags[-1] = self.O\n\n        # print(tags, flush=True)\n\n        return tags\n\n    def as_maximal_subdocs(\n        self,\n        tokens: List[str],\n        tags: List[str],\n        metadata: Dict[str, Any],\n    ) -> List[Tuple[List[str], List[str], Dict[str, Any]]]:\n        \"\"\"Break up documents that are too large along sentence boundaries into ones that fit within the length limit.\"\"\"\n        if self.maxlen is None or len(tokens) <= self.maxlen:\n            subdocs = [(tokens, tags, metadata)]\n        else:\n            subdocs = []\n            tok2bpes = metadata.pop(\"tokidx2bpeidxs\")\n            bpe2tok = {v: k for k, vs in tok2bpes.items() for v in vs}\n            uid = metadata.pop(\"uid\", metadata.get(\"id\", \"NO ID\"))\n            # print(f\"Breaking up sentences for {uid} with len: {len(tokens)}\")\n            s_tok, s_bpe, s_L = 0, 0, 0\n            if \"sentence_ends\" in metadata:\n                ends = metadata.pop(\"sentence_ends\")\n            else:\n                print(f\"No sentence ends found for {uid}, using crude length-based boundaries\")\n                ends = list(range(1, len(tok2bpes) + 1))\n            subdoc_tokens, subdoc_tags, subdoc_metadata = None, None, None\n\n            # We use a slightly smaller subgrouping length than actually allowed because sometimes breaking up the sentences\n            # that are too long starts or ends in the middle of a word and fixing to the full word goes over the maxlen\n            maxlen = self.maxlen - 10\n            for i, e_tok in enumerate(ends):\n                # print(f\"i:{i}, e:{e_tok}, bpes:{tok2bpes.keys()}\")\n                e_bpe = tok2bpes[e_tok][0] if e_tok < len(tok2bpes) else len(tokens)\n\n                # Check to see if this sentence would put the current subdoc over the edge.\n                if i and (e_bpe - s_bpe) > maxlen:\n                    # If so, finish off the subdoc and advance the start cursors\n                    subdoc_tok2bpe = {(k - s_tok): [v - s_bpe for v in tok2bpes[k]] for k in range(s_tok, e_tok)}\n                    subdoc_metadata = dict(\n                        uid=f\"{uid}-S{s_L}:{i-1}\",\n                        tokidx2bpeidxs=subdoc_tok2bpe,\n                        **metadata,\n                    )\n                    logger.debug(f\"\\nAdding subdoc {subdoc_metadata['uid']} with len {len(subdoc_tokens)}\")\n                    logger.debug(\n                        f'{\" \".join([f\"{t}/{l}\" if l != self.latent_tag else t for t, l in zip(subdoc_tokens, subdoc_tags)])}'\n                    )\n                    assert len(subdoc_tokens) == len(subdoc_tags)\n                    subdocs.append((subdoc_tokens, subdoc_tags, subdoc_metadata))\n                    s_tok = ends[i - 1]\n                    s_bpe = tok2bpes[s_tok][0]\n                    s_L = i\n\n                # Compute the next candidate subdoc\n                # If the next candidate subdoc will be too long on its own, break it up into smaller pieces that fit.\n                # 
(ie, when there is sentence that is too long)\n if (e_bpe - s_bpe) > maxlen:\n # Make sure the subgroups start/end on word boundaries\n def to_word_boundary(bpe):\n if bpe in (0, len(tokens)):\n return bpe\n else:\n return tok2bpes[bpe2tok[bpe]][0]\n\n n_groups = int(np.ceil((e_bpe - s_bpe) / maxlen))\n s_bpes = [to_word_boundary(s_bpe + g * maxlen) for g in range(n_groups)]\n e_bpes = [to_word_boundary(min(e_bpe, s_bpe + (g + 1) * maxlen)) for g in range(n_groups)]\n else:\n s_bpes = [s_bpe]\n e_bpes = [e_bpe]\n for g, (s_bpe, e_bpe) in enumerate(zip(s_bpes, e_bpes)):\n # Collect the sentence tokens, tags, and add on start/end tokens&tags where needed\n subdoc_tokens = tokens[s_bpe:e_bpe]\n subdoc_tags = tags[s_bpe:e_bpe]\n if not subdoc_tokens[0] == tokens[0]:\n subdoc_tokens = [tokens[0]] + subdoc_tokens\n subdoc_tags = [tags[0]] + subdoc_tags\n if not subdoc_tokens[-1] == tokens[-1]:\n subdoc_tokens = subdoc_tokens + [tokens[-1]]\n subdoc_tags = subdoc_tags + [tags[-1]]\n\n if g < len(s_bpes) - 1:\n # All but the last group get turned into subdocs here\n e_tok = bpe2tok[e_bpe]\n subdoc_tok2bpe = {(k - s_tok): [v - s_bpe for v in tok2bpes[k]] for k in range(s_tok, e_tok)}\n subdoc_metadata = dict(\n uid=f\"{uid}-S{s_L}:{i-1}.G{g}\",\n tokidx2bpeidxs=subdoc_tok2bpe,\n **metadata,\n )\n logger.debug(f\"\\nAdding subdoc {subdoc_metadata['uid']} with len {len(subdoc_tokens)}\")\n logger.debug(\n f'{\" \".join([f\"{t}/{l}\" if l != self.latent_tag else t for t, l in zip(subdoc_tokens, subdoc_tags)])}'\n )\n assert len(subdoc_tokens) == len(subdoc_tags)\n subdocs.append((subdoc_tokens, subdoc_tags, subdoc_metadata))\n s_tok = e_tok\n\n # Add the last one\n subdoc_tok2bpe = {(k - s_tok): [v - s_bpe for v in tok2bpes[k]] for k in range(s_tok, e_tok)}\n subdoc_metadata = dict(uid=f\"{uid}-S{s_L}:{i}\", tokidx2bpeidxs=subdoc_tok2bpe, **metadata)\n # print(f\"\\nAdding subdoc {subdoc_metadata['uid']} with len {len(subdoc_tokens)}\")\n # print(\" \".join([f\"{t}/{l}\" if l != self.latent_tag else t for t, l in zip(subdoc_tokens, subdoc_tags)]))\n subdocs.append((subdoc_tokens, subdoc_tags, subdoc_metadata))\n\n cat_tokens = [t for (subdoctoks, _, _) in subdocs for t in subdoctoks[1:-1]]\n assert len(cat_tokens) == len(tokens) - 2, f\"{len(cat_tokens)} != {len(tokens)-2}\"\n assert cat_tokens == tokens[1:-1], f\"{list(zip(cat_tokens, tokens[1:-1]))}\"\n return subdocs\n\n def _get_annotated_subdoc(self, tokens, annotations, metadata):\n \"\"\" Chop off trailing sentences where there are no annotations. 
\"\"\"\n annotations = [a for a in annotations if a[\"kind\"] == self.kind]\n if annotations:\n last_ann = sorted(annotations, key=lambda a: -a[\"end\"])[0]\n ends = sorted([e for e in metadata[\"sentence_ends\"] if e >= last_ann[\"end\"]])\n if ends:\n return tokens[: ends[0]]\n else:\n return tokens\n else:\n return []\n\n\ndef test():\n data = [\n {\n \"uid\": \"eng.testa-D1-S1\",\n \"tokens\": [\n \"CRICKET\",\n \"-\",\n \"LEICESTERSHIRE\",\n \"TAKE\",\n \"OVER\",\n \"AT\",\n \"TOP\",\n \"AFTER\",\n \"INNINGS\",\n \"VICTORY\",\n \".\",\n ],\n \"gold_annotations\": [\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 0,\n \"end\": 1,\n \"mention\": \"CRICKET\",\n },\n {\"kind\": \"pos\", \"type\": \":\", \"start\": 1, \"end\": 2, \"mention\": \"-\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 0,\n \"end\": 1,\n \"mention\": \"CRICKET\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 2,\n \"end\": 3,\n \"mention\": \"LEICESTERSHIRE\",\n },\n {\"kind\": \"pos\", \"type\": \"NNP\", \"start\": 3, \"end\": 4, \"mention\": \"TAKE\"},\n {\n \"kind\": \"entity\",\n \"type\": \"ORG\",\n \"start\": 2,\n \"end\": 3,\n \"mention\": \"LEICESTERSHIRE\",\n },\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 4, \"end\": 5, \"mention\": \"OVER\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 2,\n \"end\": 4,\n \"mention\": \"LEICESTERSHIRE TAKE\",\n },\n {\"kind\": \"pos\", \"type\": \"NNP\", \"start\": 5, \"end\": 6, \"mention\": \"AT\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"PP\",\n \"start\": 4,\n \"end\": 5,\n \"mention\": \"OVER\",\n },\n {\"kind\": \"pos\", \"type\": \"NNP\", \"start\": 6, \"end\": 7, \"mention\": \"TOP\"},\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 7,\n \"end\": 8,\n \"mention\": \"AFTER\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 8,\n \"end\": 9,\n \"mention\": \"INNINGS\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NN\",\n \"start\": 9,\n \"end\": 10,\n \"mention\": \"VICTORY\",\n },\n {\"kind\": \"pos\", \"type\": \".\", \"start\": 10, \"end\": 11, \"mention\": \".\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 5,\n \"end\": 10,\n \"mention\": \"AT TOP AFTER INNINGS VICTORY\",\n },\n ],\n \"is_complete\": True,\n },\n {\n \"uid\": \"eng.testa-D1-S2\",\n \"tokens\": [\"LONDON\", \"1996-08-30\"],\n \"gold_annotations\": [\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 0,\n \"end\": 1,\n \"mention\": \"LONDON\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"CD\",\n \"start\": 1,\n \"end\": 2,\n \"mention\": \"1996-08-30\",\n },\n {\n \"kind\": \"entity\",\n \"type\": \"LOC\",\n \"start\": 0,\n \"end\": 1,\n \"mention\": \"LONDON\",\n },\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 0,\n \"end\": 2,\n \"mention\": \"LONDON 1996-08-30\",\n },\n ],\n \"is_complete\": False,\n },\n {\n \"uid\": \"eng.testa-D1-S3\",\n \"tokens\": [\n \"West\",\n \"Indian\",\n \"all-rounder\",\n \"Phil\",\n \"Simmons\",\n \"took\",\n \"four\",\n \"for\",\n \"38\",\n \"on\",\n \"Friday\",\n \"as\",\n \"Leicestershire\",\n \"beat\",\n \"Somerset\",\n \"by\",\n \"an\",\n \"innings\",\n \"and\",\n \"39\",\n \"runs\",\n \"in\",\n \"two\",\n \"days\",\n \"to\",\n \"take\",\n \"over\",\n \"at\",\n \"the\",\n \"head\",\n \"of\",\n \"the\",\n \"county\",\n \"championship\",\n \".\",\n ],\n \"gold_annotations\": [\n {\"kind\": \"pos\", \"type\": \"NNP\", \"start\": 0, \"end\": 1, \"mention\": \"West\"},\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 1,\n 
\"end\": 2,\n \"mention\": \"Indian\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NN\",\n \"start\": 2,\n \"end\": 3,\n \"mention\": \"all-rounder\",\n },\n {\n \"kind\": \"entity\",\n \"type\": \"MISC\",\n \"start\": 0,\n \"end\": 2,\n \"mention\": \"West Indian\",\n },\n {\"kind\": \"pos\", \"type\": \"NNP\", \"start\": 3, \"end\": 4, \"mention\": \"Phil\"},\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 4,\n \"end\": 5,\n \"mention\": \"Simmons\",\n },\n {\"kind\": \"pos\", \"type\": \"VBD\", \"start\": 5, \"end\": 6, \"mention\": \"took\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 0,\n \"end\": 5,\n \"mention\": \"West Indian all-rounder Phil Simmons\",\n },\n {\n \"kind\": \"entity\",\n \"type\": \"PER\",\n \"start\": 3,\n \"end\": 5,\n \"mention\": \"Phil Simmons\",\n },\n {\"kind\": \"pos\", \"type\": \"CD\", \"start\": 6, \"end\": 7, \"mention\": \"four\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"VP\",\n \"start\": 5,\n \"end\": 6,\n \"mention\": \"took\",\n },\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 7, \"end\": 8, \"mention\": \"for\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 6,\n \"end\": 7,\n \"mention\": \"four\",\n },\n {\"kind\": \"pos\", \"type\": \"CD\", \"start\": 8, \"end\": 9, \"mention\": \"38\"},\n {\"kind\": \"chunk\", \"type\": \"PP\", \"start\": 7, \"end\": 8, \"mention\": \"for\"},\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 9, \"end\": 10, \"mention\": \"on\"},\n {\"kind\": \"chunk\", \"type\": \"NP\", \"start\": 8, \"end\": 9, \"mention\": \"38\"},\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 10,\n \"end\": 11,\n \"mention\": \"Friday\",\n },\n {\"kind\": \"chunk\", \"type\": \"PP\", \"start\": 9, \"end\": 10, \"mention\": \"on\"},\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 11, \"end\": 12, \"mention\": \"as\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 10,\n \"end\": 11,\n \"mention\": \"Friday\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 12,\n \"end\": 13,\n \"mention\": \"Leicestershire\",\n },\n {\n \"kind\": \"chunk\",\n \"type\": \"PP\",\n \"start\": 11,\n \"end\": 12,\n \"mention\": \"as\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"VBD\",\n \"start\": 13,\n \"end\": 14,\n \"mention\": \"beat\",\n },\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 12,\n \"end\": 13,\n \"mention\": \"Leicestershire\",\n },\n {\n \"kind\": \"entity\",\n \"type\": \"ORG\",\n \"start\": 12,\n \"end\": 13,\n \"mention\": \"Leicestershire\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NNP\",\n \"start\": 14,\n \"end\": 15,\n \"mention\": \"Somerset\",\n },\n {\n \"kind\": \"chunk\",\n \"type\": \"VP\",\n \"start\": 13,\n \"end\": 14,\n \"mention\": \"beat\",\n },\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 15, \"end\": 16, \"mention\": \"by\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 14,\n \"end\": 15,\n \"mention\": \"Somerset\",\n },\n {\n \"kind\": \"entity\",\n \"type\": \"ORG\",\n \"start\": 14,\n \"end\": 15,\n \"mention\": \"Somerset\",\n },\n {\"kind\": \"pos\", \"type\": \"DT\", \"start\": 16, \"end\": 17, \"mention\": \"an\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"PP\",\n \"start\": 15,\n \"end\": 16,\n \"mention\": \"by\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NN\",\n \"start\": 17,\n \"end\": 18,\n \"mention\": \"innings\",\n },\n {\"kind\": \"pos\", \"type\": \"CC\", \"start\": 18, \"end\": 19, \"mention\": \"and\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 16,\n 
\"end\": 18,\n \"mention\": \"an innings\",\n },\n {\"kind\": \"pos\", \"type\": \"CD\", \"start\": 19, \"end\": 20, \"mention\": \"39\"},\n {\n \"kind\": \"pos\",\n \"type\": \"NNS\",\n \"start\": 20,\n \"end\": 21,\n \"mention\": \"runs\",\n },\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 21, \"end\": 22, \"mention\": \"in\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 19,\n \"end\": 21,\n \"mention\": \"39 runs\",\n },\n {\"kind\": \"pos\", \"type\": \"CD\", \"start\": 22, \"end\": 23, \"mention\": \"two\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"PP\",\n \"start\": 21,\n \"end\": 22,\n \"mention\": \"in\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NNS\",\n \"start\": 23,\n \"end\": 24,\n \"mention\": \"days\",\n },\n {\"kind\": \"pos\", \"type\": \"TO\", \"start\": 24, \"end\": 25, \"mention\": \"to\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 22,\n \"end\": 24,\n \"mention\": \"two days\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"VB\",\n \"start\": 25,\n \"end\": 26,\n \"mention\": \"take\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"IN\",\n \"start\": 26,\n \"end\": 27,\n \"mention\": \"over\",\n },\n {\n \"kind\": \"chunk\",\n \"type\": \"VP\",\n \"start\": 24,\n \"end\": 26,\n \"mention\": \"to take\",\n },\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 27, \"end\": 28, \"mention\": \"at\"},\n {\"kind\": \"pos\", \"type\": \"DT\", \"start\": 28, \"end\": 29, \"mention\": \"the\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"PP\",\n \"start\": 26,\n \"end\": 28,\n \"mention\": \"over at\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NN\",\n \"start\": 29,\n \"end\": 30,\n \"mention\": \"head\",\n },\n {\"kind\": \"pos\", \"type\": \"IN\", \"start\": 30, \"end\": 31, \"mention\": \"of\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 28,\n \"end\": 30,\n \"mention\": \"the head\",\n },\n {\"kind\": \"pos\", \"type\": \"DT\", \"start\": 31, \"end\": 32, \"mention\": \"the\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"PP\",\n \"start\": 30,\n \"end\": 31,\n \"mention\": \"of\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NN\",\n \"start\": 32,\n \"end\": 33,\n \"mention\": \"county\",\n },\n {\n \"kind\": \"pos\",\n \"type\": \"NN\",\n \"start\": 33,\n \"end\": 34,\n \"mention\": \"championship\",\n },\n {\"kind\": \"pos\", \"type\": \".\", \"start\": 34, \"end\": 35, \"mention\": \".\"},\n {\n \"kind\": \"chunk\",\n \"type\": \"NP\",\n \"start\": 31,\n \"end\": 34,\n \"mention\": \"the county championship\",\n },\n ],\n \"is_complete\": False,\n },\n ]\n\n reader = PartialJsonlReader(model_name=\"roberta-base\")\n\n for i, datum in enumerate(data):\n print(f\"=== {i} ===\")\n tokens = datum.pop(\"tokens\")\n annotations = datum.pop(\"gold_annotations\")\n instance = list(reader.text_to_instances(tokens=tokens, annotations=annotations, **datum))[0]\n tokens = [t.text for t in instance.fields[\"tokens\"]]\n tags = [t for t in instance.fields[\"tags\"].labels]\n print(\" \".join(f\"{token}/{tag}\" for token, tag in zip(tokens, tags)))\n # print(instance.fields[\"tokens\"])\n # print(instance.fields[\"tags\"])\n\n\n# test()"
] | [
[
"numpy.ceil"
]
] |
max-lutz/articles_code | [
"b53f182b249a703c3d51acbef29cd2a023b6dacb"
] | [
"data visualization streamlit/app.py"
] | [
"import streamlit as st \nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nfrom matplotlib.backends.backend_agg import RendererAgg\n\n#Loading the data\[email protected]\ndef get_data_deputies():\n return pd.read_csv(os.path.join(os.getcwd(),'df_dep.csv'))\[email protected]\ndef get_data_political_parties():\n return pd.read_csv(os.path.join(os.getcwd(),'df_polpar.csv'))\n\n#configuration of the page\nst.set_page_config(layout=\"wide\")\n#load dataframes\ndf_dep = get_data_deputies()\ndf_pol_par = get_data_political_parties()\nst.title('French national assembly vizualisation tool')\nst.markdown(\"\"\"\nThis app performs simple visualization from the open data from the french national assembly!\n\"\"\")\n\n\n\nst.sidebar.header('Select what to display')\npol_parties = df_dep['pol party'].unique().tolist()\npol_party_selected = st.sidebar.multiselect('Political parties', pol_parties, pol_parties)\nnb_deputies = df_dep['pol party'].value_counts()\nnb_mbrs = st.sidebar.slider(\"Number of members\", int(nb_deputies.min()), int(nb_deputies.max()), (int(nb_deputies.min()), int(nb_deputies.max())), 1)\n\n#creates masks from the sidebar selection widgets\nmask_pol_par = df_dep['pol party'].isin(pol_party_selected)\n#get the parties with a number of members in the range of nb_mbrs\nmask_mbrs = df_dep['pol party'].value_counts().between(nb_mbrs[0], nb_mbrs[1]).to_frame()\nmask_mbrs= mask_mbrs[mask_mbrs['pol party'] == 1].index.to_list()\nmask_mbrs= df_dep['pol party'].isin(mask_mbrs)\n\ndf_dep_filtered = df_dep[mask_pol_par & mask_mbrs]\nst.write(df_dep_filtered)\n\nmatplotlib.use(\"agg\")\n_lock = RendererAgg.lock\n\npol_par = df_dep_filtered['pol party'].value_counts()\n#merge the two dataframe to get a column with the color\ndf = pd.merge(pd.DataFrame(pol_par), df_pol_par, left_index=True, right_on='abreviated_name')\ncolors = df['color'].tolist()\n\nrow0_spacer1, row0_1, row0_spacer2, row0_2, row0_spacer3 = st.beta_columns((0.2, 1, .2, 1, .2))\nwith row0_1, _lock:\n st.header(\"Political parties\")\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.pie(pol_par, labels=(pol_par.index + ' (' + pol_par.map(str)\n + ')'), wedgeprops = { 'linewidth' : 7, 'edgecolor' : 'white'\n }, colors=colors)\n #display a white circle in the middle of the pie chart\n p = plt.gcf()\n p.gca().add_artist(plt.Circle( (0,0), 0.7, color='white'))\n st.pyplot(fig)\n\nwith row0_2:\n df = df.reset_index(drop=True)\n t = ''\n for i in range(len(df)):\n t=t+df.loc[i,'abreviated_name']+' : '+df.loc[i,'name']+' \\n'\n for i in range(5):\n st.write(\"\")\n st.write(t)\n\ndf = df_dep[mask_pol_par & mask_mbrs]\ndf_sex = pd.concat([df, pd.get_dummies((df)['sex'], prefix='sex')],axis=1)\n#we group by political parties and sum the male and female\ndf_sex = df_sex.groupby(['pol party']).agg({'sex_female':'sum','sex_male':'sum'})\n#calculate the proportion of women per parties\ndf_sex['pol party'] = df_sex.index\ndf_sex['total'] = df_sex['sex_female'].astype(int) + df_sex['sex_male']\ndf_sex['ratio_f'] = df_sex['sex_female']/df_sex['total']\n\ndf_sex = pd.merge(df_sex, df_pol_par, left_index=True, right_on='abreviated_name')\ndf_sex = df_sex.sort_values(by=['ratio_f'], ascending=False)\ncolors = df_sex['color'].tolist()\n\nrow2_spacer1, row2_1, row2_spacer2, row2_2, row2_spacer3 = st.beta_columns((0.2, 1, .2, 1, .2))\nwith row2_1, _lock:\n st.header('Women deputies')\n fig, ax = plt.subplots(figsize=(5, 5))\n sns.barplot(x=\"ratio_f\", y=\"pol party\", data=df_sex, \n ax=ax, 
palette=colors)\n ax.set_ylabel('Political party')\n ax.set_xlabel('Percentage of women deputies')\n i = 0\n text = (df_sex['ratio_f'].round(2)*100).astype(int).to_list()\n for rect in ax.patches:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2., rect.get_y() \n + height * 3 / 4., str(text[i])+'%', ha='center', \n va='bottom', rotation=0, color='white', fontsize=12)\n i = i + 1\n st.pyplot(fig)"
] | [
[
"matplotlib.use",
"pandas.merge",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.Circle",
"pandas.get_dummies"
]
] |
shinya7y/gangealing | [
"6e6897640145544496c3115bf3f5b6209c89c7a0"
] | [
"applications/flow_scores.py"
] | [
"\"\"\"\nThis script runs a pre-trained Spatial Transformer on an input dataset and records the smoothness of the flow field\nproduced by the STN for every image. These smoothness values are treated as scores which can be used to filter the\ndataset. An image with low (highly negative) smoothness corresponds to an image that should be removed.\n\"\"\"\nimport os\nimport sys\nsys.path.insert(1, os.path.dirname(sys.path[0]))\nimport torch\nfrom torch.utils.data import Subset\nfrom tqdm import tqdm\nfrom models import total_variation_loss\nfrom applications import base_eval_argparse, load_stn, determine_flips\nfrom utils.distributed import setup_distributed, synchronize, all_gather, primary\nfrom datasets import img_dataloader\n\n\ndef get_flow_scores(args, path, t):\n flow_score_path = f'{path}/flow_scores.pt'\n if os.path.exists(flow_score_path): # Return a cached copy of flow scores\n return torch.load(flow_score_path)\n else: # Compute and cache flow scores:\n return compute_flow_scores(args, t)\n\n\[email protected]_mode()\ndef compute_flow_scores(args, t):\n loader = img_dataloader(args.real_data_path, resolution=args.real_size, batch_size=args.batch, shuffle=False,\n distributed=args.distributed, infinite=False)\n num_total = len(loader.dataset)\n scores = []\n pbar = tqdm(loader) if primary() else loader\n for batch in pbar:\n batch = batch.to('cuda')\n batch, _, _ = determine_flips(args, t, None, batch)\n _, flows = t(batch, return_flow=True, iters=args.iters, padding_mode=args.padding_mode)\n smoothness = total_variation_loss(flows, reduce_batch=False)\n scores.append(smoothness)\n scores = -torch.cat(scores, 0) # lower (more negative) scores indicate worse images\n synchronize()\n scores = all_gather(scores, cat=False)\n scores = scores.permute(1, 0).reshape(-1)[:num_total]\n if primary():\n score_path = f'{args.real_data_path}/flow_scores.pt'\n torch.save(scores.cpu(), score_path)\n print(f'num_scores = {scores.size(0)}')\n print(f'Flow scores saved at {score_path}')\n return scores\n\n\ndef get_high_score_indices(scores, fraction_retained):\n q = 1 - fraction_retained\n min_score = torch.quantile(scores, q)\n high_score_indices, = torch.where(scores > min_score)\n return high_score_indices.tolist()\n\n\ndef filter_dataset(dataset, scores, fraction_retained):\n \"\"\"\n This function removes.\n :param dataset: PyTorch Dataset instance to filter\n :param scores: 1D tensor of scores, with same size as dataset or a path to the scores\n :param fraction_retained: float between 0 and 1, the fraction of images from dataset to retain. The images with\n lowest scores will be dropped.\n :return: PyTorch Dataset instance with lowest scoring images removed from the dataset\n \"\"\"\n if isinstance(scores, str):\n scores = torch.load(scores)\n high_score_indices = get_high_score_indices(scores, fraction_retained)\n filtered_dataset = Subset(dataset, high_score_indices)\n return filtered_dataset\n\n\nif __name__ == '__main__':\n parser = base_eval_argparse()\n args = parser.parse_args()\n assert args.num_heads == 1, 'Clustering not currently supported for flow_scores.py'\n args.distributed = setup_distributed(args.local_rank)\n t_ema = load_stn(args)\n compute_flow_scores(args, t_ema)\n"
] | [
[
"torch.cat",
"torch.quantile",
"torch.inference_mode",
"torch.load",
"torch.utils.data.Subset",
"torch.where"
]
] |
tangshixiang/HCD | [
"a843208bf749622d0fb118b9898c8103dd7208c5"
] | [
"spcl/models/hm_final.py"
] | [
"import numpy as np\nimport random\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch import nn, autograd\nfrom spcl.utils.faiss_rerank import compute_jaccard_distance_inital_rank,compute_jaccard_distance_inital_rank_index,compute_knn\nfrom collections import defaultdict\n\n\nclass HM(autograd.Function):\n\n @staticmethod\n def forward(ctx, inputs, indexes, features, domain,labels,source_classes,num_samples,momentum,changelabel_thre,k1,k2,change_cnt,label_cache,confidence,gcn_n,gcn_s):\n ctx.features = features\n ctx.momentum = momentum\n ctx.domain=domain\n ctx.change_cnt=change_cnt\n ctx.source_classes=source_classes\n ctx.save_for_backward(inputs, indexes)\n\n outputs = inputs.mm(ctx.features.t())\n\n return outputs\n\n @staticmethod\n def backward(ctx, grad_outputs):\n inputs, indexes = ctx.saved_tensors\n grad_inputs = None\n if ctx.needs_input_grad[0]:\n grad_inputs = grad_outputs.mm(ctx.features)\n\n for x, y in zip(inputs, indexes):\n ctx.features[y] = ctx.momentum * ctx.features[y] + (1. - ctx.momentum) * x\n ctx.features[y] /= ctx.features[y].norm()\n\n ctx.change_cnt[indexes]+=1\n return grad_inputs, None, None, None,None, None, None,None, None, None,None,None,None,None,None,None\n\n\ndef hm(inputs, indexes, features,domain,labels,source_classes,num_samples,momentum=0.5,changelabel_thre=0.3,k1=10,k2=1,change_cnt=None,label_cache=None,confidence=None,gcn_n=None,gcn_s=None):\n return HM.apply(inputs, indexes, features,domain, labels,source_classes,num_samples,torch.Tensor([momentum]).to(inputs.device),changelabel_thre,k1,k2,change_cnt,label_cache,confidence,gcn_n,gcn_s)\n\n\nclass HybridMemory(nn.Module):\n def __init__(self, num_features, num_samples, source_classes,source_samples,temp=0.05, momentum=0.2,changelabel_thre=0.3,cluster_k1=10,cluster_k2=1,iterative=2):\n super(HybridMemory, self).__init__()\n self.num_features = num_features\n self.num_samples = num_samples\n self.source_classes=source_classes\n self.source_samples=source_samples\n\n self.momentum = momentum\n self.temp = temp\n #for clustering\n self.changelabel_thre=changelabel_thre\n self.cluster_k1=cluster_k1\n self.cluster_k2=cluster_k2\n\n self.register_buffer('features', torch.zeros(num_samples, num_features))\n\n self.register_buffer('label_cache', torch.zeros(1).long()) #not use now\n self.register_buffer('change_cnt', torch.zeros(num_samples).long())\n self.iterative=iterative\n\n self.labels = []\n for i in range(iterative):\n self.labels.append(torch.zeros(num_samples).long().cuda())\n\n\n def forward(self, inputs, indexes,domain=0,gcn_n=None,gcn_s=None):#domain=0:source domain=1:target\n # inputs: B*2048, features: L*2048\n inputs= hm(inputs, indexes, self.features, domain,self.labels,self.source_classes,self.num_samples,self.momentum,self.changelabel_thre,self.cluster_k1,self.cluster_k2,self.change_cnt,self.label_cache,None,gcn_n,gcn_s)\n inputs /= self.temp#<f1,f2>/temp\n B = inputs.size(0)\n\n def masked_softmax(vec, mask, dim=1, epsilon=1e-6):\n exps = torch.exp(vec)\n masked_exps = exps * mask.float().clone()\n masked_sums = masked_exps.sum(dim, keepdim=True) + epsilon#overflow?\n return (masked_exps/masked_sums)\n\n #print(self.labels[indexes])\n #import pdb;pdb.set_trace()\n targets=self.labels[-1][indexes].clone()\n labels=self.labels[-1].clone()\n\n sim = torch.zeros(labels.max()+1, B).float().cuda()\n sim.index_add_(0, labels, inputs.t().contiguous())#sim for each label\n nums = torch.zeros(labels.max()+1, 1).float().cuda()\n nums.index_add_(0, labels, 
torch.ones(self.num_samples,1).float().cuda())\n mask = (nums>0).float()\n sim /= (mask*nums+(1-mask)).clone().expand_as(sim)#mean-->center\n mask = mask.expand_as(sim)\n masked_sim = masked_softmax(sim.t().contiguous(), mask.t().contiguous())\n\n del sim,nums\n return F.nll_loss(torch.log(masked_sim+1e-6), targets)\n"
] | [
[
"torch.zeros",
"torch.Tensor",
"torch.ones",
"torch.log",
"torch.exp"
]
] |
yqiuu/swing | [
"56c7ebf432976a9e8ccebc3d48640e48ff782f42"
] | [
"swing/tests/test_consistency.py"
] | [
"import pytest\nimport numpy as np\nfrom swing import ArtificialBeeColony, ParticleSwarm\n\n\ndef cost_func(x):\n x = np.asarray(x)\n return np.sum(x*x)\n\n\ndef compare_memo(memo_a, memo_b):\n np.testing.assert_array_equal(memo_a['iter'], memo_b['iter'])\n np.testing.assert_array_equal(memo_a['ncall'], memo_b['ncall'])\n np.testing.assert_allclose(memo_a['pos'], memo_b['pos'])\n np.testing.assert_allclose(memo_a['cost'], memo_b['cost'])\n\n\ndef test_consistency(tmpdir):\n def run(minimizer, tmpfile):\n bounds = [(-2.2, 4.7)]*5\n lb, ub = np.asarray(bounds).T\n # Run a fiducial model\n niter = 40\n rstate = np.random.RandomState(seed=20070831)\n op_0 = minimizer(cost_func, bounds, rstate=rstate)\n op_0.swarm(niter=niter)\n # Run each iteration manully\n rstate = np.random.RandomState(seed=20070831)\n op_1 = minimizer(cost_func, bounds, rstate=rstate)\n for i_iter in range(niter):\n info = op_1.swarm(niter=1)\n for data in info.values():\n for pos, cost in zip(data['pos'], data['cost']):\n # Test consistency\n assert(np.isclose(cost, cost_func(pos)))\n # Test if all points are within the bounds\n for p in pos:\n assert(np.all(p >= lb) & np.all(p <= ub))\n compare_memo(op_0.memo, op_1.memo)\n # Test a restart run\n niter_restart = 23\n rstate = np.random.RandomState(seed=20070831)\n op_2 = minimizer(cost_func, bounds, rstate=rstate)\n op_2.swarm(niter=niter-niter_restart)\n op_2.save_checkpoint(tmpfile)\n op_2 = minimizer(cost_func, bounds, restart_file=tmpfile)\n op_2.swarm(niter=niter_restart)\n compare_memo(op_0.memo, op_2.memo)\n\n # Run tests for all optimizer\n tmpfile = tmpdir.mkdir('tmp').join('checkpoint')\n for target in [ArtificialBeeColony, ParticleSwarm]:\n run(target, tmpfile)\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.sum",
"numpy.testing.assert_array_equal",
"numpy.all"
]
] |
Chang-Chia-Chi/Parallel-Music-Generator | [
"a500e3cd75b40c0a1413f20cdbce42fdb91167a1"
] | [
"musicGen/musicGen.py"
] | [
"import os\nimport glob\nimport json\nimport time\nimport numpy as np\nimport multiprocessing as mp\nfrom const_gen import *\n\nmusic_id = 0\n\nMajorHigh = 1\nMajorLow = 2\nMinorHigh = 3\nMinorLow = 4\nMajorChord = 5\nMinorChord = 6\n\nmajor_types = np.array([MajorChord, MajorChord, MajorLow, MajorLow, MajorHigh, \n MajorHigh, MajorChord, MajorChord, MajorLow, MajorHigh])\nminor_types = np.array([MinorChord, MinorChord, MinorLow, MinorLow, MinorHigh, \n MinorHigh, MinorChord, MinorChord, MinorLow, MinorHigh])\n\nmajor_high = None\nmajor_low = None\nminor_high = None\nminor_low = None\nmajor_chord = None\nminor_chord = None\n\ndef load_matrices(npy_folder):\n global major_high\n global major_low\n global minor_high\n global minor_low\n global major_chord\n global minor_chord\n\n print(\"Start Loading Matrices...\")\n npy_files = [os.path.join(npy_folder, path) for path in os.listdir(npy_folder)]\n for n_file in npy_files:\n if \"Major\" in n_file and \"Chord\" in n_file:\n major_chord = np.load(n_file)\n elif \"Minor\" in n_file and \"Chord\" in n_file:\n minor_chord = np.load(n_file)\n elif \"Major\" in n_file and \"High\" in n_file:\n major_high = np.load(n_file)\n elif \"Major\" in n_file and \"Low\" in n_file:\n major_low = np.load(n_file)\n elif \"Minor\" in n_file and \"High\" in n_file:\n minor_high = np.load(n_file)\n elif \"Minor\" in n_file and \"Low\" in n_file:\n minor_low = np.load(n_file) \n\n print(\"Complete Reading All Matrices\")\n\ndef matrix2prob(matrix):\n new_matrix = np.zeros_like(matrix)\n for i in range(matrix.shape[0]):\n row_sum = sum(matrix[i])\n if row_sum == 0:\n new_matrix[i] = 1 / matrix.shape[1]\n else:\n new_matrix[i, :] = matrix[i, :] / row_sum\n return new_matrix\n \ndef matrices2probMP():\n global major_high\n global major_low\n global minor_high\n global minor_low\n global major_chord\n global minor_chord\n\n print(\"Start Converting Matrices to Prob Matrices...\")\n processes = []\n matrices = [major_high, major_low, minor_high, minor_low, major_chord, minor_chord]\n \n pool = mp.Pool(processes=6)\n prob_matrices = pool.map(matrix2prob, matrices)\n major_high = prob_matrices[0].copy()\n major_low = prob_matrices[1].copy()\n minor_high = prob_matrices[2].copy()\n minor_low = prob_matrices[3].copy()\n major_chord = prob_matrices[4].copy()\n minor_chord = prob_matrices[5].copy() \n\n print(\"Complete Converting Matrices to Prob Matrices\")\n pool.close()\n\ndef matrices2probSeq():\n global major_high\n global major_low\n global minor_high\n global minor_low\n global major_chord\n global minor_chord\n\n print(\"Start Converting Matrices to Prob Matrices...\")\n matrices = [major_high, major_low, minor_high, minor_low, major_chord, minor_chord]\n prob_matrices = []\n for mat in matrices:\n new_matrix = np.zeros_like(mat)\n for i in range(mat.shape[0]):\n row_sum = sum(mat[i])\n if row_sum == 0:\n new_matrix[i] = 1 / mat.shape[1]\n else:\n new_matrix[i, :] = mat[i, :] / row_sum\n\n prob_matrices.append(new_matrix)\n\n major_high = prob_matrices[0].copy()\n major_low = prob_matrices[1].copy()\n minor_high = prob_matrices[2].copy()\n minor_low = prob_matrices[3].copy()\n major_chord = prob_matrices[4].copy()\n minor_chord = prob_matrices[5].copy()\n\n print(\"Complete Converting Matrices to Prob Matrices\")\n\ndef get_next_note(prev_tone, prev_dur, m_type, matrix):\n if m_type in [1, 2, 3, 4]: # melodic line\n if prev_tone == -1:\n curr_tone = np.random.choice(12, p = [0.5, 0, 0.1, 0.1, 0.1, 0.2, 0, 0, 0, 0, 0, 0])\n curr_tone = np.random.randint(OCTAVE_SAPN) 
* 12 + curr_tone\n curr_dur = np.random.randint(0, NUM_DURATION)\n else: # by markov matrix\n row = prev_tone * NUM_DURATION + prev_dur\n curr_note = np.random.choice(matrix.shape[1], p=matrix[row])\n curr_tone = int(curr_note/NUM_DURATION)\n curr_dur = curr_note % NUM_DURATION\n else: # chord\n if prev_tone == -1:\n if m_type == 5:\n mid = 3\n elif m_type == 6:\n mid = 4\n\n curr_tone = np.random.choice([7 + mid * 144, mid + 7*12, mid * 12 + 7 * 144]) + CHORD_BASE\n else:\n row = prev_tone - CHORD_BASE\n curr_tone = np.random.choice(matrix.shape[1], p=matrix[row])\n curr_tone += CHORD_BASE\n \n curr_dur = np.random.randint(4, NUM_DURATION)\n\n return (int(curr_tone), int(curr_dur))\n\ndef music_gentype(matrix, m_type):\n np.random.seed() # to prevent sub-process having same seed as main process\n music_gen = []\n prev_tone = -1\n prev_dur = -1\n beats_gen = 0\n while beats_gen < NUM_BEATS:\n curr_tone, curr_dur = get_next_note(prev_tone, prev_dur, m_type, matrix)\n beats_gen += (curr_dur + 1)\n prev_tone = curr_tone\n prev_dur = curr_dur\n if beats_gen > NUM_BEATS: # If beats generated exceed limited length, chop off\n curr_dur -= beats_gen - NUM_BEATS\n music_gen.append([curr_tone, curr_dur])\n return music_gen\n\ndef gen_wrapper(matrices, func):\n def wrapper(m_type):\n return func(matrices[m_type - 1], m_type)\n return wrapper\n\ndef music_genMP(matrices, tune):\n if tune == 1:\n music_types = minor_types\n elif tune == 2:\n music_types = major_types\n\n pool = mp.Pool(processes=10)\n args = [(matrices[m_type - 1], m_type) for m_type in music_types]\n musics = pool.starmap(music_gentype, args)\n pool.close()\n\n return musics\n\ndef music_genSeq(matrices, tune):\n if tune == 1:\n music_types = minor_types\n elif tune == 2:\n music_types = major_types\n \n musics = []\n for type in music_types:\n wrap_fun = gen_wrapper(matrices, music_gentype)\n musics.append(wrap_fun(type))\n \n return musics\n\ndef get_music(tune):\n n_folder_name = \"matrix_npy\"\n current_path = os.path.dirname(__file__)\n npy_folder = os.path.join(current_path, n_folder_name)\n load_matrices(npy_folder)\n matrices2probMP()\n\n matrices = [major_high, major_low, minor_high, minor_low, major_chord, minor_chord]\n # print(\"Start sequential music generation\")\n # t_s = time.time()\n # musics = music_genSeq(matrices, tune)\n # print(\"Complete sequential music generation\")\n # print(\"Time of sequential music generation: {:.2f} s\".format(time.time() - t_s))\n\n print(\"Start parallel music generation\")\n t_s = time.time()\n musics = music_genMP(matrices, tune)\n print(\"Complete parallel music generation\")\n print(\"Time of parallel music generation: {:.2f} s\".format(time.time()- t_s))\n return json.dumps({'id':music_id, 'music':musics})\n\n# interface for websocket\ndef main(tune):\n return get_music(tune)\n\n#main(tune=1)"
] | [
[
"numpy.zeros_like",
"numpy.array",
"numpy.random.choice",
"numpy.random.seed",
"numpy.load",
"numpy.random.randint"
]
] |
uiuc-covid19-modeling/pydemic | [
"3c0af60c2ac7e0dbf722584f61c45f9a2f993521"
] | [
"pydemic/mitigation.py"
] | [
"__copyright__ = \"\"\"\nCopyright (C) 2020 George N Wong\nCopyright (C) 2020 Zachary J Weiner\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import PchipInterpolator\n\n__doc__ = \"\"\"\n.. currentmodule:: pydemic\n.. autoclass:: MitigationModel\n\"\"\"\n\n\nclass MitigationModel(PchipInterpolator):\n \"\"\"\n An interface for creating (smooth, monotonic) piecewise linear functions.\n Subclasses :class:`scipy.interpolate.PchipInterpolator`.\n\n Constructs the interpolating function which takes the constant values\n ``factors[0]`` between ``t0`` and ``t[0]`` and ``factors[-1]`` between\n ``t[-1]`` and ``tf``.\n\n :arg t0: A :class:`float` representing the first input value for\n interpolation.\n\n :arg tf: A :class:`float` representing the last input value for\n interpolation.\n\n :arg t: A :class:`numpy.ndarray` of interpolating nodes\n (between ``t0`` and ``tf``).\n\n :arg factors: A :class:`numpy.ndarray` of function values to interpolate to\n at the nodes ``t``.\n\n .. 
automethod:: init_from_kwargs\n \"\"\"\n\n def __init__(self, t0, tf, t, factors, multiplier=None):\n self.init_args = (t0, tf, t, factors)\n\n self.times = t\n self.factors = factors\n if len(t) > 0:\n t = np.insert(t, 0, min(t0, t[0]) - 10)\n t = np.append(t, max(tf, t[-1]) + 10)\n if len(factors) > 0:\n factors = np.insert(factors, 0, factors[0])\n factors = np.append(factors, factors[-1])\n else:\n t = np.array([t0 - 10, tf + 10])\n factors = np.array([1, 1])\n\n self.multiplier = multiplier\n super().__init__(t, factors)\n\n @classmethod\n def init_from_kwargs(cls, t0, tf, prefix=\"mitigation\", **kwargs):\n \"\"\"\n A convenience constructor which collects values for ``t`` based on (sorted)\n keyword arguments beginning with ``mitigation_t`` with ``factors``\n from those beginning with ``mitigation_factor``.\n \"\"\"\n\n factor_keys = sorted(\n (name for name in kwargs.keys() if name.startswith(f\"{prefix}_factor\")),\n key=lambda name: int(name.strip(f\"{prefix}_factor\"))\n )\n factors = np.array([kwargs.pop(key) for key in factor_keys])\n\n time_keys = sorted(\n (name for name in kwargs.keys() if name.startswith(f\"{prefix}_t\")),\n key=lambda name: int(name.strip(f\"{prefix}_t\"))\n )\n times = np.array([kwargs.pop(key) for key in time_keys])\n\n return cls(t0, tf, times, factors)\n\n def __mul__(self, other):\n if self.multiplier is not None:\n multiplier = other * self.multiplier\n else:\n multiplier = other\n return MitigationModel(*self.init_args, multiplier=multiplier)\n\n __rmul__ = __mul__\n\n def __call__(self, x, **kwargs):\n res = super().__call__(x, **kwargs)\n if self.multiplier is not None:\n res *= self.multiplier(x, **kwargs)\n return res\n"
] | [
[
"numpy.array",
"numpy.insert",
"numpy.append"
]
] |
nicolalandro/nnsvs | [
"45da00218dd0a445c8483f11ac891c6ef00d3925"
] | [
"nnsvs/bin/train_resf0.py"
] | [
"from pathlib import Path\n\nimport hydra\nimport numpy as np\nimport torch\nfrom hydra.utils import to_absolute_path\nfrom nnsvs.base import PredictionType\nfrom nnsvs.mdn import mdn_loss\nfrom nnsvs.pitch import nonzero_segments\nfrom nnsvs.train_util import save_checkpoint, setup\nfrom nnsvs.util import make_non_pad_mask\nfrom omegaconf import DictConfig, OmegaConf\nfrom torch import nn\nfrom tqdm import tqdm\n\n\ndef note_segments(lf0_score_denorm):\n \"\"\"Compute note segments (start and end indices) from log-F0\n\n Note that unvoiced frames must be set to 0 in advance.\n\n Args:\n lf0_score_denorm (Tensor): (B, T)\n\n Returns:\n list: list of note (start, end) indices\n \"\"\"\n segments = []\n for s, e in nonzero_segments(lf0_score_denorm):\n out = torch.sign(torch.abs(torch.diff(lf0_score_denorm[s : e + 1])))\n transitions = torch.where(out > 0)[0]\n note_start, note_end = s, -1\n for pos in transitions:\n note_end = int(s + pos)\n segments.append((note_start, note_end))\n note_start = note_end\n\n return segments\n\n\ndef compute_pitch_regularization_weight(segments, N, decay_size=25, max_w=0.5):\n \"\"\"Compute pitch regularization weight given note segments\n\n Args:\n segments (list): list of note (start, end) indices\n N (int): number of frames\n decay_size (int): size of the decay window\n max_w (float): maximum weight\n\n Returns:\n Tensor: weights of shape (N,)\n \"\"\"\n w = torch.zeros(N)\n\n for s, e in segments:\n L = e - s\n w[s:e] = max_w\n if L > decay_size * 2:\n w[s : s + decay_size] *= torch.arange(decay_size) / decay_size\n w[e - decay_size : e] *= torch.arange(decay_size - 1, -1, -1) / decay_size\n\n return w\n\n\ndef compute_batch_pitch_regularization_weight(lf0_score_denorm):\n \"\"\"Batch version of computing pitch regularization weight\n\n Args:\n lf0_score_denorm (Tensor): (B, T)\n\n Returns:\n Tensor: weights of shape (B, N, 1)\n \"\"\"\n B, T = lf0_score_denorm.shape\n w = torch.zeros_like(lf0_score_denorm)\n for idx in range(len(lf0_score_denorm)):\n segments = note_segments(lf0_score_denorm[idx])\n w[idx, :] = compute_pitch_regularization_weight(segments, T).to(w.device)\n\n return w.unsqueeze(-1)\n\n\ndef train_step(\n model,\n optimizer,\n train,\n in_feats,\n out_feats,\n lengths,\n pitch_reg_dyn_ws,\n pitch_reg_weight=1.0,\n):\n optimizer.zero_grad()\n\n criterion = nn.MSELoss(reduction=\"none\")\n\n # Apply preprocess if required (e.g., FIR filter for shallow AR)\n # defaults to no-op\n out_feats = model.preprocess_target(out_feats)\n\n # Run forward\n pred_out_feats, lf0_residual = model(in_feats, lengths)\n\n # Mask (B, T, 1)\n mask = make_non_pad_mask(lengths).unsqueeze(-1).to(in_feats.device)\n\n # Compute loss\n if model.prediction_type() == PredictionType.PROBABILISTIC:\n pi, sigma, mu = pred_out_feats\n\n # (B, max(T)) or (B, max(T), D_out)\n mask_ = mask if len(pi.shape) == 4 else mask.squeeze(-1)\n # Compute loss and apply mask\n loss = mdn_loss(pi, sigma, mu, out_feats, reduce=False)\n loss = loss.masked_select(mask_).mean()\n else:\n loss = criterion(\n pred_out_feats.masked_select(mask), out_feats.masked_select(mask)\n ).mean()\n\n # Pitch regularization\n # NOTE: l1 loss seems to be better than mse loss in my experiments\n # we could use l2 loss as suggested in the sinsy's paper\n loss += (\n pitch_reg_weight\n * (pitch_reg_dyn_ws * lf0_residual.abs()).masked_select(mask).mean()\n )\n\n if train:\n loss.backward()\n optimizer.step()\n\n return loss\n\n\ndef train_loop(\n config,\n logger,\n device,\n model,\n optimizer,\n 
lr_scheduler,\n data_loaders,\n writer,\n in_scaler,\n):\n out_dir = Path(to_absolute_path(config.train.out_dir))\n best_loss = torch.finfo(torch.float32).max\n\n in_lf0_idx = config.data.in_lf0_idx\n in_rest_idx = config.data.in_rest_idx\n if in_lf0_idx is None or in_rest_idx is None:\n raise ValueError(\"in_lf0_idx and in_rest_idx must be specified\")\n pitch_reg_weight = config.train.pitch_reg_weight\n\n for epoch in tqdm(range(1, config.train.nepochs + 1)):\n for phase in data_loaders.keys():\n train = phase.startswith(\"train\")\n model.train() if train else model.eval()\n running_loss = 0\n for in_feats, out_feats, lengths in data_loaders[phase]:\n # NOTE: This is needed for pytorch's PackedSequence\n lengths, indices = torch.sort(lengths, dim=0, descending=True)\n in_feats, out_feats = (\n in_feats[indices].to(device),\n out_feats[indices].to(device),\n )\n # Compute denormalized log-F0 in the musical scores\n lf0_score_denorm = (\n in_feats[:, :, in_lf0_idx]\n * float(\n in_scaler.data_max_[in_lf0_idx]\n - in_scaler.data_min_[in_lf0_idx]\n )\n + in_scaler.data_min_[in_lf0_idx]\n )\n # Fill zeros for rest and padded frames\n lf0_score_denorm *= (in_feats[:, :, in_rest_idx] <= 0).float()\n for idx, length in enumerate(lengths):\n lf0_score_denorm[idx, length:] = 0\n # Compute time-variant pitch regularization weight vector\n pitch_reg_dyn_ws = compute_batch_pitch_regularization_weight(\n lf0_score_denorm\n )\n\n loss = train_step(\n model,\n optimizer,\n train,\n in_feats,\n out_feats,\n lengths,\n pitch_reg_dyn_ws,\n pitch_reg_weight,\n )\n running_loss += loss.item()\n ave_loss = running_loss / len(data_loaders[phase])\n writer.add_scalar(f\"Loss/{phase}\", ave_loss, epoch)\n\n ave_loss = running_loss / len(data_loaders[phase])\n logger.info(\"[%s] [Epoch %s]: loss %s\", phase, epoch, ave_loss)\n if not train and ave_loss < best_loss:\n best_loss = ave_loss\n save_checkpoint(\n logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=True\n )\n\n lr_scheduler.step()\n if epoch % config.train.checkpoint_epoch_interval == 0:\n save_checkpoint(\n logger, out_dir, model, optimizer, lr_scheduler, epoch, is_best=False\n )\n\n save_checkpoint(\n logger, out_dir, model, optimizer, lr_scheduler, config.train.nepochs\n )\n logger.info(\"The best loss was %s\", best_loss)\n\n\ndef _check_resf0_config(logger, model, config, in_scaler, out_scaler):\n logger.info(\"Checking model configs for residual F0 prediction\")\n if in_scaler is None or out_scaler is None:\n raise ValueError(\"in_scaler and out_scaler must be specified\")\n\n in_lf0_idx = config.data.in_lf0_idx\n in_rest_idx = config.data.in_rest_idx\n out_lf0_idx = config.data.out_lf0_idx\n if in_lf0_idx is None or in_rest_idx is None or out_lf0_idx is None:\n raise ValueError(\"in_lf0_idx, in_rest_idx and out_lf0_idx must be specified\")\n\n logger.info(\"in_lf0_idx: %s\", in_lf0_idx)\n logger.info(\"in_rest_idx: %s\", in_rest_idx)\n logger.info(\"out_lf0_idx: %s\", out_lf0_idx)\n\n ok = True\n if hasattr(model, \"in_lf0_idx\"):\n if model.in_lf0_idx != in_lf0_idx:\n logger.warn(\n \"in_lf0_idx in model and data config must be same\",\n model.in_lf0_idx,\n in_lf0_idx,\n )\n ok = False\n if hasattr(model, \"out_lf0_idx\"):\n if model.out_lf0_idx != out_lf0_idx:\n logger.warn(\n \"out_lf0_idx in model and data config must be same\",\n model.out_lf0_idx,\n out_lf0_idx,\n )\n ok = False\n\n if hasattr(model, \"in_lf0_min\") and hasattr(model, \"in_lf0_max\"):\n # Inject values from the input scaler\n if model.in_lf0_min is None or 
model.in_lf0_max is None:\n model.in_lf0_min = in_scaler.data_min_[in_lf0_idx]\n model.in_lf0_max = in_scaler.data_max_[in_lf0_idx]\n\n logger.info(\"in_lf0_min: %s\", model.in_lf0_min)\n logger.info(\"in_lf0_max: %s\", model.in_lf0_max)\n if not np.allclose(model.in_lf0_min, in_scaler.data_min_[model.in_lf0_idx]):\n logger.warn(\n f\"in_lf0_min is set to {model.in_lf0_min}, \"\n f\"but should be {in_scaler.data_min_[model.in_lf0_idx]}\"\n )\n ok = False\n if not np.allclose(model.in_lf0_max, in_scaler.data_max_[model.in_lf0_idx]):\n logger.warn(\n f\"in_lf0_max is set to {model.in_lf0_max}, \"\n f\"but should be {in_scaler.data_max_[model.in_lf0_idx]}\"\n )\n ok = False\n\n if hasattr(model, \"out_lf0_mean\") and hasattr(model, \"out_lf0_scale\"):\n # Inject values from the output scaler\n if model.out_lf0_mean is None or model.out_lf0_scale is None:\n model.out_lf0_mean = out_scaler.mean_[out_lf0_idx]\n model.out_lf0_scale = out_scaler.scale_[out_lf0_idx]\n\n logger.info(\"model.out_lf0_mean: %s\", model.out_lf0_mean)\n logger.info(\"model.out_lf0_scale: %s\", model.out_lf0_scale)\n if not np.allclose(model.out_lf0_mean, out_scaler.mean_[model.out_lf0_idx]):\n logger.warn(\n f\"out_lf0_mean is set to {model.out_lf0_mean}, \"\n f\"but should be {out_scaler.mean_[model.out_lf0_idx]}\"\n )\n ok = False\n if not np.allclose(model.out_lf0_scale, out_scaler.scale_[model.out_lf0_idx]):\n logger.warn(\n f\"out_lf0_scale is set to {model.out_lf0_scale}, \"\n f\"but should be {out_scaler.scale_[model.out_lf0_idx]}\"\n )\n ok = False\n\n if not ok:\n if (\n model.in_lf0_idx == in_lf0_idx\n and hasattr(model, \"in_lf0_min\")\n and hasattr(model, \"out_lf0_mean\")\n ):\n logger.info(\n f\"\"\"\nIf you are 100% sure that you set model.in_lf0_idx and model.out_lf0_idx correctly,\nPlease consider the following parameters in your model config:\n\n in_lf0_idx: {model.in_lf0_idx}\n out_lf0_idx: {model.out_lf0_idx}\n in_lf0_min: {in_scaler.data_min_[model.in_lf0_idx]}\n in_lf0_max: {in_scaler.data_max_[model.in_lf0_idx]}\n out_lf0_mean: {out_scaler.mean_[model.out_lf0_idx]}\n out_lf0_scale: {out_scaler.scale_[model.out_lf0_idx]}\n\"\"\"\n )\n raise ValueError(\"The model config has wrong configurations.\")\n\n # Overwrite the parameters to the config\n for key in [\"in_lf0_min\", \"in_lf0_max\", \"out_lf0_mean\", \"out_lf0_scale\"]:\n config.model.netG[key] = float(getattr(model, key))\n\n\n@hydra.main(config_path=\"conf/train_resf0\", config_name=\"config\")\ndef my_app(config: DictConfig) -> None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n (\n model,\n optimizer,\n lr_scheduler,\n data_loaders,\n writer,\n logger,\n in_scaler,\n out_scaler,\n ) = setup(config, device)\n\n _check_resf0_config(logger, model, config, in_scaler, out_scaler)\n\n # Save configs again in case the model config has been changed\n out_dir = Path(to_absolute_path(config.train.out_dir))\n with open(out_dir / \"config.yaml\", \"w\") as f:\n OmegaConf.save(config, f)\n with open(out_dir / \"model.yaml\", \"w\") as f:\n OmegaConf.save(config.model, f)\n\n train_loop(\n config,\n logger,\n device,\n model,\n optimizer,\n lr_scheduler,\n data_loaders,\n writer,\n in_scaler,\n )\n\n\ndef entry():\n my_app()\n\n\nif __name__ == \"__main__\":\n my_app()\n"
] | [
[
"torch.zeros",
"torch.nn.MSELoss",
"torch.diff",
"torch.arange",
"torch.finfo",
"numpy.allclose",
"torch.cuda.is_available",
"torch.zeros_like",
"torch.sort",
"torch.where"
]
] |
lsdras/NeMo | [
"498f21a1c695afc5ed5f2f6fb173dc612c9551a7"
] | [
"nemo/collections/nlp/models/duplex_text_normalization/duplex_decoder.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom time import perf_counter\nfrom typing import List, Optional\n\nimport nltk\nimport torch\nimport wordninja\nfrom nemo_text_processing.text_normalization.normalize_with_audio import PYNINI_AVAILABLE, NormalizerWithAudio\nfrom omegaconf import DictConfig\nfrom pytorch_lightning import Trainer\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq\n\nfrom nemo.collections.nlp.data.text_normalization import TextNormalizationDecoderDataset, constants\nfrom nemo.collections.nlp.models.duplex_text_normalization.utils import is_url\nfrom nemo.collections.nlp.models.nlp_model import NLPModel\nfrom nemo.core.classes.common import PretrainedModelInfo\nfrom nemo.utils import logging\nfrom nemo.utils.decorators.experimental import experimental\n\nnltk.download('punkt')\n\n\n__all__ = ['DuplexDecoderModel']\n\n\n@experimental\nclass DuplexDecoderModel(NLPModel):\n \"\"\"\n Transformer-based (duplex) decoder model for TN/ITN.\n \"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n self._tokenizer = AutoTokenizer.from_pretrained(cfg.tokenizer)\n super().__init__(cfg=cfg, trainer=trainer)\n self.model = AutoModelForSeq2SeqLM.from_pretrained(cfg.transformer)\n self.transformer_name = cfg.transformer\n\n # Language\n self.lang = cfg.get('lang', None)\n\n # Covering Grammars\n self.cg_normalizer = None # Default\n # We only support integrating with English TN covering grammars at the moment\n self.use_cg = cfg.get('use_cg', False) and self.lang == constants.ENGLISH\n if self.use_cg:\n self.setup_cgs(cfg)\n\n # Setup covering grammars (if enabled)\n def setup_cgs(self, cfg: DictConfig):\n \"\"\"\n Setup covering grammars (if enabled).\n :param cfg: Configs of the decoder model.\n \"\"\"\n self.use_cg = True\n self.neural_confidence_threshold = cfg.get('neural_confidence_threshold', 0.99)\n self.n_tagged = cfg.get('n_tagged', 1)\n input_case = 'cased' # input_case is cased by default\n if hasattr(self._tokenizer, 'do_lower_case') and self._tokenizer.do_lower_case:\n input_case = 'lower_cased'\n if not PYNINI_AVAILABLE:\n raise Exception(\n \"`pynini` is not installed ! 
\\n\"\n \"Please run the `nemo_text_processing/setup.sh` script\"\n \"prior to usage of this toolkit.\"\n )\n self.cg_normalizer = NormalizerWithAudio(input_case=input_case, lang=self.lang)\n\n # Training\n def training_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the training loop with the data from the training dataloader\n passed in as `batch`.\n \"\"\"\n # Apply Transformer\n outputs = self.model(\n input_ids=batch['input_ids'],\n decoder_input_ids=batch['decoder_input_ids'],\n attention_mask=batch['attention_mask'],\n labels=batch['labels'],\n )\n train_loss = outputs.loss\n\n lr = self._optimizer.param_groups[0]['lr']\n self.log('train_loss', train_loss)\n self.log('lr', lr, prog_bar=True)\n return {'loss': train_loss, 'lr': lr}\n\n # Validation and Testing\n def validation_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the validation loop with the data from the validation dataloader\n passed in as `batch`.\n \"\"\"\n\n # Apply Transformer\n outputs = self.model(\n input_ids=batch['input_ids'],\n decoder_input_ids=batch['decoder_input_ids'],\n attention_mask=batch['attention_mask'],\n labels=batch['labels'],\n )\n val_loss = outputs.loss\n\n return {'val_loss': val_loss}\n\n def validation_epoch_end(self, outputs):\n \"\"\"\n Called at the end of validation to aggregate outputs.\n :param outputs: list of individual outputs of each validation step.\n \"\"\"\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n self.log('val_loss', avg_loss)\n\n return {\n 'val_loss': avg_loss,\n }\n\n def test_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the test loop with the data from the test dataloader\n passed in as `batch`.\n \"\"\"\n return self.validation_step(batch, batch_idx)\n\n def test_epoch_end(self, outputs):\n \"\"\"\n Called at the end of test to aggregate outputs.\n :param outputs: list of individual outputs of each test step.\n \"\"\"\n return self.validation_epoch_end(outputs)\n\n # Functions for inference\n @torch.no_grad()\n def _infer(\n self,\n sents: List[List[str]],\n nb_spans: List[int],\n span_starts: List[List[int]],\n span_ends: List[List[int]],\n inst_directions: List[str],\n ):\n \"\"\" Main function for Inference\n Args:\n sents: A list of inputs tokenized by a basic tokenizer.\n nb_spans: A list of ints where each int indicates the number of semiotic spans in each input.\n span_starts: A list of lists where each list contains the starting locations of semiotic spans in an input.\n span_ends: A list of lists where each list contains the ending locations of semiotic spans in an input.\n inst_directions: A list of str where each str indicates the direction of the corresponding instance (i.e., INST_BACKWARD for ITN or INST_FORWARD for TN).\n\n Returns: A list of lists where each list contains the decoded spans for the corresponding input.\n \"\"\"\n self.eval()\n\n if sum(nb_spans) == 0:\n return [[]] * len(sents)\n model, tokenizer = self.model, self._tokenizer\n try:\n model_max_len = model.config.n_positions\n except AttributeError:\n model_max_len = 512\n ctx_size = constants.DECODE_CTX_SIZE\n extra_id_0 = constants.EXTRA_ID_0\n extra_id_1 = constants.EXTRA_ID_1\n\n # Build all_inputs\n input_centers, input_dirs, all_inputs = [], [], []\n for ix, sent in enumerate(sents):\n cur_inputs = []\n for jx in range(nb_spans[ix]):\n cur_start = span_starts[ix][jx]\n cur_end = span_ends[ix][jx]\n ctx_left = sent[max(0, cur_start - ctx_size) : cur_start]\n ctx_right = sent[cur_end + 1 : cur_end + 
1 + ctx_size]\n span_words = sent[cur_start : cur_end + 1]\n span_words_str = ' '.join(span_words)\n if is_url(span_words_str):\n span_words_str = span_words_str.lower()\n input_centers.append(span_words_str)\n input_dirs.append(inst_directions[ix])\n # Build cur_inputs\n if inst_directions[ix] == constants.INST_BACKWARD:\n cur_inputs = [constants.ITN_PREFIX]\n if inst_directions[ix] == constants.INST_FORWARD:\n cur_inputs = [constants.TN_PREFIX]\n cur_inputs += ctx_left\n cur_inputs += [extra_id_0] + span_words_str.split(' ') + [extra_id_1]\n cur_inputs += ctx_right\n all_inputs.append(' '.join(cur_inputs))\n\n # Apply the decoding model\n batch = tokenizer(all_inputs, padding=True, return_tensors='pt')\n input_ids = batch['input_ids'].to(self.device)\n outputs = model.generate(input_ids, output_scores=True, return_dict_in_generate=True, max_length=model_max_len)\n generated_ids, sequence_toks_scores = outputs['sequences'], outputs['scores']\n generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)\n\n # Use covering grammars (if enabled)\n if self.use_cg:\n # Compute sequence probabilities\n sequence_probs = torch.ones(len(all_inputs)).to(self.device)\n for ix, cur_toks_scores in enumerate(sequence_toks_scores):\n cur_generated_ids = generated_ids[:, ix + 1].tolist()\n cur_toks_probs = torch.nn.functional.softmax(cur_toks_scores, dim=-1)\n # Compute selected_toks_probs\n selected_toks_probs = []\n for jx, _id in enumerate(cur_generated_ids):\n if _id != self._tokenizer.pad_token_id:\n selected_toks_probs.append(cur_toks_probs[jx, _id])\n else:\n selected_toks_probs.append(1)\n selected_toks_probs = torch.tensor(selected_toks_probs).to(self.device)\n sequence_probs *= selected_toks_probs\n\n # For TN cases where the neural model is not confident, use CGs\n neural_confidence_threshold = self.neural_confidence_threshold\n for ix, (_dir, _input, _prob) in enumerate(zip(input_dirs, input_centers, sequence_probs)):\n if _dir == constants.INST_FORWARD and _prob < neural_confidence_threshold:\n if is_url(_input):\n _input = _input.replace(' ', '') # Remove spaces in URLs\n try:\n cg_outputs = self.cg_normalizer.normalize(text=_input, verbose=False, n_tagged=self.n_tagged)\n generated_texts[ix] = list(cg_outputs)[0]\n except: # if there is any exception, fall back to the input\n generated_texts[ix] = _input\n\n # Post processing\n generated_texts = self.postprocess_output_spans(input_centers, generated_texts, input_dirs)\n\n # Prepare final_texts\n final_texts, span_ctx = [], 0\n for nb_span in nb_spans:\n cur_texts = []\n for i in range(nb_span):\n cur_texts.append(generated_texts[span_ctx])\n span_ctx += 1\n final_texts.append(cur_texts)\n\n return final_texts\n\n def postprocess_output_spans(self, input_centers, output_spans, input_dirs):\n en_greek_writtens = list(constants.EN_GREEK_TO_SPOKEN.keys())\n en_greek_spokens = list(constants.EN_GREEK_TO_SPOKEN.values())\n for ix, (_input, _output) in enumerate(zip(input_centers, output_spans)):\n if self.lang == constants.ENGLISH:\n # Handle URL\n if is_url(_input):\n _output = _output.replace('http', ' h t t p ')\n _output = _output.replace('/', ' slash ')\n _output = _output.replace('.', ' dot ')\n _output = _output.replace(':', ' colon ')\n _output = _output.replace('-', ' dash ')\n _output = _output.replace('_', ' underscore ')\n _output = _output.replace('%', ' percent ')\n _output = _output.replace('www', ' w w w ')\n _output = _output.replace('ftp', ' f t p ')\n output_spans[ix] = ' 
'.join(wordninja.split(_output))\n continue\n # Greek letters\n if _input in en_greek_writtens:\n if input_dirs[ix] == constants.INST_FORWARD:\n output_spans[ix] = constants.EN_GREEK_TO_SPOKEN[_input]\n if _input in en_greek_spokens:\n if input_dirs[ix] == constants.INST_FORWARD:\n output_spans[ix] = _input\n if input_dirs[ix] == constants.INST_BACKWARD:\n output_spans[ix] = constants.EN_SPOKEN_TO_GREEK[_input]\n return output_spans\n\n # Functions for processing data\n def setup_training_data(self, train_data_config: Optional[DictConfig]):\n if not train_data_config or not train_data_config.data_path:\n logging.info(\n f\"Dataloader config or file_path for the train is missing, so no data loader for train is created!\"\n )\n self.train_dataset, self._train_dl = None, None\n return\n self.train_dataset, self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, mode=\"train\")\n\n def setup_validation_data(self, val_data_config: Optional[DictConfig]):\n if not val_data_config or not val_data_config.data_path:\n logging.info(\n f\"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!\"\n )\n self.validation_dataset, self._validation_dl = None, None\n return\n self.validation_dataset, self._validation_dl = self._setup_dataloader_from_config(\n cfg=val_data_config, mode=\"val\"\n )\n\n def setup_test_data(self, test_data_config: Optional[DictConfig]):\n if not test_data_config or test_data_config.data_path is None:\n logging.info(\n f\"Dataloader config or file_path for the test is missing, so no data loader for test is created!\"\n )\n self.test_dataset, self._test_dl = None, None\n return\n self.test_dataset, self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, mode=\"test\")\n\n def _setup_dataloader_from_config(self, cfg: DictConfig, mode: str):\n tokenizer, model = self._tokenizer, self.model\n start_time = perf_counter()\n logging.info(f'Creating {mode} dataset')\n input_file = cfg.data_path\n dataset = TextNormalizationDecoderDataset(\n input_file,\n tokenizer,\n self.transformer_name,\n cfg.mode,\n cfg.get('max_decoder_len', tokenizer.model_max_length),\n cfg.get('decoder_data_augmentation', False),\n cfg.lang,\n cfg.do_basic_tokenize,\n cfg.get('use_cache', False),\n cfg.get('max_insts', -1),\n )\n data_collator = DataCollatorForSeq2Seq(\n tokenizer, model=model, label_pad_token_id=constants.LABEL_PAD_TOKEN_ID,\n )\n dl = torch.utils.data.DataLoader(\n dataset=dataset, batch_size=cfg.batch_size, shuffle=cfg.shuffle, collate_fn=data_collator\n )\n running_time = perf_counter() - start_time\n logging.info(f'Took {running_time} seconds')\n return dataset, dl\n\n @classmethod\n def list_available_models(cls) -> Optional[PretrainedModelInfo]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n return result\n"
] | [
[
"torch.stack",
"torch.no_grad",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.functional.softmax"
]
] |
inoue0406/radarJMA | [
"f8996c3fe201f97d414fc96c4abfc6f930738d47"
] | [
"src/mnist_pytorch_dataset.py"
] | [
"import torch \nimport torchvision\nimport numpy as np\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nimport pandas as pd\nimport hickle as hkl\nimport os\n\n# Pytorch custom dataset for Moving MNIST data\n# The class assumes the data to be in hkl format\n\nclass MNISTDataset(data.Dataset):\n def __init__(self,data_file,source_file,tdim_use=10,transform=None):\n \"\"\"\n Args:\n tdim_use: Size of temporal data to be used\n ex) tdim_use=3 means last 3 of X and first 3 of Y are used\n transform (callable, optional): Optional transform to be applied on a sample.\n \"\"\"\n self.tdim_use = tdim_use\n self.transform = transform\n\n # load data\n print('reading from data file and source file:',data_file,source_file)\n self.data = hkl.load(data_file)\n # dimension change (n,H,W,ch) to channels first (n,ch,H,W)\n self.data = np.transpose(self.data,(0,3,1,2))\n \n self.sources = hkl.load(source_file)\n # number of samples\n self.N = int(self.data.shape[0]/(tdim_use*2))\n print('Number of samples:',self.N)\n \n def __len__(self):\n return self.N\n \n def __getitem__(self, index):\n istart = index*(self.tdim_use*2)\n data_X = self.data[istart:(istart+self.tdim_use),:,:,:]\n data_Y = self.data[(istart+self.tdim_use):(istart+self.tdim_use*2),:,:,:]\n sample = {'past': data_X, 'future': data_Y,\n 'fnames_past':'past','fnames_future':'future'}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n"
] | [
[
"numpy.transpose"
]
] |
ZhangMengxia/R2plus1D_TSN_combine | [
"028a51fc15690afc32c3b6c8ac1d22b7902b21ed"
] | [
"network_tsn.py"
] | [
"import torch.nn as nn\nimport torchvision\n\nclass TSNClassifier(nn.Module):\n \"\"\"A reimplementation of TSN model\n Args:\n num_classes: Number of classes\n num_segments: Number of segments\n base_model: backbone model for feature extraction\n dropout: dropout ratio of last fc layer\n \"\"\"\n def __init__(self, num_classes, num_segments=3, clip_len=1, base_model='resnet18', pretrained=True, dropout=0.8):\n super(TSNClassifier,self).__init__()\n self.num_classes = num_classes\n self.num_segments = num_segments\n self.clip_len = clip_len\n self.dropout = dropout\n self._prepare_base_model(base_model, pretrained)\n feature_dim = self._prepare_tsn(num_classes)\n self.consensus = lambda x: x.mean(dim=1)\n\n def _prepare_tsn(self, num_class):\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\n self.new_fc = nn.Linear(feature_dim, num_class)\n\n std = 0.001\n nn.init.normal_(self.new_fc.weight, 0, std)\n nn.init.constant_(self.new_fc.bias, 0)\n return feature_dim\n def _prepare_base_model(self, base_model, pretrained=True):\n\n if hasattr(torchvision.models, base_model):\n self.base_model = getattr(torchvision.models, base_model)(pretrained)\n print('base model', base_model)\n # network surgery\n if self.clip_len > 1:\n assert pretrained==False, \"clip_len larger than 1 cannot use pretrained model\"\n self.base_model.conv1 = nn.Conv2d(3*self.clip_len, 64, \n kernel_size=7, stride=2, padding=3,\n bias=False)\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n else:\n raise ValueError('Unknown base model: {}'.format(base_model))\n\n def forward(self, input):\n clip_len = input.size(2)\n channels = input.size(3)\n # reshape into batch of segments\n base_out = self.base_model(input.view((-1, clip_len*channels) + input.size()[-2:]))\n\n base_out = self.new_fc(base_out)\n # reshape back\n base_out = base_out.view((-1, self.num_segments) + base_out.size()[1:])\n\n output = self.consensus(base_out)\n return output\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.nn.Conv2d"
]
] |
SyedSaifAliAlvi/Image-Data-Augmentation | [
"8c8dca3ef8542cf8ab4d618172de4a8a9ce064cb"
] | [
"image_data_augmentation.py"
] | [
"from skimage import transform\nfrom skimage.util import random_noise\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\nimport glob\nimport random\n\ndef data_augment(X_data,y):\n x = X_data.copy()\n y_new = y.copy()\n for i in X_data:\n k1 = np.fliplr(i)\n x.insert(0,k1)\n y_new.insert(0,1)\n\n k2 = np.flipud(i)\n x.insert(0,k2)\n y_new.insert(0,1)\n\n k3 = transform.rotate(i,random.uniform(-20,20))\n x.insert(0,k3)\n y_new.insert(0,1)\n\n k4 = random_noise(i,mode='salt',clip='True') \n x.insert(0,k4)\n y_new.insert(0,1)\n \n k5 = random_noise(i,mode='gaussian',clip='True') \n x.insert(0,k5)\n y_new.insert(0,1)\n\n k6 = random_noise(np.flipud(i),mode='salt',clip='True') \n x.insert(0,k6)\n y_new.insert(0,1)\n\n k7 = random_noise(np.fliplr(i),mode='salt',clip='True') \n x.insert(0,k7)\n y_new.insert(0,1)\n\n k8 = random_noise(random_noise(np.fliplr(i),mode='gaussian',clip='True'),mode='salt',clip='True') \n x.insert(0,k8)\n y_new.insert(0,1)\n\n k9 = transform.rotate(i,random.uniform(-90,110))\n x.insert(0,k9)\n y_new.insert(0,1)\n\n k10 = random_noise(transform.rotate(i,random.uniform(-90,110)),mode='gaussian',clip='True') \n x.insert(0,k10)\n y_new.insert(0,1)\n\n\n return x,y_new\n\ndef resize(img):\n image = Image.open(img)\n image = image.resize((128,128),Image.ANTIALIAS)\n return image\n\ndef imageToNumpyArray(img):\n N_array = np.asarray(img)\n return N_array\n\ndef toThreeChannel(image):\n img = cv2.imread(image)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img2 = np.zeros_like(img)\n img2[:,:,0] = gray\n img2[:,:,1] = gray\n img2[:,:,2] = gray\n cv2.imwrite(image, img2)\n\ndef convertImagesToArray(path):\n img_array = []\n for image in glob.glob(path):\n toThreeChannel(image)\n R_img = imageToNumpyArray(resize(image))\n img_array.append(R_img)\n return img_array\n\nimageFolderPath = \"/content/*.jpg\"\nimage = convertImagesToArray(imageFolderPath)\n\ny=[]\nX,y = data_augment(image,y)\n\n#Number of images in folder * 11 = Number output images\nfor i in range(11):\n plt.imshow(X[i])\n plt.show()\n\n"
] | [
[
"numpy.zeros_like",
"numpy.asarray",
"numpy.flipud",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.fliplr"
]
] |
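To see the 11x blow-up concretely, here is a small sketch (ours, with dummy arrays standing in for real photos) that calls data_augment from the module above. Note the function labels every variant 1, so as written it only makes sense for the positive class.

import numpy as np

# two dummy RGB float images in place of real data (hypothetical values)
images = [np.random.rand(128, 128, 3) for _ in range(2)]
labels = [1, 1]

X_aug, y_aug = data_augment(images, labels)
print(len(X_aug), len(y_aug))  # 22 22: each image adds 10 variants to the 2 originals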
TuxStory/Python3 | [
"4c1b2291d1613b32aa36b62b0b881ea40b423cce"
] | [
"matplotPIE2.py"
] | [
"import matplotlib.pyplot as plt\n\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'Windows', 'Linux', 'Apple'\nsizes = [83, 5, 10]\nexplode = (0, 0.1, 0) # only \"explode\" the 3rd slice (i.e. 'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
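autopct also accepts a callable, which is handy when the raw counts matter as much as the percentages. A variant sketch (the fmt helper is ours, not a matplotlib API):

import matplotlib.pyplot as plt

labels = ['Windows', 'Linux', 'Apple']
sizes = [83, 5, 10]

def fmt(pct):
    # matplotlib passes the wedge's share of the total as a percentage
    count = round(pct * sum(sizes) / 100)
    return f'{pct:.1f}%\n({count})'

fig, ax = plt.subplots()
ax.pie(sizes, labels=labels, autopct=fmt, startangle=90)
ax.axis('equal')  # keep the pie circular
plt.show()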
lucapericlp/kohonen | [
"c6e84cc95b0468e49d258d3e40843b8090dcd3a0"
] | [
"backup.py"
] | [
"import math\nimport random\nimport pandas as pd\nfrom Neuron import Neuron\nfrom Neuron import getNormalised\nfrom Visualiser import Visualiser\n\nclass Network():\n\n\tdef __init__(self,numNeurons):\n\t\tself.neurons = []\n\t\tfor i in range(numNeurons):\n\t\t\tself.neurons.append(Neuron(weights=[random.uniform(0,1),\n\t\t\t\t\t\t\t\t\t\t\trandom.uniform(0,1),random.uniform(0,1)]))\n\n\tdef train(self,inputs,lr):\n\t\tnormalised_inputs = getNormalised(inputs)\n\t\tposWithLargestScore = self.closestNeuron(normalised_inputs)\n\t\twinningNeuron = self.neurons[posWithLargestScore]\n\t\twinningNeuron.updateWeights(normalised_inputs,lr)\n\n\tdef predict(self,all_inputs):\n\t\tclustered_dict = {index:[] for index,neuron in enumerate(self.neurons)} #initialise positions\n\t\tinputColours = {0:'r',1:'b',2:'g'}\n\t\tvisualiser = Visualiser(size=111)\n\t\tfor index,neuron in enumerate(self.neurons):\n\t\t\tvisualiser.add(neuron.weights[0],neuron.weights[1],neuron.weights[2],'y','^')\n\n\t\tfor index,norm_input in all_inputs.iterrows():\n\t\t\twinningNeuron = self.closestNeuron(getNormalised(norm_input))\n\t\t\tvisualiser.add(norm_input[0],norm_input[1],norm_input[2],inputColours[winningNeuron],'o')\n\t\t\tclustered_dict[winningNeuron].append(norm_input)#[str(i) for i in norm_input]) use for debugging\n\t\t\n\t\tvisualiser.show()\n\t\treturn clustered_dict\n\n\tdef closestNeuron(self,normalised_inputs):\n\t\tlargestNum = 0\n\t\tposWithLargestScore = 0\n\t\tfor pos,neuron in enumerate(self.neurons):\n\t\t\tnetScore = neuron.calcNet(normalised_inputs)\n\t\t\tif netScore > largestNum:\n\t\t\t\tlargestNum = netScore\n\t\t\t\tposWithLargestScore = pos\n\t\treturn posWithLargestScore\n\n\tdef __str__(self):\n\t\treturn \"<Network w/ neurons:\\n {}\\n>\".format(','.join([str(n) for n in self.neurons]))\n\ndef main():\n\tnetwork = Network(numNeurons=3)\n\tlr = 0.1\n\tepochs = 600\n\tdf = pd.read_csv('data.csv',header=None)\n\tdf.dropna(inplace=True)\n\tfor i in range(epochs):\n\t\tfor index,row in df.iterrows():\n\t\t\tnetwork.train(row,lr)\n\n\tclustered_dict = network.predict(df)\n\tprint(network)\n\nif __name__ == '__main__':\n\tmain()\n\n# if 4 neurons are used then one is left unused as a cluster i.e it is extra\n# if 3 neurons all are used\n"
] | [
[
"pandas.read_csv"
]
] |
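Neuron.py is not included in this file, but closestNeuron amounts to a dot product between unit vectors followed by an argmax. A standalone numpy sketch of that step; the unit-length normalisation is our assumption about what getNormalised does:

import numpy as np

def get_normalised(v):
    v = np.asarray(v, dtype=float)
    return v / np.linalg.norm(v)       # assumed behaviour of getNormalised

rng = np.random.default_rng(0)
weights = rng.random((3, 3))           # one weight row per neuron, as in Network.__init__
x = get_normalised([0.2, 0.7, 0.1])

scores = weights @ x                   # calcNet as a per-neuron dot product
winner = int(np.argmax(scores))        # index of the best-matching neuron
print(winner, scores)

np.argmax also copes with all-negative scores, which the largestNum = 0 initialisation in closestNeuron would silently map to neuron 0.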
yanxi0830/IIC | [
"77641baebca36d5e5f7c9ec25c0755d14524dd4e"
] | [
"code/archs/segmentation/baselines/net10a_isola.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom code.archs.cluster.vgg import VGGNet\nfrom code.archs.segmentation.net10a import SegmentationNet10aTrunk, \\\n SegmentationNet10a\nfrom code.utils.segmentation.baselines.general import get_patches\n\n__all__ = [\"SegmentationNet10aIsola\"]\n\n\nclass IsolaHead(nn.Module):\n def __init__(self, config):\n super(IsolaHead, self).__init__()\n self.patch_side = config.isola_patch_side\n\n self.siamese_branch = nn.Sequential(\n nn.Conv2d(in_channels=SegmentationNet10a.cfg[-1][0], out_channels=1024,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.ReLU(inplace=True),\n # nn.Conv2d(in_channels=1024, out_channels=1024,\n # kernel_size=3, stride=1, padding=1, bias=False),\n # nn.BatchNorm2d(1024),\n # nn.ReLU(inplace=True)\n )\n\n self.joint = nn.Sequential(\n nn.Linear(2 * 1024 * self.patch_side * self.patch_side, 1024),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(1024, 1),\n # nn.ReLU(True),\n # nn.Dropout(),\n # nn.Linear(2048, 1)\n )\n\n def forward(self, patches1, patches2):\n patches1 = self.siamese_branch(patches1)\n patches2 = self.siamese_branch(patches2)\n\n ni, k, h, w = patches1.size()\n ni2, k2, h2, w2 = patches1.size()\n\n if not ((ni == ni2) and (k == k2) and (h == h2) and (w == w2) and \\\n (h == self.patch_side) and (w == self.patch_side)):\n print((ni, k, h, w))\n print((ni2, k2, h2, w2))\n assert (False)\n\n # flatten all but first dim\n patches1 = patches1.contiguous() # otherwise view may behave funny\n patches2 = patches2.contiguous()\n\n patches1 = patches1.view(patches1.size(0), -1)\n patches2 = patches2.view(patches2.size(0), -1)\n concatenated = torch.cat((patches1, patches2), dim=1)\n\n ni3, nf = concatenated.size()\n if not ((ni3 == ni) and (nf == (2 * 1024 * self.patch_side *\n self.patch_side))):\n print((ni, k, h, w))\n print((ni2, k2, h2, w2))\n print(patches1.size())\n print(patches2.size())\n print((ni3, nf))\n assert (False)\n\n return self.joint(concatenated) # n, 1\n\n\nclass SegmentationNet10aIsola(VGGNet):\n def __init__(self, config):\n super(SegmentationNet10aIsola, self).__init__()\n\n self.patch_side = config.isola_patch_side\n self.input_sz = config.input_sz\n self.features_sz = SegmentationNet10a.cfg[-1][0]\n\n print(\"SegmentationNet10aIsola: %d %d %d\" % (self.patch_side,\n self.input_sz,\n self.features_sz))\n\n self.features = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)\n self.isola_head = IsolaHead(config)\n\n self._initialize_weights()\n\n def forward(self, x, centre=None, other=None, penultimate=False):\n x = self.features(x)\n x = F.interpolate(x, size=self.input_sz, mode=\"bilinear\")\n\n if not penultimate:\n assert ((centre is not None) and (other is not None))\n patches1, patches2 = \\\n get_patches(x, centre, other, self.patch_side)\n adjacency = self.isola_head(patches1, patches2)\n x = torch.sigmoid(adjacency)\n\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.sigmoid",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.BatchNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
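The essence of IsolaHead is a weight-shared ("siamese") branch applied to both patches, followed by a joint MLP over the concatenated flattened features. A self-contained sketch of that pattern follows; the channel count (512) and patch side (9) are illustrative assumptions, not values taken from the repo's config.

import torch
import torch.nn as nn

# shared weights: the same module is run on both patches
branch = nn.Sequential(
    nn.Conv2d(512, 1024, 3, padding=1, bias=False),
    nn.BatchNorm2d(1024),
    nn.ReLU(inplace=True),
)
joint = nn.Sequential(
    nn.Linear(2 * 1024 * 9 * 9, 1024),
    nn.ReLU(True),
    nn.Dropout(),
    nn.Linear(1024, 1),
)

p1, p2 = torch.randn(4, 512, 9, 9), torch.randn(4, 512, 9, 9)
f1, f2 = branch(p1), branch(p2)
pair = torch.cat((f1.flatten(1), f2.flatten(1)), dim=1)
adjacency = torch.sigmoid(joint(pair))   # (4, 1), as in SegmentationNet10aIsola.forward
print(adjacency.shape)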
ykrasnikov/web-scraping-challenge | [
"c8ede84d68b102a68625b091345765823dbb1c56"
] | [
"mission_to_mars/functions_scrape_mars.py"
] | [
"######################################################################\n########## import libraries\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport re\nfrom pymongo.mongo_client import MongoClient\nfrom splinter import Browser\n# as a playground - we will try selenium as well\nfrom selenium import webdriver \nfrom selenium.webdriver.chrome.options import Options\nimport pymongo\nimport time\n#####################################################################\n######### FUNCTIONS: Scrape\ndef scrape():\n \"\"\"function scrapes mars.nasa and jpl.nasa web sites\"\"\" \n #################################################################\n ##########getting titles and paragraph from latest MARS news\n print('started scraping')\n # using selenium setuping webdriver with chrome Headless options\n options=Options()\n options.headless=True\n # !!! WARNING chromdriver is in root repo directory... but u can give your own pass\n chrome_path='../chromedriver.exe' # for example - chrome_path='C://bin//chromedriver.exe'\n driver = webdriver.Chrome(chrome_path,options=options)\n\n # URL of page for scraping\n url='https://mars.nasa.gov/news/'\n driver.get(url) \n time.sleep(2)\n response = driver.page_source\n soup=bs(response,'lxml')\n # soup # debug print\n\n # close driver driver.close()\n driver.quit()\n # Declare output dictionary \n scrape_dict={}\n\n ########## title search\n result=soup.body.find('div',class_='list_text').a\n news_title=result.text\n scrape_dict['news_title']=news_title\n # print(news_title) # debug rint\n\n ########## news paragraph search\n result=soup.body.find('div',class_='list_text').find('div',class_='article_teaser_body')\n news_p=result.text\n scrape_dict['news_p']=news_p\n # print(news_p) # debug rint\n\n #################################################################\n ########## getting Mars featured image url \n # since JPL page has changed ( and homework is not updated for 2-3 years :) \n #- we will get JPL Featured image instead\n # setting up exe-path to chrome and \n #!!!!! 
change path to your machine\n executable_path={'executable_path':chrome_path} \n # or use this code below\n #from webdriver_manager.chrome import ChromeDriverManager\n #executable_path = {'executable_path': ChromeDriverManager().install()}\n # estsblishing Browser instance headless\n with Browser('chrome', **executable_path,headless=True) as browser:\n #URL to visit\n url='https://www.jpl.nasa.gov/'\n browser.visit(url)\n # clicking on Galleries in navbar \n # try for big screen , except for small\n try:\n browser.find_by_xpath('//button[contains(..,\"Galleries\")]').click()\n except:\n # feature image does not show on small version of screen :\n browser.driver.set_window_size(1200, 800)\n time.sleep(1)\n browser.find_by_xpath('//button[contains(..,\"Galleries\")]').click()\n \n time.sleep(1)\n # clicking on featured image\n browser.find_by_xpath('//a[contains(..,\"Featured Image\")]').click()\n time.sleep(2)\n # copy link\n feature_image_link=browser.links.find_by_partial_href('images/jpeg')[0]['href']\n scrape_dict['feature_image']=feature_image_link\n # print(feature_image_link) # debug rint\n\n #################################################################\n ########## getting Mars facts with Pandas\n # establish url\n url='https://space-facts.com/mars/'\n tables = pd.read_html(url)\n # create HTML table \n html_table = tables[0].to_html(header=False,index=False, justify='left',classes=['my_table','stripped'], border=0)\n scrape_dict['html_table']=html_table\n\n #################################################################\n ########## getting Mars hemispheres image urls\n # declare list for hemispheres image urls\n hem_list=[]\n # estsblishing Browser instance HEADLESS\n with Browser('chrome', **executable_path, headless=True) as browser:\n #URL to visit\n url='https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url)\n # get links for each hemisphere from search page\n links = browser.links.find_by_partial_text('Hemisphere')\n for i in range(0,len(links)):\n links[i].click()\n time.sleep(1)\n # getting image links from current page\n hem_image_link=browser.links.find_by_partial_href('enhanced')[0]['href']\n # getting title from current page\n title=browser.find_by_xpath('//h2[@class=\"title\"]')[0].text[:-len(' Enhanced')] #minus ' Enchanced'\n # recording list with dictionary\n hem_list.append({'title':title,'img_url':hem_image_link})\n browser.back()\n time.sleep(1)\n links = browser.links.find_by_partial_text('Hemisphere')\n # print(hem_list) # debug print\n scrape_dict['hem_list']=hem_list\n print('ended scarping')\n return scrape_dict\n\n#####################################################################\n######### FUNCTIONS: mars_insert\ndef mars_insert(page):\n \"\"\"insert dictionary into mongodb\"\"\"\n # establishing connection to mongo instance\n conn = 'mongodb://localhost:27017'\n client = pymongo.MongoClient(conn)\n #client = pymongo.MongoClient('localhost', 27017)\n # Define database and collection\n db = client.mars_news_db\n #db=client['mars_news_db']\n collection = db.page_data\n #collection=db['page_data']\n record_count=collection.estimated_document_count()\n if record_count == 0:\n collection.insert_one(page)\n insertion='new data recorded'\n elif collection.find().sort('_id',-1).limit(1)[0]['news_title']!=page['news_title']:\n collection.insert_one(page)\n insertion='new data recorded'\n else:\n insertion='skip, no new record'\n client.close()\n return 
insertion\n\n#####################################################################\n######### FUNCTIONS: mars_search\ndef mars_search():\n \"\"\"search and return latest record from mongodb\"\"\"\n# establishing connection to mongo instance\n conn = 'mongodb://localhost:27017'\n client = pymongo.MongoClient(conn)\n #client = pymongo.MongoClient('localhost', 27017)\n # Define database and collection\n db = client.mars_news_db\n collection = db.page_data\n record_count=collection.estimated_document_count()\n if record_count == 0:\n dict=scrape()\n mars_insert(dict)\n \n record = collection.find().sort('_id',-1).limit(1)[0]\n client.close()\n return record # change to just record \n\n# print(mars_search())\n# print('serch is done')\n# dict=scrape()\n# print(mars_insert(dict))\n# print(mars_search())"
] | [
[
"pandas.read_html"
]
] |
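The dedup rule in mars_insert, insert only when the collection is empty or the newest stored headline differs, can be exercised in isolation. A minimal sketch, assuming a mongod running on localhost:27017:

import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')
collection = client.mars_news_db.page_data
page = {'news_title': 'Example headline'}    # hypothetical scrape result

# newest document first; empty list if the collection has no records yet
latest = list(collection.find().sort('_id', -1).limit(1))
if not latest or latest[0]['news_title'] != page['news_title']:
    collection.insert_one(page)              # new headline -> record it
else:
    print('skip, no new record')
client.close()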
husheng876/pytorch_nested-unet | [
"c5bf662a4fd7980182c20197d7dbedfd5aa332bc"
] | [
"archs_backup.py"
] | [
"#################################\n#本文件用于存储archs中调试好的结构代码,用于简化archs代码长度\n#当需要使用相关结构模型时\n#################################\nimport torch\nfrom torch import nn\n\n######the header file from CRDN.py\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nfrom torch.nn import *\n\n#UNetRNNCAttention_PSP 模型用到的引入\nfrom segmentation_refinement.models.psp.pspnet import *\n\n#from cascadePSP_model.sync_batchnorm import *\n#from cascadePSP_model.psp.pspnet import *\n__all__ = ['UNet', 'NestedUNet','UNetRNN','UNetRNNGhost','UNetRM3','UNetRM7','UNetRNNPAttention',\n 'UNetRNNCAttention','UNetRNNAttention','UNetRNNCAttention_PSP','UNetRNNPSP','R2U_Net']\n\nclass VGGBlock(nn.Module):\n def __init__(self, in_channels, middle_channels, out_channels):\n super().__init__()\n self.relu = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)\n self.bn1 = nn.BatchNorm2d(middle_channels)\n self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n return out\n\n\n# 在__init__中加入了deep_supervision=False,不加在train中如果存在deep_supervison部分会出现参数报错\nclass UNet(nn.Module):\n def __init__(self, num_classes, input_channels=3, deep_supervision=False, **kwargs):\n super().__init__()\n\n nb_filter = [32, 64, 128, 256, 512]\n\n self.pool = nn.MaxPool2d(2, 2)\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n\n self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])\n self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])\n self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])\n self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])\n self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])\n\n self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])\n self.conv2_2 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])\n self.conv1_3 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv0_4 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])\n\n self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)\n\n def forward(self, input):\n x0_0 = self.conv0_0(input)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x2_0 = self.conv2_0(self.pool(x1_0))\n x3_0 = self.conv3_0(self.pool(x2_0))\n x4_0 = self.conv4_0(self.pool(x3_0))\n\n x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))\n\n output = self.final(x0_4)\n return output\n\n\nclass NestedUNet(nn.Module):\n def __init__(self, num_classes, input_channels=3, deep_supervision=False, **kwargs):\n super().__init__()\n\n nb_filter = [32, 64, 128, 256, 512]\n\n self.deep_supervision = deep_supervision\n\n self.pool = nn.MaxPool2d(2, 2)\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n\n self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])\n self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])\n self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])\n self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])\n self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])\n\n 
self.conv0_1 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])\n self.conv1_1 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv2_1 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])\n self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])\n\n self.conv0_2 = VGGBlock(nb_filter[0] * 2 + nb_filter[1], nb_filter[0], nb_filter[0])\n self.conv1_2 = VGGBlock(nb_filter[1] * 2 + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv2_2 = VGGBlock(nb_filter[2] * 2 + nb_filter[3], nb_filter[2], nb_filter[2])\n\n self.conv0_3 = VGGBlock(nb_filter[0] * 3 + nb_filter[1], nb_filter[0], nb_filter[0])\n self.conv1_3 = VGGBlock(nb_filter[1] * 3 + nb_filter[2], nb_filter[1], nb_filter[1])\n\n self.conv0_4 = VGGBlock(nb_filter[0] * 4 + nb_filter[1], nb_filter[0], nb_filter[0])\n\n if self.deep_supervision:\n self.final1 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)\n self.final2 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)\n self.final3 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)\n self.final4 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)\n else:\n self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)\n\n def forward(self, input):\n x0_0 = self.conv0_0(input)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], 1))\n\n x2_0 = self.conv2_0(self.pool(x1_0))\n x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], 1))\n x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], 1))\n\n x3_0 = self.conv3_0(self.pool(x2_0))\n x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1))\n x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1))\n x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1))\n\n x4_0 = self.conv4_0(self.pool(x3_0))\n x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1))\n\n if self.deep_supervision:\n output1 = self.final1(x0_1)\n output2 = self.final2(x0_2)\n output3 = self.final3(x0_3)\n output4 = self.final4(x0_4)\n return [output1, output2, output3, output4]\n\n else:\n output = self.final(x0_4)\n return output\n\n\nclass RDC(nn.Module):\n def __init__(self, hidden_dim, kernel_size, bias, decoder='LSTM'):\n \"\"\"\n Recurrent Decoding Cell (RDC) module.\n :param hidden_dim:\n :param kernel_size: conv kernel size\n :param bias: if or not to add a bias term\n :param decoder: <name> [options: 'vanilla, LSTM, GRU']\n \"\"\"\n super(RDC, self).__init__()\n self.hidden_dim = hidden_dim\n self.kernel_size = kernel_size\n self.padding = 1 # , kernel_size // 2 #整除,为何padding的形式是这样的,是否需要更改成别的样子\n self.bias = bias\n self.decoder = decoder\n self.gru_catconv = nn.Conv2d(self.hidden_dim * 2, self.hidden_dim * 2, self.kernel_size, padding=self.padding,\n stride=1,\n bias=self.bias) # param1,2:input channels,output channels,数据形式是默认channel first\n self.gru_conv = nn.Conv2d(self.hidden_dim * 2, self.hidden_dim, self.kernel_size, padding=self.padding,\n stride=1, bias=self.bias)\n self.lstm_catconv = nn.Conv2d(self.hidden_dim * 2, self.hidden_dim * 4, self.kernel_size, padding=self.padding,\n stride=1, bias=self.bias)\n self.vanilla_conv = nn.Conv2d(self.hidden_dim * 2, self.hidden_dim, self.kernel_size, stride=1,\n padding=self.padding, bias=self.bias)\n\n def forward(self, x_cur, h_pre, c_pre=None): # 
使h_pre和c_pre都与x_cur保持一致大小\n if self.decoder == \"LSTM\":\n h_pre_up = F.interpolate(h_pre, size=[x_cur.size(2), x_cur.size(3)], mode='bilinear',\n align_corners=True) # 将输入进行上/下采样到给定的大小或scale_facotr\n # upsampling operation\n c_pre_up = F.interpolate(c_pre, size=[x_cur.size(2), x_cur.size(3)], mode='bilinear', align_corners=True)\n combined = torch.cat([h_pre_up, x_cur], dim=1)\n combined_conv = self.lstm_catconv(combined)\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim,\n dim=1) # 将输入变量从dim纬度划分成self.hidden_dim份均块\n # four gate which decide whether or how much to propagate both semantic and spatial information to the next RDC\n i = torch.sigmoid(cc_i)\n f = torch.sigmoid(cc_f)\n o = torch.sigmoid(cc_o)\n g = torch.tanh(cc_g)\n\n c_cur = f * c_pre_up + i * g # indicate the cell state of ConvLSTM\n h_cur = o * torch.tanh(c_cur) # hidden\n\n return h_cur, c_cur\n\n elif self.decoder == \"GRU\":\n # 通常考虑pixels为square而不是点,align_corners是true时,输入输出张量以角像素的中心点对齐,保留角像素的值\n h_pre_up = F.interpolate(h_pre, size=[x_cur.size(2), x_cur.size(3)], mode='bilinear', align_corners=True)\n\n combined = torch.cat([h_pre_up.cuda(), x_cur.cuda()], dim=1)\n combined_conv = self.gru_catconv(combined)\n # combined_conv = combined_conv.cuda()\n cc_r, cc_z = torch.split(combined_conv, self.hidden_dim, dim=1)\n r = torch.sigmoid(cc_r)\n z = torch.sigmoid(cc_z)\n h_hat = torch.tanh(self.gru_conv(torch.cat([x_cur, r * h_pre_up], dim=1)))\n h_cur = z * h_pre_up + (1 - z) * h_hat\n\n return h_cur\n\n elif self.decoder == \"vanilla\":\n h_pre_up = F.interpolate(h_pre, size=[x_cur.size(2), x_cur.size(3)], mode='bilinear', align_corners=True)\n # print(\"????The shape of h_pre_up is ?????\",np.shape(h_pre_up))\n combined = torch.cat([h_pre_up.cpu(), x_cur.cpu()], dim=1) # cpu是后面加的\n # print(\"????The shape of combined is ?????\", np.shape(combined))\n combined_conv = self.vanilla_conv(combined)\n # print(\"????The shape of combined_conv is ?????\", np.shape(combined_conv))\n h_cur = torch.relu(combined_conv)\n # print(\"************The output shape is***********:\",np.shape(h_cur))\n return h_cur\n\n\n\"\"\"\nImplementation code for CRDN with U-Net-backbone (UNetRNN).\n输入大小和输出图像大小一样\n\"\"\"\n\n\nclass UNetRNN(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNN, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # 5的卷积核大小\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = 
nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # 图像大小1/16,输出通道是class\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n\n return h5\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\n\n# 在UNetRNN中有实现,用于downsampling的卷积\nclass unetConv2(nn.Module):\n def __init__(self, in_size, out_size, is_batchnorm):\n super(unetConv2, self).__init__()\n\n if is_batchnorm:\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_size, out_size, 3, 1, 1), nn.BatchNorm2d(out_size), nn.ReLU()\n ) # 参数3,4分别是kernel_size和stride,BatchNorm2d对out_size进行数据归一化操作\n self.conv2 = nn.Sequential(\n nn.Conv2d(out_size, out_size, 3, 1, 1), nn.BatchNorm2d(out_size), nn.ReLU()\n ) # Conv2d参数4,5是stride,padding\n else:\n self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 
1), nn.ReLU())\n self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 1), nn.ReLU())\n\n def forward(self, inputs):\n outputs = self.conv1(inputs)\n outputs = self.conv2(outputs)\n return outputs\n\n\n#######################################\n# the model of UnetRNN which is composed with Ghost and UNetRNN\n#######################################\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\ndef hard_sigmoid(x, inplace: bool = False):\n if inplace:\n return x.add_(3.).clamp_(0., 6.).div_(6.)\n else:\n return F.relu6(x + 3.) / 6.\n\nclass SqueezeExcite(nn.Module):\n def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,\n act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):\n super(SqueezeExcite, self).__init__()\n self.gate_fn = gate_fn\n reduced_chs = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)\n self.act1 = act_layer(inplace=True)\n self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)\n\n def forward(self, x):\n x_se = self.avg_pool(x)\n x_se = self.conv_reduce(x_se)\n x_se = self.act1(x_se)\n x_se = self.conv_expand(x_se)\n x = x * self.gate_fn(x_se)\n return x\n\nclass GhostModule(nn.Module):\n def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):\n super(GhostModule, self).__init__()\n self.oup = oup\n init_channels = math.ceil(oup / ratio) # 返回数字的上入整数\n new_channels = init_channels * (ratio - 1) # 此时=init_channels数\n\n self.primary_conv = nn.Sequential(\n nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),\n nn.BatchNorm2d(init_channels),\n nn.ReLU(inplace=True) if relu else nn.Sequential(),\n )\n\n self.cheap_operation = nn.Sequential(\n nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False),\n nn.BatchNorm2d(new_channels),\n nn.ReLU(inplace=True) if relu else nn.Sequential(),\n )\n\n def forward(self, x):\n x1 = self.primary_conv(x)\n x2 = self.cheap_operation(x1)\n out = torch.cat([x1, x2], dim=1)\n # print(\"The output of the model GhostModule is :\",out[:, :self.oup, :, :])\n return out[:, :self.oup, :, :]\n\nclass GhostBottleneck(nn.Module):\n \"\"\" Ghost bottleneck w/ optional SE\"\"\"\n\n def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,\n stride=1, act_layer=nn.ReLU, se_ratio=0.):\n super(GhostBottleneck, self).__init__()\n has_se = se_ratio is not None and se_ratio > 0.\n self.stride = stride\n\n # Point-wise expansion\n self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) # mid_chs参数表示GhostModule模块的输出channel\n\n # Squeeze-and-excitation\n if has_se:\n self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)\n else:\n self.se = None\n\n # Point-wise linear projection\n self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)\n\n # shortcut\n if (in_chs == out_chs and self.stride == 1):\n self.shortcut = nn.Sequential()\n else:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_chs, in_chs, dw_kernel_size, 
stride=stride,\n padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False), # 不改变图像大小\n nn.BatchNorm2d(in_chs),\n nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), # 不改变图像大小\n nn.BatchNorm2d(out_chs),\n )\n\n def forward(self, x):\n residual = x\n\n # 1st ghost bottleneck\n x = self.ghost1(x)\n\n # Squeeze-and-excitation\n if self.se is not None:\n x = self.se(x)\n\n # 2nd ghost bottleneck\n x = self.ghost2(x)\n\n x += self.shortcut(residual)\n return x\n\nclass UNetRNNGhost(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"vanilla\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNNGhost, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n # this block output is ghost block from paper \"Segmenting Medical MRI via Recurrent Decoding Cell\"\n self.score_block1 = nn.Sequential(\n GhostBottleneck(in_chs=filters[0], mid_chs=filters[0] // 2, out_chs=self.n_classes))\n\n self.score_block2 = nn.Sequential(\n GhostBottleneck(in_chs=filters[1], mid_chs=filters[1] // 2, out_chs=self.n_classes))\n\n self.score_block3 = nn.Sequential(\n GhostBottleneck(in_chs=filters[2], mid_chs=filters[2] // 2, out_chs=self.n_classes))\n\n self.score_block4 = nn.Sequential(\n GhostBottleneck(in_chs=filters[3], mid_chs=filters[3] // 2, out_chs=self.n_classes))\n\n self.score_block5 = nn.Sequential(\n GhostBottleneck(in_chs=filters[4], mid_chs=filters[4] // 2, out_chs=self.n_classes))\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # 图像大小1/16,输出通道是class\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) 
#(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n\n return h5\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\n#########################################\n# the code to short the convolution block number of UNetRNN,the channel is:64,288,512\n#########################################\nclass UNetRM3(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRM3, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 288, 512]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # 5的卷积核大小\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n x1 = self.score_block3(conv3) # 图像大小1/16,输出通道是class\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block2(conv2) # 1/8,class\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block1(conv1) # 1/4,class\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", 
np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n\n else:\n raise NotImplementedError\n\n return h3\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\nclass UNetRM7(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRM7, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [32, 64, 128, 256, 512, 1024, 2048] # 原为64, 128, 256, 512,1024\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0],\n is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True) # 16channel,32\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True) # 32.64\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True) # 64,128\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.conv5 = unetConv2(filters[3], filters[4], is_batchnorm=True) # 128,256\n\n self.maxpool5 = nn.MaxPool2d(kernel_size=2)\n self.conv6 = unetConv2(filters[4], filters[5], is_batchnorm=True) # 256,512\n\n self.maxpool6 = nn.MaxPool2d(kernel_size=2)\n self.conv7 = unetConv2(filters[5], filters[6], is_batchnorm=True) # 512,1024\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # 5的卷积核大小\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block6 = nn.Sequential(\n nn.Conv2d(filters[5], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block7 = nn.Sequential(\n nn.Conv2d(filters[6], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def 
forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.conv5(maxpool4) # 1/8,filters[3] #期望有32通道\n\n maxpool5 = self.maxpool5(conv5)\n conv6 = self.conv6(maxpool5) # 1/8,filters[3]\n\n maxpool6 = self.maxpool6(conv6)\n conv7 = self.conv7(maxpool6) # 1/8,filters[3]\n\n x1 = self.score_block7(conv7) # 图像大小1/16,输出通道是class\n x2 = self.score_block6(conv6) # 1/8,class\n x3 = self.score_block5(conv5) # 1/4,class\n x4 = self.score_block4(conv4) # 1/2,class\n x5 = self.score_block3(conv3)\n x6 = self.score_block2(conv2)\n x7 = self.score_block1(conv1)\n\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4)\n h6, c6 = self.RDC(x_cur=x6, h_pre=h5, c_pre=c5)\n h7, c7 = self.RDC(x_cur=x7, h_pre=h6, c_pre=c6)\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4)\n h6 = self.RDC(x_cur=x6, h_pre=h5)\n h7 = self.RDC(x_cur=x7, h_pre=h6)\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4)\n h6 = self.RDC(x_cur=x6, h_pre=h5)\n h7 = self.RDC(x_cur=x7, h_pre=h6)\n\n else:\n raise NotImplementedError\n\n return h7\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\n###############################\n# The position attention module and channel attention module from the paper \"Dual Attention Network for Scene Segmentation\"\n###############################\nclass PAM_Module(nn.Module):\n \"\"\" Position attention module\"\"\"\n\n # Ref from SAGAN\n def __init__(self, in_dim):\n super(PAM_Module, self).__init__()\n self.chanel_in = in_dim\n\n self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\n self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\n self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X H X W)\n returns :\n out : attention value + input feature\n attention: B X (HxW) X (HxW)\n \"\"\"\n m_batchsize, C, height, width = x.size()\n proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1)\n proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)\n energy = torch.bmm(proj_query, 
proj_key)\n attention = self.softmax(energy)\n proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)\n\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\n out = out.view(m_batchsize, C, height, width)\n\n out = self.gamma * out + x\n return out\n\n\nclass CAM_Module(nn.Module):\n \"\"\" Channel attention module\"\"\"\n\n def __init__(self, in_dim):\n super(CAM_Module, self).__init__()\n self.chanel_in = in_dim\n\n self.gamma = nn.Parameter(torch.zeros(1))\n self.softmax = nn.Softmax(dim=-1)\n # self.sizeModule = 0\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X H X W)\n returns :\n out : attention value + input feature\n attention: B X C X C\n \"\"\"\n m_batchsize, C, height, width = x.size()\n proj_query = x.view(m_batchsize, C, -1)\n proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)\n energy = torch.bmm(proj_query, proj_key)\n energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy\n attention = self.softmax(energy_new)\n proj_value = x.view(m_batchsize, C, -1)\n\n out = torch.bmm(attention, proj_value)\n out = out.view(m_batchsize, C, height, width)\n\n out = self.gamma * out + x\n # print(out.size(), out.type())\n # self.sizeModule=out.size()#自己加的语句\n # print(\"The value of sizeModule is:\",self.sizeModule)\n return out\n\n\nclass Attention_block(nn.Module):\n def __init__(self, in_dim):\n super(Attention_block, self).__init__()\n self.channel_in = in_dim\n\n def forward(self, input):\n pa = PAM_Module(self.channel_in) # position attention module\n ca = CAM_Module(self.channel_in) # channel attention module\n pa = pa(input)\n ca = ca(input)\n out = pa + ca\n\n return out\n\n\n############################\n# The architecture of UNetRNNAttenion is compose with UNetRNN and the function of PAM_Module and CAM_Module\n############################\nclass UNetRNNPAttention(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNNPAttention, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n self.PAM_Module1 = PAM_Module(filters[0])\n self.PAM_Module2 = PAM_Module(filters[1])\n self.PAM_Module3 = PAM_Module(filters[2])\n self.PAM_Module4 = PAM_Module(filters[3])\n self.PAM_Module5 = PAM_Module(filters[4])\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # 5的卷积核大小\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n 
nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # 图像大小1/16,输出通道是class\n x1 = self.PAM_Module1(x1)\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n x2 = self.PAM_Module2(x2)\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n x3 = self.PAM_Module3(x3)\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n x4 = self.PAM_Module4(x4)\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n x5 = self.PAM_Module5(x5)\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n\n return h5\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\n\nclass UNetRNNCAttention(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNNCAttention, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # 
downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n self.CAM_Module1 = CAM_Module(filters[4])\n self.CAM_Module2 = CAM_Module(filters[3])\n self.CAM_Module3 = CAM_Module(filters[2])\n self.CAM_Module4 = CAM_Module(filters[1])\n self.CAM_Module5 = CAM_Module(filters[0])\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # 5的卷积核大小\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # 图像大小1/16,输出通道是class\n x1 = self.CAM_Module1(x1) # x1类型<class 'archs.CAM_Module'>\n # print(\"The value of CAM_Module is:\",x1) ##CAM_Module((softmax): Softmax())\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n x2 = self.CAM_Module2(x2)\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n x3 = self.CAM_Module3(x3)\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n x4 = self.CAM_Module4(x4)\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n x5 = self.CAM_Module5(x5)\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n # print(\"The type of x1\", type(x1)) #<class 'archs.CAM_Module'>\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 
1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n\n return h5\n\n def _init_cell_state(self, tensor):\n # print(\"The size of the Channel attention module is:\",tensor) #CAM_Module((softmax): Softmax())\n # print(\"The type of the Channel attention module is:\", type(tensor))#<class 'archs.CAM_Module'>\n return torch.zeros(tensor.size()).cuda(0)\n\n\n################################################\n# the module composed with attention blocks\n################################################\nclass UNetRNNAttention(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"vanilla\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNNAttention, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # arg 2 is out_channel; this block changes only the channel count, not the image size\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # halves the spatial size\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n self.attention_block1 = Attention_block(filters[4])\n self.attention_block2 = Attention_block(filters[3])\n self.attention_block3 = Attention_block(filters[2])\n self.attention_block4 = Attention_block(filters[1])\n self.attention_block5 = Attention_block(filters[0])\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # kernel size of 5\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # image size unchanged (1x), output channels = filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # image size 1/16, output channels = n_classes\n x1 = self.attention_block1(x1)\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n x2 = self.attention_block2(x2)\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n x3 = self.attention_block3(x3)\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n x4 = self.attention_block4(x4)\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n x5 = self.attention_block5(x5)\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n\n h0 = self._init_cell_state(x1) # 1/16,512 returns a zero tensor on cuda with the same size as x1\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n\n return h5\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\n\nclass UNetRNNCAttention_PSP(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNNCAttention_PSP, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # arg 2 is out_channel; this block changes only the channel count, not the image size\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # halves the spatial size\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n self.CAM_Module1 = CAM_Module(filters[4])\n self.CAM_Module2 = CAM_Module(filters[3])\n self.CAM_Module3 = CAM_Module(filters[2])\n self.CAM_Module4 = CAM_Module(filters[1])\n self.CAM_Module5 = CAM_Module(filters[0])\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # kernel size of 5\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # image size unchanged (1x), output channels = filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # image size 1/16, output channels = n_classes\n x1 = self.CAM_Module1(x1) # x1 type is <class 'archs.CAM_Module'>\n # print(\"The value of CAM_Module is:\",x1) ##CAM_Module((softmax): Softmax())\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n x2 = self.CAM_Module2(x2)\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n x3 = self.CAM_Module3(x3)\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n x4 = self.CAM_Module4(x4)\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n x5 = self.CAM_Module5(x5)\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n # print(\"The type of x1\", type(x1)) #<class 'archs.CAM_Module'>\n h0 = self._init_cell_state(x1) # 1/16,512 returns a zero tensor on cuda with the same size as x1\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = 
self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n\n model = PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024,\n backend='resnet50').cuda() # 另此模型权重为cuda类型\n # print(\"The type of input is :\",type(input))\n # print(\"The type of h5 is :\", type(h5))\n output = model(input, h5)\n\n return output['pred_224']\n\n def _init_cell_state(self, tensor):\n # print(\"The size of the Channle attention module is:\",tensor) #CAM_Module((softmax): Softmax())\n # print(\"The type of the Channle attention module is:\", type(tensor))#<class 'archs.CAM_Module'>\n return torch.zeros(tensor.size()).cuda(0)\n\n\n###########################\n# Define a module to refine the segmentation\n###########################\ndef resize_max_side(im, size, method):\n h, w = im.shape[-2:]\n max_side = max(h, w)\n ratio = size / max_side\n if method in ['bilinear', 'bicubic']:\n return F.interpolate(im, scale_factor=ratio, mode=method, align_corners=False)\n else:\n return F.interpolate(im, scale_factor=ratio, mode=method)\n\n\ndef safe_forward(model, im, seg, inter_s8=None, inter_s4=None):\n \"\"\"\n Slightly pads the input image such that its length is a multiple of 8\n \"\"\"\n b, _, ph, pw = seg.shape\n if (ph % 8 != 0) or (pw % 8 != 0):\n newH = ((ph // 8 + 1) * 8)\n newW = ((pw // 8 + 1) * 8)\n p_im = torch.zeros(b, 3, newH, newW, device=im.device)\n p_seg = torch.zeros(b, 1, newH, newW, device=im.device) - 1\n\n p_im[:, :, 0:ph, 0:pw] = im\n p_seg[:, :, 0:ph, 0:pw] = seg\n im = p_im\n seg = p_seg\n\n if inter_s8 is not None:\n p_inter_s8 = torch.zeros(b, 1, newH, newW, device=im.device) - 1\n p_inter_s8[:, :, 0:ph, 0:pw] = inter_s8\n inter_s8 = p_inter_s8\n if inter_s4 is not None:\n p_inter_s4 = torch.zeros(b, 1, newH, newW, device=im.device) - 1\n p_inter_s4[:, :, 0:ph, 0:pw] = inter_s4\n inter_s4 = p_inter_s4\n\n images = model(im, seg, inter_s8, inter_s4)\n return_im = {}\n\n for key in ['pred_224', 'pred_28_3', 'pred_56_2']:\n return_im[key] = images[key][:, :, 0:ph, 0:pw]\n del images\n\n return return_im\n\n\ndef process_high_res_im(model, im, seg, L=900):\n stride = L // 2\n\n _, _, h, w = seg.shape\n\n \"\"\"\n Global Step\n \"\"\"\n if max(h, w) > L:\n im_small = resize_max_side(im, L, 'area')\n seg_small = resize_max_side(seg, L, 'area')\n elif max(h, w) < L:\n im_small = resize_max_side(im, L, 'bicubic')\n seg_small = resize_max_side(seg, L, 'bilinear')\n else:\n im_small = im\n seg_small = seg\n\n images = safe_forward(model, im_small, seg_small)\n\n pred_224 = images['pred_224']\n pred_56 = images['pred_56_2']\n\n \"\"\"\n Local step\n \"\"\"\n\n for new_size in [max(h, w)]:\n im_small = resize_max_side(im, new_size, 'area')\n seg_small = resize_max_side(seg, new_size, 'area')\n _, _, h, w = seg_small.shape\n\n combined_224 = torch.zeros_like(seg_small)\n combined_weight = torch.zeros_like(seg_small)\n\n r_pred_224 = (F.interpolate(pred_224, size=(h, w), mode='bilinear', align_corners=False) > 0.5).float() * 2 - 1\n r_pred_56 = F.interpolate(pred_56, size=(h, w), mode='bilinear', align_corners=False) * 2 - 1\n\n padding = 16\n step_size = stride - padding * 2\n step_len = L\n\n used_start_idx = {}\n for x_idx in range((w) // step_size + 1):\n for y_idx in range((h) // step_size + 1):\n\n start_x = x_idx * step_size\n start_y = y_idx * step_size\n end_x = start_x + step_len\n end_y = start_y + step_len\n\n # Shift when required\n if end_y > h:\n end_y = h\n start_y = h - step_len\n if end_x > w:\n end_x = w\n 
start_x = w - step_len\n\n # Bound x/y range\n start_x = max(0, start_x)\n start_y = max(0, start_y)\n end_x = min(w, end_x)\n end_y = min(h, end_y)\n\n # The same crop might appear twice due to bounding/shifting\n start_idx = start_y * w + start_x\n if start_idx in used_start_idx:\n continue\n else:\n used_start_idx[start_idx] = True\n\n # Take crop\n im_part = im_small[:, :, start_y:end_y, start_x:end_x]\n seg_224_part = r_pred_224[:, :, start_y:end_y, start_x:end_x]\n seg_56_part = r_pred_56[:, :, start_y:end_y, start_x:end_x]\n\n # Skip when it is not an interesting crop anyway\n seg_part_norm = (seg_224_part > 0).float()\n high_thres = 0.9\n low_thres = 0.1\n if (seg_part_norm.mean() > high_thres) or (seg_part_norm.mean() < low_thres):\n continue\n grid_images = safe_forward(model, im_part, seg_224_part, seg_56_part)\n grid_pred_224 = grid_images['pred_224']\n\n # Padding\n pred_sx = pred_sy = 0\n pred_ex = step_len\n pred_ey = step_len\n\n if start_x != 0:\n start_x += padding\n pred_sx += padding\n if start_y != 0:\n start_y += padding\n pred_sy += padding\n if end_x != w:\n end_x -= padding\n pred_ex -= padding\n if end_y != h:\n end_y -= padding\n pred_ey -= padding\n\n combined_224[:, :, start_y:end_y, start_x:end_x] += grid_pred_224[:, :, pred_sy:pred_ey,\n pred_sx:pred_ex]\n\n del grid_pred_224\n\n # Used for averaging\n combined_weight[:, :, start_y:end_y, start_x:end_x] += 1\n\n # Final full resolution output\n seg_norm = (r_pred_224 / 2 + 0.5)\n pred_224 = combined_224 / combined_weight\n pred_224 = torch.where(combined_weight == 0, seg_norm, pred_224)\n\n _, _, h, w = seg.shape\n images = {}\n images['pred_224'] = F.interpolate(pred_224, size=(h, w), mode='bilinear', align_corners=True)\n\n return images['pred_224']\n\n\nclass UNetRNNPSP(nn.Module):\n def __init__(self, n_classes, input_channel=3, kernel_size=3, feature_scale=4, decoder=\"GRU\", bias=True,\n deep_supervision=False, **kwargs):\n\n super(UNetRNNPSP, self).__init__()\n self.input_channel = input_channel\n self.n_classes = n_classes\n self.kernel_size = kernel_size\n self.feature_scale = feature_scale\n self.decoder = decoder\n self.bias = bias\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.input_channel, filters[0], is_batchnorm=True) # 参 数2表示out_channel,函数不改变图像大小只改变通道数\n self.maxpool1 = nn.MaxPool2d(kernel_size=2) # 大小比原先的变小一半\n self.conv2 = unetConv2(filters[0], filters[1], is_batchnorm=True)\n\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n self.conv3 = unetConv2(filters[1], filters[2], is_batchnorm=True)\n\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n self.conv4 = unetConv2(filters[2], filters[3], is_batchnorm=True)\n\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n self.center = unetConv2(filters[3], filters[4], is_batchnorm=True)\n\n # this block output is cell current map\n self.score_block1 = nn.Sequential(\n\n nn.Conv2d(filters[0], self.n_classes, 5, padding=2), # 5的卷积核大小\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block2 = nn.Sequential(\n nn.Conv2d(filters[1], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block3 = nn.Sequential(\n nn.Conv2d(filters[2], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.score_block4 = nn.Sequential(\n nn.Conv2d(filters[3], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n 
self.score_block5 = nn.Sequential(\n nn.Conv2d(filters[4], self.n_classes, 5, padding=2),\n nn.BatchNorm2d(self.n_classes),\n nn.ReLU(inplace=True)\n )\n\n self.RDC = RDC(self.n_classes, self.kernel_size, bias=self.bias,\n decoder=self.decoder)\n\n def forward(self, input, cell_state=None):\n conv1 = self.conv1(input) # 1,filters[0] # 图像大小是1,输出通道是filters[0]\n\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1) # 1/2,filters[1]\n\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2) # 1/4,filters[2]\n\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3) # 1/8,filters[3]\n\n maxpool4 = self.maxpool4(conv4)\n conv5 = self.center(maxpool4) # 1/16,filters[4]\n\n x1 = self.score_block5(conv5) # 图像大小1/16,输出通道是class\n # print(\"#####The input shape of x1 is:\",np.shape(x1)) #(16,1,6,6)\n x2 = self.score_block4(conv4) # 1/8,class\n # print(\"#####The input shape of x2 is:\", np.shape(x2)) #(16,1,12,12)\n x3 = self.score_block3(conv3) # 1/4,class\n # print(\"#####The input shape of x3 is:\", np.shape(x3))\n x4 = self.score_block2(conv2) # 1/2,class\n # print(\"#####The input shape of x4 is:\", np.shape(x4))\n x5 = self.score_block1(conv1) # 1,class\n # print(\"#####The input shape of x5 is:\", np.shape(x5))\n\n h0 = self._init_cell_state(x1) # 1/16,512 返回与x1大小相同的在cuda中的零张量\n # print(\"#####The input shape of h0 is:\", np.shape(h0)) #(16,1,6,6)\n\n # Decode\n if self.decoder == \"LSTM\":\n # init c0\n if cell_state is not None:\n raise NotImplementedError()\n else:\n c0 = self._init_cell_state(h0)\n\n h1, c1 = self.RDC(x_cur=x1, h_pre=h0, c_pre=c0) # 1/16,class\n h2, c2 = self.RDC(x_cur=x2, h_pre=h1, c_pre=c1) # 1/8,class\n h3, c3 = self.RDC(x_cur=x3, h_pre=h2, c_pre=c2) # 1/4,class\n h4, c4 = self.RDC(x_cur=x4, h_pre=h3, c_pre=c3) # 1/2,class\n h5, c5 = self.RDC(x_cur=x5, h_pre=h4, c_pre=c4) # 1,class\n\n elif self.decoder == \"GRU\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n elif self.decoder == \"vanilla\":\n h1 = self.RDC(x_cur=x1, h_pre=h0) # 1/16,class\n h2 = self.RDC(x_cur=x2, h_pre=h1) # 1/8,class\n h3 = self.RDC(x_cur=x3, h_pre=h2) # 1/4,class\n h4 = self.RDC(x_cur=x4, h_pre=h3) # 1/2,class\n h5 = self.RDC(x_cur=x5, h_pre=h4) # 1,class\n\n else:\n raise NotImplementedError\n model = RefinementModule().cuda() # defined in the file of segmentation_refinement.models.psp.pspnet\n output = process_high_res_im(model, input, h5)\n output = (output[0, 0].cpu().numpy() * 255).astype('uint8')\n\n return output\n\n def _init_cell_state(self, tensor):\n return torch.zeros(tensor.size()).cuda(0)\n\n\n\n##############################################\n#The module of R2U-Net comes from the paper \"\"\n##############################################\n\"\"\" \nclass up_conv(nn.Module):\n def __init__(self,ch_in,ch_out):\n super(up_conv,self).__init__()\n self.up = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True),\n\t\t nn.BatchNorm2d(ch_out),\n\t\t\tnn.ReLU(inplace=True)\n )\n\n def forward(self,x):\n x = self.up(x)\n return x\n\nclass Recurrent_block(nn.Module): #输入输出通道一样,不改变图像大小\n def __init__(self, ch_out, t=2):\n super(Recurrent_block, self).__init__()\n self.t = t\n self.ch_out = ch_out\n self.conv = nn.Sequential(\n nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),\n 
nn.BatchNorm2d(ch_out),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n for i in range(self.t):\n\n if i == 0:\n x1 = self.conv(x)\n\n x1 = self.conv(x + x1)\n return x1\n\nclass RRCNN_block(nn.Module):\n def __init__(self,ch_in,ch_out,t=2):\n super(RRCNN_block,self).__init__()\n self.RCNN = nn.Sequential(\n Recurrent_block(ch_out,t=t),\n Recurrent_block(ch_out,t=t)\n )\n self.Conv_1x1 = nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=1,padding=0)\n\n def forward(self,x):\n #print(\"##########The x is ########\",np.shape(x))\n x = self.Conv_1x1(x)\n x1 = self.RCNN(x)\n return x1 #源码为x+x1\n\nclass R2U_Net(nn.Module):\n def __init__(self, img_ch=3, output_ch=1, t=2):\n super(R2U_Net, self).__init__()\n\n self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.Upsample = nn.Upsample(scale_factor=2)\n\n self.RRCNN1 = RRCNN_block(ch_in=img_ch, ch_out=64, t=t)\n\n self.RRCNN2 = RRCNN_block(ch_in=64, ch_out=128, t=t)\n\n self.RRCNN3 = RRCNN_block(ch_in=128, ch_out=256, t=t)\n\n self.RRCNN4 = RRCNN_block(ch_in=256, ch_out=512, t=t)\n\n self.RRCNN5 = RRCNN_block(ch_in=512, ch_out=1024, t=t)\n\n self.Up5 = up_conv(ch_in=1024, ch_out=512)\n self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512, t=t)\n\n self.Up4 = up_conv(ch_in=512, ch_out=256)\n self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256, t=t)\n\n self.Up3 = up_conv(ch_in=256, ch_out=128)\n self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128, t=t)\n\n self.Up2 = up_conv(ch_in=128, ch_out=64)\n self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64, t=t)\n\n self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n # encoding path\n #print(\"********The shape of x is *********\",np.shape(x)) #torch.Size([4, 3, 96, 96])\n x1 = self.RRCNN1(x)\n\n x2 = self.Maxpool(x1)\n x2 = self.RRCNN2(x2)\n\n x3 = self.Maxpool(x2)\n x3 = self.RRCNN3(x3)\n\n x4 = self.Maxpool(x3)\n x4 = self.RRCNN4(x4)\n\n x5 = self.Maxpool(x4)\n x5 = self.RRCNN5(x5)\n\n # decoding + concat path\n d5 = self.Up5(x5)\n d5 = torch.cat((x4, d5), dim=1)\n d5 = self.Up_RRCNN5(d5)\n\n d4 = self.Up4(d5)\n d4 = torch.cat((x3, d4), dim=1)\n d4 = self.Up_RRCNN4(d4)\n\n d3 = self.Up3(d4)\n d3 = torch.cat((x2, d3), dim=1)\n d3 = self.Up_RRCNN3(d3)\n\n d2 = self.Up2(d3)\n d2 = torch.cat((x1, d2), dim=1)\n d2 = self.Up_RRCNN2(d2)\n\n d1 = self.Conv_1x1(d2)\n\n return d1\n\"\"\"\n\n\"\"\"\n#############################\n#The code comes from the program of https://github.com/ZiyuanMa/U-Net/blob/master/model.py\n#############################\nclass RC_block(nn.Module):\n def __init__(self, channel, t=2):\n super().__init__()\n self.t = t\n\n self.conv = nn.Sequential(\n nn.Conv2d(channel, channel, 3, 1, 1),\n nn.BatchNorm2d(channel),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n r_x = self.conv(x)\n\n for _ in range(self.t):\n r_x = self.conv(x + r_x)\n\n return r_x\n\nclass RRC_block(nn.Module):\n def __init__(self, channel, t=2):\n super().__init__()\n\n self.RC_net = nn.Sequential(\n RC_block(channel, t=t),\n RC_block(channel, t=t),\n )\n\n def forward(self, x):\n res_x = self.RC_net(x)\n\n return x + res_x\n\nclass R2UNet(nn.Module):\n def __init__(self):\n super(R2UNet,self).__init__()\n\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, 3, 1, 1),\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n RRC_block(64),\n )\n\n self.conv2 = nn.Sequential(\n nn.MaxPool2d(2, stride=2),\n nn.Conv2d(64, 128, 3, 1, 1),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n RRC_block(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.MaxPool2d(2, stride=2),\n nn.Conv2d(128, 
256, 3, 1, 1),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n RRC_block(256),\n )\n\n self.conv4 = nn.Sequential(\n nn.MaxPool2d(2, stride=2),\n nn.Conv2d(256, 512, 3, 1, 1),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n RRC_block(512),\n )\n\n self.trans_conv = nn.Sequential(\n nn.MaxPool2d(2, stride=2),\n nn.Conv2d(512, 1024, 3, 1, 1),\n nn.BatchNorm2d(1024),\n nn.ReLU(True),\n RRC_block(1024),\n nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2),\n )\n\n self.up_conv1 = nn.Sequential(\n nn.Conv2d(1024, 512, 3, 1, 1),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n RRC_block(512),\n nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2),\n )\n\n self.up_conv2 = nn.Sequential(\n nn.Conv2d(512, 256, 3, 1, 1),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n RRC_block(256),\n nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2),\n )\n\n self.up_conv3 = nn.Sequential(\n nn.Conv2d(256, 128, 3, 1, 1),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n RRC_block(128),\n nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),\n )\n\n self.final_conv = nn.Sequential(\n nn.Conv2d(128, 64, 3, 1, 1),\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n RRC_block(64),\n nn.Conv2d(64, 1, 1),\n )\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x1 = self.conv1(x)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n x4 = self.conv4(x3)\n\n x = self.trans_conv(x4)\n\n x = self.up_conv1(torch.cat((x, x4), dim=1))\n x = self.up_conv2(torch.cat((x, x3), dim=1))\n x = self.up_conv3(torch.cat((x, x2), dim=1))\n x = self.final_conv(torch.cat((x, x1), dim=1))\n\n x = self.sigmoid(x)\n\n return x\n\"\"\""
] | [
[
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.bmm",
"torch.where",
"torch.sigmoid",
"torch.nn.Softmax",
"torch.nn.MaxPool2d",
"torch.zeros_like",
"torch.zeros",
"torch.nn.functional.relu6",
"torch.nn.AdaptiveAvgPool2d",
"torch.max",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.relu",
"torch.nn.functional.interpolate",
"torch.split",
"torch.nn.Upsample",
"torch.tanh"
]
] |
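The UNetRNN* classes above all push per-scale score maps through one shared RDC cell, carrying a hidden state from the 1/16-scale map up to full resolution. A minimal sketch of that coarse-to-fine recurrent-decoding pattern, assuming PyTorch; the cell name VanillaRDC and the bilinear upsampling step are illustrative stand-ins, not the repo's actual RDC implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class VanillaRDC(nn.Module):
    """Recurrent decoding cell: fuse the current scale's score map with
    the (upsampled) hidden state carried over from the coarser scale."""
    def __init__(self, n_classes, kernel_size=3):
        super().__init__()
        pad = kernel_size // 2
        # conv over [x_cur, h_pre_upsampled] concatenated along channels
        self.conv = nn.Conv2d(2 * n_classes, n_classes, kernel_size, padding=pad)

    def forward(self, x_cur, h_pre):
        # bring the coarser hidden state up to the current spatial size
        h_up = F.interpolate(h_pre, size=x_cur.shape[-2:], mode='bilinear',
                             align_corners=False)
        return torch.tanh(self.conv(torch.cat([x_cur, h_up], dim=1)))

rdc = VanillaRDC(n_classes=1)
# toy score maps at 1/4, 1/2 and full resolution (batch 2, 1 class)
x1, x2, x3 = (torch.randn(2, 1, s, s) for s in (16, 32, 64))
h = torch.zeros_like(x1)        # h0 starts at zero, like _init_cell_state
for x in (x1, x2, x3):          # coarse-to-fine decoding loop
    h = rdc(x_cur=x, h_pre=h)
print(h.shape)                  # torch.Size([2, 1, 64, 64])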
fratim/pfrl | [
"ddcdfbebf7aa55753beb5195edf8d571be7b862c"
] | [
"pfrl/experiments/train_agent_async.py"
] | [
"import logging\nimport os\nimport signal\nimport subprocess\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.multiprocessing as mp\nfrom torch import nn\n\nfrom pfrl.experiments.evaluator import AsyncEvaluator\nfrom pfrl.utils import async_, random_seed\n\n\ndef kill_all():\n if os.name == \"nt\":\n # windows\n # taskkill with /T kill all the subprocess\n subprocess.run([\"taskkill\", \"/F\", \"/T\", \"/PID\", str(os.getpid())])\n else:\n pgid = os.getpgrp()\n os.killpg(pgid, signal.SIGTERM)\n sys.exit(1)\n\n\ndef train_loop(\n process_idx,\n env,\n agent,\n steps,\n outdir,\n counter,\n episodes_counter,\n stop_event,\n exception_event,\n max_episode_len=None,\n evaluator=None,\n eval_env=None,\n successful_score=None,\n logger=None,\n global_step_hooks=[],\n):\n\n logger = logger or logging.getLogger(__name__)\n\n if eval_env is None:\n eval_env = env\n\n def save_model():\n if process_idx == 0:\n # Save the current model before being killed\n dirname = os.path.join(outdir, \"{}_except\".format(global_t))\n agent.save(dirname)\n logger.info(\"Saved the current model to %s\", dirname)\n\n try:\n\n episode_r = 0\n global_t = 0\n local_t = 0\n global_episodes = 0\n obs = env.reset()\n episode_len = 0\n successful = False\n\n while True:\n\n # a_t\n a = agent.act(obs)\n # o_{t+1}, r_{t+1}\n obs, r, done, info = env.step(a)\n local_t += 1\n episode_r += r\n episode_len += 1\n reset = episode_len == max_episode_len or info.get(\"needs_reset\", False)\n agent.observe(obs, r, done, reset)\n\n # Get and increment the global counter\n with counter.get_lock():\n counter.value += 1\n global_t = counter.value\n\n for hook in global_step_hooks:\n hook(env, agent, global_t)\n\n if done or reset or global_t >= steps or stop_event.is_set():\n if process_idx == 0:\n logger.info(\n \"outdir:%s global_step:%s local_step:%s R:%s\",\n outdir,\n global_t,\n local_t,\n episode_r,\n )\n logger.info(\"statistics:%s\", agent.get_statistics())\n\n # Evaluate the current agent\n if evaluator is not None:\n eval_score = evaluator.evaluate_if_necessary(\n t=global_t, episodes=global_episodes, env=eval_env, agent=agent\n )\n\n if (\n eval_score is not None\n and successful_score is not None\n and eval_score >= successful_score\n ):\n stop_event.set()\n successful = True\n # Break immediately in order to avoid an additional\n # call of agent.act_and_train\n break\n\n with episodes_counter.get_lock():\n episodes_counter.value += 1\n global_episodes = episodes_counter.value\n\n if global_t >= steps or stop_event.is_set():\n break\n\n # Start a new episode\n episode_r = 0\n episode_len = 0\n obs = env.reset()\n\n if process_idx == 0 and exception_event.is_set():\n logger.exception(\"An exception detected, exiting\")\n save_model()\n kill_all()\n\n except (Exception, KeyboardInterrupt):\n save_model()\n raise\n\n if global_t == steps:\n # Save the final model\n dirname = os.path.join(outdir, \"{}_finish\".format(steps))\n agent.save(dirname)\n logger.info(\"Saved the final agent to %s\", dirname)\n\n if successful:\n # Save the successful model\n dirname = os.path.join(outdir, \"successful\")\n agent.save(dirname)\n logger.info(\"Saved the successful agent to %s\", dirname)\n\n\ndef train_agent_async(\n outdir,\n processes,\n make_env,\n profile=False,\n steps=8 * 10 ** 7,\n eval_interval=10 ** 6,\n eval_n_steps=None,\n eval_n_episodes=10,\n eval_success_threshold=0.0,\n max_episode_len=None,\n step_offset=0,\n successful_score=None,\n agent=None,\n make_agent=None,\n global_step_hooks=[],\n 
evaluation_hooks=(),\n save_best_so_far_agent=True,\n use_tensorboard=False,\n logger=None,\n random_seeds=None,\n stop_event=None,\n exception_event=None,\n use_shared_memory=True,\n):\n \"\"\"Train agent asynchronously using multiprocessing.\n\n Either `agent` or `make_agent` must be specified.\n\n Args:\n outdir (str): Path to the directory to output things.\n processes (int): Number of processes.\n make_env (callable): (process_idx, test) -> Environment.\n profile (bool): Profile if set True.\n steps (int): Number of global time steps for training.\n eval_interval (int): Interval of evaluation. If set to None, the agent\n will not be evaluated at all.\n eval_n_steps (int): Number of eval timesteps at each eval phase\n eval_n_episodes (int): Number of eval episodes at each eval phase\n eval_success_threshold (float): r-threshold above which grasp succeeds\n max_episode_len (int): Maximum episode length.\n step_offset (int): Time step from which training starts.\n successful_score (float): Finish training if the mean score is greater\n or equal to this value if not None\n agent (Agent): Agent to train.\n make_agent (callable): (process_idx) -> Agent\n global_step_hooks (list): List of callable objects that accept\n (env, agent, step) as arguments. They are called every global\n step. See pfrl.experiments.hooks.\n evaluation_hooks (Sequence): Sequence of\n pfrl.experiments.evaluation_hooks.EvaluationHook objects. They are\n called after each evaluation.\n save_best_so_far_agent (bool): If set to True, after each evaluation,\n if the score (= mean return of evaluation episodes) exceeds\n the best-so-far score, the current agent is saved.\n use_tensorboard (bool): Additionally log eval stats to tensorboard\n logger (logging.Logger): Logger used in this function.\n random_seeds (array-like of ints or None): Random seeds for processes.\n If set to None, [0, 1, ..., processes-1] are used.\n stop_event (multiprocessing.Event or None): Event to stop training.\n If set to None, a new Event object is created and used internally.\n exception_event (multiprocessing.Event or None): Event that indicates\n other thread raised an exception. 
The train will be terminated and\n the current agent will be saved.\n If set to None, a new Event object is created and used internally.\n use_shared_memory (bool): Share memory amongst asynchronous agents.\n\n Returns:\n Trained agent.\n \"\"\"\n logger = logger or logging.getLogger(__name__)\n\n for hook in evaluation_hooks:\n if not hook.support_train_agent_async:\n raise ValueError(\"{} does not support train_agent_async().\".format(hook))\n\n # Prevent numpy from using multiple threads\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n counter = mp.Value(\"l\", 0)\n episodes_counter = mp.Value(\"l\", 0)\n\n if stop_event is None:\n stop_event = mp.Event()\n\n if exception_event is None:\n exception_event = mp.Event()\n\n if use_shared_memory:\n if agent is None:\n assert make_agent is not None\n agent = make_agent(0)\n\n # Move model and optimizer states in shared memory\n for attr in agent.shared_attributes:\n attr_value = getattr(agent, attr)\n if isinstance(attr_value, nn.Module):\n for k, v in attr_value.state_dict().items():\n v.share_memory_()\n elif isinstance(attr_value, torch.optim.Optimizer):\n for param, state in attr_value.state_dict()[\"state\"].items():\n assert isinstance(state, dict)\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n v.share_memory_()\n\n if eval_interval is None:\n evaluator = None\n else:\n evaluator = AsyncEvaluator(\n n_steps=eval_n_steps,\n n_episodes=eval_n_episodes,\n eval_interval=eval_interval,\n outdir=outdir,\n max_episode_len=max_episode_len,\n step_offset=step_offset,\n evaluation_hooks=evaluation_hooks,\n save_best_so_far_agent=save_best_so_far_agent,\n logger=logger,\n )\n if use_tensorboard:\n evaluator.start_tensorboard_writer(outdir, stop_event)\n\n if random_seeds is None:\n random_seeds = np.arange(processes)\n\n def run_func(process_idx):\n random_seed.set_random_seed(random_seeds[process_idx])\n\n env = make_env(process_idx, test=False)\n if evaluator is None:\n eval_env = env\n else:\n eval_env = make_env(process_idx, test=True)\n if make_agent is not None:\n local_agent = make_agent(process_idx)\n if use_shared_memory:\n for attr in agent.shared_attributes:\n setattr(local_agent, attr, getattr(agent, attr))\n else:\n local_agent = agent\n local_agent.process_idx = process_idx\n\n def f():\n train_loop(\n process_idx=process_idx,\n counter=counter,\n episodes_counter=episodes_counter,\n agent=local_agent,\n env=env,\n steps=steps,\n outdir=outdir,\n max_episode_len=max_episode_len,\n evaluator=evaluator,\n successful_score=successful_score,\n stop_event=stop_event,\n exception_event=exception_event,\n eval_env=eval_env,\n global_step_hooks=global_step_hooks,\n logger=logger,\n )\n\n if profile:\n import cProfile\n\n cProfile.runctx(\n \"f()\", globals(), locals(), \"profile-{}.out\".format(os.getpid())\n )\n else:\n f()\n\n env.close()\n if eval_env is not env:\n eval_env.close()\n\n async_.run_async(processes, run_func)\n\n stop_event.set()\n\n if evaluator is not None and use_tensorboard:\n evaluator.join_tensorboard_writer()\n\n return agent\n"
] | [
[
"torch.multiprocessing.Event",
"numpy.arange",
"torch.multiprocessing.Value"
]
] |
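Two small idioms carry most of train_agent_async: moving the model's tensors into shared memory so every worker updates the same parameters, and a locked mp.Value for the global step counter. Both in isolation, assuming PyTorch (a sketch; the actual process spawning via async_.run_async is omitted):

import torch.multiprocessing as mp
from torch import nn

model = nn.Linear(4, 2)
# share parameter/buffer storage across processes, as train_agent_async does
for v in model.state_dict().values():
    v.share_memory_()
print(all(v.is_shared() for v in model.state_dict().values()))  # True

# global step counter with the same lock-and-increment idiom as train_loop
counter = mp.Value("l", 0)
with counter.get_lock():
    counter.value += 1
    global_t = counter.value
print(global_t)  # 1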
SirCraftinator/Stock-Trading-Bot | [
"66156a3bac719d94bf9e917ebca9c127fed04994"
] | [
"Version-1/DataGathering/Open_Close/Trainers/KNN/knn_trainer.py"
] | [
"#this file includes info on converting irregular data\nimport sklearn\nfrom sklearn.utils import shuffle\nfrom sklearn.neighbors import KNeighborsRegressor\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model, preprocessing\nimport pickle\n\npath = '/Users/oceanhawk/Documents/Python/Stock-Trading-Bots/Version-1/DataGathering/Open_Close/Trainers/data.csv'\ndata = pd.read_csv(path,sep=';')\nprint(data.head())\n\n#best = 2\nneighbors = 2\n#preprocessing.LabelEncoder() is the object that will automaticall convert string values to numbers\nle = preprocessing.LabelEncoder()\nprint(\"Finished Preprocessing\")\n#can also be done with preprocessing.LabelEncoder().fit_transform()\ndata = data[['Ticker','Strength','Trend','OC_Average','Actual_Increase']]\nstrength = data['Strength']\ntrend = data['Trend']\noc = data['OC_Average']\nprint(\"Converted data\")\n\npredict = \"Actual_Increase\"\n\nx = list(zip(strength,trend,oc))\ny = list(data[predict])\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size = 0.1)\n\nmodel = KNeighborsRegressor(n_neighbors=neighbors)\n\nbest = 0\nfor i in range(1000):\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size = 0.2)\n model = KNeighborsRegressor(n_neighbors=neighbors)\n model.fit(x_train,y_train)\n \n acc = model.score(x_test,y_test)\n print(acc)\n\n if acc > best:\n best = acc\n with open('data.pickle', 'wb') as f:\n pickle.dump(model,f)\n\nprint(\"---\")\nprint(best)\npredicted = model.predict(x_test)\nfor x in range(len(x_test)):\n print('Predicted: ', predicted[x], \"Data: \", x_test[x], \"Actual: \", y_test[x])\n \n"
] | [
[
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsRegressor"
]
] |
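Because the training loop above pickles the best-scoring model but then predicts with whatever the last iteration produced, the saved artifact is the one worth reloading. A sketch of that reload step on synthetic data, keeping the same data.pickle filename; the shapes and the stand-in features here are illustrative only:

import pickle
import numpy as np
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 3))            # stand-ins for strength/trend/oc
y = x.sum(axis=1) + rng.normal(scale=0.1, size=200)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

model = KNeighborsRegressor(n_neighbors=2).fit(x_train, y_train)
with open('data.pickle', 'wb') as f:
    pickle.dump(model, f)

# later: predict with the persisted best model, not the last-trained one
with open('data.pickle', 'rb') as f:
    best_model = pickle.load(f)
print(best_model.score(x_test, y_test))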
dpukhkaiev/BRISE2 | [
"647ad6d7cc5f91c188aa45e403d9c1a33a7fe947"
] | [
"main_node/model/predictor.py"
] | [
"import copy\nimport logging\nfrom collections import OrderedDict\nfrom typing import List, Mapping\n\nimport pandas as pd\nfrom core_entities.configuration import Configuration\nfrom core_entities.search_space import Hyperparameter\nfrom model.model_selection import get_model\n\n\nclass Predictor:\n \"\"\"\n This class abstract notion of prediction within tree-shaped search space from the underlying models.\n The underlying models see only the current level with related data that is going to be operated in it,\n no other level data exposed for it.\n\n Responsibilities:\n - hide structure of tree-shaped search space.\n - provide data and data description for underlying models about current level.\n - select underlying model for each level\n \"\"\"\n\n def __init__(self, experiment_id: str, experiment_description: Mapping, search_space: Hyperparameter):\n self.experiment_id = experiment_id\n self.predictor_config = experiment_description[\"Predictor\"]\n self.task_config = experiment_description[\"TaskConfiguration\"]\n self.search_space = search_space\n self.logger = logging.getLogger(__name__)\n\n def predict(self, measured_configurations: List[Configuration]) -> Configuration:\n \"\"\"\n Predict next Configuration using already evaluated configurations.\n Prediction is a construction process and it is done in iterations.\n It stops after constructing the valid Configuration within the Search Space.\n Each iteration uncovers and predicts new Hyperparameters deeper in the Search Space.\n\n :param measured_configurations: a list of already measured Configurations that will be used to make a\n prediction.\n :return: Configuration that is going to be measured.\n\n Question: need to transfer data from the previous level? (that was fixed and will not be changed)\n - more no than yes.\n for no:\n - less data to (pre)process - less dimensions\n - less ad-hoc solutions on \"how to differentiate in data???\" - simply predict over all dimensions,\n other are targets.\n for yes:\n - models will be more accurate (?)\n \"\"\"\n level = -1\n parameters = OrderedDict()\n # Select the latest Configurations, according to the window size\n if isinstance(self.predictor_config[\"window size\"], int):\n number_of_configs_to_consider = self.predictor_config[\"window size\"]\n else:\n # meaning self.window_size, float)\n number_of_configs_to_consider = \\\n int(round(self.predictor_config[\"window size\"] * len(measured_configurations)))\n level_configs = measured_configurations[len(measured_configurations) - number_of_configs_to_consider:]\n\n # Check if entire configuration is valid now.\n while not self.search_space.validate(parameters, is_recursive=True):\n level += 1 # it is here because of 'continue'\n\n # 1. Filter Configurations.\n level_configs = list(filter(\n lambda x: self.search_space.are_siblings(parameters, x.parameters), # Filter\n level_configs # Input data for filter\n ))\n\n if not level_configs:\n # If there is no data on current level, just use random sampling\n self.search_space.generate(parameters)\n continue\n\n # 2. Derive which parameters will be predicted on this level:\n # - by expanding the parameters from previous level to this level\n # - by removing information from the previous level(s)\n dummy = copy.deepcopy(parameters)\n self.search_space.generate(dummy)\n description = self.search_space.describe(dummy)\n for hyperparameter in parameters:\n del description[hyperparameter]\n\n # 4. Select and build model, predict parameters for this level\n # 4.1. 
Select and create model from ED\n # 4.2. Transform Configurations into Pandas DataFrame keeping only relevant for this level information,\n # split features and labels\n # 4.3. Build model\n # 4.4. Make a prediction as PD DataFrame or None\n # 4.5. Validate a prediction: results could be out of bound or more sophisticated cases (in future)\n\n # 4.1.\n model_parameters = \\\n self.predictor_config[\"models\"][level if len(self.predictor_config[\"models\"]) > level else -1]\n model = get_model(model_parameters)\n\n # 4.2.\n feature_columns = list(description.keys())\n highest_priority_objective_index = self.task_config[\"ObjectivesPrioritiesModels\"]\\\n .index(max(self.task_config[\"ObjectivesPrioritiesModels\"]))\n\n highest_priority_objective = self.task_config[\"Objectives\"][highest_priority_objective_index]\n\n data = pd.DataFrame(\n [cfg.to_series()[feature_columns + [highest_priority_objective]] for cfg in level_configs])\n\n features = pd.DataFrame(data[feature_columns])\n labels = pd.DataFrame(data[highest_priority_objective])\n\n # 4.3\n is_minimization = self.task_config[\"ObjectivesMinimization\"][highest_priority_objective_index]\n model.build_model(features, labels, description, is_minimization)\n # 4.4\n if model.is_built:\n pd_prediction = model.predict()\n prediction = pd_prediction.to_dict(orient=\"records\")\n if len(prediction) > 1:\n self.logger.warning(f\"Model predicted more than 1 parameters set. \"\n f\"Only first valid will be used{prediction[0]}.\")\n # 4.5\n valid_prediction_found = False\n for predicted_hyperparameters in prediction:\n valid_prediction_found = True\n for hyperparameter_name in description.keys():\n hyperparameter = description[hyperparameter_name][\"hyperparameter\"]\n # Validation should be encapsulated if more sophisticated approaches arise.\n if not hyperparameter.validate(predicted_hyperparameters, is_recursive=False):\n valid_prediction_found = False\n break\n if valid_prediction_found:\n break\n else:\n continue\n\n if not valid_prediction_found:\n self.logger.warning(\"Model did not predict valid hyperparameter set. Sampling random.\")\n self.search_space.generate(parameters)\n else:\n if any((h_name in parameters for h_name in predicted_hyperparameters)):\n raise ValueError(f\"Previously selected hyperparameters should not be altered! \"\n f\"Previous: {parameters}. This level: {predicted_hyperparameters}\")\n parameters.update(predicted_hyperparameters)\n else:\n self.logger.debug(\n f\"{model_parameters['Type']} model was not build to predict hyperparameters: {list(description.keys())}. \"\n f\"Random values will be sampled.\")\n self.search_space.generate(parameters)\n\n return Configuration(parameters, Configuration.Type.PREDICTED, self.experiment_id)\n"
] | [
[
"pandas.DataFrame"
]
] |
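Predictor.predict starts by trimming the measured history to a sliding window whose size is either an absolute count (int) or a fraction of all measured configurations (float). That branch is easy to get wrong, so here it is isolated as a plain function; the name select_window is ours, not BRISE's:

def select_window(measured, window_size):
    # int -> absolute number of latest configurations,
    # float -> fraction of all measured configurations (rounded)
    if isinstance(window_size, int):
        n = window_size
    else:
        n = int(round(window_size * len(measured)))
    return measured[len(measured) - n:]

history = list(range(10))
print(select_window(history, 3))    # [7, 8, 9]
print(select_window(history, 0.5))  # [5, 6, 7, 8, 9]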
tianjiansmile/Chinese-Text-Classification-Pytorch | [
"05cc211b161f61e6bb32ab185dadcffec2f5b5de"
] | [
"run.py"
] | [
"# coding: UTF-8\nimport time\nimport torch\nimport numpy as np\nfrom train_eval import train, init_network\nfrom importlib import import_module\nimport argparse\n\nparser = argparse.ArgumentParser(description='Chinese Text Classification')\nparser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')\nparser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')\nparser.add_argument('--word', default=False, type=bool, help='True for word, False for char')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n dataset = 'THUCNews' # 数据集\n\n # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random\n embedding = 'embedding_SougouNews.npz'\n if args.embedding == 'random':\n embedding = 'random'\n model_name = args.model # 'TextRCNN' # TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer\n if model_name == 'FastText':\n from utils_fasttext import build_dataset, build_iterator, get_time_dif\n embedding = 'random'\n else:\n from utils import build_dataset, build_iterator, get_time_dif\n\n x = import_module('models.' + model_name)\n config = x.Config(dataset, embedding)\n np.random.seed(1)\n torch.manual_seed(1)\n torch.cuda.manual_seed_all(1)\n torch.backends.cudnn.deterministic = True # 保证每次结果一样\n\n start_time = time.time()\n print(\"Loading data...\")\n # 将文本对应的词向量下标映射出来\n vocab, train_data, dev_data, test_data = build_dataset(config, args.word)\n train_iter = build_iterator(train_data, config)\n dev_iter = build_iterator(dev_data, config)\n test_iter = build_iterator(test_data, config)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n # train\n config.n_vocab = len(vocab)\n model = x.Model(config).to(config.device)\n if model_name != 'Transformer':\n init_network(model)\n print(model.parameters)\n train(config, model, train_iter, dev_iter, test_iter)\n"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all"
]
] |
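run.py pins every RNG it touches before building the model, the standard recipe for reproducible PyTorch runs. The same block in isolation (a sketch; cudnn.deterministic trades speed for determinism, and full determinism additionally depends on library versions and hardware):

import numpy as np
import torch

def set_seed(seed=1):
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)           # no-op without a GPU
    torch.backends.cudnn.deterministic = True  # reproducible cudnn kernels

set_seed(1)
a = torch.randn(3)
set_seed(1)
b = torch.randn(3)
print(torch.equal(a, b))  # True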
danbailo/LogisticRegression | [
"6c22f3c0c28f353527ee6ac0af6ec0d9087f90fd"
] | [
"src/utils.py"
] | [
"import numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw \nimport glob\n\ndef get_data(directory, class_):\n\tX = []\n\tY = []\n\tfor image_data in glob.glob(directory):\n\t\timg = np.asarray(Image.open(image_data))\n\t\timg = np.reshape(img, -1)\n\t\tX.append(img)\n\t\tif class_ == \"cat\":\n\t\t\tY.append(1)\n\t\telif class_ == \"noncat\":\n\t\t\tY.append(0)\n\treturn X, Y\n\ndef save_img(img_np, file_name, predict):\n\timg = Image.fromarray(img_np[:,:,:])\n\n\tdraw = ImageDraw.Draw(img)\n\tfillcolor = \"black\"\n\tshadowcolor = \"white\"\n\tx, y = 1, 1\n\n\ttext = \"{:.3f}\".format(predict)\n\n\t# thin border\n\tdraw.text((x-1, y), text, fill=shadowcolor)\n\tdraw.text((x+1, y), text, fill=shadowcolor)\n\tdraw.text((x, y-1), text, fill=shadowcolor)\n\tdraw.text((x, y+1), text, fill=shadowcolor)\n\n\t# thicker border\n\tdraw.text((x-1, y-1), text, fill=shadowcolor)\n\tdraw.text((x+1, y-1), text, fill=shadowcolor)\n\tdraw.text((x-1, y+1), text, fill=shadowcolor)\n\tdraw.text((x+1, y+1), text, fill=shadowcolor)\n\n\tdraw.text((x, y), text, fill=fillcolor)\n\n\timg.save(file_name)"
] | [
[
"numpy.reshape"
]
] |
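save_img fakes an outlined label by redrawing the text at eight one-pixel offsets in the border color before drawing it once in the fill color. A compressed version of the same trick, assuming Pillow and its default bitmap font; as an aside, recent Pillow versions also offer stroke_width/stroke_fill on draw.text, which achieves the same effect directly:

from PIL import Image, ImageDraw

img = Image.new('RGB', (64, 24), 'gray')
draw = ImageDraw.Draw(img)
x, y, text = 1, 1, '0.937'
# the eight offsets = thin + thick border collapsed into one loop
for dx in (-1, 0, 1):
    for dy in (-1, 0, 1):
        if (dx, dy) != (0, 0):
            draw.text((x + dx, y + dy), text, fill='white')
draw.text((x, y), text, fill='black')
img.save('labeled.png')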
VIEW2020/varianz2012 | [
"3d055ffd2b259275d93b0f862d319fc23035226f"
] | [
"code/lib/utils.py"
] | [
"'''\r\nMay 2020 by Sebastiano Barbieri\r\[email protected]\r\nhttps://www.github.com/sebbarb/\r\n'''\r\n\r\nimport numpy as np\r\nimport math\r\nimport pandas as pd\r\nimport pickle as pkl\r\nfrom scipy.stats import f\r\n\r\nfrom pdb import set_trace as bp\r\n\r\n\r\ndef save_obj(obj, name):\r\n with open(name, 'wb') as f:\r\n pkl.dump(obj, f, pkl.HIGHEST_PROTOCOL)\r\n\r\n\r\ndef load_obj(name):\r\n with open(name, 'rb') as f:\r\n return pkl.load(f)\r\n\r\n\r\ndef log(hp, r2, d_index, concordance, ibs, auc):\r\n df = pd.DataFrame({'model_name': hp.model_name,\r\n 'np_seed': hp.np_seed,\r\n 'torch_seed': hp.torch_seed,\r\n 'batch_size': hp.batch_size,\r\n 'max_epochs': hp.max_epochs,\r\n 'num_months_hx': hp.num_months_hx,\r\n 'r2': r2,\r\n 'd_index': d_index,\r\n 'concordance': concordance,\r\n 'ibs': ibs,\r\n 'auc': auc},\r\n index=[0])\r\n with open(hp.data_dir + 'logfile.csv', 'a', newline='\\n') as f:\r\n df.to_csv(f, mode='a', index=False, header=(not f.tell()))\r\n \r\n \r\ndef robust_cv_test(res_a, res_b):\r\n # Combined 5x2cv F Test for Comparing SupervisedClassification Learning Algorithms\r\n # https://www.cmpe.boun.edu.tr/~ethem/files/papers/NC110804.PDF\r\n # res_a and res_b are the results of two classifiers with shape num_folds x 2\r\n assert res_a.shape == res_b.shape, 'The two arrays should have equal dimensions'\r\n assert res_a.shape[1] == 2, 'Dimension 1 should be 2 for both arrays'\r\n num_folds = res_a.shape[0]\r\n \r\n diff = res_a - res_b\r\n diff_fold = diff.mean(axis=1, keepdims=True)\r\n var = ((diff - diff_fold)**2).sum(axis=1)\r\n f_val = (diff**2).sum()/(2*var.sum())\r\n p_val = f.sf(f_val, 2*num_folds, num_folds)\r\n \r\n return p_val\r\n \r\n \r\n\r\n\r\n\r\n"
] | [
[
"pandas.DataFrame",
"scipy.stats.f.tell",
"scipy.stats.f.sf"
]
] |
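robust_cv_test implements the combined 5x2cv F test: with per-fold differences d_ij, fold means, and fold variances s_i^2 = sum_j (d_ij - mean_i)^2, the statistic f = sum_ij d_ij^2 / (2 * sum_i s_i^2) follows F(2k, k) under the null. A quick numeric check of that formula, assuming the usual k = 5 repetitions; the inputs are random, so the printed p-value is illustrative only:

import numpy as np
from scipy.stats import f

def combined_5x2cv_f_test(res_a, res_b):
    # res_a, res_b: num_folds x 2 scores (two halves per repetition)
    diff = res_a - res_b
    fold_mean = diff.mean(axis=1, keepdims=True)
    var = ((diff - fold_mean) ** 2).sum(axis=1)
    f_val = (diff ** 2).sum() / (2 * var.sum())
    k = res_a.shape[0]
    return f.sf(f_val, 2 * k, k)   # survival function = p-value

rng = np.random.default_rng(0)
res_a = rng.uniform(0.80, 0.85, size=(5, 2))
res_b = rng.uniform(0.80, 0.85, size=(5, 2))
print(combined_5x2cv_f_test(res_a, res_b))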
bergkvist/pandapower | [
"630e3278ca012535f78282ae73f1b86f3fe932fc"
] | [
"pandapower/pypower/pfsoln.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\n\"\"\"Updates bus, gen, branch data structures to match power flow soln.\n\"\"\"\n\nfrom numpy import asarray, angle, pi, conj, zeros, ones, finfo, c_, ix_, real, flatnonzero as find, \\\n setdiff1d, intersect1d\nfrom scipy.sparse import csr_matrix\n\nfrom pandapower.pypower.idx_brch import F_BUS, T_BUS, BR_STATUS, PF, PT, QF, QT\nfrom pandapower.pypower.idx_bus import VM, VA, PD, QD\nfrom pandapower.pypower.idx_gen import GEN_BUS, GEN_STATUS, PG, QG, QMIN, QMAX\n\nEPS = finfo(float).eps\n\n\ndef pfsoln(baseMVA, bus0, gen0, branch0, Ybus, Yf, Yt, V, ref, ref_gens, Ibus=None):\n \"\"\"Updates bus, gen, branch data structures to match power flow soln.\n\n @author: Ray Zimmerman (PSERC Cornell)\n @author: Richard Lincoln\n \"\"\"\n # initialize return values\n bus = bus0\n gen = gen0\n branch = branch0\n\n # ----- update Qg for all gens and Pg for slack bus(es) -----\n # generator info\n on = find(gen[:, GEN_STATUS] > 0) # which generators are on?\n gbus = gen[on, GEN_BUS].astype(int) # what buses are they at?\n\n # compute total injected bus powers\n Ibus = zeros(len(V)) if Ibus is None else Ibus\n Sbus = V[gbus] * conj(Ybus[gbus, :] * V - Ibus[gbus])\n\n _update_v(bus, V)\n _update_q(baseMVA, bus, gen, gbus, Sbus, on)\n _update_p(baseMVA, bus, gen, ref, gbus, on, Sbus, ref_gens)\n\n # ----- update/compute branch power flows -----\n out = find(branch[:, BR_STATUS] == 0) # out-of-service branches\n br = find(branch[:, BR_STATUS]).astype(int) # in-service branches\n\n if len(out):\n raise RuntimeError\n # complex power at \"from\" bus\n Sf = V[real(branch[br, F_BUS]).astype(int)] * conj(Yf[br, :] * V) * baseMVA\n # complex power injected at \"to\" bus\n St = V[real(branch[br, T_BUS]).astype(int)] * conj(Yt[br, :] * V) * baseMVA\n branch[ix_(br, [PF, QF, PT, QT])] = c_[Sf.real, Sf.imag, St.real, St.imag]\n branch[ix_(out, [PF, QF, PT, QT])] = zeros((len(out), 4))\n\n return bus, gen, branch\n\n\ndef _update_v(bus, V):\n # ----- update bus voltages -----\n bus[:, VM] = abs(V)\n bus[:, VA] = angle(V) * 180. / pi\n\n\ndef _update_p(baseMVA, bus, gen, ref, gbus, on, Sbus, ref_gens):\n # update Pg for slack bus(es)\n # inj P + local Pd\n for slack_bus in ref:\n gens_at_bus = find(gbus == slack_bus) # which is(are) the reference gen(s)?\n p_bus = Sbus[gens_at_bus[0]].real * baseMVA + bus[slack_bus, PD]\n if len(gens_at_bus) > 1: # more than one generator at this ref bus\n # subtract off what is generated by other gens at this bus\n ext_grids = intersect1d(gens_at_bus, ref_gens)\n pv_gens = setdiff1d(gens_at_bus, ext_grids)\n p_ext_grids = p_bus - sum(gen[pv_gens, PG])\n gen[ext_grids, PG] = p_ext_grids / len(ext_grids)\n else:\n gen[on[gens_at_bus[0]], PG] = p_bus\n\n\ndef _update_q(baseMVA, bus, gen, gbus, Sbus, on):\n # update Qg for all generators\n gen[:, QG] = zeros(gen.shape[0]) # zero out all Qg\n gen[on, QG] = Sbus.imag * baseMVA + bus[gbus, QD] # inj Q + local Qd\n # ... at this point any buses with more than one generator will have\n # the total Q dispatch for the bus assigned to each generator. This\n # must be split between them. 
We do it first equally, then in proportion\n # to the reactive range of the generator.\n\n if len(on) > 1:\n # build connection matrix, element i, j is 1 if gen on(i) at bus j is ON\n nb = bus.shape[0]\n ngon = on.shape[0]\n Cg = csr_matrix((ones(ngon), (range(ngon), gbus)), (ngon, nb))\n\n # divide Qg by number of generators at the bus to distribute equally\n ngg = Cg * Cg.sum(0).T # ngon x 1, number of gens at this gen's bus\n ngg = asarray(ngg).flatten() # 1D array\n gen[on, QG] = gen[on, QG] / ngg\n\n # divide proportionally\n Cmin = csr_matrix((gen[on, QMIN], (range(ngon), gbus)), (ngon, nb))\n Cmax = csr_matrix((gen[on, QMAX], (range(ngon), gbus)), (ngon, nb))\n Qg_tot = Cg.T * gen[on, QG] # nb x 1 vector of total Qg at each bus\n Qg_min = Cmin.sum(0).T # nb x 1 vector of min total Qg at each bus\n Qg_max = Cmax.sum(0).T # nb x 1 vector of max total Qg at each bus\n Qg_min = asarray(Qg_min).flatten() # 1D array\n Qg_max = asarray(Qg_max).flatten() # 1D array\n # gens at buses with Qg range = 0\n ig = find(Cg * Qg_min == Cg * Qg_max)\n Qg_save = gen[on[ig], QG]\n gen[on, QG] = gen[on, QMIN] + (Cg * ((Qg_tot - Qg_min) / (Qg_max - Qg_min + EPS))) * \\\n (gen[on, QMAX] - gen[on, QMIN]) # ^ avoid div by 0\n gen[on[ig], QG] = Qg_save # (terms are mult by 0 anyway)\n"
] | [
[
"numpy.angle",
"numpy.setdiff1d",
"numpy.zeros",
"numpy.asarray",
"numpy.ones",
"numpy.real",
"numpy.ix_",
"numpy.finfo",
"numpy.conj",
"numpy.intersect1d",
"numpy.flatnonzero"
]
] |
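_update_q first splits a bus's total Q equally among its generators, then redistributes it in proportion to each generator's reactive range QMAX - QMIN, with an EPS guard for zero-range buses. The arithmetic on a single bus, stripped of the sparse connection matrices (a sketch with made-up numbers):

import numpy as np

EPS = np.finfo(float).eps

# two generators on one bus
qmin = np.array([-10.0, -5.0])
qmax = np.array([30.0, 5.0])
q_tot = 20.0                      # total Q dispatch assigned to the bus

qg_min, qg_max = qmin.sum(), qmax.sum()
# each gen gets its QMIN plus its share of the remaining range
qg = qmin + (q_tot - qg_min) / (qg_max - qg_min + EPS) * (qmax - qmin)
print(qg, qg.sum())               # [18. 2.] -- shares sum back to q_tot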
mastafaMicrosoft/scattertext | [
"6a9b6b85525bc25dec75c4767668881224dd5612"
] | [
"scattertext/termcompaction/AssociationCompactor.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom scipy.stats import rankdata\n\nfrom scattertext.termranking.AbsoluteFrequencyRanker import AbsoluteFrequencyRanker\nfrom scattertext.termscoring.ScaledFScore import ScaledFScorePresetsNeg1To1\n\n\nclass TermCategoryRanker(object):\n def __init__(self,\n scorer=ScaledFScorePresetsNeg1To1,\n term_ranker=AbsoluteFrequencyRanker,\n use_non_text_features=False):\n self.scorer = scorer\n self.term_ranker = term_ranker\n self.use_non_text_features = use_non_text_features\n\n def get_rank_df(self, term_doc_matrix):\n # tdf = term_doc_matrix.get_term_freq_df('')\n ranker = self.term_ranker(term_doc_matrix)\n if self.use_non_text_features:\n ranker = ranker.use_non_text_features()\n tdf = ranker.get_ranks('')\n tdf_sum = tdf.sum(axis=1)\n score_data = {}\n for category in term_doc_matrix.get_categories():\n score_data[category] = self.scorer().get_scores(tdf[category], tdf_sum - tdf[category])\n return pd.DataFrame(score_data, index=tdf.index).apply(lambda x: rankdata(x, 'dense'))\n\n def get_max_rank(self, term_doc_matrix):\n '''\n\n :param term_doc_matrix: TermDocMatrix\n :return: int\n '''\n rank_df = self.get_rank_df(term_doc_matrix)\n return rank_df.max().max()\n\n\nclass BaseAssociationCompactor(object):\n def __init__(self,\n scorer=ScaledFScorePresetsNeg1To1,\n term_ranker=AbsoluteFrequencyRanker,\n use_non_text_features=False):\n self.scorer = TermCategoryRanker(scorer, term_ranker, use_non_text_features)\n\n def _prune_higher_ranked_terms(self, term_doc_matrix, rank_df, rank):\n term_to_remove = rank_df.index[np.isnan(rank_df[rank_df <= rank])\n .apply(lambda x: all(x), axis=1)]\n return self._remove_terms(term_doc_matrix, term_to_remove)\n\n def _remove_terms(self, term_doc_matrix, term_to_remove):\n return term_doc_matrix.remove_terms(term_to_remove, non_text=self.scorer.use_non_text_features)\n\nclass JSDCompactor(BaseAssociationCompactor):\n def __init__(self,\n max_terms,\n term_ranker=AbsoluteFrequencyRanker,\n use_non_text_features=False):\n self.max_terms = max_terms\n BaseAssociationCompactor.__init__(self, term_ranker=term_ranker, use_non_text_features=use_non_text_features)\n\n def compact(self, term_doc_matrix, verbose=False):\n rank_df = self.scorer.get_rank_df(term_doc_matrix)\n p_df = rank_df/rank_df.sum(axis=0) + 0.001\n m = p_df.sum(axis=1)\n def lg(x): return np.log(x) / np.log(2)\n rank_df['Score'] = m * lg(1/m) - (p_df * lg(1/p_df)).sum(axis=1)\n terms_to_remove = rank_df.sort_values(\n by='Score', ascending=False\n ).iloc[self.max_terms:].index\n return term_doc_matrix.remove_terms(terms_to_remove, self.scorer.use_non_text_features)\n\nclass AssociationCompactor(BaseAssociationCompactor):\n def __init__(self,\n max_terms,\n scorer=ScaledFScorePresetsNeg1To1,\n term_ranker=AbsoluteFrequencyRanker,\n use_non_text_features=False):\n self.max_terms = max_terms\n BaseAssociationCompactor.__init__(self, scorer, term_ranker, use_non_text_features)\n\n def compact(self, term_doc_matrix, verbose=False):\n '''\n Parameters\n ----------\n term_doc_matrix : TermDocMatrix\n Term document matrix object to compact\n Returns\n -------\n New term doc matrix\n '''\n rank_df = self.scorer.get_rank_df(term_doc_matrix)\n optimal_rank = self._find_optimal_rank(rank_df)\n\n compacted_term_doc_matrix = self._prune_higher_ranked_terms(term_doc_matrix, rank_df, optimal_rank)\n if verbose:\n print('max terms', self.max_terms, 'optimal_rank', optimal_rank,\n 'num_terms', compacted_term_doc_matrix.get_num_terms())\n return 
compacted_term_doc_matrix\n\n def _get_num_terms_at_rank(self, rank_i, rank_df):\n return sum(np.isnan(rank_df[rank_df <= rank_i]).apply(lambda x: not all(x), axis=1))\n\n def _find_optimal_rank(self, ranks_df):\n max_rank = ranks_df.max().max()\n min_rank = 1\n last_max_rank = None\n last_min_rank = None\n while max_rank - 1 > min_rank:\n if last_max_rank is not None:\n if last_min_rank == min_rank and last_max_rank == max_rank:\n raise Exception(\"Error. Potential infinite loop detected.\")\n last_max_rank = max_rank\n last_min_rank = min_rank\n cur_rank = int((max_rank - min_rank) / 2) + min_rank\n num_terms = self._get_num_terms_at_rank(cur_rank, ranks_df)\n if num_terms > self.max_terms:\n max_rank = cur_rank\n elif num_terms < self.max_terms:\n min_rank = cur_rank\n else:\n return cur_rank\n return min_rank\n\n\nclass AssociationCompactorByRank(BaseAssociationCompactor):\n def __init__(self,\n rank,\n scorer=ScaledFScorePresetsNeg1To1,\n term_ranker=AbsoluteFrequencyRanker,\n use_non_text_features=False):\n self.rank = rank\n BaseAssociationCompactor.__init__(self, scorer, term_ranker, use_non_text_features)\n\n def compact(self, term_doc_matrix):\n '''\n Parameters\n ----------\n term_doc_matrix : TermDocMatrix\n Term document matrix object to compact\n Returns\n -------\n TermDocMatrix\n\n\n '''\n rank_df = self.scorer.get_rank_df(term_doc_matrix)\n return self._prune_higher_ranked_terms(term_doc_matrix, rank_df, self.rank)\n"
] | [
[
"pandas.DataFrame",
"scipy.stats.rankdata",
"numpy.isnan",
"numpy.log"
]
] |
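_find_optimal_rank is a plain bisection over rank thresholds: the number of terms kept is monotonically non-decreasing in the rank cutoff, so halving the [min_rank, max_rank] interval homes in on max_terms. The same search against a toy monotone count function (a sketch; num_terms_at stands in for _get_num_terms_at_rank):

def find_optimal_rank(max_rank, max_terms, num_terms_at):
    min_rank = 1
    while max_rank - 1 > min_rank:
        cur = (max_rank - min_rank) // 2 + min_rank
        n = num_terms_at(cur)
        if n > max_terms:
            max_rank = cur   # too many terms kept: tighten the cutoff
        elif n < max_terms:
            min_rank = cur   # too few terms kept: loosen the cutoff
        else:
            return cur
    return min_rank

# toy: keeping ranks <= r retains 7*r terms, so 350 terms => rank 50
print(find_optimal_rank(max_rank=1000, max_terms=350,
                        num_terms_at=lambda r: 7 * r))  # 50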
IIT-PAVIS/Acoustic-Image-Generation | [
"a31c32ed6c3fe96d82b715833b7d32c87575e62b"
] | [
"decodeimagesacresnet.py"
] | [
"from datetime import datetime\nfrom dataloader.outdoor_data_mfcc import ActionsDataLoader as SoundDataLoader\nfrom dataloader.actions_data_old import ActionsDataLoader\nfrom models.unet_acresnet import UNetAc\nfrom models.vision import ResNet50Model\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt\n\nflags = tf.app.flags\nslim = tf.contrib.slim\n\nflags.DEFINE_string('model', None, 'Model type, it can AudioCoeff')\nflags.DEFINE_string('datatype', 'outdoor', 'music or outdoor or old')\nflags.DEFINE_string('train_file', None, 'File for training data')\nflags.DEFINE_string('init_checkpoint', None, 'Checkpoint file for model initialization')\nflags.DEFINE_integer('batch_size', 2, 'Batch size choose')\nflags.DEFINE_integer('sample_length', 1, 'Length in seconds of a sequence sample')\nFLAGS = flags.FLAGS\n\n'''Plot reconstructed MFCC'''\n\ndef main(_):\n\n plotdecodeimages()\n\ndef plotdecodeimages():\n\n dataset = FLAGS.train_file.split('/')[-1]\n dataset = dataset.split('.')[0]\n\n s = FLAGS.init_checkpoint.split('/')[-1]\n name = (s.split('_')[1]).split('.ckpt')[0]\n\n name = '{}_{}_{}_{}'.format(FLAGS.model, dataset, 'Acoustic', name)\n data_dir = str.join('/', FLAGS.init_checkpoint.split('/')[:-1] + [name])\n random_pick = True\n build_spectrogram = True\n normalize = False\n\n # Create data loaders according to the received program arguments\n print('{} - Creating data loaders'.format(datetime.now()))\n modalities = []\n\n modalities.append(0)\n modalities.append(1)\n modalities.append(2)\n\n with tf.device('/cpu:0'):\n if FLAGS.datatype == 'old':\n train_data = ActionsDataLoader(FLAGS.train_file, 'testing', batch_size=FLAGS.batch_size, num_epochs=1, sample_length=1,\n datakind=FLAGS.datatype, buffer_size=10, shuffle=False, embedding=1,\n normalize=normalize, build_spectrogram=build_spectrogram, correspondence=0,\n random_pick=random_pick, modalities=modalities, nr_frames=1)\n elif FLAGS.datatype == 'outdoor':\n train_data = SoundDataLoader(FLAGS.train_file, 'testing', batch_size=FLAGS.batch_size, num_epochs=1, sample_length=1,\n datakind=FLAGS.datatype, buffer_size=10, shuffle=False, embedding=1,\n normalize=normalize, build_spectrogram=build_spectrogram, correspondence=0,\n random_pick=random_pick, modalities=modalities, nr_frames=1)\n\n # Build model\n print('{} - Building model'.format(datetime.now()))\n\n with tf.device('/gpu:0'):\n\n model = UNetAc(input_shape=[36, 48, 12])\n model_video = ResNet50Model(input_shape=[224, 298, 3], num_classes=None)\n\n handle = tf.placeholder(tf.string, shape=())\n iterator = tf.data.Iterator.from_string_handle(handle, train_data.data.output_types,\n train_data.data.output_shapes)\n train_iterat = train_data.data.make_initializable_iterator()\n next_batch = iterator.get_next()\n\n mfcc = tf.reshape(next_batch[1], shape=[-1, 12])\n images = tf.reshape(next_batch[2], shape=[-1, 224, 298, 3])\n acoustic = tf.reshape(next_batch[0], shape=[-1, 36, 48, 12])\n\n # mfcc = mfcc - tf.reduce_min(mfcc, axis=[1], keep_dims=True)\n # mfcc = mfcc / tf.reduce_max(mfcc, axis=[1], keep_dims=True)\n\n mfccmap = tf.reshape(mfcc, (-1, 1, 12))\n mfccmap = tf.tile(mfccmap, (1, 36 * 48, 1))\n mfccmap = tf.reshape(mfccmap, (-1, 36, 48, 12))\n\n model_video._build_model(images)\n model._build_model(mfccmap, model_video.output)\n\n output = model.output\n var_list1 = slim.get_variables(model_video.scope + '/')\n var_list2 = slim.get_variables(model.scope + '/')\n var_list = var_list2 + var_list1\n\n if os.path.exists(data_dir):\n 
print(\"Features already computed!\")\n else:\n os.makedirs(data_dir) # mkdir creates one directory, makedirs all intermediate directories\n\n total_size = 0\n batch_count = 0\n num = 0\n print('{} - Starting'.format(datetime.now()))\n\n namesimage = ['Acoustic image', 'Reconstructed']\n\n with tf.Session(\n config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True))) as session:\n train_handle = session.run(train_iterat.string_handle())\n # Initialize student model\n if FLAGS.init_checkpoint is None:\n print('{} - Initializing student model'.format(datetime.now()))\n model.init_model(session, FLAGS.init_checkpoint)\n print('{} - Done'.format(datetime.now()))\n else:\n print('{} - Restoring student model'.format(datetime.now()))\n saver = tf.train.Saver(var_list=var_list)\n saver.restore(session, FLAGS.init_checkpoint)\n print('{} - Done'.format(datetime.now()))\n #variables_in_checkpoint = tf.train.list_variables(FLAGS.init_checkpoint)\n session.run(train_iterat.initializer)\n while True:\n try:\n data, reconstructed = session.run(\n [acoustic, output],\n feed_dict={handle: train_handle,\n model.network['keep_prob']: 1.0,\n model.network['is_training']: 0,\n model_video.network['keep_prob']: 1.0,\n model_video.network['is_training']: 0\n })\n total_size += reconstructed.shape[0]\n\n for h in range(np.shape(reconstructed)[0]):\n # original and reconstructed\n fig, axs = plt.subplots(4, 2, figsize=(6, 2.9 * 4))\n plt.tight_layout(pad=1.0)\n fig.suptitle('Reconstructed image')\n imagesvideo = np.stack((data, reconstructed), 0)\n for i in range(2):\n for j in range(4):\n x = j\n y = i\n axs[x, y].imshow(imagesvideo[i, h, :, :, j * 3:(j + 1) * 3])\n axs[x, y].axis('off')\n axs[x, y].set_title('{}'.format(namesimage[i]))\n outImage_path = '{}/{}_images_{}.png'.format(data_dir, dataset, num)\n plt.savefig(outImage_path)\n plt.clf()\n num = num + 1\n print(total_size)\n except tf.errors.OutOfRangeError:\n break\n batch_count += 1\n print('{} - Completed, got {} samples'.format(datetime.now(), total_size))\n\nif __name__ == '__main__':\n flags.mark_flags_as_required(['train_file'])\n tf.app.run()\n"
] | [
[
"tensorflow.data.Iterator.from_string_handle",
"matplotlib.pyplot.savefig",
"tensorflow.train.Saver",
"tensorflow.reshape",
"matplotlib.pyplot.subplots",
"numpy.shape",
"tensorflow.device",
"tensorflow.placeholder",
"matplotlib.pyplot.tight_layout",
"numpy.stack",
"tensorflow.tile",
"matplotlib.pyplot.clf",
"tensorflow.app.run",
"tensorflow.GPUOptions"
]
] |
PrinceVictor/PMHT | [
"00ced4148e356a78bc86c835195d0030cd3e6890"
] | [
"utils/preprocess.py"
] | [
"import pandas as pd\nimport numpy as np\nimport glob\nimport os\nimport sys\nimport time\nimport argparse\nimport json\n\ndef cvtDP_Txt2CSV(source_dir, output_dir, save_csv = False, save_json=True):\n raw_data = pd.read_csv(os.path.join(source_dir, \"raw.txt\"), header=None, index_col=None)\n raw_csv = format_dp_data(raw_data)\n # dp_data.sort_values(by=[\"frame\", \"yaw\"], inplace=True, ascending=True)\n raw_csv.sort_values(by=[\"circle\", \"yaw\"], inplace=True, ascending=True)\n raw_csv.reset_index(inplace=True, drop=True)\n\n raw_csv[\"yaw\"] = np.deg2rad(raw_csv[\"yaw\"])\n raw_csv[\"pitch\"] = np.deg2rad(raw_csv[\"pitch\"])\n\n raw_csv[\"time\"] = raw_csv[\"time\"] - raw_csv[\"time\"].iloc[0]\n raw_csv[\"frame\"] = raw_csv[\"frame\"] - raw_csv[\"frame\"].iloc[0]\n\n print(raw_csv.iloc[0:2])\n\n if save_csv:\n # data_output_dir = output_dir+\"source/\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n raw_csv.to_csv(os.path.join(output_dir, \"raw.csv\"), index=0)\n\n if save_json:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n raw_json = raw_csv.to_json(orient=\"records\", indent=1)\n with open(os.path.join(output_dir, \"raw.json\"), 'w', encoding='utf-8') as json_file:\n json_file.write(raw_json)\n\ndef load_dp(src_file):\n\n src_data = pd.read_csv(src_file, index_col=None, header=0)\n # print(src_data[0:20])\n print(\"loading data length {}\".format(len(src_data)))\n\n return src_data\n\ndef format_dp_data(input, to_numpy=True):\n columns = [\"yaw\",\n \"dist\",\n \"dist_gate\",\n \"time\",\n \"pitch\",\n \"frame\",\n \"circle\",\n \"confidence\"]\n\n if to_numpy:\n\n input = input.to_numpy()\n _, columns_size = input.shape\n if columns_size > 8:\n input = input[:, :8]\n\n return pd.DataFrame(data=input, columns=columns)\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='raw data preprocess')\n parser.add_argument('--src-data', type=str, default=\"data/raw/narrow_data\",\n help='source file path')\n parser.add_argument('--output-data', type=str, default=\"data/preprocessed\",\n help='source file path')\n args = parser.parse_args()\n\n print(\"this is preprocess!!!\")\n\n # source_dir = \"source_data/\"\n # output_dir = \"result/\"\n source_dir = args.src_data\n dir_name = source_dir.split(\"/\")[-1]\n output_dir = os.path.join(args.output_data, dir_name)\n\n if os.path.isdir(output_dir) is not True:\n print(\"make new dir {}\".format(output_dir))\n os.makedirs(output_dir)\n\n cvtDP_Txt2CSV(source_dir, output_dir, save_csv=True, save_json=False)"
] | [
[
"numpy.deg2rad",
"pandas.DataFrame",
"pandas.read_csv"
]
] |
ellequelle/pandas | [
"cea27b47b2b1ac804463e70d98443be3450688b0"
] | [
"pandas/tests/indexes/test_numpy_compat.py"
] | [
"import numpy as np\nimport pytest\n\nfrom pandas import (\n CategoricalIndex,\n DatetimeIndex,\n Index,\n NumericIndex,\n PeriodIndex,\n TimedeltaIndex,\n isna,\n)\nimport pandas._testing as tm\nfrom pandas.core.api import Float64Index\nfrom pandas.core.arrays import BooleanArray\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\n\n\[email protected](\n \"func\",\n [\n np.exp,\n np.exp2,\n np.expm1,\n np.log,\n np.log2,\n np.log10,\n np.log1p,\n np.sqrt,\n np.sin,\n np.cos,\n np.tan,\n np.arcsin,\n np.arccos,\n np.arctan,\n np.sinh,\n np.cosh,\n np.tanh,\n np.arcsinh,\n np.arccosh,\n np.arctanh,\n np.deg2rad,\n np.rad2deg,\n ],\n ids=lambda x: x.__name__,\n)\ndef test_numpy_ufuncs_basic(index, func):\n # test ufuncs of numpy, see:\n # https://numpy.org/doc/stable/reference/ufuncs.html\n\n if isinstance(index, DatetimeIndexOpsMixin):\n with tm.external_error_raised((TypeError, AttributeError)):\n with np.errstate(all=\"ignore\"):\n func(index)\n elif isinstance(index, NumericIndex) or (\n not isinstance(index.dtype, np.dtype) and index.dtype._is_numeric\n ):\n # coerces to float (e.g. np.sin)\n with np.errstate(all=\"ignore\"):\n result = func(index)\n exp = Index(func(index.values), name=index.name)\n\n tm.assert_index_equal(result, exp)\n if type(index) is not Index:\n # i.e NumericIndex\n assert isinstance(result, Float64Index)\n else:\n # e.g. np.exp with Int64 -> Float64\n assert type(result) is Index\n else:\n # raise AttributeError or TypeError\n if len(index) == 0:\n pass\n else:\n with tm.external_error_raised((TypeError, AttributeError)):\n with np.errstate(all=\"ignore\"):\n func(index)\n\n\[email protected](\n \"func\", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__\n)\ndef test_numpy_ufuncs_other(index, func, request):\n # test ufuncs of numpy, see:\n # https://numpy.org/doc/stable/reference/ufuncs.html\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n\n if func in (np.isfinite, np.isinf, np.isnan):\n # numpy 1.18 changed isinf and isnan to not raise on dt64/td64\n result = func(index)\n assert isinstance(result, np.ndarray)\n else:\n with tm.external_error_raised(TypeError):\n func(index)\n\n elif isinstance(index, PeriodIndex):\n with tm.external_error_raised(TypeError):\n func(index)\n\n elif isinstance(index, NumericIndex) or (\n not isinstance(index.dtype, np.dtype) and index.dtype._is_numeric\n ):\n # Results in bool array\n result = func(index)\n if not isinstance(index.dtype, np.dtype):\n # e.g. 
Int64 we expect to get BooleanArray back\n assert isinstance(result, BooleanArray)\n else:\n assert isinstance(result, np.ndarray)\n assert not isinstance(result, Index)\n else:\n if len(index) == 0:\n pass\n else:\n with tm.external_error_raised(TypeError):\n func(index)\n\n\[email protected](\"func\", [np.maximum, np.minimum])\ndef test_numpy_ufuncs_reductions(index, func, request):\n # TODO: overlap with tests.series.test_ufunc.test_reductions\n if len(index) == 0:\n return\n\n if repr(index.dtype) == \"string[pyarrow]\":\n mark = pytest.mark.xfail(reason=\"ArrowStringArray has no min/max\")\n request.node.add_marker(mark)\n\n if isinstance(index, CategoricalIndex) and index.dtype.ordered is False:\n with pytest.raises(TypeError, match=\"is not ordered for\"):\n func.reduce(index)\n return\n else:\n result = func.reduce(index)\n\n if func is np.maximum:\n expected = index.max(skipna=False)\n else:\n expected = index.min(skipna=False)\n # TODO: do we have cases both with and without NAs?\n\n assert type(result) is type(expected)\n if isna(result):\n assert isna(expected)\n else:\n assert result == expected\n"
] | [
[
"pandas._testing.external_error_raised",
"pandas.isna",
"numpy.errstate",
"pandas._testing.assert_index_equal"
]
] |
Ramzesovich66/CarND-Advanced-Lane-Lines-P2 | [
"30d445cbf5ed2b07b1fdf004a45d3624bf54c0ee"
] | [
"source_code/binary_image.py"
] | [
"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n# Import configuration parameters\nimport config as cfg\n\n# Defines a function that applies Sobel x or y, then takes an absolute value and applies a threshold.\n# 1) Take the derivative in x or y given orient = 'x' or 'y'\n# 2) Take the absolute value of the derivative or gradient\n# 3) Scale to 8-bit (0 - 255) then convert to type = np.uint8\n# 4) Create a mask of 1's where the scaled gradient magnitude\n # is > thresh_min and < thresh_max\n# 5) Return this mask as your binary_output image\ndef abs_sobel_thresh(img, orient='x'):\n # Apply x or y gradient with the OpenCV Sobel() function\n # and take the absolute value\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=cfg.sobel_kernel_size))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=cfg.sobel_kernel_size))\n # Rescale back to 8 bit integer\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n # Create a copy and apply the threshold\n binary_output = np.zeros_like(scaled_sobel)\n # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too\n binary_output[(scaled_sobel >= cfg.sxy_thresh[0]) & (scaled_sobel <= cfg.sxy_thresh[1])] = 1\n\n # Return the result\n return binary_output\n\n# Computes and applies perpective transform\ndef warper(img):\n # Compute perpective transform\n img_size = (img.shape[1], img.shape[0])\n M = cv2.getPerspectiveTransform(cfg.perspective_transform_src, cfg.perspective_transform_dst)\n # Apply perpective transform\n warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image\n # Return the result\n return warped\n\n# Computes and applies perpective transform\ndef unwarper(img):\n # Compute perpective transform\n img_size = (img.shape[1], img.shape[0])\n Minv = cv2.getPerspectiveTransform(cfg.perspective_transform_dst, cfg.perspective_transform_src)\n # Apply perpective transform\n unwarped = cv2.warpPerspective(img, Minv, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image\n # Return the result\n return unwarped\n\n\n# This is the main function in extracting yellow and white line pixels\ndef binary_image(img):\n img = np.copy(img)\n\n # Convert an image into HLS color space\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n # Separate the L, and S channels\n l_channel = hls[:, :, 1]\n s_channel = hls[:, :, 2]\n\n # Calculate directional gradient in order to extract white line (step 1.1)\n gradx = abs_sobel_thresh(l_channel, orient='x')\n\n # Yellow line (Step 2.1)\n s_binary = np.zeros_like(s_channel)\n # Perform color thresholding on an S channel to extract yellow line\n s_binary[(s_channel >= cfg.s_thresh[0]) & (s_channel <= cfg.s_thresh[1])] = 1\n\n # Stack each channel\n if cfg.algo_version == 1:\n # Additionally Extract white line by converting an image into gray scale and then do color thresholding (step 1.2)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, binary = cv2.threshold(gray, thresh=210, maxval=255, type=cv2.THRESH_BINARY)\n combined_white = np.zeros_like(gradx)\n # Now, 'AND' step 1.1 and step 1.2 for the best white line extraction\n #combined_white[((gradx == 1) & (binary == 255))] = 1\n combined_white[((binary == 255))] = 1\n # Yellow line (step 2.2). 
Convert an image into hsv color space and do color thresholding\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n hsv_min_yellow = np.all(hsv > np.array([0, 100, 100]), axis=2)\n hsv_max_yellow = np.all(hsv < np.array([40, 255, 255]), axis=2)\n hsv_yellow_bin = hsv_min_yellow & hsv_max_yellow\n combined_yellow = np.zeros_like(gradx)\n #combined_yellow[((s_binary == 1) & (hsv_yellow_bin == 1))] = 1\n combined_yellow[((hsv_min_yellow == 1))] = 1\n # Create colorized binary image in order to see yellow and white lines separately from each other\n color_binary = np.dstack((np.zeros_like(s_binary), combined_white, combined_yellow)) * 255\n # Combine both white and yellow lines\n binary_img = combined_white | combined_yellow\n else:\n color_binary = np.dstack((np.zeros_like(s_binary), gradx, s_binary)) * 255\n # Combine both white and yellow lines\n binary_img = gradx | s_binary\n\n # It is useful in closing small holes inside the foreground objects, or small black points on the object.\n if cfg.morphologyex_on:\n kernel = np.ones((3, 3), np.uint8)\n color_binary = cv2.morphologyEx(color_binary.astype(np.uint8), cv2.MORPH_CLOSE, kernel)\n\n return color_binary, binary_img\n\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.zeros_like",
"numpy.copy",
"numpy.ones"
]
] |
Ziems/OBST | [
"e31f460616d8bc29931f069843e4f94b7f38e260"
] | [
"tests/backend.py"
] | [
"import typing\n\nimport mesh_tensorflow as mtf\nimport numpy as np\nimport tensorflow as tf\n\nfrom src.dataclass import BlockArgs, ModelParameter\n\ntf1 = tf.compat.v1\n\ntf1.disable_v2_behavior()\n\nRELU_STD = 1 / 1.42\n\n\nclass BaseTest:\n def __init__(self,\n *args,\n mesh_shape: typing.Union[None, list, str] = None,\n layout_rules: typing.Union[None, list, str] = None,\n devices: typing.Union[None, typing.List[str]] = None,\n **kwargs):\n self.mesh_shape = [] if mesh_shape is None else mesh_shape\n self.layout_rules = [] if layout_rules is None else layout_rules\n self.devices = [\"cpu:0\"] if devices is None else devices\n\n self.session_config = tf1.ConfigProto()\n self.session_config.allow_soft_placement = True\n\n def _close_session(self):\n default_session = tf1.get_default_session()\n if default_session is not None:\n default_session.close()\n\n def build(self, graph: mtf.Graph, mesh: mtf.Mesh,\n *args, **kwargs) -> typing.Tuple[typing.List[mtf.Tensor], typing.Any]:\n pass\n\n def run(self, sess: tf1.Session, outputs: typing.List[tf.Tensor], args: typing.Any) -> None:\n pass\n\n def __call__(self, *args, **kwargs) -> None:\n self._close_session()\n\n with tf.Graph().as_default() as tf_graph, tf1.Session(config=self.session_config, graph=tf_graph) as sess:\n graph = mtf.Graph()\n mesh = mtf.Mesh(graph, \"MESH\")\n\n outputs, args = self.build(graph, mesh, *args, **kwargs)\n\n mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(self.mesh_shape, self.layout_rules, self.devices)\n lowering = mtf.Lowering(graph, {mesh: mesh_impl})\n\n outputs = [lowering.export_to_tf_tensor(output) for output in outputs]\n\n sess.run(tf1.global_variables_initializer())\n sess.run(lowering.copy_masters_to_slices())\n\n self.run(sess, outputs, args)\n\n\nclass OperationTest(BaseTest):\n def __init__(self, **kwargs):\n super(OperationTest, self).__init__(**kwargs)\n params = ModelParameter(kwargs)\n self.fp16 = \"16\" in (kwargs['calculation_dtype'] + kwargs['slice_dtype'] + kwargs['storage_dtype'])\n self.args = BlockArgs(params, None, [''])\n self.args.params.layout = self.layout_rules\n self.args.params.mesh_shape = self.mesh_shape\n self.tolerance = 1 / (params.train_batch_size * params.sequence_length * params.features) ** (0.05 if self.fp16 else 1 / 3)\n\n def _build(self, inp: mtf.Tensor) -> mtf.Tensor:\n pass\n\n def _run(self, out: np.array) -> None:\n pass\n\n def _is_close(self, x: np.array, y: np.array, rtol: float = 1e-3):\n assert np.isclose(x, y, rtol, self.tolerance)\n\n def build(self, graph: mtf.Graph, mesh: mtf.Mesh,\n *args, **kwargs) -> typing.Tuple[typing.List[mtf.Tensor], typing.Any]:\n params = self.args.params\n params.mesh = mesh\n params.graph = graph\n inp = mtf.random_normal(mesh, [params.batch_dim, params.sequence_dim] + params.feature_dims,\n dtype=params.variable_dtype.activation_dtype)\n\n return [self._build(inp)], None\n\n def run(self, sess: tf1.Session, outputs: typing.List[tf.Tensor], args: typing.Any) -> None:\n self._run(sess.run(outputs)[0])\n\n\ndef curry_class(base: typing.Type, **kwargs) -> typing.Callable:\n def _fn(**kw):\n return base(**kw, **kwargs)\n\n _fn.__name__ = f'{base.__name__}({\",\".join(f\"{k}={v}\" for k, v in kwargs.items())})'\n return _fn\n"
] | [
[
"tensorflow.Graph",
"numpy.isclose"
]
] |